ghc-lib-parser-9.4.7.20230826/compiler/0000755000000000000000000000000014472400056015241 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/0000755000000000000000000000000014472377771015663 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/0000755000000000000000000000000014472377770017270 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/PrimOps/0000755000000000000000000000000014472377770020661 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Types/0000755000000000000000000000000014472377770020374 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/ByteCode/0000755000000000000000000000000014472377770017360 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/0000755000000000000000000000000014472377770016376 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Dataflow/0000755000000000000000000000000014472377770020137 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/CmmToAsm/0000755000000000000000000000000014472377770017342 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/CmmToAsm/CFG/0000755000000000000000000000000014472377770017741 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/0000755000000000000000000000000014472377770016552 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Coercion/0000755000000000000000000000000014472377770020313 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Map/0000755000000000000000000000000014472377770017267 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/0000755000000000000000000000000014472377770017314 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/0000755000000000000000000000000014472377770017430 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCon/0000755000000000000000000000000014472377770017606 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Unfold/0000755000000000000000000000000014472377770020001 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/0000755000000000000000000000000014472377770016533 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/FastString/0000755000000000000000000000000014472377770020617 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Graph/0000755000000000000000000000000014472377770017574 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/List/0000755000000000000000000000000014472377770017446 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/0000755000000000000000000000000014472377770017115 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Backpack/0000755000000000000000000000000014472377770020614 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Config/0000755000000000000000000000000014472377770020322 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Env/0000755000000000000000000000000014472377770017645 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Errors/0000755000000000000000000000000014472377770020371 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Pipeline/0000755000000000000000000000000014472377770020662 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/0000755000000000000000000000000014472377770016234 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/0000755000000000000000000000000014472377770017350 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Errors/0000755000000000000000000000000014472377770020624 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Pmc/0000755000000000000000000000000014472377770020067 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Pmc/Solver/0000755000000000000000000000000014472377770021341 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/0000755000000000000000000000000014472377770016671 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Ext/0000755000000000000000000000000014472377770017431 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Recomp/0000755000000000000000000000000014472377770020116 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Linker/0000755000000000000000000000000014472377770017106 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Linker/Static/0000755000000000000000000000000014472377770020335 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/0000755000000000000000000000000014472377770017116 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Errors/0000755000000000000000000000000014472377770020372 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/PostProcess/0000755000000000000000000000000014472377770021402 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/0000755000000000000000000000000014472377771017447 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/Reg/0000755000000000000000000000000014472377771020164 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/0000755000000000000000000000000014472377771017306 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Eval/0000755000000000000000000000000014472377771020175 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Heap/0000755000000000000000000000000014472377771020163 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Interpreter/0000755000000000000000000000000014472377771021611 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Settings/0000755000000000000000000000000014472377771017463 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Stg/0000755000000000000000000000000014472377771016420 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Stg/InferTags/0000755000000000000000000000000014472377771020302 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/StgToCmm/0000755000000000000000000000000014472377771017360 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/SysTools/0000755000000000000000000000000014472377771017462 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/0000755000000000000000000000000014472377771016231 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Errors/0000755000000000000000000000000014472377771017505 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Errors/Hole/0000755000000000000000000000000014472377771020374 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Solver/0000755000000000000000000000000014472377771017503 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types/0000755000000000000000000000000014472377771017335 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Utils/0000755000000000000000000000000014472377771017331 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/0000755000000000000000000000000014472377771016767 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/CostCentre/0000755000000000000000000000000014472377771021040 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Fixity/0000755000000000000000000000000014472377771020243 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Hint/0000755000000000000000000000000014472377771017671 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id/0000755000000000000000000000000014472377771017323 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/0000755000000000000000000000000014472377771017647 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/0000755000000000000000000000000014472377771020235 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Var/0000755000000000000000000000000014472377771017517 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/0000755000000000000000000000000014472377771016602 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Finder/0000755000000000000000000000000014472377771020011 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Home/0000755000000000000000000000000014472377771017472 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/0000755000000000000000000000000014472377771020027 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/0000755000000000000000000000000014472377771016763 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Binary/0000755000000000000000000000000014472377771020207 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/IO/0000755000000000000000000000000014472377771017272 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Monad/0000755000000000000000000000000014472375230020005 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Monad/State/0000755000000000000000000000000014472377771021101 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Panic/0000755000000000000000000000000014472377771020015 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Ppr/0000755000000000000000000000000014472377771017524 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/Language/0000755000000000000000000000000014470055371016770 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/0000755000000000000000000000000014472377771020370 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/0000755000000000000000000000000014472377771021656 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/compiler/cbits/0000755000000000000000000000000014472377771016366 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/0000755000000000000000000000000014472400073014733 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/0000755000000000000000000000000014472400073016116 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/0000755000000000000000000000000014472400073017730 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/0000755000000000000000000000000014472400112021021 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/GHC/0000755000000000000000000000000014472400073021430 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/GHC/Platform/0000755000000000000000000000000014472400104023207 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/GHC/Settings/0000755000000000000000000000000014472400073023230 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/lib/0000755000000000000000000000000014472400073016664 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/0000755000000000000000000000000014472400073020072 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/ghc-boot/0000755000000000000000000000000014472400073021574 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/ghc-boot/build/0000755000000000000000000000000014472400073022673 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/ghc-boot/build/GHC/0000755000000000000000000000000014472400073023274 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/ghc-boot/build/GHC/Platform/0000755000000000000000000000000014472400073025060 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/rts/0000755000000000000000000000000014472400073016726 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/rts/build/0000755000000000000000000000000014472400073020025 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/rts/build/include/0000755000000000000000000000000014472400112021442 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/0000755000000000000000000000000014472400056015403 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/0000755000000000000000000000000014472400056017105 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/0000755000000000000000000000000014472400056017516 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/GHC/0000755000000000000000000000000014470055371020123 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/GHC/ForeignSrcLang/0000755000000000000000000000000014472375231022770 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/GHC/LanguageExtensions/0000755000000000000000000000000014472375231023730 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/0000755000000000000000000000000014472375231017514 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Data/0000755000000000000000000000000014472375231020365 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Platform/0000755000000000000000000000000014472375231021300 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Unit/0000755000000000000000000000000014470055371020431 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Utils/0000755000000000000000000000000014472375231020614 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/0000755000000000000000000000000014472400056017057 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/0000755000000000000000000000000014470055371017464 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/0000755000000000000000000000000014472375231020411 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/0000755000000000000000000000000014472377771021301 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/InfoTable/0000755000000000000000000000000014472367564023143 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/0000755000000000000000000000000014470055371023006 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/cbits/0000755000000000000000000000000014472377771020204 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghci/0000755000000000000000000000000014472400056016315 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/0000755000000000000000000000000014472377771017110 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/TH/0000755000000000000000000000000014472375231017410 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/0000755000000000000000000000000014472400056020637 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/0000755000000000000000000000000014470055371022366 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/0000755000000000000000000000000014472375231023753 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/0000755000000000000000000000000014472375231024266 
5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/Lib/0000755000000000000000000000000014472375231024774 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/rts/0000755000000000000000000000000014472400056014237 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/rts/include/0000755000000000000000000000000014472400012015652 5ustar0000000000000000ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/BaseDir.hs0000644000000000000000000000561614472375231021371 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | Note [Base Dir] -- ~~~~~~~~~~~~~~~~~ -- GHC's base directory or top directory containers miscellaneous settings and -- the package database. The main compiler of course needs this directory to -- read those settings and read and write packages. ghc-pkg uses it to find the -- global package database too. -- -- In the interest of making GHC builds more relocatable, many settings also -- will expand `${top_dir}` inside strings so GHC doesn't need to know it's on -- installation location at build time. ghc-pkg also can expand those variables -- and so needs the top dir location to do that too. module GHC.BaseDir where import Prelude -- See Note [Why do we import Prelude here?] import Data.List (stripPrefix) import System.FilePath -- Windows #if defined(mingw32_HOST_OS) import System.Environment (getExecutablePath) -- POSIX #elif defined(darwin_HOST_OS) || defined(linux_HOST_OS) || defined(freebsd_HOST_OS) || defined(openbsd_HOST_OS) || defined(netbsd_HOST_OS) import System.Environment (getExecutablePath) #endif -- | Expand occurrences of the @$topdir@ interpolation in a string. expandTopDir :: FilePath -> String -> String expandTopDir = expandPathVar "topdir" -- | @expandPathVar var value str@ -- -- replaces occurrences of variable @$var@ with @value@ in str. 
expandPathVar :: String -> FilePath -> String -> String expandPathVar var value str | Just str' <- stripPrefix ('$':var) str , null str' || isPathSeparator (head str') = value ++ expandPathVar var value str' expandPathVar var value (x:xs) = x : expandPathVar var value xs expandPathVar _ _ [] = [] -- | Calculate the location of the base dir getBaseDir :: IO (Maybe String) #if defined(mingw32_HOST_OS) getBaseDir = Just . (\p -> p "lib") . rootDir <$> getExecutablePath where -- locate the "base dir" when given the path -- to the real ghc executable (as opposed to symlink) -- that is running this function. rootDir :: FilePath -> FilePath rootDir = takeDirectory . takeDirectory . normalise #elif defined(darwin_HOST_OS) || defined(linux_HOST_OS) || defined(freebsd_HOST_OS) || defined(openbsd_HOST_OS) || defined(netbsd_HOST_OS) -- on unix, this is a bit more confusing. -- The layout right now is something like -- -- /bin/ghc-X.Y.Z <- wrapper script (1) -- /bin/ghc <- symlink to wrapper script (2) -- /lib/ghc-X.Y.Z/bin/ghc <- ghc executable (3) -- /lib/ghc-X.Y.Z <- $topdir (4) -- -- As such, we first need to find the absolute location to the -- binary. -- -- getExecutablePath will return (3). One takeDirectory will -- give use /lib/ghc-X.Y.Z/bin, and another will give us (4). -- -- This of course only works due to the current layout. If -- the layout is changed, such that we have ghc-X.Y.Z/{bin,lib} -- this would need to be changed accordingly. -- getBaseDir = Just . (\p -> p "lib") . takeDirectory . takeDirectory <$> getExecutablePath #else getBaseDir = return Nothing #endif ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Names.hs0000644000000000000000000035002014472400112020640 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[GHC.Builtin.Names]{Definitions of prelude modules and names} Nota Bene: all Names defined in here should come from the base package - ModuleNames for prelude modules, e.g. 
pREL_BASE_Name :: ModuleName - Modules for prelude modules e.g. pREL_Base :: Module - Uniques for Ids, DataCons, TyCons and Classes that the compiler "knows about" in some way e.g. intTyConKey :: Unique minusClassOpKey :: Unique - Names for Ids, DataCons, TyCons and Classes that the compiler "knows about" in some way e.g. intTyConName :: Name minusName :: Name One of these Names contains (a) the module and occurrence name of the thing (b) its Unique The way the compiler "knows about" one of these things is where the type checker or desugarer needs to look it up. For example, when desugaring list comprehensions the desugarer needs to conjure up 'foldr'. It does this by looking up foldrName in the environment. - RdrNames for Ids, DataCons etc that the compiler may emit into generated code (e.g. for deriving). It's not necessary to know the uniques for these guys, only their names Note [Known-key names] ~~~~~~~~~~~~~~~~~~~~~~ It is *very* important that the compiler gives wired-in things and things with "known-key" names the correct Uniques wherever they occur. We have to be careful about this in exactly two places: 1. When we parse some source code, renaming the AST better yield an AST whose Names have the correct uniques 2. When we read an interface file, the read-in gubbins better have the right uniques This is accomplished through a combination of mechanisms: 1. When parsing source code, the RdrName-decorated AST has some RdrNames which are Exact. These are wired-in RdrNames where the we could directly tell from the parsed syntax what Name to use. For example, when we parse a [] in a type we can just insert an Exact RdrName Name with the listTyConKey. Currently, I believe this is just an optimisation: it would be equally valid to just output Orig RdrNames that correctly record the module etc we expect the final Name to come from. However, were we to eliminate isBuiltInOcc_maybe it would become essential (see point 3). 2. 
The knownKeyNames (which consist of the basicKnownKeyNames from the module, and those names reachable via the wired-in stuff from GHC.Builtin.Types) are used to initialise the "OrigNameCache" in GHC.Iface.Env. This initialization ensures that when the type checker or renamer (both of which use GHC.Iface.Env) look up an original name (i.e. a pair of a Module and an OccName) for a known-key name they get the correct Unique. This is the most important mechanism for ensuring that known-key stuff gets the right Unique, and is why it is so important to place your known-key names in the appropriate lists. 3. For "infinite families" of known-key names (i.e. tuples and sums), we have to be extra careful. Because there are an infinite number of these things, we cannot add them to the list of known-key names used to initialise the OrigNameCache. Instead, we have to rely on never having to look them up in that cache. See Note [Infinite families of known-key names] for details. Note [Infinite families of known-key names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Infinite families of known-key things (e.g. tuples and sums) pose a tricky problem: we can't add them to the knownKeyNames finite map which we use to ensure that, e.g., a reference to (,) gets assigned the right unique (if this doesn't sound familiar see Note [Known-key names] above). We instead handle tuples and sums separately from the "vanilla" known-key things, a) The parser recognises them specially and generates an Exact Name (hence not looked up in the orig-name cache) b) The known infinite families of names are specially serialised by GHC.Iface.Binary.putName, with that special treatment detected when we read back to ensure that we get back to the correct uniques. See Note [Symbol table representation of names] in GHC.Iface.Binary and Note [How tuples work] in GHC.Builtin.Types. 
Most of the infinite families cannot occur in source code, so mechanisms (a) and (b) suffice to ensure that they always have the right Unique. In particular, implicit param TyCon names, constraint tuples and Any TyCons cannot be mentioned by the user. For those things that *can* appear in source programs, c) GHC.Iface.Env.lookupOrigNameCache uses isBuiltInOcc_maybe to map built-in syntax directly onto the corresponding name, rather than trying to find it in the original-name cache. See also Note [Built-in syntax and the OrigNameCache] Note that one-tuples are an exception to the rule, as they do get assigned known keys. See Note [One-tuples] (Wrinkle: Make boxed one-tuple names have known keys) in GHC.Builtin.Types. -} {-# LANGUAGE CPP #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Builtin.Names ( Unique, Uniquable(..), hasKey, -- Re-exported for convenience ----------------------------------------------------------- module GHC.Builtin.Names, -- A huge bunch of (a) Names, e.g. intTyConName -- (b) Uniques e.g. intTyConKey -- (c) Groups of classes and types -- (d) miscellaneous things -- So many that we export them all ) where import GHC.Prelude import GHC.Unit.Types import GHC.Unit.Module.Name import GHC.Types.Name.Occurrence import GHC.Types.Name.Reader import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Data.FastString {- ************************************************************************ * * allNameStrings * * ************************************************************************ -} allNameStrings :: [String] -- Infinite list of a,b,c...z, aa, ab, ac, ... 
etc allNameStrings = [ c:cs | cs <- "" : allNameStrings, c <- ['a'..'z'] ] {- ************************************************************************ * * \subsection{Local Names} * * ************************************************************************ This *local* name is used by the interactive stuff -} itName :: Unique -> SrcSpan -> Name itName uniq loc = mkInternalName uniq (mkOccNameFS varName (fsLit "it")) loc -- mkUnboundName makes a place-holder Name; it shouldn't be looked at except possibly -- during compiler debugging. mkUnboundName :: OccName -> Name mkUnboundName occ = mkInternalName unboundKey occ noSrcSpan isUnboundName :: Name -> Bool isUnboundName name = name `hasKey` unboundKey {- ************************************************************************ * * \subsection{Known key Names} * * ************************************************************************ This section tells what the compiler knows about the association of names with uniques. These ones are the *non* wired-in ones. The wired in ones are defined in GHC.Builtin.Types etc. -} basicKnownKeyNames :: [Name] -- See Note [Known-key names] basicKnownKeyNames = genericTyConNames ++ [ -- Classes. 
*Must* include: -- classes that are grabbed by key (e.g., eqClassKey) -- classes in "Class.standardClassKeys" (quite a few) eqClassName, -- mentioned, derivable ordClassName, -- derivable boundedClassName, -- derivable numClassName, -- mentioned, numeric enumClassName, -- derivable monadClassName, functorClassName, realClassName, -- numeric integralClassName, -- numeric fractionalClassName, -- numeric floatingClassName, -- numeric realFracClassName, -- numeric realFloatClassName, -- numeric dataClassName, isStringClassName, applicativeClassName, alternativeClassName, foldableClassName, traversableClassName, semigroupClassName, sappendName, monoidClassName, memptyName, mappendName, mconcatName, -- The IO type ioTyConName, ioDataConName, runMainIOName, runRWName, -- Type representation types trModuleTyConName, trModuleDataConName, trNameTyConName, trNameSDataConName, trNameDDataConName, trTyConTyConName, trTyConDataConName, -- Typeable typeableClassName, typeRepTyConName, someTypeRepTyConName, someTypeRepDataConName, kindRepTyConName, kindRepTyConAppDataConName, kindRepVarDataConName, kindRepAppDataConName, kindRepFunDataConName, kindRepTYPEDataConName, kindRepTypeLitSDataConName, kindRepTypeLitDDataConName, typeLitSortTyConName, typeLitSymbolDataConName, typeLitNatDataConName, typeLitCharDataConName, typeRepIdName, mkTrTypeName, mkTrConName, mkTrAppName, mkTrFunName, typeSymbolTypeRepName, typeNatTypeRepName, typeCharTypeRepName, trGhcPrimModuleName, -- KindReps for common cases starKindRepName, starArrStarKindRepName, starArrStarArrStarKindRepName, -- WithDict withDictClassName, -- Dynamic toDynName, -- Numeric stuff negateName, minusName, geName, eqName, mkRationalBase2Name, mkRationalBase10Name, -- Conversion functions rationalTyConName, ratioTyConName, ratioDataConName, fromRationalName, fromIntegerName, toIntegerName, toRationalName, fromIntegralName, realToFracName, -- Int# stuff divIntName, modIntName, -- String stuff fromStringName, -- Enum stuff 
enumFromName, enumFromThenName, enumFromThenToName, enumFromToName, -- Applicative stuff pureAName, apAName, thenAName, -- Functor stuff fmapName, -- Monad stuff thenIOName, bindIOName, returnIOName, failIOName, bindMName, thenMName, returnMName, joinMName, -- MonadFail monadFailClassName, failMName, -- MonadFix monadFixClassName, mfixName, -- Arrow stuff arrAName, composeAName, firstAName, appAName, choiceAName, loopAName, -- Ix stuff ixClassName, -- Show stuff showClassName, -- Read stuff readClassName, -- Stable pointers newStablePtrName, -- GHC Extensions groupWithName, considerAccessibleName, -- Strings and lists unpackCStringName, unpackCStringUtf8Name, unpackCStringAppendName, unpackCStringAppendUtf8Name, unpackCStringFoldrName, unpackCStringFoldrUtf8Name, cstringLengthName, -- Overloaded lists isListClassName, fromListName, fromListNName, toListName, -- Overloaded record dot, record update getFieldName, setFieldName, -- List operations concatName, filterName, mapName, zipName, foldrName, buildName, augmentName, appendName, -- FFI primitive types that are not wired-in. 
stablePtrTyConName, ptrTyConName, funPtrTyConName, int8TyConName, int16TyConName, int32TyConName, int64TyConName, word8TyConName, word16TyConName, word32TyConName, word64TyConName, -- Others otherwiseIdName, inlineIdName, eqStringName, assertName, assertErrorName, traceName, printName, dollarName, -- ghc-bignum integerFromNaturalName, integerToNaturalClampName, integerToNaturalThrowName, integerToNaturalName, integerToWordName, integerToIntName, integerToWord64Name, integerToInt64Name, integerFromWordName, integerFromWord64Name, integerFromInt64Name, integerAddName, integerMulName, integerSubName, integerNegateName, integerAbsName, integerPopCountName, integerQuotName, integerRemName, integerDivName, integerModName, integerDivModName, integerQuotRemName, integerEncodeFloatName, integerEncodeDoubleName, integerGcdName, integerLcmName, integerAndName, integerOrName, integerXorName, integerComplementName, integerBitName, integerTestBitName, integerShiftLName, integerShiftRName, naturalToWordName, naturalPopCountName, naturalShiftRName, naturalShiftLName, naturalAddName, naturalSubName, naturalSubThrowName, naturalSubUnsafeName, naturalMulName, naturalQuotRemName, naturalQuotName, naturalRemName, naturalAndName, naturalAndNotName, naturalOrName, naturalXorName, naturalTestBitName, naturalBitName, naturalGcdName, naturalLcmName, naturalLog2Name, naturalLogBaseWordName, naturalLogBaseName, naturalPowModName, naturalSizeInBaseName, bignatFromWordListName, bignatEqName, -- Float/Double integerToFloatName, integerToDoubleName, naturalToFloatName, naturalToDoubleName, rationalToFloatName, rationalToDoubleName, -- Other classes randomClassName, randomGenClassName, monadPlusClassName, -- Type-level naturals knownNatClassName, knownSymbolClassName, knownCharClassName, -- Overloaded labels fromLabelClassOpName, -- Implicit Parameters ipClassName, -- Overloaded record fields hasFieldClassName, -- Call Stacks callStackTyConName, emptyCallStackName, pushCallStackName, -- Source 
Locations srcLocDataConName, -- Annotation type checking toAnnotationWrapperName -- The SPEC type for SpecConstr , specTyConName -- The Either type , eitherTyConName, leftDataConName, rightDataConName -- The Void type , voidTyConName -- Plugins , pluginTyConName , frontendPluginTyConName -- Generics , genClassName, gen1ClassName , datatypeClassName, constructorClassName, selectorClassName -- Monad comprehensions , guardMName , liftMName , mzipName -- GHCi Sandbox , ghciIoClassName, ghciStepIoMName -- StaticPtr , makeStaticName , staticPtrTyConName , staticPtrDataConName, staticPtrInfoDataConName , fromStaticPtrName -- Fingerprint , fingerprintDataConName -- Custom type errors , errorMessageTypeErrorFamName , typeErrorTextDataConName , typeErrorAppendDataConName , typeErrorVAppendDataConName , typeErrorShowTypeDataConName -- Unsafe coercion proofs , unsafeEqualityProofName , unsafeEqualityTyConName , unsafeReflDataConName , unsafeCoercePrimName ] genericTyConNames :: [Name] genericTyConNames = [ v1TyConName, u1TyConName, par1TyConName, rec1TyConName, k1TyConName, m1TyConName, sumTyConName, prodTyConName, compTyConName, rTyConName, dTyConName, cTyConName, sTyConName, rec0TyConName, d1TyConName, c1TyConName, s1TyConName, repTyConName, rep1TyConName, uRecTyConName, uAddrTyConName, uCharTyConName, uDoubleTyConName, uFloatTyConName, uIntTyConName, uWordTyConName, prefixIDataConName, infixIDataConName, leftAssociativeDataConName, rightAssociativeDataConName, notAssociativeDataConName, sourceUnpackDataConName, sourceNoUnpackDataConName, noSourceUnpackednessDataConName, sourceLazyDataConName, sourceStrictDataConName, noSourceStrictnessDataConName, decidedLazyDataConName, decidedStrictDataConName, decidedUnpackDataConName, metaDataDataConName, metaConsDataConName, metaSelDataConName ] {- ************************************************************************ * * \subsection{Module names} * * ************************************************************************ 
--MetaHaskell Extension Add a new module here
-}

-- | The @Prelude@ module of the @base@ unit.
pRELUDE :: Module
pRELUDE = mkBaseModule_ pRELUDE_NAME

-- Modules that the compiler needs to name things in.  Each binding
-- pairs a module name with the unit it lives in, via the mk*Module
-- helpers defined at the end of this section.
gHC_PRIM, gHC_PRIM_PANIC, gHC_PRIM_EXCEPTION,
    gHC_TYPES, gHC_GENERICS, gHC_MAGIC, gHC_MAGIC_DICT,
    gHC_CLASSES, gHC_PRIMOPWRAPPERS, gHC_BASE, gHC_ENUM,
    gHC_GHCI, gHC_GHCI_HELPERS, gHC_CSTRING,
    gHC_SHOW, gHC_READ, gHC_NUM, gHC_MAYBE,
    gHC_NUM_INTEGER, gHC_NUM_NATURAL, gHC_NUM_BIGNAT,
    gHC_LIST, gHC_TUPLE, dATA_EITHER, dATA_VOID, dATA_LIST, dATA_STRING,
    dATA_FOLDABLE, dATA_TRAVERSABLE,
    gHC_CONC, gHC_IO, gHC_IO_Exception,
    gHC_ST, gHC_IX, gHC_STABLE, gHC_PTR, gHC_ERR, gHC_REAL,
    gHC_FLOAT, gHC_TOP_HANDLER, sYSTEM_IO, dYNAMIC,
    tYPEABLE, tYPEABLE_INTERNAL,
    gENERICS, rEAD_PREC, lEX, gHC_INT, gHC_WORD, mONAD, mONAD_FIX, mONAD_ZIP,
    mONAD_FAIL, aRROW, gHC_DESUGAR, rANDOM, gHC_EXTS, gHC_IS_LIST,
    cONTROL_EXCEPTION_BASE, gHC_TYPEERROR, gHC_TYPELITS, gHC_TYPELITS_INTERNAL,
    gHC_TYPENATS, gHC_TYPENATS_INTERNAL,
    dATA_COERCE, dEBUG_TRACE, uNSAFE_COERCE :: Module

-- ghc-prim modules
gHC_PRIM        = mkPrimModule (fsLit "GHC.Prim")   -- Primitive types and values
gHC_PRIM_PANIC  = mkPrimModule (fsLit "GHC.Prim.Panic")
gHC_PRIM_EXCEPTION = mkPrimModule (fsLit "GHC.Prim.Exception")
gHC_TYPES       = mkPrimModule (fsLit "GHC.Types")
gHC_MAGIC       = mkPrimModule (fsLit "GHC.Magic")
gHC_MAGIC_DICT  = mkPrimModule (fsLit "GHC.Magic.Dict")
gHC_CSTRING     = mkPrimModule (fsLit "GHC.CString")
gHC_CLASSES     = mkPrimModule (fsLit "GHC.Classes")
gHC_PRIMOPWRAPPERS = mkPrimModule (fsLit "GHC.PrimopWrappers")

-- base modules
gHC_BASE        = mkBaseModule (fsLit "GHC.Base")
gHC_ENUM        = mkBaseModule (fsLit "GHC.Enum")
gHC_GHCI        = mkBaseModule (fsLit "GHC.GHCi")
gHC_GHCI_HELPERS = mkBaseModule (fsLit "GHC.GHCi.Helpers")
gHC_SHOW        = mkBaseModule (fsLit "GHC.Show")
gHC_READ        = mkBaseModule (fsLit "GHC.Read")
gHC_NUM         = mkBaseModule (fsLit "GHC.Num")
gHC_MAYBE       = mkBaseModule (fsLit "GHC.Maybe")

-- ghc-bignum modules
gHC_NUM_INTEGER = mkBignumModule (fsLit "GHC.Num.Integer")
gHC_NUM_NATURAL = mkBignumModule (fsLit "GHC.Num.Natural")
gHC_NUM_BIGNAT  = mkBignumModule (fsLit "GHC.Num.BigNat")

gHC_LIST        = mkBaseModule (fsLit "GHC.List")
gHC_TUPLE       = mkPrimModule (fsLit "GHC.Tuple")
dATA_EITHER     = mkBaseModule (fsLit "Data.Either")
dATA_VOID       = mkBaseModule (fsLit "Data.Void")
dATA_LIST       = mkBaseModule (fsLit "Data.List")
dATA_STRING     = mkBaseModule (fsLit "Data.String")
dATA_FOLDABLE   = mkBaseModule (fsLit "Data.Foldable")
dATA_TRAVERSABLE = mkBaseModule (fsLit "Data.Traversable")
gHC_CONC        = mkBaseModule (fsLit "GHC.Conc")
gHC_IO          = mkBaseModule (fsLit "GHC.IO")
gHC_IO_Exception = mkBaseModule (fsLit "GHC.IO.Exception")
gHC_ST          = mkBaseModule (fsLit "GHC.ST")
gHC_IX          = mkBaseModule (fsLit "GHC.Ix")
gHC_STABLE      = mkBaseModule (fsLit "GHC.Stable")
gHC_PTR         = mkBaseModule (fsLit "GHC.Ptr")
gHC_ERR         = mkBaseModule (fsLit "GHC.Err")
gHC_REAL        = mkBaseModule (fsLit "GHC.Real")
gHC_FLOAT       = mkBaseModule (fsLit "GHC.Float")
gHC_TOP_HANDLER = mkBaseModule (fsLit "GHC.TopHandler")
sYSTEM_IO       = mkBaseModule (fsLit "System.IO")
dYNAMIC         = mkBaseModule (fsLit "Data.Dynamic")
tYPEABLE        = mkBaseModule (fsLit "Data.Typeable")
tYPEABLE_INTERNAL = mkBaseModule (fsLit "Data.Typeable.Internal")
gENERICS        = mkBaseModule (fsLit "Data.Data")
rEAD_PREC       = mkBaseModule (fsLit "Text.ParserCombinators.ReadPrec")
lEX             = mkBaseModule (fsLit "Text.Read.Lex")
gHC_INT         = mkBaseModule (fsLit "GHC.Int")
gHC_WORD        = mkBaseModule (fsLit "GHC.Word")
mONAD           = mkBaseModule (fsLit "Control.Monad")
mONAD_FIX       = mkBaseModule (fsLit "Control.Monad.Fix")
mONAD_ZIP       = mkBaseModule (fsLit "Control.Monad.Zip")
mONAD_FAIL      = mkBaseModule (fsLit "Control.Monad.Fail")
aRROW           = mkBaseModule (fsLit "Control.Arrow")
gHC_DESUGAR     = mkBaseModule (fsLit "GHC.Desugar")
rANDOM          = mkBaseModule (fsLit "System.Random")
gHC_EXTS        = mkBaseModule (fsLit "GHC.Exts")
gHC_IS_LIST     = mkBaseModule (fsLit "GHC.IsList")
cONTROL_EXCEPTION_BASE = mkBaseModule (fsLit "Control.Exception.Base")
gHC_GENERICS    = mkBaseModule (fsLit "GHC.Generics")
gHC_TYPEERROR   = mkBaseModule (fsLit "GHC.TypeError")
gHC_TYPELITS    = mkBaseModule (fsLit "GHC.TypeLits")
gHC_TYPELITS_INTERNAL = mkBaseModule (fsLit "GHC.TypeLits.Internal")
gHC_TYPENATS    = mkBaseModule (fsLit "GHC.TypeNats")
gHC_TYPENATS_INTERNAL = mkBaseModule (fsLit "GHC.TypeNats.Internal")
dATA_COERCE     = mkBaseModule (fsLit "Data.Coerce")
dEBUG_TRACE     = mkBaseModule (fsLit "Debug.Trace")
uNSAFE_COERCE   = mkBaseModule (fsLit "Unsafe.Coerce")

gHC_SRCLOC :: Module
gHC_SRCLOC = mkBaseModule (fsLit "GHC.SrcLoc")

gHC_STACK, gHC_STACK_TYPES :: Module
gHC_STACK       = mkBaseModule (fsLit "GHC.Stack")
gHC_STACK_TYPES = mkBaseModule (fsLit "GHC.Stack.Types")

gHC_STATICPTR :: Module
gHC_STATICPTR = mkBaseModule (fsLit "GHC.StaticPtr")

gHC_STATICPTR_INTERNAL :: Module
gHC_STATICPTR_INTERNAL = mkBaseModule (fsLit "GHC.StaticPtr.Internal")

gHC_FINGERPRINT_TYPE :: Module
gHC_FINGERPRINT_TYPE = mkBaseModule (fsLit "GHC.Fingerprint.Type")

gHC_OVER_LABELS :: Module
gHC_OVER_LABELS = mkBaseModule (fsLit "GHC.OverloadedLabels")

gHC_RECORDS :: Module
gHC_RECORDS = mkBaseModule (fsLit "GHC.Records")

rOOT_MAIN :: Module
rOOT_MAIN = mkMainModule (fsLit ":Main") -- Root module for initialisation

mkInteractiveModule :: Int -> Module
-- (mkInteractiveModule 9) makes module 'interactive:Ghci9'
mkInteractiveModule n = mkModule interactiveUnit (mkModuleName ("Ghci" ++ show n))

pRELUDE_NAME, mAIN_NAME :: ModuleName
pRELUDE_NAME = mkModuleNameFS (fsLit "Prelude")
mAIN_NAME    = mkModuleNameFS (fsLit "Main")

-- | A module in the @ghc-prim@ unit.
mkPrimModule :: FastString -> Module
mkPrimModule m = mkModule primUnit (mkModuleNameFS m)

-- | A module in the @ghc-bignum@ unit.
mkBignumModule :: FastString -> Module
mkBignumModule m = mkModule bignumUnit (mkModuleNameFS m)

-- | A module in the @base@ unit.
mkBaseModule :: FastString -> Module
mkBaseModule m = mkBaseModule_ (mkModuleNameFS m)

mkBaseModule_ :: ModuleName -> Module
mkBaseModule_ m = mkModule baseUnit m

-- | A module in the GHC unit itself.
mkThisGhcModule :: FastString -> Module
mkThisGhcModule m = mkThisGhcModule_ (mkModuleNameFS m)

mkThisGhcModule_ :: ModuleName -> Module
mkThisGhcModule_ m = mkModule thisGhcUnit m

-- | A module in the unit being compiled as the main program.
mkMainModule :: FastString -> Module
mkMainModule m = mkModule mainUnit (mkModuleNameFS m)
mkMainModule_ :: ModuleName -> Module
mkMainModule_ m = mkModule mainUnit m

{-
************************************************************************
*                                                                      *
                RdrNames
*                                                                      *
************************************************************************
-}

main_RDR_Unqual :: RdrName
main_RDR_Unqual = mkUnqual varName (fsLit "main")
        -- We definitely don't want an Orig RdrName, because
        -- main might, in principle, be imported into module Main

eq_RDR, ge_RDR, le_RDR, lt_RDR, gt_RDR, compare_RDR,
    ltTag_RDR, eqTag_RDR, gtTag_RDR :: RdrName
eq_RDR      = nameRdrName eqName
ge_RDR      = nameRdrName geName
le_RDR      = varQual_RDR gHC_CLASSES (fsLit "<=")
lt_RDR      = varQual_RDR gHC_CLASSES (fsLit "<")
gt_RDR      = varQual_RDR gHC_CLASSES (fsLit ">")
compare_RDR = varQual_RDR gHC_CLASSES (fsLit "compare")
ltTag_RDR   = nameRdrName ordLTDataConName
eqTag_RDR   = nameRdrName ordEQDataConName
gtTag_RDR   = nameRdrName ordGTDataConName

eqClass_RDR, numClass_RDR, ordClass_RDR, enumClass_RDR, monadClass_RDR :: RdrName
eqClass_RDR    = nameRdrName eqClassName
numClass_RDR   = nameRdrName numClassName
ordClass_RDR   = nameRdrName ordClassName
enumClass_RDR  = nameRdrName enumClassName
monadClass_RDR = nameRdrName monadClassName

map_RDR, append_RDR :: RdrName
map_RDR    = nameRdrName mapName
append_RDR = nameRdrName appendName

foldr_RDR, build_RDR, returnM_RDR, bindM_RDR, failM_RDR :: RdrName
foldr_RDR   = nameRdrName foldrName
build_RDR   = nameRdrName buildName
returnM_RDR = nameRdrName returnMName
bindM_RDR   = nameRdrName bindMName
failM_RDR   = nameRdrName failMName

left_RDR, right_RDR :: RdrName
left_RDR  = nameRdrName leftDataConName
right_RDR = nameRdrName rightDataConName

fromEnum_RDR, toEnum_RDR :: RdrName
fromEnum_RDR = varQual_RDR gHC_ENUM (fsLit "fromEnum")
toEnum_RDR   = varQual_RDR gHC_ENUM (fsLit "toEnum")

enumFrom_RDR, enumFromTo_RDR, enumFromThen_RDR, enumFromThenTo_RDR :: RdrName
enumFrom_RDR       = nameRdrName enumFromName
enumFromTo_RDR     = nameRdrName enumFromToName
enumFromThen_RDR   = nameRdrName enumFromThenName
enumFromThenTo_RDR = nameRdrName enumFromThenToName

ratioDataCon_RDR, integerAdd_RDR, integerMul_RDR :: RdrName
ratioDataCon_RDR = nameRdrName ratioDataConName
integerAdd_RDR   = nameRdrName integerAddName
integerMul_RDR   = nameRdrName integerMulName

ioDataCon_RDR :: RdrName
ioDataCon_RDR = nameRdrName ioDataConName

newStablePtr_RDR :: RdrName
newStablePtr_RDR = nameRdrName newStablePtrName

bindIO_RDR, returnIO_RDR :: RdrName
bindIO_RDR   = nameRdrName bindIOName
returnIO_RDR = nameRdrName returnIOName

fromInteger_RDR, fromRational_RDR, minus_RDR, times_RDR, plus_RDR :: RdrName
fromInteger_RDR  = nameRdrName fromIntegerName
fromRational_RDR = nameRdrName fromRationalName
minus_RDR        = nameRdrName minusName
times_RDR        = varQual_RDR gHC_NUM (fsLit "*")
plus_RDR         = varQual_RDR gHC_NUM (fsLit "+")

toInteger_RDR, toRational_RDR, fromIntegral_RDR :: RdrName
toInteger_RDR    = nameRdrName toIntegerName
toRational_RDR   = nameRdrName toRationalName
fromIntegral_RDR = nameRdrName fromIntegralName

fromString_RDR :: RdrName
fromString_RDR = nameRdrName fromStringName

fromList_RDR, fromListN_RDR, toList_RDR :: RdrName
fromList_RDR  = nameRdrName fromListName
fromListN_RDR = nameRdrName fromListNName
toList_RDR    = nameRdrName toListName

compose_RDR :: RdrName
compose_RDR = varQual_RDR gHC_BASE (fsLit ".")

not_RDR, dataToTag_RDR, succ_RDR, pred_RDR, minBound_RDR, maxBound_RDR,
    and_RDR, range_RDR, inRange_RDR, index_RDR,
    unsafeIndex_RDR, unsafeRangeSize_RDR :: RdrName
and_RDR             = varQual_RDR gHC_CLASSES (fsLit "&&")
not_RDR             = varQual_RDR gHC_CLASSES (fsLit "not")
dataToTag_RDR       = varQual_RDR gHC_PRIM (fsLit "dataToTag#")
succ_RDR            = varQual_RDR gHC_ENUM (fsLit "succ")
pred_RDR            = varQual_RDR gHC_ENUM (fsLit "pred")
minBound_RDR        = varQual_RDR gHC_ENUM (fsLit "minBound")
maxBound_RDR        = varQual_RDR gHC_ENUM (fsLit "maxBound")
range_RDR           = varQual_RDR gHC_IX (fsLit "range")
inRange_RDR         = varQual_RDR gHC_IX (fsLit "inRange")
index_RDR           = varQual_RDR gHC_IX (fsLit "index")
unsafeIndex_RDR     = varQual_RDR gHC_IX (fsLit "unsafeIndex")
unsafeRangeSize_RDR = varQual_RDR gHC_IX (fsLit "unsafeRangeSize")

readList_RDR, readListDefault_RDR, readListPrec_RDR, readListPrecDefault_RDR,
    readPrec_RDR, parens_RDR, choose_RDR, lexP_RDR, expectP_RDR :: RdrName
readList_RDR            = varQual_RDR gHC_READ (fsLit "readList")
readListDefault_RDR     = varQual_RDR gHC_READ (fsLit "readListDefault")
readListPrec_RDR        = varQual_RDR gHC_READ (fsLit "readListPrec")
readListPrecDefault_RDR = varQual_RDR gHC_READ (fsLit "readListPrecDefault")
readPrec_RDR            = varQual_RDR gHC_READ (fsLit "readPrec")
parens_RDR              = varQual_RDR gHC_READ (fsLit "parens")
choose_RDR              = varQual_RDR gHC_READ (fsLit "choose")
lexP_RDR                = varQual_RDR gHC_READ (fsLit "lexP")
expectP_RDR             = varQual_RDR gHC_READ (fsLit "expectP")

readField_RDR, readFieldHash_RDR, readSymField_RDR :: RdrName
readField_RDR     = varQual_RDR gHC_READ (fsLit "readField")
readFieldHash_RDR = varQual_RDR gHC_READ (fsLit "readFieldHash")
readSymField_RDR  = varQual_RDR gHC_READ (fsLit "readSymField")

punc_RDR, ident_RDR, symbol_RDR :: RdrName
punc_RDR   = dataQual_RDR lEX (fsLit "Punc")
ident_RDR  = dataQual_RDR lEX (fsLit "Ident")
symbol_RDR = dataQual_RDR lEX (fsLit "Symbol")

step_RDR, alt_RDR, reset_RDR, prec_RDR, pfail_RDR :: RdrName
step_RDR  = varQual_RDR rEAD_PREC (fsLit "step")
alt_RDR   = varQual_RDR rEAD_PREC (fsLit "+++")
reset_RDR = varQual_RDR rEAD_PREC (fsLit "reset")
prec_RDR  = varQual_RDR rEAD_PREC (fsLit "prec")
pfail_RDR = varQual_RDR rEAD_PREC (fsLit "pfail")

showsPrec_RDR, shows_RDR, showString_RDR,
    showSpace_RDR, showCommaSpace_RDR, showParen_RDR :: RdrName
showsPrec_RDR      = varQual_RDR gHC_SHOW (fsLit "showsPrec")
shows_RDR          = varQual_RDR gHC_SHOW (fsLit "shows")
showString_RDR     = varQual_RDR gHC_SHOW (fsLit "showString")
showSpace_RDR      = varQual_RDR gHC_SHOW (fsLit "showSpace")
showCommaSpace_RDR = varQual_RDR gHC_SHOW (fsLit "showCommaSpace")
showParen_RDR      = varQual_RDR gHC_SHOW (fsLit "showParen")

error_RDR :: RdrName
error_RDR = varQual_RDR gHC_ERR (fsLit "error")

-- Generics (constructors and functions)
u1DataCon_RDR, par1DataCon_RDR, rec1DataCon_RDR, k1DataCon_RDR, m1DataCon_RDR,
    l1DataCon_RDR, r1DataCon_RDR, prodDataCon_RDR, comp1DataCon_RDR,
    unPar1_RDR, unRec1_RDR, unK1_RDR, unComp1_RDR, from_RDR, from1_RDR,
    to_RDR, to1_RDR, datatypeName_RDR, moduleName_RDR, packageName_RDR,
    isNewtypeName_RDR, conName_RDR, conFixity_RDR, conIsRecord_RDR,
    selName_RDR, prefixDataCon_RDR, infixDataCon_RDR, leftAssocDataCon_RDR,
    rightAssocDataCon_RDR, notAssocDataCon_RDR, uAddrDataCon_RDR,
    uCharDataCon_RDR, uDoubleDataCon_RDR, uFloatDataCon_RDR, uIntDataCon_RDR,
    uWordDataCon_RDR, uAddrHash_RDR, uCharHash_RDR, uDoubleHash_RDR,
    uFloatHash_RDR, uIntHash_RDR, uWordHash_RDR :: RdrName
u1DataCon_RDR    = dataQual_RDR gHC_GENERICS (fsLit "U1")
par1DataCon_RDR  = dataQual_RDR gHC_GENERICS (fsLit "Par1")
rec1DataCon_RDR  = dataQual_RDR gHC_GENERICS (fsLit "Rec1")
k1DataCon_RDR    = dataQual_RDR gHC_GENERICS (fsLit "K1")
m1DataCon_RDR    = dataQual_RDR gHC_GENERICS (fsLit "M1")
l1DataCon_RDR    = dataQual_RDR gHC_GENERICS (fsLit "L1")
r1DataCon_RDR    = dataQual_RDR gHC_GENERICS (fsLit "R1")
prodDataCon_RDR  = dataQual_RDR gHC_GENERICS (fsLit ":*:")
comp1DataCon_RDR = dataQual_RDR gHC_GENERICS (fsLit "Comp1")
unPar1_RDR       = varQual_RDR gHC_GENERICS (fsLit "unPar1")
unRec1_RDR       = varQual_RDR gHC_GENERICS (fsLit "unRec1")
unK1_RDR         = varQual_RDR gHC_GENERICS (fsLit "unK1")
unComp1_RDR      = varQual_RDR gHC_GENERICS (fsLit "unComp1")
from_RDR         = varQual_RDR gHC_GENERICS (fsLit "from")
from1_RDR        = varQual_RDR gHC_GENERICS (fsLit "from1")
to_RDR           = varQual_RDR gHC_GENERICS (fsLit "to")
to1_RDR          = varQual_RDR gHC_GENERICS (fsLit "to1")
datatypeName_RDR = varQual_RDR gHC_GENERICS (fsLit "datatypeName")
moduleName_RDR   = varQual_RDR gHC_GENERICS (fsLit "moduleName")
packageName_RDR  = varQual_RDR gHC_GENERICS (fsLit "packageName")
isNewtypeName_RDR = varQual_RDR gHC_GENERICS (fsLit "isNewtype")
selName_RDR      = varQual_RDR gHC_GENERICS (fsLit "selName")
conName_RDR      = varQual_RDR gHC_GENERICS (fsLit "conName")
conFixity_RDR    = varQual_RDR gHC_GENERICS (fsLit "conFixity")
conIsRecord_RDR  = varQual_RDR gHC_GENERICS (fsLit "conIsRecord")
prefixDataCon_RDR = dataQual_RDR gHC_GENERICS (fsLit "Prefix")
infixDataCon_RDR  = dataQual_RDR gHC_GENERICS (fsLit "Infix")
leftAssocDataCon_RDR  = nameRdrName leftAssociativeDataConName
rightAssocDataCon_RDR = nameRdrName rightAssociativeDataConName
notAssocDataCon_RDR   = nameRdrName notAssociativeDataConName
uAddrDataCon_RDR   = dataQual_RDR gHC_GENERICS (fsLit "UAddr")
uCharDataCon_RDR   = dataQual_RDR gHC_GENERICS (fsLit "UChar")
uDoubleDataCon_RDR = dataQual_RDR gHC_GENERICS (fsLit "UDouble")
uFloatDataCon_RDR  = dataQual_RDR gHC_GENERICS (fsLit "UFloat")
uIntDataCon_RDR    = dataQual_RDR gHC_GENERICS (fsLit "UInt")
uWordDataCon_RDR   = dataQual_RDR gHC_GENERICS (fsLit "UWord")
uAddrHash_RDR   = varQual_RDR gHC_GENERICS (fsLit "uAddr#")
uCharHash_RDR   = varQual_RDR gHC_GENERICS (fsLit "uChar#")
uDoubleHash_RDR = varQual_RDR gHC_GENERICS (fsLit "uDouble#")
uFloatHash_RDR  = varQual_RDR gHC_GENERICS (fsLit "uFloat#")
uIntHash_RDR    = varQual_RDR gHC_GENERICS (fsLit "uInt#")
uWordHash_RDR   = varQual_RDR gHC_GENERICS (fsLit "uWord#")

fmap_RDR, replace_RDR, pure_RDR, ap_RDR, liftA2_RDR, foldable_foldr_RDR,
    foldMap_RDR, null_RDR, all_RDR, traverse_RDR, mempty_RDR,
    mappend_RDR :: RdrName
fmap_RDR           = nameRdrName fmapName
replace_RDR        = varQual_RDR gHC_BASE (fsLit "<$")
pure_RDR           = nameRdrName pureAName
ap_RDR             = nameRdrName apAName
liftA2_RDR         = varQual_RDR gHC_BASE (fsLit "liftA2")
foldable_foldr_RDR = varQual_RDR dATA_FOLDABLE (fsLit "foldr")
foldMap_RDR        = varQual_RDR dATA_FOLDABLE (fsLit "foldMap")
null_RDR           = varQual_RDR dATA_FOLDABLE (fsLit "null")
all_RDR            = varQual_RDR dATA_FOLDABLE (fsLit "all")
traverse_RDR       = varQual_RDR dATA_TRAVERSABLE (fsLit "traverse")
mempty_RDR         = nameRdrName memptyName
mappend_RDR        = nameRdrName mappendName

----------------------
-- Helpers: build an Orig RdrName in the given module, in the
-- variable / type-constructor / class / data-constructor name space.
varQual_RDR, tcQual_RDR, clsQual_RDR, dataQual_RDR
    :: Module -> FastString -> RdrName
varQual_RDR  mod str = mkOrig mod (mkOccNameFS varName str)
tcQual_RDR   mod str = mkOrig mod (mkOccNameFS tcName str)
clsQual_RDR  mod str = mkOrig mod (mkOccNameFS clsName str)
dataQual_RDR mod str = mkOrig mod (mkOccNameFS dataName str)

{-
************************************************************************
*                                                                      *
\subsection{Known-key names}
*                                                                      *
************************************************************************

Many of these Names are not really "built in", but some parts of the
compiler (notably the deriving mechanism) need to mention their names,
and it's convenient to write them all down in one place.
-}

wildCardName :: Name
wildCardName = mkSystemVarName wildCardKey (fsLit "wild")

runMainIOName, runRWName :: Name
runMainIOName = varQual gHC_TOP_HANDLER (fsLit "runMainIO") runMainKey
runRWName     = varQual gHC_MAGIC (fsLit "runRW#") runRWKey

orderingTyConName, ordLTDataConName, ordEQDataConName, ordGTDataConName :: Name
orderingTyConName = tcQual gHC_TYPES (fsLit "Ordering") orderingTyConKey
ordLTDataConName  = dcQual gHC_TYPES (fsLit "LT") ordLTDataConKey
ordEQDataConName  = dcQual gHC_TYPES (fsLit "EQ") ordEQDataConKey
ordGTDataConName  = dcQual gHC_TYPES (fsLit "GT") ordGTDataConKey

specTyConName :: Name
specTyConName = tcQual gHC_TYPES (fsLit "SPEC") specTyConKey

eitherTyConName, leftDataConName, rightDataConName :: Name
eitherTyConName  = tcQual dATA_EITHER (fsLit "Either") eitherTyConKey
leftDataConName  = dcQual dATA_EITHER (fsLit "Left") leftDataConKey
rightDataConName = dcQual dATA_EITHER (fsLit "Right") rightDataConKey

voidTyConName :: Name
voidTyConName = tcQual dATA_VOID (fsLit "Void") voidTyConKey

-- Generics (types)
v1TyConName, u1TyConName, par1TyConName, rec1TyConName,
    k1TyConName, m1TyConName, sumTyConName, prodTyConName,
    compTyConName, rTyConName, dTyConName,
    cTyConName, sTyConName, rec0TyConName,
    d1TyConName, c1TyConName, s1TyConName, repTyConName, rep1TyConName,
    uRecTyConName, uAddrTyConName, uCharTyConName, uDoubleTyConName,
    uFloatTyConName, uIntTyConName, uWordTyConName,
    prefixIDataConName, infixIDataConName, leftAssociativeDataConName,
    rightAssociativeDataConName, notAssociativeDataConName,
    sourceUnpackDataConName, sourceNoUnpackDataConName,
    noSourceUnpackednessDataConName, sourceLazyDataConName,
    sourceStrictDataConName, noSourceStrictnessDataConName,
    decidedLazyDataConName, decidedStrictDataConName,
    decidedUnpackDataConName,
    metaDataDataConName, metaConsDataConName, metaSelDataConName :: Name
v1TyConName   = tcQual gHC_GENERICS (fsLit "V1") v1TyConKey
u1TyConName   = tcQual gHC_GENERICS (fsLit "U1") u1TyConKey
par1TyConName = tcQual gHC_GENERICS (fsLit "Par1") par1TyConKey
rec1TyConName = tcQual gHC_GENERICS (fsLit "Rec1") rec1TyConKey
k1TyConName   = tcQual gHC_GENERICS (fsLit "K1") k1TyConKey
m1TyConName   = tcQual gHC_GENERICS (fsLit "M1") m1TyConKey
sumTyConName  = tcQual gHC_GENERICS (fsLit ":+:") sumTyConKey
prodTyConName = tcQual gHC_GENERICS (fsLit ":*:") prodTyConKey
compTyConName = tcQual gHC_GENERICS (fsLit ":.:") compTyConKey
rTyConName    = tcQual gHC_GENERICS (fsLit "R") rTyConKey
dTyConName    = tcQual gHC_GENERICS (fsLit "D") dTyConKey
cTyConName    = tcQual gHC_GENERICS (fsLit "C") cTyConKey
sTyConName    = tcQual gHC_GENERICS (fsLit "S") sTyConKey
rec0TyConName = tcQual gHC_GENERICS (fsLit "Rec0") rec0TyConKey
d1TyConName   = tcQual gHC_GENERICS (fsLit "D1") d1TyConKey
c1TyConName   = tcQual gHC_GENERICS (fsLit "C1") c1TyConKey
s1TyConName   = tcQual gHC_GENERICS (fsLit "S1") s1TyConKey
repTyConName  = tcQual gHC_GENERICS (fsLit "Rep") repTyConKey
rep1TyConName = tcQual gHC_GENERICS (fsLit "Rep1") rep1TyConKey
uRecTyConName    = tcQual gHC_GENERICS (fsLit "URec") uRecTyConKey
uAddrTyConName   = tcQual gHC_GENERICS (fsLit "UAddr") uAddrTyConKey
uCharTyConName   = tcQual gHC_GENERICS (fsLit "UChar") uCharTyConKey
uDoubleTyConName = tcQual gHC_GENERICS (fsLit "UDouble") uDoubleTyConKey
uFloatTyConName  = tcQual gHC_GENERICS (fsLit "UFloat") uFloatTyConKey
uIntTyConName    = tcQual gHC_GENERICS (fsLit "UInt") uIntTyConKey
uWordTyConName   = tcQual gHC_GENERICS (fsLit "UWord") uWordTyConKey
prefixIDataConName = dcQual gHC_GENERICS (fsLit "PrefixI") prefixIDataConKey
infixIDataConName  = dcQual gHC_GENERICS (fsLit "InfixI") infixIDataConKey
leftAssociativeDataConName  = dcQual gHC_GENERICS (fsLit "LeftAssociative") leftAssociativeDataConKey
rightAssociativeDataConName = dcQual gHC_GENERICS (fsLit "RightAssociative") rightAssociativeDataConKey
notAssociativeDataConName   = dcQual gHC_GENERICS (fsLit "NotAssociative") notAssociativeDataConKey
sourceUnpackDataConName         = dcQual gHC_GENERICS (fsLit "SourceUnpack") sourceUnpackDataConKey
sourceNoUnpackDataConName       = dcQual gHC_GENERICS (fsLit "SourceNoUnpack") sourceNoUnpackDataConKey
noSourceUnpackednessDataConName = dcQual gHC_GENERICS (fsLit "NoSourceUnpackedness") noSourceUnpackednessDataConKey
sourceLazyDataConName           = dcQual gHC_GENERICS (fsLit "SourceLazy") sourceLazyDataConKey
sourceStrictDataConName         = dcQual gHC_GENERICS (fsLit "SourceStrict") sourceStrictDataConKey
noSourceStrictnessDataConName   = dcQual gHC_GENERICS (fsLit "NoSourceStrictness") noSourceStrictnessDataConKey
decidedLazyDataConName          = dcQual gHC_GENERICS (fsLit "DecidedLazy") decidedLazyDataConKey
decidedStrictDataConName        = dcQual gHC_GENERICS (fsLit "DecidedStrict") decidedStrictDataConKey
decidedUnpackDataConName        = dcQual gHC_GENERICS (fsLit "DecidedUnpack") decidedUnpackDataConKey
metaDataDataConName = dcQual gHC_GENERICS (fsLit "MetaData") metaDataDataConKey
metaConsDataConName = dcQual gHC_GENERICS (fsLit "MetaCons") metaConsDataConKey
metaSelDataConName  = dcQual gHC_GENERICS (fsLit "MetaSel") metaSelDataConKey

-- Primitive Int
divIntName, modIntName :: Name
divIntName = varQual gHC_CLASSES (fsLit "divInt#") divIntIdKey
modIntName = varQual gHC_CLASSES (fsLit "modInt#") modIntIdKey

-- Base strings
unpackCStringName, unpackCStringFoldrName,
    unpackCStringUtf8Name, unpackCStringFoldrUtf8Name,
    unpackCStringAppendName, unpackCStringAppendUtf8Name,
    eqStringName, cstringLengthName :: Name
cstringLengthName       = varQual gHC_CSTRING (fsLit "cstringLength#") cstringLengthIdKey
eqStringName            = varQual gHC_BASE (fsLit "eqString") eqStringIdKey
unpackCStringName       = varQual gHC_CSTRING (fsLit "unpackCString#") unpackCStringIdKey
unpackCStringAppendName = varQual gHC_CSTRING (fsLit "unpackAppendCString#") unpackCStringAppendIdKey
unpackCStringFoldrName  = varQual gHC_CSTRING (fsLit "unpackFoldrCString#") unpackCStringFoldrIdKey
unpackCStringUtf8Name       = varQual gHC_CSTRING (fsLit "unpackCStringUtf8#") unpackCStringUtf8IdKey
unpackCStringAppendUtf8Name = varQual gHC_CSTRING (fsLit "unpackAppendCStringUtf8#") unpackCStringAppendUtf8IdKey
unpackCStringFoldrUtf8Name  = varQual gHC_CSTRING (fsLit "unpackFoldrCStringUtf8#") unpackCStringFoldrUtf8IdKey

-- The 'inline' function
inlineIdName :: Name
inlineIdName = varQual gHC_MAGIC (fsLit "inline") inlineIdKey

-- Base classes (Eq, Ord, Functor)
fmapName, eqClassName, eqName, ordClassName, geName, functorClassName :: Name
eqClassName      = clsQual gHC_CLASSES (fsLit "Eq") eqClassKey
eqName           = varQual gHC_CLASSES (fsLit "==") eqClassOpKey
ordClassName     = clsQual gHC_CLASSES (fsLit "Ord") ordClassKey
geName           = varQual gHC_CLASSES (fsLit ">=") geClassOpKey
functorClassName = clsQual gHC_BASE (fsLit "Functor") functorClassKey
fmapName         = varQual gHC_BASE (fsLit "fmap") fmapClassOpKey

-- Class Monad
monadClassName, thenMName, bindMName, returnMName :: Name
monadClassName = clsQual gHC_BASE (fsLit "Monad") monadClassKey
thenMName      = varQual gHC_BASE (fsLit ">>") thenMClassOpKey
bindMName      = varQual gHC_BASE (fsLit ">>=") bindMClassOpKey
returnMName    = varQual gHC_BASE (fsLit "return") returnMClassOpKey

-- Class MonadFail
monadFailClassName, failMName :: Name
monadFailClassName = clsQual mONAD_FAIL (fsLit "MonadFail") monadFailClassKey
failMName          = varQual mONAD_FAIL (fsLit "fail") failMClassOpKey

-- Class Applicative
applicativeClassName, pureAName, apAName, thenAName :: Name
applicativeClassName = clsQual gHC_BASE (fsLit "Applicative") applicativeClassKey
apAName              = varQual gHC_BASE (fsLit "<*>") apAClassOpKey
pureAName            = varQual gHC_BASE (fsLit "pure") pureAClassOpKey
thenAName            = varQual gHC_BASE (fsLit "*>") thenAClassOpKey

-- Classes (Foldable, Traversable)
foldableClassName, traversableClassName :: Name
foldableClassName    = clsQual dATA_FOLDABLE (fsLit "Foldable") foldableClassKey
traversableClassName = clsQual dATA_TRAVERSABLE (fsLit "Traversable") traversableClassKey

-- Classes (Semigroup, Monoid)
semigroupClassName, sappendName :: Name
semigroupClassName = clsQual gHC_BASE (fsLit "Semigroup") semigroupClassKey
sappendName        = varQual gHC_BASE (fsLit "<>") sappendClassOpKey

monoidClassName, memptyName, mappendName, mconcatName :: Name
monoidClassName = clsQual gHC_BASE (fsLit "Monoid") monoidClassKey
memptyName      = varQual gHC_BASE (fsLit "mempty") memptyClassOpKey
mappendName     = varQual gHC_BASE (fsLit "mappend") mappendClassOpKey
mconcatName     = varQual gHC_BASE (fsLit "mconcat") mconcatClassOpKey

-- AMP additions
joinMName, alternativeClassName :: Name
joinMName            = varQual gHC_BASE (fsLit "join") joinMIdKey
alternativeClassName = clsQual mONAD (fsLit "Alternative") alternativeClassKey

--
joinMIdKey, apAClassOpKey, pureAClassOpKey, thenAClassOpKey,
    alternativeClassKey :: Unique
joinMIdKey          = mkPreludeMiscIdUnique 750
apAClassOpKey       = mkPreludeMiscIdUnique 751 -- <*>
pureAClassOpKey     = mkPreludeMiscIdUnique 752
thenAClassOpKey     = mkPreludeMiscIdUnique 753
alternativeClassKey = mkPreludeMiscIdUnique 754

-- Functions for GHC extensions
groupWithName, considerAccessibleName :: Name
groupWithName          = varQual gHC_EXTS (fsLit "groupWith") groupWithIdKey
considerAccessibleName = varQual gHC_EXTS (fsLit "considerAccessible") considerAccessibleIdKey

-- Random GHC.Base functions
fromStringName, otherwiseIdName, foldrName, buildName, augmentName,
    mapName, appendName, assertName, dollarName :: Name
dollarName      = varQual gHC_BASE (fsLit "$") dollarIdKey
otherwiseIdName = varQual gHC_BASE (fsLit "otherwise") otherwiseIdKey
foldrName       = varQual gHC_BASE (fsLit "foldr") foldrIdKey
buildName       = varQual gHC_BASE (fsLit "build") buildIdKey
augmentName     = varQual gHC_BASE (fsLit "augment") augmentIdKey
mapName         = varQual gHC_BASE (fsLit "map") mapIdKey
appendName      = varQual gHC_BASE (fsLit "++") appendIdKey
assertName      = varQual gHC_BASE (fsLit "assert") assertIdKey
fromStringName  = varQual dATA_STRING (fsLit "fromString") fromStringClassOpKey

-- Module GHC.Num
numClassName, fromIntegerName, minusName, negateName :: Name
numClassName    = clsQual gHC_NUM (fsLit "Num") numClassKey
fromIntegerName = varQual gHC_NUM (fsLit "fromInteger") fromIntegerClassOpKey
minusName       = varQual gHC_NUM (fsLit "-") minusClassOpKey
negateName      = varQual gHC_NUM (fsLit "negate") negateClassOpKey

---------------------------------
-- ghc-bignum
---------------------------------
integerFromNaturalName
    , integerToNaturalClampName
    , integerToNaturalThrowName
    , integerToNaturalName
    , integerToWordName
    , integerToIntName
    , integerToWord64Name
    , integerToInt64Name
    , integerFromWordName
    , integerFromWord64Name
    , integerFromInt64Name
    , integerAddName
    , integerMulName
    , integerSubName
    , integerNegateName
    , integerAbsName
    , integerPopCountName
    , integerQuotName
    , integerRemName
    , integerDivName
    , integerModName
    , integerDivModName
    , integerQuotRemName
    , integerEncodeFloatName
    , integerEncodeDoubleName
    , integerGcdName
    , integerLcmName
    , integerAndName
    , integerOrName
    , integerXorName
    , integerComplementName
    , integerBitName
    , integerTestBitName
    , integerShiftLName
    , integerShiftRName
    , naturalToWordName
    , naturalPopCountName
    , naturalShiftRName
    , naturalShiftLName
    , naturalAddName
    , naturalSubName
    , naturalSubThrowName
    , naturalSubUnsafeName
    , naturalMulName
    , naturalQuotRemName
    , naturalQuotName
    , naturalRemName
    , naturalAndName
    , naturalAndNotName
    , naturalOrName
    , naturalXorName
    , naturalTestBitName
    , naturalBitName
    , naturalGcdName
    , naturalLcmName
    , naturalLog2Name
    , naturalLogBaseWordName
    , naturalLogBaseName
    , naturalPowModName
    , naturalSizeInBaseName
    , bignatFromWordListName
    , bignatEqName
    , bignatCompareName
    , bignatCompareWordName
    :: Name

-- Helpers: known-key variables in GHC.Num.BigNat / GHC.Num.Natural /
-- GHC.Num.Integer respectively.
bnbVarQual, bnnVarQual, bniVarQual :: String -> Unique -> Name
bnbVarQual str key = varQual gHC_NUM_BIGNAT  (fsLit str) key
bnnVarQual str key = varQual gHC_NUM_NATURAL (fsLit str) key
bniVarQual str key = varQual gHC_NUM_INTEGER (fsLit str) key

-- Types and DataCons
bignatFromWordListName = bnbVarQual "bigNatFromWordList#" bignatFromWordListIdKey
bignatEqName           = bnbVarQual "bigNatEq#" bignatEqIdKey
bignatCompareName      = bnbVarQual "bigNatCompare" bignatCompareIdKey
bignatCompareWordName  = bnbVarQual "bigNatCompareWord#" bignatCompareWordIdKey

naturalToWordName     = bnnVarQual "naturalToWord#" naturalToWordIdKey
naturalPopCountName   = bnnVarQual "naturalPopCount#" naturalPopCountIdKey
naturalShiftRName     = bnnVarQual "naturalShiftR#" naturalShiftRIdKey
naturalShiftLName     = bnnVarQual "naturalShiftL#" naturalShiftLIdKey
naturalAddName        = bnnVarQual "naturalAdd" naturalAddIdKey
naturalSubName        = bnnVarQual "naturalSub" naturalSubIdKey
naturalSubThrowName   = bnnVarQual "naturalSubThrow" naturalSubThrowIdKey
naturalSubUnsafeName  = bnnVarQual "naturalSubUnsafe" naturalSubUnsafeIdKey
naturalMulName        = bnnVarQual "naturalMul" naturalMulIdKey
naturalQuotRemName    = bnnVarQual "naturalQuotRem#" naturalQuotRemIdKey
naturalQuotName       = bnnVarQual "naturalQuot" naturalQuotIdKey
naturalRemName        = bnnVarQual "naturalRem" naturalRemIdKey
naturalAndName        = bnnVarQual "naturalAnd" naturalAndIdKey
naturalAndNotName     = bnnVarQual "naturalAndNot" naturalAndNotIdKey
naturalOrName         = bnnVarQual "naturalOr" naturalOrIdKey
naturalXorName        = bnnVarQual "naturalXor" naturalXorIdKey
naturalTestBitName    = bnnVarQual "naturalTestBit#" naturalTestBitIdKey
naturalBitName        = bnnVarQual "naturalBit#" naturalBitIdKey
naturalGcdName        = bnnVarQual "naturalGcd" naturalGcdIdKey
naturalLcmName        = bnnVarQual "naturalLcm" naturalLcmIdKey
naturalLog2Name       = bnnVarQual "naturalLog2#" naturalLog2IdKey
naturalLogBaseWordName = bnnVarQual "naturalLogBaseWord#" naturalLogBaseWordIdKey
naturalLogBaseName    = bnnVarQual "naturalLogBase#" naturalLogBaseIdKey
naturalPowModName     = bnnVarQual "naturalPowMod" naturalPowModIdKey
naturalSizeInBaseName = bnnVarQual "naturalSizeInBase#" naturalSizeInBaseIdKey

integerFromNaturalName    = bniVarQual "integerFromNatural" integerFromNaturalIdKey
integerToNaturalClampName = bniVarQual "integerToNaturalClamp" integerToNaturalClampIdKey
integerToNaturalThrowName = bniVarQual "integerToNaturalThrow" integerToNaturalThrowIdKey
integerToNaturalName      = bniVarQual "integerToNatural" integerToNaturalIdKey
integerToWordName         = bniVarQual "integerToWord#" integerToWordIdKey
integerToIntName          = bniVarQual "integerToInt#" integerToIntIdKey
integerToWord64Name       = bniVarQual "integerToWord64#" integerToWord64IdKey
integerToInt64Name        = bniVarQual "integerToInt64#" integerToInt64IdKey
integerFromWordName       = bniVarQual "integerFromWord#" integerFromWordIdKey
integerFromWord64Name     = bniVarQual "integerFromWord64#" integerFromWord64IdKey
integerFromInt64Name      = bniVarQual "integerFromInt64#" integerFromInt64IdKey
integerAddName            = bniVarQual "integerAdd" integerAddIdKey
integerMulName            = bniVarQual "integerMul" integerMulIdKey
integerSubName            = bniVarQual "integerSub" integerSubIdKey
integerNegateName         = bniVarQual "integerNegate" integerNegateIdKey
integerAbsName            = bniVarQual "integerAbs" integerAbsIdKey
integerPopCountName       = bniVarQual "integerPopCount#" integerPopCountIdKey
integerQuotName           = bniVarQual "integerQuot" integerQuotIdKey
integerRemName            = bniVarQual "integerRem" integerRemIdKey
integerDivName            = bniVarQual "integerDiv" integerDivIdKey
integerModName            = bniVarQual "integerMod" integerModIdKey
integerDivModName         = bniVarQual "integerDivMod#" integerDivModIdKey
integerQuotRemName        = bniVarQual "integerQuotRem#" integerQuotRemIdKey
integerEncodeFloatName    = bniVarQual "integerEncodeFloat#" integerEncodeFloatIdKey
integerEncodeDoubleName = bniVarQual "integerEncodeDouble#" integerEncodeDoubleIdKey
integerGcdName = bniVarQual "integerGcd" integerGcdIdKey
integerLcmName = bniVarQual "integerLcm" integerLcmIdKey
integerAndName = bniVarQual "integerAnd" integerAndIdKey
integerOrName = bniVarQual "integerOr" integerOrIdKey
integerXorName = bniVarQual "integerXor" integerXorIdKey
integerComplementName = bniVarQual "integerComplement" integerComplementIdKey
integerBitName = bniVarQual "integerBit#" integerBitIdKey
integerTestBitName = bniVarQual "integerTestBit#" integerTestBitIdKey
integerShiftLName = bniVarQual "integerShiftL#" integerShiftLIdKey
integerShiftRName = bniVarQual "integerShiftR#" integerShiftRIdKey

---------------------------------
-- End of ghc-bignum
---------------------------------

-- GHC.Real types and classes
rationalTyConName, ratioTyConName, ratioDataConName, realClassName,
    integralClassName, realFracClassName, fractionalClassName,
    fromRationalName, toIntegerName, toRationalName, fromIntegralName,
    realToFracName, mkRationalBase2Name, mkRationalBase10Name :: Name
rationalTyConName = tcQual gHC_REAL (fsLit "Rational") rationalTyConKey
ratioTyConName = tcQual gHC_REAL (fsLit "Ratio") ratioTyConKey
ratioDataConName = dcQual gHC_REAL (fsLit ":%") ratioDataConKey
realClassName = clsQual gHC_REAL (fsLit "Real") realClassKey
integralClassName = clsQual gHC_REAL (fsLit "Integral") integralClassKey
realFracClassName = clsQual gHC_REAL (fsLit "RealFrac") realFracClassKey
fractionalClassName = clsQual gHC_REAL (fsLit "Fractional") fractionalClassKey
fromRationalName = varQual gHC_REAL (fsLit "fromRational") fromRationalClassOpKey
toIntegerName = varQual gHC_REAL (fsLit "toInteger") toIntegerClassOpKey
toRationalName = varQual gHC_REAL (fsLit "toRational") toRationalClassOpKey
fromIntegralName = varQual gHC_REAL (fsLit "fromIntegral") fromIntegralIdKey
realToFracName = varQual gHC_REAL (fsLit "realToFrac") realToFracIdKey
mkRationalBase2Name = varQual gHC_REAL (fsLit "mkRationalBase2") mkRationalBase2IdKey
mkRationalBase10Name = varQual gHC_REAL (fsLit "mkRationalBase10") mkRationalBase10IdKey

-- GHC.Float classes
floatingClassName, realFloatClassName :: Name
floatingClassName = clsQual gHC_FLOAT (fsLit "Floating") floatingClassKey
realFloatClassName = clsQual gHC_FLOAT (fsLit "RealFloat") realFloatClassKey

-- other GHC.Float functions
integerToFloatName, integerToDoubleName,
    naturalToFloatName, naturalToDoubleName,
    rationalToFloatName, rationalToDoubleName :: Name
integerToFloatName = varQual gHC_FLOAT (fsLit "integerToFloat#") integerToFloatIdKey
integerToDoubleName = varQual gHC_FLOAT (fsLit "integerToDouble#") integerToDoubleIdKey
naturalToFloatName = varQual gHC_FLOAT (fsLit "naturalToFloat#") naturalToFloatIdKey
naturalToDoubleName = varQual gHC_FLOAT (fsLit "naturalToDouble#") naturalToDoubleIdKey
rationalToFloatName = varQual gHC_FLOAT (fsLit "rationalToFloat") rationalToFloatIdKey
rationalToDoubleName = varQual gHC_FLOAT (fsLit "rationalToDouble") rationalToDoubleIdKey

-- Class Ix
ixClassName :: Name
ixClassName = clsQual gHC_IX (fsLit "Ix") ixClassKey

-- Typeable representation types
trModuleTyConName
  , trModuleDataConName
  , trNameTyConName
  , trNameSDataConName
  , trNameDDataConName
  , trTyConTyConName
  , trTyConDataConName
  :: Name
trModuleTyConName = tcQual gHC_TYPES (fsLit "Module") trModuleTyConKey
trModuleDataConName = dcQual gHC_TYPES (fsLit "Module") trModuleDataConKey
trNameTyConName = tcQual gHC_TYPES (fsLit "TrName") trNameTyConKey
trNameSDataConName = dcQual gHC_TYPES (fsLit "TrNameS") trNameSDataConKey
trNameDDataConName = dcQual gHC_TYPES (fsLit "TrNameD") trNameDDataConKey
trTyConTyConName = tcQual gHC_TYPES (fsLit "TyCon") trTyConTyConKey
trTyConDataConName = dcQual gHC_TYPES (fsLit "TyCon") trTyConDataConKey

kindRepTyConName
  , kindRepTyConAppDataConName
  , kindRepVarDataConName
  , kindRepAppDataConName
  , kindRepFunDataConName
  , kindRepTYPEDataConName
  , kindRepTypeLitSDataConName
  , kindRepTypeLitDDataConName
  :: Name
kindRepTyConName = tcQual gHC_TYPES (fsLit "KindRep") kindRepTyConKey
kindRepTyConAppDataConName = dcQual gHC_TYPES (fsLit "KindRepTyConApp") kindRepTyConAppDataConKey
kindRepVarDataConName = dcQual gHC_TYPES (fsLit "KindRepVar") kindRepVarDataConKey
kindRepAppDataConName = dcQual gHC_TYPES (fsLit "KindRepApp") kindRepAppDataConKey
kindRepFunDataConName = dcQual gHC_TYPES (fsLit "KindRepFun") kindRepFunDataConKey
kindRepTYPEDataConName = dcQual gHC_TYPES (fsLit "KindRepTYPE") kindRepTYPEDataConKey
kindRepTypeLitSDataConName = dcQual gHC_TYPES (fsLit "KindRepTypeLitS") kindRepTypeLitSDataConKey
kindRepTypeLitDDataConName = dcQual gHC_TYPES (fsLit "KindRepTypeLitD") kindRepTypeLitDDataConKey

typeLitSortTyConName
  , typeLitSymbolDataConName
  , typeLitNatDataConName
  , typeLitCharDataConName
  :: Name
typeLitSortTyConName = tcQual gHC_TYPES (fsLit "TypeLitSort") typeLitSortTyConKey
typeLitSymbolDataConName = dcQual gHC_TYPES (fsLit "TypeLitSymbol") typeLitSymbolDataConKey
typeLitNatDataConName = dcQual gHC_TYPES (fsLit "TypeLitNat") typeLitNatDataConKey
typeLitCharDataConName = dcQual gHC_TYPES (fsLit "TypeLitChar") typeLitCharDataConKey

-- Class Typeable, and functions for constructing `Typeable` dictionaries
typeableClassName
  , typeRepTyConName
  , someTypeRepTyConName
  , someTypeRepDataConName
  , mkTrTypeName
  , mkTrConName
  , mkTrAppName
  , mkTrFunName
  , typeRepIdName
  , typeNatTypeRepName
  , typeSymbolTypeRepName
  , typeCharTypeRepName
  , trGhcPrimModuleName
  :: Name
typeableClassName = clsQual tYPEABLE_INTERNAL (fsLit "Typeable") typeableClassKey
typeRepTyConName = tcQual tYPEABLE_INTERNAL (fsLit "TypeRep") typeRepTyConKey
someTypeRepTyConName = tcQual tYPEABLE_INTERNAL (fsLit "SomeTypeRep") someTypeRepTyConKey
someTypeRepDataConName = dcQual tYPEABLE_INTERNAL (fsLit "SomeTypeRep") someTypeRepDataConKey
typeRepIdName = varQual tYPEABLE_INTERNAL (fsLit "typeRep#") typeRepIdKey
mkTrTypeName = varQual tYPEABLE_INTERNAL (fsLit "mkTrType") mkTrTypeKey
mkTrConName = varQual tYPEABLE_INTERNAL (fsLit "mkTrCon") mkTrConKey
mkTrAppName = varQual tYPEABLE_INTERNAL (fsLit "mkTrApp") mkTrAppKey
mkTrFunName = varQual tYPEABLE_INTERNAL (fsLit "mkTrFun") mkTrFunKey
typeNatTypeRepName = varQual tYPEABLE_INTERNAL (fsLit "typeNatTypeRep") typeNatTypeRepKey
typeSymbolTypeRepName = varQual tYPEABLE_INTERNAL (fsLit "typeSymbolTypeRep") typeSymbolTypeRepKey
typeCharTypeRepName = varQual tYPEABLE_INTERNAL (fsLit "typeCharTypeRep") typeCharTypeRepKey

-- this is the Typeable 'Module' for GHC.Prim (which has no code, so we place in GHC.Types)
-- See Note [Grand plan for Typeable] in GHC.Tc.Instance.Typeable.
trGhcPrimModuleName = varQual gHC_TYPES (fsLit "tr$ModuleGHCPrim") trGhcPrimModuleKey

-- Typeable KindReps for some common cases
starKindRepName, starArrStarKindRepName, starArrStarArrStarKindRepName :: Name
starKindRepName = varQual gHC_TYPES (fsLit "krep$*") starKindRepKey
starArrStarKindRepName = varQual gHC_TYPES (fsLit "krep$*Arr*") starArrStarKindRepKey
starArrStarArrStarKindRepName = varQual gHC_TYPES (fsLit "krep$*->*->*") starArrStarArrStarKindRepKey

-- WithDict
withDictClassName :: Name
withDictClassName = clsQual gHC_MAGIC_DICT (fsLit "WithDict") withDictClassKey

-- Custom type errors
errorMessageTypeErrorFamName
  , typeErrorTextDataConName
  , typeErrorAppendDataConName
  , typeErrorVAppendDataConName
  , typeErrorShowTypeDataConName
  :: Name
errorMessageTypeErrorFamName = tcQual gHC_TYPEERROR (fsLit "TypeError") errorMessageTypeErrorFamKey
typeErrorTextDataConName = dcQual gHC_TYPEERROR (fsLit "Text") typeErrorTextDataConKey
typeErrorAppendDataConName = dcQual gHC_TYPEERROR (fsLit ":<>:") typeErrorAppendDataConKey
typeErrorVAppendDataConName = dcQual gHC_TYPEERROR (fsLit ":$$:") typeErrorVAppendDataConKey
typeErrorShowTypeDataConName = dcQual gHC_TYPEERROR (fsLit "ShowType") typeErrorShowTypeDataConKey

-- Unsafe coercion proofs
unsafeEqualityProofName, unsafeEqualityTyConName, unsafeCoercePrimName,
    unsafeReflDataConName :: Name
unsafeEqualityProofName = varQual uNSAFE_COERCE (fsLit "unsafeEqualityProof") unsafeEqualityProofIdKey
unsafeEqualityTyConName = tcQual uNSAFE_COERCE (fsLit "UnsafeEquality") unsafeEqualityTyConKey
unsafeReflDataConName = dcQual uNSAFE_COERCE (fsLit "UnsafeRefl") unsafeReflDataConKey
unsafeCoercePrimName = varQual uNSAFE_COERCE (fsLit "unsafeCoerce#") unsafeCoercePrimIdKey

-- Dynamic
toDynName :: Name
toDynName = varQual dYNAMIC (fsLit "toDyn") toDynIdKey

-- Class Data
dataClassName :: Name
dataClassName = clsQual gENERICS (fsLit "Data") dataClassKey

-- Error module
assertErrorName :: Name
assertErrorName = varQual gHC_IO_Exception (fsLit "assertError") assertErrorIdKey

-- Debug.Trace
traceName :: Name
traceName = varQual dEBUG_TRACE (fsLit "trace") traceKey

-- Enum module (Enum, Bounded)
enumClassName, enumFromName, enumFromToName, enumFromThenName,
    enumFromThenToName, boundedClassName :: Name
enumClassName = clsQual gHC_ENUM (fsLit "Enum") enumClassKey
enumFromName = varQual gHC_ENUM (fsLit "enumFrom") enumFromClassOpKey
enumFromToName = varQual gHC_ENUM (fsLit "enumFromTo") enumFromToClassOpKey
enumFromThenName = varQual gHC_ENUM (fsLit "enumFromThen") enumFromThenClassOpKey
enumFromThenToName = varQual gHC_ENUM (fsLit "enumFromThenTo") enumFromThenToClassOpKey
boundedClassName = clsQual gHC_ENUM (fsLit "Bounded") boundedClassKey

-- List functions
concatName, filterName, zipName :: Name
concatName = varQual gHC_LIST (fsLit "concat") concatIdKey
filterName = varQual gHC_LIST (fsLit "filter") filterIdKey
zipName = varQual gHC_LIST (fsLit "zip") zipIdKey

-- Overloaded lists
isListClassName, fromListName, fromListNName, toListName :: Name
isListClassName = clsQual gHC_IS_LIST (fsLit "IsList") isListClassKey
fromListName = varQual gHC_IS_LIST (fsLit "fromList") fromListClassOpKey
fromListNName = varQual gHC_IS_LIST (fsLit "fromListN") fromListNClassOpKey
toListName = varQual gHC_IS_LIST (fsLit "toList") toListClassOpKey

-- HasField class ops
getFieldName, setFieldName :: Name
getFieldName = varQual gHC_RECORDS (fsLit "getField") getFieldClassOpKey
setFieldName = varQual gHC_RECORDS (fsLit "setField") setFieldClassOpKey

-- Class Show
showClassName :: Name
showClassName = clsQual gHC_SHOW (fsLit "Show") showClassKey

-- Class Read
readClassName :: Name
readClassName = clsQual gHC_READ (fsLit "Read") readClassKey

-- Classes Generic and Generic1, Datatype, Constructor and Selector
genClassName, gen1ClassName, datatypeClassName, constructorClassName,
    selectorClassName :: Name
genClassName = clsQual gHC_GENERICS (fsLit "Generic") genClassKey
gen1ClassName = clsQual gHC_GENERICS (fsLit "Generic1") gen1ClassKey
datatypeClassName = clsQual gHC_GENERICS (fsLit "Datatype") datatypeClassKey
constructorClassName = clsQual gHC_GENERICS (fsLit "Constructor") constructorClassKey
selectorClassName = clsQual gHC_GENERICS (fsLit "Selector") selectorClassKey

genericClassNames :: [Name]
genericClassNames = [genClassName, gen1ClassName]

-- GHCi things
ghciIoClassName, ghciStepIoMName :: Name
ghciIoClassName = clsQual gHC_GHCI (fsLit "GHCiSandboxIO") ghciIoClassKey
ghciStepIoMName = varQual gHC_GHCI (fsLit "ghciStepIO") ghciStepIoMClassOpKey

-- IO things
ioTyConName, ioDataConName,
    thenIOName, bindIOName, returnIOName, failIOName :: Name
ioTyConName = tcQual gHC_TYPES (fsLit "IO") ioTyConKey
ioDataConName = dcQual gHC_TYPES (fsLit "IO") ioDataConKey
thenIOName = varQual gHC_BASE (fsLit "thenIO") thenIOIdKey
bindIOName = varQual gHC_BASE (fsLit "bindIO") bindIOIdKey
returnIOName = varQual gHC_BASE (fsLit "returnIO") returnIOIdKey
failIOName = varQual gHC_IO (fsLit "failIO") failIOIdKey

-- IO things
printName :: Name
printName = varQual sYSTEM_IO (fsLit "print") printIdKey

-- Int, Word, and Addr things
int8TyConName, int16TyConName, int32TyConName, int64TyConName :: Name
int8TyConName = tcQual gHC_INT (fsLit "Int8") int8TyConKey
int16TyConName = tcQual gHC_INT (fsLit "Int16") int16TyConKey
int32TyConName = tcQual gHC_INT (fsLit "Int32") int32TyConKey
int64TyConName = tcQual gHC_INT (fsLit "Int64") int64TyConKey

-- Word module
word8TyConName, word16TyConName, word32TyConName, word64TyConName :: Name
word8TyConName = tcQual gHC_WORD (fsLit "Word8") word8TyConKey
word16TyConName = tcQual gHC_WORD (fsLit "Word16") word16TyConKey
word32TyConName = tcQual gHC_WORD (fsLit "Word32") word32TyConKey
word64TyConName = tcQual gHC_WORD (fsLit "Word64") word64TyConKey

-- PrelPtr module
ptrTyConName, funPtrTyConName :: Name
ptrTyConName = tcQual gHC_PTR (fsLit "Ptr") ptrTyConKey
funPtrTyConName = tcQual gHC_PTR (fsLit "FunPtr") funPtrTyConKey

-- Foreign objects and weak pointers
stablePtrTyConName, newStablePtrName :: Name
stablePtrTyConName = tcQual gHC_STABLE (fsLit "StablePtr") stablePtrTyConKey
newStablePtrName = varQual gHC_STABLE (fsLit "newStablePtr") newStablePtrIdKey

-- Recursive-do notation
monadFixClassName, mfixName :: Name
monadFixClassName = clsQual mONAD_FIX (fsLit "MonadFix") monadFixClassKey
mfixName = varQual mONAD_FIX (fsLit "mfix") mfixIdKey

-- Arrow notation
arrAName, composeAName, firstAName, appAName, choiceAName, loopAName :: Name
arrAName = varQual aRROW (fsLit "arr") arrAIdKey
composeAName = varQual gHC_DESUGAR (fsLit ">>>") composeAIdKey
firstAName = varQual aRROW (fsLit "first") firstAIdKey
appAName = varQual aRROW (fsLit "app") appAIdKey
choiceAName = varQual aRROW (fsLit "|||") choiceAIdKey
loopAName = varQual aRROW (fsLit "loop") loopAIdKey

-- Monad comprehensions
guardMName, liftMName, mzipName :: Name
guardMName = varQual mONAD (fsLit "guard") guardMIdKey
liftMName = varQual mONAD (fsLit "liftM") liftMIdKey
mzipName = varQual mONAD_ZIP (fsLit "mzip") mzipIdKey

-- Annotation type checking
toAnnotationWrapperName :: Name
toAnnotationWrapperName = varQual gHC_DESUGAR (fsLit "toAnnotationWrapper") toAnnotationWrapperIdKey

-- Other classes, needed for type defaulting
monadPlusClassName, randomClassName, randomGenClassName, isStringClassName :: Name
monadPlusClassName = clsQual mONAD (fsLit "MonadPlus") monadPlusClassKey
randomClassName = clsQual rANDOM (fsLit "Random") randomClassKey
randomGenClassName = clsQual rANDOM (fsLit "RandomGen") randomGenClassKey
isStringClassName = clsQual dATA_STRING (fsLit "IsString") isStringClassKey

-- Type-level naturals
knownNatClassName :: Name
knownNatClassName = clsQual gHC_TYPENATS (fsLit "KnownNat") knownNatClassNameKey
knownSymbolClassName :: Name
knownSymbolClassName = clsQual gHC_TYPELITS (fsLit "KnownSymbol") knownSymbolClassNameKey
knownCharClassName :: Name
knownCharClassName = clsQual gHC_TYPELITS (fsLit "KnownChar") knownCharClassNameKey

-- Overloaded labels
fromLabelClassOpName :: Name
fromLabelClassOpName = varQual gHC_OVER_LABELS (fsLit "fromLabel") fromLabelClassOpKey

-- Implicit Parameters
ipClassName :: Name
ipClassName = clsQual gHC_CLASSES (fsLit "IP") ipClassKey

-- Overloaded record fields
hasFieldClassName :: Name
hasFieldClassName = clsQual gHC_RECORDS (fsLit "HasField") hasFieldClassNameKey

-- Source Locations
callStackTyConName, emptyCallStackName, pushCallStackName,
    srcLocDataConName :: Name
callStackTyConName = tcQual gHC_STACK_TYPES (fsLit "CallStack") callStackTyConKey
emptyCallStackName = varQual gHC_STACK_TYPES (fsLit "emptyCallStack") emptyCallStackKey
pushCallStackName = varQual gHC_STACK_TYPES (fsLit "pushCallStack") pushCallStackKey
srcLocDataConName = dcQual gHC_STACK_TYPES (fsLit "SrcLoc") srcLocDataConKey

-- plugins
pLUGINS :: Module
pLUGINS = mkThisGhcModule (fsLit "GHC.Driver.Plugins")
pluginTyConName :: Name
pluginTyConName = tcQual pLUGINS (fsLit "Plugin") pluginTyConKey
frontendPluginTyConName :: Name
frontendPluginTyConName = tcQual pLUGINS (fsLit "FrontendPlugin") frontendPluginTyConKey

-- Static pointers
makeStaticName :: Name
makeStaticName = varQual gHC_STATICPTR_INTERNAL (fsLit "makeStatic") makeStaticKey

staticPtrInfoTyConName :: Name
staticPtrInfoTyConName = tcQual gHC_STATICPTR (fsLit "StaticPtrInfo") staticPtrInfoTyConKey

staticPtrInfoDataConName :: Name
staticPtrInfoDataConName = dcQual gHC_STATICPTR (fsLit "StaticPtrInfo") staticPtrInfoDataConKey

staticPtrTyConName :: Name
staticPtrTyConName = tcQual gHC_STATICPTR (fsLit "StaticPtr") staticPtrTyConKey
staticPtrDataConName :: Name
staticPtrDataConName = dcQual gHC_STATICPTR (fsLit "StaticPtr") staticPtrDataConKey

fromStaticPtrName :: Name
fromStaticPtrName = varQual gHC_STATICPTR (fsLit "fromStaticPtr") fromStaticPtrClassOpKey

fingerprintDataConName :: Name
fingerprintDataConName = dcQual gHC_FINGERPRINT_TYPE (fsLit "Fingerprint") fingerprintDataConKey

{-
************************************************************************
*                                                                      *
\subsection{Local helpers}
*                                                                      *
************************************************************************

All these are original names; hence mkOrig
-}

-- Helpers that build a known-key, external 'Name' in a given namespace:
-- variable, type constructor, class, or data constructor respectively.
{-# INLINE varQual #-}
{-# INLINE tcQual #-}
{-# INLINE clsQual #-}
{-# INLINE dcQual #-}
varQual, tcQual, clsQual, dcQual :: Module -> FastString -> Unique -> Name
varQual modu str unique = mk_known_key_name varName modu str unique
tcQual modu str unique = mk_known_key_name tcName modu str unique
clsQual modu str unique = mk_known_key_name clsName modu str unique
dcQual modu str unique = mk_known_key_name dataName modu str unique

-- Build an external 'Name' with a fixed 'Unique' in the given 'NameSpace'
-- and 'Module'.  The resulting Name has no source location ('noSrcSpan'),
-- since these names are wired into the compiler rather than parsed.
mk_known_key_name :: NameSpace -> Module -> FastString -> Unique -> Name
{-# INLINE mk_known_key_name #-}
mk_known_key_name space modu str unique
  = mkExternalName unique modu (mkOccNameFS space str) noSrcSpan

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-Classes]{@Uniques@ for wired-in @Classes@}
*                                                                      *
************************************************************************
--MetaHaskell extension hand allocate keys here
-}

-- NOTE: gaps in the numbering below (e.g. 4, 19, 22-27, 46) appear to be
-- retired or reserved keys -- confirm against the full file before reusing.
boundedClassKey, enumClassKey, eqClassKey, floatingClassKey,
    fractionalClassKey, integralClassKey, monadClassKey, dataClassKey,
    functorClassKey, numClassKey, ordClassKey, readClassKey, realClassKey,
    realFloatClassKey, realFracClassKey, showClassKey, ixClassKey :: Unique
boundedClassKey = mkPreludeClassUnique 1
enumClassKey = mkPreludeClassUnique 2
eqClassKey = mkPreludeClassUnique 3
floatingClassKey = mkPreludeClassUnique 5
fractionalClassKey = mkPreludeClassUnique 6
integralClassKey = mkPreludeClassUnique 7
monadClassKey = mkPreludeClassUnique 8
dataClassKey = mkPreludeClassUnique 9
functorClassKey = mkPreludeClassUnique 10
numClassKey = mkPreludeClassUnique 11
ordClassKey = mkPreludeClassUnique 12
readClassKey = mkPreludeClassUnique 13
realClassKey = mkPreludeClassUnique 14
realFloatClassKey = mkPreludeClassUnique 15
realFracClassKey = mkPreludeClassUnique 16
showClassKey = mkPreludeClassUnique 17
ixClassKey = mkPreludeClassUnique 18

typeableClassKey :: Unique
typeableClassKey = mkPreludeClassUnique 20

withDictClassKey :: Unique
withDictClassKey = mkPreludeClassUnique 21

monadFixClassKey :: Unique
monadFixClassKey = mkPreludeClassUnique 28

monadFailClassKey :: Unique
monadFailClassKey = mkPreludeClassUnique 29

monadPlusClassKey, randomClassKey, randomGenClassKey :: Unique
monadPlusClassKey = mkPreludeClassUnique 30
randomClassKey = mkPreludeClassUnique 31
randomGenClassKey = mkPreludeClassUnique 32

isStringClassKey :: Unique
isStringClassKey = mkPreludeClassUnique 33

applicativeClassKey, foldableClassKey, traversableClassKey :: Unique
applicativeClassKey = mkPreludeClassUnique 34
foldableClassKey = mkPreludeClassUnique 35
traversableClassKey = mkPreludeClassUnique 36

genClassKey, gen1ClassKey, datatypeClassKey, constructorClassKey,
    selectorClassKey :: Unique
genClassKey = mkPreludeClassUnique 37
gen1ClassKey = mkPreludeClassUnique 38
datatypeClassKey = mkPreludeClassUnique 39
constructorClassKey = mkPreludeClassUnique 40
selectorClassKey = mkPreludeClassUnique 41

-- KnownNat: see Note [KnownNat & KnownSymbol and EvLit] in GHC.Tc.Types.Evidence
knownNatClassNameKey :: Unique
knownNatClassNameKey = mkPreludeClassUnique 42

-- KnownSymbol: see Note [KnownNat & KnownSymbol and EvLit] in GHC.Tc.Types.Evidence
knownSymbolClassNameKey :: Unique
knownSymbolClassNameKey = mkPreludeClassUnique 43

knownCharClassNameKey :: Unique
knownCharClassNameKey = mkPreludeClassUnique 44

ghciIoClassKey :: Unique
ghciIoClassKey = mkPreludeClassUnique 45

semigroupClassKey, monoidClassKey :: Unique
semigroupClassKey = mkPreludeClassUnique 47
monoidClassKey = mkPreludeClassUnique 48

-- Implicit Parameters
ipClassKey :: Unique
ipClassKey = mkPreludeClassUnique 49

-- Overloaded record fields
hasFieldClassNameKey :: Unique
hasFieldClassNameKey = mkPreludeClassUnique 50

---------------- Template Haskell -------------------
--      GHC.Builtin.Names.TH: USES ClassUniques 200-299
-----------------------------------------------------

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-TyCons]{@Uniques@ for wired-in @TyCons@}
*                                                                      *
************************************************************************
-}

addrPrimTyConKey, arrayPrimTyConKey, boolTyConKey, byteArrayPrimTyConKey,
    charPrimTyConKey, charTyConKey, doublePrimTyConKey, doubleTyConKey,
    floatPrimTyConKey, floatTyConKey, funTyConKey, intPrimTyConKey,
    intTyConKey, int8TyConKey, int16TyConKey, int8PrimTyConKey,
    int16PrimTyConKey, int32PrimTyConKey, int32TyConKey, int64PrimTyConKey,
    int64TyConKey, integerTyConKey, naturalTyConKey,
    listTyConKey, foreignObjPrimTyConKey, maybeTyConKey,
    weakPrimTyConKey, mutableArrayPrimTyConKey, mutableByteArrayPrimTyConKey,
    orderingTyConKey, mVarPrimTyConKey, ratioTyConKey, rationalTyConKey,
    realWorldTyConKey, stablePtrPrimTyConKey, stablePtrTyConKey,
    eqTyConKey, heqTyConKey, ioPortPrimTyConKey,
    smallArrayPrimTyConKey, smallMutableArrayPrimTyConKey,
    stringTyConKey :: Unique
addrPrimTyConKey = mkPreludeTyConUnique 1
arrayPrimTyConKey = mkPreludeTyConUnique 3
boolTyConKey = mkPreludeTyConUnique 4
byteArrayPrimTyConKey = mkPreludeTyConUnique 5
stringTyConKey = mkPreludeTyConUnique 6
charPrimTyConKey = mkPreludeTyConUnique 7
charTyConKey = mkPreludeTyConUnique 8
doublePrimTyConKey = mkPreludeTyConUnique 9
doubleTyConKey = mkPreludeTyConUnique 10
floatPrimTyConKey = mkPreludeTyConUnique 11
floatTyConKey = mkPreludeTyConUnique 12
funTyConKey = mkPreludeTyConUnique 13
intPrimTyConKey = mkPreludeTyConUnique 14
intTyConKey = mkPreludeTyConUnique 15
int8PrimTyConKey = mkPreludeTyConUnique 16
int8TyConKey = mkPreludeTyConUnique 17
int16PrimTyConKey = mkPreludeTyConUnique 18
int16TyConKey = mkPreludeTyConUnique 19
int32PrimTyConKey = mkPreludeTyConUnique 20
int32TyConKey = mkPreludeTyConUnique 21
int64PrimTyConKey = mkPreludeTyConUnique 22
int64TyConKey = mkPreludeTyConUnique 23
integerTyConKey = mkPreludeTyConUnique 24
naturalTyConKey = mkPreludeTyConUnique 25
listTyConKey = mkPreludeTyConUnique 26
foreignObjPrimTyConKey = mkPreludeTyConUnique 27
maybeTyConKey = mkPreludeTyConUnique 28
weakPrimTyConKey = mkPreludeTyConUnique 29
mutableArrayPrimTyConKey = mkPreludeTyConUnique 30
mutableByteArrayPrimTyConKey = mkPreludeTyConUnique 31
orderingTyConKey = mkPreludeTyConUnique 32
mVarPrimTyConKey = mkPreludeTyConUnique 33
ioPortPrimTyConKey = mkPreludeTyConUnique 34
ratioTyConKey = mkPreludeTyConUnique 35
rationalTyConKey = mkPreludeTyConUnique 36
realWorldTyConKey = mkPreludeTyConUnique 37
stablePtrPrimTyConKey = mkPreludeTyConUnique 38
stablePtrTyConKey = mkPreludeTyConUnique 39
eqTyConKey = mkPreludeTyConUnique 40
heqTyConKey = mkPreludeTyConUnique 41

statePrimTyConKey, stableNamePrimTyConKey, stableNameTyConKey,
    mutVarPrimTyConKey, ioTyConKey,
    wordPrimTyConKey, wordTyConKey, word8PrimTyConKey, word8TyConKey,
    word16PrimTyConKey, word16TyConKey, word32PrimTyConKey, word32TyConKey,
    word64PrimTyConKey, word64TyConKey,
    kindConKey, boxityConKey, typeConKey, threadIdPrimTyConKey,
    bcoPrimTyConKey, ptrTyConKey, funPtrTyConKey, tVarPrimTyConKey,
    eqPrimTyConKey, eqReprPrimTyConKey, eqPhantPrimTyConKey,
    compactPrimTyConKey, stackSnapshotPrimTyConKey :: Unique
statePrimTyConKey = mkPreludeTyConUnique 50
stableNamePrimTyConKey = mkPreludeTyConUnique 51
stableNameTyConKey = mkPreludeTyConUnique 52
eqPrimTyConKey = mkPreludeTyConUnique 53
eqReprPrimTyConKey = mkPreludeTyConUnique 54
eqPhantPrimTyConKey = mkPreludeTyConUnique 55
mutVarPrimTyConKey = mkPreludeTyConUnique 56
ioTyConKey = mkPreludeTyConUnique 57
wordPrimTyConKey = mkPreludeTyConUnique 59
wordTyConKey = mkPreludeTyConUnique 60
word8PrimTyConKey = mkPreludeTyConUnique 61
word8TyConKey = mkPreludeTyConUnique 62
word16PrimTyConKey = mkPreludeTyConUnique 63
word16TyConKey = mkPreludeTyConUnique 64
word32PrimTyConKey = mkPreludeTyConUnique 65
word32TyConKey = mkPreludeTyConUnique 66
word64PrimTyConKey = mkPreludeTyConUnique 67
word64TyConKey = mkPreludeTyConUnique 68
kindConKey = mkPreludeTyConUnique 72
boxityConKey = mkPreludeTyConUnique 73
typeConKey = mkPreludeTyConUnique 74
threadIdPrimTyConKey = mkPreludeTyConUnique 75
bcoPrimTyConKey = mkPreludeTyConUnique 76
ptrTyConKey = mkPreludeTyConUnique 77
funPtrTyConKey = mkPreludeTyConUnique 78
tVarPrimTyConKey = mkPreludeTyConUnique 79
compactPrimTyConKey = mkPreludeTyConUnique 80
stackSnapshotPrimTyConKey = mkPreludeTyConUnique 81

eitherTyConKey :: Unique
eitherTyConKey = mkPreludeTyConUnique 84

voidTyConKey :: Unique
voidTyConKey = mkPreludeTyConUnique 85

nonEmptyTyConKey :: Unique
nonEmptyTyConKey = mkPreludeTyConUnique 86

-- Kind constructors
liftedTypeKindTyConKey, unliftedTypeKindTyConKey, tYPETyConKey,
    liftedRepTyConKey, unliftedRepTyConKey,
    constraintKindTyConKey, levityTyConKey, runtimeRepTyConKey,
    vecCountTyConKey, vecElemTyConKey,
    zeroBitRepTyConKey, zeroBitTypeTyConKey :: Unique
liftedTypeKindTyConKey = mkPreludeTyConUnique 88
unliftedTypeKindTyConKey = mkPreludeTyConUnique 89
tYPETyConKey = mkPreludeTyConUnique 90
constraintKindTyConKey = mkPreludeTyConUnique 92
levityTyConKey = mkPreludeTyConUnique 94
runtimeRepTyConKey = mkPreludeTyConUnique 95
vecCountTyConKey = mkPreludeTyConUnique 96
vecElemTyConKey = mkPreludeTyConUnique 97
liftedRepTyConKey = mkPreludeTyConUnique 98
unliftedRepTyConKey = mkPreludeTyConUnique 99
zeroBitRepTyConKey = mkPreludeTyConUnique 100
zeroBitTypeTyConKey = mkPreludeTyConUnique 101

pluginTyConKey, frontendPluginTyConKey :: Unique
pluginTyConKey = mkPreludeTyConUnique 102
frontendPluginTyConKey = mkPreludeTyConUnique 103

trTyConTyConKey, trModuleTyConKey, trNameTyConKey,
    kindRepTyConKey, typeLitSortTyConKey :: Unique
trTyConTyConKey = mkPreludeTyConUnique 104
trModuleTyConKey = mkPreludeTyConUnique 105
trNameTyConKey = mkPreludeTyConUnique 106
kindRepTyConKey = mkPreludeTyConUnique 107
typeLitSortTyConKey = mkPreludeTyConUnique 108

-- Generics (Unique keys)
v1TyConKey, u1TyConKey, par1TyConKey, rec1TyConKey,
    k1TyConKey, m1TyConKey, sumTyConKey, prodTyConKey,
    compTyConKey, rTyConKey, dTyConKey,
    cTyConKey, sTyConKey, rec0TyConKey,
    d1TyConKey, c1TyConKey, s1TyConKey,
    repTyConKey, rep1TyConKey, uRecTyConKey,
    uAddrTyConKey, uCharTyConKey, uDoubleTyConKey,
    uFloatTyConKey, uIntTyConKey, uWordTyConKey :: Unique
v1TyConKey = mkPreludeTyConUnique 135
u1TyConKey = mkPreludeTyConUnique 136
par1TyConKey = mkPreludeTyConUnique 137
rec1TyConKey = mkPreludeTyConUnique 138
k1TyConKey = mkPreludeTyConUnique 139
m1TyConKey = mkPreludeTyConUnique 140
sumTyConKey = mkPreludeTyConUnique 141
prodTyConKey = mkPreludeTyConUnique 142
compTyConKey = mkPreludeTyConUnique 143
rTyConKey = mkPreludeTyConUnique 144
dTyConKey = mkPreludeTyConUnique 146
cTyConKey = mkPreludeTyConUnique 147
sTyConKey = mkPreludeTyConUnique 148
rec0TyConKey = mkPreludeTyConUnique 149
d1TyConKey = mkPreludeTyConUnique 151
c1TyConKey = mkPreludeTyConUnique 152
s1TyConKey = mkPreludeTyConUnique 153
repTyConKey = mkPreludeTyConUnique 155
rep1TyConKey = mkPreludeTyConUnique 156
uRecTyConKey = mkPreludeTyConUnique 157
uAddrTyConKey = mkPreludeTyConUnique 158
uCharTyConKey = mkPreludeTyConUnique 159
uDoubleTyConKey = mkPreludeTyConUnique 160
uFloatTyConKey = mkPreludeTyConUnique 161
uIntTyConKey = mkPreludeTyConUnique 162
uWordTyConKey = mkPreludeTyConUnique 163

-- Custom user type-errors
errorMessageTypeErrorFamKey :: Unique
errorMessageTypeErrorFamKey = mkPreludeTyConUnique 181

coercibleTyConKey :: Unique
coercibleTyConKey = mkPreludeTyConUnique 183

proxyPrimTyConKey :: Unique
proxyPrimTyConKey = mkPreludeTyConUnique 184

specTyConKey :: Unique
specTyConKey = mkPreludeTyConUnique 185

anyTyConKey :: Unique
anyTyConKey = mkPreludeTyConUnique 186

smallArrayPrimTyConKey = mkPreludeTyConUnique 187
smallMutableArrayPrimTyConKey = mkPreludeTyConUnique 188

staticPtrTyConKey :: Unique
staticPtrTyConKey = mkPreludeTyConUnique 189

staticPtrInfoTyConKey :: Unique
staticPtrInfoTyConKey = mkPreludeTyConUnique 190

callStackTyConKey :: Unique
callStackTyConKey = mkPreludeTyConUnique 191

-- Typeables
typeRepTyConKey, someTypeRepTyConKey, someTypeRepDataConKey :: Unique
typeRepTyConKey = mkPreludeTyConUnique 192
someTypeRepTyConKey = mkPreludeTyConUnique 193
someTypeRepDataConKey = mkPreludeTyConUnique 194

typeSymbolAppendFamNameKey :: Unique
typeSymbolAppendFamNameKey = mkPreludeTyConUnique 195

-- Unsafe equality
unsafeEqualityTyConKey :: Unique
unsafeEqualityTyConKey = mkPreludeTyConUnique 196

-- Linear types
multiplicityTyConKey :: Unique
multiplicityTyConKey = mkPreludeTyConUnique 197

unrestrictedFunTyConKey :: Unique
unrestrictedFunTyConKey = mkPreludeTyConUnique 198

multMulTyConKey :: Unique
multMulTyConKey = mkPreludeTyConUnique 199

---------------- Template Haskell -------------------
--      GHC.Builtin.Names.TH: USES TyConUniques 200-299
-----------------------------------------------------

----------------------- SIMD ------------------------
--      USES TyConUniques 300-399
-----------------------------------------------------

#include "primop-vector-uniques.hs-incl"

------------- Type-level Symbol, Nat, Char ----------
--      USES TyConUniques 400-499
-----------------------------------------------------

typeSymbolKindConNameKey, typeCharKindConNameKey,
    typeNatAddTyFamNameKey, typeNatMulTyFamNameKey, typeNatExpTyFamNameKey,
    typeNatSubTyFamNameKey,
    typeSymbolCmpTyFamNameKey, typeNatCmpTyFamNameKey, typeCharCmpTyFamNameKey,
    typeLeqCharTyFamNameKey,
    typeNatDivTyFamNameKey, typeNatModTyFamNameKey, typeNatLogTyFamNameKey,
    typeConsSymbolTyFamNameKey, typeUnconsSymbolTyFamNameKey,
    typeCharToNatTyFamNameKey, typeNatToCharTyFamNameKey :: Unique
typeSymbolKindConNameKey = mkPreludeTyConUnique 400
typeCharKindConNameKey = mkPreludeTyConUnique 401
typeNatAddTyFamNameKey = mkPreludeTyConUnique 402
typeNatMulTyFamNameKey = mkPreludeTyConUnique 403
typeNatExpTyFamNameKey = mkPreludeTyConUnique 404
typeNatSubTyFamNameKey = mkPreludeTyConUnique 405
typeSymbolCmpTyFamNameKey = mkPreludeTyConUnique 406
typeNatCmpTyFamNameKey = mkPreludeTyConUnique 407
typeCharCmpTyFamNameKey = mkPreludeTyConUnique 408
typeLeqCharTyFamNameKey = mkPreludeTyConUnique 409
typeNatDivTyFamNameKey = mkPreludeTyConUnique 410
typeNatModTyFamNameKey = mkPreludeTyConUnique 411
typeNatLogTyFamNameKey = mkPreludeTyConUnique 412
typeConsSymbolTyFamNameKey = mkPreludeTyConUnique 413
typeUnconsSymbolTyFamNameKey = mkPreludeTyConUnique 414
typeCharToNatTyFamNameKey = mkPreludeTyConUnique 415
typeNatToCharTyFamNameKey = mkPreludeTyConUnique 416

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-DataCons]{@Uniques@ for wired-in @DataCons@}
*                                                                      *
************************************************************************
-}

charDataConKey, consDataConKey, doubleDataConKey, falseDataConKey,
    floatDataConKey, intDataConKey, nilDataConKey, ratioDataConKey,
    stableNameDataConKey, trueDataConKey, wordDataConKey, word8DataConKey,
    ioDataConKey, heqDataConKey,
    coercibleDataConKey, eqDataConKey, nothingDataConKey, justDataConKey,
    nonEmptyDataConKey :: Unique

charDataConKey = mkPreludeDataConUnique 1
consDataConKey = mkPreludeDataConUnique 2
doubleDataConKey = mkPreludeDataConUnique 3
falseDataConKey = mkPreludeDataConUnique 4
floatDataConKey = mkPreludeDataConUnique 5
intDataConKey = mkPreludeDataConUnique 6
nothingDataConKey = mkPreludeDataConUnique 7
justDataConKey = mkPreludeDataConUnique 8
eqDataConKey = mkPreludeDataConUnique 9
nilDataConKey = mkPreludeDataConUnique 10
ratioDataConKey = mkPreludeDataConUnique 11
word8DataConKey = mkPreludeDataConUnique 12
stableNameDataConKey = mkPreludeDataConUnique 13
trueDataConKey = mkPreludeDataConUnique 14
wordDataConKey = mkPreludeDataConUnique 15
ioDataConKey = mkPreludeDataConUnique 16
heqDataConKey = mkPreludeDataConUnique 18
nonEmptyDataConKey = mkPreludeDataConUnique 19

-- Generic data constructors
crossDataConKey, inlDataConKey, inrDataConKey, genUnitDataConKey :: Unique
crossDataConKey = mkPreludeDataConUnique 20
inlDataConKey = mkPreludeDataConUnique 21
inrDataConKey = mkPreludeDataConUnique 22
genUnitDataConKey = mkPreludeDataConUnique 23

leftDataConKey, rightDataConKey :: Unique
leftDataConKey = mkPreludeDataConUnique 25
rightDataConKey = mkPreludeDataConUnique 26

ordLTDataConKey, ordEQDataConKey, ordGTDataConKey :: Unique
ordLTDataConKey = mkPreludeDataConUnique 27
ordEQDataConKey = mkPreludeDataConUnique 28
ordGTDataConKey = mkPreludeDataConUnique 29

coercibleDataConKey = mkPreludeDataConUnique 32

staticPtrDataConKey :: Unique
staticPtrDataConKey = mkPreludeDataConUnique 33

staticPtrInfoDataConKey :: Unique
staticPtrInfoDataConKey = mkPreludeDataConUnique 34

fingerprintDataConKey :: Unique
fingerprintDataConKey = mkPreludeDataConUnique 35

srcLocDataConKey :: Unique
srcLocDataConKey = mkPreludeDataConUnique 37

trTyConDataConKey, trModuleDataConKey,
    trNameSDataConKey, trNameDDataConKey,
    trGhcPrimModuleKey :: Unique
trTyConDataConKey = mkPreludeDataConUnique 41
trModuleDataConKey = mkPreludeDataConUnique 43
trNameSDataConKey = mkPreludeDataConUnique 45
trNameDDataConKey = mkPreludeDataConUnique 46
trGhcPrimModuleKey = mkPreludeDataConUnique 47

typeErrorTextDataConKey, typeErrorAppendDataConKey,
    typeErrorVAppendDataConKey, typeErrorShowTypeDataConKey :: Unique
typeErrorTextDataConKey = mkPreludeDataConUnique 50
typeErrorAppendDataConKey = mkPreludeDataConUnique 51
typeErrorVAppendDataConKey = mkPreludeDataConUnique 52
typeErrorShowTypeDataConKey = mkPreludeDataConUnique 53

prefixIDataConKey, infixIDataConKey, leftAssociativeDataConKey,
    rightAssociativeDataConKey, notAssociativeDataConKey,
    sourceUnpackDataConKey, sourceNoUnpackDataConKey,
    noSourceUnpackednessDataConKey, sourceLazyDataConKey,
    sourceStrictDataConKey, noSourceStrictnessDataConKey,
    decidedLazyDataConKey, decidedStrictDataConKey, decidedUnpackDataConKey,
    metaDataDataConKey, metaConsDataConKey, metaSelDataConKey :: Unique
prefixIDataConKey = mkPreludeDataConUnique 54
infixIDataConKey = mkPreludeDataConUnique 55
leftAssociativeDataConKey = mkPreludeDataConUnique 56
rightAssociativeDataConKey = mkPreludeDataConUnique 57
notAssociativeDataConKey = mkPreludeDataConUnique 58
sourceUnpackDataConKey = mkPreludeDataConUnique 59
sourceNoUnpackDataConKey = mkPreludeDataConUnique 60
noSourceUnpackednessDataConKey = mkPreludeDataConUnique 61
sourceLazyDataConKey = mkPreludeDataConUnique 62
sourceStrictDataConKey = mkPreludeDataConUnique 63
noSourceStrictnessDataConKey = mkPreludeDataConUnique 64
decidedLazyDataConKey = mkPreludeDataConUnique 65
decidedStrictDataConKey = mkPreludeDataConUnique 66
decidedUnpackDataConKey = mkPreludeDataConUnique 67
metaDataDataConKey = mkPreludeDataConUnique 68
metaConsDataConKey = mkPreludeDataConUnique 69
metaSelDataConKey = mkPreludeDataConUnique 70

vecRepDataConKey, sumRepDataConKey, tupleRepDataConKey,
    boxedRepDataConKey :: Unique
vecRepDataConKey = mkPreludeDataConUnique 71
tupleRepDataConKey = mkPreludeDataConUnique 72
sumRepDataConKey = mkPreludeDataConUnique 73
boxedRepDataConKey = mkPreludeDataConUnique 74

boxedRepDataConTyConKey, tupleRepDataConTyConKey :: Unique
-- A promoted data constructor (i.e. a TyCon) has
-- the same key as the data constructor itself
boxedRepDataConTyConKey = boxedRepDataConKey
tupleRepDataConTyConKey = tupleRepDataConKey

-- See Note [Wiring in RuntimeRep] in GHC.Builtin.Types
-- Includes all nullary-data-constructor reps. Does not
-- include BoxedRep, VecRep, SumRep, TupleRep.
runtimeRepSimpleDataConKeys :: [Unique]
runtimeRepSimpleDataConKeys = map mkPreludeDataConUnique [75..87]

liftedDataConKey,unliftedDataConKey :: Unique
liftedDataConKey = mkPreludeDataConUnique 88
unliftedDataConKey = mkPreludeDataConUnique 89

-- See Note [Wiring in RuntimeRep] in GHC.Builtin.Types
-- VecCount
vecCountDataConKeys :: [Unique]
vecCountDataConKeys = map mkPreludeDataConUnique [90..95]

-- See Note [Wiring in RuntimeRep] in GHC.Builtin.Types
-- VecElem
vecElemDataConKeys :: [Unique]
vecElemDataConKeys = map mkPreludeDataConUnique [96..105]

-- Typeable things
kindRepTyConAppDataConKey, kindRepVarDataConKey, kindRepAppDataConKey,
    kindRepFunDataConKey, kindRepTYPEDataConKey,
    kindRepTypeLitSDataConKey, kindRepTypeLitDDataConKey :: Unique
kindRepTyConAppDataConKey = mkPreludeDataConUnique 106
kindRepVarDataConKey = mkPreludeDataConUnique 107
kindRepAppDataConKey = mkPreludeDataConUnique 108
kindRepFunDataConKey = mkPreludeDataConUnique 109
kindRepTYPEDataConKey = mkPreludeDataConUnique 110
kindRepTypeLitSDataConKey = mkPreludeDataConUnique 111
kindRepTypeLitDDataConKey = mkPreludeDataConUnique 112

typeLitSymbolDataConKey, typeLitNatDataConKey, typeLitCharDataConKey :: Unique
typeLitSymbolDataConKey = mkPreludeDataConUnique 113
typeLitNatDataConKey = mkPreludeDataConUnique 114
typeLitCharDataConKey = mkPreludeDataConUnique 115

-- Unsafe equality
unsafeReflDataConKey :: Unique
unsafeReflDataConKey = mkPreludeDataConUnique 116

-- Multiplicity
oneDataConKey, manyDataConKey :: Unique
oneDataConKey = mkPreludeDataConUnique 117
manyDataConKey = mkPreludeDataConUnique 118

-- ghc-bignum
integerISDataConKey, integerINDataConKey, integerIPDataConKey,
    naturalNSDataConKey, naturalNBDataConKey :: Unique
integerISDataConKey = mkPreludeDataConUnique 120
integerINDataConKey = mkPreludeDataConUnique 121
integerIPDataConKey = mkPreludeDataConUnique 122
naturalNSDataConKey = mkPreludeDataConUnique 123
naturalNBDataConKey = mkPreludeDataConUnique 124

---------------- Template Haskell -------------------
--      GHC.Builtin.Names.TH: USES DataUniques 200-250
-----------------------------------------------------

{-
************************************************************************
*                                                                      *
\subsubsection[Uniques-prelude-Ids]{@Uniques@ for wired-in @Ids@ (except @DataCons@)}
*                                                                      *
************************************************************************
-}

wildCardKey, absentErrorIdKey, augmentIdKey, appendIdKey,
    buildIdKey, foldrIdKey, recSelErrorIdKey,
    seqIdKey, eqStringIdKey,
    noMethodBindingErrorIdKey, nonExhaustiveGuardsErrorIdKey,
    runtimeErrorIdKey, patErrorIdKey, voidPrimIdKey,
    realWorldPrimIdKey, recConErrorIdKey,
    unpackCStringUtf8IdKey, unpackCStringAppendUtf8IdKey,
    unpackCStringFoldrUtf8IdKey,
    unpackCStringIdKey, unpackCStringAppendIdKey, unpackCStringFoldrIdKey,
    typeErrorIdKey, divIntIdKey, modIntIdKey,
    absentSumFieldErrorIdKey, cstringLengthIdKey,
    raiseOverflowIdKey, raiseUnderflowIdKey, raiseDivZeroIdKey :: Unique

wildCardKey = mkPreludeMiscIdUnique 0 -- See Note [WildCard binders]
absentErrorIdKey = mkPreludeMiscIdUnique 1
augmentIdKey = mkPreludeMiscIdUnique 2
appendIdKey = mkPreludeMiscIdUnique 3
buildIdKey = mkPreludeMiscIdUnique 4
foldrIdKey = mkPreludeMiscIdUnique 6
recSelErrorIdKey = mkPreludeMiscIdUnique 7
seqIdKey = mkPreludeMiscIdUnique 8
absentSumFieldErrorIdKey = mkPreludeMiscIdUnique 9
eqStringIdKey = mkPreludeMiscIdUnique 10
noMethodBindingErrorIdKey = mkPreludeMiscIdUnique 11
nonExhaustiveGuardsErrorIdKey = mkPreludeMiscIdUnique 12
runtimeErrorIdKey = mkPreludeMiscIdUnique 13
patErrorIdKey = mkPreludeMiscIdUnique 14
realWorldPrimIdKey = mkPreludeMiscIdUnique 15
recConErrorIdKey = mkPreludeMiscIdUnique 16
unpackCStringUtf8IdKey = mkPreludeMiscIdUnique 17 unpackCStringAppendUtf8IdKey = mkPreludeMiscIdUnique 18 unpackCStringFoldrUtf8IdKey = mkPreludeMiscIdUnique 19 unpackCStringIdKey = mkPreludeMiscIdUnique 20 unpackCStringAppendIdKey = mkPreludeMiscIdUnique 21 unpackCStringFoldrIdKey = mkPreludeMiscIdUnique 22 voidPrimIdKey = mkPreludeMiscIdUnique 23 typeErrorIdKey = mkPreludeMiscIdUnique 24 divIntIdKey = mkPreludeMiscIdUnique 25 modIntIdKey = mkPreludeMiscIdUnique 26 cstringLengthIdKey = mkPreludeMiscIdUnique 27 raiseOverflowIdKey = mkPreludeMiscIdUnique 28 raiseUnderflowIdKey = mkPreludeMiscIdUnique 29 raiseDivZeroIdKey = mkPreludeMiscIdUnique 30 concatIdKey, filterIdKey, zipIdKey, bindIOIdKey, returnIOIdKey, newStablePtrIdKey, printIdKey, failIOIdKey, nullAddrIdKey, voidArgIdKey, otherwiseIdKey, assertIdKey :: Unique concatIdKey = mkPreludeMiscIdUnique 31 filterIdKey = mkPreludeMiscIdUnique 32 zipIdKey = mkPreludeMiscIdUnique 33 bindIOIdKey = mkPreludeMiscIdUnique 34 returnIOIdKey = mkPreludeMiscIdUnique 35 newStablePtrIdKey = mkPreludeMiscIdUnique 36 printIdKey = mkPreludeMiscIdUnique 37 failIOIdKey = mkPreludeMiscIdUnique 38 nullAddrIdKey = mkPreludeMiscIdUnique 39 voidArgIdKey = mkPreludeMiscIdUnique 40 otherwiseIdKey = mkPreludeMiscIdUnique 43 assertIdKey = mkPreludeMiscIdUnique 44 leftSectionKey, rightSectionKey :: Unique leftSectionKey = mkPreludeMiscIdUnique 45 rightSectionKey = mkPreludeMiscIdUnique 46 rootMainKey, runMainKey :: Unique rootMainKey = mkPreludeMiscIdUnique 101 runMainKey = mkPreludeMiscIdUnique 102 thenIOIdKey, lazyIdKey, assertErrorIdKey, oneShotKey, runRWKey :: Unique thenIOIdKey = mkPreludeMiscIdUnique 103 lazyIdKey = mkPreludeMiscIdUnique 104 assertErrorIdKey = mkPreludeMiscIdUnique 105 oneShotKey = mkPreludeMiscIdUnique 106 runRWKey = mkPreludeMiscIdUnique 107 traceKey :: Unique traceKey = mkPreludeMiscIdUnique 108 inlineIdKey, noinlineIdKey :: Unique inlineIdKey = mkPreludeMiscIdUnique 120 -- see below mapIdKey, groupWithIdKey, 
dollarIdKey, coercionTokenIdKey, considerAccessibleIdKey :: Unique mapIdKey = mkPreludeMiscIdUnique 121 groupWithIdKey = mkPreludeMiscIdUnique 122 dollarIdKey = mkPreludeMiscIdUnique 123 coercionTokenIdKey = mkPreludeMiscIdUnique 124 noinlineIdKey = mkPreludeMiscIdUnique 125 considerAccessibleIdKey = mkPreludeMiscIdUnique 126 integerToFloatIdKey, integerToDoubleIdKey, naturalToFloatIdKey, naturalToDoubleIdKey :: Unique integerToFloatIdKey = mkPreludeMiscIdUnique 128 integerToDoubleIdKey = mkPreludeMiscIdUnique 129 naturalToFloatIdKey = mkPreludeMiscIdUnique 130 naturalToDoubleIdKey = mkPreludeMiscIdUnique 131 rationalToFloatIdKey, rationalToDoubleIdKey :: Unique rationalToFloatIdKey = mkPreludeMiscIdUnique 132 rationalToDoubleIdKey = mkPreludeMiscIdUnique 133 coerceKey :: Unique coerceKey = mkPreludeMiscIdUnique 157 {- Certain class operations from Prelude classes. They get their own uniques so we can look them up easily when we want to conjure them up during type checking. -} -- Just a placeholder for unbound variables produced by the renamer: unboundKey :: Unique unboundKey = mkPreludeMiscIdUnique 158 fromIntegerClassOpKey, minusClassOpKey, fromRationalClassOpKey, enumFromClassOpKey, enumFromThenClassOpKey, enumFromToClassOpKey, enumFromThenToClassOpKey, eqClassOpKey, geClassOpKey, negateClassOpKey, bindMClassOpKey, thenMClassOpKey, returnMClassOpKey, fmapClassOpKey :: Unique fromIntegerClassOpKey = mkPreludeMiscIdUnique 160 minusClassOpKey = mkPreludeMiscIdUnique 161 fromRationalClassOpKey = mkPreludeMiscIdUnique 162 enumFromClassOpKey = mkPreludeMiscIdUnique 163 enumFromThenClassOpKey = mkPreludeMiscIdUnique 164 enumFromToClassOpKey = mkPreludeMiscIdUnique 165 enumFromThenToClassOpKey = mkPreludeMiscIdUnique 166 eqClassOpKey = mkPreludeMiscIdUnique 167 geClassOpKey = mkPreludeMiscIdUnique 168 negateClassOpKey = mkPreludeMiscIdUnique 169 bindMClassOpKey = mkPreludeMiscIdUnique 171 -- (>>=) thenMClassOpKey = mkPreludeMiscIdUnique 172 -- (>>) fmapClassOpKey = 
mkPreludeMiscIdUnique 173 returnMClassOpKey = mkPreludeMiscIdUnique 174 -- Recursive do notation mfixIdKey :: Unique mfixIdKey = mkPreludeMiscIdUnique 175 -- MonadFail operations failMClassOpKey :: Unique failMClassOpKey = mkPreludeMiscIdUnique 176 -- fromLabel fromLabelClassOpKey :: Unique fromLabelClassOpKey = mkPreludeMiscIdUnique 177 -- Arrow notation arrAIdKey, composeAIdKey, firstAIdKey, appAIdKey, choiceAIdKey, loopAIdKey :: Unique arrAIdKey = mkPreludeMiscIdUnique 180 composeAIdKey = mkPreludeMiscIdUnique 181 -- >>> firstAIdKey = mkPreludeMiscIdUnique 182 appAIdKey = mkPreludeMiscIdUnique 183 choiceAIdKey = mkPreludeMiscIdUnique 184 -- ||| loopAIdKey = mkPreludeMiscIdUnique 185 fromStringClassOpKey :: Unique fromStringClassOpKey = mkPreludeMiscIdUnique 186 -- Annotation type checking toAnnotationWrapperIdKey :: Unique toAnnotationWrapperIdKey = mkPreludeMiscIdUnique 187 -- Conversion functions fromIntegralIdKey, realToFracIdKey, toIntegerClassOpKey, toRationalClassOpKey :: Unique fromIntegralIdKey = mkPreludeMiscIdUnique 190 realToFracIdKey = mkPreludeMiscIdUnique 191 toIntegerClassOpKey = mkPreludeMiscIdUnique 192 toRationalClassOpKey = mkPreludeMiscIdUnique 193 -- Monad comprehensions guardMIdKey, liftMIdKey, mzipIdKey :: Unique guardMIdKey = mkPreludeMiscIdUnique 194 liftMIdKey = mkPreludeMiscIdUnique 195 mzipIdKey = mkPreludeMiscIdUnique 196 -- GHCi ghciStepIoMClassOpKey :: Unique ghciStepIoMClassOpKey = mkPreludeMiscIdUnique 197 -- Overloaded lists isListClassKey, fromListClassOpKey, fromListNClassOpKey, toListClassOpKey :: Unique isListClassKey = mkPreludeMiscIdUnique 198 fromListClassOpKey = mkPreludeMiscIdUnique 199 fromListNClassOpKey = mkPreludeMiscIdUnique 500 toListClassOpKey = mkPreludeMiscIdUnique 501 proxyHashKey :: Unique proxyHashKey = mkPreludeMiscIdUnique 502 ---------------- Template Haskell ------------------- -- GHC.Builtin.Names.TH: USES IdUniques 200-499 ----------------------------------------------------- -- Used to make `Typeable` 
dictionaries mkTyConKey , mkTrTypeKey , mkTrConKey , mkTrAppKey , mkTrFunKey , typeNatTypeRepKey , typeSymbolTypeRepKey , typeCharTypeRepKey , typeRepIdKey :: Unique mkTyConKey = mkPreludeMiscIdUnique 503 mkTrTypeKey = mkPreludeMiscIdUnique 504 mkTrConKey = mkPreludeMiscIdUnique 505 mkTrAppKey = mkPreludeMiscIdUnique 506 typeNatTypeRepKey = mkPreludeMiscIdUnique 507 typeSymbolTypeRepKey = mkPreludeMiscIdUnique 508 typeCharTypeRepKey = mkPreludeMiscIdUnique 509 typeRepIdKey = mkPreludeMiscIdUnique 510 mkTrFunKey = mkPreludeMiscIdUnique 511 -- Representations for primitive types trTYPEKey , trTYPE'PtrRepLiftedKey , trRuntimeRepKey , tr'PtrRepLiftedKey , trLiftedRepKey :: Unique trTYPEKey = mkPreludeMiscIdUnique 512 trTYPE'PtrRepLiftedKey = mkPreludeMiscIdUnique 513 trRuntimeRepKey = mkPreludeMiscIdUnique 514 tr'PtrRepLiftedKey = mkPreludeMiscIdUnique 515 trLiftedRepKey = mkPreludeMiscIdUnique 516 -- KindReps for common cases starKindRepKey, starArrStarKindRepKey, starArrStarArrStarKindRepKey :: Unique starKindRepKey = mkPreludeMiscIdUnique 520 starArrStarKindRepKey = mkPreludeMiscIdUnique 521 starArrStarArrStarKindRepKey = mkPreludeMiscIdUnique 522 -- Dynamic toDynIdKey :: Unique toDynIdKey = mkPreludeMiscIdUnique 523 bitIntegerIdKey :: Unique bitIntegerIdKey = mkPreludeMiscIdUnique 550 heqSCSelIdKey, eqSCSelIdKey, coercibleSCSelIdKey :: Unique eqSCSelIdKey = mkPreludeMiscIdUnique 551 heqSCSelIdKey = mkPreludeMiscIdUnique 552 coercibleSCSelIdKey = mkPreludeMiscIdUnique 553 sappendClassOpKey :: Unique sappendClassOpKey = mkPreludeMiscIdUnique 554 memptyClassOpKey, mappendClassOpKey, mconcatClassOpKey :: Unique memptyClassOpKey = mkPreludeMiscIdUnique 555 mappendClassOpKey = mkPreludeMiscIdUnique 556 mconcatClassOpKey = mkPreludeMiscIdUnique 557 emptyCallStackKey, pushCallStackKey :: Unique emptyCallStackKey = mkPreludeMiscIdUnique 558 pushCallStackKey = mkPreludeMiscIdUnique 559 fromStaticPtrClassOpKey :: Unique fromStaticPtrClassOpKey = mkPreludeMiscIdUnique 560 
makeStaticKey :: Unique makeStaticKey = mkPreludeMiscIdUnique 561 -- Unsafe coercion proofs unsafeEqualityProofIdKey, unsafeCoercePrimIdKey :: Unique unsafeEqualityProofIdKey = mkPreludeMiscIdUnique 570 unsafeCoercePrimIdKey = mkPreludeMiscIdUnique 571 -- HasField class ops getFieldClassOpKey, setFieldClassOpKey :: Unique getFieldClassOpKey = mkPreludeMiscIdUnique 572 setFieldClassOpKey = mkPreludeMiscIdUnique 573 ------------------------------------------------------ -- ghc-bignum uses 600-699 uniques ------------------------------------------------------ integerFromNaturalIdKey , integerToNaturalClampIdKey , integerToNaturalThrowIdKey , integerToNaturalIdKey , integerToWordIdKey , integerToIntIdKey , integerToWord64IdKey , integerToInt64IdKey , integerAddIdKey , integerMulIdKey , integerSubIdKey , integerNegateIdKey , integerAbsIdKey , integerPopCountIdKey , integerQuotIdKey , integerRemIdKey , integerDivIdKey , integerModIdKey , integerDivModIdKey , integerQuotRemIdKey , integerEncodeFloatIdKey , integerEncodeDoubleIdKey , integerGcdIdKey , integerLcmIdKey , integerAndIdKey , integerOrIdKey , integerXorIdKey , integerComplementIdKey , integerBitIdKey , integerTestBitIdKey , integerShiftLIdKey , integerShiftRIdKey , integerFromWordIdKey , integerFromWord64IdKey , integerFromInt64IdKey , naturalToWordIdKey , naturalPopCountIdKey , naturalShiftRIdKey , naturalShiftLIdKey , naturalAddIdKey , naturalSubIdKey , naturalSubThrowIdKey , naturalSubUnsafeIdKey , naturalMulIdKey , naturalQuotRemIdKey , naturalQuotIdKey , naturalRemIdKey , naturalAndIdKey , naturalAndNotIdKey , naturalOrIdKey , naturalXorIdKey , naturalTestBitIdKey , naturalBitIdKey , naturalGcdIdKey , naturalLcmIdKey , naturalLog2IdKey , naturalLogBaseWordIdKey , naturalLogBaseIdKey , naturalPowModIdKey , naturalSizeInBaseIdKey , bignatFromWordListIdKey , bignatEqIdKey , bignatCompareIdKey , bignatCompareWordIdKey :: Unique integerFromNaturalIdKey = mkPreludeMiscIdUnique 600 integerToNaturalClampIdKey = 
mkPreludeMiscIdUnique 601 integerToNaturalThrowIdKey = mkPreludeMiscIdUnique 602 integerToNaturalIdKey = mkPreludeMiscIdUnique 603 integerToWordIdKey = mkPreludeMiscIdUnique 604 integerToIntIdKey = mkPreludeMiscIdUnique 605 integerToWord64IdKey = mkPreludeMiscIdUnique 606 integerToInt64IdKey = mkPreludeMiscIdUnique 607 integerAddIdKey = mkPreludeMiscIdUnique 608 integerMulIdKey = mkPreludeMiscIdUnique 609 integerSubIdKey = mkPreludeMiscIdUnique 610 integerNegateIdKey = mkPreludeMiscIdUnique 611 integerAbsIdKey = mkPreludeMiscIdUnique 618 integerPopCountIdKey = mkPreludeMiscIdUnique 621 integerQuotIdKey = mkPreludeMiscIdUnique 622 integerRemIdKey = mkPreludeMiscIdUnique 623 integerDivIdKey = mkPreludeMiscIdUnique 624 integerModIdKey = mkPreludeMiscIdUnique 625 integerDivModIdKey = mkPreludeMiscIdUnique 626 integerQuotRemIdKey = mkPreludeMiscIdUnique 627 integerEncodeFloatIdKey = mkPreludeMiscIdUnique 630 integerEncodeDoubleIdKey = mkPreludeMiscIdUnique 631 integerGcdIdKey = mkPreludeMiscIdUnique 632 integerLcmIdKey = mkPreludeMiscIdUnique 633 integerAndIdKey = mkPreludeMiscIdUnique 634 integerOrIdKey = mkPreludeMiscIdUnique 635 integerXorIdKey = mkPreludeMiscIdUnique 636 integerComplementIdKey = mkPreludeMiscIdUnique 637 integerBitIdKey = mkPreludeMiscIdUnique 638 integerTestBitIdKey = mkPreludeMiscIdUnique 639 integerShiftLIdKey = mkPreludeMiscIdUnique 640 integerShiftRIdKey = mkPreludeMiscIdUnique 641 integerFromWordIdKey = mkPreludeMiscIdUnique 642 integerFromWord64IdKey = mkPreludeMiscIdUnique 643 integerFromInt64IdKey = mkPreludeMiscIdUnique 644 naturalToWordIdKey = mkPreludeMiscIdUnique 650 naturalPopCountIdKey = mkPreludeMiscIdUnique 659 naturalShiftRIdKey = mkPreludeMiscIdUnique 660 naturalShiftLIdKey = mkPreludeMiscIdUnique 661 naturalAddIdKey = mkPreludeMiscIdUnique 662 naturalSubIdKey = mkPreludeMiscIdUnique 663 naturalSubThrowIdKey = mkPreludeMiscIdUnique 664 naturalSubUnsafeIdKey = mkPreludeMiscIdUnique 665 naturalMulIdKey = mkPreludeMiscIdUnique 666 
naturalQuotRemIdKey = mkPreludeMiscIdUnique 669 naturalQuotIdKey = mkPreludeMiscIdUnique 670 naturalRemIdKey = mkPreludeMiscIdUnique 671 naturalAndIdKey = mkPreludeMiscIdUnique 672 naturalAndNotIdKey = mkPreludeMiscIdUnique 673 naturalOrIdKey = mkPreludeMiscIdUnique 674 naturalXorIdKey = mkPreludeMiscIdUnique 675 naturalTestBitIdKey = mkPreludeMiscIdUnique 676 naturalBitIdKey = mkPreludeMiscIdUnique 677 naturalGcdIdKey = mkPreludeMiscIdUnique 678 naturalLcmIdKey = mkPreludeMiscIdUnique 679 naturalLog2IdKey = mkPreludeMiscIdUnique 680 naturalLogBaseWordIdKey = mkPreludeMiscIdUnique 681 naturalLogBaseIdKey = mkPreludeMiscIdUnique 682 naturalPowModIdKey = mkPreludeMiscIdUnique 683 naturalSizeInBaseIdKey = mkPreludeMiscIdUnique 684 bignatFromWordListIdKey = mkPreludeMiscIdUnique 690 bignatEqIdKey = mkPreludeMiscIdUnique 691 bignatCompareIdKey = mkPreludeMiscIdUnique 692 bignatCompareWordIdKey = mkPreludeMiscIdUnique 693 ------------------------------------------------------ -- ghci optimization for big rationals 700-749 uniques ------------------------------------------------------ -- Creating rationals at runtime. mkRationalBase2IdKey, mkRationalBase10IdKey :: Unique mkRationalBase2IdKey = mkPreludeMiscIdUnique 700 mkRationalBase10IdKey = mkPreludeMiscIdUnique 701 :: Unique {- ************************************************************************ * * \subsection[Class-std-groups]{Standard groups of Prelude classes} * * ************************************************************************ NOTE: @Eq@ and @Text@ do need to appear in @standardClasses@ even though every numeric class has these two as a superclass, because the list of ambiguous dictionaries hasn't been simplified. 
-} numericClassKeys :: [Unique] numericClassKeys = [ numClassKey , realClassKey , integralClassKey ] ++ fractionalClassKeys fractionalClassKeys :: [Unique] fractionalClassKeys = [ fractionalClassKey , floatingClassKey , realFracClassKey , realFloatClassKey ] -- The "standard classes" are used in defaulting (Haskell 98 report 4.3.4), -- and are: "classes defined in the Prelude or a standard library" standardClassKeys :: [Unique] standardClassKeys = derivableClassKeys ++ numericClassKeys ++ [randomClassKey, randomGenClassKey, functorClassKey, monadClassKey, monadPlusClassKey, monadFailClassKey, semigroupClassKey, monoidClassKey, isStringClassKey, applicativeClassKey, foldableClassKey, traversableClassKey, alternativeClassKey ] {- @derivableClassKeys@ is also used in checking \tr{deriving} constructs (@GHC.Tc.Deriv@). -} derivableClassKeys :: [Unique] derivableClassKeys = [ eqClassKey, ordClassKey, enumClassKey, ixClassKey, boundedClassKey, showClassKey, readClassKey ] -- These are the "interactive classes" that are consulted when doing -- defaulting. Does not include Num or IsString, which have special -- handling. interactiveClassNames :: [Name] interactiveClassNames = [ showClassName, eqClassName, ordClassName, foldableClassName , traversableClassName ] interactiveClassKeys :: [Unique] interactiveClassKeys = map getUnique interactiveClassNames {- ************************************************************************ * * Semi-builtin names * * ************************************************************************ Note [pretendNameIsInScope] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general, we filter out instances that mention types whose names are not in scope. However, in the situations listed below, we make an exception for some commonly used names, such as Data.Kind.Type, which may not actually be in scope but should be treated as though they were in scope. This includes built-in names, as well as a few extra names such as 'Type', 'TYPE', 'BoxedRep', etc. 
Situations in which we apply this special logic: - GHCi's :info command, see GHC.Runtime.Eval.getInfo. This fixes #1581. - When reporting instance overlap errors. Not doing so could mean that we would omit instances for typeclasses like type Cls :: k -> Constraint class Cls a because BoxedRep/Lifted were not in scope. See GHC.Tc.Errors.potentialInstancesErrMsg. This fixes one of the issues reported in #20465. -} -- | Should this name be considered in-scope, even though it technically isn't? -- -- This ensures that we don't filter out information because, e.g., -- Data.Kind.Type isn't imported. -- -- See Note [pretendNameIsInScope]. pretendNameIsInScope :: Name -> Bool pretendNameIsInScope n = isBuiltInSyntax n || any (n `hasKey`) [ liftedTypeKindTyConKey, unliftedTypeKindTyConKey , liftedDataConKey, unliftedDataConKey , tYPETyConKey , runtimeRepTyConKey, boxedRepDataConKey , eqTyConKey , oneDataConKey , manyDataConKey , funTyConKey ] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/PrimOps.hs0000644000000000000000000007175014472400112021200 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[PrimOp]{Primitive operations (machine-level)} -} {-# LANGUAGE CPP #-} module GHC.Builtin.PrimOps ( PrimOp(..), PrimOpVecCat(..), allThePrimOps, primOpType, primOpSig, primOpResultType, primOpTag, maxPrimOpTag, primOpOcc, primOpWrapperId, tagToEnumKey, primOpOutOfLine, primOpCodeSize, primOpOkForSpeculation, primOpOkForSideEffects, primOpIsCheap, primOpFixity, primOpDocs, primOpIsDiv, getPrimOpResultInfo, isComparisonPrimOp, PrimOpResultInfo(..), PrimCall(..) ) where import GHC.Prelude import GHC.Builtin.Types.Prim import GHC.Builtin.Types import GHC.Builtin.Uniques (mkPrimOpIdUnique, mkPrimOpWrapperUnique ) import GHC.Builtin.Names ( gHC_PRIMOPWRAPPERS ) import GHC.Core.TyCon ( TyCon, isPrimTyCon, PrimRep(..) 
) import GHC.Core.Type import GHC.Cmm.Type import GHC.Types.Demand import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Name import GHC.Types.RepType ( tyConPrimRep1 ) import GHC.Types.Basic import GHC.Types.Fixity ( Fixity(..), FixityDirection(..) ) import GHC.Types.SrcLoc ( wiredInSrcSpan ) import GHC.Types.ForeignCall ( CLabelString ) import GHC.Types.SourceText ( SourceText(..) ) import GHC.Types.Unique ( Unique) import GHC.Unit.Types ( Unit ) import GHC.Utils.Outputable import GHC.Data.FastString {- ************************************************************************ * * \subsection[PrimOp-datatype]{Datatype for @PrimOp@ (an enumeration)} * * ************************************************************************ These are in \tr{state-interface.verb} order. -} -- supplies: -- data PrimOp = ... #include "primop-data-decl.hs-incl" -- supplies -- primOpTag :: PrimOp -> Int #include "primop-tag.hs-incl" primOpTag _ = error "primOpTag: unknown primop" instance Eq PrimOp where op1 == op2 = primOpTag op1 == primOpTag op2 instance Ord PrimOp where op1 < op2 = primOpTag op1 < primOpTag op2 op1 <= op2 = primOpTag op1 <= primOpTag op2 op1 >= op2 = primOpTag op1 >= primOpTag op2 op1 > op2 = primOpTag op1 > primOpTag op2 op1 `compare` op2 | op1 < op2 = LT | op1 == op2 = EQ | otherwise = GT instance Outputable PrimOp where ppr op = pprPrimOp op data PrimOpVecCat = IntVec | WordVec | FloatVec -- An @Enum@-derived list would be better; meanwhile... (ToDo) allThePrimOps :: [PrimOp] allThePrimOps = #include "primop-list.hs-incl" tagToEnumKey :: Unique tagToEnumKey = mkPrimOpIdUnique (primOpTag TagToEnumOp) {- ************************************************************************ * * \subsection[PrimOp-info]{The essential info about each @PrimOp@} * * ************************************************************************ -} data PrimOpInfo = Compare OccName -- string :: T -> T -> Int# Type | GenPrimOp OccName -- string :: \/a1..an . T1 -> .. 
-> Tk -> T [TyVarBinder] [Type] Type mkCompare :: FastString -> Type -> PrimOpInfo mkCompare str ty = Compare (mkVarOccFS str) ty mkGenPrimOp :: FastString -> [TyVarBinder] -> [Type] -> Type -> PrimOpInfo mkGenPrimOp str tvs tys ty = GenPrimOp (mkVarOccFS str) tvs tys ty {- ************************************************************************ * * \subsubsection{Strictness} * * ************************************************************************ Not all primops are strict! -} primOpStrictness :: PrimOp -> Arity -> DmdSig -- See Demand.DmdSig for discussion of what the results -- The arity should be the arity of the primop; that's why -- this function isn't exported. #include "primop-strictness.hs-incl" {- ************************************************************************ * * \subsubsection{Fixity} * * ************************************************************************ -} primOpFixity :: PrimOp -> Maybe Fixity #include "primop-fixity.hs-incl" {- ************************************************************************ * * \subsubsection{Docs} * * ************************************************************************ See Note [GHC.Prim Docs] -} primOpDocs :: [(String, String)] #include "primop-docs.hs-incl" {- ************************************************************************ * * \subsubsection[PrimOp-comparison]{PrimOpInfo basic comparison ops} * * ************************************************************************ @primOpInfo@ gives all essential information (from which everything else, notably a type, can be constructed) for each @PrimOp@. -} primOpInfo :: PrimOp -> PrimOpInfo #include "primop-primop-info.hs-incl" primOpInfo _ = error "primOpInfo: unknown primop" {- Here are a load of comments from the old primOp info: A @Word#@ is an unsigned @Int#@. @decodeFloat#@ is given w/ Integer-stuff (it's similar). @decodeDouble#@ is given w/ Integer-stuff (it's similar). Decoding of floating-point numbers is sorta Integer-related. 
Encoding is done with plain ccalls now (see PrelNumExtra.hs). A @Weak@ Pointer is created by the @mkWeak#@ primitive: mkWeak# :: k -> v -> f -> State# RealWorld -> (# State# RealWorld, Weak# v #) In practice, you'll use the higher-level data Weak v = Weak# v mkWeak :: k -> v -> IO () -> IO (Weak v) The following operation dereferences a weak pointer. The weak pointer may have been finalized, so the operation returns a result code which must be inspected before looking at the dereferenced value. deRefWeak# :: Weak# v -> State# RealWorld -> (# State# RealWorld, v, Int# #) Only look at v if the Int# returned is /= 0 !! The higher-level op is deRefWeak :: Weak v -> IO (Maybe v) Weak pointers can be finalized early by using the finalize# operation: finalizeWeak# :: Weak# v -> State# RealWorld -> (# State# RealWorld, Int#, IO () #) The Int# returned is either 0 if the weak pointer has already been finalized, or it has no finalizer (the third component is then invalid). 1 if the weak pointer is still alive, with the finalizer returned as the third component. A {\em stable name/pointer} is an index into a table of stable name entries. Since the garbage collector is told about stable pointers, it is safe to pass a stable pointer to external systems such as C routines. \begin{verbatim} makeStablePtr# :: a -> State# RealWorld -> (# State# RealWorld, StablePtr# a #) freeStablePtr :: StablePtr# a -> State# RealWorld -> State# RealWorld deRefStablePtr# :: StablePtr# a -> State# RealWorld -> (# State# RealWorld, a #) eqStablePtr# :: StablePtr# a -> StablePtr# a -> Int# \end{verbatim} It may seem a bit surprising that @makeStablePtr#@ is a @IO@ operation since it doesn't (directly) involve IO operations. The reason is that if some optimisation pass decided to duplicate calls to @makeStablePtr#@ and we only pass one of the stable pointers over, a massive space leak can result. Putting it into the IO monad prevents this. 
(Another reason for putting them in a monad is to ensure correct sequencing wrt the side-effecting @freeStablePtr@ operation.) An important property of stable pointers is that if you call makeStablePtr# twice on the same object you get the same stable pointer back. Note that we can implement @freeStablePtr#@ using @_ccall_@ (and, besides, it's not likely to be used from Haskell) so it's not a primop. Question: Why @RealWorld@ - won't any instance of @_ST@ do the job? [ADR] Stable Names ~~~~~~~~~~~~ A stable name is like a stable pointer, but with three important differences: (a) You can't deRef one to get back to the original object. (b) You can convert one to an Int. (c) You don't need to 'freeStableName' The existence of a stable name doesn't guarantee to keep the object it points to alive (unlike a stable pointer), hence (a). Invariants: (a) makeStableName always returns the same value for a given object (same as stable pointers). (b) if two stable names are equal, it implies that the objects from which they were created were the same. (c) stableNameToInt always returns the same Int for a given stable name. These primops are pretty weird. tagToEnum# :: Int -> a (result type must be an enumerated type) The constraints aren't currently checked by the front end, but the code generator will fall over if they aren't satisfied. ************************************************************************ * * Which PrimOps are out-of-line * * ************************************************************************ Some PrimOps need to be called out-of-line because they either need to perform a heap check or they block. 
-} primOpOutOfLine :: PrimOp -> Bool #include "primop-out-of-line.hs-incl" {- ************************************************************************ * * Failure and side effects * * ************************************************************************ Note [Checking versus non-checking primops] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In GHC primops break down into two classes: a. Checking primops behave, for instance, like division. In this case the primop may throw an exception (e.g. division-by-zero) and is consequently is marked with the can_fail flag described below. The ability to fail comes at the expense of precluding some optimizations. b. Non-checking primops behavior, for instance, like addition. While addition can overflow it does not produce an exception. So can_fail is set to False, and we get more optimisation opportunities. But we must never throw an exception, so we cannot rewrite to a call to error. It is important that a non-checking primop never be transformed in a way that would cause it to bottom. Doing so would violate Core's let/app invariant (see Note [Core let/app invariant] in GHC.Core) which is critical to the simplifier's ability to float without fear of changing program meaning. Note [PrimOp can_fail and has_side_effects] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both can_fail and has_side_effects mean that the primop has some effect that is not captured entirely by its result value. ---------- has_side_effects --------------------- A primop "has_side_effects" if it has some side effect, visible elsewhere, apart from the result it returns - reading or writing to the world (I/O) - reading or writing to a mutable data structure (writeIORef) - throwing a synchronous Haskell exception Often such primops have a type like State -> input -> (State, output) so the state token guarantees ordering. 
In general we rely on data dependencies of the state token to enforce write-effect ordering, but as the notes below make clear, the matter is a bit more complicated than that. * NB1: if you inline unsafePerformIO, you may end up with side-effecting ops whose 'state' output is discarded. And programmers may do that by hand; see #9390. That is why we (conservatively) do not discard write-effecting primops even if both their state and result is discarded. * NB2: We consider primops, such as raiseIO#, that can raise a (Haskell) synchronous exception to "have_side_effects" but not "can_fail". We must be careful about not discarding such things; see the paper "A semantics for imprecise exceptions". * NB3: *Read* effects on *mutable* cells (like reading an IORef or a MutableArray#) /are/ included. You may find this surprising because it doesn't matter if we don't do them, or do them more than once. *Sequencing* is maintained by the data dependency of the state token. But see "Duplication" below under Note [Transformations affected by can_fail and has_side_effects] Note that read operations on *immutable* values (like indexArray#) do not have has_side_effects. (They might be marked can_fail, however, because you might index out of bounds.) Using has_side_effects in this way is a bit of a blunt instrument. We could be more refined by splitting read and write effects (see comments with #3207 and #20195) ---------- can_fail ---------------------------- A primop "can_fail" if it can fail with an *unchecked* exception on some elements of its input domain. Main examples: division (fails on zero denominator) array indexing (fails if the index is out of bounds) An "unchecked exception" is one that is an outright error, (not turned into a Haskell exception,) such as seg-fault or divide-by-zero error. Such can_fail primops are ALWAYS surrounded with a test that checks for the bad cases, but we need to be very careful about code motion that might move it out of the scope of the test. 
Note [Transformations affected by can_fail and has_side_effects]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The can_fail and has_side_effects properties have the following effect
on program transformations.  Summary table is followed by details.

            can_fail     has_side_effects
Discard        YES           NO
Float in       YES           YES
Float out      NO            NO
Duplicate      YES           NO

* Discarding.   case (a `op` b) of _ -> rhs  ===>  rhs
  You should not discard a has_side_effects primop; e.g.
     case (writeIntArray# a i v s) of (# _, _ #) -> True
  Arguably you should be able to discard this, since the
  returned state token is not used, but that relies on NEVER
  inlining unsafePerformIO, and programmers sometimes write
  this kind of stuff by hand (#9390).  So we (conservatively)
  never discard a has_side_effects primop.

  However, it's fine to discard a can_fail primop.  For example
     case (indexIntArray# a i) of _ -> True
  We can discard indexIntArray#; it has can_fail, but not
  has_side_effects; see #5658 which was all about this.
  Notice that indexIntArray# is (in a more general handling of
  effects) a read effect, but we don't care about that here, and
  treat read effects as *not* has_side_effects.

  Similarly (a `/#` b) can be discarded.  It can seg-fault or
  cause a hardware exception, but not a synchronous Haskell
  exception.

  Synchronous Haskell exceptions, e.g. from raiseIO#, are treated
  as has_side_effects and hence are not discarded.

* Float in.  You can float a can_fail or has_side_effects primop
  *inwards*, but not inside a lambda (see Duplication below).

* Float out.  You must not float a can_fail primop *outwards* lest
  you escape the dynamic scope of the test.  Example:
      case d ># 0# of
        True  -> case x /# d of r -> r +# 1
        False -> 0
  Here we must not float the case outwards to give
      case x /# d of r ->
      case d ># 0# of
        True  -> r +# 1
        False -> 0

  Nor can you float out a has_side_effects primop.
For example:
      if blah then case writeMutVar# v True s0 of (# s1 #) -> s1
              else s0
  Notice that s0 is mentioned in both branches of the 'if', but
  only one of these two will actually be consumed.  But if we
  float out to
      case writeMutVar# v True s0 of (# s1 #) ->
      if blah then s1 else s0
  the writeMutVar will be performed in both branches, which is
  utterly wrong.

* Duplication.  You cannot duplicate a has_side_effects primop.  You
  might wonder how this can occur given the state token threading, but
  just look at Control.Monad.ST.Lazy.Imp.strictToLazy!  We get
  something like this
        p = case readMutVar# s v of
              (# s', r #) -> (State# s', r)
        s' = case p of (s', r) -> s'
        r  = case p of (s', r) -> r

  (All these bindings are boxed.)  If we inline p at its two call
  sites, we get a catastrophe: because the read is performed once when
  s' is demanded, and once when 'r' is demanded, which may be much
  later.  Utterly wrong.  #3207 is a real example of this happening.

  However, it's fine to duplicate a can_fail primop.  That is really
  the only difference between can_fail and has_side_effects.

Note [Implementation: how can_fail/has_side_effects affect transformations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
How do we ensure that floating/duplication/discarding are done right
in the simplifier?

Two main predicates on primops test these flags:
  primOpOkForSideEffects <=> not has_side_effects
  primOpOkForSpeculation <=> not (has_side_effects || can_fail)

* The "no-float-out" thing is achieved by ensuring that we never
  let-bind a can_fail or has_side_effects primop.  The RHS of a
  let-binding (which can float in and out freely) satisfies
  exprOkForSpeculation; this is the let/app invariant.  And
  exprOkForSpeculation is false of can_fail and has_side_effects.

* So can_fail and has_side_effects primops will appear only as the
  scrutinees of cases, and that's why the FloatIn pass is capable
  of floating case bindings inwards.
* The no-duplicate thing is done via primOpIsCheap, by making
  has_side_effects things (very very very) not-cheap!
-}

primOpHasSideEffects :: PrimOp -> Bool
#include "primop-has-side-effects.hs-incl"

primOpCanFail :: PrimOp -> Bool
#include "primop-can-fail.hs-incl"

-- | True of primops that are safe to speculate (i.e. evaluate eagerly,
-- ahead of their demand): no side effects, cannot fail, and not an
-- out-of-line call.
primOpOkForSpeculation :: PrimOp -> Bool
  -- See Note [PrimOp can_fail and has_side_effects]
  -- See comments with GHC.Core.Utils.exprOkForSpeculation
  -- primOpOkForSpeculation => primOpOkForSideEffects
primOpOkForSpeculation op
  = primOpOkForSideEffects op
    && not (primOpOutOfLine op || primOpCanFail op)
    -- I think the "out of line" test is because out of line things can
    -- be expensive (eg sine, cosine), and so we may not want to speculate them

-- | True iff the primop does NOT have has_side_effects; i.e. it may be
-- discarded or moved without losing a write effect.
primOpOkForSideEffects :: PrimOp -> Bool
primOpOkForSideEffects op
  = not (primOpHasSideEffects op)

{- Note [primOpIsCheap]
~~~~~~~~~~~~~~~~~~~~
@primOpIsCheap@, as used in GHC.Core.Opt.Simplify.Utils.

For now (HACK WARNING), we just borrow some other predicates for a
what-should-be-good-enough test.  "Cheap" means willing to call it more
than once, and/or push it inside a lambda.  The latter could change the
behaviour of 'seq' for primops that can fail, so we don't treat them as
cheap.
-}

primOpIsCheap :: PrimOp -> Bool
-- See Note [PrimOp can_fail and has_side_effects]
primOpIsCheap op = primOpOkForSpeculation op
-- In March 2001, we changed this to
--      primOpIsCheap op = False
-- thereby making *no* primops seem cheap.  But this killed eta
-- expansion on case (x ==# y) of True -> \s -> ...
-- which is bad.  In particular a loop like
--      doLoop n = loop 0
--        where
--          loop i | i == n    = return ()
--                 | otherwise = bar i >> loop (i+1)
-- allocated a closure every time round because it doesn't eta expand.
--
-- The problem that originally gave rise to the change was
--      let x = a +# b *# c in x +# x
-- where we don't want to inline x.  But primOpIsCheap doesn't control
-- that (it's exprIsDupable that does) so the problem doesn't occur
-- even if primOpIsCheap sometimes says 'True'.

-- | True of dyadic operators that can fail only if the second arg is zero!
--
-- This function probably belongs in an automagically generated file.. but it's
-- such a special case I thought I'd leave it here for now.
primOpIsDiv :: PrimOp -> Bool
primOpIsDiv op = case op of

  -- TODO: quotRemWord2, Int64, Word64
  IntQuotOp       -> True
  Int8QuotOp      -> True
  Int16QuotOp     -> True
  Int32QuotOp     -> True

  IntRemOp        -> True
  Int8RemOp       -> True
  Int16RemOp      -> True
  Int32RemOp      -> True

  IntQuotRemOp    -> True
  Int8QuotRemOp   -> True
  Int16QuotRemOp  -> True
  Int32QuotRemOp  -> True

  WordQuotOp      -> True
  Word8QuotOp     -> True
  Word16QuotOp    -> True
  Word32QuotOp    -> True

  WordRemOp       -> True
  Word8RemOp      -> True
  Word16RemOp     -> True
  Word32RemOp     -> True

  WordQuotRemOp   -> True
  Word8QuotRemOp  -> True
  Word16QuotRemOp -> True
  Word32QuotRemOp -> True

  FloatDivOp      -> True
  DoubleDivOp     -> True

  _               -> False

{-
************************************************************************
*                                                                      *
               PrimOp code size
*                                                                      *
************************************************************************

primOpCodeSize
~~~~~~~~~~~~~~
Gives an indication of the code size of a primop, for the purposes of
calculating unfolding sizes; see GHC.Core.Unfold.sizeExpr.
-}

primOpCodeSize :: PrimOp -> Int
#include "primop-code-size.hs-incl"

primOpCodeSizeDefault :: Int
primOpCodeSizeDefault = 1
  -- GHC.Core.Unfold.primOpSize already takes into account primOpOutOfLine
  -- and adds some further costs for the args in that case.
primOpCodeSizeForeignCall :: Int
primOpCodeSizeForeignCall = 4

{-
************************************************************************
*                                                                      *
               PrimOp types
*                                                                      *
************************************************************************
-}

-- | The full type of a primop: forall-bound type variables plus the
-- visible argument types and result type.
primOpType :: PrimOp -> Type  -- you may want to use primOpSig instead
primOpType op
  = case primOpInfo op of
      Compare _occ ty -> compare_fun_ty ty

      GenPrimOp _occ tyvars arg_tys res_ty ->
        mkForAllTys tyvars (mkVisFunTysMany arg_tys res_ty)

-- | The result type of a primop.  Comparison primops always return Int#.
primOpResultType :: PrimOp -> Type
primOpResultType op
  = case primOpInfo op of
      Compare _occ _ty -> intPrimTy
      GenPrimOp _occ _tyvars _arg_tys res_ty -> res_ty

-- | The source-level 'OccName' of a primop.
primOpOcc :: PrimOp -> OccName
primOpOcc op = case primOpInfo op of
                 Compare   occ _     -> occ
                 GenPrimOp occ _ _ _ -> occ

{- Note [Primop wrappers]
~~~~~~~~~~~~~~~~~~~~~~~~~
To support (limited) use of primops in GHCi genprimopcode generates the
GHC.PrimopWrappers module.  This module contains a "primop wrapper"
binding for each primop.  These are standard Haskell functions mirroring the
types of the primops they wrap.  For instance, in the case of plusInt# we
would have:

    module GHC.PrimopWrappers where
    import GHC.Prim as P

    plusInt# :: Int# -> Int# -> Int#
    plusInt# a b = P.plusInt# a b

The Id for the wrapper of a primop can be found using
'GHC.Builtin.PrimOps.primOpWrapperId'.  However, GHCi does not use this
mechanism to link primops; it rather does a rather hacky symbol lookup (see
GHC.ByteCode.Linker.primopToCLabel).  TODO: Perhaps this should be changed?

Note that these wrappers aren't *quite* as expressive as their unwrapped
brethren, in that they may exhibit less representation polymorphism.
For instance, consider the case of mkWeakNoFinalizer#, which has type:

    mkWeakNoFinalizer# :: forall (r :: RuntimeRep) (k :: TYPE r) (v :: Type).
                          k -> v -> State# RealWorld
                       -> (# State# RealWorld, Weak# v #)

Naively we could generate a wrapper of the form,

    mkWeakNoFinalizer# k v s = GHC.Prim.mkWeakNoFinalizer# k v s

However, this would require that 'k' bind the representation-polymorphic key,
which is disallowed by our representation polymorphism validity checks
(see Note [Representation polymorphism invariants] in GHC.Core).
Consequently, we give the wrapper the simpler, less polymorphic type

    mkWeakNoFinalizer# :: forall (k :: Type) (v :: Type).
                          k -> v -> State# RealWorld
                       -> (# State# RealWorld, Weak# v #)

This simplification tends to be good enough for GHCi uses given that there are
few representation-polymorphic primops, and we do little simplification
on interpreted code anyways.

TODO: This behavior is actually wrong; a program becomes ill-typed upon
replacing a real primop occurrence with one of its wrappers due to the fact
that the former has an additional type binder.  Hmmm....

Note [Eta expanding primops]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
STG requires that primop applications be saturated.  This makes code
generation significantly simpler since otherwise we would need to define a
calling convention for curried applications that can accommodate
representation polymorphism.

To ensure saturation, CorePrep eta expands all primop applications as
described in Note [Eta expansion of hasNoBinding things in CorePrep] in
GHC.Core.Prep.

Historical Note:

For a short period around GHC 8.8 we rewrote unsaturated primop applications
to rather use the primop's wrapper (see Note [Primop wrappers] in
GHC.Builtin.PrimOps) instead of eta expansion.  This was because at the time
CoreTidy would try to predict the CAFfyness of bindings that would be produced
by CorePrep for inclusion in interface files.  Eta expanding during CorePrep
proved to be very difficult to predict, leading to nasty inconsistencies in
CAFfyness determinations (see #16846).

Thankfully, we now no longer try to predict CAFfyness but rather compute it on
GHC STG (see Note [SRTs] in GHC.Cmm.Info.Build) and inject it into the
interface file after code generation (see TODO: Refer to whatever falls out of
#18096).  This is much simpler and avoids the potential for inconsistency,
allowing us to return to the somewhat simpler eta expansion approach for
unsaturated primops.  See #18079.
-}

-- | Returns the 'Id' of the wrapper associated with the given 'PrimOp'.
-- See Note [Primop wrappers].
primOpWrapperId :: PrimOp -> Id
primOpWrapperId op = mkVanillaGlobalWithInfo name ty info
  where
    info = setCafInfo vanillaIdInfo NoCafRefs
    name = mkExternalName uniq gHC_PRIMOPWRAPPERS (primOpOcc op) wiredInSrcSpan
    uniq = mkPrimOpWrapperUnique (primOpTag op)
    ty   = primOpType op

-- | True of comparison primops; their types all have the shape
-- ty -> ty -> Int#.
isComparisonPrimOp :: PrimOp -> Bool
isComparisonPrimOp op = case primOpInfo op of
                          Compare {}   -> True
                          GenPrimOp {} -> False

-- primOpSig is like primOpType but gives the result split apart:
-- (type variables, argument types, result type)
-- It also gives arity, strictness info

primOpSig :: PrimOp -> ([TyVarBinder], [Type], Type, Arity, DmdSig)
primOpSig op
  = (tyvars, arg_tys, res_ty, arity, primOpStrictness op arity)
  where
    arity = length arg_tys
    (tyvars, arg_tys, res_ty)
      = case (primOpInfo op) of
          Compare   _occ ty                    -> ([], [ty,ty], intPrimTy)
          GenPrimOp _occ tyvars arg_tys res_ty -> (tyvars, arg_tys, res_ty )

-- | How a primop returns: either a manifest primitive value (with its
-- 'PrimRep') or a value of an algebraic 'TyCon'.
data PrimOpResultInfo
  = ReturnsPrim     PrimRep
  | ReturnsAlg      TyCon

-- Some PrimOps need not return a manifest primitive or algebraic value
-- (i.e. they might return a polymorphic value).  These PrimOps *must*
-- be out of line, or the code generator won't work.
getPrimOpResultInfo :: PrimOp -> PrimOpResultInfo getPrimOpResultInfo op = case (primOpInfo op) of Compare _ _ -> ReturnsPrim (tyConPrimRep1 intPrimTyCon) GenPrimOp _ _ _ ty | isPrimTyCon tc -> ReturnsPrim (tyConPrimRep1 tc) | otherwise -> ReturnsAlg tc where tc = tyConAppTyCon ty -- All primops return a tycon-app result -- The tycon can be an unboxed tuple or sum, though, -- which gives rise to a ReturnAlg {- We do not currently make use of whether primops are commutable. We used to try to move constants to the right hand side for strength reduction. -} {- commutableOp :: PrimOp -> Bool #include "primop-commutable.hs-incl" -} -- Utils: compare_fun_ty :: Type -> Type compare_fun_ty ty = mkVisFunTysMany [ty, ty] intPrimTy -- Output stuff: pprPrimOp :: PrimOp -> SDoc pprPrimOp other_op = pprOccName (primOpOcc other_op) {- ************************************************************************ * * \subsubsection[PrimCall]{User-imported primitive calls} * * ************************************************************************ -} data PrimCall = PrimCall CLabelString Unit instance Outputable PrimCall where ppr (PrimCall lbl pkgId) = text "__primcall" <+> ppr pkgId <+> ppr lbl ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/PrimOps/Ids.hs0000644000000000000000000000473314472400112021714 0ustar0000000000000000-- | PrimOp's Ids module GHC.Builtin.PrimOps.Ids ( primOpId , allThePrimOpIds ) where import GHC.Prelude -- primop rules are attached to primop ids import {-# SOURCE #-} GHC.Core.Opt.ConstantFold (primOpRules) import GHC.Core.Type (mkForAllTys, mkVisFunTysMany) import GHC.Core.FVs (mkRuleInfo) import GHC.Builtin.PrimOps import GHC.Builtin.Uniques import GHC.Builtin.Names import GHC.Types.Basic import GHC.Types.Cpr import GHC.Types.Demand import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.TyThing import GHC.Types.Name import GHC.Data.SmallArray import Data.Maybe ( maybeToList ) -- | Build a PrimOp Id mkPrimOpId :: PrimOp -> Id mkPrimOpId prim_op = id 
where (tyvars,arg_tys,res_ty, arity, strict_sig) = primOpSig prim_op ty = mkForAllTys tyvars (mkVisFunTysMany arg_tys res_ty) name = mkWiredInName gHC_PRIM (primOpOcc prim_op) (mkPrimOpIdUnique (primOpTag prim_op)) (AnId id) UserSyntax id = mkGlobalId (PrimOpId prim_op) name ty info -- PrimOps don't ever construct a product, but we want to preserve bottoms cpr | isDeadEndDiv (snd (splitDmdSig strict_sig)) = botCpr | otherwise = topCpr info = noCafIdInfo `setRuleInfo` mkRuleInfo (maybeToList $ primOpRules name prim_op) `setArityInfo` arity `setDmdSigInfo` strict_sig `setCprSigInfo` mkCprSig arity cpr `setInlinePragInfo` neverInlinePragma `setLevityInfoWithType` res_ty -- We give PrimOps a NOINLINE pragma so that we don't -- get silly warnings from Desugar.dsRule (the inline_shadows_rule -- test) about a RULE conflicting with a possible inlining -- cf #7287 ------------------------------------------------------------- -- Cache of PrimOp's Ids ------------------------------------------------------------- -- | A cache of the PrimOp Ids, indexed by PrimOp tag (0 indexed) primOpIds :: SmallArray Id {-# NOINLINE primOpIds #-} primOpIds = listToArray (maxPrimOpTag+1) primOpTag mkPrimOpId allThePrimOps -- | Get primop id. -- -- Retrieve it from `primOpIds` cache. primOpId :: PrimOp -> Id {-# INLINE primOpId #-} primOpId op = indexSmallArray primOpIds (primOpTag op) -- | All the primop ids, as a list allThePrimOpIds :: [Id] {-# INLINE allThePrimOpIds #-} allThePrimOpIds = map (indexSmallArray primOpIds) [0..maxPrimOpTag] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Types.hs0000644000000000000000000025216714472400112020716 0ustar0000000000000000{- (c) The GRASP Project, Glasgow University, 1994-1998 Wired-in knowledge about {\em non-primitive} types -} {-# LANGUAGE OverloadedStrings #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | This module is about types that can be defined in Haskell, but which -- must be wired into the compiler nonetheless. 
C.f module "GHC.Builtin.Types.Prim" module GHC.Builtin.Types ( -- * Helper functions defined here mkWiredInTyConName, -- This is used in GHC.Builtin.Types.Literals to define the -- built-in functions for evaluation. mkWiredInIdName, -- used in GHC.Types.Id.Make -- * All wired in things wiredInTyCons, isBuiltInOcc_maybe, -- * Bool boolTy, boolTyCon, boolTyCon_RDR, boolTyConName, trueDataCon, trueDataConId, true_RDR, falseDataCon, falseDataConId, false_RDR, promotedFalseDataCon, promotedTrueDataCon, -- * Ordering orderingTyCon, ordLTDataCon, ordLTDataConId, ordEQDataCon, ordEQDataConId, ordGTDataCon, ordGTDataConId, promotedLTDataCon, promotedEQDataCon, promotedGTDataCon, -- * Boxing primitive types boxingDataCon_maybe, -- * Char charTyCon, charDataCon, charTyCon_RDR, charTy, stringTy, charTyConName, stringTyCon_RDR, -- * Double doubleTyCon, doubleDataCon, doubleTy, doubleTyConName, -- * Float floatTyCon, floatDataCon, floatTy, floatTyConName, -- * Int intTyCon, intDataCon, intTyCon_RDR, intDataCon_RDR, intTyConName, intTy, -- * Word wordTyCon, wordDataCon, wordTyConName, wordTy, -- * Word8 word8TyCon, word8DataCon, word8Ty, -- * List listTyCon, listTyCon_RDR, listTyConName, listTyConKey, nilDataCon, nilDataConName, nilDataConKey, consDataCon_RDR, consDataCon, consDataConName, promotedNilDataCon, promotedConsDataCon, mkListTy, mkPromotedListTy, -- * NonEmpty nonEmptyTyCon, nonEmptyTyConName, nonEmptyDataCon, nonEmptyDataConName, -- * Maybe maybeTyCon, maybeTyConName, nothingDataCon, nothingDataConName, promotedNothingDataCon, justDataCon, justDataConName, promotedJustDataCon, mkPromotedMaybeTy, mkMaybeTy, isPromotedMaybeTy, -- * Tuples mkTupleTy, mkTupleTy1, mkBoxedTupleTy, mkTupleStr, tupleTyCon, tupleDataCon, tupleTyConName, tupleDataConName, promotedTupleDataCon, unitTyCon, unitDataCon, unitDataConId, unitTy, unitTyConKey, soloTyCon, pairTyCon, mkPromotedPairTy, isPromotedPairType, unboxedUnitTy, unboxedUnitTyCon, unboxedUnitDataCon, unboxedTupleKind, 
unboxedSumKind, filterCTuple, -- ** Constraint tuples cTupleTyCon, cTupleTyConName, cTupleTyConNames, isCTupleTyConName, cTupleTyConNameArity_maybe, cTupleDataCon, cTupleDataConName, cTupleDataConNames, cTupleSelId, cTupleSelIdName, -- * Any anyTyCon, anyTy, anyTypeOfKind, -- * Recovery TyCon makeRecoveryTyCon, -- * Sums mkSumTy, sumTyCon, sumDataCon, -- * Kinds typeSymbolKindCon, typeSymbolKind, isLiftedTypeKindTyConName, typeToTypeKind, liftedRepTyCon, unliftedRepTyCon, constraintKind, liftedTypeKind, unliftedTypeKind, zeroBitTypeKind, constraintKindTyCon, liftedTypeKindTyCon, unliftedTypeKindTyCon, constraintKindTyConName, liftedTypeKindTyConName, unliftedTypeKindTyConName, liftedRepTyConName, unliftedRepTyConName, -- * Equality predicates heqTyCon, heqTyConName, heqClass, heqDataCon, eqTyCon, eqTyConName, eqClass, eqDataCon, eqTyCon_RDR, coercibleTyCon, coercibleTyConName, coercibleDataCon, coercibleClass, -- * RuntimeRep and friends runtimeRepTyCon, levityTyCon, vecCountTyCon, vecElemTyCon, boxedRepDataConTyCon, runtimeRepTy, levityTy, liftedRepTy, unliftedRepTy, zeroBitRepTy, vecRepDataConTyCon, tupleRepDataConTyCon, sumRepDataConTyCon, liftedDataConTyCon, unliftedDataConTyCon, liftedDataConTy, unliftedDataConTy, intRepDataConTy, int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy, wordRepDataConTy, word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy, addrRepDataConTy, floatRepDataConTy, doubleRepDataConTy, vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy, vec64DataConTy, int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy, int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy, word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy, doubleElemRepDataConTy, -- * Multiplicity and friends multiplicityTyConName, oneDataConName, manyDataConName, multiplicityTy, multiplicityTyCon, oneDataCon, manyDataCon, oneDataConTy, manyDataConTy, 
oneDataConTyCon, manyDataConTyCon, multMulTyCon, unrestrictedFunTyCon, unrestrictedFunTyConName, -- * Bignum integerTy, integerTyCon, integerTyConName, integerISDataCon, integerISDataConName, integerIPDataCon, integerIPDataConName, integerINDataCon, integerINDataConName, naturalTy, naturalTyCon, naturalTyConName, naturalNSDataCon, naturalNSDataConName, naturalNBDataCon, naturalNBDataConName ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Id.Make ( mkDataConWorkId, mkDictSelId ) -- friends: import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Builtin.Uniques -- others: import GHC.Core.Coercion.Axiom import GHC.Types.Id import GHC.Types.TyThing import GHC.Types.SourceText import GHC.Types.Var (VarBndr (Bndr)) import GHC.Settings.Constants ( mAX_TUPLE_SIZE, mAX_CTUPLE_SIZE, mAX_SUM_SIZE ) import GHC.Unit.Module ( Module ) import GHC.Core.Type import qualified GHC.Core.TyCo.Rep as TyCoRep (Type(TyConApp)) import GHC.Types.RepType import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.TyCon import GHC.Core.Class ( Class, mkClass ) import GHC.Types.Name.Reader import GHC.Types.Name as Name import GHC.Types.Name.Env ( NameEnv, mkNameEnv, lookupNameEnv, lookupNameEnv_NF ) import GHC.Types.Basic import GHC.Types.ForeignCall import GHC.Types.Unique.Set import Data.Array import GHC.Data.FastString import GHC.Data.BooleanFormula ( mkAnd ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Panic.Plain import qualified Data.ByteString.Char8 as BS import Data.List ( elemIndex, intersperse ) alpha_tyvar :: [TyVar] alpha_tyvar = [alphaTyVar] alpha_ty :: [Type] alpha_ty = [alphaTy] {- Note [Wired-in Types and Type Constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module include a lot of wired-in types and type constructors. Here, these are presented in a tabular format to make it easier to find the wired-in type identifier corresponding to a known Haskell type. 
Data constructors are nested under their corresponding types with two spaces of indentation. Identifier Type Haskell name Notes ---------------------------------------------------------------------------- liftedTypeKindTyCon TyCon GHC.Types.Type Synonym for: TYPE LiftedRep unliftedTypeKindTyCon TyCon GHC.Types.Type Synonym for: TYPE UnliftedRep liftedRepTyCon TyCon GHC.Types.LiftedRep Synonym for: 'BoxedRep 'Lifted unliftedRepTyCon TyCon GHC.Types.LiftedRep Synonym for: 'BoxedRep 'Unlifted levityTyCon TyCon GHC.Types.Levity Data type liftedDataConTyCon TyCon GHC.Types.Lifted Data constructor unliftedDataConTyCon TyCon GHC.Types.Unlifted Data constructor vecCountTyCon TyCon GHC.Types.VecCount Data type vec2DataConTy Type GHC.Types.Vec2 Data constructor vec4DataConTy Type GHC.Types.Vec4 Data constructor vec8DataConTy Type GHC.Types.Vec8 Data constructor vec16DataConTy Type GHC.Types.Vec16 Data constructor vec32DataConTy Type GHC.Types.Vec32 Data constructor vec64DataConTy Type GHC.Types.Vec64 Data constructor runtimeRepTyCon TyCon GHC.Types.RuntimeRep Data type boxedRepDataConTyCon TyCon GHC.Types.BoxedRep Data constructor intRepDataConTy Type GHC.Types.IntRep Data constructor doubleRepDataConTy Type GHC.Types.DoubleRep Data constructor floatRepDataConTy Type GHC.Types.FloatRep Data constructor boolTyCon TyCon GHC.Types.Bool Data type trueDataCon DataCon GHC.Types.True Data constructor falseDataCon DataCon GHC.Types.False Data constructor promotedTrueDataCon TyCon GHC.Types.True Data constructor promotedFalseDataCon TyCon GHC.Types.False Data constructor ************************************************************************ * * \subsection{Wired in type constructors} * * ************************************************************************ If you change which things are wired in, make sure you change their names in GHC.Builtin.Names, so they use wTcQual, wDataQual, etc -} -- This list is used only to define GHC.Builtin.Utils.wiredInThings. 
That in turn -- is used to initialise the name environment carried around by the renamer. -- This means that if we look up the name of a TyCon (or its implicit binders) -- that occurs in this list that name will be assigned the wired-in key we -- define here. -- -- Because of their infinite nature, this list excludes -- * tuples, including boxed, unboxed and constraint tuples --- (mkTupleTyCon, unitTyCon, pairTyCon) -- * unboxed sums (sumTyCon) -- See Note [Infinite families of known-key names] in GHC.Builtin.Names -- -- See also Note [Known-key names] wiredInTyCons :: [TyCon] wiredInTyCons = [ -- Units are not treated like other tuples, because they -- are defined in GHC.Base, and there's only a few of them. We -- put them in wiredInTyCons so that they will pre-populate -- the name cache, so the parser in isBuiltInOcc_maybe doesn't -- need to look out for them. unitTyCon , unboxedUnitTyCon -- Solo (i.e., the bosed 1-tuple) is also not treated -- like other tuples (i.e. we /do/ include it here), -- since it does not use special syntax like other tuples -- See Note [One-tuples] (Wrinkle: Make boxed one-tuple names -- have known keys) in GHC.Builtin.Types. 
, soloTyCon , anyTyCon , boolTyCon , charTyCon , stringTyCon , doubleTyCon , floatTyCon , intTyCon , wordTyCon , listTyCon , orderingTyCon , maybeTyCon , heqTyCon , eqTyCon , coercibleTyCon , typeSymbolKindCon , runtimeRepTyCon , levityTyCon , vecCountTyCon , vecElemTyCon , constraintKindTyCon , liftedTypeKindTyCon , unliftedTypeKindTyCon , multiplicityTyCon , naturalTyCon , integerTyCon , liftedRepTyCon , unliftedRepTyCon , zeroBitRepTyCon , zeroBitTypeTyCon , nonEmptyTyCon ] mkWiredInTyConName :: BuiltInSyntax -> Module -> FastString -> Unique -> TyCon -> Name mkWiredInTyConName built_in modu fs unique tycon = mkWiredInName modu (mkTcOccFS fs) unique (ATyCon tycon) -- Relevant TyCon built_in mkWiredInDataConName :: BuiltInSyntax -> Module -> FastString -> Unique -> DataCon -> Name mkWiredInDataConName built_in modu fs unique datacon = mkWiredInName modu (mkDataOccFS fs) unique (AConLike (RealDataCon datacon)) -- Relevant DataCon built_in mkWiredInIdName :: Module -> FastString -> Unique -> Id -> Name mkWiredInIdName mod fs uniq id = mkWiredInName mod (mkOccNameFS Name.varName fs) uniq (AnId id) UserSyntax -- See Note [Kind-changing of (~) and Coercible] -- in libraries/ghc-prim/GHC/Types.hs eqTyConName, eqDataConName, eqSCSelIdName :: Name eqTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "~") eqTyConKey eqTyCon eqDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Eq#") eqDataConKey eqDataCon eqSCSelIdName = mkWiredInIdName gHC_TYPES (fsLit "eq_sel") eqSCSelIdKey eqSCSelId eqTyCon_RDR :: RdrName eqTyCon_RDR = nameRdrName eqTyConName -- See Note [Kind-changing of (~) and Coercible] -- in libraries/ghc-prim/GHC/Types.hs heqTyConName, heqDataConName, heqSCSelIdName :: Name heqTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "~~") heqTyConKey heqTyCon heqDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "HEq#") heqDataConKey heqDataCon heqSCSelIdName = mkWiredInIdName gHC_TYPES (fsLit "heq_sel") heqSCSelIdKey heqSCSelId 
-- See Note [Kind-changing of (~) and Coercible] in libraries/ghc-prim/GHC/Types.hs coercibleTyConName, coercibleDataConName, coercibleSCSelIdName :: Name coercibleTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Coercible") coercibleTyConKey coercibleTyCon coercibleDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "MkCoercible") coercibleDataConKey coercibleDataCon coercibleSCSelIdName = mkWiredInIdName gHC_TYPES (fsLit "coercible_sel") coercibleSCSelIdKey coercibleSCSelId charTyConName, charDataConName, intTyConName, intDataConName, stringTyConName :: Name charTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Char") charTyConKey charTyCon charDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "C#") charDataConKey charDataCon stringTyConName = mkWiredInTyConName UserSyntax gHC_BASE (fsLit "String") stringTyConKey stringTyCon intTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Int") intTyConKey intTyCon intDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "I#") intDataConKey intDataCon boolTyConName, falseDataConName, trueDataConName :: Name boolTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Bool") boolTyConKey boolTyCon falseDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "False") falseDataConKey falseDataCon trueDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "True") trueDataConKey trueDataCon listTyConName, nilDataConName, consDataConName :: Name listTyConName = mkWiredInTyConName BuiltInSyntax gHC_TYPES (fsLit "[]") listTyConKey listTyCon nilDataConName = mkWiredInDataConName BuiltInSyntax gHC_TYPES (fsLit "[]") nilDataConKey nilDataCon consDataConName = mkWiredInDataConName BuiltInSyntax gHC_TYPES (fsLit ":") consDataConKey consDataCon nonEmptyTyConName, nonEmptyDataConName :: Name nonEmptyTyConName = mkWiredInTyConName UserSyntax gHC_BASE (fsLit "NonEmpty") nonEmptyTyConKey nonEmptyTyCon nonEmptyDataConName = mkWiredInDataConName UserSyntax 
gHC_BASE (fsLit ":|") nonEmptyDataConKey nonEmptyDataCon maybeTyConName, nothingDataConName, justDataConName :: Name maybeTyConName = mkWiredInTyConName UserSyntax gHC_MAYBE (fsLit "Maybe") maybeTyConKey maybeTyCon nothingDataConName = mkWiredInDataConName UserSyntax gHC_MAYBE (fsLit "Nothing") nothingDataConKey nothingDataCon justDataConName = mkWiredInDataConName UserSyntax gHC_MAYBE (fsLit "Just") justDataConKey justDataCon wordTyConName, wordDataConName, word8DataConName :: Name wordTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Word") wordTyConKey wordTyCon wordDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "W#") wordDataConKey wordDataCon word8DataConName = mkWiredInDataConName UserSyntax gHC_WORD (fsLit "W8#") word8DataConKey word8DataCon floatTyConName, floatDataConName, doubleTyConName, doubleDataConName :: Name floatTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Float") floatTyConKey floatTyCon floatDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "F#") floatDataConKey floatDataCon doubleTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Double") doubleTyConKey doubleTyCon doubleDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "D#") doubleDataConKey doubleDataCon -- Any {- Note [Any types] ~~~~~~~~~~~~~~~~ The type constructor Any, type family Any :: k where { } It has these properties: * Note that 'Any' is kind polymorphic since in some program we may need to use Any to fill in a type variable of some kind other than * (see #959 for examples). Its kind is thus `forall k. k``. * It is defined in module GHC.Types, and exported so that it is available to users. For this reason it's treated like any other wired-in type: - has a fixed unique, anyTyConKey, - lives in the global name cache * It is a *closed* type family, with no instances. 
This means that if ty :: '(k1, k2) we add a given coercion g :: ty ~ (Fst ty, Snd ty) If Any was a *data* type, then we'd get inconsistency because 'ty' could be (Any '(k1,k2)) and then we'd have an equality with Any on one side and '(,) on the other. See also #9097 and #9636. * When instantiated at a lifted type it is inhabited by at least one value, namely bottom * You can safely coerce any /lifted/ type to Any, and back with unsafeCoerce. * It does not claim to be a *data* type, and that's important for the code generator, because the code gen may *enter* a data value but never enters a function value. * It is wired-in so we can easily refer to it where we don't have a name environment (e.g. see Rules.matchRule for one example) It's used to instantiate un-constrained type variables after type checking. For example, 'length' has type length :: forall a. [a] -> Int and the list datacon for the empty list has type [] :: forall a. [a] In order to compose these two terms as @length []@ a type application is required, but there is no constraint on the choice. In this situation GHC uses 'Any', > length (Any *) ([] (Any *)) Above, we print kinds explicitly, as if with --fprint-explicit-kinds. The Any tycon used to be quite magic, but we have since been able to implement it merely with an empty kind polymorphic type family. See #10886 for a bit of history. -} anyTyConName :: Name anyTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Any") anyTyConKey anyTyCon anyTyCon :: TyCon anyTyCon = mkFamilyTyCon anyTyConName binders res_kind Nothing (ClosedSynFamilyTyCon Nothing) Nothing NotInjective where binders@[kv] = mkTemplateKindTyConBinders [liftedTypeKind] res_kind = mkTyVarTy (binderVar kv) anyTy :: Type anyTy = mkTyConTy anyTyCon anyTypeOfKind :: Kind -> Type anyTypeOfKind kind = mkTyConApp anyTyCon [kind] -- | Make a fake, recovery 'TyCon' from an existing one. 
-- Used when recovering from errors in type declarations
makeRecoveryTyCon :: TyCon -> TyCon
makeRecoveryTyCon tc
  = mkTcTyCon (tyConName tc)
              bndrs res_kind
              noTcTyConScopedTyVars
              True             -- Fully generalised
              flavour          -- Keep old flavour
  where
    flavour = tyConFlavour tc
    [kv] = mkTemplateKindVars [liftedTypeKind]
    (bndrs, res_kind)
      = case flavour of
          PromotedDataConFlavour -> ([mkNamedTyConBinder Inferred kv], mkTyVarTy kv)
          _ -> (tyConBinders tc, tyConResKind tc)
      -- For data types we have already validated their kind, so it
      -- makes sense to keep it. For promoted data constructors we haven't,
      -- so we recover with kind (forall k. k). Otherwise consider
      --   data T a where { MkT :: Show a => T a }
      -- If T is for some reason invalid, we don't want to fall over
      -- at (promoted) use-sites of MkT.

-- Kinds

-- | Wired-in 'Name' for the @Symbol@ kind constructor, defined in GHC.Types.
typeSymbolKindConName :: Name
typeSymbolKindConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "Symbol") typeSymbolKindConNameKey typeSymbolKindCon

-- | Wired-in 'Name' for the @Constraint@ kind constructor, defined in GHC.Types.
constraintKindTyConName :: Name
constraintKindTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "Constraint") constraintKindTyConKey constraintKindTyCon

-- Wired-in Names for the kinds Type, UnliftedType and ZeroBitType
-- (all defined in GHC.Types).
liftedTypeKindTyConName, unliftedTypeKindTyConName, zeroBitTypeTyConName :: Name
liftedTypeKindTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "Type") liftedTypeKindTyConKey liftedTypeKindTyCon
unliftedTypeKindTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "UnliftedType") unliftedTypeKindTyConKey unliftedTypeKindTyCon
zeroBitTypeTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "ZeroBitType") zeroBitTypeTyConKey zeroBitTypeTyCon

-- Wired-in Names for the RuntimeRep synonyms LiftedRep, UnliftedRep
-- and ZeroBitRep (all defined in GHC.Types).
liftedRepTyConName, unliftedRepTyConName, zeroBitRepTyConName :: Name
liftedRepTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "LiftedRep") liftedRepTyConKey liftedRepTyCon
unliftedRepTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "UnliftedRep") unliftedRepTyConKey unliftedRepTyCon
-- NOTE(review): the continuation of this binding ('zeroBitRepTyCon') lies in
-- the next chunk; the trailing fragment below is preserved verbatim.
zeroBitRepTyConName = mkWiredInTyConName UserSyntax
  gHC_TYPES (fsLit "ZeroBitRep") zeroBitRepTyConKey
zeroBitRepTyCon multiplicityTyConName :: Name multiplicityTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Multiplicity") multiplicityTyConKey multiplicityTyCon oneDataConName, manyDataConName :: Name oneDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "One") oneDataConKey oneDataCon manyDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Many") manyDataConKey manyDataCon runtimeRepTyConName, vecRepDataConName, tupleRepDataConName, sumRepDataConName, boxedRepDataConName :: Name runtimeRepTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "RuntimeRep") runtimeRepTyConKey runtimeRepTyCon vecRepDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "VecRep") vecRepDataConKey vecRepDataCon tupleRepDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "TupleRep") tupleRepDataConKey tupleRepDataCon sumRepDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "SumRep") sumRepDataConKey sumRepDataCon boxedRepDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "BoxedRep") boxedRepDataConKey boxedRepDataCon levityTyConName, liftedDataConName, unliftedDataConName :: Name levityTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "Levity") levityTyConKey levityTyCon liftedDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Lifted") liftedDataConKey liftedDataCon unliftedDataConName = mkWiredInDataConName UserSyntax gHC_TYPES (fsLit "Unlifted") unliftedDataConKey unliftedDataCon -- See Note [Wiring in RuntimeRep] runtimeRepSimpleDataConNames :: [Name] runtimeRepSimpleDataConNames = zipWith3Lazy mk_special_dc_name [ fsLit "IntRep" , fsLit "Int8Rep", fsLit "Int16Rep", fsLit "Int32Rep", fsLit "Int64Rep" , fsLit "WordRep" , fsLit "Word8Rep", fsLit "Word16Rep", fsLit "Word32Rep", fsLit "Word64Rep" , fsLit "AddrRep" , fsLit "FloatRep", fsLit "DoubleRep" ] runtimeRepSimpleDataConKeys runtimeRepSimpleDataCons vecCountTyConName :: Name vecCountTyConName = mkWiredInTyConName UserSyntax 
gHC_TYPES (fsLit "VecCount") vecCountTyConKey vecCountTyCon -- See Note [Wiring in RuntimeRep] vecCountDataConNames :: [Name] vecCountDataConNames = zipWith3Lazy mk_special_dc_name [ fsLit "Vec2", fsLit "Vec4", fsLit "Vec8" , fsLit "Vec16", fsLit "Vec32", fsLit "Vec64" ] vecCountDataConKeys vecCountDataCons vecElemTyConName :: Name vecElemTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "VecElem") vecElemTyConKey vecElemTyCon -- See Note [Wiring in RuntimeRep] vecElemDataConNames :: [Name] vecElemDataConNames = zipWith3Lazy mk_special_dc_name [ fsLit "Int8ElemRep", fsLit "Int16ElemRep", fsLit "Int32ElemRep" , fsLit "Int64ElemRep", fsLit "Word8ElemRep", fsLit "Word16ElemRep" , fsLit "Word32ElemRep", fsLit "Word64ElemRep" , fsLit "FloatElemRep", fsLit "DoubleElemRep" ] vecElemDataConKeys vecElemDataCons mk_special_dc_name :: FastString -> Unique -> DataCon -> Name mk_special_dc_name fs u dc = mkWiredInDataConName UserSyntax gHC_TYPES fs u dc boolTyCon_RDR, false_RDR, true_RDR, intTyCon_RDR, charTyCon_RDR, stringTyCon_RDR, intDataCon_RDR, listTyCon_RDR, consDataCon_RDR :: RdrName boolTyCon_RDR = nameRdrName boolTyConName false_RDR = nameRdrName falseDataConName true_RDR = nameRdrName trueDataConName intTyCon_RDR = nameRdrName intTyConName charTyCon_RDR = nameRdrName charTyConName stringTyCon_RDR = nameRdrName stringTyConName intDataCon_RDR = nameRdrName intDataConName listTyCon_RDR = nameRdrName listTyConName consDataCon_RDR = nameRdrName consDataConName {- ************************************************************************ * * \subsection{mkWiredInTyCon} * * ************************************************************************ -} -- This function assumes that the types it creates have all parameters at -- Representational role, and that there is no kind polymorphism. 
pcTyCon :: Name -> Maybe CType -> [TyVar] -> [DataCon] -> TyCon pcTyCon name cType tyvars cons = mkAlgTyCon name (mkAnonTyConBinders VisArg tyvars) liftedTypeKind (map (const Representational) tyvars) cType [] -- No stupid theta (mkDataTyConRhs cons) (VanillaAlgTyCon (mkPrelTyConRepName name)) False -- Not in GADT syntax pcDataCon :: Name -> [TyVar] -> [Type] -> TyCon -> DataCon pcDataCon n univs tys = pcDataConW n univs (map linear tys) pcDataConW :: Name -> [TyVar] -> [Scaled Type] -> TyCon -> DataCon pcDataConW n univs tys = pcDataConWithFixity False n univs [] -- no ex_tvs univs -- the univs are precisely the user-written tyvars tys pcDataConWithFixity :: Bool -- ^ declared infix? -> Name -- ^ datacon name -> [TyVar] -- ^ univ tyvars -> [TyCoVar] -- ^ ex tycovars -> [TyCoVar] -- ^ user-written tycovars -> [Scaled Type] -- ^ args -> TyCon -> DataCon pcDataConWithFixity infx n = pcDataConWithFixity' infx n (dataConWorkerUnique (nameUnique n)) NoRRI -- The Name's unique is the first of two free uniques; -- the first is used for the datacon itself, -- the second is used for the "worker name" -- -- To support this the mkPreludeDataConUnique function "allocates" -- one DataCon unique per pair of Ints. pcDataConWithFixity' :: Bool -> Name -> Unique -> RuntimeRepInfo -> [TyVar] -> [TyCoVar] -> [TyCoVar] -> [Scaled Type] -> TyCon -> DataCon -- The Name should be in the DataName name space; it's the name -- of the DataCon itself. -- -- IMPORTANT NOTE: -- if you try to wire-in a /GADT/ data constructor you will -- find it hard (we did). You will need wrapper and worker -- Names, a DataConBoxer, DataConRep, EqSpec, etc. -- Try hard not to wire-in GADT data types. You will live -- to regret doing so (we do). pcDataConWithFixity' declared_infix dc_name wrk_key rri tyvars ex_tyvars user_tyvars arg_tys tycon = data_con where tag_map = mkTyConTagMap tycon -- This constructs the constructor Name to ConTag map once per -- constructor, which is quadratic. 
It's OK here, because it's -- only called for wired in data types that don't have a lot of -- constructors. It's also likely that GHC will lift tag_map, since -- we call pcDataConWithFixity' with static TyCons in the same module. -- See Note [Constructor tag allocation] and #14657 data_con = mkDataCon dc_name declared_infix prom_info (map (const no_bang) arg_tys) [] -- No labelled fields tyvars ex_tyvars (mkTyVarBinders SpecifiedSpec user_tyvars) [] -- No equality spec [] -- No theta arg_tys (mkTyConApp tycon (mkTyVarTys tyvars)) rri tycon (lookupNameEnv_NF tag_map dc_name) [] -- No stupid theta (mkDataConWorkId wrk_name data_con) NoDataConRep -- Wired-in types are too simple to need wrappers no_bang = HsSrcBang NoSourceText NoSrcUnpack NoSrcStrict wrk_name = mkDataConWorkerName data_con wrk_key prom_info = mkPrelTyConRepName dc_name mkDataConWorkerName :: DataCon -> Unique -> Name mkDataConWorkerName data_con wrk_key = mkWiredInName modu wrk_occ wrk_key (AnId (dataConWorkId data_con)) UserSyntax where modu = assert (isExternalName dc_name) $ nameModule dc_name dc_name = dataConName data_con dc_occ = nameOccName dc_name wrk_occ = mkDataConWorkerOcc dc_occ -- used for RuntimeRep and friends pcSpecialDataCon :: Name -> [Type] -> TyCon -> RuntimeRepInfo -> DataCon pcSpecialDataCon dc_name arg_tys tycon rri = pcDataConWithFixity' False dc_name (dataConWorkerUnique (nameUnique dc_name)) rri [] [] [] (map linear arg_tys) tycon {- ************************************************************************ * * Kinds * * ************************************************************************ -} typeSymbolKindCon :: TyCon -- data Symbol typeSymbolKindCon = pcTyCon typeSymbolKindConName Nothing [] [] typeSymbolKind :: Kind typeSymbolKind = mkTyConTy typeSymbolKindCon constraintKindTyCon :: TyCon -- 'TyCon.isConstraintKindCon' assumes that this is an AlgTyCon! 
constraintKindTyCon = pcTyCon constraintKindTyConName Nothing [] [] typeToTypeKind, constraintKind :: Kind typeToTypeKind = liftedTypeKind `mkVisFunTyMany` liftedTypeKind constraintKind = mkTyConTy constraintKindTyCon {- ************************************************************************ * * Stuff for dealing with tuples * * ************************************************************************ Note [How tuples work] ~~~~~~~~~~~~~~~~~~~~~~ * There are three families of tuple TyCons and corresponding DataCons, expressed by the type BasicTypes.TupleSort: data TupleSort = BoxedTuple | UnboxedTuple | ConstraintTuple * All three families are AlgTyCons, whose AlgTyConRhs is TupleTyCon * BoxedTuples - A wired-in type - Data type declarations in GHC.Tuple - The data constructors really have an info table * UnboxedTuples - A wired-in type - Have a pretend DataCon, defined in GHC.Prim, but no actual declaration and no info table * ConstraintTuples - A wired-in type. - Declared as classes in GHC.Classes, e.g. class (c1,c2) => (c1,c2) - Given constraints: the superclasses automatically become available - Wanted constraints: there is a built-in instance instance (c1,c2) => (c1,c2) See GHC.Tc.Instance.Class.matchCTuple - Currently just go up to 64; beyond that you have to use manual nesting - Their OccNames look like (%,,,%), so they can easily be distinguished from term tuples. But (following Haskell) we pretty-print saturated constraint tuples with round parens; see BasicTypes.tupleParens. - Unlike BoxedTuples and UnboxedTuples, which only wire in type constructors and data constructors, ConstraintTuples also wire in superclass selector functions. For instance, $p1(%,%) and $p2(%,%) are the selectors for the binary constraint tuple. * In quite a lot of places things are restricted just to BoxedTuple/UnboxedTuple, and then we used BasicTypes.Boxity to distinguish E.g. 
tupleTyCon has a Boxity argument * When looking up an OccName in the original-name cache (GHC.Iface.Env.lookupOrigNameCache), we spot the tuple OccName to make sure we get the right wired-in name. This guy can't tell the difference between BoxedTuple and ConstraintTuple (same OccName!), so tuples are not serialised into interface files using OccNames at all. * Serialization to interface files works via the usual mechanism for known-key things: instead of serializing the OccName we just serialize the key. During deserialization we lookup the Name associated with the unique with the logic in GHC.Builtin.Uniques. See Note [Symbol table representation of names] for details. See also Note [Known-key names] in GHC.Builtin.Names. Note [One-tuples] ~~~~~~~~~~~~~~~~~ GHC supports both boxed and unboxed one-tuples: - Unboxed one-tuples are sometimes useful when returning a single value after CPR analysis - A boxed one-tuple is used by GHC.HsToCore.Utils.mkSelectorBinds, when there is just one binder Basically it keeps everything uniform. However the /naming/ of the type/data constructors for one-tuples is a bit odd: 3-tuples: (,,) (,,)# 2-tuples: (,) (,)# 1-tuples: ?? 0-tuples: () ()# Zero-tuples have used up the logical name. So we use 'Solo' and 'Solo#' for one-tuples. So in ghc-prim:GHC.Tuple we see the declarations: data () = () data Solo a = Solo a data (a,b) = (a,b) There is no way to write a boxed one-tuple in Haskell using tuple syntax. They can, however, be written using other methods: 1. They can be written directly by importing them from GHC.Tuple. 2. They can be generated by way of Template Haskell or in `deriving` code. There is nothing special about one-tuples in Core; in particular, they have no custom pretty-printing, just using `Solo`. Note that there is *not* a unary constraint tuple, unlike for other forms of tuples. See [Ignore unary constraint tuples] in GHC.Tc.Gen.HsType for more details. 
See also Note [Flattening one-tuples] in GHC.Core.Make and Note [Don't flatten tuples from HsSyn] in GHC.Core.Make. ----- -- Wrinkle: Make boxed one-tuple names have known keys ----- We make boxed one-tuple names have known keys so that `data Solo a = Solo a`, defined in GHC.Tuple, will be used when one-tuples are spliced in through Template Haskell. This program (from #18097) crucially relies on this: case $( tupE [ [| "ok" |] ] ) of Solo x -> putStrLn x Unless Solo has a known key, the type of `$( tupE [ [| "ok" |] ] )` (an ExplicitTuple of length 1) will not match the type of Solo (an ordinary data constructor used in a pattern). Making Solo known-key allows GHC to make this connection. Unlike Solo, every other tuple is /not/ known-key (see Note [Infinite families of known-key names] in GHC.Builtin.Names). The main reason for this exception is that other tuples are written with special syntax, and as a result, they are renamed using a special `isBuiltInOcc_maybe` function (see Note [Built-in syntax and the OrigNameCache] in GHC.Types.Name.Cache). In contrast, Solo is just an ordinary data type with no special syntax, so it doesn't really make sense to handle it in `isBuiltInOcc_maybe`. Making Solo known-key is the next-best way to teach the internals of the compiler about it. -} -- | Built-in syntax isn't "in scope" so these OccNames map to wired-in Names -- with BuiltInSyntax. However, this should only be necessary while resolving -- names produced by Template Haskell splices since we take care to encode -- built-in syntax names specially in interface files. See -- Note [Symbol table representation of names]. -- -- Moreover, there is no need to include names of things that the user can't -- write (e.g. type representation bindings like $tc(,,,)). 
isBuiltInOcc_maybe :: OccName -> Maybe Name
isBuiltInOcc_maybe occ =
  -- Dispatch on the raw bytes of the OccName's FastString; the namespace of
  -- 'occ' (tycon vs datacon) is consulted only at the end, by 'choose_ns'.
  case name of
    "[]"  -> Just $ choose_ns listTyConName nilDataConName
    ":"   -> Just consDataConName

    -- function tycon
    "FUN" -> Just funTyConName
    "->"  -> Just unrestrictedFunTyConName

    -- boxed tuple data/tycon
    -- We deliberately exclude Solo (the boxed 1-tuple).
    -- See Note [One-tuples] (Wrinkle: Make boxed one-tuple names have known keys)
    "()"  -> Just $ tup_name Boxed 0
    _ | Just rest <- "(" `BS.stripPrefix` name
      , (commas, rest') <- BS.span (==',') rest
      , ")" <- rest'
      -> Just $ tup_name Boxed (1+BS.length commas)

    -- unboxed tuple data/tycon
    "(##)"  -> Just $ tup_name Unboxed 0
    "Solo#" -> Just $ tup_name Unboxed 1
    _ | Just rest <- "(#" `BS.stripPrefix` name
      , (commas, rest') <- BS.span (==',') rest
      , "#)" <- rest'
      -> Just $ tup_name Unboxed (1+BS.length commas)

    -- unboxed sum tycon
    _ | Just rest <- "(#" `BS.stripPrefix` name
      , (nb_pipes, rest') <- span_pipes rest
      , "#)" <- rest'
      -> Just $ tyConName $ sumTyCon (1+nb_pipes)

    -- unboxed sum datacon: "(# | ... _ ... | #)"; the pipes before the
    -- underscore give the alternative, the total pipe count gives the arity
    _ | Just rest <- "(#" `BS.stripPrefix` name
      , (nb_pipes1, rest') <- span_pipes rest
      , Just rest'' <- "_" `BS.stripPrefix` rest'
      , (nb_pipes2, rest''') <- span_pipes rest''
      , "#)" <- rest'''
      -> let arity = nb_pipes1 + nb_pipes2 + 1
             alt   = nb_pipes1 + 1
         in Just $ dataConName $ sumDataCon alt arity

    _ -> Nothing
  where
    name = bytesFS $ occNameFS occ

    -- Count leading pipes, skipping any interleaved spaces; return the count
    -- and the remaining bytes.
    span_pipes :: BS.ByteString -> (Int, BS.ByteString)
    span_pipes = go 0
      where
        go nb_pipes bs = case BS.uncons bs of
          Just ('|',rest) -> go (nb_pipes + 1) rest
          Just (' ',rest) -> go nb_pipes rest
          _               -> (nb_pipes, bs)

    -- Pick the tycon or datacon result according to occ's namespace.
    choose_ns :: Name -> Name -> Name
    choose_ns tc dc
      | isTcClsNameSpace ns   = tc
      | isDataConNameSpace ns = dc
      | otherwise             = pprPanic "tup_name" (ppr occ)
      where ns = occNameSpace occ

    tup_name boxity arity
      = choose_ns (getName (tupleTyCon   boxity arity))
                  (getName (tupleDataCon boxity arity))

mkTupleOcc :: NameSpace -> Boxity -> Arity -> OccName
-- No need to cache these, the caching is done in mk_tuple
mkTupleOcc ns Boxed
ar = mkOccName ns (mkBoxedTupleStr ar)
mkTupleOcc ns Unboxed ar = mkOccName ns (mkUnboxedTupleStr ar)

-- | 'OccName' for an n-ary constraint tuple in the given namespace.
mkCTupleOcc :: NameSpace -> Arity -> OccName
mkCTupleOcc ns ar = mkOccName ns (mkConstraintTupleStr ar)

-- | Source-syntax rendering of a boxed or unboxed tuple name.
mkTupleStr :: Boxity -> Arity -> String
mkTupleStr Boxed   = mkBoxedTupleStr
mkTupleStr Unboxed = mkUnboxedTupleStr

mkBoxedTupleStr :: Arity -> String
mkBoxedTupleStr 0  = "()"
mkBoxedTupleStr 1  = "Solo"   -- See Note [One-tuples]
mkBoxedTupleStr ar = '(' : commas ar ++ ")"

mkUnboxedTupleStr :: Arity -> String
mkUnboxedTupleStr 0  = "(##)"
mkUnboxedTupleStr 1  = "Solo#"  -- See Note [One-tuples]
mkUnboxedTupleStr ar = "(#" ++ commas ar ++ "#)"

mkConstraintTupleStr :: Arity -> String
mkConstraintTupleStr 0  = "(%%)"
mkConstraintTupleStr 1  = "Solo%"  -- See Note [One-tuples]
mkConstraintTupleStr ar = "(%" ++ commas ar ++ "%)"

-- | The (ar-1) commas separating the fields of an ar-ary tuple,
-- e.g. @commas 3 == ",,"@; empty for arities 0 and 1.
commas :: Arity -> String
commas ar = replicate (ar-1) ','
  -- was: take (ar-1) (repeat ','); replicate is the direct Prelude idiom

cTupleTyCon :: Arity -> TyCon
cTupleTyCon i
  | i > mAX_CTUPLE_SIZE = fstOf3 (mk_ctuple i)  -- Build one specially
  | otherwise           = fstOf3 (cTupleArr ! i)

cTupleTyConName :: Arity -> Name
cTupleTyConName a = tyConName (cTupleTyCon a)

-- Note: no unary constraint tuple, hence the jump from 0 to 2.
cTupleTyConNames :: [Name]
cTupleTyConNames = map cTupleTyConName (0 : [2..mAX_CTUPLE_SIZE])

cTupleTyConKeys :: UniqSet Unique
cTupleTyConKeys = mkUniqSet $ map getUnique cTupleTyConNames

isCTupleTyConName :: Name -> Bool
isCTupleTyConName n
  = assertPpr (isExternalName n) (ppr n) $
    getUnique n `elementOfUniqSet` cTupleTyConKeys

-- | If the given name is that of a constraint tuple, return its arity.
cTupleTyConNameArity_maybe :: Name -> Maybe Arity
cTupleTyConNameArity_maybe n
  | not (isCTupleTyConName n) = Nothing
  | otherwise = fmap adjustArity (n `elemIndex` cTupleTyConNames)
  where
    -- Since `cTupleTyConNames` jumps straight from the `0` to the `2`
    -- case, we have to adjust accordingly our calculated arity.
adjustArity a = if a > 0 then a + 1 else a

cTupleDataCon :: Arity -> DataCon
cTupleDataCon i
  | i > mAX_CTUPLE_SIZE = sndOf3 (mk_ctuple i)  -- Build one specially
  | otherwise           = sndOf3 (cTupleArr ! i)

cTupleDataConName :: Arity -> Name
cTupleDataConName i = dataConName (cTupleDataCon i)

-- No unary constraint tuple, hence the jump from 0 to 2.
cTupleDataConNames :: [Name]
cTupleDataConNames = map cTupleDataConName (0 : [2..mAX_CTUPLE_SIZE])

-- | Superclass selector of a constraint tuple, indexed by (1-based)
-- superclass position and tuple arity.  Cached in 'cTupleArr' up to
-- mAX_CTUPLE_SIZE; bigger ones are built on demand.
cTupleSelId :: ConTag -- Superclass position
            -> Arity  -- Arity
            -> Id
cTupleSelId sc_pos arity
  | sc_pos > arity
  = panic ("cTupleSelId: index out of bounds: superclass position: "
           ++ show sc_pos ++ " > arity " ++ show arity)

  | sc_pos <= 0
  = panic ("cTupleSelId: Superclass positions start from 1. "
           ++ "(superclass position: " ++ show sc_pos
           ++ ", arity: " ++ show arity ++ ")")

  | arity < 2
  = panic ("cTupleSelId: Arity starts from 2. "
           ++ "(superclass position: " ++ show sc_pos
           ++ ", arity: " ++ show arity ++ ")")

  | arity > mAX_CTUPLE_SIZE
  = thdOf3 (mk_ctuple arity) ! (sc_pos - 1)  -- Build one specially

  | otherwise
  = thdOf3 (cTupleArr ! arity) ! (sc_pos - 1)

cTupleSelIdName :: ConTag -- Superclass position
                -> Arity  -- Arity
                -> Name
cTupleSelIdName sc_pos arity = idName (cTupleSelId sc_pos arity)

-- | Tuple 'TyCon' for the given boxity and arity; cached up to
-- mAX_TUPLE_SIZE, built specially beyond that.
tupleTyCon :: Boxity -> Arity -> TyCon
tupleTyCon sort i | i > mAX_TUPLE_SIZE = fst (mk_tuple sort i)  -- Build one specially
tupleTyCon Boxed   i = fst (boxedTupleArr   ! i)
tupleTyCon Unboxed i = fst (unboxedTupleArr ! i)

tupleTyConName :: TupleSort -> Arity -> Name
tupleTyConName ConstraintTuple a = cTupleTyConName a
tupleTyConName BoxedTuple      a = tyConName (tupleTyCon Boxed a)
tupleTyConName UnboxedTuple    a = tyConName (tupleTyCon Unboxed a)

promotedTupleDataCon :: Boxity -> Arity -> TyCon
promotedTupleDataCon boxity i = promoteDataCon (tupleDataCon boxity i)

-- | Tuple 'DataCon' for the given boxity and arity; same caching
-- scheme as 'tupleTyCon'.  (Final clause continues in the next chunk.)
tupleDataCon :: Boxity -> Arity -> DataCon
tupleDataCon sort i | i > mAX_TUPLE_SIZE = snd (mk_tuple sort i)  -- Build one specially
tupleDataCon Boxed   i = snd (boxedTupleArr   ! i)
tupleDataCon Unboxed i = snd (unboxedTupleArr !
i) tupleDataConName :: Boxity -> Arity -> Name tupleDataConName sort i = dataConName (tupleDataCon sort i) mkPromotedPairTy :: Kind -> Kind -> Type -> Type -> Type mkPromotedPairTy k1 k2 t1 t2 = mkTyConApp (promotedTupleDataCon Boxed 2) [k1,k2,t1,t2] isPromotedPairType :: Type -> Maybe (Type, Type) isPromotedPairType t | Just (tc, [_,_,x,y]) <- splitTyConApp_maybe t , tc == promotedTupleDataCon Boxed 2 = Just (x, y) | otherwise = Nothing boxedTupleArr, unboxedTupleArr :: Array Int (TyCon,DataCon) boxedTupleArr = listArray (0,mAX_TUPLE_SIZE) [mk_tuple Boxed i | i <- [0..mAX_TUPLE_SIZE]] unboxedTupleArr = listArray (0,mAX_TUPLE_SIZE) [mk_tuple Unboxed i | i <- [0..mAX_TUPLE_SIZE]] -- | Cached type constructors, data constructors, and superclass selectors for -- constraint tuples. The outer array is indexed by the arity of the constraint -- tuple and the inner array is indexed by the superclass position. cTupleArr :: Array Int (TyCon, DataCon, Array Int Id) cTupleArr = listArray (0,mAX_CTUPLE_SIZE) [mk_ctuple i | i <- [0..mAX_CTUPLE_SIZE]] -- Although GHC does not make use of unary constraint tuples -- (see Note [Ignore unary constraint tuples] in GHC.Tc.Gen.HsType), -- this array creates one anyway. This is primarily motivated by the fact -- that (1) the indices of an Array must be contiguous, and (2) we would like -- the index of a constraint tuple in this Array to correspond to its Arity. -- We could envision skipping over the unary constraint tuple and having index -- 1 correspond to a 2-constraint tuple (and so on), but that's more -- complicated than it's worth. -- | Given the TupleRep/SumRep tycon and list of RuntimeReps of the unboxed -- tuple/sum arguments, produces the return kind of an unboxed tuple/sum type -- constructor. 
@unboxedTupleSumKind [IntRep, LiftedRep] --> TYPE (TupleRep/SumRep -- [IntRep, LiftedRep])@ unboxedTupleSumKind :: TyCon -> [Type] -> Kind unboxedTupleSumKind tc rr_tys = mkTYPEapp (mkTyConApp tc [mkPromotedListTy runtimeRepTy rr_tys]) -- | Specialization of 'unboxedTupleSumKind' for tuples unboxedTupleKind :: [Type] -> Kind unboxedTupleKind = unboxedTupleSumKind tupleRepDataConTyCon mk_tuple :: Boxity -> Int -> (TyCon,DataCon) mk_tuple Boxed arity = (tycon, tuple_con) where tycon = mkTupleTyCon tc_name tc_binders tc_res_kind tc_arity tuple_con BoxedTuple flavour tc_binders = mkTemplateAnonTyConBinders (replicate arity liftedTypeKind) tc_res_kind = liftedTypeKind tc_arity = arity flavour = VanillaAlgTyCon (mkPrelTyConRepName tc_name) dc_tvs = binderVars tc_binders dc_arg_tys = mkTyVarTys dc_tvs tuple_con = pcDataCon dc_name dc_tvs dc_arg_tys tycon boxity = Boxed modu = gHC_TUPLE tc_name = mkWiredInName modu (mkTupleOcc tcName boxity arity) tc_uniq (ATyCon tycon) BuiltInSyntax dc_name = mkWiredInName modu (mkTupleOcc dataName boxity arity) dc_uniq (AConLike (RealDataCon tuple_con)) BuiltInSyntax tc_uniq = mkTupleTyConUnique boxity arity dc_uniq = mkTupleDataConUnique boxity arity mk_tuple Unboxed arity = (tycon, tuple_con) where tycon = mkTupleTyCon tc_name tc_binders tc_res_kind tc_arity tuple_con UnboxedTuple flavour -- See Note [Unboxed tuple RuntimeRep vars] in GHC.Core.TyCon -- Kind: forall (k1:RuntimeRep) (k2:RuntimeRep). 
TYPE k1 -> TYPE k2 -> TYPE (TupleRep [k1, k2]) tc_binders = mkTemplateTyConBinders (replicate arity runtimeRepTy) (\ks -> map mkTYPEapp ks) tc_res_kind = unboxedTupleKind rr_tys tc_arity = arity * 2 flavour = VanillaAlgTyCon (mkPrelTyConRepName tc_name) dc_tvs = binderVars tc_binders (rr_tys, dc_arg_tys) = splitAt arity (mkTyVarTys dc_tvs) tuple_con = pcDataCon dc_name dc_tvs dc_arg_tys tycon boxity = Unboxed modu = gHC_PRIM tc_name = mkWiredInName modu (mkTupleOcc tcName boxity arity) tc_uniq (ATyCon tycon) BuiltInSyntax dc_name = mkWiredInName modu (mkTupleOcc dataName boxity arity) dc_uniq (AConLike (RealDataCon tuple_con)) BuiltInSyntax tc_uniq = mkTupleTyConUnique boxity arity dc_uniq = mkTupleDataConUnique boxity arity mk_ctuple :: Arity -> (TyCon, DataCon, Array ConTagZ Id) mk_ctuple arity = (tycon, tuple_con, sc_sel_ids_arr) where tycon = mkClassTyCon tc_name binders roles rhs klass (mkPrelTyConRepName tc_name) klass = mk_ctuple_class tycon sc_theta sc_sel_ids tuple_con = pcDataConW dc_name tvs (map unrestricted sc_theta) tycon binders = mkTemplateAnonTyConBinders (replicate arity constraintKind) roles = replicate arity Nominal rhs = TupleTyCon{data_con = tuple_con, tup_sort = ConstraintTuple} modu = gHC_CLASSES tc_name = mkWiredInName modu (mkCTupleOcc tcName arity) tc_uniq (ATyCon tycon) BuiltInSyntax dc_name = mkWiredInName modu (mkCTupleOcc dataName arity) dc_uniq (AConLike (RealDataCon tuple_con)) BuiltInSyntax tc_uniq = mkCTupleTyConUnique arity dc_uniq = mkCTupleDataConUnique arity tvs = binderVars binders sc_theta = map mkTyVarTy tvs sc_sel_ids = [mk_sc_sel_id sc_pos | sc_pos <- [0..arity-1]] sc_sel_ids_arr = listArray (0,arity-1) sc_sel_ids mk_sc_sel_id sc_pos = let sc_sel_id_uniq = mkCTupleSelIdUnique sc_pos arity sc_sel_id_occ = mkCTupleOcc tcName arity sc_sel_id_name = mkWiredInIdName gHC_CLASSES (occNameFS (mkSuperDictSelOcc sc_pos sc_sel_id_occ)) sc_sel_id_uniq sc_sel_id sc_sel_id = mkDictSelId sc_sel_id_name klass in sc_sel_id unitTyCon :: 
TyCon unitTyCon = tupleTyCon Boxed 0 unitTyConKey :: Unique unitTyConKey = getUnique unitTyCon unitDataCon :: DataCon unitDataCon = head (tyConDataCons unitTyCon) unitDataConId :: Id unitDataConId = dataConWorkId unitDataCon soloTyCon :: TyCon soloTyCon = tupleTyCon Boxed 1 pairTyCon :: TyCon pairTyCon = tupleTyCon Boxed 2 unboxedUnitTy :: Type unboxedUnitTy = mkTyConTy unboxedUnitTyCon unboxedUnitTyCon :: TyCon unboxedUnitTyCon = tupleTyCon Unboxed 0 unboxedUnitDataCon :: DataCon unboxedUnitDataCon = tupleDataCon Unboxed 0 {- ********************************************************************* * * Unboxed sums * * ********************************************************************* -} -- | OccName for n-ary unboxed sum type constructor. mkSumTyConOcc :: Arity -> OccName mkSumTyConOcc n = mkOccName tcName str where -- No need to cache these, the caching is done in mk_sum str = '(' : '#' : ' ' : bars ++ " #)" bars = intersperse ' ' $ replicate (n-1) '|' -- | OccName for i-th alternative of n-ary unboxed sum data constructor. mkSumDataConOcc :: ConTag -> Arity -> OccName mkSumDataConOcc alt n = mkOccName dataName str where -- No need to cache these, the caching is done in mk_sum str = '(' : '#' : ' ' : bars alt ++ '_' : bars (n - alt - 1) ++ " #)" bars i = intersperse ' ' $ replicate i '|' -- | Type constructor for n-ary unboxed sum. sumTyCon :: Arity -> TyCon sumTyCon arity | arity > mAX_SUM_SIZE = fst (mk_sum arity) -- Build one specially | arity < 2 = panic ("sumTyCon: Arity starts from 2. (arity: " ++ show arity ++ ")") | otherwise = fst (unboxedSumArr ! arity) -- | Data constructor for i-th alternative of a n-ary unboxed sum. sumDataCon :: ConTag -- Alternative -> Arity -- Arity -> DataCon sumDataCon alt arity | alt > arity = panic ("sumDataCon: index out of bounds: alt: " ++ show alt ++ " > arity " ++ show arity) | alt <= 0 = panic ("sumDataCon: Alts start from 1. 
(alt: " ++ show alt ++ ", arity: " ++ show arity ++ ")") | arity < 2 = panic ("sumDataCon: Arity starts from 2. (alt: " ++ show alt ++ ", arity: " ++ show arity ++ ")") | arity > mAX_SUM_SIZE = snd (mk_sum arity) ! (alt - 1) -- Build one specially | otherwise = snd (unboxedSumArr ! arity) ! (alt - 1) -- | Cached type and data constructors for sums. The outer array is -- indexed by the arity of the sum and the inner array is indexed by -- the alternative. unboxedSumArr :: Array Int (TyCon, Array Int DataCon) unboxedSumArr = listArray (2,mAX_SUM_SIZE) [mk_sum i | i <- [2..mAX_SUM_SIZE]] -- | Specialization of 'unboxedTupleSumKind' for sums unboxedSumKind :: [Type] -> Kind unboxedSumKind = unboxedTupleSumKind sumRepDataConTyCon -- | Create type constructor and data constructors for n-ary unboxed sum. mk_sum :: Arity -> (TyCon, Array ConTagZ DataCon) mk_sum arity = (tycon, sum_cons) where tycon = mkSumTyCon tc_name tc_binders tc_res_kind (arity * 2) tyvars (elems sum_cons) UnboxedSumTyCon tc_binders = mkTemplateTyConBinders (replicate arity runtimeRepTy) (\ks -> map mkTYPEapp ks) tyvars = binderVars tc_binders tc_res_kind = unboxedSumKind rr_tys (rr_tys, tyvar_tys) = splitAt arity (mkTyVarTys tyvars) tc_name = mkWiredInName gHC_PRIM (mkSumTyConOcc arity) tc_uniq (ATyCon tycon) BuiltInSyntax sum_cons = listArray (0,arity-1) [sum_con i | i <- [0..arity-1]] sum_con i = let dc = pcDataCon dc_name tyvars -- univ tyvars [tyvar_tys !! i] -- arg types tycon dc_name = mkWiredInName gHC_PRIM (mkSumDataConOcc i arity) (dc_uniq i) (AConLike (RealDataCon dc)) BuiltInSyntax in dc tc_uniq = mkSumTyConUnique arity dc_uniq i = mkSumDataConUnique i arity {- ************************************************************************ * * Equality types and classes * * ********************************************************************* -} -- See Note [The equality types story] in GHC.Builtin.Types.Prim -- ((~~) :: forall k1 k2 (a :: k1) (b :: k2). 
a -> b -> Constraint) -- -- It's tempting to put functional dependencies on (~~), but it's not -- necessary because the functional-dependency coverage check looks -- through superclasses, and (~#) is handled in that check. eqTyCon, heqTyCon, coercibleTyCon :: TyCon eqClass, heqClass, coercibleClass :: Class eqDataCon, heqDataCon, coercibleDataCon :: DataCon eqSCSelId, heqSCSelId, coercibleSCSelId :: Id (eqTyCon, eqClass, eqDataCon, eqSCSelId) = (tycon, klass, datacon, sc_sel_id) where tycon = mkClassTyCon eqTyConName binders roles rhs klass (mkPrelTyConRepName eqTyConName) klass = mk_class tycon sc_pred sc_sel_id datacon = pcDataConW eqDataConName tvs [unrestricted sc_pred] tycon -- Kind: forall k. k -> k -> Constraint binders = mkTemplateTyConBinders [liftedTypeKind] (\[k] -> [k,k]) roles = [Nominal, Nominal, Nominal] rhs = mkDataTyConRhs [datacon] tvs@[k,a,b] = binderVars binders sc_pred = mkTyConApp eqPrimTyCon (mkTyVarTys [k,k,a,b]) sc_sel_id = mkDictSelId eqSCSelIdName klass (heqTyCon, heqClass, heqDataCon, heqSCSelId) = (tycon, klass, datacon, sc_sel_id) where tycon = mkClassTyCon heqTyConName binders roles rhs klass (mkPrelTyConRepName heqTyConName) klass = mk_class tycon sc_pred sc_sel_id datacon = pcDataConW heqDataConName tvs [unrestricted sc_pred] tycon -- Kind: forall k1 k2. k1 -> k2 -> Constraint binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id roles = [Nominal, Nominal, Nominal, Nominal] rhs = mkDataTyConRhs [datacon] tvs = binderVars binders sc_pred = mkTyConApp eqPrimTyCon (mkTyVarTys tvs) sc_sel_id = mkDictSelId heqSCSelIdName klass (coercibleTyCon, coercibleClass, coercibleDataCon, coercibleSCSelId) = (tycon, klass, datacon, sc_sel_id) where tycon = mkClassTyCon coercibleTyConName binders roles rhs klass (mkPrelTyConRepName coercibleTyConName) klass = mk_class tycon sc_pred sc_sel_id datacon = pcDataConW coercibleDataConName tvs [unrestricted sc_pred] tycon -- Kind: forall k. 
k -> k -> Constraint binders = mkTemplateTyConBinders [liftedTypeKind] (\[k] -> [k,k]) roles = [Nominal, Representational, Representational] rhs = mkDataTyConRhs [datacon] tvs@[k,a,b] = binderVars binders sc_pred = mkTyConApp eqReprPrimTyCon (mkTyVarTys [k, k, a, b]) sc_sel_id = mkDictSelId coercibleSCSelIdName klass mk_class :: TyCon -> PredType -> Id -> Class mk_class tycon sc_pred sc_sel_id = mkClass (tyConName tycon) (tyConTyVars tycon) [] [sc_pred] [sc_sel_id] [] [] (mkAnd []) tycon mk_ctuple_class :: TyCon -> ThetaType -> [Id] -> Class mk_ctuple_class tycon sc_theta sc_sel_ids = mkClass (tyConName tycon) (tyConTyVars tycon) [] sc_theta sc_sel_ids [] [] (mkAnd []) tycon {- ********************************************************************* * * Multiplicity Polymorphism * * ********************************************************************* -} {- Multiplicity polymorphism is implemented very similarly to representation polymorphism. We write in the multiplicity kind and the One and Many types which can appear in user programs. These are defined properly in GHC.Types. 
data Multiplicity = One | Many -} multiplicityTy :: Type multiplicityTy = mkTyConTy multiplicityTyCon multiplicityTyCon :: TyCon multiplicityTyCon = pcTyCon multiplicityTyConName Nothing [] [oneDataCon, manyDataCon] oneDataCon, manyDataCon :: DataCon oneDataCon = pcDataCon oneDataConName [] [] multiplicityTyCon manyDataCon = pcDataCon manyDataConName [] [] multiplicityTyCon oneDataConTy, manyDataConTy :: Type oneDataConTy = mkTyConTy oneDataConTyCon manyDataConTy = mkTyConTy manyDataConTyCon oneDataConTyCon, manyDataConTyCon :: TyCon oneDataConTyCon = promoteDataCon oneDataCon manyDataConTyCon = promoteDataCon manyDataCon multMulTyConName :: Name multMulTyConName = mkWiredInTyConName UserSyntax gHC_TYPES (fsLit "MultMul") multMulTyConKey multMulTyCon multMulTyCon :: TyCon multMulTyCon = mkFamilyTyCon multMulTyConName binders multiplicityTy Nothing (BuiltInSynFamTyCon trivialBuiltInFamily) Nothing NotInjective where binders = mkTemplateAnonTyConBinders [multiplicityTy, multiplicityTy] unrestrictedFunTy :: Type unrestrictedFunTy = functionWithMultiplicity manyDataConTy unrestrictedFunTyCon :: TyCon unrestrictedFunTyCon = buildSynTyCon unrestrictedFunTyConName [] arrowKind [] unrestrictedFunTy where arrowKind = mkTyConKind binders liftedTypeKind -- See also funTyCon binders = [ Bndr runtimeRep1TyVar (NamedTCB Inferred) , Bndr runtimeRep2TyVar (NamedTCB Inferred) ] ++ mkTemplateAnonTyConBinders [ mkTYPEapp runtimeRep1Ty , mkTYPEapp runtimeRep2Ty ] unrestrictedFunTyConName :: Name unrestrictedFunTyConName = mkWiredInTyConName BuiltInSyntax gHC_TYPES (fsLit "->") unrestrictedFunTyConKey unrestrictedFunTyCon {- ********************************************************************* * * Type synonyms (all declared in ghc-prim:GHC.Types) type Type = TYPE LiftedRep -- liftedTypeKind type UnliftedType = TYPE UnliftedRep -- unliftedTypeKind type LiftedRep = BoxedRep Lifted -- liftedRepTy type UnliftedRep = BoxedRep Unlifted -- unliftedRepTy * * 
********************************************************************* -} -- For these synonyms, see -- Note [TYPE and RuntimeRep] in GHC.Builtin.Types.Prim, and -- Note [Using synonyms to compress types] in GHC.Core.Type ---------------------- -- @type Type = TYPE ('BoxedRep 'Lifted)@ liftedTypeKindTyCon :: TyCon liftedTypeKindTyCon = buildSynTyCon liftedTypeKindTyConName [] liftedTypeKind [] rhs where rhs = TyCoRep.TyConApp tYPETyCon [liftedRepTy] liftedTypeKind :: Type liftedTypeKind = mkTyConTy liftedTypeKindTyCon ---------------------- -- | @type UnliftedType = TYPE ('BoxedRep 'Unlifted)@ unliftedTypeKindTyCon :: TyCon unliftedTypeKindTyCon = buildSynTyCon unliftedTypeKindTyConName [] liftedTypeKind [] rhs where rhs = TyCoRep.TyConApp tYPETyCon [unliftedRepTy] unliftedTypeKind :: Type unliftedTypeKind = mkTyConTy unliftedTypeKindTyCon ---------------------- -- @type ZeroBitType = TYPE ZeroBitRep zeroBitTypeTyCon :: TyCon zeroBitTypeTyCon = buildSynTyCon zeroBitTypeTyConName [] liftedTypeKind [] rhs where rhs = TyCoRep.TyConApp tYPETyCon [zeroBitRepTy] zeroBitTypeKind :: Type zeroBitTypeKind = mkTyConTy zeroBitTypeTyCon ---------------------- -- | @type LiftedRep = 'BoxedRep 'Lifted@ liftedRepTyCon :: TyCon liftedRepTyCon = buildSynTyCon liftedRepTyConName [] runtimeRepTy [] rhs where rhs = TyCoRep.TyConApp boxedRepDataConTyCon [liftedDataConTy] liftedRepTy :: Type liftedRepTy = mkTyConTy liftedRepTyCon ---------------------- -- | @type UnliftedRep = 'BoxedRep 'Unlifted@ unliftedRepTyCon :: TyCon unliftedRepTyCon = buildSynTyCon unliftedRepTyConName [] runtimeRepTy [] rhs where rhs = TyCoRep.TyConApp boxedRepDataConTyCon [unliftedDataConTy] unliftedRepTy :: Type unliftedRepTy = mkTyConTy unliftedRepTyCon ---------------------- -- | @type ZeroBitRep = 'Tuple '[] zeroBitRepTyCon :: TyCon zeroBitRepTyCon = buildSynTyCon zeroBitRepTyConName [] runtimeRepTy [] rhs where rhs = TyCoRep.TyConApp tupleRepDataConTyCon [mkPromotedListTy runtimeRepTy []] zeroBitRepTy :: 
Type zeroBitRepTy = mkTyConTy zeroBitRepTyCon {- ********************************************************************* * * data Levity = Lifted | Unlifted * * ********************************************************************* -} levityTyCon :: TyCon levityTyCon = pcTyCon levityTyConName Nothing [] [liftedDataCon,unliftedDataCon] levityTy :: Type levityTy = mkTyConTy levityTyCon liftedDataCon, unliftedDataCon :: DataCon liftedDataCon = pcSpecialDataCon liftedDataConName [] levityTyCon LiftedInfo unliftedDataCon = pcSpecialDataCon unliftedDataConName [] levityTyCon UnliftedInfo liftedDataConTyCon :: TyCon liftedDataConTyCon = promoteDataCon liftedDataCon unliftedDataConTyCon :: TyCon unliftedDataConTyCon = promoteDataCon unliftedDataCon liftedDataConTy :: Type liftedDataConTy = mkTyConTy liftedDataConTyCon unliftedDataConTy :: Type unliftedDataConTy = mkTyConTy unliftedDataConTyCon {- ********************************************************************* * * See Note [Wiring in RuntimeRep] data RuntimeRep = VecRep VecCount VecElem | TupleRep [RuntimeRep] | SumRep [RuntimeRep] | BoxedRep Levity | IntRep | Int8Rep | ...etc... * * ********************************************************************* -} {- Note [Wiring in RuntimeRep] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The RuntimeRep type (and friends) in GHC.Types has a bunch of constructors, making it a pain to wire in. To ease the pain somewhat, we use lists of the different bits, like Uniques, Names, DataCons. These lists must be kept in sync with each other. The rule is this: use the order as declared in GHC.Types. All places where such lists exist should contain a reference to this Note, so a search for this Note's name should find all the lists. See also Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType. 
-} runtimeRepTyCon :: TyCon runtimeRepTyCon = pcTyCon runtimeRepTyConName Nothing [] (vecRepDataCon : tupleRepDataCon : sumRepDataCon : boxedRepDataCon : runtimeRepSimpleDataCons) runtimeRepTy :: Type runtimeRepTy = mkTyConTy runtimeRepTyCon boxedRepDataCon :: DataCon boxedRepDataCon = pcSpecialDataCon boxedRepDataConName [ levityTy ] runtimeRepTyCon (RuntimeRep prim_rep_fun) where -- See Note [Getting from RuntimeRep to PrimRep] in RepType prim_rep_fun [lev] = case tyConRuntimeRepInfo (tyConAppTyCon lev) of LiftedInfo -> [LiftedRep] UnliftedInfo -> [UnliftedRep] _ -> pprPanic "boxedRepDataCon" (ppr lev) prim_rep_fun args = pprPanic "boxedRepDataCon" (ppr args) boxedRepDataConTyCon :: TyCon boxedRepDataConTyCon = promoteDataCon boxedRepDataCon vecRepDataCon :: DataCon vecRepDataCon = pcSpecialDataCon vecRepDataConName [ mkTyConTy vecCountTyCon , mkTyConTy vecElemTyCon ] runtimeRepTyCon (RuntimeRep prim_rep_fun) where -- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType prim_rep_fun [count, elem] | VecCount n <- tyConRuntimeRepInfo (tyConAppTyCon count) , VecElem e <- tyConRuntimeRepInfo (tyConAppTyCon elem) = [VecRep n e] prim_rep_fun args = pprPanic "vecRepDataCon" (ppr args) vecRepDataConTyCon :: TyCon vecRepDataConTyCon = promoteDataCon vecRepDataCon tupleRepDataCon :: DataCon tupleRepDataCon = pcSpecialDataCon tupleRepDataConName [ mkListTy runtimeRepTy ] runtimeRepTyCon (RuntimeRep prim_rep_fun) where -- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType prim_rep_fun [rr_ty_list] = concatMap (runtimeRepPrimRep doc) rr_tys where rr_tys = extractPromotedList rr_ty_list doc = text "tupleRepDataCon" <+> ppr rr_tys prim_rep_fun args = pprPanic "tupleRepDataCon" (ppr args) tupleRepDataConTyCon :: TyCon tupleRepDataConTyCon = promoteDataCon tupleRepDataCon sumRepDataCon :: DataCon sumRepDataCon = pcSpecialDataCon sumRepDataConName [ mkListTy runtimeRepTy ] runtimeRepTyCon (RuntimeRep prim_rep_fun) where -- See Note [Getting from 
RuntimeRep to PrimRep] in GHC.Types.RepType prim_rep_fun [rr_ty_list] = map slotPrimRep (ubxSumRepType prim_repss) where rr_tys = extractPromotedList rr_ty_list doc = text "sumRepDataCon" <+> ppr rr_tys prim_repss = map (runtimeRepPrimRep doc) rr_tys prim_rep_fun args = pprPanic "sumRepDataCon" (ppr args) sumRepDataConTyCon :: TyCon sumRepDataConTyCon = promoteDataCon sumRepDataCon -- See Note [Wiring in RuntimeRep] -- See Note [Getting from RuntimeRep to PrimRep] in GHC.Types.RepType runtimeRepSimpleDataCons :: [DataCon] runtimeRepSimpleDataCons = zipWithLazy mk_runtime_rep_dc [ IntRep , Int8Rep, Int16Rep, Int32Rep, Int64Rep , WordRep , Word8Rep, Word16Rep, Word32Rep, Word64Rep , AddrRep , FloatRep, DoubleRep ] runtimeRepSimpleDataConNames where mk_runtime_rep_dc primrep name = pcSpecialDataCon name [] runtimeRepTyCon (RuntimeRep (\_ -> [primrep])) -- See Note [Wiring in RuntimeRep] intRepDataConTy, int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy, wordRepDataConTy, word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy, addrRepDataConTy, floatRepDataConTy, doubleRepDataConTy :: Type [intRepDataConTy, int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy, wordRepDataConTy, word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy, addrRepDataConTy, floatRepDataConTy, doubleRepDataConTy ] = map (mkTyConTy . 
promoteDataCon) runtimeRepSimpleDataCons vecCountTyCon :: TyCon vecCountTyCon = pcTyCon vecCountTyConName Nothing [] vecCountDataCons -- See Note [Wiring in RuntimeRep] vecCountDataCons :: [DataCon] vecCountDataCons = zipWithLazy mk_vec_count_dc [ 2, 4, 8, 16, 32, 64 ] vecCountDataConNames where mk_vec_count_dc n name = pcSpecialDataCon name [] vecCountTyCon (VecCount n) -- See Note [Wiring in RuntimeRep] vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy, vec64DataConTy :: Type [vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy, vec64DataConTy] = map (mkTyConTy . promoteDataCon) vecCountDataCons vecElemTyCon :: TyCon vecElemTyCon = pcTyCon vecElemTyConName Nothing [] vecElemDataCons -- See Note [Wiring in RuntimeRep] vecElemDataCons :: [DataCon] vecElemDataCons = zipWithLazy mk_vec_elem_dc [ Int8ElemRep, Int16ElemRep, Int32ElemRep, Int64ElemRep , Word8ElemRep, Word16ElemRep, Word32ElemRep, Word64ElemRep , FloatElemRep, DoubleElemRep ] vecElemDataConNames where mk_vec_elem_dc elem name = pcSpecialDataCon name [] vecElemTyCon (VecElem elem) -- See Note [Wiring in RuntimeRep] int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy, int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy, word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy, doubleElemRepDataConTy :: Type [int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy, int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy, word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy, doubleElemRepDataConTy] = map (mkTyConTy . promoteDataCon) vecElemDataCons {- ********************************************************************* * * The boxed primitive types: Char, Int, etc * * ********************************************************************* -} boxingDataCon_maybe :: TyCon -> Maybe DataCon -- boxingDataCon_maybe Char# = C# -- boxingDataCon_maybe Int# = I# -- ... etc ... 
-- See Note [Boxing primitive types] boxingDataCon_maybe tc = lookupNameEnv boxing_constr_env (tyConName tc) boxing_constr_env :: NameEnv DataCon boxing_constr_env = mkNameEnv [(charPrimTyConName , charDataCon ) ,(intPrimTyConName , intDataCon ) ,(wordPrimTyConName , wordDataCon ) ,(floatPrimTyConName , floatDataCon ) ,(doublePrimTyConName, doubleDataCon) ] {- Note [Boxing primitive types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For a handful of primitive types (Int, Char, Word, Float, Double), we can readily box and an unboxed version (Int#, Char# etc) using the corresponding data constructor. This is useful in a couple of places, notably let-floating -} charTy :: Type charTy = mkTyConTy charTyCon charTyCon :: TyCon charTyCon = pcTyCon charTyConName (Just (CType NoSourceText Nothing (NoSourceText,fsLit "HsChar"))) [] [charDataCon] charDataCon :: DataCon charDataCon = pcDataCon charDataConName [] [charPrimTy] charTyCon stringTy :: Type stringTy = mkTyConTy stringTyCon stringTyCon :: TyCon -- We have this wired-in so that Haskell literal strings -- get type String (in hsLitType), which in turn influences -- inferred types and error messages stringTyCon = buildSynTyCon stringTyConName [] liftedTypeKind [] (mkListTy charTy) intTy :: Type intTy = mkTyConTy intTyCon intTyCon :: TyCon intTyCon = pcTyCon intTyConName (Just (CType NoSourceText Nothing (NoSourceText,fsLit "HsInt"))) [] [intDataCon] intDataCon :: DataCon intDataCon = pcDataCon intDataConName [] [intPrimTy] intTyCon wordTy :: Type wordTy = mkTyConTy wordTyCon wordTyCon :: TyCon wordTyCon = pcTyCon wordTyConName (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsWord"))) [] [wordDataCon] wordDataCon :: DataCon wordDataCon = pcDataCon wordDataConName [] [wordPrimTy] wordTyCon word8Ty :: Type word8Ty = mkTyConTy word8TyCon word8TyCon :: TyCon word8TyCon = pcTyCon word8TyConName (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsWord8"))) [] [word8DataCon] word8DataCon :: DataCon word8DataCon = pcDataCon 
word8DataConName [] [word8PrimTy] word8TyCon floatTy :: Type floatTy = mkTyConTy floatTyCon floatTyCon :: TyCon floatTyCon = pcTyCon floatTyConName (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsFloat"))) [] [floatDataCon] floatDataCon :: DataCon floatDataCon = pcDataCon floatDataConName [] [floatPrimTy] floatTyCon doubleTy :: Type doubleTy = mkTyConTy doubleTyCon doubleTyCon :: TyCon doubleTyCon = pcTyCon doubleTyConName (Just (CType NoSourceText Nothing (NoSourceText,fsLit "HsDouble"))) [] [doubleDataCon] doubleDataCon :: DataCon doubleDataCon = pcDataCon doubleDataConName [] [doublePrimTy] doubleTyCon {- ************************************************************************ * * The Bool type * * ************************************************************************ An ordinary enumeration type, but deeply wired in. There are no magical operations on @Bool@ (just the regular Prelude code). {\em BEGIN IDLE SPECULATION BY SIMON} This is not the only way to encode @Bool@. A more obvious coding makes @Bool@ just a boxed up version of @Bool#@, like this: \begin{verbatim} type Bool# = Int# data Bool = MkBool Bool# \end{verbatim} Unfortunately, this doesn't correspond to what the Report says @Bool@ looks like! Furthermore, we get slightly less efficient code (I think) with this coding. @gtInt@ would look like this: \begin{verbatim} gtInt :: Int -> Int -> Bool gtInt x y = case x of I# x# -> case y of I# y# -> case (gtIntPrim x# y#) of b# -> MkBool b# \end{verbatim} Notice that the result of the @gtIntPrim@ comparison has to be turned into an integer (here called @b#@), and returned in a @MkBool@ box. The @if@ expression would compile to this: \begin{verbatim} case (gtInt x y) of MkBool b# -> case b# of { 1# -> e1; 0# -> e2 } \end{verbatim} I think this code is a little less efficient than the previous code, but I'm not certain. At all events, corresponding with the Report is important. 
The interesting thing is that the language is expressive enough to describe more than one alternative; and that a type doesn't necessarily need to be a straightforwardly boxed version of its primitive counterpart. {\em END IDLE SPECULATION BY SIMON} -} boolTy :: Type boolTy = mkTyConTy boolTyCon boolTyCon :: TyCon boolTyCon = pcTyCon boolTyConName (Just (CType NoSourceText Nothing (NoSourceText, fsLit "HsBool"))) [] [falseDataCon, trueDataCon] falseDataCon, trueDataCon :: DataCon falseDataCon = pcDataCon falseDataConName [] [] boolTyCon trueDataCon = pcDataCon trueDataConName [] [] boolTyCon falseDataConId, trueDataConId :: Id falseDataConId = dataConWorkId falseDataCon trueDataConId = dataConWorkId trueDataCon orderingTyCon :: TyCon orderingTyCon = pcTyCon orderingTyConName Nothing [] [ordLTDataCon, ordEQDataCon, ordGTDataCon] ordLTDataCon, ordEQDataCon, ordGTDataCon :: DataCon ordLTDataCon = pcDataCon ordLTDataConName [] [] orderingTyCon ordEQDataCon = pcDataCon ordEQDataConName [] [] orderingTyCon ordGTDataCon = pcDataCon ordGTDataConName [] [] orderingTyCon ordLTDataConId, ordEQDataConId, ordGTDataConId :: Id ordLTDataConId = dataConWorkId ordLTDataCon ordEQDataConId = dataConWorkId ordEQDataCon ordGTDataConId = dataConWorkId ordGTDataCon {- ************************************************************************ * * The List type Special syntax, deeply wired in, but otherwise an ordinary algebraic data type * * ************************************************************************ data [] a = [] | a : (List a) -} mkListTy :: Type -> Type mkListTy ty = mkTyConApp listTyCon [ty] listTyCon :: TyCon listTyCon = pcTyCon listTyConName Nothing [alphaTyVar] [nilDataCon, consDataCon] -- See also Note [Empty lists] in GHC.Hs.Expr. 
nilDataCon :: DataCon nilDataCon = pcDataCon nilDataConName alpha_tyvar [] listTyCon consDataCon :: DataCon consDataCon = pcDataConWithFixity True {- Declared infix -} consDataConName alpha_tyvar [] alpha_tyvar (map linear [alphaTy, mkTyConApp listTyCon alpha_ty]) listTyCon -- Interesting: polymorphic recursion would help here. -- We can't use (mkListTy alphaTy) in the defn of consDataCon, else mkListTy -- gets the over-specific type (Type -> Type) -- NonEmpty lists (used for 'ProjectionE') nonEmptyTyCon :: TyCon nonEmptyTyCon = pcTyCon nonEmptyTyConName Nothing [alphaTyVar] [nonEmptyDataCon] nonEmptyDataCon :: DataCon nonEmptyDataCon = pcDataConWithFixity True {- Declared infix -} nonEmptyDataConName alpha_tyvar [] alpha_tyvar (map linear [alphaTy, mkTyConApp listTyCon alpha_ty]) nonEmptyTyCon -- Wired-in type Maybe maybeTyCon :: TyCon maybeTyCon = pcTyCon maybeTyConName Nothing alpha_tyvar [nothingDataCon, justDataCon] nothingDataCon :: DataCon nothingDataCon = pcDataCon nothingDataConName alpha_tyvar [] maybeTyCon justDataCon :: DataCon justDataCon = pcDataCon justDataConName alpha_tyvar [alphaTy] maybeTyCon mkPromotedMaybeTy :: Kind -> Maybe Type -> Type mkPromotedMaybeTy k (Just x) = mkTyConApp promotedJustDataCon [k,x] mkPromotedMaybeTy k Nothing = mkTyConApp promotedNothingDataCon [k] mkMaybeTy :: Type -> Kind mkMaybeTy t = mkTyConApp maybeTyCon [t] isPromotedMaybeTy :: Type -> Maybe (Maybe Type) isPromotedMaybeTy t | Just (tc,[_,x]) <- splitTyConApp_maybe t, tc == promotedJustDataCon = return $ Just x | Just (tc,[_]) <- splitTyConApp_maybe t, tc == promotedNothingDataCon = return $ Nothing | otherwise = Nothing {- ** ********************************************************************* * * The tuple types * * ************************************************************************ The tuple types are definitely magic, because they form an infinite family. 
\begin{itemize} \item They have a special family of type constructors, of type @TyCon@ These contain the tycon arity, but don't require a Unique. \item They have a special family of constructors, of type @Id@. Again these contain their arity but don't need a Unique. \item There should be a magic way of generating the info tables and entry code for all tuples. But at the moment we just compile a Haskell source file\srcloc{lib/prelude/...} containing declarations like: \begin{verbatim} data Tuple0 = Tup0 data Tuple2 a b = Tup2 a b data Tuple3 a b c = Tup3 a b c data Tuple4 a b c d = Tup4 a b c d ... \end{verbatim} The print-names associated with the magic @Id@s for tuple constructors ``just happen'' to be the same as those generated by these declarations. \item The instance environment should have a magic way to know that each tuple type is an instances of classes @Eq@, @Ix@, @Ord@ and so on. \ToDo{Not implemented yet.} \item There should also be a way to generate the appropriate code for each of these instances, but (like the info tables and entry code) it is done by enumeration\srcloc{lib/prelude/InTup?.hs}. \end{itemize} -} -- | Make a tuple type. The list of types should /not/ include any -- RuntimeRep specifications. Boxed 1-tuples are flattened. -- See Note [One-tuples] mkTupleTy :: Boxity -> [Type] -> Type -- Special case for *boxed* 1-tuples, which are represented by the type itself mkTupleTy Boxed [ty] = ty mkTupleTy boxity tys = mkTupleTy1 boxity tys -- | Make a tuple type. The list of types should /not/ include any -- RuntimeRep specifications. Boxed 1-tuples are *not* flattened. 
-- See Note [One-tuples] and Note [Don't flatten tuples from HsSyn] -- in "GHC.Core.Make" mkTupleTy1 :: Boxity -> [Type] -> Type mkTupleTy1 Boxed tys = mkTyConApp (tupleTyCon Boxed (length tys)) tys mkTupleTy1 Unboxed tys = mkTyConApp (tupleTyCon Unboxed (length tys)) (map getRuntimeRep tys ++ tys) -- | Build the type of a small tuple that holds the specified type of thing -- Flattens 1-tuples. See Note [One-tuples]. mkBoxedTupleTy :: [Type] -> Type mkBoxedTupleTy tys = mkTupleTy Boxed tys unitTy :: Type unitTy = mkTupleTy Boxed [] {- ********************************************************************* * * The sum types * * ************************************************************************ -} mkSumTy :: [Type] -> Type mkSumTy tys = mkTyConApp (sumTyCon (length tys)) (map getRuntimeRep tys ++ tys) -- Promoted Booleans promotedFalseDataCon, promotedTrueDataCon :: TyCon promotedTrueDataCon = promoteDataCon trueDataCon promotedFalseDataCon = promoteDataCon falseDataCon -- Promoted Maybe promotedNothingDataCon, promotedJustDataCon :: TyCon promotedNothingDataCon = promoteDataCon nothingDataCon promotedJustDataCon = promoteDataCon justDataCon -- Promoted Ordering promotedLTDataCon , promotedEQDataCon , promotedGTDataCon :: TyCon promotedLTDataCon = promoteDataCon ordLTDataCon promotedEQDataCon = promoteDataCon ordEQDataCon promotedGTDataCon = promoteDataCon ordGTDataCon -- Promoted List promotedConsDataCon, promotedNilDataCon :: TyCon promotedConsDataCon = promoteDataCon consDataCon promotedNilDataCon = promoteDataCon nilDataCon -- | Make a *promoted* list. mkPromotedListTy :: Kind -- ^ of the elements of the list -> [Type] -- ^ elements -> Type mkPromotedListTy k tys = foldr cons nil tys where cons :: Type -- element -> Type -- list -> Type cons elt list = mkTyConApp promotedConsDataCon [k, elt, list] nil :: Type nil = mkTyConApp promotedNilDataCon [k] -- | Extract the elements of a promoted list. 
Panics if the type is not a -- promoted list extractPromotedList :: Type -- ^ The promoted list -> [Type] extractPromotedList tys = go tys where go list_ty | Just (tc, [_k, t, ts]) <- splitTyConApp_maybe list_ty = assert (tc `hasKey` consDataConKey) $ t : go ts | Just (tc, [_k]) <- splitTyConApp_maybe list_ty = assert (tc `hasKey` nilDataConKey) [] | otherwise = pprPanic "extractPromotedList" (ppr tys) --------------------------------------- -- ghc-bignum --------------------------------------- integerTyConName , integerISDataConName , integerIPDataConName , integerINDataConName :: Name integerTyConName = mkWiredInTyConName UserSyntax gHC_NUM_INTEGER (fsLit "Integer") integerTyConKey integerTyCon integerISDataConName = mkWiredInDataConName UserSyntax gHC_NUM_INTEGER (fsLit "IS") integerISDataConKey integerISDataCon integerIPDataConName = mkWiredInDataConName UserSyntax gHC_NUM_INTEGER (fsLit "IP") integerIPDataConKey integerIPDataCon integerINDataConName = mkWiredInDataConName UserSyntax gHC_NUM_INTEGER (fsLit "IN") integerINDataConKey integerINDataCon integerTy :: Type integerTy = mkTyConTy integerTyCon integerTyCon :: TyCon integerTyCon = pcTyCon integerTyConName Nothing [] [integerISDataCon, integerIPDataCon, integerINDataCon] integerISDataCon :: DataCon integerISDataCon = pcDataCon integerISDataConName [] [intPrimTy] integerTyCon integerIPDataCon :: DataCon integerIPDataCon = pcDataCon integerIPDataConName [] [byteArrayPrimTy] integerTyCon integerINDataCon :: DataCon integerINDataCon = pcDataCon integerINDataConName [] [byteArrayPrimTy] integerTyCon naturalTyConName , naturalNSDataConName , naturalNBDataConName :: Name naturalTyConName = mkWiredInTyConName UserSyntax gHC_NUM_NATURAL (fsLit "Natural") naturalTyConKey naturalTyCon naturalNSDataConName = mkWiredInDataConName UserSyntax gHC_NUM_NATURAL (fsLit "NS") naturalNSDataConKey naturalNSDataCon naturalNBDataConName = mkWiredInDataConName UserSyntax gHC_NUM_NATURAL (fsLit "NB") naturalNBDataConKey 
naturalNBDataCon naturalTy :: Type naturalTy = mkTyConTy naturalTyCon naturalTyCon :: TyCon naturalTyCon = pcTyCon naturalTyConName Nothing [] [naturalNSDataCon, naturalNBDataCon] naturalNSDataCon :: DataCon naturalNSDataCon = pcDataCon naturalNSDataConName [] [wordPrimTy] naturalTyCon naturalNBDataCon :: DataCon naturalNBDataCon = pcDataCon naturalNBDataConName [] [byteArrayPrimTy] naturalTyCon -- | Replaces constraint tuple names with corresponding boxed ones. filterCTuple :: RdrName -> RdrName filterCTuple (Exact n) | Just arity <- cTupleTyConNameArity_maybe n = Exact $ tupleTyConName BoxedTuple arity filterCTuple rdr = rdr ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Types/Prim.hs0000644000000000000000000013670614472400112021625 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1994-1998 Wired-in knowledge about primitive types -} {-# LANGUAGE CPP #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | This module defines TyCons that can't be expressed in Haskell. -- They are all, therefore, wired-in TyCons. 
C.f module "GHC.Builtin.Types" module GHC.Builtin.Types.Prim( mkTemplateKindVar, mkTemplateKindVars, mkTemplateTyVars, mkTemplateTyVarsFrom, mkTemplateKiTyVars, mkTemplateKiTyVar, mkTemplateTyConBinders, mkTemplateKindTyConBinders, mkTemplateAnonTyConBinders, alphaTyVars, alphaTyVar, betaTyVar, gammaTyVar, deltaTyVar, alphaTyVarSpec, betaTyVarSpec, gammaTyVarSpec, deltaTyVarSpec, alphaTys, alphaTy, betaTy, gammaTy, deltaTy, alphaTyVarsUnliftedRep, alphaTyVarUnliftedRep, alphaTysUnliftedRep, alphaTyUnliftedRep, runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar, runtimeRep1TyVarInf, runtimeRep2TyVarInf, runtimeRep1Ty, runtimeRep2Ty, runtimeRep3Ty, levity1TyVar, levity2TyVar, levity1TyVarInf, levity2TyVarInf, levity1Ty, levity2Ty, openAlphaTyVar, openBetaTyVar, openGammaTyVar, openAlphaTyVarSpec, openBetaTyVarSpec, openGammaTyVarSpec, openAlphaTy, openBetaTy, openGammaTy, levPolyAlphaTyVar, levPolyBetaTyVar, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec, levPolyAlphaTy, levPolyBetaTy, multiplicityTyVar1, multiplicityTyVar2, -- Kind constructors... 
tYPETyCon, tYPETyConName, -- Kinds mkTYPEapp, functionWithMultiplicity, funTyCon, funTyConName, unexposedPrimTyCons, exposedPrimTyCons, primTyCons, charPrimTyCon, charPrimTy, charPrimTyConName, intPrimTyCon, intPrimTy, intPrimTyConName, wordPrimTyCon, wordPrimTy, wordPrimTyConName, addrPrimTyCon, addrPrimTy, addrPrimTyConName, floatPrimTyCon, floatPrimTy, floatPrimTyConName, doublePrimTyCon, doublePrimTy, doublePrimTyConName, statePrimTyCon, mkStatePrimTy, realWorldTyCon, realWorldTy, realWorldStatePrimTy, proxyPrimTyCon, mkProxyPrimTy, arrayPrimTyCon, mkArrayPrimTy, byteArrayPrimTyCon, byteArrayPrimTy, smallArrayPrimTyCon, mkSmallArrayPrimTy, mutableArrayPrimTyCon, mkMutableArrayPrimTy, mutableByteArrayPrimTyCon, mkMutableByteArrayPrimTy, smallMutableArrayPrimTyCon, mkSmallMutableArrayPrimTy, mutVarPrimTyCon, mkMutVarPrimTy, mVarPrimTyCon, mkMVarPrimTy, ioPortPrimTyCon, mkIOPortPrimTy, tVarPrimTyCon, mkTVarPrimTy, stablePtrPrimTyCon, mkStablePtrPrimTy, stableNamePrimTyCon, mkStableNamePrimTy, compactPrimTyCon, compactPrimTy, bcoPrimTyCon, bcoPrimTy, weakPrimTyCon, mkWeakPrimTy, threadIdPrimTyCon, threadIdPrimTy, stackSnapshotPrimTyCon, stackSnapshotPrimTy, int8PrimTyCon, int8PrimTy, int8PrimTyConName, word8PrimTyCon, word8PrimTy, word8PrimTyConName, int16PrimTyCon, int16PrimTy, int16PrimTyConName, word16PrimTyCon, word16PrimTy, word16PrimTyConName, int32PrimTyCon, int32PrimTy, int32PrimTyConName, word32PrimTyCon, word32PrimTy, word32PrimTyConName, int64PrimTyCon, int64PrimTy, int64PrimTyConName, word64PrimTyCon, word64PrimTy, word64PrimTyConName, eqPrimTyCon, -- ty1 ~# ty2 eqReprPrimTyCon, -- ty1 ~R# ty2 (at role Representational) eqPhantPrimTyCon, -- ty1 ~P# ty2 (at role Phantom) equalityTyCon, -- * SIMD #include "primop-vector-tys-exports.hs-incl" ) where import GHC.Prelude import {-# SOURCE #-} GHC.Builtin.Types ( runtimeRepTy, levityTy, unboxedTupleKind, liftedTypeKind , boxedRepDataConTyCon, vecRepDataConTyCon , liftedRepTy, unliftedRepTy, zeroBitRepTy , 
intRepDataConTy , int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy , wordRepDataConTy , word16RepDataConTy, word8RepDataConTy, word32RepDataConTy, word64RepDataConTy , addrRepDataConTy , floatRepDataConTy, doubleRepDataConTy , vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy , vec64DataConTy , int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy , int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy , word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy , doubleElemRepDataConTy , multiplicityTy ) import GHC.Types.Var ( TyVarBinder, TyVar , mkTyVar, mkTyVarBinder, mkTyVarBinders ) import GHC.Types.Name import {-# SOURCE #-} GHC.Types.TyThing import GHC.Core.TyCon import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Builtin.Names import GHC.Data.FastString import GHC.Utils.Misc ( changeLast ) import GHC.Core.TyCo.Rep -- Doesn't need special access, but this is easier to avoid -- import loops which show up if you import Type instead import {-# SOURCE #-} GHC.Core.Type ( mkTyConTy, mkTyConApp, mkTYPEapp, getLevity ) import Data.Char {- ************************************************************************ * * \subsection{Primitive type constructors} * * ************************************************************************ -} primTyCons :: [TyCon] primTyCons = unexposedPrimTyCons ++ exposedPrimTyCons -- | Primitive 'TyCon's that are defined in GHC.Prim but not exposed. -- It's important to keep these separate as we don't want users to be able to -- write them (see #15209) or see them in GHCi's @:browse@ output -- (see #12023). unexposedPrimTyCons :: [TyCon] unexposedPrimTyCons = [ eqPrimTyCon , eqReprPrimTyCon , eqPhantPrimTyCon ] -- | Primitive 'TyCon's that are defined in, and exported from, GHC.Prim. 
exposedPrimTyCons :: [TyCon] exposedPrimTyCons = [ addrPrimTyCon , arrayPrimTyCon , byteArrayPrimTyCon , smallArrayPrimTyCon , charPrimTyCon , doublePrimTyCon , floatPrimTyCon , intPrimTyCon , int8PrimTyCon , int16PrimTyCon , int32PrimTyCon , int64PrimTyCon , bcoPrimTyCon , weakPrimTyCon , mutableArrayPrimTyCon , mutableByteArrayPrimTyCon , smallMutableArrayPrimTyCon , mVarPrimTyCon , ioPortPrimTyCon , tVarPrimTyCon , mutVarPrimTyCon , realWorldTyCon , stablePtrPrimTyCon , stableNamePrimTyCon , compactPrimTyCon , statePrimTyCon , proxyPrimTyCon , threadIdPrimTyCon , wordPrimTyCon , word8PrimTyCon , word16PrimTyCon , word32PrimTyCon , word64PrimTyCon , stackSnapshotPrimTyCon , tYPETyCon , funTyCon #include "primop-vector-tycons.hs-incl" ] mkPrimTc :: FastString -> Unique -> TyCon -> Name mkPrimTc fs unique tycon = mkWiredInName gHC_PRIM (mkTcOccFS fs) unique (mkATyCon tycon) -- Relevant TyCon UserSyntax mkBuiltInPrimTc :: FastString -> Unique -> TyCon -> Name mkBuiltInPrimTc fs unique tycon = mkWiredInName gHC_PRIM (mkTcOccFS fs) unique (mkATyCon tycon) -- Relevant TyCon BuiltInSyntax charPrimTyConName, intPrimTyConName, int8PrimTyConName, int16PrimTyConName, int32PrimTyConName, int64PrimTyConName, wordPrimTyConName, word32PrimTyConName, word8PrimTyConName, word16PrimTyConName, word64PrimTyConName, addrPrimTyConName, floatPrimTyConName, doublePrimTyConName, statePrimTyConName, proxyPrimTyConName, realWorldTyConName, arrayPrimTyConName, smallArrayPrimTyConName, byteArrayPrimTyConName, mutableArrayPrimTyConName, mutableByteArrayPrimTyConName, smallMutableArrayPrimTyConName, mutVarPrimTyConName, mVarPrimTyConName, ioPortPrimTyConName, tVarPrimTyConName, stablePtrPrimTyConName, stableNamePrimTyConName, compactPrimTyConName, bcoPrimTyConName, weakPrimTyConName, threadIdPrimTyConName, eqPrimTyConName, eqReprPrimTyConName, eqPhantPrimTyConName, stackSnapshotPrimTyConName :: Name charPrimTyConName = mkPrimTc (fsLit "Char#") charPrimTyConKey charPrimTyCon intPrimTyConName = 
mkPrimTc (fsLit "Int#") intPrimTyConKey intPrimTyCon int8PrimTyConName = mkPrimTc (fsLit "Int8#") int8PrimTyConKey int8PrimTyCon int16PrimTyConName = mkPrimTc (fsLit "Int16#") int16PrimTyConKey int16PrimTyCon int32PrimTyConName = mkPrimTc (fsLit "Int32#") int32PrimTyConKey int32PrimTyCon int64PrimTyConName = mkPrimTc (fsLit "Int64#") int64PrimTyConKey int64PrimTyCon wordPrimTyConName = mkPrimTc (fsLit "Word#") wordPrimTyConKey wordPrimTyCon word8PrimTyConName = mkPrimTc (fsLit "Word8#") word8PrimTyConKey word8PrimTyCon word16PrimTyConName = mkPrimTc (fsLit "Word16#") word16PrimTyConKey word16PrimTyCon word32PrimTyConName = mkPrimTc (fsLit "Word32#") word32PrimTyConKey word32PrimTyCon word64PrimTyConName = mkPrimTc (fsLit "Word64#") word64PrimTyConKey word64PrimTyCon addrPrimTyConName = mkPrimTc (fsLit "Addr#") addrPrimTyConKey addrPrimTyCon floatPrimTyConName = mkPrimTc (fsLit "Float#") floatPrimTyConKey floatPrimTyCon doublePrimTyConName = mkPrimTc (fsLit "Double#") doublePrimTyConKey doublePrimTyCon statePrimTyConName = mkPrimTc (fsLit "State#") statePrimTyConKey statePrimTyCon proxyPrimTyConName = mkPrimTc (fsLit "Proxy#") proxyPrimTyConKey proxyPrimTyCon eqPrimTyConName = mkPrimTc (fsLit "~#") eqPrimTyConKey eqPrimTyCon eqReprPrimTyConName = mkBuiltInPrimTc (fsLit "~R#") eqReprPrimTyConKey eqReprPrimTyCon eqPhantPrimTyConName = mkBuiltInPrimTc (fsLit "~P#") eqPhantPrimTyConKey eqPhantPrimTyCon realWorldTyConName = mkPrimTc (fsLit "RealWorld") realWorldTyConKey realWorldTyCon arrayPrimTyConName = mkPrimTc (fsLit "Array#") arrayPrimTyConKey arrayPrimTyCon byteArrayPrimTyConName = mkPrimTc (fsLit "ByteArray#") byteArrayPrimTyConKey byteArrayPrimTyCon smallArrayPrimTyConName = mkPrimTc (fsLit "SmallArray#") smallArrayPrimTyConKey smallArrayPrimTyCon mutableArrayPrimTyConName = mkPrimTc (fsLit "MutableArray#") mutableArrayPrimTyConKey mutableArrayPrimTyCon mutableByteArrayPrimTyConName = mkPrimTc (fsLit "MutableByteArray#") mutableByteArrayPrimTyConKey 
mutableByteArrayPrimTyCon smallMutableArrayPrimTyConName= mkPrimTc (fsLit "SmallMutableArray#") smallMutableArrayPrimTyConKey smallMutableArrayPrimTyCon mutVarPrimTyConName = mkPrimTc (fsLit "MutVar#") mutVarPrimTyConKey mutVarPrimTyCon ioPortPrimTyConName = mkPrimTc (fsLit "IOPort#") ioPortPrimTyConKey ioPortPrimTyCon mVarPrimTyConName = mkPrimTc (fsLit "MVar#") mVarPrimTyConKey mVarPrimTyCon tVarPrimTyConName = mkPrimTc (fsLit "TVar#") tVarPrimTyConKey tVarPrimTyCon stablePtrPrimTyConName = mkPrimTc (fsLit "StablePtr#") stablePtrPrimTyConKey stablePtrPrimTyCon stableNamePrimTyConName = mkPrimTc (fsLit "StableName#") stableNamePrimTyConKey stableNamePrimTyCon compactPrimTyConName = mkPrimTc (fsLit "Compact#") compactPrimTyConKey compactPrimTyCon stackSnapshotPrimTyConName = mkPrimTc (fsLit "StackSnapshot#") stackSnapshotPrimTyConKey stackSnapshotPrimTyCon #if MIN_VERSION_ghc_prim(0, 7, 0) bcoPrimTyConName = mkPrimTc (fsLit "BCO") bcoPrimTyConKey bcoPrimTyCon #else bcoPrimTyConName = mkPrimTc (fsLit "BCO#") bcoPrimTyConKey bcoPrimTyCon #endif weakPrimTyConName = mkPrimTc (fsLit "Weak#") weakPrimTyConKey weakPrimTyCon threadIdPrimTyConName = mkPrimTc (fsLit "ThreadId#") threadIdPrimTyConKey threadIdPrimTyCon {- ************************************************************************ * * \subsection{Support code} * * ************************************************************************ alphaTyVars is a list of type variables for use in templates: ["a", "b", ..., "z", "t1", "t2", ... ] -} mkTemplateKindVar :: Kind -> TyVar mkTemplateKindVar = mkTyVar (mk_tv_name 0 "k") mkTemplateKindVars :: [Kind] -> [TyVar] -- k0 with unique (mkAlphaTyVarUnique 0) -- k1 with unique (mkAlphaTyVarUnique 1) -- ... etc mkTemplateKindVars [kind] = [mkTemplateKindVar kind] -- Special case for one kind: just "k" mkTemplateKindVars kinds = [ mkTyVar (mk_tv_name u ('k' : show u)) kind | (kind, u) <- kinds `zip` [0..] 
] mk_tv_name :: Int -> String -> Name mk_tv_name u s = mkInternalName (mkAlphaTyVarUnique u) (mkTyVarOccFS (mkFastString s)) noSrcSpan mkTemplateTyVarsFrom :: Int -> [Kind] -> [TyVar] -- a with unique (mkAlphaTyVarUnique n) -- b with unique (mkAlphaTyVarUnique n+1) -- ... etc -- Typically called as -- mkTemplateTyVarsFrom (length kv_bndrs) kinds -- where kv_bndrs are the kind-level binders of a TyCon mkTemplateTyVarsFrom n kinds = [ mkTyVar name kind | (kind, index) <- zip kinds [0..], let ch_ord = index + ord 'a' name_str | ch_ord <= ord 'z' = [chr ch_ord] | otherwise = 't':show index name = mk_tv_name (index + n) name_str ] mkTemplateTyVars :: [Kind] -> [TyVar] mkTemplateTyVars = mkTemplateTyVarsFrom 1 mkTemplateTyConBinders :: [Kind] -- [k1, .., kn] Kinds of kind-forall'd vars -> ([Kind] -> [Kind]) -- Arg is [kv1:k1, ..., kvn:kn] -- same length as first arg -- Result is anon arg kinds -> [TyConBinder] mkTemplateTyConBinders kind_var_kinds mk_anon_arg_kinds = kv_bndrs ++ tv_bndrs where kv_bndrs = mkTemplateKindTyConBinders kind_var_kinds anon_kinds = mk_anon_arg_kinds (mkTyVarTys (binderVars kv_bndrs)) tv_bndrs = mkTemplateAnonTyConBindersFrom (length kv_bndrs) anon_kinds mkTemplateKiTyVars :: [Kind] -- [k1, .., kn] Kinds of kind-forall'd vars -> ([Kind] -> [Kind]) -- Arg is [kv1:k1, ..., kvn:kn] -- same length as first arg -- Result is anon arg kinds [ak1, .., akm] -> [TyVar] -- [kv1:k1, ..., kvn:kn, av1:ak1, ..., avm:akm] -- Example: if you want the tyvars for -- forall (r:RuntimeRep) (a:TYPE r) (b:*). 
blah -- call mkTemplateKiTyVars [RuntimeRep] (\[r] -> [TYPE r, *]) mkTemplateKiTyVars kind_var_kinds mk_arg_kinds = kv_bndrs ++ tv_bndrs where kv_bndrs = mkTemplateKindVars kind_var_kinds anon_kinds = mk_arg_kinds (mkTyVarTys kv_bndrs) tv_bndrs = mkTemplateTyVarsFrom (length kv_bndrs) anon_kinds mkTemplateKiTyVar :: Kind -- [k1, .., kn] Kind of kind-forall'd var -> (Kind -> [Kind]) -- Arg is kv1:k1 -- Result is anon arg kinds [ak1, .., akm] -> [TyVar] -- [kv1:k1, ..., kvn:kn, av1:ak1, ..., avm:akm] -- Example: if you want the tyvars for -- forall (r:RuntimeRep) (a:TYPE r) (b:*). blah -- call mkTemplateKiTyVar RuntimeRep (\r -> [TYPE r, *]) mkTemplateKiTyVar kind mk_arg_kinds = kv_bndr : tv_bndrs where kv_bndr = mkTemplateKindVar kind anon_kinds = mk_arg_kinds (mkTyVarTy kv_bndr) tv_bndrs = mkTemplateTyVarsFrom 1 anon_kinds mkTemplateKindTyConBinders :: [Kind] -> [TyConBinder] -- Makes named, Specified binders mkTemplateKindTyConBinders kinds = [mkNamedTyConBinder Specified tv | tv <- mkTemplateKindVars kinds] mkTemplateAnonTyConBinders :: [Kind] -> [TyConBinder] mkTemplateAnonTyConBinders kinds = mkAnonTyConBinders VisArg (mkTemplateTyVars kinds) mkTemplateAnonTyConBindersFrom :: Int -> [Kind] -> [TyConBinder] mkTemplateAnonTyConBindersFrom n kinds = mkAnonTyConBinders VisArg (mkTemplateTyVarsFrom n kinds) alphaTyVars :: [TyVar] alphaTyVars = mkTemplateTyVars $ repeat liftedTypeKind alphaTyVar, betaTyVar, gammaTyVar, deltaTyVar :: TyVar (alphaTyVar:betaTyVar:gammaTyVar:deltaTyVar:_) = alphaTyVars alphaTyVarSpec, betaTyVarSpec, gammaTyVarSpec, deltaTyVarSpec :: TyVarBinder (alphaTyVarSpec:betaTyVarSpec:gammaTyVarSpec:deltaTyVarSpec:_) = mkTyVarBinders Specified alphaTyVars alphaTys :: [Type] alphaTys = mkTyVarTys alphaTyVars alphaTy, betaTy, gammaTy, deltaTy :: Type (alphaTy:betaTy:gammaTy:deltaTy:_) = alphaTys alphaTyVarsUnliftedRep :: [TyVar] alphaTyVarsUnliftedRep = mkTemplateTyVars $ repeat (mkTYPEapp unliftedRepTy) alphaTyVarUnliftedRep :: TyVar 
(alphaTyVarUnliftedRep:_) = alphaTyVarsUnliftedRep alphaTysUnliftedRep :: [Type] alphaTysUnliftedRep = mkTyVarTys alphaTyVarsUnliftedRep alphaTyUnliftedRep :: Type (alphaTyUnliftedRep:_) = alphaTysUnliftedRep runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar :: TyVar (runtimeRep1TyVar : runtimeRep2TyVar : runtimeRep3TyVar : _) = drop 16 (mkTemplateTyVars (repeat runtimeRepTy)) -- selects 'q','r' runtimeRep1TyVarInf, runtimeRep2TyVarInf :: TyVarBinder runtimeRep1TyVarInf = mkTyVarBinder Inferred runtimeRep1TyVar runtimeRep2TyVarInf = mkTyVarBinder Inferred runtimeRep2TyVar runtimeRep1Ty, runtimeRep2Ty, runtimeRep3Ty :: Type runtimeRep1Ty = mkTyVarTy runtimeRep1TyVar runtimeRep2Ty = mkTyVarTy runtimeRep2TyVar runtimeRep3Ty = mkTyVarTy runtimeRep3TyVar openAlphaTyVar, openBetaTyVar, openGammaTyVar :: TyVar -- alpha :: TYPE r1 -- beta :: TYPE r2 -- gamma :: TYPE r3 [openAlphaTyVar,openBetaTyVar,openGammaTyVar] = mkTemplateTyVars [mkTYPEapp runtimeRep1Ty, mkTYPEapp runtimeRep2Ty, mkTYPEapp runtimeRep3Ty] openAlphaTyVarSpec, openBetaTyVarSpec, openGammaTyVarSpec :: TyVarBinder openAlphaTyVarSpec = mkTyVarBinder Specified openAlphaTyVar openBetaTyVarSpec = mkTyVarBinder Specified openBetaTyVar openGammaTyVarSpec = mkTyVarBinder Specified openGammaTyVar openAlphaTy, openBetaTy, openGammaTy :: Type openAlphaTy = mkTyVarTy openAlphaTyVar openBetaTy = mkTyVarTy openBetaTyVar openGammaTy = mkTyVarTy openGammaTyVar levity1TyVar, levity2TyVar :: TyVar (levity2TyVar : levity1TyVar : _) -- NB: levity2TyVar before levity1TyVar = drop 10 (mkTemplateTyVars (repeat levityTy)) -- selects 'k', 'l' -- The ordering of levity2TyVar before levity1TyVar is chosen so that -- the more common levity1TyVar uses the levity variable 'l'. 
levity1TyVarInf, levity2TyVarInf :: TyVarBinder levity1TyVarInf = mkTyVarBinder Inferred levity1TyVar levity2TyVarInf = mkTyVarBinder Inferred levity2TyVar levity1Ty, levity2Ty :: Type levity1Ty = mkTyVarTy levity1TyVar levity2Ty = mkTyVarTy levity2TyVar levPolyAlphaTyVar, levPolyBetaTyVar :: TyVar [levPolyAlphaTyVar, levPolyBetaTyVar] = mkTemplateTyVars [mkTYPEapp (mkTyConApp boxedRepDataConTyCon [levity1Ty]) ,mkTYPEapp (mkTyConApp boxedRepDataConTyCon [levity2Ty])] -- alpha :: TYPE ('BoxedRep l) -- beta :: TYPE ('BoxedRep k) levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec :: TyVarBinder levPolyAlphaTyVarSpec = mkTyVarBinder Specified levPolyAlphaTyVar levPolyBetaTyVarSpec = mkTyVarBinder Specified levPolyBetaTyVar levPolyAlphaTy, levPolyBetaTy :: Type levPolyAlphaTy = mkTyVarTy levPolyAlphaTyVar levPolyBetaTy = mkTyVarTy levPolyBetaTyVar multiplicityTyVar1, multiplicityTyVar2 :: TyVar (multiplicityTyVar1 : multiplicityTyVar2 : _) = drop 13 (mkTemplateTyVars (repeat multiplicityTy)) -- selects 'n', 'm' {- ************************************************************************ * * FunTyCon * * ************************************************************************ -} funTyConName :: Name funTyConName = mkPrimTcName UserSyntax (fsLit "FUN") funTyConKey funTyCon -- | The @FUN@ type constructor. -- -- @ -- FUN :: forall (m :: Multiplicity) -> -- forall {rep1 :: RuntimeRep} {rep2 :: RuntimeRep}. -- TYPE rep1 -> TYPE rep2 -> * -- @ -- -- The runtime representations quantification is left inferred. This -- means they cannot be specified with @-XTypeApplications@. -- -- This is a deliberate choice to allow future extensions to the -- function arrow. To allow visible application a type synonym can be -- defined: -- -- @ -- type Arr :: forall (rep1 :: RuntimeRep) (rep2 :: RuntimeRep). 
-- TYPE rep1 -> TYPE rep2 -> Type -- type Arr = FUN 'Many -- @ -- funTyCon :: TyCon funTyCon = mkFunTyCon funTyConName tc_bndrs tc_rep_nm where -- See also unrestrictedFunTyCon tc_bndrs = [ mkNamedTyConBinder Required multiplicityTyVar1 , mkNamedTyConBinder Inferred runtimeRep1TyVar , mkNamedTyConBinder Inferred runtimeRep2TyVar ] ++ mkTemplateAnonTyConBinders [ mkTYPEapp runtimeRep1Ty , mkTYPEapp runtimeRep2Ty ] tc_rep_nm = mkPrelTyConRepName funTyConName {- ************************************************************************ * * Kinds * * ************************************************************************ Note [TYPE and RuntimeRep] ~~~~~~~~~~~~~~~~~~~~~~~~~~ All types that classify values have a kind of the form (TYPE rr), where data RuntimeRep -- Defined in ghc-prim:GHC.Types = BoxedRep Levity | IntRep | FloatRep .. etc .. data Levity = Lifted | Unlifted rr :: RuntimeRep TYPE :: RuntimeRep -> TYPE 'LiftedRep -- Built in So for example: Int :: TYPE ('BoxedRep 'Lifted) Array# Int :: TYPE ('BoxedRep 'Unlifted) Int# :: TYPE 'IntRep Float# :: TYPE 'FloatRep Maybe :: TYPE ('BoxedRep 'Lifted) -> TYPE ('BoxedRep 'Lifted) (# , #) :: TYPE r1 -> TYPE r2 -> TYPE (TupleRep [r1, r2]) We abbreviate '*' specially: type LiftedRep = 'BoxedRep 'Lifted type * = TYPE LiftedRep The 'rr' parameter tells us how the value is represented at runtime. Generally speaking, you can't be polymorphic in 'rr'. E.g f :: forall (rr:RuntimeRep) (a:TYPE rr). a -> [a] f = /\(rr:RuntimeRep) (a:rr) \(a:rr). ... This is no good: we could not generate code for 'f', because the calling convention for 'f' varies depending on whether the argument is a a Int, Int#, or Float#. (You could imagine generating specialised code, one for each instantiation of 'rr', but we don't do that.) Certain functions CAN be runtime-rep-polymorphic, because the code generator never has to manipulate a value of type 'a :: TYPE rr'. * error :: forall (rr:RuntimeRep) (a:TYPE rr). 
String -> a Code generator never has to manipulate the return value. * unsafeCoerce#, defined in Desugar.mkUnsafeCoercePair: Always inlined to be a no-op unsafeCoerce# :: forall (r1 :: RuntimeRep) (r2 :: RuntimeRep) (a :: TYPE r1) (b :: TYPE r2). a -> b * Unboxed tuples, and unboxed sums, defined in GHC.Builtin.Types Always inlined, and hence specialised to the call site (#,#) :: forall (r1 :: RuntimeRep) (r2 :: RuntimeRep) (a :: TYPE r1) (b :: TYPE r2). a -> b -> TYPE ('TupleRep '[r1, r2]) -} tYPETyCon :: TyCon tYPETyConName :: Name tYPETyCon = mkPrimTyCon tYPETyConName (mkTemplateAnonTyConBinders [runtimeRepTy]) liftedTypeKind [Nominal] -------------------------- -- ... and now their names -- If you edit these, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint tYPETyConName = mkPrimTcName UserSyntax (fsLit "TYPE") tYPETyConKey tYPETyCon mkPrimTcName :: BuiltInSyntax -> FastString -> Unique -> TyCon -> Name mkPrimTcName built_in_syntax occ key tycon = mkWiredInName gHC_PRIM (mkTcOccFS occ) key (mkATyCon tycon) built_in_syntax ----------------------------- -- Given a Multiplicity, applies FUN to it. functionWithMultiplicity :: Type -> Type functionWithMultiplicity mul = TyConApp funTyCon [mul] {- ************************************************************************ * * Basic primitive types (@Char#@, @Int#@, etc.) * * ************************************************************************ -} -- | Create a primitive 'TyCon' with the given 'Name', -- arguments of kind 'Type` with the given 'Role's, -- and the given result kind representation. -- -- Only use this in "GHC.Builtin.Types.Prim". 
pcPrimTyCon :: Name -> [Role] -> RuntimeRepType -> TyCon pcPrimTyCon name roles res_rep = mkPrimTyCon name binders result_kind roles where bndr_kis = liftedTypeKind <$ roles binders = mkTemplateAnonTyConBinders bndr_kis result_kind = mkTYPEapp res_rep -- | Create a primitive nullary 'TyCon' with the given 'Name' -- and result kind representation. -- -- Only use this in "GHC.Builtin.Types.Prim". pcPrimTyCon0 :: Name -> RuntimeRepType -> TyCon pcPrimTyCon0 name res_rep = pcPrimTyCon name [] res_rep -- | Create a primitive 'TyCon' like 'pcPrimTyCon', except the last -- argument is levity-polymorphic. -- -- Only use this in "GHC.Builtin.Types.Prim". pcPrimTyCon_LevPolyLastArg :: Name -> [Role] -- ^ roles of the arguments (must be non-empty), -- not including the implicit argument of kind 'Levity', -- which always has 'Nominal' role -> RuntimeRepType -- ^ representation of the fully-applied type -> TyCon pcPrimTyCon_LevPolyLastArg name roles res_rep = mkPrimTyCon name binders result_kind (Nominal : roles) where result_kind = mkTYPEapp res_rep lev_bndr = mkNamedTyConBinder Inferred levity1TyVar binders = lev_bndr : mkTemplateAnonTyConBinders anon_bndr_kis lev_tv = mkTyVarTy (binderVar lev_bndr) -- [ Type, ..., Type, TYPE (BoxedRep l) ] anon_bndr_kis = changeLast (liftedTypeKind <$ roles) (mkTYPEapp $ mkTyConApp boxedRepDataConTyCon [lev_tv]) charPrimTy :: Type charPrimTy = mkTyConTy charPrimTyCon charPrimTyCon :: TyCon charPrimTyCon = pcPrimTyCon0 charPrimTyConName wordRepDataConTy intPrimTy :: Type intPrimTy = mkTyConTy intPrimTyCon intPrimTyCon :: TyCon intPrimTyCon = pcPrimTyCon0 intPrimTyConName intRepDataConTy int8PrimTy :: Type int8PrimTy = mkTyConTy int8PrimTyCon int8PrimTyCon :: TyCon int8PrimTyCon = pcPrimTyCon0 int8PrimTyConName int8RepDataConTy int16PrimTy :: Type int16PrimTy = mkTyConTy int16PrimTyCon int16PrimTyCon :: TyCon int16PrimTyCon = pcPrimTyCon0 int16PrimTyConName int16RepDataConTy int32PrimTy :: Type int32PrimTy = mkTyConTy int32PrimTyCon 
int32PrimTyCon :: TyCon int32PrimTyCon = pcPrimTyCon0 int32PrimTyConName int32RepDataConTy int64PrimTy :: Type int64PrimTy = mkTyConTy int64PrimTyCon int64PrimTyCon :: TyCon int64PrimTyCon = pcPrimTyCon0 int64PrimTyConName int64RepDataConTy wordPrimTy :: Type wordPrimTy = mkTyConTy wordPrimTyCon wordPrimTyCon :: TyCon wordPrimTyCon = pcPrimTyCon0 wordPrimTyConName wordRepDataConTy word8PrimTy :: Type word8PrimTy = mkTyConTy word8PrimTyCon word8PrimTyCon :: TyCon word8PrimTyCon = pcPrimTyCon0 word8PrimTyConName word8RepDataConTy word16PrimTy :: Type word16PrimTy = mkTyConTy word16PrimTyCon word16PrimTyCon :: TyCon word16PrimTyCon = pcPrimTyCon0 word16PrimTyConName word16RepDataConTy word32PrimTy :: Type word32PrimTy = mkTyConTy word32PrimTyCon word32PrimTyCon :: TyCon word32PrimTyCon = pcPrimTyCon0 word32PrimTyConName word32RepDataConTy word64PrimTy :: Type word64PrimTy = mkTyConTy word64PrimTyCon word64PrimTyCon :: TyCon word64PrimTyCon = pcPrimTyCon0 word64PrimTyConName word64RepDataConTy addrPrimTy :: Type addrPrimTy = mkTyConTy addrPrimTyCon addrPrimTyCon :: TyCon addrPrimTyCon = pcPrimTyCon0 addrPrimTyConName addrRepDataConTy floatPrimTy :: Type floatPrimTy = mkTyConTy floatPrimTyCon floatPrimTyCon :: TyCon floatPrimTyCon = pcPrimTyCon0 floatPrimTyConName floatRepDataConTy doublePrimTy :: Type doublePrimTy = mkTyConTy doublePrimTyCon doublePrimTyCon :: TyCon doublePrimTyCon = pcPrimTyCon0 doublePrimTyConName doubleRepDataConTy {- ************************************************************************ * * The @State#@ type (and @_RealWorld@ types) * * ************************************************************************ Note [The equality types story] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC sports a veritable menagerie of equality types: Type or Lifted? Hetero? Role Built in Defining module class? 
L/U TyCon ----------------------------------------------------------------------------------------- ~# T U hetero nominal eqPrimTyCon GHC.Prim ~~ C L hetero nominal heqTyCon GHC.Types ~ C L homo nominal eqTyCon GHC.Types :~: T L homo nominal (not built-in) Data.Type.Equality :~~: T L hetero nominal (not built-in) Data.Type.Equality ~R# T U hetero repr eqReprPrimTy GHC.Prim Coercible C L homo repr coercibleTyCon GHC.Types Coercion T L homo repr (not built-in) Data.Type.Coercion ~P# T U hetero phantom eqPhantPrimTyCon GHC.Prim Recall that "hetero" means the equality can related types of different kinds. Knowing that (t1 ~# t2) or (t1 ~R# t2) or even that (t1 ~P# t2) also means that (k1 ~# k2), where (t1 :: k1) and (t2 :: k2). To produce less confusion for end users, when not dumping and without -fprint-equality-relations, each of these groups is printed as the bottommost listed equality. That is, (~#) and (~~) are both rendered as (~) in error messages, and (~R#) is rendered as Coercible. Let's take these one at a time: -------------------------- (~#) :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) -------------------------- This is The Type Of Equality in GHC. It classifies nominal coercions. This type is used in the solver for recording equality constraints. It responds "yes" to Type.isEqPrimPred and classifies as an EqPred in Type.classifyPredType. All wanted constraints of this type are built with coercion holes. (See Note [Coercion holes] in GHC.Core.TyCo.Rep.) But see also Note [Deferred errors for coercion holes] in GHC.Tc.Errors to see how equality constraints are deferred. Within GHC, ~# is called eqPrimTyCon, and it is defined in GHC.Builtin.Types.Prim. -------------------------- (~~) :: forall k1 k2. 
k1 -> k2 -> Constraint -------------------------- This is (almost) an ordinary class, defined as if by class a ~# b => a ~~ b instance a ~# b => a ~~ b Here's what's unusual about it: * We can't actually declare it that way because we don't have syntax for ~#. And ~# isn't a constraint, so even if we could write it, it wouldn't kind check. * Users cannot write instances of it. * It is "naturally coherent". This means that the solver won't hesitate to solve a goal of type (a ~~ b) even if there is, say (Int ~~ c) in the context. (Normally, it waits to learn more, just in case the given influences what happens next.) See Note [Naturally coherent classes] in GHC.Tc.Solver.Interact. * It always terminates. That is, in the UndecidableInstances checks, we don't worry if a (~~) constraint is too big, as we know that solving equality terminates. On the other hand, this behaves just like any class w.r.t. eager superclass unpacking in the solver. So a lifted equality given quickly becomes an unlifted equality given. This is good, because the solver knows all about unlifted equalities. There is some special-casing in GHC.Tc.Solver.Interact.matchClassInst to pretend that there is an instance of this class, as we can't write the instance in Haskell. Within GHC, ~~ is called heqTyCon, and it is defined in GHC.Builtin.Types. -------------------------- (~) :: forall k. k -> k -> Constraint -------------------------- This is /exactly/ like (~~), except with a homogeneous kind. It is an almost-ordinary class defined as if by class a ~# b => (a :: k) ~ (b :: k) instance a ~# b => a ~ b * All the bullets for (~~) apply * In addition (~) is magical syntax, as ~ is a reserved symbol. It cannot be exported or imported. Within GHC, ~ is called eqTyCon, and it is defined in GHC.Builtin.Types. Historical note: prior to July 18 (~) was defined as a more-ordinary class with (~~) as a superclass. 
But that made it special in different ways; and the extra superclass selections to get from (~) to (~#) via (~~) were tiresome. Now it's defined uniformly with (~~) and Coercible; much nicer.) -------------------------- (:~:) :: forall k. k -> k -> * (:~~:) :: forall k1 k2. k1 -> k2 -> * -------------------------- These are perfectly ordinary GADTs, wrapping (~) and (~~) resp. They are not defined within GHC at all. -------------------------- (~R#) :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) -------------------------- The is the representational analogue of ~#. This is the type of representational equalities that the solver works on. All wanted constraints of this type are built with coercion holes. Within GHC, ~R# is called eqReprPrimTyCon, and it is defined in GHC.Builtin.Types.Prim. -------------------------- Coercible :: forall k. k -> k -> Constraint -------------------------- This is quite like (~~) in the way it's defined and treated within GHC, but it's homogeneous. Homogeneity helps with type inference (as GHC can solve one kind from the other) and, in my (Richard's) estimation, will be more intuitive for users. An alternative design included HCoercible (like (~~)) and Coercible (like (~)). One annoyance was that we want `coerce :: Coercible a b => a -> b`, and we need the type of coerce to be fully wired-in. So the HCoercible/Coercible split required that both types be fully wired-in. Instead of doing this, I just got rid of HCoercible, as I'm not sure who would use it, anyway. Within GHC, Coercible is called coercibleTyCon, and it is defined in GHC.Builtin.Types. -------------------------- Coercion :: forall k. k -> k -> * -------------------------- This is a perfectly ordinary GADT, wrapping Coercible. It is not defined within GHC at all. -------------------------- (~P#) :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) -------------------------- This is the phantom analogue of ~# and it is barely used at all. 
(The solver has no idea about this one.) Here is the motivation: data Phant a = MkPhant type role Phant phantom Phant _P :: Phant Int ~P# Phant Bool We just need to have something to put on that last line. You probably don't need to worry about it. Note [The State# TyCon] ~~~~~~~~~~~~~~~~~~~~~~~ State# is the primitive, unlifted type of states. It has one type parameter, thus State# RealWorld or State# s where s is a type variable. The only purpose of the type parameter is to keep different state threads separate. It is represented by nothing at all. The type parameter to State# is intended to keep separate threads separate. Even though this parameter is not used in the definition of State#, it is given role Nominal to enforce its intended use. -} mkStatePrimTy :: Type -> Type mkStatePrimTy ty = TyConApp statePrimTyCon [ty] statePrimTyCon :: TyCon -- See Note [The State# TyCon] statePrimTyCon = pcPrimTyCon statePrimTyConName [Nominal] zeroBitRepTy {- RealWorld is deeply magical. It is *primitive*, but it is not *unlifted* (hence ptrArg). We never manipulate values of type RealWorld; it's only used in the type system, to parameterise State#. -} realWorldTyCon :: TyCon realWorldTyCon = mkPrimTyCon realWorldTyConName [] liftedTypeKind [] realWorldTy :: Type realWorldTy = mkTyConTy realWorldTyCon realWorldStatePrimTy :: Type realWorldStatePrimTy = mkStatePrimTy realWorldTy -- State# RealWorld -- Note: the ``state-pairing'' types are not truly primitive, -- so they are defined in \tr{GHC.Builtin.Types}, not here. mkProxyPrimTy :: Type -> Type -> Type mkProxyPrimTy k ty = TyConApp proxyPrimTyCon [k, ty] proxyPrimTyCon :: TyCon proxyPrimTyCon = mkPrimTyCon proxyPrimTyConName binders res_kind [Nominal,Phantom] where -- Kind: forall k. 
k -> TYPE (TupleRep '[]) binders = mkTemplateTyConBinders [liftedTypeKind] id res_kind = unboxedTupleKind [] {- ********************************************************************* * * Primitive equality constraints See Note [The equality types story] * * ********************************************************************* -} eqPrimTyCon :: TyCon -- The representation type for equality predicates -- See Note [The equality types story] eqPrimTyCon = mkPrimTyCon eqPrimTyConName binders res_kind roles where -- Kind :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id res_kind = unboxedTupleKind [] roles = [Nominal, Nominal, Nominal, Nominal] -- like eqPrimTyCon, but the type for *Representational* coercions -- this should only ever appear as the type of a covar. Its role is -- interpreted in coercionRole eqReprPrimTyCon :: TyCon -- See Note [The equality types story] eqReprPrimTyCon = mkPrimTyCon eqReprPrimTyConName binders res_kind roles where -- Kind :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id res_kind = unboxedTupleKind [] roles = [Nominal, Nominal, Representational, Representational] -- like eqPrimTyCon, but the type for *Phantom* coercions. -- This is only used to make higher-order equalities. Nothing -- should ever actually have this type! eqPhantPrimTyCon :: TyCon eqPhantPrimTyCon = mkPrimTyCon eqPhantPrimTyConName binders res_kind roles where -- Kind :: forall k1 k2. k1 -> k2 -> TYPE (TupleRep '[]) binders = mkTemplateTyConBinders [liftedTypeKind, liftedTypeKind] id res_kind = unboxedTupleKind [] roles = [Nominal, Nominal, Phantom, Phantom] -- | Given a Role, what TyCon is the type of equality predicates at that role? 
equalityTyCon :: Role -> TyCon equalityTyCon Nominal = eqPrimTyCon equalityTyCon Representational = eqReprPrimTyCon equalityTyCon Phantom = eqPhantPrimTyCon {- ********************************************************************* * * The primitive array types * * ********************************************************************* -} arrayPrimTyCon, mutableArrayPrimTyCon, mutableByteArrayPrimTyCon, byteArrayPrimTyCon, smallArrayPrimTyCon, smallMutableArrayPrimTyCon :: TyCon arrayPrimTyCon = pcPrimTyCon_LevPolyLastArg arrayPrimTyConName [Representational] unliftedRepTy mutableArrayPrimTyCon = pcPrimTyCon_LevPolyLastArg mutableArrayPrimTyConName [Nominal, Representational] unliftedRepTy mutableByteArrayPrimTyCon = pcPrimTyCon mutableByteArrayPrimTyConName [Nominal] unliftedRepTy byteArrayPrimTyCon = pcPrimTyCon0 byteArrayPrimTyConName unliftedRepTy smallArrayPrimTyCon = pcPrimTyCon_LevPolyLastArg smallArrayPrimTyConName [Representational] unliftedRepTy smallMutableArrayPrimTyCon = pcPrimTyCon_LevPolyLastArg smallMutableArrayPrimTyConName [Nominal, Representational] unliftedRepTy mkArrayPrimTy :: Type -> Type mkArrayPrimTy elt = TyConApp arrayPrimTyCon [getLevity elt, elt] byteArrayPrimTy :: Type byteArrayPrimTy = mkTyConTy byteArrayPrimTyCon mkSmallArrayPrimTy :: Type -> Type mkSmallArrayPrimTy elt = TyConApp smallArrayPrimTyCon [getLevity elt, elt] mkMutableArrayPrimTy :: Type -> Type -> Type mkMutableArrayPrimTy s elt = TyConApp mutableArrayPrimTyCon [getLevity elt, s, elt] mkMutableByteArrayPrimTy :: Type -> Type mkMutableByteArrayPrimTy s = TyConApp mutableByteArrayPrimTyCon [s] mkSmallMutableArrayPrimTy :: Type -> Type -> Type mkSmallMutableArrayPrimTy s elt = TyConApp smallMutableArrayPrimTyCon [getLevity elt, s, elt] {- ********************************************************************* * * The mutable variable type * * ********************************************************************* -} mutVarPrimTyCon :: TyCon mutVarPrimTyCon = 
pcPrimTyCon_LevPolyLastArg mutVarPrimTyConName [Nominal, Representational] unliftedRepTy mkMutVarPrimTy :: Type -> Type -> Type mkMutVarPrimTy s elt = TyConApp mutVarPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * \subsection[TysPrim-io-port-var]{The synchronizing I/O Port type} * * ************************************************************************ -} ioPortPrimTyCon :: TyCon ioPortPrimTyCon = pcPrimTyCon_LevPolyLastArg ioPortPrimTyConName [Nominal, Representational] unliftedRepTy mkIOPortPrimTy :: Type -> Type -> Type mkIOPortPrimTy s elt = TyConApp ioPortPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * The synchronizing variable type \subsection[TysPrim-synch-var]{The synchronizing variable type} * * ************************************************************************ -} mVarPrimTyCon :: TyCon mVarPrimTyCon = pcPrimTyCon_LevPolyLastArg mVarPrimTyConName [Nominal, Representational] unliftedRepTy mkMVarPrimTy :: Type -> Type -> Type mkMVarPrimTy s elt = TyConApp mVarPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * The transactional variable type * * ************************************************************************ -} tVarPrimTyCon :: TyCon tVarPrimTyCon = pcPrimTyCon_LevPolyLastArg tVarPrimTyConName [Nominal, Representational] unliftedRepTy mkTVarPrimTy :: Type -> Type -> Type mkTVarPrimTy s elt = TyConApp tVarPrimTyCon [getLevity elt, s, elt] {- ************************************************************************ * * The stable-pointer type * * ************************************************************************ -} stablePtrPrimTyCon :: TyCon stablePtrPrimTyCon = pcPrimTyCon_LevPolyLastArg stablePtrPrimTyConName [Representational] addrRepDataConTy mkStablePtrPrimTy :: Type -> Type mkStablePtrPrimTy ty = TyConApp stablePtrPrimTyCon [getLevity ty, 
ty] {- ************************************************************************ * * The stable-name type * * ************************************************************************ -} stableNamePrimTyCon :: TyCon stableNamePrimTyCon = pcPrimTyCon_LevPolyLastArg stableNamePrimTyConName [Phantom] unliftedRepTy mkStableNamePrimTy :: Type -> Type mkStableNamePrimTy ty = TyConApp stableNamePrimTyCon [getLevity ty, ty] {- ************************************************************************ * * The Compact NFData (CNF) type * * ************************************************************************ -} compactPrimTyCon :: TyCon compactPrimTyCon = pcPrimTyCon0 compactPrimTyConName unliftedRepTy compactPrimTy :: Type compactPrimTy = mkTyConTy compactPrimTyCon {- ************************************************************************ * * The @StackSnapshot#@ type * * ************************************************************************ -} stackSnapshotPrimTyCon :: TyCon stackSnapshotPrimTyCon = pcPrimTyCon0 stackSnapshotPrimTyConName unliftedRepTy stackSnapshotPrimTy :: Type stackSnapshotPrimTy = mkTyConTy stackSnapshotPrimTyCon {- ************************************************************************ * * The ``bytecode object'' type * * ************************************************************************ -} -- Unlike most other primitive types, BCO is lifted. This is because in -- general a BCO may be a thunk for the reasons given in Note [Updatable CAF -- BCOs] in GHCi.CreateBCO. 
bcoPrimTy :: Type bcoPrimTy = mkTyConTy bcoPrimTyCon bcoPrimTyCon :: TyCon bcoPrimTyCon = pcPrimTyCon0 bcoPrimTyConName liftedRepTy {- ************************************************************************ * * The ``weak pointer'' type * * ************************************************************************ -} weakPrimTyCon :: TyCon weakPrimTyCon = pcPrimTyCon_LevPolyLastArg weakPrimTyConName [Representational] unliftedRepTy mkWeakPrimTy :: Type -> Type mkWeakPrimTy v = TyConApp weakPrimTyCon [getLevity v, v] {- ************************************************************************ * * The ``thread id'' type * * ************************************************************************ A thread id is represented by a pointer to the TSO itself, to ensure that they are always unique and we can always find the TSO for a given thread id. However, this has the unfortunate consequence that a ThreadId# for a given thread is treated as a root by the garbage collector and can keep TSOs around for too long. Hence the programmer API for thread manipulation uses a weak pointer to the thread id internally. -} threadIdPrimTy :: Type threadIdPrimTy = mkTyConTy threadIdPrimTyCon threadIdPrimTyCon :: TyCon threadIdPrimTyCon = pcPrimTyCon0 threadIdPrimTyConName unliftedRepTy {- ************************************************************************ * * \subsection{SIMD vector types} * * ************************************************************************ -} #include "primop-vector-tys.hs-incl" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Uniques.hs0000644000000000000000000003274214472400112021236 0ustar0000000000000000 -- | This is where we define a mapping from Uniques to their associated -- known-key Names for things associated with tuples and sums. We use this -- mapping while deserializing known-key Names in interface file symbol tables, -- which are encoded as their Unique. See Note [Symbol table representation of -- names] for details. 
-- module GHC.Builtin.Uniques ( -- * Looking up known-key names knownUniqueName -- * Getting the 'Unique's of 'Name's -- ** Anonymous sums , mkSumTyConUnique , mkSumDataConUnique -- ** Tuples -- *** Vanilla , mkTupleTyConUnique , mkTupleDataConUnique -- *** Constraint , mkCTupleTyConUnique , mkCTupleDataConUnique , mkCTupleSelIdUnique -- ** Making built-in uniques , mkAlphaTyVarUnique , mkPrimOpIdUnique, mkPrimOpWrapperUnique , mkPreludeMiscIdUnique, mkPreludeDataConUnique , mkPreludeTyConUnique, mkPreludeClassUnique , mkVarOccUnique, mkDataOccUnique, mkTvOccUnique, mkTcOccUnique , mkRegSingleUnique, mkRegPairUnique, mkRegClassUnique, mkRegSubUnique , mkCostCentreUnique , mkBuiltinUnique , mkPseudoUniqueE -- ** Deriving uniquesc -- *** From TyCon name uniques , tyConRepNameUnique -- *** From DataCon name uniques , dataConWorkerUnique, dataConTyRepNameUnique , initExitJoinUnique ) where import GHC.Prelude import {-# SOURCE #-} GHC.Builtin.Types import {-# SOURCE #-} GHC.Core.TyCon import {-# SOURCE #-} GHC.Core.DataCon import {-# SOURCE #-} GHC.Types.Id import {-# SOURCE #-} GHC.Types.Name import GHC.Types.Basic import GHC.Types.Unique import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.Maybe -- | Get the 'Name' associated with a known-key 'Unique'. knownUniqueName :: Unique -> Maybe Name knownUniqueName u = case tag of 'z' -> Just $ getUnboxedSumName n '4' -> Just $ getTupleTyConName Boxed n '5' -> Just $ getTupleTyConName Unboxed n '7' -> Just $ getTupleDataConName Boxed n '8' -> Just $ getTupleDataConName Unboxed n 'j' -> Just $ getCTupleSelIdName n 'k' -> Just $ getCTupleTyConName n 'm' -> Just $ getCTupleDataConName n _ -> Nothing where (tag, n) = unpkUnique u {- Note [Unique layout for unboxed sums] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sum arities start from 2. 
The encoding is a bit funny: we break up the integral part into bitfields for the arity, an alternative index (which is taken to be 0xfc in the case of the TyCon), and, in the case of a datacon, a tag (used to identify the sum's TypeRep binding). This layout is chosen to remain compatible with the usual unique allocation for wired-in data constructors described in GHC.Types.Unique TyCon for sum of arity k: 00000000 kkkkkkkk 11111100 TypeRep of TyCon for sum of arity k: 00000000 kkkkkkkk 11111101 DataCon for sum of arity k and alternative n (zero-based): 00000000 kkkkkkkk nnnnnn00 TypeRep for sum DataCon of arity k and alternative n (zero-based): 00000000 kkkkkkkk nnnnnn10 -} mkSumTyConUnique :: Arity -> Unique mkSumTyConUnique arity = assert (arity < 0x3f) $ -- 0x3f since we only have 6 bits to encode the -- alternative mkUnique 'z' (arity `shiftL` 8 .|. 0xfc) mkSumDataConUnique :: ConTagZ -> Arity -> Unique mkSumDataConUnique alt arity | alt >= arity = panic ("mkSumDataConUnique: " ++ show alt ++ " >= " ++ show arity) | otherwise = mkUnique 'z' (arity `shiftL` 8 + alt `shiftL` 2) {- skip the tycon -} getUnboxedSumName :: Int -> Name getUnboxedSumName n | n .&. 0xfc == 0xfc = case tag of 0x0 -> tyConName $ sumTyCon arity 0x1 -> getRep $ sumTyCon arity _ -> pprPanic "getUnboxedSumName: invalid tag" (ppr tag) | tag == 0x0 = dataConName $ sumDataCon (alt + 1) arity | tag == 0x1 = getName $ dataConWrapId $ sumDataCon (alt + 1) arity | tag == 0x2 = getRep $ promoteDataCon $ sumDataCon (alt + 1) arity | otherwise = pprPanic "getUnboxedSumName" (ppr n) where arity = n `shiftR` 8 alt = (n .&. 0xfc) `shiftR` 2 tag = 0x3 .&. 
n getRep tycon = fromMaybe (pprPanic "getUnboxedSumName(getRep)" (ppr tycon)) $ tyConRepName_maybe tycon -- Note [Uniques for tuple type and data constructors] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Wired-in type constructor keys occupy *two* slots: -- * u: the TyCon itself -- * u+1: the TyConRepName of the TyCon -- -- Wired-in tuple data constructor keys occupy *three* slots: -- * u: the DataCon itself -- * u+1: its worker Id -- * u+2: the TyConRepName of the promoted TyCon {- Note [Unique layout for constraint tuple selectors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Constraint tuples, like boxed and unboxed tuples, have their type and data constructor Uniques wired in (see Note [Uniques for tuple type and data constructors]). Constraint tuples are somewhat more involved, however. For a boxed or unboxed n-tuple, we need: * A Unique for the type constructor, and * A Unique for the data constructor With a constraint n-tuple, however, we need: * A Unique for the type constructor, * A Unique for the data constructor, and * A Unique for each of the n superclass selectors To pick a concrete example (n = 2), the binary constraint tuple has a type constructor and data constructor (%,%) along with superclass selectors $p1(%,%) and $p2(%,%). Just as we wire in the Uniques for constraint tuple type constructors and data constructors, we wish to wire in the Uniques for the superclass selectors as well. Not only does this make everything consistent, it also avoids a compile-time performance penalty whenever GHC.Classes is loaded from an interface file. This is because GHC.Classes defines constraint tuples as class definitions, and if these classes weren't wired in, then loading GHC.Classes would also load every single constraint tuple type constructor, data constructor, and superclass selector. See #18635. We encode the Uniques for constraint tuple superclass selectors as follows. 
The integral part of the Unique is broken up into bitfields for the arity and the position of the superclass. Given a selector for a constraint tuple with arity n (zero-based) and position k (where 1 <= k <= n), its Unique will look like: 00000000 nnnnnnnn kkkkkkkk We can use bit-twiddling tricks to access the arity and position with cTupleSelIdArityBits and cTupleSelIdPosBitmask, respectively. This pattern bears a certain resemblance to the way that the Uniques for unboxed sums are encoded. This is because for a unboxed sum of arity n, there are n corresponding data constructors, each with an alternative position k. Similarly, for a constraint tuple of arity n, there are n corresponding superclass selectors. Reading Note [Unique layout for unboxed sums] will instill an appreciation for how the encoding for constraint tuple superclass selector Uniques takes inspiration from the encoding for unboxed sum Uniques. -} mkCTupleTyConUnique :: Arity -> Unique mkCTupleTyConUnique a = mkUnique 'k' (2*a) mkCTupleDataConUnique :: Arity -> Unique mkCTupleDataConUnique a = mkUnique 'm' (3*a) mkCTupleSelIdUnique :: ConTagZ -> Arity -> Unique mkCTupleSelIdUnique sc_pos arity | sc_pos >= arity = panic ("mkCTupleSelIdUnique: " ++ show sc_pos ++ " >= " ++ show arity) | otherwise = mkUnique 'j' (arity `shiftL` cTupleSelIdArityBits + sc_pos) getCTupleTyConName :: Int -> Name getCTupleTyConName n = case n `divMod` 2 of (arity, 0) -> cTupleTyConName arity (arity, 1) -> mkPrelTyConRepName $ cTupleTyConName arity _ -> panic "getCTupleTyConName: impossible" getCTupleDataConName :: Int -> Name getCTupleDataConName n = case n `divMod` 3 of (arity, 0) -> cTupleDataConName arity (arity, 1) -> getName $ dataConWrapId $ cTupleDataCon arity (arity, 2) -> mkPrelTyConRepName $ cTupleDataConName arity _ -> panic "getCTupleDataConName: impossible" getCTupleSelIdName :: Int -> Name getCTupleSelIdName n = cTupleSelIdName (sc_pos + 1) arity where arity = n `shiftR` cTupleSelIdArityBits sc_pos = n .&. 
cTupleSelIdPosBitmask -- Given the arity of a constraint tuple, this is the number of bits by which -- one must shift it to the left in order to encode the arity in the Unique -- of a superclass selector for that constraint tuple. Alternatively, given the -- Unique for a constraint tuple superclass selector, this is the number of -- bits by which one must shift it to the right to retrieve the arity of the -- constraint tuple. See Note [Unique layout for constraint tuple selectors]. cTupleSelIdArityBits :: Int cTupleSelIdArityBits = 8 -- Given the Unique for a constraint tuple superclass selector, one can -- retrieve the position of the selector by ANDing this mask, which will -- clear all but the eight least significant bits. -- See Note [Unique layout for constraint tuple selectors]. cTupleSelIdPosBitmask :: Int cTupleSelIdPosBitmask = 0xff -------------------------------------------------- -- Normal tuples mkTupleDataConUnique :: Boxity -> Arity -> Unique mkTupleDataConUnique Boxed a = mkUnique '7' (3*a) -- may be used in C labels mkTupleDataConUnique Unboxed a = mkUnique '8' (3*a) mkTupleTyConUnique :: Boxity -> Arity -> Unique mkTupleTyConUnique Boxed a = mkUnique '4' (2*a) mkTupleTyConUnique Unboxed a = mkUnique '5' (2*a) getTupleTyConName :: Boxity -> Int -> Name getTupleTyConName boxity n = case n `divMod` 2 of (arity, 0) -> tyConName $ tupleTyCon boxity arity (arity, 1) -> fromMaybe (panic "getTupleTyConName") $ tyConRepName_maybe $ tupleTyCon boxity arity _ -> panic "getTupleTyConName: impossible" getTupleDataConName :: Boxity -> Int -> Name getTupleDataConName boxity n = case n `divMod` 3 of (arity, 0) -> dataConName $ tupleDataCon boxity arity (arity, 1) -> idName $ dataConWorkId $ tupleDataCon boxity arity (arity, 2) -> fromMaybe (panic "getTupleDataCon") $ tyConRepName_maybe $ promotedTupleDataCon boxity arity _ -> panic "getTupleDataConName: impossible" {- Note [Uniques for wired-in prelude things and known masks] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Allocation of unique supply characters: v,u: for renumbering value-, and usage- vars. B: builtin C-E: pseudo uniques (used in native-code generator) I: GHCi evaluation X: uniques from mkLocalUnique _: unifiable tyvars (above) 0-9: prelude things below (no numbers left any more..) :: (prelude) parallel array data constructors other a-z: lower case chars for unique supplies. Used so far: a TypeChecking? c StgToCmm/Renamer d desugarer f AbsC flattener i TypeChecking interface files j constraint tuple superclass selectors k constraint tuple tycons m constraint tuple datacons n Native/LLVM codegen r Hsc name cache s simplifier u Cmm pipeline y GHCi bytecode generator z anonymous sums -} mkAlphaTyVarUnique :: Int -> Unique mkPreludeClassUnique :: Int -> Unique mkPrimOpIdUnique :: Int -> Unique -- See Note [Primop wrappers] in GHC.Builtin.PrimOps. mkPrimOpWrapperUnique :: Int -> Unique mkPreludeMiscIdUnique :: Int -> Unique mkAlphaTyVarUnique i = mkUnique '1' i mkPreludeClassUnique i = mkUnique '2' i -------------------------------------------------- mkPrimOpIdUnique op = mkUnique '9' (2*op) mkPrimOpWrapperUnique op = mkUnique '9' (2*op+1) mkPreludeMiscIdUnique i = mkUnique '0' i mkPseudoUniqueE, mkBuiltinUnique :: Int -> Unique mkBuiltinUnique i = mkUnique 'B' i mkPseudoUniqueE i = mkUnique 'E' i -- used in NCG spiller to create spill VirtualRegs mkRegSingleUnique, mkRegPairUnique, mkRegSubUnique, mkRegClassUnique :: Int -> Unique mkRegSingleUnique = mkUnique 'R' mkRegSubUnique = mkUnique 'S' mkRegPairUnique = mkUnique 'P' mkRegClassUnique = mkUnique 'L' mkCostCentreUnique :: Int -> Unique mkCostCentreUnique = mkUnique 'C' mkVarOccUnique, mkDataOccUnique, mkTvOccUnique, mkTcOccUnique :: FastString -> Unique -- See Note [The Unique of an OccName] in GHC.Types.Name.Occurrence mkVarOccUnique fs = mkUnique 'i' (uniqueOfFS fs) mkDataOccUnique fs = mkUnique 'd' (uniqueOfFS fs) mkTvOccUnique fs = mkUnique 'v' 
(uniqueOfFS fs) mkTcOccUnique fs = mkUnique 'c' (uniqueOfFS fs) initExitJoinUnique :: Unique initExitJoinUnique = mkUnique 's' 0 -------------------------------------------------- -- Wired-in type constructor keys occupy *two* slots: -- * u: the TyCon itself -- * u+1: the TyConRepName of the TyCon mkPreludeTyConUnique :: Int -> Unique mkPreludeTyConUnique i = mkUnique '3' (2*i) tyConRepNameUnique :: Unique -> Unique tyConRepNameUnique u = incrUnique u -------------------------------------------------- -- Wired-in data constructor keys occupy *three* slots: -- * u: the DataCon itself -- * u+1: its worker Id -- * u+2: the TyConRepName of the promoted TyCon -- Prelude data constructors are too simple to need wrappers. mkPreludeDataConUnique :: Int -> Unique mkPreludeDataConUnique i = mkUnique '6' (3*i) -- Must be alphabetic -------------------------------------------------- dataConTyRepNameUnique, dataConWorkerUnique :: Unique -> Unique dataConWorkerUnique u = incrUnique u dataConTyRepNameUnique u = stepUnique u 2 ghc-lib-parser-9.4.7.20230826/compiler/GHC/ByteCode/Types.hs0000644000000000000000000002225414472400112020776 0ustar0000000000000000{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE RecordWildCards #-} -- -- (c) The University of Glasgow 2002-2006 -- -- | Bytecode assembler types module GHC.ByteCode.Types ( CompiledByteCode(..), seqCompiledByteCode , FFIInfo(..) , RegBitmap(..) , NativeCallType(..), NativeCallInfo(..), voidTupleReturnInfo, voidPrimCallInfo , ByteOff(..), WordOff(..) , UnlinkedBCO(..), BCOPtr(..), BCONPtr(..) , ItblEnv, ItblPtr(..) , AddrEnv, AddrPtr(..) , CgBreakInfo(..) 
, ModBreaks (..), BreakIndex, emptyModBreaks , CCostCentre ) where import GHC.Prelude import GHC.Data.FastString import GHC.Data.SizedSeq import GHC.Types.Name import GHC.Types.Name.Env import GHC.Utils.Outputable import GHC.Builtin.PrimOps import GHC.Types.SrcLoc import GHCi.BreakArray import GHCi.RemoteTypes import GHCi.FFI import Control.DeepSeq import Foreign import Data.Array import Data.Array.Base ( UArray(..) ) import Data.ByteString (ByteString) import Data.IntMap (IntMap) import qualified Data.IntMap as IntMap import qualified GHC.Exts.Heap as Heap import GHC.Stack.CCS import GHC.Cmm.Expr ( GlobalRegSet, emptyRegSet, regSetToList ) import GHC.Iface.Syntax -- ----------------------------------------------------------------------------- -- Compiled Byte Code data CompiledByteCode = CompiledByteCode { bc_bcos :: [UnlinkedBCO] -- Bunch of interpretable bindings , bc_itbls :: ItblEnv -- A mapping from DataCons to their itbls , bc_ffis :: [FFIInfo] -- ffi blocks we allocated , bc_strs :: AddrEnv -- malloc'd top-level strings , bc_breaks :: Maybe ModBreaks -- breakpoint info (Nothing if we're not -- creating breakpoints, for some reason) } -- ToDo: we're not tracking strings that we malloc'd newtype FFIInfo = FFIInfo (RemotePtr C_ffi_cif) deriving (Show, NFData) instance Outputable CompiledByteCode where ppr CompiledByteCode{..} = ppr bc_bcos -- Not a real NFData instance, because ModBreaks contains some things -- we can't rnf seqCompiledByteCode :: CompiledByteCode -> () seqCompiledByteCode CompiledByteCode{..} = rnf bc_bcos `seq` seqEltsNameEnv rnf bc_itbls `seq` rnf bc_ffis `seq` seqEltsNameEnv rnf bc_strs `seq` rnf (fmap seqModBreaks bc_breaks) newtype ByteOff = ByteOff Int deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Outputable) newtype WordOff = WordOff Int deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Outputable) newtype RegBitmap = RegBitmap { unRegBitmap :: Word32 } deriving (Enum, Eq, Show, Integral, Num, Ord, Real, Bits, FiniteBits, 
Outputable) {- Note [GHCi TupleInfo] ~~~~~~~~~~~~~~~~~~~~~~~~ This contains the data we need for passing unboxed tuples between bytecode and native code In general we closely follow the native calling convention that GHC uses for unboxed tuples, but we don't use any registers in bytecode. All tuple elements are expanded to use a full register or a full word on the stack. The position of tuple elements that are returned on the stack in the native calling convention is unchanged when returning the same tuple in bytecode. The order of the remaining elements is determined by the register in which they would have been returned, rather than by their position in the tuple in the Haskell source code. This makes jumping between bytecode and native code easier: A map of live registers is enough to convert the tuple. See GHC.StgToByteCode.layoutTuple for more details. -} data NativeCallType = NativePrimCall | NativeTupleReturn deriving (Eq) data NativeCallInfo = NativeCallInfo { nativeCallType :: !NativeCallType , nativeCallSize :: !WordOff -- total size of arguments in words , nativeCallRegs :: !GlobalRegSet , nativeCallStackSpillSize :: !WordOff {- words spilled on the stack by GHCs native calling convention -} } instance Outputable NativeCallInfo where ppr NativeCallInfo{..} = text " ppr nativeCallSize <+> text "stack" <+> ppr nativeCallStackSpillSize <+> text "regs" <+> ppr (map (text . 
show) $ regSetToList nativeCallRegs) <> char '>' voidTupleReturnInfo :: NativeCallInfo voidTupleReturnInfo = NativeCallInfo NativeTupleReturn 0 emptyRegSet 0 voidPrimCallInfo :: NativeCallInfo voidPrimCallInfo = NativeCallInfo NativePrimCall 0 emptyRegSet 0 type ItblEnv = NameEnv (Name, ItblPtr) type AddrEnv = NameEnv (Name, AddrPtr) -- We need the Name in the range so we know which -- elements to filter out when unloading a module newtype ItblPtr = ItblPtr (RemotePtr Heap.StgInfoTable) deriving (Show, NFData) newtype AddrPtr = AddrPtr (RemotePtr ()) deriving (NFData) data UnlinkedBCO = UnlinkedBCO { unlinkedBCOName :: !Name, unlinkedBCOArity :: {-# UNPACK #-} !Int, unlinkedBCOInstrs :: !(UArray Int Word16), -- insns unlinkedBCOBitmap :: !(UArray Int Word64), -- bitmap unlinkedBCOLits :: !(SizedSeq BCONPtr), -- non-ptrs unlinkedBCOPtrs :: !(SizedSeq BCOPtr) -- ptrs } instance NFData UnlinkedBCO where rnf UnlinkedBCO{..} = rnf unlinkedBCOLits `seq` rnf unlinkedBCOPtrs data BCOPtr = BCOPtrName !Name | BCOPtrPrimOp !PrimOp | BCOPtrBCO !UnlinkedBCO | BCOPtrBreakArray -- a pointer to this module's BreakArray instance NFData BCOPtr where rnf (BCOPtrBCO bco) = rnf bco rnf x = x `seq` () data BCONPtr = BCONPtrWord {-# UNPACK #-} !Word | BCONPtrLbl !FastString | BCONPtrItbl !Name -- | A reference to a top-level string literal; see -- Note [Generating code for top-level string literal bindings] in GHC.StgToByteCode. | BCONPtrAddr !Name -- | Only used internally in the assembler in an intermediate representation; -- should never appear in a fully-assembled UnlinkedBCO. -- Also see Note [Allocating string literals] in GHC.ByteCode.Asm. | BCONPtrStr !ByteString instance NFData BCONPtr where rnf x = x `seq` () -- | Information about a breakpoint that we know at code-generation time -- In order to be used, this needs to be hydrated relative to the current HscEnv by -- 'hydrateCgBreakInfo'. 
Everything here can be fully forced and that's critical for -- preventing space leaks (see #22530) data CgBreakInfo = CgBreakInfo { cgb_tyvars :: ![IfaceTvBndr] -- ^ Type variables in scope at the breakpoint , cgb_vars :: ![Maybe (IfaceIdBndr, Word16)] , cgb_resty :: !IfaceType } -- See Note [Syncing breakpoint info] in GHC.Runtime.Eval seqCgBreakInfo :: CgBreakInfo -> () seqCgBreakInfo CgBreakInfo{..} = rnf cgb_tyvars `seq` rnf cgb_vars `seq` rnf cgb_resty instance Outputable UnlinkedBCO where ppr (UnlinkedBCO nm _arity _insns _bitmap lits ptrs) = sep [text "BCO", ppr nm, text "with", ppr (sizeSS lits), text "lits", ppr (sizeSS ptrs), text "ptrs" ] instance Outputable CgBreakInfo where ppr info = text "CgBreakInfo" <+> parens (ppr (cgb_vars info) <+> ppr (cgb_resty info)) -- ----------------------------------------------------------------------------- -- Breakpoints -- | Breakpoint index type BreakIndex = Int -- | C CostCentre type data CCostCentre -- | All the information about the breakpoints for a module data ModBreaks = ModBreaks { modBreaks_flags :: ForeignRef BreakArray -- ^ The array of flags, one per breakpoint, -- indicating which breakpoints are enabled. , modBreaks_locs :: !(Array BreakIndex SrcSpan) -- ^ An array giving the source span of each breakpoint. , modBreaks_vars :: !(Array BreakIndex [OccName]) -- ^ An array giving the names of the free variables at each breakpoint. , modBreaks_decls :: !(Array BreakIndex [String]) -- ^ An array giving the names of the declarations enclosing each breakpoint. 
-- See Note [Field modBreaks_decls] , modBreaks_ccs :: !(Array BreakIndex (RemotePtr CostCentre)) -- ^ Array pointing to cost centre for each breakpoint , modBreaks_breakInfo :: IntMap CgBreakInfo -- ^ info about each breakpoint from the bytecode generator } seqModBreaks :: ModBreaks -> () seqModBreaks ModBreaks{..} = rnf modBreaks_flags `seq` rnf modBreaks_locs `seq` rnf modBreaks_vars `seq` rnf modBreaks_decls `seq` rnf modBreaks_ccs `seq` rnf (fmap seqCgBreakInfo modBreaks_breakInfo) -- | Construct an empty ModBreaks emptyModBreaks :: ModBreaks emptyModBreaks = ModBreaks { modBreaks_flags = error "ModBreaks.modBreaks_array not initialised" -- ToDo: can we avoid this? , modBreaks_locs = array (0,-1) [] , modBreaks_vars = array (0,-1) [] , modBreaks_decls = array (0,-1) [] , modBreaks_ccs = array (0,-1) [] , modBreaks_breakInfo = IntMap.empty } {- Note [Field modBreaks_decls] ~~~~~~~~~~~~~~~~~~~~~~ A value of eg ["foo", "bar", "baz"] in a `modBreaks_decls` field means: The breakpoint is in the function called "baz" that is declared in a `let` or `where` clause of a declaration called "bar", which itself is declared in a `let` or `where` clause of the top-level function called "foo". -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm.hs0000644000000000000000000002472214472400112016712 0ustar0000000000000000-- Cmm representations using Hoopl's Graph CmmNode e x. 
{-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE ExplicitNamespaces #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Cmm ( -- * Cmm top-level datatypes CmmProgram, CmmGroup, CmmGroupSRTs, RawCmmGroup, GenCmmGroup, CmmDecl, CmmDeclSRTs, GenCmmDecl(..), CmmGraph, GenCmmGraph(..), CmmBlock, RawCmmDecl, Section(..), SectionType(..), GenCmmStatics(..), type CmmStatics, type RawCmmStatics, CmmStatic(..), SectionProtection(..), sectionProtection, -- ** Blocks containing lists GenBasicBlock(..), blockId, ListGraph(..), pprBBlock, -- * Info Tables CmmTopInfo(..), CmmStackInfo(..), CmmInfoTable(..), topInfoTable, ClosureTypeInfo(..), ProfilingInfo(..), ConstrDescription, -- * Statements, expressions and types module GHC.Cmm.Node, module GHC.Cmm.Expr, ) where import GHC.Prelude import GHC.Types.Id import GHC.Types.CostCentre import GHC.Cmm.CLabel import GHC.Cmm.BlockId import GHC.Cmm.Node import GHC.Runtime.Heap.Layout import GHC.Cmm.Expr import GHC.Cmm.Dataflow.Block import GHC.Cmm.Dataflow.Collections import GHC.Cmm.Dataflow.Graph import GHC.Cmm.Dataflow.Label import GHC.Utils.Outputable import Data.ByteString (ByteString) ----------------------------------------------------------------------------- -- Cmm, GenCmm ----------------------------------------------------------------------------- -- A CmmProgram is a list of CmmGroups -- A CmmGroup is a list of top-level declarations -- When object-splitting is on, each group is compiled into a separate -- .o file. So typically we put closely related stuff in a CmmGroup. -- Section-splitting follows suit and makes one .text subsection for each -- CmmGroup. 
type CmmProgram = [CmmGroup] type GenCmmGroup d h g = [GenCmmDecl d h g] -- | Cmm group before SRT generation type CmmGroup = GenCmmGroup CmmStatics CmmTopInfo CmmGraph -- | Cmm group with SRTs type CmmGroupSRTs = GenCmmGroup RawCmmStatics CmmTopInfo CmmGraph -- | "Raw" cmm group (TODO (osa): not sure what that means) type RawCmmGroup = GenCmmGroup RawCmmStatics (LabelMap RawCmmStatics) CmmGraph ----------------------------------------------------------------------------- -- CmmDecl, GenCmmDecl ----------------------------------------------------------------------------- -- GenCmmDecl is abstracted over -- d, the type of static data elements in CmmData -- h, the static info preceding the code of a CmmProc -- g, the control-flow graph of a CmmProc -- -- We expect there to be two main instances of this type: -- (a) C--, i.e. populated with various C-- constructs -- (b) Native code, populated with data/instructions -- | A top-level chunk, abstracted over the type of the contents of -- the basic blocks (Cmm or instructions are the likely instantiations). data GenCmmDecl d h g = CmmProc -- A procedure h -- Extra header such as the info table CLabel -- Entry label [GlobalReg] -- Registers live on entry. Note that the set of live -- registers will be correct in generated C-- code, but -- not in hand-written C-- code. However, -- splitAtProcPoints calculates correct liveness -- information for CmmProcs. 
g -- Control-flow graph for the procedure's code | CmmData -- Static data Section d deriving (Functor) type CmmDecl = GenCmmDecl CmmStatics CmmTopInfo CmmGraph type CmmDeclSRTs = GenCmmDecl RawCmmStatics CmmTopInfo CmmGraph type RawCmmDecl = GenCmmDecl RawCmmStatics (LabelMap RawCmmStatics) CmmGraph ----------------------------------------------------------------------------- -- Graphs ----------------------------------------------------------------------------- type CmmGraph = GenCmmGraph CmmNode data GenCmmGraph n = CmmGraph { g_entry :: BlockId, g_graph :: Graph n C C } type CmmBlock = Block CmmNode C C ----------------------------------------------------------------------------- -- Info Tables ----------------------------------------------------------------------------- -- | CmmTopInfo is attached to each CmmDecl (see defn of CmmGroup), and contains -- the extra info (beyond the executable code) that belongs to that CmmDecl. data CmmTopInfo = TopInfo { info_tbls :: LabelMap CmmInfoTable , stack_info :: CmmStackInfo } topInfoTable :: GenCmmDecl a CmmTopInfo (GenCmmGraph n) -> Maybe CmmInfoTable topInfoTable (CmmProc infos _ _ g) = mapLookup (g_entry g) (info_tbls infos) topInfoTable _ = Nothing data CmmStackInfo = StackInfo { arg_space :: ByteOff, -- number of bytes of arguments on the stack on entry to the -- the proc. This is filled in by GHC.StgToCmm.codeGen, and -- used by the stack allocator later. do_layout :: Bool -- Do automatic stack layout for this proc. This is -- True for all code generated by the code generator, -- but is occasionally False for hand-written Cmm where -- we want to do the stack manipulation manually. 
} -- | Info table as a haskell data type data CmmInfoTable = CmmInfoTable { cit_lbl :: CLabel, -- Info table label cit_rep :: SMRep, cit_prof :: ProfilingInfo, cit_srt :: Maybe CLabel, -- empty, or a closure address cit_clo :: Maybe (Id, CostCentreStack) -- Just (id,ccs) <=> build a static closure later -- Nothing <=> don't build a static closure -- -- Static closures for FUNs and THUNKs are *not* generated by -- the code generator, because we might want to add SRT -- entries to them later (for FUNs at least; THUNKs are -- treated the same for consistency). See Note [SRTs] in -- GHC.Cmm.Info.Build, in particular the [FUN] optimisation. -- -- This is strictly speaking not a part of the info table that -- will be finally generated, but it's the only convenient -- place to convey this information from the code generator to -- where we build the static closures in -- GHC.Cmm.Info.Build.doSRTs. } deriving Eq data ProfilingInfo = NoProfilingInfo | ProfilingInfo ByteString ByteString -- closure_type, closure_desc deriving Eq ----------------------------------------------------------------------------- -- Static Data ----------------------------------------------------------------------------- data SectionType = Text | Data | ReadOnlyData | RelocatableReadOnlyData | UninitialisedData | ReadOnlyData16 -- .rodata.cst16 on x86_64, 16-byte aligned -- See Note [Initializers and finalizers in Cmm] in GHC.Cmm.InitFini | InitArray -- .init_array on ELF, .ctor on Windows | FiniArray -- .fini_array on ELF, .dtor on Windows | CString | OtherSection String deriving (Show) data SectionProtection = ReadWriteSection | ReadOnlySection | WriteProtectedSection -- See Note [Relocatable Read-Only Data] deriving (Eq) -- | Should a data in this section be considered constant at runtime sectionProtection :: Section -> SectionProtection sectionProtection (Section t _) = case t of Text -> ReadOnlySection ReadOnlyData -> ReadOnlySection RelocatableReadOnlyData -> WriteProtectedSection 
ReadOnlyData16 -> ReadOnlySection InitArray -> ReadOnlySection FiniArray -> ReadOnlySection CString -> ReadOnlySection Data -> ReadWriteSection UninitialisedData -> ReadWriteSection (OtherSection _) -> ReadWriteSection {- Note [Relocatable Read-Only Data] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Relocatable data are only read-only after relocation at the start of the program. They should be writable from the source code until then. Failure to do so would end up in segfaults at execution when using linkers that do not enforce writability of those sections, such as the gold linker. -} data Section = Section SectionType CLabel data CmmStatic = CmmStaticLit CmmLit -- ^ a literal value, size given by cmmLitRep of the literal. | CmmUninitialised Int -- ^ uninitialised data, N bytes long | CmmString ByteString -- ^ string of 8-bit values only, not zero terminated. | CmmFileEmbed FilePath -- ^ an embedded binary file instance Outputable CmmStatic where ppr (CmmStaticLit lit) = text "CmmStaticLit" <+> ppr lit ppr (CmmUninitialised n) = text "CmmUninitialised" <+> ppr n ppr (CmmString _) = text "CmmString" ppr (CmmFileEmbed fp) = text "CmmFileEmbed" <+> text fp -- Static data before SRT generation data GenCmmStatics (rawOnly :: Bool) where CmmStatics :: CLabel -- Label of statics -> CmmInfoTable -> CostCentreStack -> [CmmLit] -- Payload -> GenCmmStatics 'False -- | Static data, after SRTs are generated CmmStaticsRaw :: CLabel -- Label of statics -> [CmmStatic] -- The static data itself -> GenCmmStatics a type CmmStatics = GenCmmStatics 'False type RawCmmStatics = GenCmmStatics 'True -- ----------------------------------------------------------------------------- -- Basic blocks consisting of lists -- These are used by the LLVM and NCG backends, when populating Cmm -- with lists of instructions. 
data GenBasicBlock i = BasicBlock BlockId [i] deriving (Functor) -- | The branch block id is that of the first block in -- the branch, which is that branch's entry point blockId :: GenBasicBlock i -> BlockId blockId (BasicBlock blk_id _ ) = blk_id newtype ListGraph i = ListGraph [GenBasicBlock i] deriving (Functor) instance Outputable instr => Outputable (ListGraph instr) where ppr (ListGraph blocks) = vcat (map ppr blocks) instance OutputableP env instr => OutputableP env (ListGraph instr) where pdoc env g = ppr (fmap (pdoc env) g) instance Outputable instr => Outputable (GenBasicBlock instr) where ppr = pprBBlock instance OutputableP env instr => OutputableP env (GenBasicBlock instr) where pdoc env block = ppr (fmap (pdoc env) block) pprBBlock :: Outputable stmt => GenBasicBlock stmt -> SDoc pprBBlock (BasicBlock ident stmts) = hang (ppr ident <> colon) 4 (vcat (map ppr stmts)) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/BlockId.hs0000644000000000000000000000256414472400112020221 0ustar0000000000000000{-# LANGUAGE TypeSynonymInstances #-} {-# OPTIONS_GHC -fno-warn-orphans #-} {- BlockId module should probably go away completely, being superseded by Label -} module GHC.Cmm.BlockId ( BlockId, mkBlockId -- ToDo: BlockId should be abstract, but it isn't yet , newBlockId , blockLbl, infoTblLbl ) where import GHC.Prelude import GHC.Cmm.CLabel import GHC.Types.Id.Info import GHC.Types.Name import GHC.Types.Unique import GHC.Types.Unique.Supply import GHC.Cmm.Dataflow.Label (Label, mkHooplLabel) ---------------------------------------------------------------- --- Block Ids, their environments, and their sets {- Note [Unique BlockId] ~~~~~~~~~~~~~~~~~~~~~~~~ Although a 'BlockId' is a local label, for reasons of implementation, 'BlockId's must be unique within an entire compilation unit. 
The reason is that each local label is mapped to an assembly-language label, and in most assembly languages allow, a label is visible throughout the entire compilation unit in which it appears. -} type BlockId = Label mkBlockId :: Unique -> BlockId mkBlockId unique = mkHooplLabel $ getKey unique newBlockId :: MonadUnique m => m BlockId newBlockId = mkBlockId <$> getUniqueM blockLbl :: BlockId -> CLabel blockLbl label = mkLocalBlockLabel (getUnique label) infoTblLbl :: BlockId -> CLabel infoTblLbl label = mkBlockInfoTableLabel (mkFCallName (getUnique label) "block") NoCafRefs ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/CLabel.hs0000644000000000000000000021752614472400112020042 0ustar0000000000000000----------------------------------------------------------------------------- -- -- Object-file symbols (called CLabel for histerical raisins). -- -- (c) The University of Glasgow 2004-2006 -- ----------------------------------------------------------------------------- {-# LANGUAGE BangPatterns #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Cmm.CLabel ( CLabel, -- abstract type NeedExternDecl (..), ForeignLabelSource(..), DynamicLinkerLabelInfo(..), ConInfoTableLocation(..), getConInfoTableLocation, -- * Constructors mkClosureLabel, mkSRTLabel, mkInfoTableLabel, mkEntryLabel, mkRednCountsLabel, mkTagHitLabel, mkConInfoTableLabel, mkApEntryLabel, mkApInfoTableLabel, mkClosureTableLabel, mkBytesLabel, mkLocalBlockLabel, mkBlockInfoTableLabel, mkBitmapLabel, mkStringLitLabel, mkInitializerStubLabel, mkInitializerArrayLabel, mkFinalizerStubLabel, mkFinalizerArrayLabel, mkAsmTempLabel, mkAsmTempDerivedLabel, mkAsmTempEndLabel, mkAsmTempProcEndLabel, mkAsmTempDieLabel, mkDirty_MUT_VAR_Label, mkMUT_VAR_CLEAN_infoLabel, mkNonmovingWriteBarrierEnabledLabel, mkUpdInfoLabel, mkBHUpdInfoLabel, mkIndStaticInfoLabel, mkMainCapabilityLabel, mkMAP_FROZEN_CLEAN_infoLabel, mkMAP_FROZEN_DIRTY_infoLabel, mkMAP_DIRTY_infoLabel, 
mkSMAP_FROZEN_CLEAN_infoLabel, mkSMAP_FROZEN_DIRTY_infoLabel, mkSMAP_DIRTY_infoLabel, mkBadAlignmentLabel, mkOutOfBoundsAccessLabel, mkMemcpyRangeOverlapLabel, mkArrWords_infoLabel, mkSRTInfoLabel, mkTopTickyCtrLabel, mkCAFBlackHoleInfoTableLabel, mkRtsPrimOpLabel, mkRtsSlowFastTickyCtrLabel, mkSelectorInfoLabel, mkSelectorEntryLabel, mkCmmInfoLabel, mkCmmEntryLabel, mkCmmRetInfoLabel, mkCmmRetLabel, mkCmmCodeLabel, mkCmmDataLabel, mkRtsCmmDataLabel, mkCmmClosureLabel, mkRtsApFastLabel, mkPrimCallLabel, mkForeignLabel, mkCCLabel, mkCCSLabel, mkIPELabel, InfoProvEnt(..), mkDynamicLinkerLabel, mkPicBaseLabel, mkDeadStripPreventer, mkHpcTicksLabel, -- * Predicates hasCAF, needsCDecl, maybeLocalBlockLabel, externallyVisibleCLabel, isMathFun, isCFunctionLabel, isGcPtrLabel, labelDynamic, isLocalCLabel, mayRedirectTo, isInfoTableLabel, isConInfoTableLabel, isIdLabel, isTickyLabel, hasHaskellName, hasIdLabelInfo, isBytesLabel, isForeignLabel, isSomeRODataLabel, isStaticClosureLabel, -- * Conversions toClosureLbl, toSlowEntryLbl, toEntryLbl, toInfoLbl, -- * Pretty-printing LabelStyle (..), pprDebugCLabel, pprCLabel, ppInternalProcLabel, -- * Others dynamicLinkerLabelInfo, addLabelSize, foreignLabelStdcallInfo ) where import GHC.Prelude import GHC.Types.Id.Info import GHC.Types.Basic import {-# SOURCE #-} GHC.Cmm.BlockId (BlockId, mkBlockId) import GHC.Unit.Types import GHC.Types.Name import GHC.Types.Unique import GHC.Builtin.PrimOps import GHC.Types.CostCentre import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.FastString import GHC.Platform import GHC.Types.Unique.Set import GHC.Utils.Misc import GHC.Core.Ppr ( {- instances -} ) import GHC.Types.SrcLoc -- ----------------------------------------------------------------------------- -- The CLabel type {- | 'CLabel' is an abstract type that supports the following operations: - Pretty printing - In a C file, does it need to be declared before use? (i.e. 
is it guaranteed to be already in scope in the places we need to refer to it?) - If it needs to be declared, what type (code or data) should it be declared to have? - Is it visible outside this object file or not? - Is it "dynamic" (see details below) - Eq and Ord, so that we can make sets of CLabels (currently only used in outputting C as far as I can tell, to avoid generating more than one declaration for any given label). - Converting an info table label into an entry label. CLabel usage is a bit messy in GHC as they are used in a number of different contexts: - By the C-- AST to identify labels - By the unregisterised C code generator (\"PprC\") for naming functions (hence the name 'CLabel') - By the native and LLVM code generators to identify labels For extra fun, each of these uses a slightly different subset of constructors (e.g. 'AsmTempLabel' and 'AsmTempDerivedLabel' are used only in the NCG and LLVM backends). In general, we use 'IdLabel' to represent Haskell things early in the pipeline. However, later optimization passes will often represent blocks they create with 'LocalBlockLabel' where there is no obvious 'Name' to hang off the label. -} data CLabel = -- | A label related to the definition of a particular Id or Con in a .hs file. IdLabel Name CafInfo IdLabelInfo -- ^ encodes the suffix of the label -- | A label from a .cmm file that is not associated with a .hs level Id. | CmmLabel UnitId -- ^ what package the label belongs to. NeedExternDecl -- ^ does the label need an "extern .." declaration FastString -- ^ identifier giving the prefix of the label CmmLabelInfo -- ^ encodes the suffix of the label -- | A label with a baked-in \/ algorithmically generated name that definitely -- comes from the RTS. The code for it must compile into libHSrts.a \/ libHSrts.so -- If it doesn't have an algorithmically generated name then use a CmmLabel -- instead and give it an appropriate UnitId argument. | RtsLabel RtsLabelInfo -- | A label associated with a block. 
These aren't visible outside of the -- compilation unit in which they are defined. These are generally used to -- name blocks produced by Cmm-to-Cmm passes and the native code generator, -- where we don't have a 'Name' to associate the label to and therefore can't -- use 'IdLabel'. | LocalBlockLabel {-# UNPACK #-} !Unique -- | A 'C' (or otherwise foreign) label. -- | ForeignLabel FastString -- ^ name of the imported label. (Maybe Int) -- ^ possible '@n' suffix for stdcall functions -- When generating C, the '@n' suffix is omitted, but when -- generating assembler we must add it to the label. ForeignLabelSource -- ^ what package the foreign label is in. FunctionOrData -- | Local temporary label used for native (or LLVM) code generation; must not -- appear outside of these contexts. Use primarily for debug information | AsmTempLabel {-# UNPACK #-} !Unique -- | A label \"derived\" from another 'CLabel' by the addition of a suffix. -- Must not occur outside of the NCG or LLVM code generators. | AsmTempDerivedLabel CLabel FastString -- ^ suffix | StringLitLabel {-# UNPACK #-} !Unique | CC_Label CostCentre | CCS_Label CostCentreStack | IPE_Label InfoProvEnt -- | A per-module metadata label. | ModuleLabel !Module ModuleLabelKind -- | These labels are generated and used inside the NCG only. -- They are special variants of a label used for dynamic linking -- see module "GHC.CmmToAsm.PIC" for details. | DynamicLinkerLabel DynamicLinkerLabelInfo CLabel -- | This label is generated and used inside the NCG only. -- It is used as a base for PIC calculations on some platforms. -- It takes the form of a local numeric assembler label '1'; and -- is pretty-printed as 1b, referring to the previous definition -- of 1: in the assembler source file. 
| PicBaseLabel -- | A label before an info table to prevent excessive dead-stripping on darwin | DeadStripPreventer CLabel -- | Per-module table of tick locations | HpcTicksLabel Module -- | Static reference table | SRTLabel {-# UNPACK #-} !Unique -- | A bitmap (function or case return) | LargeBitmapLabel {-# UNPACK #-} !Unique deriving Eq instance Show CLabel where show = showPprUnsafe . pprDebugCLabel genericPlatform instance Outputable CLabel where ppr = text . show data ModuleLabelKind = MLK_Initializer String | MLK_InitializerArray | MLK_Finalizer String | MLK_FinalizerArray | MLK_IPEBuffer deriving (Eq, Ord) instance Outputable ModuleLabelKind where ppr MLK_InitializerArray = text "init_arr" ppr (MLK_Initializer s) = text ("init__" ++ s) ppr MLK_FinalizerArray = text "fini_arr" ppr (MLK_Finalizer s) = text ("fini__" ++ s) ppr MLK_IPEBuffer = text "ipe_buf" isIdLabel :: CLabel -> Bool isIdLabel IdLabel{} = True isIdLabel _ = False -- Used in SRT analysis. See Note [Ticky labels in SRT analysis] in -- GHC.Cmm.Info.Build. isTickyLabel :: CLabel -> Bool isTickyLabel (IdLabel _ _ IdTickyInfo{}) = True isTickyLabel _ = False -- | Indicate if "GHC.CmmToC" has to generate an extern declaration for the -- label (e.g. "extern StgWordArray(foo)"). The type is fixed to StgWordArray. -- -- Symbols from the RTS don't need "extern" declarations because they are -- exposed via "rts/include/Stg.h" with the appropriate type. See 'needsCDecl'. -- -- The fixed StgWordArray type led to "conflicting types" issues with user -- provided Cmm files (not in the RTS) that declare data of another type (#15467 -- and test for #17920). Hence the Cmm parser considers that labels in data -- sections don't need the "extern" declaration (just add one explicitly if you -- need it). -- -- See https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/compiler/backends/ppr-c#prototypes -- for why extern declaration are needed at all. 
newtype NeedExternDecl = NeedExternDecl Bool deriving (Ord,Eq) -- This is laborious, but necessary. We can't derive Ord because -- Unique doesn't have an Ord instance. Note nonDetCmpUnique in the -- implementation. See Note [No Ord for Unique] -- This is non-deterministic but we do not currently support deterministic -- code-generation. See Note [Unique Determinism and code generation] instance Ord CLabel where compare (IdLabel a1 b1 c1) (IdLabel a2 b2 c2) = compare a1 a2 `thenCmp` compare b1 b2 `thenCmp` compare c1 c2 compare (CmmLabel a1 b1 c1 d1) (CmmLabel a2 b2 c2 d2) = compare a1 a2 `thenCmp` compare b1 b2 `thenCmp` -- This non-determinism is "safe" in the sense that it only affects object code, -- which is currently not covered by GHC's determinism guarantees. See #12935. uniqCompareFS c1 c2 `thenCmp` compare d1 d2 compare (RtsLabel a1) (RtsLabel a2) = compare a1 a2 compare (LocalBlockLabel u1) (LocalBlockLabel u2) = nonDetCmpUnique u1 u2 compare (ForeignLabel a1 b1 c1 d1) (ForeignLabel a2 b2 c2 d2) = uniqCompareFS a1 a2 `thenCmp` compare b1 b2 `thenCmp` compare c1 c2 `thenCmp` compare d1 d2 compare (AsmTempLabel u1) (AsmTempLabel u2) = nonDetCmpUnique u1 u2 compare (AsmTempDerivedLabel a1 b1) (AsmTempDerivedLabel a2 b2) = compare a1 a2 `thenCmp` lexicalCompareFS b1 b2 compare (StringLitLabel u1) (StringLitLabel u2) = nonDetCmpUnique u1 u2 compare (CC_Label a1) (CC_Label a2) = compare a1 a2 compare (CCS_Label a1) (CCS_Label a2) = compare a1 a2 compare (IPE_Label a1) (IPE_Label a2) = compare a1 a2 compare (ModuleLabel m1 k1) (ModuleLabel m2 k2) = compare m1 m2 `thenCmp` compare k1 k2 compare (DynamicLinkerLabel a1 b1) (DynamicLinkerLabel a2 b2) = compare a1 a2 `thenCmp` compare b1 b2 compare PicBaseLabel PicBaseLabel = EQ compare (DeadStripPreventer a1) (DeadStripPreventer a2) = compare a1 a2 compare (HpcTicksLabel a1) (HpcTicksLabel a2) = compare a1 a2 compare (SRTLabel u1) (SRTLabel u2) = nonDetCmpUnique u1 u2 compare (LargeBitmapLabel u1) (LargeBitmapLabel 
u2) = nonDetCmpUnique u1 u2 compare IdLabel{} _ = LT compare _ IdLabel{} = GT compare CmmLabel{} _ = LT compare _ CmmLabel{} = GT compare RtsLabel{} _ = LT compare _ RtsLabel{} = GT compare LocalBlockLabel{} _ = LT compare _ LocalBlockLabel{} = GT compare ForeignLabel{} _ = LT compare _ ForeignLabel{} = GT compare AsmTempLabel{} _ = LT compare _ AsmTempLabel{} = GT compare AsmTempDerivedLabel{} _ = LT compare _ AsmTempDerivedLabel{} = GT compare StringLitLabel{} _ = LT compare _ StringLitLabel{} = GT compare CC_Label{} _ = LT compare _ CC_Label{} = GT compare CCS_Label{} _ = LT compare _ CCS_Label{} = GT compare DynamicLinkerLabel{} _ = LT compare _ DynamicLinkerLabel{} = GT compare PicBaseLabel{} _ = LT compare _ PicBaseLabel{} = GT compare DeadStripPreventer{} _ = LT compare _ DeadStripPreventer{} = GT compare HpcTicksLabel{} _ = LT compare _ HpcTicksLabel{} = GT compare SRTLabel{} _ = LT compare _ SRTLabel{} = GT compare (IPE_Label {}) _ = LT compare _ (IPE_Label{}) = GT compare (ModuleLabel {}) _ = LT compare _ (ModuleLabel{}) = GT -- | Record where a foreign label is stored. data ForeignLabelSource -- | Label is in a named package = ForeignLabelInPackage UnitId -- | Label is in some external, system package that doesn't also -- contain compiled Haskell code, and is not associated with any .hi files. -- We don't have to worry about Haskell code being inlined from -- external packages. It is safe to treat the RTS package as "external". | ForeignLabelInExternalPackage -- | Label is in the package currently being compiled. -- This is only used for creating hacky tmp labels during code generation. -- Don't use it in any code that might be inlined across a package boundary -- (ie, core code) else the information will be wrong relative to the -- destination module. | ForeignLabelInThisPackage deriving (Eq, Ord) -- | For debugging problems with the CLabel representation. -- We can't make a Show instance for CLabel because lots of its components don't have instances. 
-- The regular Outputable instance only shows the label name, and not its other info. -- pprDebugCLabel :: Platform -> CLabel -> SDoc pprDebugCLabel platform lbl = pprCLabel platform AsmStyle lbl <> parens extra where extra = case lbl of IdLabel _ _ info -> text "IdLabel" <> whenPprDebug (text ":" <> ppr info) CmmLabel pkg _ext _name _info -> text "CmmLabel" <+> ppr pkg RtsLabel{} -> text "RtsLabel" ForeignLabel _name mSuffix src funOrData -> text "ForeignLabel" <+> ppr mSuffix <+> ppr src <+> ppr funOrData _ -> text "other CLabel" -- Dynamic ticky info for the id. data TickyIdInfo = TickyRednCounts -- ^ Used for dynamic allocations | TickyInferedTag !Unique -- ^ Used to track dynamic hits of tag inference. deriving (Eq,Show) instance Outputable TickyIdInfo where ppr TickyRednCounts = text "ct_rdn" ppr (TickyInferedTag unique) = text "ct_tag[" <> ppr unique <> char ']' -- | Don't depend on this if you need determinism. -- No determinism in the ncg backend, so we use the unique for Ord. -- Even if it pains me slightly. instance Ord TickyIdInfo where compare TickyRednCounts TickyRednCounts = EQ compare TickyRednCounts _ = LT compare _ TickyRednCounts = GT compare (TickyInferedTag unique1) (TickyInferedTag unique2) = nonDetCmpUnique unique1 unique2 data IdLabelInfo = Closure -- ^ Label for closure | InfoTable -- ^ Info tables for closures; always read-only | Entry -- ^ Entry point | Slow -- ^ Slow entry point | LocalInfoTable -- ^ Like InfoTable but not externally visible | LocalEntry -- ^ Like Entry but not externally visible | IdTickyInfo !TickyIdInfo -- ^ Label of place to keep Ticky-ticky hit info for this Id | ConEntry ConInfoTableLocation -- ^ Constructor entry point, when `-fdistinct-info-tables` is enabled then -- each usage of a constructor will be given a unique number and a fresh info -- table will be created in the module where the constructor is used. The -- argument is used to keep track of which info table a usage of a constructor -- should use. 
When the argument is 'Nothing' then it uses the info table which -- is defined in the module where the datatype is declared, this is the usual case. -- When it is (Just (m, k)) it will use the kth info table defined in module m. The -- point of this inefficiency is so that you can work out where allocations of data -- constructors are coming from when you are debugging. | ConInfoTable ConInfoTableLocation -- ^ Corresponding info table | ClosureTable -- ^ Table of closures for Enum tycons | Bytes -- ^ Content of a string literal. See -- Note [Bytes label]. | BlockInfoTable -- ^ Like LocalInfoTable but for a proc-point block -- instead of a closure entry-point. -- See Note [Proc-point local block entry-points]. deriving (Eq, Ord) -- | Which module is the info table from, and which number was it. data ConInfoTableLocation = UsageSite Module Int | DefinitionSite deriving (Eq, Ord) instance Outputable ConInfoTableLocation where ppr (UsageSite m n) = text "Loc(" <> ppr n <> text "):" <+> ppr m ppr DefinitionSite = empty getConInfoTableLocation :: IdLabelInfo -> Maybe ConInfoTableLocation getConInfoTableLocation (ConInfoTable ci) = Just ci getConInfoTableLocation _ = Nothing instance Outputable IdLabelInfo where ppr Closure = text "Closure" ppr InfoTable = text "InfoTable" ppr Entry = text "Entry" ppr Slow = text "Slow" ppr LocalInfoTable = text "LocalInfoTable" ppr LocalEntry = text "LocalEntry" ppr (ConEntry mn) = text "ConEntry" <+> ppr mn ppr (ConInfoTable mn) = text "ConInfoTable" <+> ppr mn ppr ClosureTable = text "ClosureTable" ppr Bytes = text "Bytes" ppr BlockInfoTable = text "BlockInfoTable" ppr (IdTickyInfo info) = text "IdTickyInfo" <+> ppr info data RtsLabelInfo = RtsSelectorInfoTable Bool{-updatable-} Int{-offset-} -- ^ Selector thunks | RtsSelectorEntry Bool{-updatable-} Int{-offset-} | RtsApInfoTable Bool{-updatable-} Int{-arity-} -- ^ AP thunks | RtsApEntry Bool{-updatable-} Int{-arity-} | RtsPrimOp PrimOp | RtsApFast NonDetFastString -- ^ _fast versions 
of generic apply | RtsSlowFastTickyCtr String deriving (Eq,Ord) -- | What type of Cmm label we're dealing with. -- Determines the suffix appended to the name when a CLabel.CmmLabel -- is pretty printed. data CmmLabelInfo = CmmInfo -- ^ misc rts info tables, suffix _info | CmmEntry -- ^ misc rts entry points, suffix _entry | CmmRetInfo -- ^ misc rts ret info tables, suffix _info | CmmRet -- ^ misc rts return points, suffix _ret | CmmData -- ^ misc rts data bits, eg CHARLIKE_closure | CmmCode -- ^ misc rts code | CmmClosure -- ^ closures eg CHARLIKE_closure | CmmPrimCall -- ^ a prim call to some hand written Cmm code deriving (Eq, Ord) data DynamicLinkerLabelInfo = CodeStub -- MachO: Lfoo$stub, ELF: foo@plt | SymbolPtr -- MachO: Lfoo$non_lazy_ptr, Windows: __imp_foo | GotSymbolPtr -- ELF: foo@got | GotSymbolOffset -- ELF: foo@gotoff deriving (Eq, Ord) -- ----------------------------------------------------------------------------- -- Constructing CLabels -- ----------------------------------------------------------------------------- -- Constructing IdLabels -- These are always local: mkSRTLabel :: Unique -> CLabel mkSRTLabel u = SRTLabel u -- See Note [ticky for LNE] mkRednCountsLabel :: Name -> CLabel mkRednCountsLabel name = IdLabel name NoCafRefs (IdTickyInfo TickyRednCounts) mkTagHitLabel :: Name -> Unique -> CLabel mkTagHitLabel name !uniq = IdLabel name NoCafRefs (IdTickyInfo (TickyInferedTag uniq)) mkClosureLabel :: Name -> CafInfo -> CLabel mkInfoTableLabel :: Name -> CafInfo -> CLabel mkEntryLabel :: Name -> CafInfo -> CLabel mkClosureTableLabel :: Name -> CafInfo -> CLabel mkConInfoTableLabel :: Name -> ConInfoTableLocation -> CLabel mkBytesLabel :: Name -> CLabel mkClosureLabel name c = IdLabel name c Closure -- | Decicdes between external and local labels based on the names externality. 
mkInfoTableLabel name c | isExternalName name = IdLabel name c InfoTable | otherwise = IdLabel name c LocalInfoTable mkEntryLabel name c = IdLabel name c Entry mkClosureTableLabel name c = IdLabel name c ClosureTable -- Special case for the normal 'DefinitionSite' case so that the 'ConInfoTable' application can be floated to a CAF. mkConInfoTableLabel name DefinitionSite = IdLabel name NoCafRefs (ConInfoTable DefinitionSite) mkConInfoTableLabel name k = IdLabel name NoCafRefs (ConInfoTable k) mkBytesLabel name = IdLabel name NoCafRefs Bytes mkBlockInfoTableLabel :: Name -> CafInfo -> CLabel mkBlockInfoTableLabel name c = IdLabel name c BlockInfoTable -- See Note [Proc-point local block entry-points]. -- Constructing Cmm Labels mkDirty_MUT_VAR_Label, mkNonmovingWriteBarrierEnabledLabel, mkUpdInfoLabel, mkBHUpdInfoLabel, mkIndStaticInfoLabel, mkMainCapabilityLabel, mkMAP_FROZEN_CLEAN_infoLabel, mkMAP_FROZEN_DIRTY_infoLabel, mkMAP_DIRTY_infoLabel, mkArrWords_infoLabel, mkTopTickyCtrLabel, mkCAFBlackHoleInfoTableLabel, mkSMAP_FROZEN_CLEAN_infoLabel, mkSMAP_FROZEN_DIRTY_infoLabel, mkSMAP_DIRTY_infoLabel, mkBadAlignmentLabel, mkOutOfBoundsAccessLabel, mkMemcpyRangeOverlapLabel, mkMUT_VAR_CLEAN_infoLabel :: CLabel mkDirty_MUT_VAR_Label = mkForeignLabel (fsLit "dirty_MUT_VAR") Nothing ForeignLabelInExternalPackage IsFunction mkNonmovingWriteBarrierEnabledLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "nonmoving_write_barrier_enabled") CmmData mkUpdInfoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_upd_frame") CmmInfo mkBHUpdInfoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_bh_upd_frame" ) CmmInfo mkIndStaticInfoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_IND_STATIC") CmmInfo mkMainCapabilityLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "MainCapability") CmmData mkMAP_FROZEN_CLEAN_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_ARR_PTRS_FROZEN_CLEAN") CmmInfo 
mkMAP_FROZEN_DIRTY_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_ARR_PTRS_FROZEN_DIRTY") CmmInfo mkMAP_DIRTY_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_ARR_PTRS_DIRTY") CmmInfo mkTopTickyCtrLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "top_ct") CmmData mkCAFBlackHoleInfoTableLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_CAF_BLACKHOLE") CmmInfo mkArrWords_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_ARR_WORDS") CmmInfo mkSMAP_FROZEN_CLEAN_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN") CmmInfo mkSMAP_FROZEN_DIRTY_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY") CmmInfo mkSMAP_DIRTY_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_SMALL_MUT_ARR_PTRS_DIRTY") CmmInfo mkBadAlignmentLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_badAlignment") CmmEntry mkOutOfBoundsAccessLabel = mkForeignLabel (fsLit "rtsOutOfBoundsAccess") Nothing ForeignLabelInExternalPackage IsFunction mkMemcpyRangeOverlapLabel = mkForeignLabel (fsLit "rtsMemcpyRangeOverlap") Nothing ForeignLabelInExternalPackage IsFunction mkMUT_VAR_CLEAN_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_VAR_CLEAN") CmmInfo mkSRTInfoLabel :: Int -> CLabel mkSRTInfoLabel n = CmmLabel rtsUnitId (NeedExternDecl False) lbl CmmInfo where lbl = case n of 1 -> fsLit "stg_SRT_1" 2 -> fsLit "stg_SRT_2" 3 -> fsLit "stg_SRT_3" 4 -> fsLit "stg_SRT_4" 5 -> fsLit "stg_SRT_5" 6 -> fsLit "stg_SRT_6" 7 -> fsLit "stg_SRT_7" 8 -> fsLit "stg_SRT_8" 9 -> fsLit "stg_SRT_9" 10 -> fsLit "stg_SRT_10" 11 -> fsLit "stg_SRT_11" 12 -> fsLit "stg_SRT_12" 13 -> fsLit "stg_SRT_13" 14 -> fsLit "stg_SRT_14" 15 -> fsLit "stg_SRT_15" 16 -> fsLit "stg_SRT_16" _ -> panic "mkSRTInfoLabel" ----- mkCmmInfoLabel, mkCmmEntryLabel, mkCmmRetInfoLabel, mkCmmRetLabel, mkCmmCodeLabel, mkCmmClosureLabel :: UnitId 
-> FastString -> CLabel mkCmmDataLabel :: UnitId -> NeedExternDecl -> FastString -> CLabel mkRtsCmmDataLabel :: FastString -> CLabel mkCmmInfoLabel pkg str = CmmLabel pkg (NeedExternDecl True) str CmmInfo mkCmmEntryLabel pkg str = CmmLabel pkg (NeedExternDecl True) str CmmEntry mkCmmRetInfoLabel pkg str = CmmLabel pkg (NeedExternDecl True) str CmmRetInfo mkCmmRetLabel pkg str = CmmLabel pkg (NeedExternDecl True) str CmmRet mkCmmCodeLabel pkg str = CmmLabel pkg (NeedExternDecl True) str CmmCode mkCmmClosureLabel pkg str = CmmLabel pkg (NeedExternDecl True) str CmmClosure mkCmmDataLabel pkg ext str = CmmLabel pkg ext str CmmData mkRtsCmmDataLabel str = CmmLabel rtsUnitId (NeedExternDecl False) str CmmData -- RTS symbols don't need "GHC.CmmToC" to -- generate \"extern\" declaration (they are -- exposed via rts/include/Stg.h) mkLocalBlockLabel :: Unique -> CLabel mkLocalBlockLabel u = LocalBlockLabel u -- Constructing RtsLabels mkRtsPrimOpLabel :: PrimOp -> CLabel mkRtsPrimOpLabel primop = RtsLabel (RtsPrimOp primop) mkSelectorInfoLabel :: Platform -> Bool -> Int -> CLabel mkSelectorInfoLabel platform upd offset = assert (offset >= 0 && offset <= pc_MAX_SPEC_SELECTEE_SIZE (platformConstants platform)) $ RtsLabel (RtsSelectorInfoTable upd offset) mkSelectorEntryLabel :: Platform -> Bool -> Int -> CLabel mkSelectorEntryLabel platform upd offset = assert (offset >= 0 && offset <= pc_MAX_SPEC_SELECTEE_SIZE (platformConstants platform)) $ RtsLabel (RtsSelectorEntry upd offset) mkApInfoTableLabel :: Platform -> Bool -> Int -> CLabel mkApInfoTableLabel platform upd arity = assert (arity > 0 && arity <= pc_MAX_SPEC_AP_SIZE (platformConstants platform)) $ RtsLabel (RtsApInfoTable upd arity) mkApEntryLabel :: Platform -> Bool -> Int -> CLabel mkApEntryLabel platform upd arity = assert (arity > 0 && arity <= pc_MAX_SPEC_AP_SIZE (platformConstants platform)) $ RtsLabel (RtsApEntry upd arity) -- A call to some primitive hand written Cmm code mkPrimCallLabel :: PrimCall -> CLabel 
mkPrimCallLabel (PrimCall str pkg) = CmmLabel (toUnitId pkg) (NeedExternDecl True) str CmmPrimCall -- Constructing ForeignLabels -- | Make a foreign label mkForeignLabel :: FastString -- name -> Maybe Int -- size prefix -> ForeignLabelSource -- what package it's in -> FunctionOrData -> CLabel mkForeignLabel = ForeignLabel -- | Update the label size field in a ForeignLabel addLabelSize :: CLabel -> Int -> CLabel addLabelSize (ForeignLabel str _ src fod) sz = ForeignLabel str (Just sz) src fod addLabelSize label _ = label -- | Whether label is a top-level string literal isBytesLabel :: CLabel -> Bool isBytesLabel (IdLabel _ _ Bytes) = True isBytesLabel _lbl = False -- | Whether label is a non-haskell label (defined in C code) isForeignLabel :: CLabel -> Bool isForeignLabel (ForeignLabel _ _ _ _) = True isForeignLabel _lbl = False -- | Whether label is a static closure label (can come from haskell or cmm) isStaticClosureLabel :: CLabel -> Bool -- Closure defined in haskell (.hs) isStaticClosureLabel (IdLabel _ _ Closure) = True -- Closure defined in cmm isStaticClosureLabel (CmmLabel _ _ _ CmmClosure) = True isStaticClosureLabel _lbl = False -- | Whether label is a .rodata label isSomeRODataLabel :: CLabel -> Bool -- info table defined in haskell (.hs) isSomeRODataLabel (IdLabel _ _ ClosureTable) = True isSomeRODataLabel (IdLabel _ _ ConInfoTable {}) = True isSomeRODataLabel (IdLabel _ _ InfoTable) = True isSomeRODataLabel (IdLabel _ _ LocalInfoTable) = True isSomeRODataLabel (IdLabel _ _ BlockInfoTable) = True -- info table defined in cmm (.cmm) isSomeRODataLabel (CmmLabel _ _ _ CmmInfo) = True isSomeRODataLabel _lbl = False -- | Whether label is points to some kind of info table isInfoTableLabel :: CLabel -> Bool isInfoTableLabel (IdLabel _ _ InfoTable) = True isInfoTableLabel (IdLabel _ _ LocalInfoTable) = True isInfoTableLabel (IdLabel _ _ ConInfoTable {}) = True isInfoTableLabel (IdLabel _ _ BlockInfoTable) = True isInfoTableLabel _ = False -- | Whether label is 
points to constructor info table isConInfoTableLabel :: CLabel -> Bool isConInfoTableLabel (IdLabel _ _ ConInfoTable {}) = True isConInfoTableLabel _ = False -- | Get the label size field from a ForeignLabel foreignLabelStdcallInfo :: CLabel -> Maybe Int foreignLabelStdcallInfo (ForeignLabel _ info _ _) = info foreignLabelStdcallInfo _lbl = Nothing -- Constructing Large*Labels mkBitmapLabel :: Unique -> CLabel mkBitmapLabel uniq = LargeBitmapLabel uniq -- | Info Table Provenance Entry -- See Note [Mapping Info Tables to Source Positions] data InfoProvEnt = InfoProvEnt { infoTablePtr :: !CLabel -- Address of the info table , infoProvEntClosureType :: !Int -- The closure type of the info table (from ClosureMacros.h) , infoTableType :: !String -- The rendered Haskell type of the closure the table represents , infoProvModule :: !Module -- Origin module , infoTableProv :: !(Maybe (RealSrcSpan, String)) } -- Position and information about the info table deriving (Eq, Ord) -- Constructing Cost Center Labels mkCCLabel :: CostCentre -> CLabel mkCCSLabel :: CostCentreStack -> CLabel mkIPELabel :: Module -> CLabel mkCCLabel cc = CC_Label cc mkCCSLabel ccs = CCS_Label ccs mkIPELabel mod = ModuleLabel mod MLK_IPEBuffer mkRtsApFastLabel :: FastString -> CLabel mkRtsApFastLabel str = RtsLabel (RtsApFast (NonDetFastString str)) mkRtsSlowFastTickyCtrLabel :: String -> CLabel mkRtsSlowFastTickyCtrLabel pat = RtsLabel (RtsSlowFastTickyCtr pat) -- Constructing Code Coverage Labels mkHpcTicksLabel :: Module -> CLabel mkHpcTicksLabel = HpcTicksLabel -- Constructing labels used for dynamic linking mkDynamicLinkerLabel :: DynamicLinkerLabelInfo -> CLabel -> CLabel mkDynamicLinkerLabel = DynamicLinkerLabel dynamicLinkerLabelInfo :: CLabel -> Maybe (DynamicLinkerLabelInfo, CLabel) dynamicLinkerLabelInfo (DynamicLinkerLabel info lbl) = Just (info, lbl) dynamicLinkerLabelInfo _ = Nothing mkPicBaseLabel :: CLabel mkPicBaseLabel = PicBaseLabel -- Constructing miscellaneous other labels 
mkDeadStripPreventer :: CLabel -> CLabel mkDeadStripPreventer lbl = DeadStripPreventer lbl mkStringLitLabel :: Unique -> CLabel mkStringLitLabel = StringLitLabel mkInitializerStubLabel :: Module -> String -> CLabel mkInitializerStubLabel mod s = ModuleLabel mod (MLK_Initializer s) mkInitializerArrayLabel :: Module -> CLabel mkInitializerArrayLabel mod = ModuleLabel mod MLK_InitializerArray mkFinalizerStubLabel :: Module -> String -> CLabel mkFinalizerStubLabel mod s = ModuleLabel mod (MLK_Finalizer s) mkFinalizerArrayLabel :: Module -> CLabel mkFinalizerArrayLabel mod = ModuleLabel mod MLK_FinalizerArray mkAsmTempLabel :: Uniquable a => a -> CLabel mkAsmTempLabel a = AsmTempLabel (getUnique a) mkAsmTempDerivedLabel :: CLabel -> FastString -> CLabel mkAsmTempDerivedLabel = AsmTempDerivedLabel mkAsmTempEndLabel :: CLabel -> CLabel mkAsmTempEndLabel l = mkAsmTempDerivedLabel l (fsLit "_end") -- | A label indicating the end of a procedure. mkAsmTempProcEndLabel :: CLabel -> CLabel mkAsmTempProcEndLabel l = mkAsmTempDerivedLabel l (fsLit "_proc_end") -- | Construct a label for a DWARF Debug Information Entity (DIE) -- describing another symbol. 
mkAsmTempDieLabel :: CLabel -> CLabel mkAsmTempDieLabel l = mkAsmTempDerivedLabel l (fsLit "_die") -- ----------------------------------------------------------------------------- -- Convert between different kinds of label toClosureLbl :: Platform -> CLabel -> CLabel toClosureLbl platform lbl = case lbl of IdLabel n c _ -> IdLabel n c Closure CmmLabel m ext str _ -> CmmLabel m ext str CmmClosure _ -> pprPanic "toClosureLbl" (pprDebugCLabel platform lbl) toSlowEntryLbl :: Platform -> CLabel -> CLabel toSlowEntryLbl platform lbl = case lbl of IdLabel n _ BlockInfoTable -> pprPanic "toSlowEntryLbl" (ppr n) IdLabel n c _ -> IdLabel n c Slow _ -> pprPanic "toSlowEntryLbl" (pprDebugCLabel platform lbl) toEntryLbl :: Platform -> CLabel -> CLabel toEntryLbl platform lbl = case lbl of IdLabel n c LocalInfoTable -> IdLabel n c LocalEntry IdLabel n c (ConInfoTable k) -> IdLabel n c (ConEntry k) IdLabel n _ BlockInfoTable -> mkLocalBlockLabel (nameUnique n) -- See Note [Proc-point local block entry-points]. IdLabel n c _ -> IdLabel n c Entry CmmLabel m ext str CmmInfo -> CmmLabel m ext str CmmEntry CmmLabel m ext str CmmRetInfo -> CmmLabel m ext str CmmRet _ -> pprPanic "toEntryLbl" (pprDebugCLabel platform lbl) toInfoLbl :: Platform -> CLabel -> CLabel toInfoLbl platform lbl = case lbl of IdLabel n c LocalEntry -> IdLabel n c LocalInfoTable IdLabel n c (ConEntry k) -> IdLabel n c (ConInfoTable k) IdLabel n c _ -> IdLabel n c InfoTable CmmLabel m ext str CmmEntry -> CmmLabel m ext str CmmInfo CmmLabel m ext str CmmRet -> CmmLabel m ext str CmmRetInfo _ -> pprPanic "CLabel.toInfoLbl" (pprDebugCLabel platform lbl) hasHaskellName :: CLabel -> Maybe Name hasHaskellName (IdLabel n _ _) = Just n hasHaskellName _ = Nothing hasIdLabelInfo :: CLabel -> Maybe IdLabelInfo hasIdLabelInfo (IdLabel _ _ l) = Just l hasIdLabelInfo _ = Nothing -- ----------------------------------------------------------------------------- -- Does a CLabel's referent itself refer to a CAF? 
-- | Does this label's referent itself refer to a CAF?
-- True only for 'IdLabel's flagged 'MayHaveCafRefs'; ticky
-- redn-count labels are excluded first (see Note [ticky for LNE]).
hasCAF :: CLabel -> Bool
hasCAF (IdLabel _ _ (IdTickyInfo TickyRednCounts)) = False -- See Note [ticky for LNE]
hasCAF (IdLabel _ MayHaveCafRefs _) = True
hasCAF _                            = False

-- Note [ticky for LNE]
-- ~~~~~~~~~~~~~~~~~~~~~
-- Until 14 Feb 2013, every ticky counter was associated with a
-- closure. Thus, ticky labels used IdLabel. It is odd that
-- GHC.Cmm.Info.Build.cafTransfers would consider such a ticky label
-- reason to add the name to the CAFEnv (and thus eventually the SRT),
-- but it was harmless because the ticky was only used if the closure
-- was also.
--
-- Since we now have ticky counters for LNEs, it is no longer the case
-- that every ticky counter has an actual closure. So I changed the
-- generation of ticky counters' CLabels to not result in their
-- associated id ending up in the SRT.
--
-- NB IdLabel is still appropriate for ticky ids (as opposed to
-- CmmLabel) because the LNE's counter is still related to an .hs Id,
-- that Id just isn't for a proper closure.

-- -----------------------------------------------------------------------------
-- Does a CLabel need declaring before use or not?
--
-- See wiki:commentary/compiler/backends/ppr-c#prototypes

needsCDecl :: CLabel -> Bool
  -- False <=> it's pre-declared; don't bother
  -- don't bother declaring Bitmap labels, we always make sure
  -- they are defined before use.
needsCDecl (SRTLabel _)                 = True
needsCDecl (LargeBitmapLabel _)         = False
needsCDecl (IdLabel _ _ _)              = True
needsCDecl (LocalBlockLabel _)          = True

needsCDecl (StringLitLabel _)           = False
needsCDecl (AsmTempLabel _)             = False
needsCDecl (AsmTempDerivedLabel _ _)    = False
needsCDecl (RtsLabel _)                 = False

needsCDecl (CmmLabel pkgId (NeedExternDecl external) _ _)
        -- local labels mustn't have it
        | not external                  = False

        -- Prototypes for labels defined in the runtime system are imported
        -- into HC files via rts/include/Stg.h.
        | pkgId == rtsUnitId            = False

        -- For other labels we inline one into the HC file directly.
        | otherwise                     = True

needsCDecl l@(ForeignLabel{})           = not (isMathFun l)
needsCDecl (CC_Label _)                 = True
needsCDecl (CCS_Label _)                = True
needsCDecl (IPE_Label {})               = True
needsCDecl (ModuleLabel _ kind)         = modLabelNeedsCDecl kind
needsCDecl (HpcTicksLabel _)            = True
needsCDecl (DynamicLinkerLabel {})      = panic "needsCDecl DynamicLinkerLabel"
needsCDecl PicBaseLabel                 = panic "needsCDecl PicBaseLabel"
needsCDecl (DeadStripPreventer {})      = panic "needsCDecl DeadStripPreventer"

-- | Whether a module-level label needs a C declaration before use.
modLabelNeedsCDecl :: ModuleLabelKind -> Bool
-- Code for finalizers and initializers are emitted in stub objects
modLabelNeedsCDecl (MLK_Initializer _)  = True
modLabelNeedsCDecl (MLK_Finalizer _)    = True
modLabelNeedsCDecl MLK_IPEBuffer        = True
-- The finalizer and initializer arrays are emitted in the code of the module
modLabelNeedsCDecl MLK_InitializerArray = False
modLabelNeedsCDecl MLK_FinalizerArray   = False

-- | If a label is a local block label then return just its 'BlockId', otherwise
-- 'Nothing'.
maybeLocalBlockLabel :: CLabel -> Maybe BlockId
maybeLocalBlockLabel (LocalBlockLabel uq)  = Just $ mkBlockId uq
maybeLocalBlockLabel _                     = Nothing

-- | Check whether a label corresponds to a C function that has
-- a prototype in a system header somewhere, or is built-in
-- to the C compiler. For these labels we avoid generating our
-- own C prototypes.
isMathFun :: CLabel -> Bool
isMathFun (ForeignLabel fs _ _ _) = fs `elementOfUniqSet` math_funs
isMathFun _ = False

-- | Names of C math functions that are guaranteed to have prototypes in
-- system headers; used by 'isMathFun' to suppress our own prototypes.
math_funs :: UniqSet FastString
math_funs = mkUniqSet [
        -- _ISOC99_SOURCE
        (fsLit "acos"),         (fsLit "acosf"),        (fsLit "acosh"),
        (fsLit "acoshf"),       (fsLit "acoshl"),       (fsLit "acosl"),
        (fsLit "asin"),         (fsLit "asinf"),        (fsLit "asinl"),
        (fsLit "asinh"),        (fsLit "asinhf"),       (fsLit "asinhl"),
        (fsLit "atan"),         (fsLit "atanf"),        (fsLit "atanl"),
        (fsLit "atan2"),        (fsLit "atan2f"),       (fsLit "atan2l"),
        (fsLit "atanh"),        (fsLit "atanhf"),       (fsLit "atanhl"),
        (fsLit "cbrt"),         (fsLit "cbrtf"),        (fsLit "cbrtl"),
        (fsLit "ceil"),         (fsLit "ceilf"),        (fsLit "ceill"),
        (fsLit "copysign"),     (fsLit "copysignf"),    (fsLit "copysignl"),
        (fsLit "cos"),          (fsLit "cosf"),         (fsLit "cosl"),
        (fsLit "cosh"),         (fsLit "coshf"),        (fsLit "coshl"),
        (fsLit "erf"),          (fsLit "erff"),         (fsLit "erfl"),
        (fsLit "erfc"),         (fsLit "erfcf"),        (fsLit "erfcl"),
        (fsLit "exp"),          (fsLit "expf"),         (fsLit "expl"),
        (fsLit "exp2"),         (fsLit "exp2f"),        (fsLit "exp2l"),
        (fsLit "expm1"),        (fsLit "expm1f"),       (fsLit "expm1l"),
        (fsLit "fabs"),         (fsLit "fabsf"),        (fsLit "fabsl"),
        (fsLit "fdim"),         (fsLit "fdimf"),        (fsLit "fdiml"),
        (fsLit "floor"),        (fsLit "floorf"),       (fsLit "floorl"),
        (fsLit "fma"),          (fsLit "fmaf"),         (fsLit "fmal"),
        (fsLit "fmax"),         (fsLit "fmaxf"),        (fsLit "fmaxl"),
        (fsLit "fmin"),         (fsLit "fminf"),        (fsLit "fminl"),
        (fsLit "fmod"),         (fsLit "fmodf"),        (fsLit "fmodl"),
        (fsLit "frexp"),        (fsLit "frexpf"),       (fsLit "frexpl"),
        (fsLit "hypot"),        (fsLit "hypotf"),       (fsLit "hypotl"),
        (fsLit "ilogb"),        (fsLit "ilogbf"),       (fsLit "ilogbl"),
        (fsLit "ldexp"),        (fsLit "ldexpf"),       (fsLit "ldexpl"),
        (fsLit "lgamma"),       (fsLit "lgammaf"),      (fsLit "lgammal"),
        (fsLit "llrint"),       (fsLit "llrintf"),      (fsLit "llrintl"),
        (fsLit "llround"),      (fsLit "llroundf"),     (fsLit "llroundl"),
        (fsLit "log"),          (fsLit "logf"),         (fsLit "logl"),
        (fsLit "log10l"),       (fsLit "log10"),        (fsLit "log10f"),
        (fsLit "log1pl"),       (fsLit "log1p"),        (fsLit "log1pf"),
        (fsLit "log2"),         (fsLit "log2f"),        (fsLit "log2l"),
        (fsLit "logb"),         (fsLit "logbf"),        (fsLit "logbl"),
        (fsLit "lrint"),        (fsLit "lrintf"),       (fsLit "lrintl"),
        (fsLit "lround"),       (fsLit "lroundf"),      (fsLit "lroundl"),
        (fsLit "modf"),         (fsLit "modff"),        (fsLit "modfl"),
        (fsLit "nan"),          (fsLit "nanf"),         (fsLit "nanl"),
        (fsLit "nearbyint"),    (fsLit "nearbyintf"),   (fsLit "nearbyintl"),
        (fsLit "nextafter"),    (fsLit "nextafterf"),   (fsLit "nextafterl"),
        (fsLit "nexttoward"),   (fsLit "nexttowardf"),  (fsLit "nexttowardl"),
        (fsLit "pow"),          (fsLit "powf"),         (fsLit "powl"),
        (fsLit "remainder"),    (fsLit "remainderf"),   (fsLit "remainderl"),
        (fsLit "remquo"),       (fsLit "remquof"),      (fsLit "remquol"),
        (fsLit "rint"),         (fsLit "rintf"),        (fsLit "rintl"),
        (fsLit "round"),        (fsLit "roundf"),       (fsLit "roundl"),
        (fsLit "scalbln"),      (fsLit "scalblnf"),     (fsLit "scalblnl"),
        (fsLit "scalbn"),       (fsLit "scalbnf"),      (fsLit "scalbnl"),
        (fsLit "sin"),          (fsLit "sinf"),         (fsLit "sinl"),
        (fsLit "sinh"),         (fsLit "sinhf"),        (fsLit "sinhl"),
        (fsLit "sqrt"),         (fsLit "sqrtf"),        (fsLit "sqrtl"),
        (fsLit "tan"),          (fsLit "tanf"),         (fsLit "tanl"),
        (fsLit "tanh"),         (fsLit "tanhf"),        (fsLit "tanhl"),
        (fsLit "tgamma"),       (fsLit "tgammaf"),      (fsLit "tgammal"),
        (fsLit "trunc"),        (fsLit "truncf"),       (fsLit "truncl"),
        -- ISO C 99 also defines these function-like macros in math.h:
        -- fpclassify, isfinite, isinf, isnormal, signbit, isgreater,
        -- isgreaterequal, isless, islessequal, islessgreater, isunordered

        -- additional symbols from _BSD_SOURCE
        (fsLit "drem"),         (fsLit "dremf"),        (fsLit "dreml"),
        (fsLit "finite"),       (fsLit "finitef"),      (fsLit "finitel"),
        (fsLit "gamma"),        (fsLit "gammaf"),       (fsLit "gammal"),
        (fsLit "isinf"),        (fsLit "isinff"),       (fsLit "isinfl"),
        (fsLit "isnan"),        (fsLit "isnanf"),       (fsLit "isnanl"),
        (fsLit "j0"),           (fsLit "j0f"),          (fsLit "j0l"),
        (fsLit "j1"),           (fsLit "j1f"),          (fsLit "j1l"),
        (fsLit "jn"),           (fsLit "jnf"),          (fsLit "jnl"),
        (fsLit "lgamma_r"),     (fsLit "lgammaf_r"),    (fsLit "lgammal_r"),
        (fsLit "scalb"),        (fsLit "scalbf"),       (fsLit "scalbl"),
        (fsLit "significand"),  (fsLit "significandf"), (fsLit "significandl"),
        (fsLit "y0"),           (fsLit "y0f"),          (fsLit "y0l"),
        (fsLit "y1"),           (fsLit "y1f"),          (fsLit "y1l"),
        (fsLit "yn"),           (fsLit "ynf"),          (fsLit "ynl"),

        -- These functions are described in IEEE Std 754-2008 -
        -- Standard for Floating-Point Arithmetic and ISO/IEC TS 18661
        (fsLit "nextup"),       (fsLit "nextupf"),      (fsLit "nextupl"),
        (fsLit "nextdown"),     (fsLit "nextdownf"),    (fsLit "nextdownl")
    ]

-- -----------------------------------------------------------------------------
-- | Is a CLabel visible outside this object file or not?
-- From the point of view of the code generator, a name is
-- externally visible if it has to be declared as exported
-- in the .o file's symbol table; that is, made non-static.
externallyVisibleCLabel :: CLabel -> Bool -- not C "static"
externallyVisibleCLabel (StringLitLabel _)      = False
externallyVisibleCLabel (AsmTempLabel _)        = False
externallyVisibleCLabel (AsmTempDerivedLabel _ _)= False
externallyVisibleCLabel (RtsLabel _)            = True
externallyVisibleCLabel (LocalBlockLabel _)     = False
externallyVisibleCLabel (CmmLabel _ _ _ _)      = True
externallyVisibleCLabel (ForeignLabel{})        = True
externallyVisibleCLabel (IdLabel name _ info)   = isExternalName name && externallyVisibleIdLabel info
externallyVisibleCLabel (CC_Label _)            = True
externallyVisibleCLabel (CCS_Label _)           = True
externallyVisibleCLabel (IPE_Label {})          = True
externallyVisibleCLabel (ModuleLabel {})        = True
externallyVisibleCLabel (DynamicLinkerLabel _ _)= False
externallyVisibleCLabel (HpcTicksLabel _)       = True
externallyVisibleCLabel (LargeBitmapLabel _)    = False
externallyVisibleCLabel (SRTLabel _)            = False
externallyVisibleCLabel (PicBaseLabel {}) = panic "externallyVisibleCLabel PicBaseLabel"
externallyVisibleCLabel (DeadStripPreventer {}) = panic "externallyVisibleCLabel DeadStripPreventer"

-- | Whether an 'IdLabel' flavour is externally visible; local
-- info tables, local entries, and block info tables are not.
externallyVisibleIdLabel :: IdLabelInfo -> Bool
externallyVisibleIdLabel LocalInfoTable  = False
externallyVisibleIdLabel LocalEntry      = False
externallyVisibleIdLabel BlockInfoTable  = False
externallyVisibleIdLabel _               = True

-- -----------------------------------------------------------------------------
-- Finding the "type" of a CLabel

-- For generating correct types in label declarations:

data CLabelType
  = CodeLabel   -- Address of some executable instructions
  | DataLabel   -- Address of data, not a GC ptr
  | GcPtrLabel  -- Address of a (presumably static) GC object

-- | Does the label address executable code?
isCFunctionLabel :: CLabel -> Bool
isCFunctionLabel lbl = case labelType lbl of
                        CodeLabel -> True
                        _other    -> False

-- | Does the label address a (static) GC object?
isGcPtrLabel :: CLabel -> Bool
isGcPtrLabel lbl = case labelType lbl of
                        GcPtrLabel -> True
                        _other     -> False

-- | Work out the general type of data at the address of this label
--    whether it be code, data, or static GC object.
labelType :: CLabel -> CLabelType
labelType (IdLabel _ _ info)                    = idInfoLabelType info
labelType (CmmLabel _ _ _ CmmData)              = DataLabel
labelType (CmmLabel _ _ _ CmmClosure)           = GcPtrLabel
labelType (CmmLabel _ _ _ CmmCode)              = CodeLabel
labelType (CmmLabel _ _ _ CmmInfo)              = DataLabel
labelType (CmmLabel _ _ _ CmmEntry)             = CodeLabel
labelType (CmmLabel _ _ _ CmmPrimCall)          = CodeLabel
labelType (CmmLabel _ _ _ CmmRetInfo)           = DataLabel
labelType (CmmLabel _ _ _ CmmRet)               = CodeLabel
labelType (RtsLabel (RtsSelectorInfoTable _ _)) = DataLabel
labelType (RtsLabel (RtsApInfoTable _ _))       = DataLabel
labelType (RtsLabel (RtsApFast _))              = CodeLabel
labelType (RtsLabel _)                          = DataLabel
labelType (LocalBlockLabel _)                   = CodeLabel
labelType (SRTLabel _)                          = DataLabel
labelType (ForeignLabel _ _ _ IsFunction)       = CodeLabel
labelType (ForeignLabel _ _ _ IsData)           = DataLabel
labelType (AsmTempLabel _)                      = panic "labelType(AsmTempLabel)"
labelType (AsmTempDerivedLabel _ _)             = panic "labelType(AsmTempDerivedLabel)"
labelType (StringLitLabel _)                    = DataLabel
labelType (CC_Label _)                          = DataLabel
labelType (CCS_Label _)                         = DataLabel
labelType (IPE_Label {})                        = DataLabel
labelType (ModuleLabel _ kind)                  = moduleLabelKindType kind
labelType (DynamicLinkerLabel _ _)              = DataLabel -- Is this right?
labelType PicBaseLabel                          = DataLabel
labelType (DeadStripPreventer _)                = DataLabel
labelType (HpcTicksLabel _)                     = DataLabel
labelType (LargeBitmapLabel _)                  = DataLabel

-- | Initializer/finalizer stubs are code; the collecting arrays and
-- the IPE buffer are data.
moduleLabelKindType :: ModuleLabelKind -> CLabelType
moduleLabelKindType kind =
  case kind of
    MLK_Initializer _    -> CodeLabel
    MLK_InitializerArray -> DataLabel
    MLK_Finalizer _      -> CodeLabel
    MLK_FinalizerArray   -> DataLabel
    MLK_IPEBuffer        -> DataLabel

-- | The 'CLabelType' of an 'IdLabel' flavour; anything not listed
-- explicitly (entry points, slow entries, ...) is code.
idInfoLabelType :: IdLabelInfo -> CLabelType
idInfoLabelType info =
  case info of
    InfoTable     -> DataLabel
    LocalInfoTable -> DataLabel
    BlockInfoTable -> DataLabel
    Closure       -> GcPtrLabel
    ConInfoTable {} -> DataLabel
    ClosureTable  -> DataLabel
    IdTickyInfo{} -> DataLabel
    Bytes         -> DataLabel
    _             -> CodeLabel

-- -----------------------------------------------------------------------------

-- | Is a 'CLabel' defined in the current module being compiled?
--
-- Sometimes we can optimise references within a compilation unit in ways that
-- we couldn't for inter-module references. This provides a conservative
-- estimate of whether a 'CLabel' lives in the current module.
isLocalCLabel :: Module -> CLabel -> Bool
isLocalCLabel this_mod lbl =
  case lbl of
    IdLabel name _ _
      | isInternalName name -> True
      | otherwise           -> nameModule name == this_mod
    LocalBlockLabel _       -> True
    _                       -> False

-- -----------------------------------------------------------------------------

-- | Does a 'CLabel' need dynamic linkage?
--
-- When referring to data in code, we need to know whether
-- that data resides in a DLL or not. [Win32 only.]
-- @labelDynamic@ returns @True@ if the label is located
-- in a DLL, be it a data reference or not.
labelDynamic :: Module -> Platform -> Bool -> CLabel -> Bool
labelDynamic this_mod platform external_dynamic_refs lbl =
  case lbl of
   -- is the RTS in a DLL or not?
   RtsLabel _ ->
     external_dynamic_refs && (this_unit /= rtsUnitId)

   IdLabel n _ _ ->
     external_dynamic_refs && isDynLinkName platform this_mod n

   -- When compiling in the "dyn" way, each package is to be linked into
   -- its own shared library.
   CmmLabel lbl_unit _ _ _
    | os == OSMinGW32 -> external_dynamic_refs && (this_unit /= lbl_unit)
    | otherwise       -> external_dynamic_refs

   LocalBlockLabel _ -> False

   ForeignLabel _ _ source _ ->
     if os == OSMinGW32
     then case source of
            -- Foreign label is in some un-named foreign package (or DLL).
            ForeignLabelInExternalPackage -> True

            -- Foreign label is linked into the same package as the
            -- source file currently being compiled.
            ForeignLabelInThisPackage -> False

            -- Foreign label is in some named package.
            -- When compiling in the "dyn" way, each package is to be
            -- linked into its own DLL.
            ForeignLabelInPackage pkgId ->
                external_dynamic_refs && (this_unit /= pkgId)

     else -- On Mac OS X and on ELF platforms, false positives are OK,
          -- so we claim that all foreign imports come from dynamic
          -- libraries
          True

   CC_Label cc ->
     external_dynamic_refs && not (ccFromThisModule cc this_mod)

   -- CCS_Label always contains a CostCentre defined in the current module
   CCS_Label _ -> False

   IPE_Label {} -> True

   HpcTicksLabel m ->
     external_dynamic_refs && this_mod /= m

   -- Note that DynamicLinkerLabels do NOT require dynamic linking themselves.
   _ -> False

  where
    os = platformOS platform
    this_unit = toUnitId (moduleUnit this_mod)

-----------------------------------------------------------------------------
-- Printing out CLabels.

{-
Convention:

      <name>_<type>

where <name> is <Module>_<name> for external names and <unique> for
internal names.
<type> is one of the following:

         info                   Info table
         srt                    Static reference table
         entry                  Entry code (function, closure)
         slow                   Slow entry code (if any)
         ret                    Direct return address
         vtbl                   Vector table
         <n>_alt                Case alternative (tag n)
         dflt                   Default case alternative
         btm                    Large bitmap vector
         closure                Static closure
         con_entry              Dynamic Constructor entry code
         con_info               Dynamic Constructor info table
         static_entry           Static Constructor entry code
         static_info            Static Constructor info table
         sel_info               Selector info table
         sel_entry              Selector entry code
         cc                     Cost centre
         ccs                    Cost centre stack

Many of these distinctions are only for documentation reasons. For example,
_ret is only distinguished from _entry to make it easy to tell whether a code
fragment is a return point or a closure/function entry.

Note [Closure and info labels]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For a function 'foo', we have:
   foo_info    : Points to the info table describing foo's closure
                 (and entry code for foo with tables next to code)
   foo_closure : Static (no-free-var) closure only:
                 points to the statically-allocated closure

For a data constructor (such as Just or Nothing), we have:
    Just_con_info: Info table for the data constructor itself
                   the first word of a heap-allocated Just
    Just_info:     Info table for the *worker function*, an
                   ordinary Haskell function of arity 1 that
                   allocates a (Just x) box:
                      Just = \x -> Just x
    Just_entry:    The entry code for the worker function
    Just_closure:  The closure for this worker

    Nothing_closure: a statically allocated closure for Nothing
    Nothing_static_info: info table for Nothing_closure

All these must be exported symbols, EXCEPT Just_info. We don't need to
export this because in other modules we either have
       * A reference to 'Just'; use Just_closure
       * A saturated call 'Just x'; allocate using Just_con_info
Not exporting these Just_info labels reduces the number of symbols
somewhat.
Note [Bytes label]
~~~~~~~~~~~~~~~~~~
For a top-level string literal 'foo', we have just one symbol 'foo_bytes', which
points to a static data block containing the content of the literal.

Note [Proc-point local block entry-points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A label for a proc-point local block entry-point has no "_entry" suffix. With
`infoTblLbl` we derive an info table label from a proc-point block ID. If
we convert such an info table label into an entry label we must produce
the label without an "_entry" suffix. So an info table label records
the fact that it was derived from a block ID in `IdLabelInfo` as
`BlockInfoTable`.

The info table label and the local block label are both local labels
and are not externally visible.

Note [Bangs in CLabel]
~~~~~~~~~~~~~~~~~~~~~~
There are some carefully placed strictness annotations in this module,
which were discovered in !5226 to significantly reduce compile-time
allocation.  Take care if you want to remove them!

-}

instance OutputableP Platform CLabel where
  {-# INLINE pdoc #-} -- see Note [Bangs in CLabel]
  pdoc !platform lbl = getPprStyle $ \pp_sty ->
                        let !sty = case pp_sty of
                                    PprCode sty -> sty
                                    _           -> CStyle
                        in pprCLabel platform sty lbl

-- | Render a 'CLabel' in the given 'LabelStyle' (assembly or C).
pprCLabel :: Platform -> LabelStyle -> CLabel -> SDoc
pprCLabel !platform !sty lbl = -- see Note [Bangs in CLabel]
  let
    !use_leading_underscores = platformLeadingUnderscore platform

    -- some platform (e.g. Darwin) require a leading "_" for exported asm
    -- symbols
    maybe_underscore :: SDoc -> SDoc
    maybe_underscore doc = case sty of
      AsmStyle | use_leading_underscores -> pp_cSEP <> doc
      _                                  -> doc

    tempLabelPrefixOrUnderscore :: Platform -> SDoc
    tempLabelPrefixOrUnderscore platform = case sty of
      AsmStyle -> asmTempLabelPrefix platform
      CStyle   -> char '_'

  in case lbl of
   LocalBlockLabel u -> case sty of
      AsmStyle -> tempLabelPrefixOrUnderscore platform <> pprUniqueAlways u
      CStyle   -> tempLabelPrefixOrUnderscore platform <> text "blk_" <> pprUniqueAlways u

   AsmTempLabel u
      -> tempLabelPrefixOrUnderscore platform <> pprUniqueAlways u

   AsmTempDerivedLabel l suf
      -> asmTempLabelPrefix platform
         <> case l of AsmTempLabel u    -> pprUniqueAlways u
                      LocalBlockLabel u -> pprUniqueAlways u
                      _other            -> pprCLabel platform sty l
         <> ftext suf

   DynamicLinkerLabel info lbl
      -> pprDynamicLinkerAsmLabel platform info (pprCLabel platform AsmStyle lbl)

   PicBaseLabel
      -> text "1b"

   DeadStripPreventer lbl
      ->
      {- `lbl` can be temp one but we need to ensure that dsp label will stay
         in the final binary so we prepend non-temp prefix ("dsp_") and
         optional `_` (underscore) because this is how you mark non-temp
         symbols on some platforms (Darwin) -}
      maybe_underscore $ text "dsp_" <> pprCLabel platform sty lbl <> text "_dsp"

   StringLitLabel u
      -> maybe_underscore $ pprUniqueAlways u <> text "_str"

   ForeignLabel fs (Just sz) _ _
      | AsmStyle <- sty
      , OSMinGW32 <- platformOS platform
      -> -- In asm mode, we need to put the suffix on a stdcall ForeignLabel.
         -- (The C compiler does this itself).
         maybe_underscore $ ftext fs <> char '@' <> int sz

   ForeignLabel fs _ _ _
      -> maybe_underscore $ ftext fs

   IdLabel name _cafs flavor -> case sty of
      AsmStyle -> maybe_underscore $ internalNamePrefix <> ppr name <> ppIdFlavor flavor
        where
          isRandomGenerated = not (isExternalName name)
          internalNamePrefix =
            if isRandomGenerated
              then asmTempLabelPrefix platform
              else empty
      CStyle   -> ppr name <> ppIdFlavor flavor

   SRTLabel u
      -> maybe_underscore $ tempLabelPrefixOrUnderscore platform <> pprUniqueAlways u <> pp_cSEP <> text "srt"

   RtsLabel (RtsApFast (NonDetFastString str))
      -> maybe_underscore $ ftext str <> text "_fast"

   RtsLabel (RtsSelectorInfoTable upd_reqd offset)
      -> maybe_underscore $ hcat [ text "stg_sel_", text (show offset)
                                 , if upd_reqd
                                    then text "_upd_info"
                                    else text "_noupd_info"
                                 ]

   RtsLabel (RtsSelectorEntry upd_reqd offset)
      -> maybe_underscore $ hcat [ text "stg_sel_", text (show offset)
                                 , if upd_reqd
                                    then text "_upd_entry"
                                    else text "_noupd_entry"
                                 ]

   RtsLabel (RtsApInfoTable upd_reqd arity)
      -> maybe_underscore $ hcat [ text "stg_ap_", text (show arity)
                                 , if upd_reqd
                                    then text "_upd_info"
                                    else text "_noupd_info"
                                 ]

   RtsLabel (RtsApEntry upd_reqd arity)
      -> maybe_underscore $ hcat [ text "stg_ap_", text (show arity)
                                 , if upd_reqd
                                    then text "_upd_entry"
                                    else text "_noupd_entry"
                                 ]

   RtsLabel (RtsPrimOp primop)
      -> maybe_underscore $ text "stg_" <> ppr primop

   RtsLabel (RtsSlowFastTickyCtr pat)
      -> maybe_underscore $ text "SLOW_CALL_fast_" <> text pat <> text "_ctr"

   LargeBitmapLabel u
      -> maybe_underscore $ tempLabelPrefixOrUnderscore platform
                            <> char 'b' <> pprUniqueAlways u <> pp_cSEP <> text "btm"
      -- Some bitmaps for tuple constructors have a numeric tag (e.g. '7')
      -- until that gets resolved we'll just force them to start
      -- with a letter so the label will be legal assembly code.

   HpcTicksLabel mod
      -> maybe_underscore $ text "_hpc_tickboxes_" <> ppr mod <> text "_hpc"

   CC_Label cc   -> maybe_underscore $ ppr cc
   CCS_Label ccs -> maybe_underscore $ ppr ccs
   IPE_Label (InfoProvEnt l _ _ m _)
      -> maybe_underscore $ (pprCode CStyle (pdoc platform l) <> text "_" <> ppr m <> text "_ipe")
   ModuleLabel mod kind
      -> maybe_underscore $ ppr mod <> text "_" <> ppr kind

   CmmLabel _ _ fs CmmCode     -> maybe_underscore $ ftext fs
   CmmLabel _ _ fs CmmData     -> maybe_underscore $ ftext fs
   CmmLabel _ _ fs CmmPrimCall -> maybe_underscore $ ftext fs
   CmmLabel _ _ fs CmmInfo     -> maybe_underscore $ ftext fs <> text "_info"
   CmmLabel _ _ fs CmmEntry    -> maybe_underscore $ ftext fs <> text "_entry"
   CmmLabel _ _ fs CmmRetInfo  -> maybe_underscore $ ftext fs <> text "_info"
   CmmLabel _ _ fs CmmRet      -> maybe_underscore $ ftext fs <> text "_ret"
   CmmLabel _ _ fs CmmClosure  -> maybe_underscore $ ftext fs <> text "_closure"

-- Note [Internal proc labels]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Some tools (e.g. the `perf` utility on Linux) rely on the symbol table
-- for resolution of function names. To help these tools we provide the
-- (enabled by default) -fexpose-all-symbols flag which causes GHC to produce
-- symbols even for symbols which are internal to a module (although such
-- symbols will have only local linkage).
--
-- Note that these labels are *not* referred to by code. They are strictly for
-- diagnostics purposes.
--
-- To avoid confusion, it is desirable to add a module-qualifier to the
-- symbol name. However, the Name type's Internal constructor doesn't carry
-- knowledge of the current Module. Consequently, we have to pass this around
-- explicitly.

-- | Generate a label for a procedure internal to a module (if
-- 'Opt_ExposeAllSymbols' is enabled).
-- See Note [Internal proc labels].
ppInternalProcLabel :: Module     -- ^ the current module
                    -> CLabel
                    -> Maybe SDoc -- ^ the internal proc label
ppInternalProcLabel this_mod (IdLabel nm _ flavour)
  | isInternalName nm
  = Just
     $ text "_" <> ppr this_mod
    <> char '_'
    <> ztext (zEncodeFS (occNameFS (occName nm)))
    <> char '_'
    <> pprUniqueAlways (getUnique nm)
    <> ppIdFlavor flavour
ppInternalProcLabel _ _ = Nothing

-- | The suffix ("_info", "_entry", "_closure", ...) that distinguishes
-- the flavours of an 'IdLabel'; always preceded by a separator.
ppIdFlavor :: IdLabelInfo -> SDoc
ppIdFlavor x = pp_cSEP <> case x of
   Closure          -> text "closure"
   InfoTable        -> text "info"
   LocalInfoTable   -> text "info"
   Entry            -> text "entry"
   LocalEntry       -> text "entry"
   Slow             -> text "slow"
   IdTickyInfo TickyRednCounts
                    -> text "ct"
   IdTickyInfo (TickyInferedTag unique)
                    -> text "ct_inf_tag" <> char '_' <> ppr unique
   ConEntry loc ->
     case loc of
       DefinitionSite -> text "con_entry"
       UsageSite m n ->
         -- usage-site constructor entries are qualified by the use site
         ppr m <> pp_cSEP <> ppr n <> pp_cSEP <> text "con_entry"
   ConInfoTable k ->
     case k of
      DefinitionSite -> text "con_info"
      UsageSite m n ->
        ppr m <> pp_cSEP <> ppr n <> pp_cSEP <> text "con_info"
   ClosureTable     -> text "closure_tbl"
   Bytes            -> text "bytes"
   BlockInfoTable   -> text "info"

-- | The separator used inside generated symbol names.
pp_cSEP :: SDoc
pp_cSEP = char '_'

instance Outputable ForeignLabelSource where
 ppr fs
  = case fs of
      ForeignLabelInPackage pkgId     -> parens $ text "package: " <> ppr pkgId
      ForeignLabelInThisPackage       -> parens $ text "this package"
      ForeignLabelInExternalPackage   -> parens $ text "external package"

-- -----------------------------------------------------------------------------
-- Machine-dependent knowledge about labels.
asmTempLabelPrefix :: Platform -> SDoc -- for formatting labels
asmTempLabelPrefix !platform = case platformOS platform of
  OSDarwin -> text "L"
  OSAIX    -> text "__L" -- follow IBM XL C's convention
  _        -> text ".L"

-- | Render a dynamic-linker-related label (PLT stub, GOT/TOC pointer, ...)
-- for the given platform's assembler conventions.
pprDynamicLinkerAsmLabel :: Platform -> DynamicLinkerLabelInfo -> SDoc -> SDoc
pprDynamicLinkerAsmLabel !platform dllInfo ppLbl =
    case platformOS platform of
      OSDarwin
        | platformArch platform == ArchX86_64 ->
          case dllInfo of
            CodeStub        -> char 'L' <> ppLbl <> text "$stub"
            SymbolPtr       -> char 'L' <> ppLbl <> text "$non_lazy_ptr"
            GotSymbolPtr    -> ppLbl <> text "@GOTPCREL"
            GotSymbolOffset -> ppLbl
        | platformArch platform == ArchAArch64 -> ppLbl
        | otherwise ->
          case dllInfo of
            CodeStub  -> char 'L' <> ppLbl <> text "$stub"
            SymbolPtr -> char 'L' <> ppLbl <> text "$non_lazy_ptr"
            _         -> panic "pprDynamicLinkerAsmLabel"

      OSAIX ->
          case dllInfo of
            SymbolPtr -> text "LC.." <> ppLbl -- GCC's naming convention
            _         -> panic "pprDynamicLinkerAsmLabel"

      _ | osElfTarget (platformOS platform) -> elfLabel

      OSMinGW32 ->
          case dllInfo of
            SymbolPtr -> text "__imp_" <> ppLbl
            _         -> panic "pprDynamicLinkerAsmLabel"

      _ -> panic "pprDynamicLinkerAsmLabel"
  where
    -- ELF targets dispatch further on the architecture.
    elfLabel
      | platformArch platform == ArchPPC
      = case dllInfo of
          CodeStub  -> -- See Note [.LCTOC1 in PPC PIC code]
                       ppLbl <> text "+32768@plt"
          SymbolPtr -> text ".LC_" <> ppLbl
          _         -> panic "pprDynamicLinkerAsmLabel"

      | platformArch platform == ArchAArch64
      = ppLbl

      | platformArch platform == ArchX86_64
      = case dllInfo of
          CodeStub        -> ppLbl <> text "@plt"
          GotSymbolPtr    -> ppLbl <> text "@gotpcrel"
          GotSymbolOffset -> ppLbl
          SymbolPtr       -> text ".LC_" <> ppLbl

      | platformArch platform == ArchPPC_64 ELF_V1
        || platformArch platform == ArchPPC_64 ELF_V2
      = case dllInfo of
          GotSymbolPtr    -> text ".LC_" <> ppLbl <> text "@toc"
          GotSymbolOffset -> ppLbl
          SymbolPtr       -> text ".LC_" <> ppLbl
          _               -> panic "pprDynamicLinkerAsmLabel"

      | otherwise
      = case dllInfo of
          CodeStub        -> ppLbl <> text "@plt"
          SymbolPtr       -> text ".LC_" <> ppLbl
          GotSymbolPtr    -> ppLbl <> text "@got"
          GotSymbolOffset -> ppLbl <> text "@gotoff"

-- Figure out whether `symbol` may serve as an alias
-- to `target` within one compilation unit.
--
-- This is true if any of these holds:
-- * `target` is a module-internal haskell name.
-- * `target` is an exported name, but comes from the same
--   module as `symbol`
--
-- These are sufficient conditions for establishing e.g. a
-- GNU assembly alias ('.equiv' directive). Sadly, there is
-- no such thing as an alias to an imported symbol (conf.
-- http://blog.omega-prime.co.uk/2011/07/06/the-sad-state-of-symbol-aliases/)
-- See Note [emit-time elimination of static indirections].
--
-- Precondition is that both labels represent the
-- same semantic value.
mayRedirectTo :: CLabel -> CLabel -> Bool
mayRedirectTo symbol target
 | Just nam <- haskellName
 , staticClosureLabel
 , isExternalName nam
 , Just mod <- nameModule_maybe nam
 , Just anam <- hasHaskellName symbol
 , Just amod <- nameModule_maybe anam
 = amod == mod

 | Just nam <- haskellName
 , staticClosureLabel
 , isInternalName nam
 = True

 | otherwise = False
   where staticClosureLabel = isStaticClosureLabel target
         haskellName = hasHaskellName target

{- Note [emit-time elimination of static indirections]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As described in #15155, certain static values are representationally
equivalent, e.g. 'cast'ed values (when created by 'newtype' wrappers).

newtype A = A Int
{-# NOINLINE a #-}
a = A 42

a1_rYB :: Int
[GblId, Caf=NoCafRefs, Unf=OtherCon []]
a1_rYB = GHC.Types.I# 42#

a [InlPrag=NOINLINE] :: A
[GblId, Unf=OtherCon []]
a = a1_rYB `cast` (Sym (T15155.N:A[0]) :: Int ~R# A)

Formerly we created static indirections for these (IND_STATIC), which
consist of a statically allocated forwarding closure that contains
the (possibly tagged) indirectee. (See CMM/assembly below.)
This approach is suboptimal for two reasons:
  (a) they occupy extra space,
  (b) they need to be entered in order to obtain the indirectee,
      thus they cannot be tagged.
Fortunately there is a common case where static indirections can be eliminated while emitting assembly (native or LLVM), viz. when the indirectee is in the same module (object file) as the symbol that points to it. In this case an assembly-level identification can be created ('.equiv' directive), and as such the same object will be assigned two names in the symbol table. Any of the identified symbols can be referenced by a tagged pointer. Currently the 'mayRedirectTo' predicate will give a clue whether a label can be equated with another, already emitted, label (which can in turn be an alias). The general mechanics is that we identify data (IND_STATIC closures) that are amenable to aliasing while pretty-printing of assembly output, and emit the '.equiv' directive instead of static data in such a case. Here is a sketch how the output is massaged: Consider newtype A = A Int {-# NOINLINE a #-} a = A 42 -- I# 42# is the indirectee -- 'a' is exported results in STG a1_rXq :: GHC.Types.Int [GblId, Caf=NoCafRefs, Unf=OtherCon []] = CCS_DONT_CARE GHC.Types.I#! [42#]; T15155.a [InlPrag=NOINLINE] :: T15155.A [GblId, Unf=OtherCon []] = CAF_ccs \ u [] a1_rXq; and CMM [section ""data" . a1_rXq_closure" { a1_rXq_closure: const GHC.Types.I#_con_info; const 42; }] [section ""data" . T15155.a_closure" { T15155.a_closure: const stg_IND_STATIC_info; const a1_rXq_closure+1; const 0; const 0; }] The emitted assembly is ==== INDIRECTEE a1_rXq_closure: -- module local haskell value .quad GHC.Types.I#_con_info -- an Int .quad 42 ==== BEFORE .globl T15155.a_closure -- exported newtype wrapped value T15155.a_closure: .quad stg_IND_STATIC_info -- the closure info .quad a1_rXq_closure+1 -- indirectee ('+1' being the tag) .quad 0 .quad 0 ==== AFTER .globl T15155.a_closure -- exported newtype wrapped value .equiv a1_rXq_closure,T15155.a_closure -- both are shared The transformation is performed because T15155.a_closure `mayRedirectTo` a1_rXq_closure+1 returns True. 
-} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Dataflow/Block.hs0000644000000000000000000002606014472400112021502 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeFamilies #-} module GHC.Cmm.Dataflow.Block ( Extensibility (..) , O , C , MaybeO(..) , IndexedCO , Block(..) , blockAppend , blockConcat , blockCons , blockFromList , blockJoin , blockJoinHead , blockJoinTail , blockSnoc , blockSplit , blockSplitHead , blockSplitTail , blockToList , emptyBlock , firstNode , foldBlockNodesB , foldBlockNodesB3 , foldBlockNodesF , isEmptyBlock , lastNode , mapBlock , mapBlock' , mapBlock3' , replaceFirstNode , replaceLastNode ) where import GHC.Prelude -- ----------------------------------------------------------------------------- -- Shapes: Open and Closed -- | Used at the type level to indicate "open" vs "closed" structure. data Extensibility -- | An "open" structure with a unique, unnamed control-flow edge flowing in -- or out. \"Fallthrough\" and concatenation are permitted at an open point. = Open -- | A "closed" structure which supports control transfer only through the use -- of named labels---no "fallthrough" is permitted. The number of control-flow -- edges is unconstrained. | Closed type O = 'Open type C = 'Closed -- | Either type indexed by closed/open using type families type family IndexedCO (ex :: Extensibility) (a :: k) (b :: k) :: k type instance IndexedCO C a _b = a type instance IndexedCO O _a b = b -- | Maybe type indexed by open/closed data MaybeO ex t where JustO :: t -> MaybeO O t NothingO :: MaybeO C t deriving instance Functor (MaybeO ex) -- ----------------------------------------------------------------------------- -- The Block type -- | A sequence of nodes. May be any of four shapes (O/O, O/C, C/O, C/C). 
-- Open at the entry means single entry, mutatis mutandis for exit. -- A closed/closed block is a /basic/ block and can't be extended further. -- Clients should avoid manipulating blocks and should stick to either nodes -- or graphs. data Block n e x where BlockCO :: n C O -> Block n O O -> Block n C O BlockCC :: n C O -> Block n O O -> n O C -> Block n C C BlockOC :: Block n O O -> n O C -> Block n O C BNil :: Block n O O BMiddle :: n O O -> Block n O O BCat :: Block n O O -> Block n O O -> Block n O O BSnoc :: Block n O O -> n O O -> Block n O O BCons :: n O O -> Block n O O -> Block n O O -- ----------------------------------------------------------------------------- -- Simple operations on Blocks -- Predicates isEmptyBlock :: Block n e x -> Bool isEmptyBlock BNil = True isEmptyBlock (BCat l r) = isEmptyBlock l && isEmptyBlock r isEmptyBlock _ = False -- Building emptyBlock :: Block n O O emptyBlock = BNil blockCons :: n O O -> Block n O x -> Block n O x blockCons n b = case b of BlockOC b l -> (BlockOC $! (n `blockCons` b)) l BNil{} -> BMiddle n BMiddle{} -> n `BCons` b BCat{} -> n `BCons` b BSnoc{} -> n `BCons` b BCons{} -> n `BCons` b blockSnoc :: Block n e O -> n O O -> Block n e O blockSnoc b n = case b of BlockCO f b -> BlockCO f $! 
(b `blockSnoc` n) BNil{} -> BMiddle n BMiddle{} -> b `BSnoc` n BCat{} -> b `BSnoc` n BSnoc{} -> b `BSnoc` n BCons{} -> b `BSnoc` n blockJoinHead :: n C O -> Block n O x -> Block n C x blockJoinHead f (BlockOC b l) = BlockCC f b l blockJoinHead f b = BlockCO f BNil `cat` b blockJoinTail :: Block n e O -> n O C -> Block n e C blockJoinTail (BlockCO f b) t = BlockCC f b t blockJoinTail b t = b `cat` BlockOC BNil t blockJoin :: n C O -> Block n O O -> n O C -> Block n C C blockJoin f b t = BlockCC f b t blockAppend :: Block n e O -> Block n O x -> Block n e x blockAppend = cat blockConcat :: [Block n O O] -> Block n O O blockConcat = foldr blockAppend emptyBlock -- Taking apart firstNode :: Block n C x -> n C O firstNode (BlockCO n _) = n firstNode (BlockCC n _ _) = n lastNode :: Block n x C -> n O C lastNode (BlockOC _ n) = n lastNode (BlockCC _ _ n) = n blockSplitHead :: Block n C x -> (n C O, Block n O x) blockSplitHead (BlockCO n b) = (n, b) blockSplitHead (BlockCC n b t) = (n, BlockOC b t) blockSplitTail :: Block n e C -> (Block n e O, n O C) blockSplitTail (BlockOC b n) = (b, n) blockSplitTail (BlockCC f b t) = (BlockCO f b, t) -- | Split a closed block into its entry node, open middle block, and -- exit node. blockSplit :: Block n C C -> (n C O, Block n O O, n O C) blockSplit (BlockCC f b t) = (f, b, t) blockToList :: Block n O O -> [n O O] blockToList b = go b [] where go :: Block n O O -> [n O O] -> [n O O] go BNil r = r go (BMiddle n) r = n : r go (BCat b1 b2) r = go b1 $! 
go b2 r go (BSnoc b1 n) r = go b1 (n:r) go (BCons n b1) r = n : go b1 r blockFromList :: [n O O] -> Block n O O blockFromList = foldr BCons BNil -- Modifying replaceFirstNode :: Block n C x -> n C O -> Block n C x replaceFirstNode (BlockCO _ b) f = BlockCO f b replaceFirstNode (BlockCC _ b n) f = BlockCC f b n replaceLastNode :: Block n x C -> n O C -> Block n x C replaceLastNode (BlockOC b _) n = BlockOC b n replaceLastNode (BlockCC l b _) n = BlockCC l b n -- ----------------------------------------------------------------------------- -- General concatenation cat :: Block n e O -> Block n O x -> Block n e x cat x y = case x of BNil -> y BlockCO l b1 -> case y of BlockOC b2 n -> (BlockCC l $! (b1 `cat` b2)) n BNil -> x BMiddle _ -> BlockCO l $! (b1 `cat` y) BCat{} -> BlockCO l $! (b1 `cat` y) BSnoc{} -> BlockCO l $! (b1 `cat` y) BCons{} -> BlockCO l $! (b1 `cat` y) BMiddle n -> case y of BlockOC b2 n2 -> (BlockOC $! (x `cat` b2)) n2 BNil -> x BMiddle{} -> BCons n y BCat{} -> BCons n y BSnoc{} -> BCons n y BCons{} -> BCons n y BCat{} -> case y of BlockOC b3 n2 -> (BlockOC $! (x `cat` b3)) n2 BNil -> x BMiddle n -> BSnoc x n BCat{} -> BCat x y BSnoc{} -> BCat x y BCons{} -> BCat x y BSnoc{} -> case y of BlockOC b2 n2 -> (BlockOC $! (x `cat` b2)) n2 BNil -> x BMiddle n -> BSnoc x n BCat{} -> BCat x y BSnoc{} -> BCat x y BCons{} -> BCat x y BCons{} -> case y of BlockOC b2 n2 -> (BlockOC $! (x `cat` b2)) n2 BNil -> x BMiddle n -> BSnoc x n BCat{} -> BCat x y BSnoc{} -> BCat x y BCons{} -> BCat x y -- ----------------------------------------------------------------------------- -- Mapping -- | map a function over the nodes of a 'Block' mapBlock :: (forall e x. 
n e x -> n' e x) -> Block n e x -> Block n' e x mapBlock f (BlockCO n b ) = BlockCO (f n) (mapBlock f b) mapBlock f (BlockOC b n) = BlockOC (mapBlock f b) (f n) mapBlock f (BlockCC n b m) = BlockCC (f n) (mapBlock f b) (f m) mapBlock _ BNil = BNil mapBlock f (BMiddle n) = BMiddle (f n) mapBlock f (BCat b1 b2) = BCat (mapBlock f b1) (mapBlock f b2) mapBlock f (BSnoc b n) = BSnoc (mapBlock f b) (f n) mapBlock f (BCons n b) = BCons (f n) (mapBlock f b) -- | A strict 'mapBlock' mapBlock' :: (forall e x. n e x -> n' e x) -> (Block n e x -> Block n' e x) mapBlock' f = mapBlock3' (f, f, f) -- | map over a block, with different functions to apply to first nodes, -- middle nodes and last nodes respectively. The map is strict. -- mapBlock3' :: forall n n' e x . ( n C O -> n' C O , n O O -> n' O O, n O C -> n' O C) -> Block n e x -> Block n' e x mapBlock3' (f, m, l) b = go b where go :: forall e x . Block n e x -> Block n' e x go (BlockOC b y) = (BlockOC $! go b) $! l y go (BlockCO x b) = (BlockCO $! f x) $! (go b) go (BlockCC x b y) = ((BlockCC $! f x) $! go b) $! (l y) go BNil = BNil go (BMiddle n) = BMiddle $! m n go (BCat x y) = (BCat $! go x) $! (go y) go (BSnoc x n) = (BSnoc $! go x) $! (m n) go (BCons n x) = (BCons $! m n) $! (go x) -- ----------------------------------------------------------------------------- -- Folding -- | Fold a function over every node in a block, forward or backward. -- The fold function must be polymorphic in the shape of the nodes. foldBlockNodesF3 :: forall n a b c . ( n C O -> a -> b , n O O -> b -> b , n O C -> b -> c) -> (forall e x . Block n e x -> IndexedCO e a b -> IndexedCO x c b) foldBlockNodesF :: forall n a . (forall e x . n e x -> a -> a) -> (forall e x . Block n e x -> IndexedCO e a a -> IndexedCO x a a) foldBlockNodesB3 :: forall n a b c . ( n C O -> b -> c , n O O -> b -> b , n O C -> a -> b) -> (forall e x . Block n e x -> IndexedCO x a b -> IndexedCO e c b) foldBlockNodesB :: forall n a . (forall e x . 
n e x -> a -> a) -> (forall e x . Block n e x -> IndexedCO x a a -> IndexedCO e a a) foldBlockNodesF3 (ff, fm, fl) = block where block :: forall e x . Block n e x -> IndexedCO e a b -> IndexedCO x c b block (BlockCO f b ) = ff f `cat` block b block (BlockCC f b l) = ff f `cat` block b `cat` fl l block (BlockOC b l) = block b `cat` fl l block BNil = id block (BMiddle node) = fm node block (b1 `BCat` b2) = block b1 `cat` block b2 block (b1 `BSnoc` n) = block b1 `cat` fm n block (n `BCons` b2) = fm n `cat` block b2 cat :: forall a b c. (a -> b) -> (b -> c) -> a -> c cat f f' = f' . f foldBlockNodesF f = foldBlockNodesF3 (f, f, f) foldBlockNodesB3 (ff, fm, fl) = block where block :: forall e x . Block n e x -> IndexedCO x a b -> IndexedCO e c b block (BlockCO f b ) = ff f `cat` block b block (BlockCC f b l) = ff f `cat` block b `cat` fl l block (BlockOC b l) = block b `cat` fl l block BNil = id block (BMiddle node) = fm node block (b1 `BCat` b2) = block b1 `cat` block b2 block (b1 `BSnoc` n) = block b1 `cat` fm n block (n `BCons` b2) = fm n `cat` block b2 cat :: forall a b c. (b -> c) -> (a -> b) -> a -> c cat f f' = f . f' foldBlockNodesB f = foldBlockNodesB3 (f, f, f) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Dataflow/Collections.hs0000644000000000000000000001333414472400112022726 0ustar0000000000000000{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE TypeFamilies #-} module GHC.Cmm.Dataflow.Collections ( IsSet(..) , setInsertList, setDeleteList, setUnions , IsMap(..) 
, mapInsertList, mapDeleteList, mapUnions , UniqueMap, UniqueSet ) where import GHC.Prelude import qualified Data.IntMap.Strict as M import qualified Data.IntSet as S import Data.List (foldl1') class IsSet set where type ElemOf set setNull :: set -> Bool setSize :: set -> Int setMember :: ElemOf set -> set -> Bool setEmpty :: set setSingleton :: ElemOf set -> set setInsert :: ElemOf set -> set -> set setDelete :: ElemOf set -> set -> set setUnion :: set -> set -> set setDifference :: set -> set -> set setIntersection :: set -> set -> set setIsSubsetOf :: set -> set -> Bool setFilter :: (ElemOf set -> Bool) -> set -> set setFoldl :: (b -> ElemOf set -> b) -> b -> set -> b setFoldr :: (ElemOf set -> b -> b) -> b -> set -> b setElems :: set -> [ElemOf set] setFromList :: [ElemOf set] -> set -- Helper functions for IsSet class setInsertList :: IsSet set => [ElemOf set] -> set -> set setInsertList keys set = foldl' (flip setInsert) set keys setDeleteList :: IsSet set => [ElemOf set] -> set -> set setDeleteList keys set = foldl' (flip setDelete) set keys setUnions :: IsSet set => [set] -> set setUnions [] = setEmpty setUnions sets = foldl1' setUnion sets class IsMap map where type KeyOf map mapNull :: map a -> Bool mapSize :: map a -> Int mapMember :: KeyOf map -> map a -> Bool mapLookup :: KeyOf map -> map a -> Maybe a mapFindWithDefault :: a -> KeyOf map -> map a -> a mapEmpty :: map a mapSingleton :: KeyOf map -> a -> map a mapInsert :: KeyOf map -> a -> map a -> map a mapInsertWith :: (a -> a -> a) -> KeyOf map -> a -> map a -> map a mapDelete :: KeyOf map -> map a -> map a mapAlter :: (Maybe a -> Maybe a) -> KeyOf map -> map a -> map a mapAdjust :: (a -> a) -> KeyOf map -> map a -> map a mapUnion :: map a -> map a -> map a mapUnionWithKey :: (KeyOf map -> a -> a -> a) -> map a -> map a -> map a mapDifference :: map a -> map a -> map a mapIntersection :: map a -> map a -> map a mapIsSubmapOf :: Eq a => map a -> map a -> Bool mapMap :: (a -> b) -> map a -> map b 
mapMapWithKey :: (KeyOf map -> a -> b) -> map a -> map b mapFoldl :: (b -> a -> b) -> b -> map a -> b mapFoldr :: (a -> b -> b) -> b -> map a -> b mapFoldlWithKey :: (b -> KeyOf map -> a -> b) -> b -> map a -> b mapFoldMapWithKey :: Monoid m => (KeyOf map -> a -> m) -> map a -> m mapFilter :: (a -> Bool) -> map a -> map a mapFilterWithKey :: (KeyOf map -> a -> Bool) -> map a -> map a mapElems :: map a -> [a] mapKeys :: map a -> [KeyOf map] mapToList :: map a -> [(KeyOf map, a)] mapFromList :: [(KeyOf map, a)] -> map a mapFromListWith :: (a -> a -> a) -> [(KeyOf map,a)] -> map a -- Helper functions for IsMap class mapInsertList :: IsMap map => [(KeyOf map, a)] -> map a -> map a mapInsertList assocs map = foldl' (flip (uncurry mapInsert)) map assocs mapDeleteList :: IsMap map => [KeyOf map] -> map a -> map a mapDeleteList keys map = foldl' (flip mapDelete) map keys mapUnions :: IsMap map => [map a] -> map a mapUnions [] = mapEmpty mapUnions maps = foldl1' mapUnion maps ----------------------------------------------------------------------------- -- Basic instances ----------------------------------------------------------------------------- newtype UniqueSet = US S.IntSet deriving (Eq, Ord, Show, Semigroup, Monoid) instance IsSet UniqueSet where type ElemOf UniqueSet = Int setNull (US s) = S.null s setSize (US s) = S.size s setMember k (US s) = S.member k s setEmpty = US S.empty setSingleton k = US (S.singleton k) setInsert k (US s) = US (S.insert k s) setDelete k (US s) = US (S.delete k s) setUnion (US x) (US y) = US (S.union x y) setDifference (US x) (US y) = US (S.difference x y) setIntersection (US x) (US y) = US (S.intersection x y) setIsSubsetOf (US x) (US y) = S.isSubsetOf x y setFilter f (US s) = US (S.filter f s) setFoldl k z (US s) = S.foldl' k z s setFoldr k z (US s) = S.foldr k z s setElems (US s) = S.elems s setFromList ks = US (S.fromList ks) newtype UniqueMap v = UM (M.IntMap v) deriving (Eq, Ord, Show, Functor, Foldable, Traversable) instance IsMap 
UniqueMap where type KeyOf UniqueMap = Int mapNull (UM m) = M.null m mapSize (UM m) = M.size m mapMember k (UM m) = M.member k m mapLookup k (UM m) = M.lookup k m mapFindWithDefault def k (UM m) = M.findWithDefault def k m mapEmpty = UM M.empty mapSingleton k v = UM (M.singleton k v) mapInsert k v (UM m) = UM (M.insert k v m) mapInsertWith f k v (UM m) = UM (M.insertWith f k v m) mapDelete k (UM m) = UM (M.delete k m) mapAlter f k (UM m) = UM (M.alter f k m) mapAdjust f k (UM m) = UM (M.adjust f k m) mapUnion (UM x) (UM y) = UM (M.union x y) mapUnionWithKey f (UM x) (UM y) = UM (M.unionWithKey f x y) mapDifference (UM x) (UM y) = UM (M.difference x y) mapIntersection (UM x) (UM y) = UM (M.intersection x y) mapIsSubmapOf (UM x) (UM y) = M.isSubmapOf x y mapMap f (UM m) = UM (M.map f m) mapMapWithKey f (UM m) = UM (M.mapWithKey f m) mapFoldl k z (UM m) = M.foldl' k z m mapFoldr k z (UM m) = M.foldr k z m mapFoldlWithKey k z (UM m) = M.foldlWithKey' k z m mapFoldMapWithKey f (UM m) = M.foldMapWithKey f m {-# INLINEABLE mapFilter #-} mapFilter f (UM m) = UM (M.filter f m) {-# INLINEABLE mapFilterWithKey #-} mapFilterWithKey f (UM m) = UM (M.filterWithKey f m) mapElems (UM m) = M.elems m mapKeys (UM m) = M.keys m {-# INLINEABLE mapToList #-} mapToList (UM m) = M.toList m mapFromList assocs = UM (M.fromList assocs) mapFromListWith f assocs = UM (M.fromListWith f assocs) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Dataflow/Graph.hs0000644000000000000000000001570314472400112021513 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} module GHC.Cmm.Dataflow.Graph ( Body , Graph , Graph'(..) , NonLocal(..) 
, addBlock , bodyList , emptyBody , labelsDefined , mapGraph , mapGraphBlocks , revPostorderFrom ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Cmm.Dataflow.Label import GHC.Cmm.Dataflow.Block import GHC.Cmm.Dataflow.Collections import Data.Kind -- | A (possibly empty) collection of closed/closed blocks type Body n = LabelMap (Block n C C) -- | @Body@ abstracted over @block@ type Body' block (n :: Extensibility -> Extensibility -> Type) = LabelMap (block n C C) ------------------------------- -- | Gives access to the anchor points for -- nonlocal edges as well as the edges themselves class NonLocal thing where entryLabel :: thing C x -> Label -- ^ The label of a first node or block successors :: thing e C -> [Label] -- ^ Gives control-flow successors instance NonLocal n => NonLocal (Block n) where entryLabel (BlockCO f _) = entryLabel f entryLabel (BlockCC f _ _) = entryLabel f successors (BlockOC _ n) = successors n successors (BlockCC _ _ n) = successors n emptyBody :: Body' block n emptyBody = mapEmpty bodyList :: Body' block n -> [(Label,block n C C)] bodyList body = mapToList body addBlock :: (NonLocal block, HasDebugCallStack) => block C C -> LabelMap (block C C) -> LabelMap (block C C) addBlock block body = mapAlter add lbl body where lbl = entryLabel block add Nothing = Just block add _ = error $ "duplicate label " ++ show lbl ++ " in graph" -- --------------------------------------------------------------------------- -- Graph -- | A control-flow graph, which may take any of four shapes (O/O, -- O/C, C/O, C/C). A graph open at the entry has a single, -- distinguished, anonymous entry point; if a graph is closed at the -- entry, its entry point(s) are supplied by a context. type Graph = Graph' Block -- | @Graph'@ is abstracted over the block type, so that we can build -- graphs of annotated blocks for example (Compiler.Hoopl.Dataflow -- needs this). 
data Graph' block (n :: Extensibility -> Extensibility -> Type) e x where GNil :: Graph' block n O O GUnit :: block n O O -> Graph' block n O O GMany :: MaybeO e (block n O C) -> Body' block n -> MaybeO x (block n C O) -> Graph' block n e x -- ----------------------------------------------------------------------------- -- Mapping over graphs -- | Maps over all nodes in a graph. mapGraph :: (forall e x. n e x -> n' e x) -> Graph n e x -> Graph n' e x mapGraph f = mapGraphBlocks (mapBlock f) -- | Function 'mapGraphBlocks' enables a change of representation of blocks, -- nodes, or both. It lifts a polymorphic block transform into a polymorphic -- graph transform. When the block representation stabilizes, a similar -- function should be provided for blocks. mapGraphBlocks :: forall block n block' n' e x . (forall e x . block n e x -> block' n' e x) -> (Graph' block n e x -> Graph' block' n' e x) mapGraphBlocks f = map where map :: Graph' block n e x -> Graph' block' n' e x map GNil = GNil map (GUnit b) = GUnit (f b) map (GMany e b x) = GMany (fmap f e) (mapMap f b) (fmap f x) -- ----------------------------------------------------------------------------- -- Extracting Labels from graphs labelsDefined :: forall block n e x . NonLocal (block n) => Graph' block n e x -> LabelSet labelsDefined GNil = setEmpty labelsDefined (GUnit{}) = setEmpty labelsDefined (GMany _ body x) = mapFoldlWithKey addEntry (exitLabel x) body where addEntry :: forall a. LabelSet -> ElemOf LabelSet -> a -> LabelSet addEntry labels label _ = setInsert label labels exitLabel :: MaybeO x (block n C O) -> LabelSet exitLabel NothingO = setEmpty exitLabel (JustO b) = setSingleton (entryLabel b) ---------------------------------------------------------------- -- | Returns a list of blocks reachable from the provided Labels in the reverse -- postorder. -- -- This is the most important traversal over this data structure. 
It drops -- unreachable code and puts blocks in an order that is good for solving forward -- dataflow problems quickly. The reverse order is good for solving backward -- dataflow problems quickly. The forward order is also reasonably good for -- emitting instructions, except that it will not usually exploit Forrest -- Baskett's trick of eliminating the unconditional branch from a loop. For -- that you would need a more serious analysis, probably based on dominators, to -- identify loop headers. -- -- For forward analyses we want reverse postorder visitation, consider: -- @ -- A -> [B,C] -- B -> D -- C -> D -- @ -- Postorder: [D, C, B, A] (or [D, B, C, A]) -- Reverse postorder: [A, B, C, D] (or [A, C, B, D]) -- This matters for, e.g., forward analysis, because we want to analyze *both* -- B and C before we analyze D. revPostorderFrom :: forall block. (NonLocal block) => LabelMap (block C C) -> Label -> [block C C] revPostorderFrom graph start = go start_worklist setEmpty [] where start_worklist = lookup_for_descend start Nil -- To compute the postorder we need to "visit" a block (mark as done) *after* -- visiting all its successors. So we need to know whether we already -- processed all successors of each block (and @NonLocal@ allows arbitrary -- many successors). So we use an explicit stack with an extra bit -- of information: -- - @ConsTodo@ means to explore the block if it wasn't visited before -- - @ConsMark@ means that all successors were already done and we can add -- the block to the result. -- -- NOTE: We add blocks to the result list in postorder, but we *prepend* -- them (i.e., we use @(:)@), which means that the final list is in reverse -- postorder. 
go :: DfsStack (block C C) -> LabelSet -> [block C C] -> [block C C] go Nil !_ !result = result go (ConsMark block rest) !wip_or_done !result = go rest wip_or_done (block : result) go (ConsTodo block rest) !wip_or_done !result | entryLabel block `setMember` wip_or_done = go rest wip_or_done result | otherwise = let new_worklist = foldr lookup_for_descend (ConsMark block rest) (successors block) in go new_worklist (setInsert (entryLabel block) wip_or_done) result lookup_for_descend :: Label -> DfsStack (block C C) -> DfsStack (block C C) lookup_for_descend label wl | Just b <- mapLookup label graph = ConsTodo b wl | otherwise = error $ "Label that doesn't have a block?! " ++ show label data DfsStack a = ConsTodo a (DfsStack a) | ConsMark a (DfsStack a) | Nil ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Dataflow/Label.hs0000644000000000000000000001141514472400112021465 0ustar0000000000000000{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE TypeFamilies #-} module GHC.Cmm.Dataflow.Label ( Label , LabelMap , LabelSet , FactBase , lookupFact , mkHooplLabel ) where import GHC.Prelude import GHC.Utils.Outputable -- TODO: This should really just use GHC's Unique and Uniq{Set,FM} import GHC.Cmm.Dataflow.Collections import GHC.Types.Unique (Uniquable(..)) import GHC.Data.TrieMap ----------------------------------------------------------------------------- -- Label ----------------------------------------------------------------------------- newtype Label = Label { lblToUnique :: Int } deriving (Eq, Ord) mkHooplLabel :: Int -> Label mkHooplLabel = Label instance Show Label where show (Label n) = "L" ++ show n instance Uniquable Label where getUnique label = getUnique (lblToUnique label) instance Outputable Label where ppr label = ppr (getUnique label) instance OutputableP env Label where pdoc _ l = ppr l 
----------------------------------------------------------------------------- -- LabelSet newtype LabelSet = LS UniqueSet deriving (Eq, Ord, Show, Monoid, Semigroup) instance IsSet LabelSet where type ElemOf LabelSet = Label setNull (LS s) = setNull s setSize (LS s) = setSize s setMember (Label k) (LS s) = setMember k s setEmpty = LS setEmpty setSingleton (Label k) = LS (setSingleton k) setInsert (Label k) (LS s) = LS (setInsert k s) setDelete (Label k) (LS s) = LS (setDelete k s) setUnion (LS x) (LS y) = LS (setUnion x y) setDifference (LS x) (LS y) = LS (setDifference x y) setIntersection (LS x) (LS y) = LS (setIntersection x y) setIsSubsetOf (LS x) (LS y) = setIsSubsetOf x y setFilter f (LS s) = LS (setFilter (f . mkHooplLabel) s) setFoldl k z (LS s) = setFoldl (\a v -> k a (mkHooplLabel v)) z s setFoldr k z (LS s) = setFoldr (\v a -> k (mkHooplLabel v) a) z s setElems (LS s) = map mkHooplLabel (setElems s) setFromList ks = LS (setFromList (map lblToUnique ks)) ----------------------------------------------------------------------------- -- LabelMap newtype LabelMap v = LM (UniqueMap v) deriving (Eq, Ord, Show, Functor, Foldable, Traversable) instance IsMap LabelMap where type KeyOf LabelMap = Label mapNull (LM m) = mapNull m mapSize (LM m) = mapSize m mapMember (Label k) (LM m) = mapMember k m mapLookup (Label k) (LM m) = mapLookup k m mapFindWithDefault def (Label k) (LM m) = mapFindWithDefault def k m mapEmpty = LM mapEmpty mapSingleton (Label k) v = LM (mapSingleton k v) mapInsert (Label k) v (LM m) = LM (mapInsert k v m) mapInsertWith f (Label k) v (LM m) = LM (mapInsertWith f k v m) mapDelete (Label k) (LM m) = LM (mapDelete k m) mapAlter f (Label k) (LM m) = LM (mapAlter f k m) mapAdjust f (Label k) (LM m) = LM (mapAdjust f k m) mapUnion (LM x) (LM y) = LM (mapUnion x y) mapUnionWithKey f (LM x) (LM y) = LM (mapUnionWithKey (f . 
mkHooplLabel) x y) mapDifference (LM x) (LM y) = LM (mapDifference x y) mapIntersection (LM x) (LM y) = LM (mapIntersection x y) mapIsSubmapOf (LM x) (LM y) = mapIsSubmapOf x y mapMap f (LM m) = LM (mapMap f m) mapMapWithKey f (LM m) = LM (mapMapWithKey (f . mkHooplLabel) m) mapFoldl k z (LM m) = mapFoldl k z m mapFoldr k z (LM m) = mapFoldr k z m mapFoldlWithKey k z (LM m) = mapFoldlWithKey (\a v -> k a (mkHooplLabel v)) z m mapFoldMapWithKey f (LM m) = mapFoldMapWithKey (\k v -> f (mkHooplLabel k) v) m {-# INLINEABLE mapFilter #-} mapFilter f (LM m) = LM (mapFilter f m) {-# INLINEABLE mapFilterWithKey #-} mapFilterWithKey f (LM m) = LM (mapFilterWithKey (f . mkHooplLabel) m) mapElems (LM m) = mapElems m mapKeys (LM m) = map mkHooplLabel (mapKeys m) {-# INLINEABLE mapToList #-} mapToList (LM m) = [(mkHooplLabel k, v) | (k, v) <- mapToList m] mapFromList assocs = LM (mapFromList [(lblToUnique k, v) | (k, v) <- assocs]) mapFromListWith f assocs = LM (mapFromListWith f [(lblToUnique k, v) | (k, v) <- assocs]) ----------------------------------------------------------------------------- -- Instances instance Outputable LabelSet where ppr = ppr . setElems instance Outputable a => Outputable (LabelMap a) where ppr = ppr . mapToList instance OutputableP env a => OutputableP env (LabelMap a) where pdoc env = pdoc env . 
mapToList instance TrieMap LabelMap where type Key LabelMap = Label emptyTM = mapEmpty lookupTM k m = mapLookup k m alterTM k f m = mapAlter f k m foldTM k m z = mapFoldr k z m mapTM f m = mapMap f m filterTM f m = mapFilter f m ----------------------------------------------------------------------------- -- FactBase type FactBase f = LabelMap f lookupFact :: Label -> FactBase f -> Maybe f lookupFact = mapLookup ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Expr.hs0000644000000000000000000005604414472400112017632 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE UndecidableInstances #-} module GHC.Cmm.Expr ( CmmExpr(..), cmmExprType, cmmExprWidth, cmmExprAlignment, maybeInvertCmmExpr , CmmReg(..), cmmRegType, cmmRegWidth , CmmLit(..), cmmLitType , AlignmentSpec(..) , LocalReg(..), localRegType , GlobalReg(..), isArgReg, globalRegType , spReg, hpReg, spLimReg, hpLimReg, nodeReg , currentTSOReg, currentNurseryReg, hpAllocReg, cccsReg , node, baseReg , VGcPtr(..) , DefinerOfRegs, UserOfRegs , foldRegsDefd, foldRegsUsed , foldLocalRegsDefd, foldLocalRegsUsed , RegSet, LocalRegSet, GlobalRegSet , emptyRegSet, elemRegSet, extendRegSet, deleteFromRegSet, mkRegSet , plusRegSet, minusRegSet, timesRegSet, sizeRegSet, nullRegSet , regSetToList , Area(..) , module GHC.Cmm.MachOp , module GHC.Cmm.Type ) where import GHC.Prelude import GHC.Platform import GHC.Cmm.BlockId import GHC.Cmm.CLabel import GHC.Cmm.MachOp import GHC.Cmm.Type import GHC.Utils.Panic (panic) import GHC.Utils.Outputable import GHC.Types.Unique import Data.Set (Set) import qualified Data.Set as Set import GHC.Types.Basic (Alignment, mkAlignment, alignmentOf) ----------------------------------------------------------------------------- -- CmmExpr -- An expression. Expressions have no side effects. 
----------------------------------------------------------------------------- data CmmExpr = CmmLit !CmmLit -- Literal | CmmLoad !CmmExpr !CmmType !AlignmentSpec -- Read memory location | CmmReg !CmmReg -- Contents of register | CmmMachOp MachOp [CmmExpr] -- Machine operation (+, -, *, etc.) | CmmStackSlot Area {-# UNPACK #-} !Int -- Addressing expression of a stack slot -- See Note [CmmStackSlot aliasing] | CmmRegOff !CmmReg !Int -- CmmRegOff reg i -- ** is shorthand only, meaning ** -- CmmMachOp (MO_Add rep) [x, CmmLit (CmmInt (fromIntegral i) rep)] -- where rep = typeWidth (cmmRegType reg) deriving Show instance Eq CmmExpr where -- Equality ignores the types CmmLit l1 == CmmLit l2 = l1==l2 CmmLoad e1 _ _ == CmmLoad e2 _ _ = e1==e2 CmmReg r1 == CmmReg r2 = r1==r2 CmmRegOff r1 i1 == CmmRegOff r2 i2 = r1==r2 && i1==i2 CmmMachOp op1 es1 == CmmMachOp op2 es2 = op1==op2 && es1==es2 CmmStackSlot a1 i1 == CmmStackSlot a2 i2 = a1==a2 && i1==i2 _e1 == _e2 = False data AlignmentSpec = NaturallyAligned | Unaligned deriving (Eq, Ord, Show) data CmmReg = CmmLocal {-# UNPACK #-} !LocalReg | CmmGlobal GlobalReg deriving( Eq, Ord, Show ) -- | A stack area is either the stack slot where a variable is spilled -- or the stack space where function arguments and results are passed. data Area = Old -- See Note [Old Area] | Young {-# UNPACK #-} !BlockId -- Invariant: must be a continuation BlockId -- See Note [Continuation BlockIds] in GHC.Cmm.Node. deriving (Eq, Ord, Show) {- Note [Old Area] ~~~~~~~~~~~~~~~~~~ There is a single call area 'Old', allocated at the extreme old end of the stack frame (ie just younger than the return address) which holds: * incoming (overflow) parameters, * outgoing (overflow) parameter to tail calls, * outgoing (overflow) result values * the update frame (if any) Its size is the max of all these requirements. On entry, the stack pointer will point to the youngest incoming parameter, which is not necessarily at the young end of the Old area. 
End of note -} {- Note [CmmStackSlot aliasing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When do two CmmStackSlots alias? - T[old+N] aliases with U[young(L)+M] for all T, U, L, N and M - T[old+N] aliases with U[old+M] only if the areas actually overlap Or more informally, different Areas may overlap with each other. An alternative semantics, that we previously had, was that different Areas do not overlap. The problem that lead to redefining the semantics of stack areas is described below. e.g. if we had x = Sp[old + 8] y = Sp[old + 16] Sp[young(L) + 8] = L Sp[young(L) + 16] = y Sp[young(L) + 24] = x call f() returns to L if areas semantically do not overlap, then we might optimise this to Sp[young(L) + 8] = L Sp[young(L) + 16] = Sp[old + 8] Sp[young(L) + 24] = Sp[old + 16] call f() returns to L and now young(L) cannot be allocated at the same place as old, and we are doomed to use more stack. - old+8 conflicts with young(L)+8 - old+16 conflicts with young(L)+16 and young(L)+8 so young(L)+8 == old+24 and we get Sp[-8] = L Sp[-16] = Sp[8] Sp[-24] = Sp[0] Sp -= 24 call f() returns to L However, if areas are defined to be "possibly overlapping" in the semantics, then we cannot commute any loads/stores of old with young(L), and we will be able to re-use both old+8 and old+16 for young(L). x = Sp[8] y = Sp[0] Sp[8] = L Sp[0] = y Sp[-8] = x Sp = Sp - 8 call f() returns to L Now, the assignments of y go away, x = Sp[8] Sp[8] = L Sp[-8] = x Sp = Sp - 8 call f() returns to L -} data CmmLit = CmmInt !Integer !Width -- Interpretation: the 2's complement representation of the value -- is truncated to the specified size. This is easier than trying -- to keep the value within range, because we don't know whether -- it will be used as a signed or unsigned value (the CmmType doesn't -- distinguish between signed & unsigned). 
| CmmFloat Rational !Width | CmmVec [CmmLit] -- Vector literal | CmmLabel CLabel -- Address of label | CmmLabelOff CLabel !Int -- Address of label + byte offset -- Due to limitations in the C backend, the following -- MUST ONLY be used inside the info table indicated by label2 -- (label2 must be the info label), and label1 must be an -- SRT, a slow entrypoint or a large bitmap (see the Mangler) -- Don't use it at all unless tablesNextToCode. -- It is also used inside the NCG during when generating -- position-independent code. | CmmLabelDiffOff CLabel CLabel !Int !Width -- label1 - label2 + offset -- In an expression, the width just has the effect of MO_SS_Conv -- from wordWidth to the desired width. -- -- In a static literal, the supported Widths depend on the -- architecture: wordWidth is supported on all -- architectures. Additionally W32 is supported on x86_64 when -- using the small memory model. | CmmBlock {-# UNPACK #-} !BlockId -- Code label -- Invariant: must be a continuation BlockId -- See Note [Continuation BlockIds] in GHC.Cmm.Node. | CmmHighStackMark -- A late-bound constant that stands for the max -- #bytes of stack space used during a procedure. 
-- During the stack-layout pass, CmmHighStackMark -- is replaced by a CmmInt for the actual number -- of bytes used deriving (Eq, Show) instance Outputable CmmLit where ppr (CmmInt n w) = text "CmmInt" <+> ppr n <+> ppr w ppr (CmmFloat n w) = text "CmmFloat" <+> text (show n) <+> ppr w ppr (CmmVec xs) = text "CmmVec" <+> ppr xs ppr (CmmLabel _) = text "CmmLabel" ppr (CmmLabelOff _ _) = text "CmmLabelOff" ppr (CmmLabelDiffOff _ _ _ _) = text "CmmLabelDiffOff" ppr (CmmBlock blk) = text "CmmBlock" <+> ppr blk ppr CmmHighStackMark = text "CmmHighStackMark" cmmExprType :: Platform -> CmmExpr -> CmmType cmmExprType platform = \case (CmmLit lit) -> cmmLitType platform lit (CmmLoad _ rep _) -> rep (CmmReg reg) -> cmmRegType platform reg (CmmMachOp op args) -> machOpResultType platform op (map (cmmExprType platform) args) (CmmRegOff reg _) -> cmmRegType platform reg (CmmStackSlot _ _) -> bWord platform -- an address -- Careful though: what is stored at the stack slot may be bigger than -- an address cmmLitType :: Platform -> CmmLit -> CmmType cmmLitType platform = \case (CmmInt _ width) -> cmmBits width (CmmFloat _ width) -> cmmFloat width (CmmVec []) -> panic "cmmLitType: CmmVec []" (CmmVec (l:ls)) -> let ty = cmmLitType platform l in if all (`cmmEqType` ty) (map (cmmLitType platform) ls) then cmmVec (1+length ls) ty else panic "cmmLitType: CmmVec" (CmmLabel lbl) -> cmmLabelType platform lbl (CmmLabelOff lbl _) -> cmmLabelType platform lbl (CmmLabelDiffOff _ _ _ width) -> cmmBits width (CmmBlock _) -> bWord platform (CmmHighStackMark) -> bWord platform cmmLabelType :: Platform -> CLabel -> CmmType cmmLabelType platform lbl | isGcPtrLabel lbl = gcWord platform | otherwise = bWord platform cmmExprWidth :: Platform -> CmmExpr -> Width cmmExprWidth platform e = typeWidth (cmmExprType platform e) -- | Returns an alignment in bytes of a CmmExpr when it's a statically -- known integer constant, otherwise returns an alignment of 1 byte. 
-- The caller is responsible for using with a sensible CmmExpr -- argument. cmmExprAlignment :: CmmExpr -> Alignment cmmExprAlignment (CmmLit (CmmInt intOff _)) = alignmentOf (fromInteger intOff) cmmExprAlignment _ = mkAlignment 1 -------- --- Negation for conditional branches maybeInvertCmmExpr :: CmmExpr -> Maybe CmmExpr maybeInvertCmmExpr (CmmMachOp op args) = do op' <- maybeInvertComparison op return (CmmMachOp op' args) maybeInvertCmmExpr _ = Nothing ----------------------------------------------------------------------------- -- Local registers ----------------------------------------------------------------------------- data LocalReg = LocalReg {-# UNPACK #-} !Unique !CmmType -- ^ Parameters: -- 1. Identifier -- 2. Type deriving Show instance Eq LocalReg where (LocalReg u1 _) == (LocalReg u2 _) = u1 == u2 -- This is non-deterministic but we do not currently support deterministic -- code-generation. See Note [Unique Determinism and code generation] -- See Note [No Ord for Unique] instance Ord LocalReg where compare (LocalReg u1 _) (LocalReg u2 _) = nonDetCmpUnique u1 u2 instance Uniquable LocalReg where getUnique (LocalReg uniq _) = uniq cmmRegType :: Platform -> CmmReg -> CmmType cmmRegType _ (CmmLocal reg) = localRegType reg cmmRegType platform (CmmGlobal reg) = globalRegType platform reg cmmRegWidth :: Platform -> CmmReg -> Width cmmRegWidth platform = typeWidth . cmmRegType platform localRegType :: LocalReg -> CmmType localRegType (LocalReg _ rep) = rep ----------------------------------------------------------------------------- -- Register-use information for expressions and other types ----------------------------------------------------------------------------- -- | Sets of registers -- These are used for dataflow facts, and a common operation is taking -- the union of two RegSets and then asking whether the union is the -- same as one of the inputs. 
UniqSet isn't good here, because -- sizeUniqSet is O(n) whereas Set.size is O(1), so we use ordinary -- Sets. type RegSet r = Set r type LocalRegSet = RegSet LocalReg type GlobalRegSet = RegSet GlobalReg emptyRegSet :: RegSet r nullRegSet :: RegSet r -> Bool elemRegSet :: Ord r => r -> RegSet r -> Bool extendRegSet :: Ord r => RegSet r -> r -> RegSet r deleteFromRegSet :: Ord r => RegSet r -> r -> RegSet r mkRegSet :: Ord r => [r] -> RegSet r minusRegSet, plusRegSet, timesRegSet :: Ord r => RegSet r -> RegSet r -> RegSet r sizeRegSet :: RegSet r -> Int regSetToList :: RegSet r -> [r] emptyRegSet = Set.empty nullRegSet = Set.null elemRegSet = Set.member extendRegSet = flip Set.insert deleteFromRegSet = flip Set.delete mkRegSet = Set.fromList minusRegSet = Set.difference plusRegSet = Set.union timesRegSet = Set.intersection sizeRegSet = Set.size regSetToList = Set.toList class Ord r => UserOfRegs r a where foldRegsUsed :: Platform -> (b -> r -> b) -> b -> a -> b foldLocalRegsUsed :: UserOfRegs LocalReg a => Platform -> (b -> LocalReg -> b) -> b -> a -> b foldLocalRegsUsed = foldRegsUsed class Ord r => DefinerOfRegs r a where foldRegsDefd :: Platform -> (b -> r -> b) -> b -> a -> b foldLocalRegsDefd :: DefinerOfRegs LocalReg a => Platform -> (b -> LocalReg -> b) -> b -> a -> b foldLocalRegsDefd = foldRegsDefd instance UserOfRegs LocalReg CmmReg where foldRegsUsed _ f z (CmmLocal reg) = f z reg foldRegsUsed _ _ z (CmmGlobal _) = z instance DefinerOfRegs LocalReg CmmReg where foldRegsDefd _ f z (CmmLocal reg) = f z reg foldRegsDefd _ _ z (CmmGlobal _) = z instance UserOfRegs GlobalReg CmmReg where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed _ _ z (CmmLocal _) = z foldRegsUsed _ f z (CmmGlobal reg) = f z reg instance DefinerOfRegs GlobalReg CmmReg where foldRegsDefd _ _ z (CmmLocal _) = z foldRegsDefd _ f z (CmmGlobal reg) = f z reg instance Ord r => UserOfRegs r r where foldRegsUsed _ f z r = f z r instance Ord r => DefinerOfRegs r r where foldRegsDefd _ f z r = f z r 
instance (Ord r, UserOfRegs r CmmReg) => UserOfRegs r CmmExpr where -- The (Ord r) in the context is necessary here -- See Note [Recursive superclasses] in GHC.Tc.TyCl.Instance {-# INLINEABLE foldRegsUsed #-} foldRegsUsed platform f !z e = expr z e where expr z (CmmLit _) = z expr z (CmmLoad addr _ _) = foldRegsUsed platform f z addr expr z (CmmReg r) = foldRegsUsed platform f z r expr z (CmmMachOp _ exprs) = foldRegsUsed platform f z exprs expr z (CmmRegOff r _) = foldRegsUsed platform f z r expr z (CmmStackSlot _ _) = z instance UserOfRegs r a => UserOfRegs r [a] where foldRegsUsed platform f set as = foldl' (foldRegsUsed platform f) set as {-# INLINABLE foldRegsUsed #-} instance DefinerOfRegs r a => DefinerOfRegs r [a] where foldRegsDefd platform f set as = foldl' (foldRegsDefd platform f) set as {-# INLINABLE foldRegsDefd #-} ----------------------------------------------------------------------------- -- Global STG registers ----------------------------------------------------------------------------- data VGcPtr = VGcPtr | VNonGcPtr deriving( Eq, Show ) ----------------------------------------------------------------------------- -- Global STG registers ----------------------------------------------------------------------------- {- Note [Overlapping global registers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The backend might not faithfully implement the abstraction of the STG machine with independent registers for different values of type GlobalReg. Specifically, certain pairs of registers (r1, r2) may overlap in the sense that a store to r1 invalidates the value in r2, and vice versa. Currently this occurs only on the x86_64 architecture where FloatReg n and DoubleReg n are assigned the same microarchitectural register, in order to allow functions to receive more Float# or Double# arguments in registers (as opposed to on the stack). 
There are no specific rules about which registers might overlap with which other registers, but presumably it's safe to assume that nothing will overlap with special registers like Sp or BaseReg. Use GHC.Cmm.Utils.regsOverlap to determine whether two GlobalRegs overlap on a particular platform. The instance Eq GlobalReg is syntactic equality of STG registers and does not take overlap into account. However it is still used in UserOfRegs/DefinerOfRegs and there are likely still bugs there, beware! -} data GlobalReg -- Argument and return registers = VanillaReg -- pointers, unboxed ints and chars {-# UNPACK #-} !Int -- its number VGcPtr | FloatReg -- single-precision floating-point registers {-# UNPACK #-} !Int -- its number | DoubleReg -- double-precision floating-point registers {-# UNPACK #-} !Int -- its number | LongReg -- long int registers (64-bit, really) {-# UNPACK #-} !Int -- its number | XmmReg -- 128-bit SIMD vector register {-# UNPACK #-} !Int -- its number | YmmReg -- 256-bit SIMD vector register {-# UNPACK #-} !Int -- its number | ZmmReg -- 512-bit SIMD vector register {-# UNPACK #-} !Int -- its number -- STG registers | Sp -- Stack ptr; points to last occupied stack location. | SpLim -- Stack limit | Hp -- Heap ptr; points to last occupied heap location. | HpLim -- Heap limit register | CCCS -- Current cost-centre stack | CurrentTSO -- pointer to current thread's TSO | CurrentNursery -- pointer to allocation area | HpAlloc -- allocation count for heap check failure -- We keep the address of some commonly-called -- functions in the register table, to keep code -- size down: | EagerBlackholeInfo -- stg_EAGER_BLACKHOLE_info | GCEnter1 -- stg_gc_enter_1 | GCFun -- stg_gc_fun -- Base offset for the register table, used for accessing registers -- which do not have real registers assigned to them. This register -- will only appear after we have expanded GlobalReg into memory accesses -- (where necessary) in the native code generator. 
| BaseReg -- The register used by the platform for the C stack pointer. This is -- a break in the STG abstraction used exclusively to setup stack unwinding -- information. | MachSp -- The is a dummy register used to indicate to the stack unwinder where -- a routine would return to. | UnwindReturnReg -- Base Register for PIC (position-independent code) calculations -- Only used inside the native code generator. It's exact meaning differs -- from platform to platform (see module PositionIndependentCode). | PicBaseReg deriving( Show ) instance Eq GlobalReg where VanillaReg i _ == VanillaReg j _ = i==j -- Ignore type when seeking clashes FloatReg i == FloatReg j = i==j DoubleReg i == DoubleReg j = i==j LongReg i == LongReg j = i==j -- NOTE: XMM, YMM, ZMM registers actually are the same registers -- at least with respect to store at YMM i and then read from XMM i -- and similarly for ZMM etc. XmmReg i == XmmReg j = i==j YmmReg i == YmmReg j = i==j ZmmReg i == ZmmReg j = i==j Sp == Sp = True SpLim == SpLim = True Hp == Hp = True HpLim == HpLim = True CCCS == CCCS = True CurrentTSO == CurrentTSO = True CurrentNursery == CurrentNursery = True HpAlloc == HpAlloc = True EagerBlackholeInfo == EagerBlackholeInfo = True GCEnter1 == GCEnter1 = True GCFun == GCFun = True BaseReg == BaseReg = True MachSp == MachSp = True UnwindReturnReg == UnwindReturnReg = True PicBaseReg == PicBaseReg = True _r1 == _r2 = False -- NOTE: this Ord instance affects the tuple layout in GHCi, see -- Note [GHCi tuple layout] instance Ord GlobalReg where compare (VanillaReg i _) (VanillaReg j _) = compare i j -- Ignore type when seeking clashes compare (FloatReg i) (FloatReg j) = compare i j compare (DoubleReg i) (DoubleReg j) = compare i j compare (LongReg i) (LongReg j) = compare i j compare (XmmReg i) (XmmReg j) = compare i j compare (YmmReg i) (YmmReg j) = compare i j compare (ZmmReg i) (ZmmReg j) = compare i j compare Sp Sp = EQ compare SpLim SpLim = EQ compare Hp Hp = EQ compare HpLim HpLim = EQ 
compare CCCS CCCS = EQ compare CurrentTSO CurrentTSO = EQ compare CurrentNursery CurrentNursery = EQ compare HpAlloc HpAlloc = EQ compare EagerBlackholeInfo EagerBlackholeInfo = EQ compare GCEnter1 GCEnter1 = EQ compare GCFun GCFun = EQ compare BaseReg BaseReg = EQ compare MachSp MachSp = EQ compare UnwindReturnReg UnwindReturnReg = EQ compare PicBaseReg PicBaseReg = EQ compare (VanillaReg _ _) _ = LT compare _ (VanillaReg _ _) = GT compare (FloatReg _) _ = LT compare _ (FloatReg _) = GT compare (DoubleReg _) _ = LT compare _ (DoubleReg _) = GT compare (LongReg _) _ = LT compare _ (LongReg _) = GT compare (XmmReg _) _ = LT compare _ (XmmReg _) = GT compare (YmmReg _) _ = LT compare _ (YmmReg _) = GT compare (ZmmReg _) _ = LT compare _ (ZmmReg _) = GT compare Sp _ = LT compare _ Sp = GT compare SpLim _ = LT compare _ SpLim = GT compare Hp _ = LT compare _ Hp = GT compare HpLim _ = LT compare _ HpLim = GT compare CCCS _ = LT compare _ CCCS = GT compare CurrentTSO _ = LT compare _ CurrentTSO = GT compare CurrentNursery _ = LT compare _ CurrentNursery = GT compare HpAlloc _ = LT compare _ HpAlloc = GT compare GCEnter1 _ = LT compare _ GCEnter1 = GT compare GCFun _ = LT compare _ GCFun = GT compare BaseReg _ = LT compare _ BaseReg = GT compare MachSp _ = LT compare _ MachSp = GT compare UnwindReturnReg _ = LT compare _ UnwindReturnReg = GT compare EagerBlackholeInfo _ = LT compare _ EagerBlackholeInfo = GT -- convenient aliases baseReg, spReg, hpReg, spLimReg, hpLimReg, nodeReg, currentTSOReg, currentNurseryReg, hpAllocReg, cccsReg :: CmmReg baseReg = CmmGlobal BaseReg spReg = CmmGlobal Sp hpReg = CmmGlobal Hp hpLimReg = CmmGlobal HpLim spLimReg = CmmGlobal SpLim nodeReg = CmmGlobal node currentTSOReg = CmmGlobal CurrentTSO currentNurseryReg = CmmGlobal CurrentNursery hpAllocReg = CmmGlobal HpAlloc cccsReg = CmmGlobal CCCS node :: GlobalReg node = VanillaReg 1 VGcPtr globalRegType :: Platform -> GlobalReg -> CmmType globalRegType platform = \case (VanillaReg _ VGcPtr) 
-> gcWord platform (VanillaReg _ VNonGcPtr) -> bWord platform (FloatReg _) -> cmmFloat W32 (DoubleReg _) -> cmmFloat W64 (LongReg _) -> cmmBits W64 -- TODO: improve the internal model of SIMD/vectorized registers -- the right design SHOULd improve handling of float and double code too. -- see remarks in "NOTE [SIMD Design for the future]"" in GHC.StgToCmm.Prim (XmmReg _) -> cmmVec 4 (cmmBits W32) (YmmReg _) -> cmmVec 8 (cmmBits W32) (ZmmReg _) -> cmmVec 16 (cmmBits W32) Hp -> gcWord platform -- The initialiser for all -- dynamically allocated closures _ -> bWord platform isArgReg :: GlobalReg -> Bool isArgReg (VanillaReg {}) = True isArgReg (FloatReg {}) = True isArgReg (DoubleReg {}) = True isArgReg (LongReg {}) = True isArgReg (XmmReg {}) = True isArgReg (YmmReg {}) = True isArgReg (ZmmReg {}) = True isArgReg _ = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/MachOp.hs0000644000000000000000000005554614472400112020071 0ustar0000000000000000{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Cmm.MachOp ( MachOp(..) , pprMachOp, isCommutableMachOp, isAssociativeMachOp , isComparisonMachOp, maybeIntComparison, machOpResultType , machOpArgReps, maybeInvertComparison, isFloatComparison -- MachOp builders , mo_wordAdd, mo_wordSub, mo_wordEq, mo_wordNe,mo_wordMul, mo_wordSQuot , mo_wordSRem, mo_wordSNeg, mo_wordUQuot, mo_wordURem , mo_wordSGe, mo_wordSLe, mo_wordSGt, mo_wordSLt, mo_wordUGe , mo_wordULe, mo_wordUGt, mo_wordULt , mo_wordAnd, mo_wordOr, mo_wordXor, mo_wordNot , mo_wordShl, mo_wordSShr, mo_wordUShr , mo_u_8To32, mo_s_8To32, mo_u_16To32, mo_s_16To32 , mo_u_8ToWord, mo_s_8ToWord, mo_u_16ToWord, mo_s_16ToWord , mo_u_32ToWord, mo_s_32ToWord , mo_32To8, mo_32To16, mo_WordTo8, mo_WordTo16, mo_WordTo32, mo_WordTo64 -- CallishMachOp , CallishMachOp(..), callishMachOpHints , pprCallishMachOp , machOpMemcpyishAlign -- Atomic read-modify-write , MemoryOrdering(..) , AtomicMachOp(..) 
) where import GHC.Prelude import GHC.Platform import GHC.Cmm.Type import GHC.Utils.Outputable ----------------------------------------------------------------------------- -- MachOp ----------------------------------------------------------------------------- {- | Machine-level primops; ones which we can reasonably delegate to the native code generators to handle. Most operations are parameterised by the 'Width' that they operate on. Some operations have separate signed and unsigned versions, and float and integer versions. Note that there are variety of places in the native code generator where we assume that the code produced for a MachOp does not introduce new blocks. -} data MachOp -- Integer operations (insensitive to signed/unsigned) = MO_Add Width | MO_Sub Width | MO_Eq Width | MO_Ne Width | MO_Mul Width -- low word of multiply -- Signed multiply/divide | MO_S_MulMayOflo Width -- nonzero if signed multiply overflows | MO_S_Quot Width -- signed / (same semantics as IntQuotOp) | MO_S_Rem Width -- signed % (same semantics as IntRemOp) | MO_S_Neg Width -- unary - -- Unsigned multiply/divide | MO_U_MulMayOflo Width -- nonzero if unsigned multiply overflows | MO_U_Quot Width -- unsigned / (same semantics as WordQuotOp) | MO_U_Rem Width -- unsigned % (same semantics as WordRemOp) -- Signed comparisons | MO_S_Ge Width | MO_S_Le Width | MO_S_Gt Width | MO_S_Lt Width -- Unsigned comparisons | MO_U_Ge Width | MO_U_Le Width | MO_U_Gt Width | MO_U_Lt Width -- Floating point arithmetic | MO_F_Add Width | MO_F_Sub Width | MO_F_Neg Width -- unary - | MO_F_Mul Width | MO_F_Quot Width -- Floating point comparison | MO_F_Eq Width | MO_F_Ne Width | MO_F_Ge Width | MO_F_Le Width | MO_F_Gt Width | MO_F_Lt Width -- Bitwise operations. Not all of these may be supported -- at all sizes, and only integral Widths are valid. | MO_And Width | MO_Or Width | MO_Xor Width | MO_Not Width -- Shifts. The shift amount must be in [0,widthInBits). 
| MO_Shl Width | MO_U_Shr Width -- unsigned shift right | MO_S_Shr Width -- signed shift right -- Conversions. Some of these will be NOPs. -- Floating-point conversions use the signed variant. | MO_SF_Conv Width Width -- Signed int -> Float | MO_FS_Conv Width Width -- Float -> Signed int | MO_SS_Conv Width Width -- Signed int -> Signed int | MO_UU_Conv Width Width -- unsigned int -> unsigned int | MO_XX_Conv Width Width -- int -> int; puts no requirements on the -- contents of upper bits when extending; -- narrowing is simply truncation; the only -- expectation is that we can recover the -- original value by applying the opposite -- MO_XX_Conv, e.g., -- MO_XX_CONV W64 W8 (MO_XX_CONV W8 W64 x) -- is equivalent to just x. | MO_FF_Conv Width Width -- Float -> Float -- Vector element insertion and extraction operations | MO_V_Insert Length Width -- Insert scalar into vector | MO_V_Extract Length Width -- Extract scalar from vector -- Integer vector operations | MO_V_Add Length Width | MO_V_Sub Length Width | MO_V_Mul Length Width -- Signed vector multiply/divide | MO_VS_Quot Length Width | MO_VS_Rem Length Width | MO_VS_Neg Length Width -- Unsigned vector multiply/divide | MO_VU_Quot Length Width | MO_VU_Rem Length Width -- Floating point vector element insertion and extraction operations | MO_VF_Insert Length Width -- Insert scalar into vector | MO_VF_Extract Length Width -- Extract scalar from vector -- Floating point vector operations | MO_VF_Add Length Width | MO_VF_Sub Length Width | MO_VF_Neg Length Width -- unary negation | MO_VF_Mul Length Width | MO_VF_Quot Length Width -- Alignment check (for -falignment-sanitisation) | MO_AlignmentCheck Int Width deriving (Eq, Show) pprMachOp :: MachOp -> SDoc pprMachOp mo = text (show mo) -- ----------------------------------------------------------------------------- -- Some common MachReps -- A 'wordRep' is a machine word on the target architecture -- Specifically, it is the size of an Int#, Word#, Addr# -- and the unit 
of allocation on the stack and the heap -- Any pointer is also guaranteed to be a wordRep. mo_wordAdd, mo_wordSub, mo_wordEq, mo_wordNe,mo_wordMul, mo_wordSQuot , mo_wordSRem, mo_wordSNeg, mo_wordUQuot, mo_wordURem , mo_wordSGe, mo_wordSLe, mo_wordSGt, mo_wordSLt, mo_wordUGe , mo_wordULe, mo_wordUGt, mo_wordULt , mo_wordAnd, mo_wordOr, mo_wordXor, mo_wordNot, mo_wordShl, mo_wordSShr, mo_wordUShr , mo_u_8ToWord, mo_s_8ToWord, mo_u_16ToWord, mo_s_16ToWord, mo_u_32ToWord, mo_s_32ToWord , mo_WordTo8, mo_WordTo16, mo_WordTo32, mo_WordTo64 :: Platform -> MachOp mo_u_8To32, mo_s_8To32, mo_u_16To32, mo_s_16To32 , mo_32To8, mo_32To16 :: MachOp mo_wordAdd platform = MO_Add (wordWidth platform) mo_wordSub platform = MO_Sub (wordWidth platform) mo_wordEq platform = MO_Eq (wordWidth platform) mo_wordNe platform = MO_Ne (wordWidth platform) mo_wordMul platform = MO_Mul (wordWidth platform) mo_wordSQuot platform = MO_S_Quot (wordWidth platform) mo_wordSRem platform = MO_S_Rem (wordWidth platform) mo_wordSNeg platform = MO_S_Neg (wordWidth platform) mo_wordUQuot platform = MO_U_Quot (wordWidth platform) mo_wordURem platform = MO_U_Rem (wordWidth platform) mo_wordSGe platform = MO_S_Ge (wordWidth platform) mo_wordSLe platform = MO_S_Le (wordWidth platform) mo_wordSGt platform = MO_S_Gt (wordWidth platform) mo_wordSLt platform = MO_S_Lt (wordWidth platform) mo_wordUGe platform = MO_U_Ge (wordWidth platform) mo_wordULe platform = MO_U_Le (wordWidth platform) mo_wordUGt platform = MO_U_Gt (wordWidth platform) mo_wordULt platform = MO_U_Lt (wordWidth platform) mo_wordAnd platform = MO_And (wordWidth platform) mo_wordOr platform = MO_Or (wordWidth platform) mo_wordXor platform = MO_Xor (wordWidth platform) mo_wordNot platform = MO_Not (wordWidth platform) mo_wordShl platform = MO_Shl (wordWidth platform) mo_wordSShr platform = MO_S_Shr (wordWidth platform) mo_wordUShr platform = MO_U_Shr (wordWidth platform) mo_u_8To32 = MO_UU_Conv W8 W32 mo_s_8To32 = MO_SS_Conv W8 W32 mo_u_16To32 = 
MO_UU_Conv W16 W32 mo_s_16To32 = MO_SS_Conv W16 W32 mo_u_8ToWord platform = MO_UU_Conv W8 (wordWidth platform) mo_s_8ToWord platform = MO_SS_Conv W8 (wordWidth platform) mo_u_16ToWord platform = MO_UU_Conv W16 (wordWidth platform) mo_s_16ToWord platform = MO_SS_Conv W16 (wordWidth platform) mo_s_32ToWord platform = MO_SS_Conv W32 (wordWidth platform) mo_u_32ToWord platform = MO_UU_Conv W32 (wordWidth platform) mo_WordTo8 platform = MO_UU_Conv (wordWidth platform) W8 mo_WordTo16 platform = MO_UU_Conv (wordWidth platform) W16 mo_WordTo32 platform = MO_UU_Conv (wordWidth platform) W32 mo_WordTo64 platform = MO_UU_Conv (wordWidth platform) W64 mo_32To8 = MO_UU_Conv W32 W8 mo_32To16 = MO_UU_Conv W32 W16 -- ---------------------------------------------------------------------------- -- isCommutableMachOp {- | Returns 'True' if the MachOp has commutable arguments. This is used in the platform-independent Cmm optimisations. If in doubt, return 'False'. This generates worse code on the native routes, but is otherwise harmless. -} isCommutableMachOp :: MachOp -> Bool isCommutableMachOp mop = case mop of MO_Add _ -> True MO_Eq _ -> True MO_Ne _ -> True MO_Mul _ -> True MO_S_MulMayOflo _ -> True MO_U_MulMayOflo _ -> True MO_And _ -> True MO_Or _ -> True MO_Xor _ -> True MO_F_Add _ -> True MO_F_Mul _ -> True _other -> False -- ---------------------------------------------------------------------------- -- isAssociativeMachOp {- | Returns 'True' if the MachOp is associative (i.e. @(x+y)+z == x+(y+z)@) This is used in the platform-independent Cmm optimisations. If in doubt, return 'False'. This generates worse code on the native routes, but is otherwise harmless. -} isAssociativeMachOp :: MachOp -> Bool isAssociativeMachOp mop = case mop of MO_Add {} -> True -- NB: does not include MO_Mul {} -> True -- floatint point! 
MO_And {} -> True MO_Or {} -> True MO_Xor {} -> True _other -> False -- ---------------------------------------------------------------------------- -- isComparisonMachOp {- | Returns 'True' if the MachOp is a comparison. If in doubt, return False. This generates worse code on the native routes, but is otherwise harmless. -} isComparisonMachOp :: MachOp -> Bool isComparisonMachOp mop = case mop of MO_Eq _ -> True MO_Ne _ -> True MO_S_Ge _ -> True MO_S_Le _ -> True MO_S_Gt _ -> True MO_S_Lt _ -> True MO_U_Ge _ -> True MO_U_Le _ -> True MO_U_Gt _ -> True MO_U_Lt _ -> True MO_F_Eq {} -> True MO_F_Ne {} -> True MO_F_Ge {} -> True MO_F_Le {} -> True MO_F_Gt {} -> True MO_F_Lt {} -> True _other -> False {- | Returns @Just w@ if the operation is an integer comparison with width @w@, or @Nothing@ otherwise. -} maybeIntComparison :: MachOp -> Maybe Width maybeIntComparison mop = case mop of MO_Eq w -> Just w MO_Ne w -> Just w MO_S_Ge w -> Just w MO_S_Le w -> Just w MO_S_Gt w -> Just w MO_S_Lt w -> Just w MO_U_Ge w -> Just w MO_U_Le w -> Just w MO_U_Gt w -> Just w MO_U_Lt w -> Just w _ -> Nothing isFloatComparison :: MachOp -> Bool isFloatComparison mop = case mop of MO_F_Eq {} -> True MO_F_Ne {} -> True MO_F_Ge {} -> True MO_F_Le {} -> True MO_F_Gt {} -> True MO_F_Lt {} -> True _other -> False -- Note [Inverting conditions] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Sometimes it's useful to be able to invert the sense of a -- condition. Not all conditional tests are invertible: in -- particular, floating point conditionals cannot be inverted, because -- there exist floating-point values which return False for both senses -- of a condition (eg. !(NaN > NaN) && !(NaN /<= NaN)). 
maybeInvertComparison :: MachOp -> Maybe MachOp maybeInvertComparison op = case op of -- None of these Just cases include floating point MO_Eq r -> Just (MO_Ne r) MO_Ne r -> Just (MO_Eq r) MO_U_Lt r -> Just (MO_U_Ge r) MO_U_Gt r -> Just (MO_U_Le r) MO_U_Le r -> Just (MO_U_Gt r) MO_U_Ge r -> Just (MO_U_Lt r) MO_S_Lt r -> Just (MO_S_Ge r) MO_S_Gt r -> Just (MO_S_Le r) MO_S_Le r -> Just (MO_S_Gt r) MO_S_Ge r -> Just (MO_S_Lt r) _other -> Nothing -- ---------------------------------------------------------------------------- -- machOpResultType {- | Returns the MachRep of the result of a MachOp. -} machOpResultType :: Platform -> MachOp -> [CmmType] -> CmmType machOpResultType platform mop tys = case mop of MO_Add {} -> ty1 -- Preserve GC-ptr-hood MO_Sub {} -> ty1 -- of first arg MO_Mul r -> cmmBits r MO_S_MulMayOflo r -> cmmBits r MO_S_Quot r -> cmmBits r MO_S_Rem r -> cmmBits r MO_S_Neg r -> cmmBits r MO_U_MulMayOflo r -> cmmBits r MO_U_Quot r -> cmmBits r MO_U_Rem r -> cmmBits r MO_Eq {} -> comparisonResultRep platform MO_Ne {} -> comparisonResultRep platform MO_S_Ge {} -> comparisonResultRep platform MO_S_Le {} -> comparisonResultRep platform MO_S_Gt {} -> comparisonResultRep platform MO_S_Lt {} -> comparisonResultRep platform MO_U_Ge {} -> comparisonResultRep platform MO_U_Le {} -> comparisonResultRep platform MO_U_Gt {} -> comparisonResultRep platform MO_U_Lt {} -> comparisonResultRep platform MO_F_Add r -> cmmFloat r MO_F_Sub r -> cmmFloat r MO_F_Mul r -> cmmFloat r MO_F_Quot r -> cmmFloat r MO_F_Neg r -> cmmFloat r MO_F_Eq {} -> comparisonResultRep platform MO_F_Ne {} -> comparisonResultRep platform MO_F_Ge {} -> comparisonResultRep platform MO_F_Le {} -> comparisonResultRep platform MO_F_Gt {} -> comparisonResultRep platform MO_F_Lt {} -> comparisonResultRep platform MO_And {} -> ty1 -- Used for pointer masking MO_Or {} -> ty1 MO_Xor {} -> ty1 MO_Not r -> cmmBits r MO_Shl r -> cmmBits r MO_U_Shr r -> cmmBits r MO_S_Shr r -> cmmBits r MO_SS_Conv _ to -> cmmBits 
to MO_UU_Conv _ to -> cmmBits to MO_XX_Conv _ to -> cmmBits to MO_FS_Conv _ to -> cmmBits to MO_SF_Conv _ to -> cmmFloat to MO_FF_Conv _ to -> cmmFloat to MO_V_Insert l w -> cmmVec l (cmmBits w) MO_V_Extract _ w -> cmmBits w MO_V_Add l w -> cmmVec l (cmmBits w) MO_V_Sub l w -> cmmVec l (cmmBits w) MO_V_Mul l w -> cmmVec l (cmmBits w) MO_VS_Quot l w -> cmmVec l (cmmBits w) MO_VS_Rem l w -> cmmVec l (cmmBits w) MO_VS_Neg l w -> cmmVec l (cmmBits w) MO_VU_Quot l w -> cmmVec l (cmmBits w) MO_VU_Rem l w -> cmmVec l (cmmBits w) MO_VF_Insert l w -> cmmVec l (cmmFloat w) MO_VF_Extract _ w -> cmmFloat w MO_VF_Add l w -> cmmVec l (cmmFloat w) MO_VF_Sub l w -> cmmVec l (cmmFloat w) MO_VF_Mul l w -> cmmVec l (cmmFloat w) MO_VF_Quot l w -> cmmVec l (cmmFloat w) MO_VF_Neg l w -> cmmVec l (cmmFloat w) MO_AlignmentCheck _ _ -> ty1 where (ty1:_) = tys comparisonResultRep :: Platform -> CmmType comparisonResultRep = bWord -- is it? -- ----------------------------------------------------------------------------- -- machOpArgReps -- | This function is used for debugging only: we can check whether an -- application of a MachOp is "type-correct" by checking that the MachReps of -- its arguments are the same as the MachOp expects. This is used when -- linting a CmmExpr. 
machOpArgReps :: Platform -> MachOp -> [Width] machOpArgReps platform op = case op of MO_Add r -> [r,r] MO_Sub r -> [r,r] MO_Eq r -> [r,r] MO_Ne r -> [r,r] MO_Mul r -> [r,r] MO_S_MulMayOflo r -> [r,r] MO_S_Quot r -> [r,r] MO_S_Rem r -> [r,r] MO_S_Neg r -> [r] MO_U_MulMayOflo r -> [r,r] MO_U_Quot r -> [r,r] MO_U_Rem r -> [r,r] MO_S_Ge r -> [r,r] MO_S_Le r -> [r,r] MO_S_Gt r -> [r,r] MO_S_Lt r -> [r,r] MO_U_Ge r -> [r,r] MO_U_Le r -> [r,r] MO_U_Gt r -> [r,r] MO_U_Lt r -> [r,r] MO_F_Add r -> [r,r] MO_F_Sub r -> [r,r] MO_F_Mul r -> [r,r] MO_F_Quot r -> [r,r] MO_F_Neg r -> [r] MO_F_Eq r -> [r,r] MO_F_Ne r -> [r,r] MO_F_Ge r -> [r,r] MO_F_Le r -> [r,r] MO_F_Gt r -> [r,r] MO_F_Lt r -> [r,r] MO_And r -> [r,r] MO_Or r -> [r,r] MO_Xor r -> [r,r] MO_Not r -> [r] MO_Shl r -> [r, wordWidth platform] MO_U_Shr r -> [r, wordWidth platform] MO_S_Shr r -> [r, wordWidth platform] MO_SS_Conv from _ -> [from] MO_UU_Conv from _ -> [from] MO_XX_Conv from _ -> [from] MO_SF_Conv from _ -> [from] MO_FS_Conv from _ -> [from] MO_FF_Conv from _ -> [from] MO_V_Insert l r -> [typeWidth (vec l (cmmBits r)),r, W32] MO_V_Extract l r -> [typeWidth (vec l (cmmBits r)), W32] MO_VF_Insert l r -> [typeWidth (vec l (cmmFloat r)),r,W32] MO_VF_Extract l r -> [typeWidth (vec l (cmmFloat r)),W32] -- SIMD vector indices are always 32 bit MO_V_Add _ r -> [r,r] MO_V_Sub _ r -> [r,r] MO_V_Mul _ r -> [r,r] MO_VS_Quot _ r -> [r,r] MO_VS_Rem _ r -> [r,r] MO_VS_Neg _ r -> [r] MO_VU_Quot _ r -> [r,r] MO_VU_Rem _ r -> [r,r] MO_VF_Add _ r -> [r,r] MO_VF_Sub _ r -> [r,r] MO_VF_Mul _ r -> [r,r] MO_VF_Quot _ r -> [r,r] MO_VF_Neg _ r -> [r] MO_AlignmentCheck _ r -> [r] ----------------------------------------------------------------------------- -- CallishMachOp ----------------------------------------------------------------------------- -- CallishMachOps tend to be implemented by foreign calls in some backends, -- so we separate them out. 
In Cmm, these can only occur in a -- statement position, in contrast to an ordinary MachOp which can occur -- anywhere in an expression. data CallishMachOp = MO_F64_Pwr | MO_F64_Sin | MO_F64_Cos | MO_F64_Tan | MO_F64_Sinh | MO_F64_Cosh | MO_F64_Tanh | MO_F64_Asin | MO_F64_Acos | MO_F64_Atan | MO_F64_Asinh | MO_F64_Acosh | MO_F64_Atanh | MO_F64_Log | MO_F64_Log1P | MO_F64_Exp | MO_F64_ExpM1 | MO_F64_Fabs | MO_F64_Sqrt | MO_F32_Pwr | MO_F32_Sin | MO_F32_Cos | MO_F32_Tan | MO_F32_Sinh | MO_F32_Cosh | MO_F32_Tanh | MO_F32_Asin | MO_F32_Acos | MO_F32_Atan | MO_F32_Asinh | MO_F32_Acosh | MO_F32_Atanh | MO_F32_Log | MO_F32_Log1P | MO_F32_Exp | MO_F32_ExpM1 | MO_F32_Fabs | MO_F32_Sqrt -- 64-bit int/word ops for when they exceed the native word size -- (i.e. on 32-bit architectures) | MO_I64_ToI | MO_I64_FromI | MO_W64_ToW | MO_W64_FromW | MO_x64_Neg | MO_x64_Add | MO_x64_Sub | MO_x64_Mul | MO_I64_Quot | MO_I64_Rem | MO_W64_Quot | MO_W64_Rem | MO_x64_And | MO_x64_Or | MO_x64_Xor | MO_x64_Not | MO_x64_Shl | MO_I64_Shr | MO_W64_Shr | MO_x64_Eq | MO_x64_Ne | MO_I64_Ge | MO_I64_Gt | MO_I64_Le | MO_I64_Lt | MO_W64_Ge | MO_W64_Gt | MO_W64_Le | MO_W64_Lt | MO_UF_Conv Width | MO_S_Mul2 Width | MO_S_QuotRem Width | MO_U_QuotRem Width | MO_U_QuotRem2 Width | MO_Add2 Width | MO_AddWordC Width | MO_SubWordC Width | MO_AddIntC Width | MO_SubIntC Width | MO_U_Mul2 Width | MO_ReadBarrier | MO_WriteBarrier | MO_Touch -- Keep variables live (when using interior pointers) -- Prefetch | MO_Prefetch_Data Int -- Prefetch hint. May change program performance but not -- program behavior. -- the Int can be 0-3. Needs to be known at compile time -- to interact with code generation correctly. -- TODO: add support for prefetch WRITES, -- currently only exposes prefetch reads, which -- would the majority of use cases in ghc anyways -- These three MachOps are parameterised by the known alignment -- of the destination and source (for memcpy/memmove) pointers. 
-- This information may be used for optimisation in backends. | MO_Memcpy Int | MO_Memset Int | MO_Memmove Int | MO_Memcmp Int | MO_PopCnt Width | MO_Pdep Width | MO_Pext Width | MO_Clz Width | MO_Ctz Width | MO_BSwap Width | MO_BRev Width -- | Atomic read-modify-write. Arguments are @[dest, n]@. | MO_AtomicRMW Width AtomicMachOp -- | Atomic read. Arguments are @[addr]@. | MO_AtomicRead Width MemoryOrdering -- | Atomic write. Arguments are @[addr, value]@. | MO_AtomicWrite Width MemoryOrdering -- | Atomic compare-and-swap. Arguments are @[dest, expected, new]@. -- Sequentially consistent. -- Possible future refactoring: should this be an'MO_AtomicRMW' variant? | MO_Cmpxchg Width -- | Atomic swap. Arguments are @[dest, new]@ | MO_Xchg Width -- These rts provided functions are special: suspendThread releases the -- capability, hence we mustn't sink any use of data stored in the capability -- after this instruction. | MO_SuspendThread | MO_ResumeThread deriving (Eq, Show) -- | C11 memory ordering semantics. data MemoryOrdering = MemOrderRelaxed -- ^ relaxed ordering | MemOrderAcquire -- ^ acquire ordering | MemOrderRelease -- ^ release ordering | MemOrderSeqCst -- ^ sequentially consistent deriving (Eq, Ord, Show) -- | The operation to perform atomically. data AtomicMachOp = AMO_Add | AMO_Sub | AMO_And | AMO_Nand | AMO_Or | AMO_Xor deriving (Eq, Show) pprCallishMachOp :: CallishMachOp -> SDoc pprCallishMachOp mo = text (show mo) -- | Return (results_hints,args_hints) callishMachOpHints :: CallishMachOp -> ([ForeignHint], [ForeignHint]) callishMachOpHints op = case op of MO_Memcpy _ -> ([], [AddrHint,AddrHint,NoHint]) MO_Memset _ -> ([], [AddrHint,NoHint,NoHint]) MO_Memmove _ -> ([], [AddrHint,AddrHint,NoHint]) MO_Memcmp _ -> ([], [AddrHint, AddrHint, NoHint]) MO_SuspendThread -> ([AddrHint], [AddrHint,NoHint]) MO_ResumeThread -> ([AddrHint], [AddrHint]) _ -> ([],[]) -- empty lists indicate NoHint -- | The alignment of a 'memcpy'-ish operation. 
machOpMemcpyishAlign :: CallishMachOp -> Maybe Int machOpMemcpyishAlign op = case op of MO_Memcpy align -> Just align MO_Memset align -> Just align MO_Memmove align -> Just align MO_Memcmp align -> Just align _ -> Nothing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Node.hs0000644000000000000000000007734214472400112017605 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- CmmNode type for representation using Hoopl graphs. module GHC.Cmm.Node ( CmmNode(..), CmmFormal, CmmActual, CmmTickish, UpdFrameOffset, Convention(..), ForeignConvention(..), ForeignTarget(..), foreignTargetHints, CmmReturnInfo(..), mapExp, mapExpDeep, wrapRecExp, foldExp, foldExpDeep, wrapRecExpf, mapExpM, mapExpDeepM, wrapRecExpM, mapSuccessors, mapCollectSuccessors, -- * Tick scopes CmmTickScope(..), isTickSubScope, combineTickScopes, ) where import GHC.Prelude hiding (succ) import GHC.Platform.Regs import GHC.Cmm.Expr import GHC.Cmm.Switch import GHC.Data.FastString import GHC.Types.ForeignCall import GHC.Utils.Outputable import GHC.Runtime.Heap.Layout import GHC.Types.Tickish (CmmTickish) import qualified GHC.Types.Unique as U import GHC.Cmm.Dataflow.Block import GHC.Cmm.Dataflow.Graph import GHC.Cmm.Dataflow.Collections import GHC.Cmm.Dataflow.Label import Data.Maybe import Data.List (tails,sortBy) import GHC.Types.Unique (nonDetCmpUnique) import GHC.Utils.Misc ------------------------ -- CmmNode #define ULabel {-# UNPACK #-} !Label data CmmNode e x where CmmEntry :: ULabel -> CmmTickScope -> CmmNode C O CmmComment :: FastString -> CmmNode O O -- Tick annotation, covering Cmm code in our tick scope. 
We only -- expect non-code @Tickish@ at this point (e.g. @SourceNote@). -- See Note [CmmTick scoping details] CmmTick :: !CmmTickish -> CmmNode O O -- Unwind pseudo-instruction, encoding stack unwinding -- instructions for a debugger. This describes how to reconstruct -- the "old" value of a register if we want to navigate the stack -- up one frame. Having unwind information for @Sp@ will allow the -- debugger to "walk" the stack. -- -- See Note [What is this unwinding business?] in "GHC.Cmm.DebugBlock" CmmUnwind :: [(GlobalReg, Maybe CmmExpr)] -> CmmNode O O CmmAssign :: !CmmReg -> !CmmExpr -> CmmNode O O -- Assign to register CmmStore :: !CmmExpr -> !CmmExpr -> !AlignmentSpec -> CmmNode O O -- Assign to memory location. Size is -- given by cmmExprType of the rhs. CmmUnsafeForeignCall :: -- An unsafe foreign call; -- see Note [Foreign calls] -- Like a "fat machine instruction"; can occur -- in the middle of a block ForeignTarget -> -- call target [CmmFormal] -> -- zero or more results [CmmActual] -> -- zero or more arguments CmmNode O O -- Semantics: clobbers any GlobalRegs for which callerSaves r == True -- See Note [Unsafe foreign calls clobber caller-save registers] -- -- Invariant: the arguments and the ForeignTarget must not -- mention any registers for which GHC.Platform.callerSaves -- is True. See Note [Register parameter passing]. CmmBranch :: ULabel -> CmmNode O C -- Goto another block in the same procedure CmmCondBranch :: { -- conditional branch cml_pred :: CmmExpr, cml_true, cml_false :: ULabel, cml_likely :: Maybe Bool -- likely result of the conditional, -- if known } -> CmmNode O C CmmSwitch :: CmmExpr -- Scrutinee, of some integral type -> SwitchTargets -- Cases. See Note [SwitchTargets] -> CmmNode O C CmmCall :: { -- A native call or tail call cml_target :: CmmExpr, -- never a CmmPrim to a CallishMachOp! 
cml_cont :: Maybe Label, -- Label of continuation (Nothing for return or tail call) -- -- Note [Continuation BlockIds] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- These BlockIds are called -- Continuation BlockIds, and are the only BlockIds that can -- occur in CmmExprs, namely as (CmmLit (CmmBlock b)) or -- (CmmStackSlot (Young b) _). cml_args_regs :: [GlobalReg], -- The argument GlobalRegs (Rx, Fx, Dx, Lx) that are passed -- to the call. This is essential information for the -- native code generator's register allocator; without -- knowing which GlobalRegs are live it has to assume that -- they are all live. This list should only include -- GlobalRegs that are mapped to real machine registers on -- the target platform. cml_args :: ByteOff, -- Byte offset, from the *old* end of the Area associated with -- the Label (if cml_cont = Nothing, then Old area), of -- youngest outgoing arg. Set the stack pointer to this before -- transferring control. -- (NB: an update frame might also have been stored in the Old -- area, but it'll be in an older part than the args.) cml_ret_args :: ByteOff, -- For calls *only*, the byte offset for youngest returned value -- This is really needed at the *return* point rather than here -- at the call, but in practice it's convenient to record it here. cml_ret_off :: ByteOff -- For calls *only*, the byte offset of the base of the frame that -- must be described by the info table for the return point. -- The older words are an update frames, which have their own -- info-table and layout information -- From a liveness point of view, the stack words older than -- cml_ret_off are treated as live, even if the sequel of -- the call goes into a loop. 
} -> CmmNode O C CmmForeignCall :: { -- A safe foreign call; see Note [Foreign calls] -- Always the last node of a block tgt :: ForeignTarget, -- call target and convention res :: [CmmFormal], -- zero or more results args :: [CmmActual], -- zero or more arguments; see Note [Register parameter passing] succ :: ULabel, -- Label of continuation ret_args :: ByteOff, -- same as cml_ret_args ret_off :: ByteOff, -- same as cml_ret_off intrbl:: Bool -- whether or not the call is interruptible } -> CmmNode O C {- Note [Foreign calls] ~~~~~~~~~~~~~~~~~~~~~~~ A CmmUnsafeForeignCall is used for *unsafe* foreign calls; a CmmForeignCall call is used for *safe* foreign calls. Unsafe ones are mostly easy: think of them as a "fat machine instruction". In particular, they do *not* kill all live registers, just the registers they return to (there was a bit of code in GHC that conservatively assumed otherwise.) However, see [Register parameter passing]. Safe ones are trickier. A safe foreign call r = f(x) ultimately expands to push "return address" -- Never used to return to; -- just points an info table save registers into TSO call suspendThread r = f(x) -- Make the call call resumeThread restore registers pop "return address" We cannot "lower" a safe foreign call to this sequence of Cmms, because after we've saved Sp all the Cmm optimiser's assumptions are broken. Note that a safe foreign call needs an info table. So Safe Foreign Calls must remain as last nodes until the stack is made manifest in GHC.Cmm.LayoutStack, where they are lowered into the above sequence. -} {- Note [Unsafe foreign calls clobber caller-save registers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A foreign call is defined to clobber any GlobalRegs that are mapped to caller-saves machine registers (according to the prevailing C ABI). GHC.StgToCmm.Utils.callerSaves tells you which GlobalRegs are caller-saves. This is a design choice that makes it easier to generate code later. 
We could instead choose to say that foreign calls do *not* clobber caller-saves regs, but then we would have to figure out which regs were live across the call later and insert some saves/restores. Furthermore when we generate code we never have any GlobalRegs live across a call, because they are always copied-in to LocalRegs and copied-out again before making a call/jump. So all we have to do is avoid any code motion that would make a caller-saves GlobalReg live across a foreign call during subsequent optimisations. -} {- Note [Register parameter passing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ On certain architectures, some registers are utilized for parameter passing in the C calling convention. For example, in x86-64 Linux convention, rdi, rsi, rdx and rcx (as well as r8 and r9) may be used for argument passing. These are registers R3-R6, which our generated code may also be using; as a result, it's necessary to save these values before doing a foreign call. This is done during initial code generation in callerSaveVolatileRegs in GHC.StgToCmm.Utils. However, one result of doing this is that the contents of these registers may mysteriously change if referenced inside the arguments. This is dangerous, so you'll need to disable inlining much in the same way is done in GHC.Cmm.Sink currently. We should fix this! 
-} --------------------------------------------- -- Eq instance of CmmNode deriving instance Eq (CmmNode e x) ---------------------------------------------- -- Hoopl instances of CmmNode instance NonLocal CmmNode where entryLabel (CmmEntry l _) = l successors (CmmBranch l) = [l] successors (CmmCondBranch {cml_true=t, cml_false=f}) = [f, t] -- meets layout constraint successors (CmmSwitch _ ids) = switchTargetsToList ids successors (CmmCall {cml_cont=l}) = maybeToList l successors (CmmForeignCall {succ=l}) = [l] -------------------------------------------------- -- Various helper types type CmmActual = CmmExpr type CmmFormal = LocalReg type UpdFrameOffset = ByteOff -- | A convention maps a list of values (function arguments or return -- values) to registers or stack locations. data Convention = NativeDirectCall -- ^ top-level Haskell functions use @NativeDirectCall@, which -- maps arguments to registers starting with R2, according to -- how many registers are available on the platform. This -- convention ignores R1, because for a top-level function call -- the function closure is implicit, and doesn't need to be passed. | NativeNodeCall -- ^ non-top-level Haskell functions, which pass the address of -- the function closure in R1 (regardless of whether R1 is a -- real register or not), and the rest of the arguments in -- registers or on the stack. | NativeReturn -- ^ a native return. The convention for returns depends on -- how many values are returned: for just one value returned, -- the appropriate register is used (R1, F1, etc.). regardless -- of whether it is a real register or not. For multiple -- values returned, they are mapped to registers or the stack. | Slow -- ^ Slow entry points: all args pushed on the stack | GC -- ^ Entry to the garbage collector: uses the node reg! 
-- (TODO: I don't think we need this --SDM) deriving( Eq ) data ForeignConvention = ForeignConvention CCallConv -- Which foreign-call convention [ForeignHint] -- Extra info about the args [ForeignHint] -- Extra info about the result CmmReturnInfo deriving Eq data CmmReturnInfo = CmmMayReturn | CmmNeverReturns deriving ( Eq ) data ForeignTarget -- The target of a foreign call = ForeignTarget -- A foreign procedure CmmExpr -- Its address ForeignConvention -- Its calling convention | PrimTarget -- A possibly-side-effecting machine operation CallishMachOp -- Which one deriving Eq foreignTargetHints :: ForeignTarget -> ([ForeignHint], [ForeignHint]) foreignTargetHints target = ( res_hints ++ repeat NoHint , arg_hints ++ repeat NoHint ) where (res_hints, arg_hints) = case target of PrimTarget op -> callishMachOpHints op ForeignTarget _ (ForeignConvention _ arg_hints res_hints _) -> (res_hints, arg_hints) -------------------------------------------------- -- Instances of register and slot users / definers instance UserOfRegs LocalReg (CmmNode e x) where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed platform f !z n = case n of CmmAssign _ expr -> fold f z expr CmmStore addr rval _ -> fold f (fold f z addr) rval CmmUnsafeForeignCall t _ args -> fold f (fold f z t) args CmmCondBranch expr _ _ _ -> fold f z expr CmmSwitch expr _ -> fold f z expr CmmCall {cml_target=tgt} -> fold f z tgt CmmForeignCall {tgt=tgt, args=args} -> fold f (fold f z tgt) args _ -> z where fold :: forall a b. 
UserOfRegs LocalReg a => (b -> LocalReg -> b) -> b -> a -> b fold f z n = foldRegsUsed platform f z n instance UserOfRegs GlobalReg (CmmNode e x) where {-# INLINEABLE foldRegsUsed #-} foldRegsUsed platform f !z n = case n of CmmAssign _ expr -> fold f z expr CmmStore addr rval _ -> fold f (fold f z addr) rval CmmUnsafeForeignCall t _ args -> fold f (fold f z t) args CmmCondBranch expr _ _ _ -> fold f z expr CmmSwitch expr _ -> fold f z expr CmmCall {cml_target=tgt, cml_args_regs=args} -> fold f (fold f z args) tgt CmmForeignCall {tgt=tgt, args=args} -> fold f (fold f z tgt) args _ -> z where fold :: forall a b. UserOfRegs GlobalReg a => (b -> GlobalReg -> b) -> b -> a -> b fold f z n = foldRegsUsed platform f z n instance (Ord r, UserOfRegs r CmmReg) => UserOfRegs r ForeignTarget where -- The (Ord r) in the context is necessary here -- See Note [Recursive superclasses] in GHC.Tc.TyCl.Instance {-# INLINEABLE foldRegsUsed #-} foldRegsUsed _ _ !z (PrimTarget _) = z foldRegsUsed platform f !z (ForeignTarget e _) = foldRegsUsed platform f z e instance DefinerOfRegs LocalReg (CmmNode e x) where {-# INLINEABLE foldRegsDefd #-} foldRegsDefd platform f !z n = case n of CmmAssign lhs _ -> fold f z lhs CmmUnsafeForeignCall _ fs _ -> fold f z fs CmmForeignCall {res=res} -> fold f z res _ -> z where fold :: forall a b. DefinerOfRegs LocalReg a => (b -> LocalReg -> b) -> b -> a -> b fold f z n = foldRegsDefd platform f z n instance DefinerOfRegs GlobalReg (CmmNode e x) where {-# INLINEABLE foldRegsDefd #-} foldRegsDefd platform f !z n = case n of CmmAssign lhs _ -> fold f z lhs CmmUnsafeForeignCall tgt _ _ -> fold f z (foreignTargetRegs tgt) CmmCall {} -> fold f z activeRegs CmmForeignCall {} -> fold f z activeRegs -- See Note [Safe foreign calls clobber STG registers] _ -> z where fold :: forall a b. 
DefinerOfRegs GlobalReg a => (b -> GlobalReg -> b) -> b -> a -> b fold f z n = foldRegsDefd platform f z n activeRegs = activeStgRegs platform activeCallerSavesRegs = filter (callerSaves platform) activeRegs foreignTargetRegs (ForeignTarget _ (ForeignConvention _ _ _ CmmNeverReturns)) = [] foreignTargetRegs _ = activeCallerSavesRegs -- Note [Safe foreign calls clobber STG registers] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- During stack layout phase every safe foreign call is expanded into a block -- that contains unsafe foreign call (instead of safe foreign call) and ends -- with a normal call (See Note [Foreign calls]). This means that we must -- treat safe foreign call as if it was a normal call (because eventually it -- will be). This is important if we try to run sinking pass before stack -- layout phase. Consider this example of what might go wrong (this is cmm -- code from stablename001 test). Here is code after common block elimination -- (before stack layout): -- -- c1q6: -- _s1pf::P64 = R1; -- _c1q8::I64 = performMajorGC; -- I64[(young + 8)] = c1q9; -- foreign call "ccall" arg hints: [] result hints: [] (_c1q8::I64)(...) -- returns to c1q9 args: ([]) ress: ([])ret_args: 8ret_off: 8; -- c1q9: -- I64[(young + 8)] = c1qb; -- R1 = _s1pc::P64; -- call stg_makeStableName#(R1) returns to c1qb, args: 8, res: 8, upd: 8; -- -- If we run sinking pass now (still before stack layout) we will get this: -- -- c1q6: -- I64[(young + 8)] = c1q9; -- foreign call "ccall" arg hints: [] result hints: [] performMajorGC(...) -- returns to c1q9 args: ([]) ress: ([])ret_args: 8ret_off: 8; -- c1q9: -- I64[(young + 8)] = c1qb; -- _s1pf::P64 = R1; <------ _s1pf sunk past safe foreign call -- R1 = _s1pc::P64; -- call stg_makeStableName#(R1) returns to c1qb, args: 8, res: 8, upd: 8; -- -- Notice that _s1pf was sunk past a foreign call. 
When we run stack layout -- safe call to performMajorGC will be turned into: -- -- c1q6: -- _s1pc::P64 = P64[Sp + 8]; -- I64[Sp - 8] = c1q9; -- Sp = Sp - 8; -- I64[I64[CurrentTSO + 24] + 16] = Sp; -- P64[CurrentNursery + 8] = Hp + 8; -- (_u1qI::I64) = call "ccall" arg hints: [PtrHint,] -- result hints: [PtrHint] suspendThread(BaseReg, 0); -- call "ccall" arg hints: [] result hints: [] performMajorGC(); -- (_u1qJ::I64) = call "ccall" arg hints: [PtrHint] -- result hints: [PtrHint] resumeThread(_u1qI::I64); -- BaseReg = _u1qJ::I64; -- _u1qK::P64 = CurrentTSO; -- _u1qL::P64 = I64[_u1qK::P64 + 24]; -- Sp = I64[_u1qL::P64 + 16]; -- SpLim = _u1qL::P64 + 192; -- HpAlloc = 0; -- Hp = I64[CurrentNursery + 8] - 8; -- HpLim = I64[CurrentNursery] + (%MO_SS_Conv_W32_W64(I32[CurrentNursery + 48]) * 4096 - 1); -- call (I64[Sp])() returns to c1q9, args: 8, res: 8, upd: 8; -- c1q9: -- I64[(young + 8)] = c1qb; -- _s1pf::P64 = R1; <------ INCORRECT! -- R1 = _s1pc::P64; -- call stg_makeStableName#(R1) returns to c1qb, args: 8, res: 8, upd: 8; -- -- Notice that c1q6 now ends with a call. Sinking _s1pf::P64 = R1 past that -- call is clearly incorrect. This is what would happen if we assumed that -- safe foreign call has the same semantics as unsafe foreign call. To prevent -- this we need to treat safe foreign call as if was normal call. ----------------------------------- -- mapping Expr in GHC.Cmm.Node mapForeignTarget :: (CmmExpr -> CmmExpr) -> ForeignTarget -> ForeignTarget mapForeignTarget exp (ForeignTarget e c) = ForeignTarget (exp e) c mapForeignTarget _ m@(PrimTarget _) = m wrapRecExp :: (CmmExpr -> CmmExpr) -> CmmExpr -> CmmExpr -- Take a transformer on expressions and apply it recursively. 
-- (wrapRecExp f e) first recursively applies itself to sub-expressions of e -- then uses f to rewrite the resulting expression wrapRecExp f (CmmMachOp op es) = f (CmmMachOp op $ map (wrapRecExp f) es) wrapRecExp f (CmmLoad addr ty align) = f (CmmLoad (wrapRecExp f addr) ty align) wrapRecExp f e = f e mapExp :: (CmmExpr -> CmmExpr) -> CmmNode e x -> CmmNode e x mapExp _ f@(CmmEntry{}) = f mapExp _ m@(CmmComment _) = m mapExp _ m@(CmmTick _) = m mapExp f (CmmUnwind regs) = CmmUnwind (map (fmap (fmap f)) regs) mapExp f (CmmAssign r e) = CmmAssign r (f e) mapExp f (CmmStore addr e align) = CmmStore (f addr) (f e) align mapExp f (CmmUnsafeForeignCall tgt fs as) = CmmUnsafeForeignCall (mapForeignTarget f tgt) fs (map f as) mapExp _ l@(CmmBranch _) = l mapExp f (CmmCondBranch e ti fi l) = CmmCondBranch (f e) ti fi l mapExp f (CmmSwitch e ids) = CmmSwitch (f e) ids mapExp f n@CmmCall {cml_target=tgt} = n{cml_target = f tgt} mapExp f (CmmForeignCall tgt fs as succ ret_args updfr intrbl) = CmmForeignCall (mapForeignTarget f tgt) fs (map f as) succ ret_args updfr intrbl mapExpDeep :: (CmmExpr -> CmmExpr) -> CmmNode e x -> CmmNode e x mapExpDeep f = mapExp $ wrapRecExp f ------------------------------------------------------------------------ -- mapping Expr in GHC.Cmm.Node, but not performing allocation if no changes mapForeignTargetM :: (CmmExpr -> Maybe CmmExpr) -> ForeignTarget -> Maybe ForeignTarget mapForeignTargetM f (ForeignTarget e c) = (\x -> ForeignTarget x c) `fmap` f e mapForeignTargetM _ (PrimTarget _) = Nothing wrapRecExpM :: (CmmExpr -> Maybe CmmExpr) -> (CmmExpr -> Maybe CmmExpr) -- (wrapRecExpM f e) first recursively applies itself to sub-expressions of e -- then gives f a chance to rewrite the resulting expression wrapRecExpM f n@(CmmMachOp op es) = maybe (f n) (f . 
CmmMachOp op) (mapListM (wrapRecExpM f) es) wrapRecExpM f n@(CmmLoad addr ty align) = maybe (f n) (\addr' -> f $ CmmLoad addr' ty align) (wrapRecExpM f addr) wrapRecExpM f e = f e mapExpM :: (CmmExpr -> Maybe CmmExpr) -> CmmNode e x -> Maybe (CmmNode e x) mapExpM _ (CmmEntry{}) = Nothing mapExpM _ (CmmComment _) = Nothing mapExpM _ (CmmTick _) = Nothing mapExpM f (CmmUnwind regs) = CmmUnwind `fmap` mapM (\(r,e) -> mapM f e >>= \e' -> pure (r,e')) regs mapExpM f (CmmAssign r e) = CmmAssign r `fmap` f e mapExpM f (CmmStore addr e align) = (\[addr', e'] -> CmmStore addr' e' align) `fmap` mapListM f [addr, e] mapExpM _ (CmmBranch _) = Nothing mapExpM f (CmmCondBranch e ti fi l) = (\x -> CmmCondBranch x ti fi l) `fmap` f e mapExpM f (CmmSwitch e tbl) = (\x -> CmmSwitch x tbl) `fmap` f e mapExpM f (CmmCall tgt mb_id r o i s) = (\x -> CmmCall x mb_id r o i s) `fmap` f tgt mapExpM f (CmmUnsafeForeignCall tgt fs as) = case mapForeignTargetM f tgt of Just tgt' -> Just (CmmUnsafeForeignCall tgt' fs (mapListJ f as)) Nothing -> (\xs -> CmmUnsafeForeignCall tgt fs xs) `fmap` mapListM f as mapExpM f (CmmForeignCall tgt fs as succ ret_args updfr intrbl) = case mapForeignTargetM f tgt of Just tgt' -> Just (CmmForeignCall tgt' fs (mapListJ f as) succ ret_args updfr intrbl) Nothing -> (\xs -> CmmForeignCall tgt fs xs succ ret_args updfr intrbl) `fmap` mapListM f as -- share as much as possible mapListM :: (a -> Maybe a) -> [a] -> Maybe [a] mapListM f xs = let (b, r) = mapListT f xs in if b then Just r else Nothing mapListJ :: (a -> Maybe a) -> [a] -> [a] mapListJ f xs = snd (mapListT f xs) mapListT :: (a -> Maybe a) -> [a] -> (Bool, [a]) mapListT f xs = foldr g (False, []) (zip3 (tails xs) xs (map f xs)) where g (_, y, Nothing) (True, ys) = (True, y:ys) g (_, _, Just y) (True, ys) = (True, y:ys) g (ys', _, Nothing) (False, _) = (False, ys') g (_, _, Just y) (False, ys) = (True, y:ys) mapExpDeepM :: (CmmExpr -> Maybe CmmExpr) -> CmmNode e x -> Maybe (CmmNode e x) mapExpDeepM f = 
mapExpM $ wrapRecExpM f ----------------------------------- -- folding Expr in GHC.Cmm.Node foldExpForeignTarget :: (CmmExpr -> z -> z) -> ForeignTarget -> z -> z foldExpForeignTarget exp (ForeignTarget e _) z = exp e z foldExpForeignTarget _ (PrimTarget _) z = z -- Take a folder on expressions and apply it recursively. -- Specifically (wrapRecExpf f e z) deals with CmmMachOp and CmmLoad -- itself, delegating all the other CmmExpr forms to 'f'. wrapRecExpf :: (CmmExpr -> z -> z) -> CmmExpr -> z -> z wrapRecExpf f e@(CmmMachOp _ es) z = foldr (wrapRecExpf f) (f e z) es wrapRecExpf f e@(CmmLoad addr _ _) z = wrapRecExpf f addr (f e z) wrapRecExpf f e z = f e z foldExp :: (CmmExpr -> z -> z) -> CmmNode e x -> z -> z foldExp _ (CmmEntry {}) z = z foldExp _ (CmmComment {}) z = z foldExp _ (CmmTick {}) z = z foldExp f (CmmUnwind xs) z = foldr (maybe id f) z (map snd xs) foldExp f (CmmAssign _ e) z = f e z foldExp f (CmmStore addr e _) z = f addr $ f e z foldExp f (CmmUnsafeForeignCall t _ as) z = foldr f (foldExpForeignTarget f t z) as foldExp _ (CmmBranch _) z = z foldExp f (CmmCondBranch e _ _ _) z = f e z foldExp f (CmmSwitch e _) z = f e z foldExp f (CmmCall {cml_target=tgt}) z = f tgt z foldExp f (CmmForeignCall {tgt=tgt, args=args}) z = foldr f (foldExpForeignTarget f tgt z) args foldExpDeep :: (CmmExpr -> z -> z) -> CmmNode e x -> z -> z foldExpDeep f = foldExp (wrapRecExpf f) -- ----------------------------------------------------------------------------- mapSuccessors :: (Label -> Label) -> CmmNode O C -> CmmNode O C mapSuccessors f (CmmBranch bid) = CmmBranch (f bid) mapSuccessors f (CmmCondBranch p y n l) = CmmCondBranch p (f y) (f n) l mapSuccessors f (CmmSwitch e ids) = CmmSwitch e (mapSwitchTargets f ids) mapSuccessors _ n = n mapCollectSuccessors :: forall a. 
(Label -> (Label,a)) -> CmmNode O C -> (CmmNode O C, [a]) mapCollectSuccessors f (CmmBranch bid) = let (bid', acc) = f bid in (CmmBranch bid', [acc]) mapCollectSuccessors f (CmmCondBranch p y n l) = let (bidt, acct) = f y (bidf, accf) = f n in (CmmCondBranch p bidt bidf l, [accf, acct]) mapCollectSuccessors f (CmmSwitch e ids) = let lbls = switchTargetsToList ids :: [Label] lblMap = mapFromList $ zip lbls (map f lbls) :: LabelMap (Label, a) in ( CmmSwitch e (mapSwitchTargets (\l -> fst $ mapFindWithDefault (error "impossible") l lblMap) ids) , map snd (mapElems lblMap) ) mapCollectSuccessors _ n = (n, []) -- ----------------------------------------------------------------------------- -- | Tick scope identifier, allowing us to reason about what -- annotations in a Cmm block should scope over. We especially take -- care to allow optimisations to reorganise blocks without losing -- tick association in the process. data CmmTickScope = GlobalScope -- ^ The global scope is the "root" of the scope graph. Every -- scope is a sub-scope of the global scope. It doesn't make sense -- to add ticks to this scope. On the other hand, this means that -- setting this scope on a block means no ticks apply to it. | SubScope !U.Unique CmmTickScope -- ^ Constructs a new sub-scope to an existing scope. This allows -- us to translate Core-style scoping rules (see @tickishScoped@) -- into the Cmm world. Suppose the following code: -- -- tick<1> case ... of -- A -> tick<2> ... -- B -> tick<3> ... -- -- We want the top-level tick annotation to apply to blocks -- generated for the A and B alternatives. We can achieve that by -- generating tick<1> into a block with scope a, while the code -- for alternatives A and B gets generated into sub-scopes a/b and -- a/c respectively. | CombinedScope CmmTickScope CmmTickScope -- ^ A combined scope scopes over everything that the two given -- scopes cover. It is therefore a sub-scope of either scope. This -- is required for optimisations. 
Consider common block elimination: -- -- A -> tick<2> case ... of -- C -> [common] -- B -> tick<3> case ... of -- D -> [common] -- -- We will generate code for the C and D alternatives, and figure -- out afterwards that it's actually common code. Scoping rules -- dictate that the resulting common block needs to be covered by -- both tick<2> and tick<3>, therefore we need to construct a -- scope that is a child to *both* scope. Now we can do that - if -- we assign the scopes a/c and b/d to the common-ed up blocks, -- the new block could have a combined tick scope a/c+b/d, which -- both tick<2> and tick<3> apply to. -- Note [CmmTick scoping details] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The scope of a @CmmTick@ is given by the @CmmEntry@ node of the -- same block. Note that as a result of this, optimisations making -- tick scopes more specific can *reduce* the amount of code a tick -- scopes over. Fixing this would require a separate @CmmTickScope@ -- field for @CmmTick@. Right now we do not do this simply because I -- couldn't find an example where it actually mattered -- multiple -- blocks within the same scope generally jump to each other, which -- prevents common block elimination from happening in the first -- place. But this is no strong reason, so if Cmm optimisations become -- more involved in future this might have to be revisited. -- | Output all scope paths. scopeToPaths :: CmmTickScope -> [[U.Unique]] scopeToPaths GlobalScope = [[]] scopeToPaths (SubScope u s) = map (u:) (scopeToPaths s) scopeToPaths (CombinedScope s1 s2) = scopeToPaths s1 ++ scopeToPaths s2 -- | Returns the head uniques of the scopes. This is based on the -- assumption that the @Unique@ of @SubScope@ identifies the -- underlying super-scope. Used for efficient equality and comparison, -- see below. 
scopeUniques :: CmmTickScope -> [U.Unique] scopeUniques GlobalScope = [] scopeUniques (SubScope u _) = [u] scopeUniques (CombinedScope s1 s2) = scopeUniques s1 ++ scopeUniques s2 -- Equality and order is based on the head uniques defined above. We -- take care to short-cut the (extremely) common cases. instance Eq CmmTickScope where GlobalScope == GlobalScope = True GlobalScope == _ = False _ == GlobalScope = False (SubScope u _) == (SubScope u' _) = u == u' (SubScope _ _) == _ = False _ == (SubScope _ _) = False scope == scope' = sortBy nonDetCmpUnique (scopeUniques scope) == sortBy nonDetCmpUnique (scopeUniques scope') -- This is still deterministic because -- the order is the same for equal lists -- This is non-deterministic but we do not currently support deterministic -- code-generation. See Note [Unique Determinism and code generation] -- See Note [No Ord for Unique] instance Ord CmmTickScope where compare GlobalScope GlobalScope = EQ compare GlobalScope _ = LT compare _ GlobalScope = GT compare (SubScope u _) (SubScope u' _) = nonDetCmpUnique u u' compare scope scope' = cmpList nonDetCmpUnique (sortBy nonDetCmpUnique $ scopeUniques scope) (sortBy nonDetCmpUnique $ scopeUniques scope') instance Outputable CmmTickScope where ppr GlobalScope = text "global" ppr (SubScope us GlobalScope) = ppr us ppr (SubScope us s) = ppr s <> char '/' <> ppr us ppr combined = parens $ hcat $ punctuate (char '+') $ map (hcat . punctuate (char '/') . map ppr . reverse) $ scopeToPaths combined -- | Checks whether two tick scopes are sub-scopes of each other. True -- if the two scopes are equal. isTickSubScope :: CmmTickScope -> CmmTickScope -> Bool isTickSubScope = cmp where cmp _ GlobalScope = True cmp GlobalScope _ = False cmp (CombinedScope s1 s2) s' = cmp s1 s' && cmp s2 s' cmp s (CombinedScope s1' s2') = cmp s s1' || cmp s s2' cmp (SubScope u s) s'@(SubScope u' _) = u == u' || cmp s s' -- | Combine two tick scopes. The new scope should be sub-scope of -- both parameters. 
We simplify automatically if one tick scope is a -- sub-scope of the other already. combineTickScopes :: CmmTickScope -> CmmTickScope -> CmmTickScope combineTickScopes s1 s2 | s1 `isTickSubScope` s2 = s1 | s2 `isTickSubScope` s1 = s2 | otherwise = CombinedScope s1 s2 ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/Switch.hs0000644000000000000000000004440614472400112020154 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Cmm.Switch ( SwitchTargets, mkSwitchTargets, switchTargetsCases, switchTargetsDefault, switchTargetsRange, switchTargetsSigned, mapSwitchTargets, switchTargetsToTable, switchTargetsFallThrough, switchTargetsToList, eqSwitchTargetWith, SwitchPlan(..), backendSupportsSwitch, createSwitchPlan, ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Driver.Backend import GHC.Utils.Panic import GHC.Cmm.Dataflow.Label (Label) import Data.Maybe import Data.List (groupBy) import Data.Function (on) import qualified Data.Map as M -- Note [Cmm Switches, the general plan] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Compiling a high-level switch statement, as it comes out of a STG case -- expression, for example, allows for a surprising amount of design decisions. -- Therefore, we cleanly separated this from the Stg → Cmm transformation, as -- well as from the actual code generation. -- -- The overall plan is: -- * The Stg → Cmm transformation creates a single `SwitchTargets` in -- emitSwitch and emitCmmLitSwitch in GHC.StgToCmm.Utils. -- At this stage, they are unsuitable for code generation. -- * A dedicated Cmm transformation (GHC.Cmm.Switch.Implement) replaces these -- switch statements with code that is suitable for code generation, i.e. -- a nice balanced tree of decisions with dense jump tables in the leafs. -- The actual planning of this tree is performed in pure code in createSwitchPlan -- in this module. See Note [createSwitchPlan]. 
-- * The actual code generation will not do any further processing and -- implement each CmmSwitch with a jump tables. -- -- When compiling to LLVM or C, GHC.Cmm.Switch.Implement leaves the switch -- statements alone, as we can turn a SwitchTargets value into a nice -- switch-statement in LLVM resp. C, and leave the rest to the compiler. -- -- See Note [GHC.Cmm.Switch vs. GHC.Cmm.Switch.Implement] why the two module are -- separated. -- Note [Magic Constants in GHC.Cmm.Switch] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- There are a lot of heuristics here that depend on magic values where it is -- hard to determine the "best" value (for whatever that means). These are the -- magic values: -- | Number of consecutive default values allowed in a jump table. If there are -- more of them, the jump tables are split. -- -- Currently 7, as it costs 7 words of additional code when a jump table is -- split (at least on x64, determined experimentally). maxJumpTableHole :: Integer maxJumpTableHole = 7 -- | Minimum size of a jump table. If the number is smaller, the switch is -- implemented using conditionals. -- Currently 5, because an if-then-else tree of 4 values is nice and compact. minJumpTableSize :: Int minJumpTableSize = 5 -- | Minimum non-zero offset for a jump table. See Note [Jump Table Offset]. minJumpTableOffset :: Integer minJumpTableOffset = 2 ----------------------------------------------------------------------------- -- Switch Targets -- Note [SwitchTargets] -- ~~~~~~~~~~~~~~~~~~~~ -- The branches of a switch are stored in a SwitchTargets, which consists of an -- (optional) default jump target, and a map from values to jump targets. -- -- If the default jump target is absent, the behaviour of the switch outside the -- values of the map is undefined. -- -- We use an Integer for the keys the map so that it can be used in switches on -- unsigned as well as signed integers. 
--
-- The map may be empty (we prune out-of-range branches here, so it could be us
-- emptying it).
--
-- Before code generation, the table needs to be brought into a form where all
-- entries are non-negative, so that it can be compiled into a jump table.
-- See switchTargetsToTable.


-- | A value of type SwitchTargets contains the alternatives for a 'CmmSwitch'
-- value, and knows whether the value is signed, the possible range, an
-- optional default value and a map from values to jump labels.
data SwitchTargets =
    SwitchTargets
        Bool                       -- Signed values
        (Integer, Integer)         -- Range
        (Maybe Label)              -- Default value
        (M.Map Integer Label)      -- The branches
    deriving (Show, Eq)

-- | The smart constructor mkSwitchTargets normalises the map a bit:
--  * No entries outside the range
--  * No entries equal to the default
--  * No default if all elements have explicit values
mkSwitchTargets :: Bool -> (Integer, Integer) -> Maybe Label
                -> M.Map Integer Label -> SwitchTargets
mkSwitchTargets signed range@(lo,hi) mbdef ids
    = SwitchTargets signed range mbdef' ids'
  where
    -- NB: restrict first, then drop default entries; defaultNeeded is
    -- computed on the already-pruned map ids'.
    ids' = dropDefault $ restrict ids
    mbdef' | defaultNeeded = mbdef
           | otherwise     = Nothing

    -- Drop entries outside the range, if there is a range
    restrict = restrictMap (lo,hi)

    -- Drop entries that equal the default, if there is a default
    dropDefault | Just l <- mbdef = M.filter (/= l)
                | otherwise       = id

    -- Check if the default is still needed: it is redundant exactly when
    -- every value in [lo..hi] has an explicit branch.
    defaultNeeded = fromIntegral (M.size ids') /= hi-lo+1

-- | Changes all labels mentioned in the SwitchTargets value
mapSwitchTargets :: (Label -> Label) -> SwitchTargets -> SwitchTargets
mapSwitchTargets f (SwitchTargets signed range mbdef branches)
    = SwitchTargets signed range (fmap f mbdef) (fmap f branches)

-- | Returns the list of non-default branches of the SwitchTargets value
switchTargetsCases :: SwitchTargets -> [(Integer, Label)]
switchTargetsCases (SwitchTargets _ _ _ branches) = M.toList branches

-- | Return the default label of the SwitchTargets value
switchTargetsDefault :: SwitchTargets -> Maybe Label
switchTargetsDefault (SwitchTargets _ _ mbdef _) = mbdef

-- | Return the range of the SwitchTargets value
switchTargetsRange :: SwitchTargets -> (Integer, Integer)
switchTargetsRange (SwitchTargets _ range _ _) = range

-- | Return whether this is used for a signed value
switchTargetsSigned :: SwitchTargets -> Bool
switchTargetsSigned (SwitchTargets signed _ _ _) = signed

-- | switchTargetsToTable creates a dense jump table, usable for code generation.
--
-- Also returns an offset to add to the value; the list is 0-based on the
-- result of that addition.
--
-- The conversion from Integer to Int is a bit of a wart, as the actual
-- scrutinee might be an unsigned word, but it just works, due to wrap-around
-- arithmetic (as verified by the CmmSwitchTest test case).
switchTargetsToTable :: SwitchTargets -> (Int, [Maybe Label])
switchTargetsToTable (SwitchTargets _ (lo,hi) mbdef branches)
    = (fromIntegral (-start), [ labelFor i | i <- [start..hi] ])
  where
    labelFor i = case M.lookup i branches of Just l -> Just l
                                             Nothing -> mbdef
    -- start is the table origin; a small positive lo is kept un-offset so the
    -- generated code can skip the subtraction entirely.
    start | lo >= 0 && lo < minJumpTableOffset  = 0  -- See Note [Jump Table Offset]
          | otherwise                           = lo

-- Note [Jump Table Offset]
-- ~~~~~~~~~~~~~~~~~~~~~~~~
-- Usually, the code for a jump table starting at x will first subtract x from
-- the value, to avoid a large amount of empty entries. But if x is very small,
-- the extra entries are no worse than the subtraction in terms of code size, and
-- not having to do the subtraction is quicker.
--
-- I.e. instead of
--         _u20N:
--                 leaq -1(%r14),%rax
--                 jmp *_n20R(,%rax,8)
--         _n20R:
--                 .quad   _c20p
--                 .quad   _c20q
-- do
--         _u20N:
--                 jmp *_n20Q(,%r14,8)
--
--         _n20Q:
--                 .quad   0
--                 .quad   _c20p
--                 .quad   _c20q
--                 .quad   _c20r

-- | The list of all labels occurring in the SwitchTargets value.
switchTargetsToList :: SwitchTargets -> [Label]
switchTargetsToList (SwitchTargets _ _ mbdef branches) =
    case mbdef of
      Nothing -> M.elems branches
      Just l  -> l : M.elems branches

-- | Groups cases with equal targets, suitable for pretty-printing to a
-- c-like switch statement with fall-through semantics.
switchTargetsFallThrough :: SwitchTargets -> ([([Integer], Label)], Maybe Label)
switchTargetsFallThrough (SwitchTargets _ _ mbdef branches) = (caseGroups, mbdef)
  where
    sameTarget  = (==) `on` snd
    -- Each run produced by groupBy is non-empty, so `head` is safe here.
    flatten run = (map fst run, snd (head run))
    caseGroups  = [ flatten run | run <- groupBy sameTarget (M.toList branches) ]

-- | Custom equality helper, needed for "GHC.Cmm.CommonBlockElim"
eqSwitchTargetWith :: (Label -> Label -> Bool) -> SwitchTargets -> SwitchTargets -> Bool
eqSwitchTargetWith labelEq (SwitchTargets signed1 range1 mbdef1 ids1)
                           (SwitchTargets signed2 range2 mbdef2 ids2)
  =  signed1 == signed2
  && range1 == range2
  && sameDefault mbdef1 mbdef2
  && sameBranches (M.toList ids1) (M.toList ids2)
  where
    sameDefault Nothing   Nothing   = True
    sameDefault (Just d1) (Just d2) = d1 `labelEq` d2
    sameDefault _         _         = False

    sameBranches []              []              = True
    sameBranches ((i1,l1):rest1) ((i2,l2):rest2)
      = i1 == i2 && l1 `labelEq` l2 && sameBranches rest1 rest2
    sameBranches _               _               = False

-----------------------------------------------------------------------------
-- Code generation for Switches


-- | A SwitchPlan abstractly describes how a Switch statement ought to be
-- implemented. See Note [createSwitchPlan]
data SwitchPlan
    = Unconditionally Label
    | IfEqual Integer Label SwitchPlan
    | IfLT Bool Integer SwitchPlan SwitchPlan
    | JumpTable SwitchTargets
  deriving Show
--
-- Note [createSwitchPlan]
-- ~~~~~~~~~~~~~~~~~~~~~~~
-- A SwitchPlan describes how a Switch statement is to be broken down into
-- smaller pieces suitable for code generation.
--
-- createSwitchPlan creates such a switch plan, in these steps:
--  1. It splits the switch statement at segments of non-default values that
--     are too large. See splitAtHoles and Note [Magic Constants in GHC.Cmm.Switch]
--  2. Too small jump tables should be avoided, so we break up smaller pieces
--     in breakTooSmall.
--  3.
--     We fill in the segments between those pieces with a jump to the default
--     label (if there is one), returning a SeparatedList in mkFlatSwitchPlan
--  4. We find and replace two less-than branches by a single equal-to-test in
--     findSingleValues
--  5. The thus collected pieces are assembled to a balanced binary tree.

{-
  Note [Two alts + default]
  ~~~~~~~~~~~~~~~~~~~~~~~~~

  Discussion and a bit more info at #14644

  When dealing with a switch of the form:
  switch(e) {
    case 1: goto l1;
    case 3000: goto l2;
    default: goto ldef;
  }

  If we treat it as a sparse jump table we would generate:

  if (e > 3000) //Check if value is outside of the jump table.
      goto ldef;
  else {
      if (e < 3000) { //Compare to upper value
          if(e != 1) //Compare to remaining value
              goto ldef;
            else
              goto l1;
      }
      else
          goto l2;
  }

  Instead we special case this to :

  if (e==1) goto l1;
  else if (e==3000) goto l2;
  else goto ldef;

  This means we have:
  * Less comparisons for: 1,<3000
  * Unchanged for 3000
  * One more for >3000

  This improves code in a few ways:
  * One comparison less means smaller code which helps with cache.
  * It exchanges a taken jump for two jumps not taken in the >range case.
    Jumps not taken are cheaper (See Agner guides) making this about as fast.
  * For all other cases the first range check is removed making it faster.

  The end result is that the change is not measurably slower for the case
  >3000 and faster for the other cases.

  This makes running this kind of match in an inner loop cheaper by 10-20%
  depending on the data.
  In nofib this improves wheel-sieve1 by 4-9% depending on problem size.

  We could also add a second conditional jump after the comparison to
  keep the range check like this:
    cmp 3000, rArgument
    jg  <default>
    je  <branch 2>
  While this is fairly cheap it made no big difference for the >3000 case
  and slowed down all other cases making it not worthwhile.
-}

-- | Does the backend support switch out of the box? Then leave this to the
-- backend!
backendSupportsSwitch :: Backend -> Bool
backendSupportsSwitch ViaC = True
backendSupportsSwitch LLVM = True
backendSupportsSwitch _    = False

-- | This function creates a SwitchPlan from a SwitchTargets value, breaking it
-- down into smaller pieces suitable for code generation.
createSwitchPlan :: SwitchTargets -> SwitchPlan
-- NB: the special-case equations below are tried in order, before the
-- general splitting/flattening pipeline in the last equation.
-- Lets do the common case of a singleton map quickly and efficiently (#10677)
createSwitchPlan (SwitchTargets _signed _range (Just defLabel) m)
    | [(x, l)] <- M.toList m
    = IfEqual x l (Unconditionally defLabel)
-- And another common case, matching "booleans"
createSwitchPlan (SwitchTargets _signed (lo,hi) Nothing m)
    | [(x1, l1), (_x2,l2)] <- M.toAscList m
    --Checking If |range| = 2 is enough if we have two unique literals
    , hi - lo == 1
    = IfEqual x1 l1 (Unconditionally l2)
-- See Note [Two alts + default]
createSwitchPlan (SwitchTargets _signed _range (Just defLabel) m)
    | [(x1, l1), (x2,l2)] <- M.toAscList m
    = IfEqual x1 l1 (IfEqual x2 l2 (Unconditionally defLabel))
createSwitchPlan (SwitchTargets signed range mbdef m) =
    -- pprTrace "createSwitchPlan" (text (show ids) $$ text (show (range,m)) $$ text (show pieces) $$ text (show flatPlan) $$ text (show plan)) $
    plan
  where
    -- Steps 1+2 of Note [createSwitchPlan]: split at big holes, then break
    -- up pieces that are too small to deserve a jump table.
    pieces = concatMap breakTooSmall $ splitAtHoles maxJumpTableHole m
    -- Steps 3+4: interleave default segments, then shrink with IfEqual.
    flatPlan = findSingleValues $ mkFlatSwitchPlan signed mbdef range pieces
    -- Step 5: balance into a tree of IfLT tests.
    plan = buildTree signed $ flatPlan


---
--- Step 1: Splitting at large holes
---
splitAtHoles :: Integer -> M.Map Integer a -> [M.Map Integer a]
splitAtHoles _        m | M.null m = []
splitAtHoles holeSize m = map (\range -> restrictMap range m) nonHoles
  where
    -- holes: pairs of adjacent keys whose gap exceeds holeSize
    holes = filter (\(l,h) -> h - l > holeSize) $ zip (M.keys m) (tail (M.keys m))
    -- nonHoles: the complementary dense key ranges, spanning lo..hi
    nonHoles = reassocTuples lo holes hi

    (lo,_) = M.findMin m
    (hi,_) = M.findMax m

---
--- Step 2: Avoid small jump tables
---
-- We do not want jump tables below a certain size. This breaks them up
-- (into singleton maps, for now).
breakTooSmall :: M.Map Integer a -> [M.Map Integer a]
breakTooSmall m
  | M.size m > minJumpTableSize = [m]
  | otherwise                   = [M.singleton k v | (k,v) <- M.toList m]

---
---  Step 3: Fill in the blanks
---

-- | A FlatSwitchPlan is a list of SwitchPlans, with an integer inbetween every
-- two entries, dividing the range.
-- So if we have (abusing list syntax) [plan1,n,plan2], then we use plan1 if
-- the expression is < n, and plan2 otherwise.
type FlatSwitchPlan = SeparatedList Integer SwitchPlan

mkFlatSwitchPlan :: Bool -> Maybe Label -> (Integer, Integer) -> [M.Map Integer Label] -> FlatSwitchPlan

-- If we have no default (i.e. undefined where there is no entry), we can
-- branch at the minimum of each map
mkFlatSwitchPlan _ Nothing _ [] = pprPanic "mkFlatSwitchPlan with nothing left to do" empty
mkFlatSwitchPlan signed  Nothing _ (m:ms)
  = (mkLeafPlan signed Nothing m , [ (fst (M.findMin m'), mkLeafPlan signed Nothing m') | m' <- ms ])

-- If we have a default, we have to interleave segments that jump
-- to the default between the maps
mkFlatSwitchPlan signed (Just l) r ms = let ((_,p1):ps) = go r ms in (p1, ps)
  where
    -- go walks the range left-to-right, emitting a default segment for every
    -- gap before the next map.  NB: min/max deliberately shadow Prelude here.
    go (lo,hi) []
        | lo > hi = []
        | otherwise = [(lo, Unconditionally l)]
    go (lo,hi) (m:ms)
        | lo < min = (lo, Unconditionally l) : go (min,hi) (m:ms)
        | lo == min = (lo, mkLeafPlan signed (Just l) m) : go (max+1,hi) ms
        | otherwise = pprPanic "mkFlatSwitchPlan" (integer lo <+> integer min)
      where
        min = fst (M.findMin m)
        max = fst (M.findMax m)

mkLeafPlan :: Bool -> Maybe Label -> M.Map Integer Label -> SwitchPlan
mkLeafPlan signed mbdef m
    | [(_,l)] <- M.toList m -- singleton map
    = Unconditionally l
    | otherwise
    = JumpTable $ mkSwitchTargets signed (min,max) mbdef m
  where
    min = fst (M.findMin m)
    max = fst (M.findMax m)

---
---  Step 4: Reduce the number of branches using ==
---

-- A sequence of three unconditional jumps, with the outer two pointing to the
-- same value and the bounds off by exactly one can be improved
findSingleValues :: FlatSwitchPlan -> FlatSwitchPlan
findSingleValues (Unconditionally l, (i, Unconditionally l2) : (i', Unconditionally l3) : xs)
  | l == l3 && i + 1 == i'
  = findSingleValues (IfEqual i l2 (Unconditionally l), xs)
findSingleValues (p, (i,p'):xs)
  = (p,i) `consSL` findSingleValues (p', xs)
findSingleValues (p, [])
  = (p, [])

---
---  Step 5: Actually build the tree
---

-- Build a balanced tree from a separated list
buildTree :: Bool -> FlatSwitchPlan -> SwitchPlan
buildTree _ (p,[]) = p
buildTree signed sl = IfLT signed m (buildTree signed sl1) (buildTree signed sl2)
  where
    (sl1, m, sl2) = divideSL sl

--
-- Utility data type: Non-empty lists with extra markers in between each
-- element:
--

type SeparatedList b a = (a, [(b,a)])

consSL :: (a, b) -> SeparatedList b a -> SeparatedList b a
consSL (a, b) (a', xs) = (a, (b,a'):xs)

-- Split a SeparatedList roughly in half at one of its separators.
-- Precondition: the list has at least one separator (not a singleton).
divideSL :: SeparatedList b a -> (SeparatedList b a, b, SeparatedList b a)
divideSL (_,[]) = error "divideSL: Singleton SeparatedList"
divideSL (p,xs) = ((p, xs1), m, (p', xs2))
  where
    (xs1, (m,p'):xs2) = splitAt (length xs `div` 2) xs

--
-- Other Utilities
--

-- Keep only the keys within [lo,hi] (inclusive on both ends).
restrictMap :: (Integer,Integer) -> M.Map Integer b -> M.Map Integer b
restrictMap (lo,hi) m = mid
  where (_,   mid_hi) = M.split (lo-1) m
        (mid, _)      = M.split (hi+1) mid_hi

-- for example: reassocTuples a [(b,c),(d,e)] f == [(a,b),(c,d),(e,f)]
reassocTuples :: a -> [(a,a)] -> a -> [(a,a)]
reassocTuples initial [] last
    = [(initial,last)]
reassocTuples initial ((a,b):tuples) last
    = (initial,a) : reassocTuples b tuples last

-- Note [GHC.Cmm.Switch vs. GHC.Cmm.Switch.Implement]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- I (Joachim) separated the two somewhat closely related modules
--
--  - GHC.Cmm.Switch, which provides the CmmSwitchTargets type and contains the strategy
--    for implementing a Cmm switch (createSwitchPlan), and
--  - GHC.Cmm.Switch.Implement, which contains the actual Cmm graph modification,
--
-- for these reasons:
--
--  * GHC.Cmm.Switch is very low in the dependency tree, i.e.
--    does not depend on any
--    GHC specific modules at all (with the exception of Output and
--    GHC.Cmm.Dataflow (Literal)).
--  * GHC.Cmm.Switch.Implement is the Cmm transformation and hence very high in
--    the dependency tree.
--  * GHC.Cmm.Switch provides the CmmSwitchTargets data type, which is abstract, but
--    used in GHC.Cmm.Node.
--  * Because GHC.Cmm.Switch is low in the dependency tree, the separation allows
--    for more parallelism when building GHC.
--  * The interaction between the modules is very explicit and easy to
--    understand, due to the small and simple interface.

-- File: compiler/GHC/Cmm/Type.hs (ghc-lib-parser-9.4.7.20230826)

module GHC.Cmm.Type
    ( CmmType   -- Abstract
    , b8, b16, b32, b64, b128, b256, b512, f32, f64, bWord, bHalfWord, gcWord
    , cInt
    , cmmBits, cmmFloat
    , typeWidth, cmmEqType, cmmEqType_ignoring_ptrhood
    , isFloatType, isGcPtrType, isBitsType
    , isWordAny, isWord32, isWord64
    , isFloat64, isFloat32

    , Width(..)
    , widthInBits, widthInBytes, widthInLog, widthFromBytes
    , wordWidth, halfWordWidth, cIntWidth
    , halfWordMask
    , narrowU, narrowS
    , rEP_CostCentreStack_mem_alloc
    , rEP_CostCentreStack_scc_count
    , rEP_StgEntCounter_allocs
    , rEP_StgEntCounter_allocd

    , ForeignHint(..)

    , Length
    , vec, vec2, vec4, vec8, vec16
    , vec2f64, vec2b64, vec4f32, vec4b32, vec8b16, vec16b8
    , cmmVec
    , vecLength, vecElemType
    , isVecType

    , DoAlignSanitisation
    )
where

import GHC.Prelude

import GHC.Platform
import GHC.Utils.Outputable
import GHC.Utils.Panic

import Data.Word
import Data.Int

-----------------------------------------------------------------------------
--              CmmType
-----------------------------------------------------------------------------

  -- NOTE: CmmType is an abstract type, not exported from this
  --       module so you can easily change its representation
  --
  -- However Width is exported in a concrete way,
  -- and is used extensively in pattern-matching

data CmmType    -- The important one!
  = CmmType CmmCat !Width
  deriving Show

data CmmCat                -- "Category" (not exported)
   = GcPtrCat              -- GC pointer
   | BitsCat               -- Non-pointer
   | FloatCat              -- Float
   | VecCat Length CmmCat  -- Vector
   deriving( Eq, Show )
        -- See Note [Signed vs unsigned] at the end

instance Outputable CmmType where
  ppr (CmmType cat wid) = ppr cat <> ppr (widthInBits wid)

instance Outputable CmmCat where
  ppr FloatCat       = text "F"
  ppr GcPtrCat       = text "P"
  ppr BitsCat        = text "I"
  ppr (VecCat n cat) = ppr cat <> text "x" <> ppr n <> text "V"

-- Why is CmmType stratified?  For native code generation,
-- most of the time you just want to know what sort of register
-- to put the thing in, and for this you need to know how
-- many bits thing has, and whether it goes in a floating-point
-- register.  By contrast, the distinction between GcPtr and
-- GcNonPtr is of interest to only a few parts of the code generator.

-------- Equality on CmmType --------------
-- CmmType is *not* an instance of Eq; sometimes we care about the
-- Gc/NonGc distinction, and sometimes we don't
-- So we use an explicit function to force you to think about it
cmmEqType :: CmmType -> CmmType -> Bool -- Exact equality
cmmEqType (CmmType c1 w1) (CmmType c2 w2) = c1==c2 && w1==w2

cmmEqType_ignoring_ptrhood :: CmmType -> CmmType -> Bool
  -- This equality is temporary; used in CmmLint
  -- but the RTS files are not yet well-typed wrt pointers
cmmEqType_ignoring_ptrhood (CmmType c1 w1) (CmmType c2 w2)
   = c1 `weak_eq` c2 && w1==w2
   where
     -- weak_eq treats GcPtrCat and BitsCat as equal (the final catch-all
     -- equation), recursing structurally through vector categories.
     weak_eq :: CmmCat -> CmmCat -> Bool
     FloatCat         `weak_eq` FloatCat         = True
     FloatCat         `weak_eq` _other           = False
     _other           `weak_eq` FloatCat         = False
     (VecCat l1 cat1) `weak_eq` (VecCat l2 cat2) = l1 == l2
                                                   && cat1 `weak_eq` cat2
     (VecCat {})      `weak_eq` _other           = False
     _other           `weak_eq` (VecCat {})      = False
     _word1           `weak_eq` _word2           = True        -- Ignores GcPtr

--- Simple operations on CmmType -----

-- | The width of a CmmType.
typeWidth :: CmmType -> Width
typeWidth (CmmType _ w) = w

cmmBits, cmmFloat :: Width -> CmmType
cmmBits  = CmmType BitsCat
cmmFloat = CmmType FloatCat

-------- Common CmmTypes ------------
-- Floats and words of specific widths
b8, b16, b32, b64, b128, b256, b512, f32, f64 :: CmmType
b8   = cmmBits W8
b16  = cmmBits W16
b32  = cmmBits W32
b64  = cmmBits W64
b128 = cmmBits W128
b256 = cmmBits W256
b512 = cmmBits W512
f32  = cmmFloat W32
f64  = cmmFloat W64

-- CmmTypes of native word widths
bWord :: Platform -> CmmType
bWord platform = cmmBits (wordWidth platform)

bHalfWord :: Platform -> CmmType
bHalfWord platform = cmmBits (halfWordWidth platform)

gcWord :: Platform -> CmmType
gcWord platform = CmmType GcPtrCat (wordWidth platform)

cInt :: Platform -> CmmType
cInt platform = cmmBits (cIntWidth platform)

------------ Predicates ----------------
isFloatType, isGcPtrType, isBitsType :: CmmType -> Bool
isFloatType (CmmType FloatCat _) = True
isFloatType _other               = False

isGcPtrType (CmmType GcPtrCat _) = True
isGcPtrType _other               = False

isBitsType (CmmType BitsCat _) = True
isBitsType _                   = False

isWordAny, isWord32, isWord64, isFloat32, isFloat64 :: CmmType -> Bool
-- isWord64 is true of 64-bit non-floats (both gc-ptrs and otherwise)
-- isFloat32 and 64 are obvious

isWordAny (CmmType BitsCat  _) = True
isWordAny (CmmType GcPtrCat _) = True
isWordAny _other               = False

isWord64 (CmmType BitsCat  W64) = True
isWord64 (CmmType GcPtrCat W64) = True
isWord64 _other                 = False

isWord32 (CmmType BitsCat  W32) = True
isWord32 (CmmType GcPtrCat W32) = True
isWord32 _other                 = False

isFloat32 (CmmType FloatCat W32) = True
isFloat32 _other                 = False

isFloat64 (CmmType FloatCat W64) = True
isFloat64 _other                 = False

-----------------------------------------------------------------------------
--              Width
-----------------------------------------------------------------------------

data Width
  = W8
  | W16
  | W32
  | W64
  | W128
  | W256
  | W512
  deriving (Eq, Ord, Show)

instance Outputable Width where
   ppr rep = text (show rep)

-------- Common Widths ------------

-- | The width of the current platform's word size.
-- | The width of the current platform's word size.
wordWidth :: Platform -> Width
wordWidth = pickByWordSize W32 W64

-- | The width of the current platform's half-word size.
halfWordWidth :: Platform -> Width
halfWordWidth = pickByWordSize W16 W32

-- | A bit-mask for the lower half-word of current platform.
halfWordMask :: Platform -> Integer
halfWordMask = pickByWordSize 0xFFFF 0xFFFFFFFF

-- Internal: select the first alternative on 32-bit platforms (PW4) and the
-- second on 64-bit platforms (PW8).
pickByWordSize :: a -> a -> Platform -> a
pickByWordSize on32 on64 platform =
  case platformWordSize platform of
    PW4 -> on32
    PW8 -> on64

-- cIntRep is the Width for a C-language 'int'
cIntWidth :: Platform -> Width
cIntWidth platform =
  case pc_CINT_SIZE (platformConstants platform) of
    4 -> W32
    8 -> W64
    s -> panic ("cIntWidth: Unknown cINT_SIZE: " ++ show s)

-- | A width in bits.
widthInBits :: Width -> Int
widthInBits w = case w of
  W8   -> 8
  W16  -> 16
  W32  -> 32
  W64  -> 64
  W128 -> 128
  W256 -> 256
  W512 -> 512

-- | A width in bytes.
--
-- > widthFromBytes (widthInBytes w) === w
widthInBytes :: Width -> Int
widthInBytes w = case w of
  W8   -> 1
  W16  -> 2
  W32  -> 4
  W64  -> 8
  W128 -> 16
  W256 -> 32
  W512 -> 64

-- | *Partial* A width from the number of bytes.
widthFromBytes :: Int -> Width
widthFromBytes n = case n of
  1  -> W8
  2  -> W16
  4  -> W32
  8  -> W64
  16 -> W128
  32 -> W256
  64 -> W512
  _  -> pprPanic "no width for given number of bytes" (ppr n)

-- | log_2 of the width in bytes, useful for generating shifts.
widthInLog :: Width -> Int
widthInLog w = case w of
  W8   -> 0
  W16  -> 1
  W32  -> 2
  W64  -> 3
  W128 -> 4
  W256 -> 5
  W512 -> 6

-- widening / narrowing

-- | Narrow a signed or unsigned value to the given width. The result will
-- reside in @[0, +2^width)@.
--
-- >>> narrowU W8 256 == 0
-- >>> narrowU W8 255 == 255
-- >>> narrowU W8 128 == 128
-- >>> narrowU W8 127 == 127
-- >>> narrowU W8 0 == 0
-- >>> narrowU W8 (-127) == 129
-- >>> narrowU W8 (-128) == 128
-- >>> narrowU W8 (-129) == 127
-- >>> narrowU W8 (-255) == 1
-- >>> narrowU W8 (-256) == 0
--
narrowU :: Width -> Integer -> Integer
-- Wrap-around narrowing via the corresponding fixed-width Word type.
narrowU W8  x = fromIntegral (fromIntegral x :: Word8)
narrowU W16 x = fromIntegral (fromIntegral x :: Word16)
narrowU W32 x = fromIntegral (fromIntegral x :: Word32)
narrowU W64 x = fromIntegral (fromIntegral x :: Word64)
narrowU _ _ = panic "narrowU"   -- vector/unsupported widths

-- | Narrow a signed value to the given width. The result will reside
-- in @[-2^(width-1), +2^(width-1))@.
--
-- >>> narrowS W8 256 == 0
-- >>> narrowS W8 255 == -1
-- >>> narrowS W8 128 == -128
-- >>> narrowS W8 127 == 127
-- >>> narrowS W8 0 == 0
-- >>> narrowS W8 (-127) == -127
-- >>> narrowS W8 (-128) == -128
-- >>> narrowS W8 (-129) == 127
-- >>> narrowS W8 (-255) == 1
-- >>> narrowS W8 (-256) == 0
--
narrowS :: Width -> Integer -> Integer
-- Wrap-around narrowing via the corresponding fixed-width Int type.
narrowS W8  x = fromIntegral (fromIntegral x :: Int8)
narrowS W16 x = fromIntegral (fromIntegral x :: Int16)
narrowS W32 x = fromIntegral (fromIntegral x :: Int32)
narrowS W64 x = fromIntegral (fromIntegral x :: Int64)
narrowS _ _ = panic "narrowS"   -- vector/unsupported widths

-----------------------------------------------------------------------------
--              SIMD
-----------------------------------------------------------------------------

type Length = Int

-- | A vector type of @l@ elements of the given scalar type; the overall
-- width is the element width times the length.
vec :: Length -> CmmType -> CmmType
vec l (CmmType cat w) = CmmType (VecCat l cat) vecw
  where
    vecw :: Width
    vecw = widthFromBytes (l*widthInBytes w)

vec2, vec4, vec8, vec16 :: CmmType -> CmmType
vec2  = vec 2
vec4  = vec 4
vec8  = vec 8
vec16 = vec 16

vec2f64, vec2b64, vec4f32, vec4b32, vec8b16, vec16b8 :: CmmType
vec2f64 = vec 2 f64
vec2b64 = vec 2 b64
vec4f32 = vec 4 f32
vec4b32 = vec 4 b32
vec8b16 = vec 8 b16
vec16b8 = vec 16 b8

-- | Same construction as 'vec' (kept for callers that use an Int count).
cmmVec :: Int -> CmmType -> CmmType
cmmVec n (CmmType cat w) =
  CmmType (VecCat n cat) (widthFromBytes (n*widthInBytes w))

-- | The number of elements of a vector type; panics on non-vectors.
vecLength :: CmmType -> Length
vecLength (CmmType (VecCat l _) _) = l
vecLength _                        = panic "vecLength: not a vector"

-- | The scalar element type of a vector type; panics on non-vectors.
vecElemType :: CmmType -> CmmType
vecElemType (CmmType (VecCat l cat) w) = CmmType cat scalw
  where
    scalw :: Width
    scalw = widthFromBytes (widthInBytes w `div` l)
vecElemType _ = panic "vecElemType: not a vector"

isVecType :: CmmType -> Bool
isVecType (CmmType (VecCat {}) _) = True
isVecType _                       = False

-------------------------------------------------------------------------
-- Hints

-- Hints are extra type information we attach to the arguments and
-- results of a foreign call, where more type information is sometimes
-- needed by the ABI to make the correct kind of call.
--
-- See Note [Signed vs unsigned] for one case where this is used.

data ForeignHint
  = NoHint | AddrHint | SignedHint
  deriving( Eq )
        -- Used to give extra per-argument or per-result
        -- information needed by foreign calling conventions

-------------------------------------------------------------------------

-- These don't really belong here, but I don't know where is best to
-- put them.
-- | Width of the CostCentreStack mem_alloc field; the byte count is read
-- from the platform's REP_CostCentreStack_mem_alloc constant.
rEP_CostCentreStack_mem_alloc :: Platform -> CmmType
rEP_CostCentreStack_mem_alloc platform
    = cmmBits (widthFromBytes (pc_REP_CostCentreStack_mem_alloc pc))
    where pc = platformConstants platform

-- | Width of the CostCentreStack scc_count field (from platform constants).
rEP_CostCentreStack_scc_count :: Platform -> CmmType
rEP_CostCentreStack_scc_count platform
    = cmmBits (widthFromBytes (pc_REP_CostCentreStack_scc_count pc))
    where pc = platformConstants platform

-- | Width of the StgEntCounter allocs field (from platform constants).
rEP_StgEntCounter_allocs :: Platform -> CmmType
rEP_StgEntCounter_allocs platform
    = cmmBits (widthFromBytes (pc_REP_StgEntCounter_allocs pc))
    where pc = platformConstants platform

-- | Width of the StgEntCounter allocd field (from platform constants).
rEP_StgEntCounter_allocd :: Platform -> CmmType
rEP_StgEntCounter_allocd platform
    = cmmBits (widthFromBytes (pc_REP_StgEntCounter_allocd pc))
    where pc = platformConstants platform

-------------------------------------------------------------------------
{-      Note [Signed vs unsigned]
        ~~~~~~~~~~~~~~~~~~~~~~~~~
Should a CmmType include a signed vs. unsigned distinction?

This is very much like a "hint" in C-- terminology: it isn't necessary
in order to generate correct code, but it might be useful in that the
compiler can generate better code if it has access to higher-level
hints about data.  This is important at call boundaries, because the
definition of a function is not visible at all of its call sites, so
the compiler cannot infer the hints.

Here in Cmm, we're taking a slightly different approach.  We include
the int vs. float hint in the CmmType, because (a) the majority of
platforms have a strong distinction between float and int registers,
and (b) we don't want to do any heavyweight hint-inference in the
native code backend in order to get good code.  We're treating the
hint more like a type: our Cmm is always completely consistent with
respect to hints.  All coercions between float and int are explicit.

What about the signed vs. unsigned hint?  This information might be
useful if we want to keep sub-word-sized values in word-size
registers, which we must do if we only have word-sized registers.
On such a system, there are two straightforward conventions for representing sub-word-sized values: (a) Leave the upper bits undefined. Comparison operations must sign- or zero-extend both operands before comparing them, depending on whether the comparison is signed or unsigned. (b) Always keep the values sign- or zero-extended as appropriate. Arithmetic operations must narrow the result to the appropriate size. A clever compiler might not use either (a) or (b) exclusively, instead it would attempt to minimize the coercions by analysis: the same kind of analysis that propagates hints around. In Cmm we don't want to have to do this, so we plump for having richer types and keeping the type information consistent. If signed/unsigned hints are missing from CmmType, then the only choice we have is (a), because we don't know whether the result of an operation should be sign- or zero-extended. Many architectures have extending load operations, which work well with (b). To make use of them with (a), you need to know whether the value is going to be sign- or zero-extended by an enclosing comparison (for example), which involves knowing above the context. This is doable but more complex. Further complicating the issue is foreign calls: a foreign calling convention can specify that signed 8-bit quantities are passed as sign-extended 32 bit quantities, for example (this is the case on the PowerPC). So we *do* need sign information on foreign call arguments. Pros for adding signed vs. unsigned to CmmType: - It would let us use convention (b) above, and get easier code generation for extending loads. - Less information required on foreign calls. - MachOp type would be simpler Cons: - More complexity - What is the CmmType for a VanillaReg? Currently it is always wordRep, but now we have to decide whether it is signed or unsigned. The same VanillaReg can thus have different CmmType in different parts of the program. - Extra coercions cluttering up expressions. 
Currently for GHC, the foreign call point is moot, because we do our
own promotion of sub-word-sized values to word-sized values.  The Int8
type is represented by an Int# which is kept sign-extended at all
times (this is slightly naughty, because we're making assumptions
about the C calling convention rather early on in the compiler).
However, given this, the cons outweigh the pros.

-}

-- | is @-falignment-sanitisation@ enabled?
type DoAlignSanitisation = Bool

-- File: compiler/GHC/CmmToAsm/CFG/Weight.hs (ghc-lib-parser-9.4.7.20230826)

module GHC.CmmToAsm.CFG.Weight
   ( Weights (..)
   , defaultWeights
   , parseWeights
   )
where

import GHC.Prelude
import GHC.Utils.Panic

-- | Edge weights to use when generating a CFG from CMM
-- NOTE(review): field semantics are inferred from the names; confirm
-- against the CFG-construction code that consumes them.
data Weights = Weights
   { uncondWeight       :: Int
   , condBranchWeight   :: Int
   , switchWeight       :: Int
   , callWeight         :: Int
   , likelyCondWeight   :: Int
   , unlikelyCondWeight :: Int
   , infoTablePenalty   :: Int
   , backEdgeBonus      :: Int
   }

-- | Default edge weights
defaultWeights :: Weights
defaultWeights = Weights
   { uncondWeight       = 1000
   , condBranchWeight   = 800
   , switchWeight       = 1
   , callWeight         = -10
   , likelyCondWeight   = 900
   , unlikelyCondWeight = 300
   , infoTablePenalty   = 300
   , backEdgeBonus      = 400
   }

-- Parse a comma-separated list of "name=value" assignments and fold them
-- over the given weights.  Panics on an unknown parameter name or a
-- malformed assignment (`read` on a non-numeric value also errors).
parseWeights :: String -> Weights -> Weights
parseWeights s oldWeights =
        foldl' (\cfg (n,v) -> update n v cfg) oldWeights assignments
    where
        assignments = map assignment $ settings s
        update "uncondWeight" n w =
            w {uncondWeight = n}
        update "condBranchWeight" n w =
            w {condBranchWeight = n}
        update "switchWeight" n w =
            w {switchWeight = n}
        update "callWeight" n w =
            w {callWeight = n}
        update "likelyCondWeight" n w =
            w {likelyCondWeight = n}
        update "unlikelyCondWeight" n w =
            w {unlikelyCondWeight = n}
        update "infoTablePenalty" n w =
            w {infoTablePenalty = n}
        update "backEdgeBonus" n w =
            w {backEdgeBonus = n}
        update other _ _
            = panic $ other ++
                      " is not a CFG weight parameter. " ++
                      exampleString

        -- Split the input on commas.
        settings s
            | (s1,rest) <- break (== ',') s
            , null rest
            = [s1]
            | (s1,rest) <- break (== ',') s
            = s1 : settings (drop 1 rest)

        -- Split a single "name=value" pair.
        assignment as
            | (name, _:val) <- break (== '=') as
            = (name,read val)
            | otherwise
            = panic $ "Invalid CFG weight parameters." ++ exampleString

        exampleString = "Example parameters: uncondWeight=1000," ++
                        "condBranchWeight=800,switchWeight=0,callWeight=300" ++
                        ",likelyCondWeight=900,unlikelyCondWeight=300" ++
                        ",infoTablePenalty=300,backEdgeBonus=400"

-- File: compiler/GHC/Core.hs (ghc-lib-parser-9.4.7.20230826)

{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
-}

{-# LANGUAGE DeriveDataTypeable, FlexibleContexts #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE BangPatterns #-}

{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-}
{-# OPTIONS_GHC -Wno-incomplete-record-updates #-}

-- | GHC.Core holds all the main data types for use by the Glasgow Haskell Compiler midsection
module GHC.Core (
        -- * Main data types
        Expr(..), Alt(..), Bind(..), AltCon(..),
        Arg, CoreProgram, CoreExpr, CoreAlt, CoreBind, CoreArg, CoreBndr,
        TaggedExpr, TaggedAlt, TaggedBind, TaggedArg, TaggedBndr(..), deTagExpr,

        -- * In/Out type synonyms
        InId, InBind, InExpr, InAlt, InArg, InType, InKind,
        InBndr, InVar, InCoercion, InTyVar, InCoVar,
        OutId, OutBind, OutExpr, OutAlt, OutArg, OutType, OutKind,
        OutBndr, OutVar, OutCoercion, OutTyVar, OutCoVar, MOutCoercion,

        -- ** 'Expr' construction
        mkLet, mkLets, mkLetNonRec, mkLetRec, mkLams,
        mkApps, mkTyApps, mkCoApps, mkVarApps, mkTyArg,

        mkIntLit, mkIntLitWrap,
        mkWordLit, mkWordLitWrap,
        mkWord8Lit,
        mkWord64LitWord64, mkInt64LitInt64,
        mkCharLit, mkStringLit,
        mkFloatLit, mkFloatLitFloat,
        mkDoubleLit, mkDoubleLitDouble,

        mkConApp, mkConApp2, mkTyBind, mkCoBind,
        varToCoreExpr, varsToCoreExprs,

        isId, cmpAltCon, cmpAlt, ltAlt,

        -- ** Simple 'Expr' access functions and predicates
        bindersOf, bindersOfBinds, rhssOfBind,
rhssOfAlts, collectBinders, collectTyBinders, collectTyAndValBinders, collectNBinders, collectArgs, stripNArgs, collectArgsTicks, flattenBinds, exprToType, wrapLamBody, isValArg, isTypeArg, isCoArg, isTyCoArg, valArgCount, valBndrCount, isRuntimeArg, isRuntimeVar, -- * Unfolding data types Unfolding(..), UnfoldingCache(..), UnfoldingGuidance(..), UnfoldingSource(..), -- ** Constructing 'Unfolding's noUnfolding, bootUnfolding, evaldUnfolding, mkOtherCon, unSaturatedOk, needSaturated, boringCxtOk, boringCxtNotOk, -- ** Predicates and deconstruction on 'Unfolding' unfoldingTemplate, expandUnfolding_maybe, maybeUnfoldingTemplate, otherCons, isValueUnfolding, isEvaldUnfolding, isCheapUnfolding, isExpandableUnfolding, isConLikeUnfolding, isCompulsoryUnfolding, isStableUnfolding, isInlineUnfolding, isBootUnfolding, hasCoreUnfolding, hasSomeUnfolding, canUnfold, neverUnfoldGuidance, isStableSource, -- * Annotated expression data types AnnExpr, AnnExpr'(..), AnnBind(..), AnnAlt(..), -- ** Operations on annotated expressions collectAnnArgs, collectAnnArgsTicks, -- ** Operations on annotations deAnnotate, deAnnotate', deAnnAlt, deAnnBind, collectAnnBndrs, collectNAnnBndrs, -- * Orphanhood IsOrphan(..), isOrphan, notOrphan, chooseOrphanAnchor, -- * Core rule data types CoreRule(..), RuleBase, RuleName, RuleFun, IdUnfoldingFun, InScopeEnv, RuleEnv(..), RuleOpts(..), mkRuleEnv, emptyRuleEnv, -- ** Operations on 'CoreRule's ruleArity, ruleName, ruleIdName, ruleActivation, setRuleIdName, ruleModule, isBuiltinRule, isLocalRule, isAutoRule, ) where import GHC.Prelude import GHC.Platform import GHC.Types.Var.Env( InScopeSet ) import GHC.Types.Var import GHC.Core.Type import GHC.Core.Coercion import GHC.Types.Name import GHC.Types.Name.Set import GHC.Types.Name.Env( NameEnv ) import GHC.Types.Literal import GHC.Types.Tickish import GHC.Core.DataCon import GHC.Unit.Module import GHC.Types.Basic import GHC.Types.Unique.Set import GHC.Utils.Binary import GHC.Utils.Misc import 
GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.Data hiding (TyCon) import Data.Int import Data.Word infixl 4 `mkApps`, `mkTyApps`, `mkVarApps`, `App`, `mkCoApps` -- Left associative, so that we can say (f `mkTyApps` xs `mkVarApps` ys) {- ************************************************************************ * * \subsection{The main data types} * * ************************************************************************ These data types are the heart of the compiler -} -- | This is the data type that represents GHCs core intermediate language. Currently -- GHC uses System FC for this purpose, -- which is closely related to the simpler and better known System F . -- -- We get from Haskell source to this Core language in a number of stages: -- -- 1. The source code is parsed into an abstract syntax tree, which is represented -- by the data type 'GHC.Hs.Expr.HsExpr' with the names being 'GHC.Types.Name.Reader.RdrNames' -- -- 2. This syntax tree is /renamed/, which attaches a 'GHC.Types.Unique.Unique' to every 'GHC.Types.Name.Reader.RdrName' -- (yielding a 'GHC.Types.Name.Name') to disambiguate identifiers which are lexically identical. -- For example, this program: -- -- @ -- f x = let f x = x + 1 -- in f (x - 2) -- @ -- -- Would be renamed by having 'Unique's attached so it looked something like this: -- -- @ -- f_1 x_2 = let f_3 x_4 = x_4 + 1 -- in f_3 (x_2 - 2) -- @ -- But see Note [Shadowing] below. -- -- 3. The resulting syntax tree undergoes type checking (which also deals with instantiating -- type class arguments) to yield a 'GHC.Hs.Expr.HsExpr' type that has 'GHC.Types.Id.Id' as it's names. -- -- 4. Finally the syntax tree is /desugared/ from the expressive 'GHC.Hs.Expr.HsExpr' type into -- this 'Expr' type, which has far fewer constructors and hence is easier to perform -- optimization, analysis and code generation on. -- -- The type parameter @b@ is for the type of binders in the expression tree. 
-- -- The language consists of the following elements: -- -- * Variables -- See Note [Variable occurrences in Core] -- -- * Primitive literals -- -- * Applications: note that the argument may be a 'Type'. -- See Note [Core let/app invariant] -- See Note [Representation polymorphism invariants] -- -- * Lambda abstraction -- See Note [Representation polymorphism invariants] -- -- * Recursive and non recursive @let@s. Operationally -- this corresponds to allocating a thunk for the things -- bound and then executing the sub-expression. -- -- See Note [Core letrec invariant] -- See Note [Core let/app invariant] -- See Note [Representation polymorphism invariants] -- See Note [Core type and coercion invariant] -- -- * Case expression. Operationally this corresponds to evaluating -- the scrutinee (expression examined) to weak head normal form -- and then examining at most one level of resulting constructor (i.e. you -- cannot do nested pattern matching directly with this). -- -- The binder gets bound to the value of the scrutinee, -- and the 'Type' must be that of all the case alternatives -- -- IMPORTANT: see Note [Case expression invariants] -- -- * Cast an expression to a particular type. -- This is used to implement @newtype@s (a @newtype@ constructor or -- destructor just becomes a 'Cast' in Core) and GADTs. -- -- * Ticks. These are used to represent all the source annotation we -- support: profiling SCCs, HPC ticks, and GHCi breakpoints. -- -- * A type: this should only show up at the top level of an Arg -- -- * A coercion {- Note [Why does Case have a 'Type' field?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The obvious alternative is exprType (Case scrut bndr alts) | (_,_,rhs1):_ <- alts = exprType rhs1 But caching the type in the Case constructor exprType (Case scrut bndr ty alts) = ty is better for at least three reasons: * It works when there are no alternatives (see case invariant 1 above) * It might be faster in deeply-nested situations. 
* It might not be quite the same as (exprType rhs) for one of the RHSs in alts. Consider a phantom type synonym type S a = Int and we want to form the case expression case x of { K (a::*) -> (e :: S a) } Then exprType of the RHS is (S a), but we cannot make that be the 'ty' in the Case constructor because 'a' is simply not in scope there. Instead we must expand the synonym to Int before putting it in the Case constructor. See GHC.Core.Utils.mkSingleAltCase. So we'd have to do synonym expansion in exprType which would be inefficient. * The type stored in the case is checked with lintInTy. This checks (among other things) that it does not mention any variables that are not in scope. If we did not have the type there, it would be a bit harder for Core Lint to reject case blah of Ex x -> x where data Ex = forall a. Ex a. -} -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Expr b = Var Id | Lit Literal | App (Expr b) (Arg b) | Lam b (Expr b) | Let (Bind b) (Expr b) | Case (Expr b) b Type [Alt b] -- See Note [Case expression invariants] -- and Note [Why does Case have a 'Type' field?] | Cast (Expr b) CoercionR -- The Coercion has Representational role | Tick CoreTickish (Expr b) | Type Type | Coercion Coercion deriving Data -- | Type synonym for expressions that occur in function argument positions. -- Only 'Arg' should contain a 'Type' at top level, general 'Expr' should not type Arg b = Expr b -- | A case split alternative. Consists of the constructor leading to the alternative, -- the variables bound from the constructor, and the expression to be executed given that binding. -- The default alternative is @(DEFAULT, [], rhs)@ -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Alt b = Alt AltCon [b] (Expr b) deriving (Data) -- | A case alternative constructor (i.e. 
pattern match)

-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint
data AltCon
  = DataAlt DataCon   -- ^ A plain data constructor: @case e of { Foo x -> ... }@.
                      -- Invariant: the 'DataCon' is always from a @data@ type, and never from a @newtype@

  | LitAlt  Literal   -- ^ A literal: @case e of { 1 -> ... }@
                      -- Invariant: always an *unlifted* literal
                      -- See Note [Literal alternatives]

  | DEFAULT           -- ^ Trivial alternative: @case e of { _ -> ... }@
   deriving (Eq, Data)

-- This instance is a bit shady. It can only be used to compare AltCons for
-- a single type constructor. Fortunately, it seems quite unlikely that we'll
-- ever need to compare AltCons for different type constructors.
-- The instance adheres to the order described in [Core case invariants]
--
-- The equations below give: DEFAULT < LitAlt < DataAlt, with DataAlts
-- ordered by constructor tag and LitAlts by their literal's Ord instance.
instance Ord AltCon where
  compare (DataAlt con1) (DataAlt con2) =
    assert (dataConTyCon con1 == dataConTyCon con2) $
    compare (dataConTag con1) (dataConTag con2)
  compare (DataAlt _) _ = GT
  compare _ (DataAlt _) = LT
  compare (LitAlt l1) (LitAlt l2) = compare l1 l2
  compare (LitAlt _) DEFAULT = GT
  compare DEFAULT DEFAULT = EQ
  compare DEFAULT _ = LT

-- | Binding, used for top level bindings in a module and local bindings in a @let@.

-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint
data Bind b = NonRec b (Expr b)   -- ^ A single non-recursive binding
            | Rec [(b, (Expr b))] -- ^ A group of recursive bindings
            deriving Data

{-
Note [Shadowing]
~~~~~~~~~~~~~~~~
While various passes attempt to rename on-the-fly in a manner that avoids
"shadowing" (thereby simplifying downstream optimizations), neither the
simplifier nor any other pass GUARANTEES that shadowing is avoided. Thus, all
passes SHOULD work fine even in the presence of arbitrary shadowing in their
inputs.

In particular, scrutinee variables `x` in expressions of the form
`Case e x t` are often renamed to variables with a prefix "wild_".
These "wild" variables may appear in the body of the case-expression, and further, may be shadowed within the body. So the Unique in a Var is not really unique at all. Still, it's very useful to give a constant-time equality/ordering for Vars, and to give a key that can be used to make sets of Vars (VarSet), or mappings from Vars to other things (VarEnv). Moreover, if you do want to eliminate shadowing, you can give a new Unique to an Id without changing its printable name, which makes debugging easier. Note [Literal alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Literal alternatives (LitAlt lit) are always for *un-lifted* literals. We have one literal, a literal Integer, that is lifted, and we don't allow in a LitAlt, because LitAlt cases don't do any evaluation. Also (see #5603) if you say case 3 of IS x -> ... IP _ -> ... IN _ -> ... (where IS, IP, IN are the constructors for Integer) we don't want the simplifier calling findAlt with argument (LitAlt 3). No no. Integer literals are an opaque encoding of an algebraic data type, not of an unlifted literal, like all the others. Also, we do not permit case analysis with literal patterns on floating-point types. See #9238 and Note [Rules for floating-point comparisons] in GHC.Core.Opt.ConstantFold for the rationale for this restriction. -------------------------- GHC.Core INVARIANTS --------------------------- Note [Variable occurrences in Core] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Variable /occurrences/ are never CoVars, though /bindings/ can be. All CoVars appear in Coercions. For example \(c :: Age~#Int) (d::Int). d |> (sym c) Here 'c' is a CoVar, which is lambda-bound, but it /occurs/ in a Coercion, (sym c). Note [Core letrec invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Core letrec invariant: The right hand sides of all /top-level/ or /recursive/ bindings must be of lifted type There is one exception to this rule, top-level @let@s are allowed to bind primitive string literals: see Note [Core top-level string literals]. 
See "Type#type_classification" in GHC.Core.Type for the meaning of "lifted" vs. "unlifted"). For the non-top-level, non-recursive case see Note [Core let-can-float invariant]. Note [Core let-can-float invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The let-can-float invariant: The right hand side of a /non-top-level/, /non-recursive/ binding may be of unlifted type, but only if the expression is ok-for-speculation or the 'Let' is for a join point. (For top-level or recursive lets see Note [Core letrec invariant].) This means that the let can be floated around without difficulty. For example, this is OK: y::Int# = x +# 1# But this is not, as it may affect termination if the expression is floated out: y::Int# = fac 4# In this situation you should use @case@ rather than a @let@. The function 'GHC.Core.Utils.needsCaseBinding' can help you determine which to generate, or alternatively use 'GHC.Core.Make.mkCoreLet' rather than this constructor directly, which will generate a @case@ if necessary The let-can-float invariant is initially enforced by mkCoreLet in GHC.Core.Make. For discussion of some implications of the let-can-float invariant primops see Note [Checking versus non-checking primops] in GHC.Builtin.PrimOps. Historical Note [The let/app invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before 2022 GHC used the "let/app invariant", which applied the let-can-float rules to the argument of an application, as well as to the RHS of a let. This made some kind of sense, because 'let' can always be encoded as application: let x=rhs in b = (\x.b) rhs But the let/app invariant got in the way of RULES; see #19313. For example up :: Int# -> Int# {-# RULES "up/down" forall x. up (down x) = x #-} The LHS of this rule doesn't satisfy the let/app invariant. Indeed RULES is a big reason that GHC doesn't use ANF, where the argument of an application is always a variable or a constant. To allow RULES to work nicely we need to allow lots of things in the arguments of a call. 
TL;DR: we relaxed the let/app invariant to become the let-can-float invariant. Note [Core top-level string literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As an exception to the usual rule that top-level binders must be lifted, we allow binding primitive string literals (of type Addr#) of type Addr# at the top level. This allows us to share string literals earlier in the pipeline and crucially allows other optimizations in the Core2Core pipeline to fire. Consider, f n = let a::Addr# = "foo"# in \x -> blah In order to be able to inline `f`, we would like to float `a` to the top. Another option would be to inline `a`, but that would lead to duplicating string literals, which we want to avoid. See #8472. The solution is simply to allow top-level unlifted binders. We can't allow arbitrary unlifted expression at the top-level though, unlifted binders cannot be thunks, so we just allow string literals. We allow the top-level primitive string literals to be wrapped in Ticks in the same way they can be wrapped when nested in an expression. CoreToSTG currently discards Ticks around top-level primitive string literals. See #14779. Also see Note [Compilation plan for top-level string literals]. Note [Compilation plan for top-level string literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is a summary on how top-level string literals are handled by various parts of the compilation pipeline. * In the source language, there is no way to bind a primitive string literal at the top level. * In Core, we have a special rule that permits top-level Addr# bindings. See Note [Core top-level string literals]. Core-to-core passes may introduce new top-level string literals. See GHC.Core.Utils.exprIsTopLevelBindable, and exprIsTickedString * In STG, top-level string literals are explicitly represented in the syntax tree. * A top-level string literal may end up exported from a module. 
In this case, in the object file, the content of the exported literal is
given a label with the _bytes suffix.

Note [NON-BOTTOM-DICTS invariant]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is a global invariant (not checkable by Lint) that

    every dictionary-typed expression is non-bottom.

How do we know this?  A dictionary is always built by GHC itself.  It is
either a dictionary data constructor applied to its method and superclass
fields (possibly via the dictionary function generated from an instance
declaration), which is manifestly a value, or

* A superclass selection from some other dictionary. This is harder to
  guarantee: see Note [Recursive superclasses] and
  Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance.

A bad Core-to-Core pass could invalidate this reasoning, but that's too bad.
It's still an invariant of Core programs generated by GHC from Haskell, and
Core-to-Core passes maintain it.

Why is it useful to know that dictionaries are non-bottom?

1. It justifies the use of `-XDictsStrict`;
   see `GHC.Core.Types.Demand.strictifyDictDmd`

2. It means that (eq_sel d) is ok-for-speculation and thus

     case (eq_sel d) of _ -> blah

   can be discarded by the Simplifier.  See these Notes:
   Note [exprOkForSpeculation and type classes] in GHC.Core.Utils
   Note [Speculative evaluation] in GHC.CoreToStg.Prep

Note [Case expression invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Case expressions are one of the more complicated elements of the Core
language, and come with a number of invariants.  All of them should be
checked by Core Lint.

1. The list of alternatives may be empty;
   See Note [Empty case alternatives]

2. The 'DEFAULT' case alternative must be first in the list,
   if it occurs at all.
Checked in GHC.Core.Lint.checkCaseAlts. 3. The remaining cases are in order of (strictly) increasing tag (for 'DataAlts') or lit (for 'LitAlts'). This makes finding the relevant constructor easy, and makes comparison easier too. Checked in GHC.Core.Lint.checkCaseAlts. 4. The list of alternatives must be exhaustive. An /exhaustive/ case does not necessarily mention all constructors: @ data Foo = Red | Green | Blue ... case x of Red -> True other -> f (case x of Green -> ... Blue -> ... ) ... @ The inner case does not need a @Red@ alternative, because @x@ can't be @Red@ at that program point. This is not checked by Core Lint -- it's very hard to do so. E.g. suppose that inner case was floated out, thus: let a = case x of Green -> ... Blue -> ... ) case x of Red -> True other -> f a Now it's really hard to see that the Green/Blue case is exhaustive. But it is. If you have a case-expression that really /isn't/ exhaustive, we may generate seg-faults. Consider the Green/Blue case above. Since there are only two branches we may generate code that tests for Green, and if not Green simply /assumes/ Blue (since, if the case is exhaustive, that's all that remains). Of course, if it's not Blue and we start fetching fields that should be in a Blue constructor, we may die horribly. See also Note [Core Lint guarantee] in GHC.Core.Lint. 5. Floating-point values must not be scrutinised against literals. See #9238 and Note [Rules for floating-point comparisons] in GHC.Core.Opt.ConstantFold for rationale. Checked in lintCaseExpr; see the call to isFloatingPrimTy. 6. The 'ty' field of (Case scrut bndr ty alts) is the type of the /entire/ case expression. Checked in lintAltExpr. See also Note [Why does Case have a 'Type' field?]. 7. The type of the scrutinee must be the same as the type of the case binder, obviously. Checked in lintCaseExpr. 8. 
The multiplicity of the binders in constructor patterns must be the multiplicity of the corresponding field /scaled by the multiplicity of the case binder/. Checked in lintCoreAlt. Note [Core type and coercion invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We allow a /non-recursive/, /non-top-level/ let to bind type and coercion variables. These can be very convenient for postponing type substitutions until the next run of the simplifier. * A type variable binding must have a RHS of (Type ty) * A coercion variable binding must have a RHS of (Coercion co) It is possible to have terms that return a coercion, but we use case-binding for those; e.g. case (eq_sel d) of (co :: a ~# b) -> blah where eq_sel :: (a~b) -> (a~#b) Or even case (df @Int) of (co :: a ~# b) -> blah Which is very exotic, and I think never encountered; but see Note [Equality superclasses in quantified constraints] in GHC.Tc.Solver.Canonical Note [Core case invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ See Note [Case expression invariants] Note [Representation polymorphism invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC allows us to abstract over calling conventions using **representation polymorphism**. For example, we have: ($) :: forall (r :: RuntimeRep) (a :: Type) (b :: TYPE r). a -> b -> b In this example, the type `b` is representation-polymorphic: it has kind `TYPE r`, where the type variable `r :: RuntimeRep` abstracts over the runtime representation of values of type `b`. To ensure that programs containing representation-polymorphism remain compilable, we enforce the following representation-polymorphism invariants: The paper "Levity Polymorphism" [PLDI'17] states the first two invariants: I1. The type of a bound variable must have a fixed runtime representation (except for join points: See Note [Invariants on join points]) I2. The type of a function argument must have a fixed runtime representation. 
On top of these two invariants, GHC's internal eta-expansion mechanism also requires: I3. In any partial application `f e_1 .. e_n`, where `f` is `hasNoBinding`, it must be the case that the application can be eta-expanded to match the arity of `f`. See Note [checkCanEtaExpand] in GHC.Core.Lint for more details. Example of I1: \(r::RuntimeRep). \(a::TYPE r). \(x::a). e This contravenes I1 because x's type has kind (TYPE r), which has 'r' free. We thus wouldn't know how to compile this lambda abstraction. Example of I2: f (undefined :: (a :: TYPE r)) This contravenes I2: we are applying the function `f` to a value with an unknown runtime representation. Examples of I3: myUnsafeCoerce# :: forall {r1} (a :: TYPE r1) {r2} (b :: TYPE r2). a -> b myUnsafeCoerce# = unsafeCoerce# This contravenes I3: we are instantiating `unsafeCoerce#` without any value arguments, and with a remaining argument type, `a`, which does not have a fixed runtime representation. But `unsafeCorce#` has no binding (see Note [Wiring in unsafeCoerce#] in GHC.HsToCore). So before code-generation we must saturate it by eta-expansion (see GHC.CoreToStg.Prep.maybeSaturate), thus myUnsafeCoerce# = \x. unsafeCoerce# x But we can't do that because now the \x binding would violate I1. bar :: forall (a :: TYPE) r (b :: TYPE r). a -> b bar = unsafeCoerce# OK: eta expand to `\ (x :: Type) -> unsafeCoerce# x`, and `x` has a fixed RuntimeRep. Note that we currently require something slightly stronger than a fixed runtime representation: we check whether bound variables and function arguments have a /fixed RuntimeRep/ in the sense of Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. See Note [Representation polymorphism checking] in GHC.Tc.Utils.Concrete for an overview of how we enforce these invariants in the typechecker. 
Note [Core let goal] ~~~~~~~~~~~~~~~~~~~~ * The simplifier tries to ensure that if the RHS of a let is a constructor application, its arguments are trivial, so that the constructor can be inlined vigorously. Note [Empty case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The alternatives of a case expression should be exhaustive. But this exhaustive list can be empty! * A case expression can have empty alternatives if (and only if) the scrutinee is bound to raise an exception or diverge. When do we know this? See Note [Bottoming expressions] in GHC.Core.Utils. * The possibility of empty alternatives is one reason we need a type on the case expression: if the alternatives are empty we can't get the type from the alternatives! * In the case of empty types (see Note [Bottoming expressions]), say data T we do NOT want to replace case (x::T) of Bool {} --> error Bool "Inaccessible case" because x might raise an exception, and *that*'s what we want to see! (#6067 is an example.) To preserve semantics we'd have to say x `seq` error Bool "Inaccessible case" but the 'seq' is just such a case, so we are back to square 1. * We can use the empty-alternative construct to coerce error values from one type to another. For example f :: Int -> Int f n = error "urk" g :: Int -> (# Char, Bool #) g x = case f x of { 0 -> ..., n -> ... } Then if we inline f in g's RHS we get case (error Int "urk") of (# Char, Bool #) { ... } and we can discard the alternatives since the scrutinee is bottom to give case (error Int "urk") of (# Char, Bool #) {} This is nicer than using an unsafe coerce between Int ~ (# Char,Bool #), if for no other reason that we don't need to instantiate the (~) at an unboxed type. * We treat a case expression with empty alternatives as trivial iff its scrutinee is (see GHC.Core.Utils.exprIsTrivial). 
This is actually important; see Note [Empty case is trivial] in GHC.Core.Utils * An empty case is replaced by its scrutinee during the CoreToStg conversion; remember STG is un-typed, so there is no need for the empty case to do the type conversion. Note [Join points] ~~~~~~~~~~~~~~~~~~ In Core, a *join point* is a specially tagged function whose only occurrences are saturated tail calls. A tail call can appear in these places: 1. In the branches (not the scrutinee) of a case 2. Underneath a let (value or join point) 3. Inside another join point We write a join-point declaration as join j @a @b x y = e1 in e2, like a let binding but with "join" instead (or "join rec" for "let rec"). Note that we put the parameters before the = rather than using lambdas; this is because it's relevant how many parameters the join point takes *as a join point.* This number is called the *join arity,* distinct from arity because it counts types as well as values. Note that a join point may return a lambda! So join j x = x + 1 is different from join j = \x -> x + 1 The former has join arity 1, while the latter has join arity 0. The identifier for a join point is called a join id or a *label.* An invocation is called a *jump.* We write a jump using the jump keyword: jump j 3 The words *label* and *jump* are evocative of assembly code (or Cmm) for a reason: join points are indeed compiled as labeled blocks, and jumps become actual jumps (plus argument passing and stack adjustment). There is no closure allocated and only a fraction of the function-call overhead. Hence we would like as many functions as possible to become join points (see OccurAnal) and the type rules for join points ensure we preserve the properties that make them efficient. In the actual AST, a join point is indicated by the IdDetails of the binder: a local value binding gets 'VanillaId' but a join point gets a 'JoinId' with its join arity. 
For more details, see the paper: Luke Maurer, Paul Downen, Zena Ariola, and Simon Peyton Jones. "Compiling without continuations." Submitted to PLDI'17. https://www.microsoft.com/en-us/research/publication/compiling-without-continuations/ Note [Invariants on join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Join points must follow these invariants: 1. All occurrences must be tail calls. Each of these tail calls must pass the same number of arguments, counting both types and values; we call this the "join arity" (to distinguish from regular arity, which only counts values). See Note [Join points are less general than the paper] 2. For join arity n, the right-hand side must begin with at least n lambdas. No ticks, no casts, just lambdas! C.f. GHC.Core.Utils.joinRhsArity. 2a. Moreover, this same constraint applies to any unfolding of the binder. Reason: if we want to push a continuation into the RHS we must push it into the unfolding as well. 2b. The Arity (in the IdInfo) of a join point is the number of value binders in the top n lambdas, where n is the join arity. So arity <= join arity; the former counts only value binders while the latter counts all binders. e.g. Suppose $j has join arity 1 let j = \x y. e in case x of { A -> j 1; B -> j 2 } Then its ordinary arity is also 1, not 2. The arity of a join point isn't very important; but short of setting it to zero, it is helpful to have an invariant. E.g. #17294. See also Note [Do not eta-expand join points] in GHC.Core.Opt.Simplify.Utils. 3. If the binding is recursive, then all other bindings in the recursive group must also be join points. 4. The binding's type must not be polymorphic in its return type (as defined in Note [The polymorphism rule of join points]). However, join points have simpler invariants in other ways 5. A join point can have an unboxed type without the RHS being ok-for-speculation (i.e. drop the let/app invariant) e.g. let j :: Int# = factorial x in ... 6. 
The RHS of join point is not required to have a fixed runtime representation, e.g. let j :: r :: TYPE l = fail void# in ... This happened in an intermediate program #13394 Examples: join j1 x = 1 + x in jump j (jump j x) -- Fails 1: non-tail call join j1' x = 1 + x in if even a then jump j1 a else jump j1 a b -- Fails 1: inconsistent calls join j2 x = flip (+) x in j2 1 2 -- Fails 2: not enough lambdas join j2' x = \y -> x + y in j3 1 -- Passes: extra lams ok join j @a (x :: a) = x -- Fails 4: polymorphic in ret type Invariant 1 applies to left-hand sides of rewrite rules, so a rule for a join point must have an exact call as its LHS. Strictly speaking, invariant 3 is redundant, since a call from inside a lazy binding isn't a tail call. Since a let-bound value can't invoke a free join point, then, they can't be mutually recursive. (A Core binding group *can* include spurious extra bindings if the occurrence analyser hasn't run, so invariant 3 does still need to be checked.) For the rigorous definition of "tail call", see Section 3 of the paper (Note [Join points]). Invariant 4 is subtle; see Note [The polymorphism rule of join points]. Invariant 6 is to enable code like this: f = \(r :: RuntimeRep) (a :: TYPE r) (x :: T). join j :: a j = error @r @a "bloop" in case x of A -> j B -> j C -> error @r @a "blurp" Core Lint will check these invariants, anticipating that any binder whose OccInfo is marked AlwaysTailCalled will become a join point as soon as the simplifier (or simpleOptPgm) runs. Note [Join points are less general than the paper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the paper "Compiling without continuations", this expression is perfectly valid: join { j = \_ -> e } in (case blah of ) ( True -> j void# ) arg ( False -> blah ) assuming 'j' has arity 1. Here the call to 'j' does not look like a tail call, but actually everything is fine. See Section 3, "Managing \Delta" in the paper. 
In GHC, however, we adopt a slightly more restrictive subset, in which join point calls must be tail calls. I think we /could/ loosen it up, but in fact the simplifier ensures that we always get tail calls, and it makes the back end a bit easier I think. Generally, just less to think about; nothing deeper than that. Note [The type of a join point] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A join point has the same type it would have as a function. That is, if it takes an Int and a Bool and its body produces a String, its type is `Int -> Bool -> String`. Natural as this may seem, it can be awkward. A join point shouldn't be thought to "return" in the same sense a function does---a jump is one-way. This is crucial for understanding how case-of-case interacts with join points: case (join j :: Int -> Bool -> String j x y = ... in jump j z w) of "" -> True _ -> False The simplifier will pull the case into the join point (see Note [Join points and case-of-case] in GHC.Core.Opt.Simplify): join j :: Int -> Bool -> Bool -- changed! j x y = case ... of "" -> True _ -> False in jump j z w The body of the join point now returns a Bool, so the label `j` has to have its type updated accordingly, which is done by GHC.Core.Opt.Simplify.Env.adjustJoinPointType. Inconvenient though this may be, it has the advantage that 'GHC.Core.Utils.exprType' can still return a type for any expression, including a jump. Relationship to the paper This plan differs from the paper (see Note [Invariants on join points]). In the paper, we instead give j the type `Int -> Bool -> forall a. a`. Then each jump carries the "return type" as a parameter, exactly the way other non-returning functions like `error` work: case (join j :: Int -> Bool -> forall a. a j x y = ... in jump j z w @String) of "" -> True _ -> False Now we can move the case inward and we only have to change the jump: join j :: Int -> Bool -> forall a. a j x y = case ... 
of "" -> True
          _  -> False
    in jump j z w @Bool

(Core Lint would still check that the body of the join point has the
right type; that type would simply not be reflected in the join id.)

Note [The polymorphism rule of join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Invariant 4 of Note [Invariants on join points] forbids a join point to
be polymorphic in its return type. That is, if its type is

  forall a1 ... ak. t1 -> ... -> tn -> r

where its join arity is k+n, none of the type parameters ai may occur
free in r.

In some way, this falls out of the fact that given

  join
     j @a1 ... @ak x1 ... xn = e1
  in e2

then all calls to `j` are in tail-call positions of `e`, and expressions
in tail-call positions in `e` have the same type as `e`. Therefore the
type of `e1` -- the return type of the join point -- must be the same as
the type of e2. Since the type variables aren't bound in `e2`, its type
can't include them, and thus neither can the type of `e1`.

This unfortunately prevents the `go` in the following code from being a
join-point:

  iter :: forall a. Int -> (a -> a) -> a -> a
  iter @a n f x = go @a n f x
    where
      go :: forall a. Int -> (a -> a) -> a -> a
      go @a 0 _ x = x
      go @a n f x = go @a (n-1) f (f x)

In this case, a static argument transformation would fix that (see
ticket #14620):

  iter :: forall a. Int -> (a -> a) -> a -> a
  iter @a n f x = go' @a n f x
    where
      go' :: Int -> (a -> a) -> a -> a
      go' 0 _ x = x
      go' n f x = go' (n-1) f (f x)

In general, loopification could be employed to do that (see #14068.)

Can we simply drop the requirement, and allow `go` to be a join-point? We
could, and it would work. But we could no longer apply the
case-of-join-point transformation universally.
This transformation would do: case (join go @a n f x = case n of 0 -> x n -> go @a (n-1) f (f x) in go @Bool n neg True) of True -> e1; False -> e2 ===> join go @a n f x = case n of 0 -> case x of True -> e1; False -> e2 n -> go @a (n-1) f (f x) in go @Bool n neg True but that is ill-typed, as `x` is type `a`, not `Bool`. This also justifies why we do not consider the `e` in `e |> co` to be in tail position: A cast changes the type, but the type must be the same. But operationally, casts are vacuous, so this is a bit unfortunate! See #14610 for ideas how to fix this. ************************************************************************ * * In/Out type synonyms * * ********************************************************************* -} {- Many passes apply a substitution, and it's very handy to have type synonyms to remind us whether or not the substitution has been applied -} -- Pre-cloning or substitution type InBndr = CoreBndr type InType = Type type InKind = Kind type InBind = CoreBind type InExpr = CoreExpr type InAlt = CoreAlt type InArg = CoreArg type InCoercion = Coercion -- Post-cloning or substitution type OutBndr = CoreBndr type OutType = Type type OutKind = Kind type OutCoercion = Coercion type OutBind = CoreBind type OutExpr = CoreExpr type OutAlt = CoreAlt type OutArg = CoreArg type MOutCoercion = MCoercion {- ************************************************************************ * * Orphans * * ************************************************************************ -} -- | Is this instance an orphan? If it is not an orphan, contains an 'OccName' -- witnessing the instance's non-orphanhood. -- See Note [Orphans] data IsOrphan = IsOrphan | NotOrphan !OccName -- The OccName 'n' witnesses the instance's non-orphanhood -- In that case, the instance is fingerprinted as part -- of the definition of 'n's definition deriving Data -- | Returns true if 'IsOrphan' is orphan. 
isOrphan :: IsOrphan -> Bool
isOrphan IsOrphan = True
isOrphan _ = False

-- | Returns true if 'IsOrphan' is not an orphan.
notOrphan :: IsOrphan -> Bool
notOrphan NotOrphan{} = True
notOrphan _ = False

chooseOrphanAnchor :: NameSet -> IsOrphan
-- Something (rule, instance) is related to all the Names in this
-- list. Choose one of them to be an "anchor" for the orphan.  We make
-- the choice deterministic to avoid gratuitous changes in the ABI
-- hash (#4012).  Specifically, use lexicographic comparison of
-- OccName rather than comparing Uniques
--
-- NB: 'minimum' uses Ord, and (Ord OccName) works lexicographically
--
chooseOrphanAnchor local_names
  | isEmptyNameSet local_names = IsOrphan
  | otherwise                  = NotOrphan (minimum occs)
  where
    occs = map nameOccName $ nonDetEltsUniqSet local_names
    -- It's OK to use the non-deterministic nonDetEltsUniqSet here,
    -- because 'minimum' makes the final choice deterministic anyway;
    -- see the determinism comment above

instance Binary IsOrphan where
    -- Tag byte 0 <=> IsOrphan; tag byte 1 <=> NotOrphan followed by the OccName
    put_ bh IsOrphan = putByte bh 0
    put_ bh (NotOrphan n) = do
        putByte bh 1
        put_ bh n
    get bh = do
        h <- getByte bh
        case h of
            0 -> return IsOrphan
            _ -> do n <- get bh
                    return $ NotOrphan n

{- Note [Orphans]
~~~~~~~~~~~~~~
Class instances, rules, and family instances are divided into orphans
and non-orphans.  Roughly speaking, an instance/rule is an orphan if
its left hand side mentions nothing defined in this module.  Orphan-hood
has two major consequences

 * A module that contains orphans is called an "orphan module".  If
   the module being compiled depends (transitively) on an orphan
   module M, then M.hi is read in regardless of whether M is otherwise
   needed. This is to ensure that we don't miss any instance decls in
   M.  But it's painful, because it means we need to keep track of all
   the orphan modules below us.

 * A non-orphan is not finger-printed separately.  Instead, for
   fingerprinting purposes it is treated as part of the entity it
   mentions on the LHS.  For example
      data T = T1 | T2
      instance Eq T where ....
   The instance (Eq T) is incorporated as part of T's fingerprint.
In contrast, orphans are all fingerprinted together in the mi_orph_hash field of the ModIface. See GHC.Iface.Recomp.addFingerprints. Orphan-hood is computed * For class instances: when we make a ClsInst (because it is needed during instance lookup) * For rules and family instances: when we generate an IfaceRule (GHC.Iface.Make.coreRuleToIfaceRule) or IfaceFamInst (GHC.Iface.Make.instanceToIfaceInst) -} {- ************************************************************************ * * \subsection{Rewrite rules} * * ************************************************************************ The CoreRule type and its friends are dealt with mainly in GHC.Core.Rules, but GHC.Core.FVs, GHC.Core.Subst, GHC.Core.Ppr, GHC.Core.Tidy also inspect the representation. -} -- | Gathers a collection of 'CoreRule's. Maps (the name of) an 'Id' to its rules type RuleBase = NameEnv [CoreRule] -- The rules are unordered; -- we sort out any overlaps on lookup -- | A full rule environment which we can apply rules from. Like a 'RuleBase', -- but it also includes the set of visible orphans we use to filter out orphan -- rules which are not visible (even though we can see them...) data RuleEnv = RuleEnv { re_base :: [RuleBase] -- See Note [Why re_base is a list] , re_visible_orphs :: ModuleSet } mkRuleEnv :: RuleBase -> [Module] -> RuleEnv mkRuleEnv rules vis_orphs = RuleEnv [rules] (mkModuleSet vis_orphs) emptyRuleEnv :: RuleEnv emptyRuleEnv = RuleEnv [] emptyModuleSet {- Note [Why re_base is a list] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Note [Overall plumbing for rules], it is explained that the final RuleBase which we must consider is combined from 4 different sources. During simplifier runs, the fourth source of rules is constantly being updated as new interfaces are loaded into the EPS. Therefore just before we check to see if any rules match we get the EPS RuleBase and combine it with the existing RuleBase and then perform exactly 1 lookup into the new map. 
It is more efficient to avoid combining the environments and store the uncombined environments as we can instead perform 1 lookup into each environment and then combine the results. Essentially we use the identity: > lookupNameEnv n (plusNameEnv_C (++) rb1 rb2) > = lookupNameEnv n rb1 ++ lookupNameEnv n rb2 The latter being more efficient as we don't construct an intermediate map. -} -- | A 'CoreRule' is: -- -- * \"Local\" if the function it is a rule for is defined in the -- same module as the rule itself. -- -- * \"Orphan\" if nothing on the LHS is defined in the same module -- as the rule itself data CoreRule = Rule { ru_name :: RuleName, -- ^ Name of the rule, for communication with the user ru_act :: Activation, -- ^ When the rule is active -- Rough-matching stuff -- see comments with InstEnv.ClsInst( is_cls, is_rough ) ru_fn :: Name, -- ^ Name of the 'GHC.Types.Id.Id' at the head of this rule ru_rough :: [Maybe Name], -- ^ Name at the head of each argument to the left hand side -- Proper-matching stuff -- see comments with InstEnv.ClsInst( is_tvs, is_tys ) ru_bndrs :: [CoreBndr], -- ^ Variables quantified over ru_args :: [CoreExpr], -- ^ Left hand side arguments -- And the right-hand side ru_rhs :: CoreExpr, -- ^ Right hand side of the rule -- Occurrence info is guaranteed correct -- See Note [OccInfo in unfoldings and rules] -- Locality ru_auto :: Bool, -- ^ @True@ <=> this rule is auto-generated -- (notably by Specialise or SpecConstr) -- @False@ <=> generated at the user's behest -- See Note [Trimming auto-rules] in "GHC.Iface.Tidy" -- for the sole purpose of this field. ru_origin :: !Module, -- ^ 'Module' the rule was defined in, used -- to test if we should see an orphan rule. ru_orphan :: !IsOrphan, -- ^ Whether or not the rule is an orphan. ru_local :: Bool -- ^ @True@ iff the fn at the head of the rule is -- defined in the same module as the rule -- and is not an implicit 'Id' (like a record selector, -- class operation, or data constructor). 
This -- is different from 'ru_orphan', where a rule -- can avoid being an orphan if *any* Name in -- LHS of the rule was defined in the same -- module as the rule. } -- | Built-in rules are used for constant folding -- and suchlike. They have no free variables. -- A built-in rule is always visible (there is no such thing as -- an orphan built-in rule.) | BuiltinRule { ru_name :: RuleName, -- ^ As above ru_fn :: Name, -- ^ As above ru_nargs :: Int, -- ^ Number of arguments that 'ru_try' consumes, -- if it fires, including type arguments ru_try :: RuleFun -- ^ This function does the rewrite. It given too many -- arguments, it simply discards them; the returned 'CoreExpr' -- is just the rewrite of 'ru_fn' applied to the first 'ru_nargs' args } -- See Note [Extra args in the target] in GHC.Core.Rules -- | Rule options data RuleOpts = RuleOpts { roPlatform :: !Platform -- ^ Target platform , roNumConstantFolding :: !Bool -- ^ Enable more advanced numeric constant folding , roExcessRationalPrecision :: !Bool -- ^ Cut down precision of Rational values to that of Float/Double if disabled , roBignumRules :: !Bool -- ^ Enable rules for bignums } -- | The 'InScopeSet' in the 'InScopeEnv' is a /superset/ of variables that are -- currently in scope. See Note [The InScopeSet invariant]. type RuleFun = RuleOpts -> InScopeEnv -> Id -> [CoreExpr] -> Maybe CoreExpr type InScopeEnv = (InScopeSet, IdUnfoldingFun) type IdUnfoldingFun = Id -> Unfolding -- A function that embodies how to unfold an Id if you need -- to do that in the Rule. 
The reason we need to pass this info in -- is that whether an Id is unfoldable depends on the simplifier phase isBuiltinRule :: CoreRule -> Bool isBuiltinRule (BuiltinRule {}) = True isBuiltinRule _ = False isAutoRule :: CoreRule -> Bool isAutoRule (BuiltinRule {}) = False isAutoRule (Rule { ru_auto = is_auto }) = is_auto -- | The number of arguments the 'ru_fn' must be applied -- to before the rule can match on it ruleArity :: CoreRule -> Int ruleArity (BuiltinRule {ru_nargs = n}) = n ruleArity (Rule {ru_args = args}) = length args ruleName :: CoreRule -> RuleName ruleName = ru_name ruleModule :: CoreRule -> Maybe Module ruleModule Rule { ru_origin } = Just ru_origin ruleModule BuiltinRule {} = Nothing ruleActivation :: CoreRule -> Activation ruleActivation (BuiltinRule { }) = AlwaysActive ruleActivation (Rule { ru_act = act }) = act -- | The 'Name' of the 'GHC.Types.Id.Id' at the head of the rule left hand side ruleIdName :: CoreRule -> Name ruleIdName = ru_fn isLocalRule :: CoreRule -> Bool isLocalRule = ru_local -- | Set the 'Name' of the 'GHC.Types.Id.Id' at the head of the rule left hand side setRuleIdName :: Name -> CoreRule -> CoreRule setRuleIdName nm ru = ru { ru_fn = nm } {- ************************************************************************ * * Unfoldings * * ************************************************************************ The @Unfolding@ type is declared here to avoid numerous loops Note [Never put `OtherCon` unfoldings on lambda binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Based on #21496 we never attach unfoldings of any kind to lambda binders. It's just too easy for the call site to change and invalidate the unfolding. E.g. the caller of the lambda drops a seq (e.g. because the lambda is strict in it's binder) which in turn makes the OtherCon[] unfolding a lie. So unfoldings on lambda binders can never really be trusted when on lambda binders if there is the chance of the call site to change. 
So it's easiest to just never attach any to lambda binders to begin with, as well as stripping them off if we e.g. float out and expression while abstracting over some arguments. -} -- | Records the /unfolding/ of an identifier, which is approximately the form the -- identifier would have if we substituted its definition in for the identifier. -- This type should be treated as abstract everywhere except in "GHC.Core.Unfold" data Unfolding = NoUnfolding -- ^ We have no information about the unfolding. | BootUnfolding -- ^ We have no information about the unfolding, because -- this 'Id' came from an @hi-boot@ file. -- See Note [Inlining and hs-boot files] in "GHC.CoreToIface" -- for what this is used for. | OtherCon [AltCon] -- ^ It ain't one of these constructors. -- @OtherCon xs@ also indicates that something has been evaluated -- and hence there's no point in re-evaluating it. -- @OtherCon []@ is used even for non-data-type values -- to indicated evaluated-ness. Notably: -- -- > data C = C !(Int -> Int) -- > case x of { C f -> ... } -- -- Here, @f@ gets an @OtherCon []@ unfolding. | DFunUnfolding { -- The Unfolding of a DFunId -- See Note [DFun unfoldings] -- df = /\a1..am. \d1..dn. MkD t1 .. tk -- (op1 a1..am d1..dn) -- (op2 a1..am d1..dn) df_bndrs :: [Var], -- The bound variables [a1..m],[d1..dn] df_con :: DataCon, -- The dictionary data constructor (never a newtype datacon) df_args :: [CoreExpr] -- Args of the data con: types, superclasses and methods, } -- in positional order | CoreUnfolding { -- An unfolding for an Id with no pragma, -- or perhaps a NOINLINE pragma -- (For NOINLINE, the phase, if any, is in the -- InlinePragInfo for this Id.) 
uf_tmpl :: CoreExpr, -- Template; occurrence info is correct uf_src :: UnfoldingSource, -- Where the unfolding came from uf_is_top :: Bool, -- True <=> top level binding uf_cache :: UnfoldingCache, -- Cache of flags computable from the expr -- See Note [Tying the 'CoreUnfolding' knot] uf_guidance :: UnfoldingGuidance -- Tells about the *size* of the template. } -- ^ An unfolding with redundant cached information. Parameters: -- -- uf_tmpl: Template used to perform unfolding; -- NB: Occurrence info is guaranteed correct: -- see Note [OccInfo in unfoldings and rules] -- -- uf_is_top: Is this a top level binding? -- -- uf_is_value: 'exprIsHNF' template (cached); it is ok to discard a 'seq' on -- this variable -- -- uf_is_work_free: Does this waste only a little work if we expand it inside an inlining? -- Basically this is a cached version of 'exprIsWorkFree' -- -- uf_guidance: Tells us about the /size/ of the unfolding template ------------------------------------------------ data UnfoldingSource = -- See also Note [Historical note: unfoldings for wrappers] InlineRhs -- The current rhs of the function -- Replace uf_tmpl each time around | InlineStable -- From an INLINE or INLINABLE pragma -- INLINE if guidance is UnfWhen -- INLINABLE if guidance is UnfIfGoodArgs/UnfoldNever -- (well, technically an INLINABLE might be made -- UnfWhen if it was small enough, and then -- it will behave like INLINE outside the current -- module, but that is the way automatic unfoldings -- work so it is consistent with the intended -- meaning of INLINABLE). -- -- uf_tmpl may change, but only as a result of -- gentle simplification, it doesn't get updated -- to the current RHS during compilation as with -- InlineRhs. -- -- See Note [InlineStable] | InlineCompulsory -- Something that *has* no binding, so you *must* inline it -- Only a few primop-like things have this property -- (see "GHC.Types.Id.Make", calls to mkCompulsoryUnfolding). 
-- Inline absolutely always, however boring the context. -- | Properties of a 'CoreUnfolding' that could be computed on-demand from its template. -- See Note [UnfoldingCache] data UnfoldingCache = UnfoldingCache { uf_is_value :: !Bool, -- exprIsHNF template (cached); it is ok to discard -- a `seq` on this variable uf_is_conlike :: !Bool, -- True <=> applicn of constructor or CONLIKE function -- Cached version of exprIsConLike uf_is_work_free :: !Bool, -- True <=> doesn't waste (much) work to expand -- inside an inlining -- Cached version of exprIsCheap uf_expandable :: !Bool -- True <=> can expand in RULE matching -- Cached version of exprIsExpandable } deriving (Eq) -- | 'UnfoldingGuidance' says when unfolding should take place data UnfoldingGuidance = UnfWhen { -- Inline without thinking about the *size* of the uf_tmpl -- Used (a) for small *and* cheap unfoldings -- (b) for INLINE functions -- See Note [INLINE for small functions] in GHC.Core.Unfold ug_arity :: Arity, -- Number of value arguments expected ug_unsat_ok :: Bool, -- True <=> ok to inline even if unsaturated ug_boring_ok :: Bool -- True <=> ok to inline even if the context is boring -- So True,True means "always" } | UnfIfGoodArgs { -- Arose from a normal Id; the info here is the -- result of a simple analysis of the RHS ug_args :: [Int], -- Discount if the argument is evaluated. -- (i.e., a simplification will definitely -- be possible). One elt of the list per *value* arg. ug_size :: Int, -- The "size" of the unfolding. ug_res :: Int -- Scrutinee discount: the discount to subtract if the thing is in } -- a context (case (thing args) of ...), -- (where there are the right number of arguments.) | UnfNever -- The RHS is big, so don't inline it deriving (Eq) {- Note [UnfoldingCache] ~~~~~~~~~~~~~~~~~~~~~~~~ The UnfoldingCache field of an Unfolding holds four (strict) booleans, all derived from the uf_tmpl field of the unfolding. 
* We serialise the UnfoldingCache to and from interface files, for
  reasons described in Note [Tying the 'CoreUnfolding' knot] in
  GHC.IfaceToCore

* Because it is a strict data type, we must be careful not to
  pattern-match on it until we actually want its values.  E.g
  GHC.Core.Unfold.callSiteInline/tryUnfolding are careful not to force
  it unnecessarily.  Just saves a bit of work.

* When `seq`ing Core to eliminate space leaks, it suffices to `seq` on
  the cache, but not its fields, because it is strict in all fields.

Note [Historical note: unfoldings for wrappers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to have a nice clever scheme in interface files for wrappers.
A wrapper's unfolding can be reconstructed from its worker's id and its
strictness. This decreased .hi file size (sometimes significantly, for
modules like GHC.Classes with many high-arity w/w splits) and had a
slight corresponding effect on compile times.

However, when we added the second demand analysis, this scheme led to
some Core lint errors. The second analysis could change the strictness
signatures, which sometimes resulted in a wrapper's regenerated
unfolding applying the wrapper to too many arguments.

Instead of repairing the clever .hi scheme, we abandoned it in favor of
simplicity. The .hi sizes are usually insignificant (excluding the +1M
for base libraries), and compile time barely increases (~+1% for
nofib). The nicer upshot is that the UnfoldingSource no longer mentions
an Id, so, eg, substitutions need not traverse them.

Note [DFun unfoldings]
~~~~~~~~~~~~~~~~~~~~~~
The Arity in a DFunUnfolding is total number of args (type and value)
that the DFun needs to produce a dictionary.  That's not necessarily
related to the ordinary arity of the dfun Id, esp if the class has one
method, so the dictionary is represented by a newtype.  Example

     class C a where { op :: a -> Int }
     instance C a -> C [a] where op xs = op (head xs)

The instance translates to

     $dfCList :: forall a.
C a => C [a] -- Arity 2! $dfCList = /\a.\d. $copList {a} d |> co $copList :: forall a. C a => [a] -> Int -- Arity 2! $copList = /\a.\d.\xs. op {a} d (head xs) Now we might encounter (op (dfCList {ty} d) a1 a2) and we want the (op (dfList {ty} d)) rule to fire, because $dfCList has all its arguments, even though its (value) arity is 2. That's why we record the number of expected arguments in the DFunUnfolding. Note that although it's an Arity, it's most convenient for it to give the *total* number of arguments, both type and value. See the use site in exprIsConApp_maybe. -} -- Constants for the UnfWhen constructor needSaturated, unSaturatedOk :: Bool needSaturated = False unSaturatedOk = True boringCxtNotOk, boringCxtOk :: Bool boringCxtOk = True boringCxtNotOk = False ------------------------------------------------ noUnfolding :: Unfolding -- ^ There is no known 'Unfolding' evaldUnfolding :: Unfolding -- ^ This unfolding marks the associated thing as being evaluated noUnfolding = NoUnfolding evaldUnfolding = OtherCon [] -- | There is no known 'Unfolding', because this came from an -- hi-boot file. 
bootUnfolding :: Unfolding
bootUnfolding = BootUnfolding

mkOtherCon :: [AltCon] -> Unfolding
mkOtherCon = OtherCon

isStableSource :: UnfoldingSource -> Bool
-- Keep the unfolding template
isStableSource InlineCompulsory = True
isStableSource InlineStable     = True
isStableSource InlineRhs        = False

-- | Retrieves the template of an unfolding: panics if none is known
unfoldingTemplate :: Unfolding -> CoreExpr
unfoldingTemplate = uf_tmpl

-- | Retrieves the template of an unfolding if possible
-- maybeUnfoldingTemplate is used mainly when specialising, and we do
-- want to specialise DFuns, so it's important to return a template
-- for DFunUnfoldings (built by eta-expanding the data-con application)
maybeUnfoldingTemplate :: Unfolding -> Maybe CoreExpr
maybeUnfoldingTemplate (CoreUnfolding { uf_tmpl = expr })
  = Just expr
maybeUnfoldingTemplate (DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args })
  = Just (mkLams bndrs (mkApps (Var (dataConWorkId con)) args))
maybeUnfoldingTemplate _
  = Nothing

-- | The constructors that the unfolding could never be:
-- returns @[]@ if no information is available
otherCons :: Unfolding -> [AltCon]
otherCons (OtherCon cons) = cons
otherCons _               = []

-- | Determines if it is certainly the case that the unfolding will
-- yield a value (something in HNF): returns @False@ if unsure
isValueUnfolding :: Unfolding -> Bool
-- Returns False for OtherCon
isValueUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_value cache
isValueUnfolding _                                    = False

-- | Determines if it is possibly the case that the unfolding will
-- yield a value.  Unlike 'isValueUnfolding' it returns @True@
-- for 'OtherCon'
isEvaldUnfolding :: Unfolding -> Bool
-- Returns True for OtherCon
isEvaldUnfolding (OtherCon _)                         = True
isEvaldUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_value cache
isEvaldUnfolding _                                    = False

-- | @True@ if the unfolding is a constructor application, the application
-- of a CONLIKE function or 'OtherCon'
isConLikeUnfolding :: Unfolding -> Bool
isConLikeUnfolding (OtherCon _)                         = True
isConLikeUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_conlike cache
isConLikeUnfolding _                                    = False

-- | Is the thing we will unfold into certainly cheap?
isCheapUnfolding :: Unfolding -> Bool
isCheapUnfolding (CoreUnfolding { uf_cache = cache }) = uf_is_work_free cache
isCheapUnfolding _                                    = False

isExpandableUnfolding :: Unfolding -> Bool
isExpandableUnfolding (CoreUnfolding { uf_cache = cache }) = uf_expandable cache
isExpandableUnfolding _                                    = False

expandUnfolding_maybe :: Unfolding -> Maybe CoreExpr
-- Expand an expandable unfolding; this is used in rule matching
--   See Note [Expanding variables] in GHC.Core.Rules
-- The key point here is that CONLIKE things can be expanded
expandUnfolding_maybe (CoreUnfolding { uf_cache = cache, uf_tmpl = rhs })
  | uf_expandable cache = Just rhs
expandUnfolding_maybe _ = Nothing

isCompulsoryUnfolding :: Unfolding -> Bool
isCompulsoryUnfolding (CoreUnfolding { uf_src = InlineCompulsory }) = True
isCompulsoryUnfolding _                                             = False

isStableUnfolding :: Unfolding -> Bool
-- True of unfoldings that should not be overwritten
-- by a CoreUnfolding for the RHS of a let-binding
isStableUnfolding (CoreUnfolding { uf_src = src }) = isStableSource src
isStableUnfolding (DFunUnfolding {})               = True
isStableUnfolding _                                = False

isInlineUnfolding :: Unfolding -> Bool
-- ^ True of a /stable/ unfolding that is
--   (a) always inlined; that is, with an `UnfWhen` guidance, or
--   (b) a DFunUnfolding which never needs to be inlined
isInlineUnfolding (CoreUnfolding { uf_src = src, uf_guidance = guidance })
  | isStableSource src
  , UnfWhen {} <- guidance
  = True

isInlineUnfolding (DFunUnfolding {})
  = True

-- Default case
isInlineUnfolding _
  = False

-- | Only returns False if there is no unfolding information available at all
hasSomeUnfolding :: Unfolding -> Bool
hasSomeUnfolding NoUnfolding   = False
hasSomeUnfolding BootUnfolding = False
hasSomeUnfolding _             = True

isBootUnfolding :: Unfolding -> Bool
isBootUnfolding BootUnfolding = True
isBootUnfolding _             = False

neverUnfoldGuidance :: UnfoldingGuidance -> Bool
neverUnfoldGuidance UnfNever = True
neverUnfoldGuidance _        = False

hasCoreUnfolding :: Unfolding -> Bool
-- An unfolding "has Core" if it contains a Core expression, which
-- may mention free variables. See Note [Fragile unfoldings]
hasCoreUnfolding (CoreUnfolding {}) = True
hasCoreUnfolding (DFunUnfolding {}) = True
hasCoreUnfolding _                  = False
  -- NoUnfolding, BootUnfolding, OtherCon have no Core

canUnfold :: Unfolding -> Bool
canUnfold (CoreUnfolding { uf_guidance = g }) = not (neverUnfoldGuidance g)
canUnfold _                                   = False

{- Note [Fragile unfoldings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An unfolding is "fragile" if it mentions free variables (and hence would
need substitution) or might be affected by optimisation.  The non-fragile
ones are
   NoUnfolding, BootUnfolding
   OtherCon {}    If we know this binder (say a lambda binder) will be
                  bound to an evaluated thing, we want to retain that
                  info in simpleOptExpr; see #13077.
We consider even a StableUnfolding as fragile, because it needs
substitution.

Note [InlineStable]
~~~~~~~~~~~~~~~~~~~
When you say
      {-# INLINE f #-}
      f x = <rhs>
you intend that calls (f e) are replaced by <rhs>[e/x].  So we should
capture (\x.<rhs>) in the Unfolding of 'f', and never meddle with it.
Meanwhile, we can optimise <rhs> to our heart's content, leaving the
original unfolding intact in Unfolding of 'f'. For example
        all xs = foldr (&&) True xs
        any p = all . map p
        {-# INLINE any #-}
We optimise any's RHS fully, but leave the InlineRule saying "all . map p",
which deforests well at the call site.  So INLINE pragma gives rise to
an InlineRule, which captures the original RHS.

Moreover, it's only used when 'f' is applied to the specified number of
arguments; that is, the number of arguments on the LHS of the '=' sign
in the original source definition.  For example, (.) is now defined in
the libraries like this
   {-# INLINE (.) #-}
   (.) f g = \x -> f (g x)
so that it'll inline when applied to two arguments. If 'x' appeared on
the left, thus
   (.) f g x = f (g x)
it'd only inline when applied to three arguments.  This
slightly-experimental change was requested by Roman, but it seems to
make sense.

See also Note [Inlining an InlineRule] in GHC.Core.Unfold.

Note [OccInfo in unfoldings and rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In unfoldings and rules, we guarantee that the template is occ-analysed, so
that the occurrence info on the binders is correct.  This is important,
because the Simplifier does not re-analyse the template when using it. If
the occurrence info is wrong
  - We may get more simplifier iterations than necessary, because
    once-occ info isn't there
  - More seriously, we may get an infinite loop if there's a Rec
    without a loop breaker marked

************************************************************************
*                                                                      *
                  AltCon
*                                                                      *
************************************************************************
-}

-- The Ord is needed for the FiniteMap used in the lookForConstructor
-- in GHC.Core.Opt.Simplify.Env.  If you declared that lookForConstructor
-- *ignores* constructor-applications with LitArg args, then you could get rid
-- of this Ord.
instance Outputable AltCon where ppr (DataAlt dc) = ppr dc ppr (LitAlt lit) = ppr lit ppr DEFAULT = text "__DEFAULT" cmpAlt :: Alt a -> Alt a -> Ordering cmpAlt (Alt con1 _ _) (Alt con2 _ _) = con1 `cmpAltCon` con2 ltAlt :: Alt a -> Alt a -> Bool ltAlt a1 a2 = (a1 `cmpAlt` a2) == LT cmpAltCon :: AltCon -> AltCon -> Ordering -- ^ Compares 'AltCon's within a single list of alternatives -- DEFAULT comes out smallest, so that sorting by AltCon puts -- alternatives in the order required: see Note [Case expression invariants] cmpAltCon DEFAULT DEFAULT = EQ cmpAltCon DEFAULT _ = LT cmpAltCon (DataAlt d1) (DataAlt d2) = dataConTag d1 `compare` dataConTag d2 cmpAltCon (DataAlt _) DEFAULT = GT cmpAltCon (LitAlt l1) (LitAlt l2) = l1 `compare` l2 cmpAltCon (LitAlt _) DEFAULT = GT cmpAltCon con1 con2 = pprPanic "cmpAltCon" (ppr con1 $$ ppr con2) {- ************************************************************************ * * \subsection{Useful synonyms} * * ************************************************************************ Note [CoreProgram] ~~~~~~~~~~~~~~~~~~ The top level bindings of a program, a CoreProgram, are represented as a list of CoreBind * Later bindings in the list can refer to earlier ones, but not vice versa. So this is OK NonRec { x = 4 } Rec { p = ...q...x... ; q = ...p...x } Rec { f = ...p..x..f.. } NonRec { g = ..f..q...x.. } But it would NOT be ok for 'f' to refer to 'g'. * The occurrence analyser does strongly-connected component analysis on each Rec binding, and splits it into a sequence of smaller bindings where possible. So the program typically starts life as a single giant Rec, which is then dependency-analysed into smaller chunks. 
-} -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint type CoreProgram = [CoreBind] -- See Note [CoreProgram] -- | The common case for the type of binders and variables when -- we are manipulating the Core language within GHC type CoreBndr = Var -- | Expressions where binders are 'CoreBndr's type CoreExpr = Expr CoreBndr -- | Argument expressions where binders are 'CoreBndr's type CoreArg = Arg CoreBndr -- | Binding groups where binders are 'CoreBndr's type CoreBind = Bind CoreBndr -- | Case alternatives where binders are 'CoreBndr's type CoreAlt = Alt CoreBndr {- ************************************************************************ * * \subsection{Tagging} * * ************************************************************************ -} -- | Binders are /tagged/ with a t data TaggedBndr t = TB CoreBndr t -- TB for "tagged binder" type TaggedBind t = Bind (TaggedBndr t) type TaggedExpr t = Expr (TaggedBndr t) type TaggedArg t = Arg (TaggedBndr t) type TaggedAlt t = Alt (TaggedBndr t) instance Outputable b => Outputable (TaggedBndr b) where ppr (TB b l) = char '<' <> ppr b <> comma <> ppr l <> char '>' deTagExpr :: TaggedExpr t -> CoreExpr deTagExpr (Var v) = Var v deTagExpr (Lit l) = Lit l deTagExpr (Type ty) = Type ty deTagExpr (Coercion co) = Coercion co deTagExpr (App e1 e2) = App (deTagExpr e1) (deTagExpr e2) deTagExpr (Lam (TB b _) e) = Lam b (deTagExpr e) deTagExpr (Let bind body) = Let (deTagBind bind) (deTagExpr body) deTagExpr (Case e (TB b _) ty alts) = Case (deTagExpr e) b ty (map deTagAlt alts) deTagExpr (Tick t e) = Tick t (deTagExpr e) deTagExpr (Cast e co) = Cast (deTagExpr e) co deTagBind :: TaggedBind t -> CoreBind deTagBind (NonRec (TB b _) rhs) = NonRec b (deTagExpr rhs) deTagBind (Rec prs) = Rec [(b, deTagExpr rhs) | (TB b _, rhs) <- prs] deTagAlt :: TaggedAlt t -> CoreAlt deTagAlt (Alt con bndrs rhs) = Alt con [b | TB b _ <- bndrs] (deTagExpr rhs) {- 
************************************************************************
*                                                                      *
\subsection{Core-constructing functions with checking}
*                                                                      *
************************************************************************
-}

-- | Apply a list of argument expressions to a function expression in a nested fashion. Prefer to
-- use 'GHC.Core.Make.mkCoreApps' if possible
mkApps    :: Expr b -> [Arg b]    -> Expr b
-- | Apply a list of type argument expressions to a function expression in a nested fashion
mkTyApps  :: Expr b -> [Type]     -> Expr b
-- | Apply a list of coercion argument expressions to a function expression in a nested fashion
mkCoApps  :: Expr b -> [Coercion] -> Expr b
-- | Apply a list of type or value variables to a function expression in a nested fashion
mkVarApps :: Expr b -> [Var]      -> Expr b
-- | Apply a list of argument expressions to a data constructor in a nested fashion. Prefer to
-- use 'GHC.Core.Make.mkCoreConApps' if possible
mkConApp  :: DataCon -> [Arg b]   -> Expr b

mkApps    f args  = foldl' App f args
mkCoApps  f args  = foldl' (\ e a -> App e (Coercion a)) f args
mkVarApps f vars  = foldl' (\ e a -> App e (varToCoreExpr a)) f vars
mkConApp con args = mkApps (Var (dataConWorkId con)) args

-- Goes via mkTyArg so that a coercion hidden in a type becomes a
-- Coercion argument, not a Type argument
mkTyApps  f args  = foldl' (\ e a -> App e (mkTyArg a)) f args

-- | Saturate a data constructor's worker with the given type and
-- value/coercion arguments (the latter via 'varToCoreExpr').
mkConApp2 :: DataCon -> [Type] -> [Var] -> Expr b
mkConApp2 con tys arg_ids = Var (dataConWorkId con)
                            `mkApps` map Type tys
                            `mkApps` map varToCoreExpr arg_ids

-- | Turn a type into a type argument; a type that is really a
-- coercion (per 'isCoercionTy_maybe') becomes a 'Coercion' argument
-- instead of a 'Type' argument.
mkTyArg :: Type -> Expr b
mkTyArg ty
  | Just co <- isCoercionTy_maybe ty = Coercion co
  | otherwise                        = Type ty

-- | Create a machine integer literal expression of type @Int#@ from an @Integer@.
-- If you want an expression of type @Int@ use 'GHC.Core.Make.mkIntExpr'
mkIntLit :: Platform -> Integer -> Expr b
mkIntLit platform n = Lit (mkLitInt platform n)

-- | Create a machine integer literal expression of type @Int#@ from an
-- @Integer@, wrapping if necessary.
-- If you want an expression of type @Int@ use 'GHC.Core.Make.mkIntExpr'
mkIntLitWrap :: Platform -> Integer -> Expr b
mkIntLitWrap pf i = Lit (mkLitIntWrap pf i)

-- | An @Integer@ as a @Word#@ literal expression.
-- For an expression of type @Word@, see 'GHC.Core.Make.mkWordExpr'
mkWordLit :: Platform -> Integer -> Expr b
mkWordLit pf i = Lit (mkLitWord pf i)

-- | An @Integer@ as a @Word#@ literal expression, wrapping out-of-range
-- values rather than failing.
-- For an expression of type @Word@, see 'GHC.Core.Make.mkWordExpr'
mkWordLitWrap :: Platform -> Integer -> Expr b
mkWordLitWrap pf i = Lit (mkLitWordWrap pf i)

-- | An @Integer@ as a @Word8#@ literal expression.
mkWord8Lit :: Integer -> Expr b
mkWord8Lit i = Lit (mkLitWord8 i)

-- | A 'Word64' as a @Word64#@ literal expression.
mkWord64LitWord64 :: Word64 -> Expr b
mkWord64LitWord64 = Lit . mkLitWord64 . toInteger

-- | An 'Int64' as an @Int64#@ literal expression.
mkInt64LitInt64 :: Int64 -> Expr b
mkInt64LitInt64 = Lit . mkLitInt64 . toInteger

-- | A @Char#@ literal expression.
-- For an expression of type @Char@, see 'GHC.Core.Make.mkCharExpr'
mkCharLit :: Char -> Expr b
mkCharLit ch = Lit (mkLitChar ch)

-- | A machine string literal expression of type @Addr#@.
-- For an expression of type @String@, see 'GHC.Core.Make.mkStringExpr'
mkStringLit :: String -> Expr b
mkStringLit str = Lit (mkLitString str)

-- | A @Rational@ as a single-precision @Float#@ literal expression.
-- For an expression of type @Float@, see 'GHC.Core.Make.mkFloatExpr'
mkFloatLit :: Rational -> Expr b
mkFloatLit r = Lit (mkLitFloat r)

-- | A 'Float' as a single-precision @Float#@ literal expression.
-- For an expression of type @Float@, see 'GHC.Core.Make.mkFloatExpr'
mkFloatLitFloat :: Float -> Expr b
mkFloatLitFloat = Lit . mkLitFloat . toRational

-- | Create a machine double precision literal expression of type @Double#@ from a @Rational@.
-- If you want an expression of type @Double@ use 'GHC.Core.Make.mkDoubleExpr'
mkDoubleLit :: Rational -> Expr b
-- | Create a machine double precision literal expression of type @Double#@ from a @Double@.
-- If you want an expression of type @Double@ use 'GHC.Core.Make.mkDoubleExpr'
mkDoubleLitDouble :: Double -> Expr b

mkDoubleLit       d = Lit (mkLitDouble d)
mkDoubleLitDouble d = Lit (mkLitDouble (toRational d))

-- | Bind all supplied binding groups over an expression in a nested let expression. Assumes
-- that the rhs satisfies the let/app invariant. Prefer to use 'GHC.Core.Make.mkCoreLets' if
-- possible, which does guarantee the invariant
mkLets :: [Bind b] -> Expr b -> Expr b
-- | Bind all supplied binders over an expression in a nested lambda expression. Prefer to
-- use 'GHC.Core.Make.mkCoreLams' if possible
mkLams :: [b] -> Expr b -> Expr b

mkLams binders body = foldr Lam body binders
mkLets binds body   = foldr mkLet body binds

mkLet :: Bind b -> Expr b -> Expr b
-- The desugarer sometimes generates an empty Rec group
-- which Lint rejects, so we kill it off right away
mkLet (Rec []) body = body
mkLet bind     body = Let bind body

-- | @mkLetNonRec bndr rhs body@ wraps @body@ in a @let@ binding @bndr@.
mkLetNonRec :: b -> Expr b -> Expr b -> Expr b
mkLetNonRec b rhs body = Let (NonRec b rhs) body

-- | @mkLetRec binds body@ wraps @body@ in a @let rec@ with the given set of
-- @binds@ if binds is non-empty.
mkLetRec :: [(b, Expr b)] -> Expr b -> Expr b
mkLetRec [] body = body
mkLetRec bs body = Let (Rec bs) body

-- | Create a binding group where a type variable is bound to a type.
-- Per Note [Core type and coercion invariant],
-- this can only be used to bind something in a non-recursive @let@ expression
mkTyBind :: TyVar -> Type -> CoreBind
mkTyBind tv ty = NonRec tv (Type ty)

-- | Create a binding group where a coercion variable is bound to a coercion.
-- Per Note [Core type and coercion invariant],
-- this can only be used to bind something in a non-recursive @let@ expression
mkCoBind :: CoVar -> Coercion -> CoreBind
mkCoBind cv co = NonRec cv (Coercion co)

-- | Convert a binder into either a 'Var' or 'Type' 'Expr' appropriately
varToCoreExpr :: CoreBndr -> Expr b
varToCoreExpr v | isTyVar v = Type (mkTyVarTy v)
                | isCoVar v = Coercion (mkCoVarCo v)
                | otherwise = assert (isId v) $ Var v

varsToCoreExprs :: [CoreBndr] -> [Expr b]
varsToCoreExprs vs = map varToCoreExpr vs

{-
************************************************************************
*                                                                      *
   Getting a result type
*                                                                      *
************************************************************************

These are defined here to avoid a module loop between GHC.Core.Utils
and GHC.Core.FVs
-}

-- | If the expression is a 'Type', converts. Otherwise,
-- panics. NB: This does /not/ convert 'Coercion' to 'CoercionTy'.
exprToType :: CoreExpr -> Type
exprToType (Type ty) = ty
exprToType _bad      = pprPanic "exprToType" empty

{-
************************************************************************
*                                                                      *
\subsection{Simple access functions}
*                                                                      *
************************************************************************
-}

-- | Extract every variable bound by this group
bindersOf :: Bind b -> [b]
-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint
bindersOf (NonRec binder _) = [binder]
bindersOf (Rec pairs)       = [binder | (binder, _) <- pairs]

-- | 'bindersOf' applied to a list of binding groups
bindersOfBinds :: [Bind b] -> [b]
bindersOfBinds binds = foldr ((++) . bindersOf) [] binds

-- | The right-hand sides of a binding group.
rhssOfBind :: Bind b -> [Expr b]
rhssOfBind (NonRec _ rhs) = [rhs]
rhssOfBind (Rec pairs)    = [rhs | (_,rhs) <- pairs]

-- | The right-hand sides of a list of case alternatives.
rhssOfAlts :: [Alt b] -> [Expr b]
rhssOfAlts alts = [e | Alt _ _ e <- alts]

-- | Collapse all the bindings in the supplied groups into a single
-- list of lhs\/rhs pairs suitable for binding in a 'Rec' binding group
flattenBinds :: [Bind b] -> [(b, Expr b)]
flattenBinds (NonRec b r : binds) = (b,r) : flattenBinds binds
flattenBinds (Rec prs1   : binds) = prs1 ++ flattenBinds binds
flattenBinds []                   = []

-- | We often want to strip off leading lambdas before getting down to
-- business. Variants are 'collectTyBinders', 'collectValBinders',
-- and 'collectTyAndValBinders'
collectBinders         :: Expr b   -> ([b],     Expr b)
collectTyBinders       :: CoreExpr -> ([TyVar], CoreExpr)
collectValBinders      :: CoreExpr -> ([Id],    CoreExpr)
collectTyAndValBinders :: CoreExpr -> ([TyVar], [Id], CoreExpr)
-- | Strip off exactly N leading lambdas (type or value). Good for use with
-- join points.
collectNBinders :: Int -> Expr b -> ([b], Expr b)

collectBinders expr
  = go [] expr
  where
    go bs (Lam b e) = go (b:bs) e
    go bs e         = (reverse bs, e)

-- Stops at the first non-type-variable lambda
collectTyBinders expr
  = go [] expr
  where
    go tvs (Lam b e) | isTyVar b = go (b:tvs) e
    go tvs e                     = (reverse tvs, e)

-- Stops at the first non-Id lambda
collectValBinders expr
  = go [] expr
  where
    go ids (Lam b e) | isId b = go (b:ids) e
    go ids body               = (reverse ids, body)

-- Type binders first, then value binders; does not interleave
collectTyAndValBinders expr
  = (tvs, ids, body)
  where
    (tvs, body1) = collectTyBinders expr
    (ids, body)  = collectValBinders body1

-- Panics if the expression has fewer than N leading lambdas
collectNBinders orig_n orig_expr
  = go orig_n [] orig_expr
  where
    go 0 bs expr      = (reverse bs, expr)
    go n bs (Lam b e) = go (n-1) (b:bs) e
    go _ _  _         = pprPanic "collectNBinders" $ int orig_n

-- | Takes a nested application expression and returns the function
-- being applied and the arguments to which it is applied
collectArgs :: Expr b -> (Expr b, [Arg b])
collectArgs expr
  = go expr []
  where
    go (App f a) as = go f (a:as)
    go e         as = (e, as)

-- | fmap on the body of a lambda.
--    wrapLamBody f (\x -> body) == (\x -> f body)
wrapLamBody :: (CoreExpr -> CoreExpr) -> CoreExpr -> CoreExpr
wrapLamBody f expr = go expr
  where
    go (Lam v body) = Lam v $ go body
    go expr         = f expr

-- | Attempt to remove the last N arguments of a function call.
-- Strip off any ticks or coercions encountered along the way and any
-- at the end.
stripNArgs :: Word -> Expr a -> Maybe (Expr a)
stripNArgs !n (Tick _ e) = stripNArgs n e
stripNArgs n (Cast f _)  = stripNArgs n f
stripNArgs 0 e           = Just e
stripNArgs n (App f _)   = stripNArgs (n - 1) f
stripNArgs _ _           = Nothing

-- | Like 'collectArgs', but also looks through floatable
-- ticks if it means that we can find more arguments.
collectArgsTicks :: (CoreTickish -> Bool) -> Expr b
                 -> (Expr b, [Arg b], [CoreTickish])
collectArgsTicks skipTick expr
  = go expr [] []
  where
    go (App f a)  as ts = go f (a:as) ts
    go (Tick t e) as ts
      | skipTick t      = go e as (t:ts)
    go e          as ts = (e, as, reverse ts)

{-
************************************************************************
*                                                                      *
\subsection{Predicates}
*                                                                      *
************************************************************************

At one time we optionally carried type arguments through to runtime.
@isRuntimeVar v@ returns if (Lam v _) really becomes a lambda at runtime,
i.e. if type applications are actual lambdas because types are kept around
at runtime.  Similarly isRuntimeArg.
-}

-- | Will this variable exist at runtime?
isRuntimeVar :: Var -> Bool
isRuntimeVar = isId

-- | Will this argument expression exist at runtime?
isRuntimeArg :: CoreExpr -> Bool
isRuntimeArg = isValArg

-- | Returns @True@ for value arguments, false for type args
-- NB: coercions are value arguments (zero width, to be sure,
-- like State#, but still value args).
isValArg :: Expr b -> Bool
isValArg e = not (isTypeArg e)

-- | Returns @True@ iff the expression is a 'Type' or 'Coercion'
-- expression at its top level
isTyCoArg :: Expr b -> Bool
isTyCoArg (Type {})     = True
isTyCoArg (Coercion {}) = True
isTyCoArg _             = False

-- | Returns @True@ iff the expression is a 'Coercion'
-- expression at its top level
isCoArg :: Expr b -> Bool
isCoArg (Coercion {}) = True
isCoArg _             = False

-- | Returns @True@ iff the expression is a 'Type' expression at its
-- top level.  Note this does NOT include 'Coercion's.
isTypeArg :: Expr b -> Bool
isTypeArg (Type {}) = True
isTypeArg _         = False

-- | The number of binders that bind values rather than types
valBndrCount :: [CoreBndr] -> Int
valBndrCount = count isId

-- | The number of argument expressions that are values rather than types at their top level
valArgCount :: [Arg b] -> Int
valArgCount = count isValArg

{-
************************************************************************
*                                                                      *
\subsection{Annotated core}
*                                                                      *
************************************************************************
-}

-- | Annotated core: allows annotation at every node in the tree
type AnnExpr bndr annot = (annot, AnnExpr' bndr annot)

-- | A clone of the 'Expr' type but allowing annotation at every tree node
data AnnExpr' bndr annot
  = AnnVar      Id
  | AnnLit      Literal
  | AnnLam      bndr (AnnExpr bndr annot)
  | AnnApp      (AnnExpr bndr annot) (AnnExpr bndr annot)
  | AnnCase     (AnnExpr bndr annot) bndr Type [AnnAlt bndr annot]
  | AnnLet      (AnnBind bndr annot) (AnnExpr bndr annot)
  | AnnCast     (AnnExpr bndr annot) (annot, Coercion)
                  -- Put an annotation on the (root of) the coercion
  | AnnTick     CoreTickish (AnnExpr bndr annot)
  | AnnType     Type
  | AnnCoercion Coercion

-- | A clone of the 'Alt' type but allowing annotation at every tree node
data AnnAlt bndr annot = AnnAlt AltCon [bndr] (AnnExpr bndr annot)

-- | A clone of the 'Bind' type but allowing annotation at every tree node
data AnnBind bndr annot
  = AnnNonRec bndr (AnnExpr bndr annot)
  | AnnRec    [(bndr, AnnExpr bndr annot)]

-- | Takes a nested application expression and returns the function
-- being applied and the arguments to which it is applied
collectAnnArgs :: AnnExpr b a -> (AnnExpr b a, [AnnExpr b a])
collectAnnArgs expr
  = go expr []
  where
    go (_, AnnApp f a) as = go f (a:as)
    go e               as = (e, as)

-- As 'collectArgsTicks', for annotated expressions: also gathers
-- ticks accepted by the predicate while collecting arguments
collectAnnArgsTicks :: (CoreTickish -> Bool) -> AnnExpr b a
                    -> (AnnExpr b a, [AnnExpr b a], [CoreTickish])
collectAnnArgsTicks tickishOk expr
  = go expr [] []
  where
    go (_, AnnApp f a)  as ts = go f (a:as) ts
    go (_, AnnTick t e) as ts | tickishOk t
                              = go e as (t:ts)
    go e                as ts = (e, as, reverse ts)

-- | Discard all annotations, recovering a plain 'Expr'.
deAnnotate :: AnnExpr bndr annot -> Expr bndr
deAnnotate (_, e) = deAnnotate' e

deAnnotate' :: AnnExpr' bndr annot -> Expr bndr
deAnnotate' (AnnType t)           = Type t
deAnnotate' (AnnCoercion co)      = Coercion co
deAnnotate' (AnnVar  v)           = Var v
deAnnotate' (AnnLit  lit)         = Lit lit
deAnnotate' (AnnLam  binder body) = Lam binder (deAnnotate body)
deAnnotate' (AnnApp  fun arg)     = App (deAnnotate fun) (deAnnotate arg)
deAnnotate' (AnnCast e (_,co))    = Cast (deAnnotate e) co
deAnnotate' (AnnTick tick body)   = Tick tick (deAnnotate body)
deAnnotate' (AnnLet bind body)
  = Let (deAnnBind bind) (deAnnotate body)
deAnnotate' (AnnCase scrut v t alts)
  = Case (deAnnotate scrut) v t (map deAnnAlt alts)

deAnnAlt :: AnnAlt bndr annot -> Alt bndr
deAnnAlt (AnnAlt con args rhs) = Alt con args (deAnnotate rhs)

deAnnBind :: AnnBind b annot -> Bind b
deAnnBind (AnnNonRec var rhs) = NonRec var (deAnnotate rhs)
deAnnBind (AnnRec pairs)      = Rec [(v,deAnnotate rhs) | (v,rhs) <- pairs]

-- | As 'collectBinders' but for 'AnnExpr' rather than 'Expr'
collectAnnBndrs :: AnnExpr bndr annot -> ([bndr], AnnExpr bndr annot)
collectAnnBndrs e
  = collect [] e
  where
    collect bs (_, AnnLam b body) = collect (b:bs) body
    collect bs body               = (reverse bs, body)

-- | As 'collectNBinders' but for 'AnnExpr' rather than 'Expr'
-- NOTE(review): the panic message below says "collectNBinders" (not
-- "collectNAnnBndrs"); kept as-is since this edit changes comments only.
collectNAnnBndrs :: Int -> AnnExpr bndr annot -> ([bndr], AnnExpr bndr annot)
collectNAnnBndrs orig_n e
  = collect orig_n [] e
  where
    collect 0 bs body               = (reverse bs, body)
    collect n bs (_, AnnLam b body) = collect (n-1) (b:bs) body
    collect _ _  _                  = pprPanic "collectNBinders" $ int orig_n
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Class.hs0000644000000000000000000003235114472400112020130 0ustar0000000000000000-- (c) The University of Glasgow 2006
-- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
--
-- The @Class@ datatype

module GHC.Core.Class (
        Class,
        ClassOpItem,
        ClassATItem(..), ATValidityInfo(..),
        ClassMinimalDef,
        DefMethInfo, pprDefMethInfo,

        FunDep, pprFundeps, pprFunDep,

        mkClass, mkAbstractClass, classTyVars, classArity,
        classKey, className, classATs, classATItems, classTyCon,
        classMethods, classOpItems, classBigSig, classExtraBigSig,
        classTvsFds, classSCTheta,
        classAllSelIds, classSCSelId, classSCSelIds, classMinimalDef,
        classHasFds, isAbstractClass,
    ) where

import GHC.Prelude

import {-# SOURCE #-} GHC.Core.TyCon    ( TyCon )
import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type, PredType )
import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType )
import GHC.Types.Var
import GHC.Types.Name
import GHC.Types.Basic
import GHC.Types.Unique
import GHC.Utils.Misc
import GHC.Utils.Panic
import GHC.Utils.Panic.Plain
import GHC.Types.SrcLoc
import GHC.Utils.Outputable
import GHC.Data.BooleanFormula (BooleanFormula, mkTrue)

import qualified Data.Data as Data

{-
************************************************************************
*                                                                      *
\subsection[Class-basic]{@Class@: basic definition}
*                                                                      *
************************************************************************

A @Class@ corresponds to a Greek kappa in the static semantics:
-}

data Class
  = Class {
        classTyCon :: TyCon,    -- The data type constructor for
                                -- dictionaries of this class
                                -- See Note [ATyCon for classes] in GHC.Core.TyCo.Rep

        className :: Name,      -- Just the cached name of the TyCon
        classKey  :: Unique,    -- Cached unique of TyCon

        classTyVars :: [TyVar], -- The class kind and type variables;
                                -- identical to those of the TyCon
                                --
                                -- If you want visibility info, look at the classTyCon
                -- This field is redundant because it's duplicated in the
                -- classTyCon, but classTyVars is used quite often, so maybe
                -- it's a bit faster to cache it here

        classFunDeps :: [FunDep TyVar], -- The functional dependencies

        classBody :: ClassBody -- Superclasses, ATs, methods
        }

-- | e.g.
--
-- >  class C a b c | a b -> c, a c -> b where...
--
-- Here fun-deps are [([a,b],[c]), ([a,c],[b])]
--
--  - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnRarrow'',

-- For details on above see Note [exact print annotations] in GHC.Parser.Annotation
type FunDep a = ([a],[a])

type ClassOpItem = (Id, DefMethInfo)
        -- Selector function; contains unfolding
        -- Default-method info

type DefMethInfo = Maybe (Name, DefMethSpec Type)
   -- Nothing                   No default method
   -- Just ($dm, VanillaDM)     A polymorphic default method, name $dm
   -- Just ($gm, GenericDM ty)  A generic default method, name $gm, type ty
   --                           The generic dm type is *not* quantified
   --                           over the class variables; ie has the
   --                           class variables free

data ClassATItem
  = ATI TyCon         -- See Note [Associated type tyvar names]
        (Maybe (Type, ATValidityInfo))
                      -- Default associated type (if any) from this template
                      -- Note [Associated type defaults]

-- | Information about an associated type family default implementation. This
-- is used solely for validity checking.
-- See @Note [Type-checking default assoc decls]@ in "GHC.Tc.TyCl".
data ATValidityInfo
  = NoATVI              -- Used for associated type families that are imported
                        -- from another module, for which we don't need to
                        -- perform any validity checking.

  | ATVI SrcSpan [Type] -- Used for locally defined associated type families.
                        -- The [Type] are the LHS patterns.
type ClassMinimalDef = BooleanFormula Name -- Required methods

-- | The body of a class: either fully abstract (no superclass,
-- associated-type, or method information) or concrete.
data ClassBody
  = AbstractClass
  | ConcreteClass {
        -- Superclasses: eg: (F a ~ b, F b ~ G a, Eq a, Show b)
        -- We need value-level selectors for both the dictionary
        -- superclasses and the equality superclasses
        cls_sc_theta   :: [PredType],   -- Immediate superclasses,
        cls_sc_sel_ids :: [Id],         -- Selector functions to extract the
                                        -- superclasses from a
                                        -- dictionary of this class

        -- Associated types
        cls_ats :: [ClassATItem],       -- Associated type families

        -- Class operations (methods, not superclasses)
        cls_ops :: [ClassOpItem],       -- Ordered by tag

        -- Minimal complete definition
        cls_min_def :: ClassMinimalDef
    }
    -- TODO: maybe super classes should be allowed in abstract class definitions

-- An abstract class has no recorded minimal definition, so we return
-- the trivially-satisfiable formula 'mkTrue' for it
classMinimalDef :: Class -> ClassMinimalDef
classMinimalDef Class{ classBody = ConcreteClass{ cls_min_def = d } } = d
classMinimalDef _ = mkTrue -- TODO: make sure this is the right direction

{- Note [Associated type defaults]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following is an example of associated type defaults:
             class C a where
               data D a r
               type F x a b :: *
               type F p q r = (p,q)->r   -- Default

Note that

 * The TyCons for the associated types *share type variables* with the
   class, so that we can tell which argument positions should be
   instantiated in an instance decl.  (The first for 'D', the second
   for 'F'.)

 * We can have default definitions only for *type* families,
   not data families

 * In the default decl, the "patterns" should all be type variables,
   but (in the source language) they don't need to be the same as in
   the 'type' decl signature or the class.  It's more like a
   free-standing 'type instance' declaration.

 * HOWEVER, in the internal ClassATItem we rename the RHS to match the
   tyConTyVars of the family TyCon.  So in the example above we'd get
   a ClassATItem of
        ATI F ((x,a) -> b)
   So the tyConTyVars of the family TyCon bind the free vars of
   the default Type rhs

The @mkClass@ function fills in the indirect superclasses.
The SrcSpan is for the entire original declaration.
-}

-- | Build a concrete 'Class'.  The caller supplies everything,
-- including the dictionary 'TyCon'; only the 'ClassBody' is assembled
-- here.
mkClass :: Name -> [TyVar]
        -> [FunDep TyVar] -> [PredType] -> [Id] -> [ClassATItem]
        -> [ClassOpItem]
        -> ClassMinimalDef
        -> TyCon
        -> Class
mkClass cls_name tyvars fds super_classes superdict_sels at_stuff
        op_stuff mindef tycon
  = Class { classKey     = nameUnique cls_name,
            className    = cls_name,
                -- NB:  tyConName tycon = cls_name,
                -- But it takes a module loop to assert it here
            classTyVars  = tyvars,
            classFunDeps = fds,
            classBody    = ConcreteClass {
                cls_sc_theta   = super_classes,
                cls_sc_sel_ids = superdict_sels,
                cls_ats        = at_stuff,
                cls_ops        = op_stuff,
                cls_min_def    = mindef
            },
            classTyCon   = tycon }

-- | Build an abstract 'Class': no superclasses, associated types,
-- methods, or minimal definition are recorded.
mkAbstractClass :: Name -> [TyVar]
                -> [FunDep TyVar]
                -> TyCon
                -> Class
mkAbstractClass cls_name tyvars fds tycon
  = Class { classKey     = nameUnique cls_name,
            className    = cls_name,
                -- NB:  tyConName tycon = cls_name,
                -- But it takes a module loop to assert it here
            classTyVars  = tyvars,
            classFunDeps = fds,
            classBody    = AbstractClass,
            classTyCon   = tycon }

{- Note [Associated type tyvar names]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The TyCon of an associated type should use the same variable names as its
parent class. Thus
    class C a b where
      type F b x a :: *
We make F use the same Name for 'a' as C does, and similarly 'b'.

The reason for this is when checking instances it's easier to match
them up, to ensure they match.  Eg
    instance C Int [d] where
      type F [d] x Int = ....
we should make sure that the first and third args match the instance
header.

Having the same variables for class and tycon is also used in checkValidRoles
(in GHC.Tc.TyCl) when checking a class's roles.

************************************************************************
*                                                                      *
\subsection[Class-selectors]{@Class@: simple selectors}
*                                                                      *
************************************************************************

The rest of these functions are just simple selectors.
-} classArity :: Class -> Arity classArity clas = length (classTyVars clas) -- Could memoise this classAllSelIds :: Class -> [Id] -- Both superclass-dictionary and method selectors classAllSelIds c@(Class { classBody = ConcreteClass { cls_sc_sel_ids = sc_sels }}) = sc_sels ++ classMethods c classAllSelIds c = assert (null (classMethods c) ) [] classSCSelIds :: Class -> [Id] -- Both superclass-dictionary and method selectors classSCSelIds (Class { classBody = ConcreteClass { cls_sc_sel_ids = sc_sels }}) = sc_sels classSCSelIds c = assert (null (classMethods c) ) [] classSCSelId :: Class -> Int -> Id -- Get the n'th superclass selector Id -- where n is 0-indexed, and counts -- *all* superclasses including equalities classSCSelId (Class { classBody = ConcreteClass { cls_sc_sel_ids = sc_sels } }) n = assert (n >= 0 && lengthExceeds sc_sels n ) sc_sels !! n classSCSelId c n = pprPanic "classSCSelId" (ppr c <+> ppr n) classMethods :: Class -> [Id] classMethods (Class { classBody = ConcreteClass { cls_ops = op_stuff } }) = [op_sel | (op_sel, _) <- op_stuff] classMethods _ = [] classOpItems :: Class -> [ClassOpItem] classOpItems (Class { classBody = ConcreteClass { cls_ops = op_stuff }}) = op_stuff classOpItems _ = [] classATs :: Class -> [TyCon] classATs (Class { classBody = ConcreteClass { cls_ats = at_stuff } }) = [tc | ATI tc _ <- at_stuff] classATs _ = [] classATItems :: Class -> [ClassATItem] classATItems (Class { classBody = ConcreteClass { cls_ats = at_stuff }}) = at_stuff classATItems _ = [] classSCTheta :: Class -> [PredType] classSCTheta (Class { classBody = ConcreteClass { cls_sc_theta = theta_stuff }}) = theta_stuff classSCTheta _ = [] classTvsFds :: Class -> ([TyVar], [FunDep TyVar]) classTvsFds c = (classTyVars c, classFunDeps c) classHasFds :: Class -> Bool classHasFds (Class { classFunDeps = fds }) = not (null fds) classBigSig :: Class -> ([TyVar], [PredType], [Id], [ClassOpItem]) classBigSig (Class {classTyVars = tyvars, classBody = AbstractClass}) = 
(tyvars, [], [], []) classBigSig (Class {classTyVars = tyvars, classBody = ConcreteClass { cls_sc_theta = sc_theta, cls_sc_sel_ids = sc_sels, cls_ops = op_stuff }}) = (tyvars, sc_theta, sc_sels, op_stuff) classExtraBigSig :: Class -> ([TyVar], [FunDep TyVar], [PredType], [Id], [ClassATItem], [ClassOpItem]) classExtraBigSig (Class {classTyVars = tyvars, classFunDeps = fundeps, classBody = AbstractClass}) = (tyvars, fundeps, [], [], [], []) classExtraBigSig (Class {classTyVars = tyvars, classFunDeps = fundeps, classBody = ConcreteClass { cls_sc_theta = sc_theta, cls_sc_sel_ids = sc_sels, cls_ats = ats, cls_ops = op_stuff }}) = (tyvars, fundeps, sc_theta, sc_sels, ats, op_stuff) isAbstractClass :: Class -> Bool isAbstractClass Class{ classBody = AbstractClass } = True isAbstractClass _ = False {- ************************************************************************ * * \subsection[Class-instances]{Instance declarations for @Class@} * * ************************************************************************ We compare @Classes@ by their keys (which include @Uniques@). -} instance Eq Class where c1 == c2 = classKey c1 == classKey c2 c1 /= c2 = classKey c1 /= classKey c2 instance Uniquable Class where getUnique c = classKey c instance NamedThing Class where getName clas = className clas instance Outputable Class where ppr c = ppr (getName c) pprDefMethInfo :: DefMethInfo -> SDoc pprDefMethInfo Nothing = empty -- No default method pprDefMethInfo (Just (n, VanillaDM)) = text "Default method" <+> ppr n pprDefMethInfo (Just (n, GenericDM ty)) = text "Generic default method" <+> ppr n <+> dcolon <+> pprType ty pprFundeps :: Outputable a => [FunDep a] -> SDoc pprFundeps [] = empty pprFundeps fds = hsep (vbar : punctuate comma (map pprFunDep fds)) pprFunDep :: Outputable a => FunDep a -> SDoc pprFunDep (us, vs) = hsep [interppSP us, arrow, interppSP vs] instance Data.Data Class where -- don't traverse? 
toConstr _ = abstractConstr "Class" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Class" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Coercion.hs0000644000000000000000000032431214472400112020625 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {- (c) The University of Glasgow 2006 -} -- | Module for (a) type kinds and (b) type coercions, -- as used in System FC. See 'GHC.Core.Expr' for -- more on System FC and how coercions fit into it. -- module GHC.Core.Coercion ( -- * Main data type Coercion, CoercionN, CoercionR, CoercionP, MCoercion(..), MCoercionN, MCoercionR, UnivCoProvenance, CoercionHole(..), coHoleCoVar, setCoHoleCoVar, LeftOrRight(..), Var, CoVar, TyCoVar, Role(..), ltRole, -- ** Functions over coercions coVarRType, coVarLType, coVarTypes, coVarKind, coVarKindsTypesRole, coVarRole, coercionType, mkCoercionType, coercionKind, coercionLKind, coercionRKind,coercionKinds, coercionRole, coercionKindRole, -- ** Constructing coercions mkGReflCo, mkReflCo, mkRepReflCo, mkNomReflCo, mkCoVarCo, mkCoVarCos, mkAxInstCo, mkUnbranchedAxInstCo, mkAxInstRHS, mkUnbranchedAxInstRHS, mkAxInstLHS, mkUnbranchedAxInstLHS, mkPiCo, mkPiCos, mkCoCast, mkSymCo, mkTransCo, mkNthCo, mkNthCoFunCo, nthCoRole, mkLRCo, mkInstCo, mkAppCo, mkAppCos, mkTyConAppCo, mkFunCo, mkFunResCo, mkForAllCo, mkForAllCos, mkHomoForAllCos, mkPhantomCo, mkHoleCo, mkUnivCo, mkSubCo, mkAxiomInstCo, mkProofIrrelCo, downgradeRole, mkAxiomRuleCo, mkGReflRightCo, mkGReflLeftCo, mkCoherenceLeftCo, mkCoherenceRightCo, mkKindCo, castCoercionKind, castCoercionKind1, castCoercionKind2, mkFamilyTyConAppCo, mkHeteroCoercionType, mkPrimEqPred, mkReprPrimEqPred, mkPrimEqPredRole, mkHeteroPrimEqPred, mkHeteroReprPrimEqPred, -- ** Decomposition instNewTyCon_maybe, NormaliseStepper, NormaliseStepResult(..), composeSteppers, mapStepResult, unwrapNewTypeStepper, topNormaliseNewType_maybe, topNormaliseTypeX, decomposeCo, decomposeFunCo, 
decomposePiCos, getCoVar_maybe, splitTyConAppCo_maybe, splitAppCo_maybe, splitFunCo_maybe, splitForAllCo_maybe, splitForAllCo_ty_maybe, splitForAllCo_co_maybe, nthRole, tyConRolesX, tyConRolesRepresentational, setNominalRole_maybe, pickLR, isGReflCo, isReflCo, isReflCo_maybe, isGReflCo_maybe, isReflexiveCo, isReflexiveCo_maybe, isReflCoVar_maybe, isGReflMCo, mkGReflLeftMCo, mkGReflRightMCo, mkCoherenceRightMCo, coToMCo, mkTransMCo, mkTransMCoL, mkTransMCoR, mkCastTyMCo, mkSymMCo, mkHomoForAllMCo, mkFunResMCo, mkPiMCos, isReflMCo, checkReflexiveMCo, -- ** Coercion variables mkCoVar, isCoVar, coVarName, setCoVarName, setCoVarUnique, isCoVar_maybe, -- ** Free variables tyCoVarsOfCo, tyCoVarsOfCos, coVarsOfCo, tyCoFVsOfCo, tyCoFVsOfCos, tyCoVarsOfCoDSet, coercionSize, anyFreeVarsOfCo, -- ** Substitution CvSubstEnv, emptyCvSubstEnv, lookupCoVar, substCo, substCos, substCoVar, substCoVars, substCoWith, substCoVarBndr, extendTvSubstAndInScope, getCvSubstEnv, -- ** Lifting liftCoSubst, liftCoSubstTyVar, liftCoSubstWith, liftCoSubstWithEx, emptyLiftingContext, extendLiftingContext, extendLiftingContextAndInScope, liftCoSubstVarBndrUsing, isMappedByLC, mkSubstLiftingContext, zapLiftingContext, substForAllCoBndrUsingLC, lcTCvSubst, lcInScopeSet, LiftCoEnv, LiftingContext(..), liftEnvSubstLeft, liftEnvSubstRight, substRightCo, substLeftCo, swapLiftCoEnv, lcSubstLeft, lcSubstRight, -- ** Comparison eqCoercion, eqCoercionX, -- ** Forcing evaluation of coercions seqCo, -- * Pretty-printing pprCo, pprParendCo, pprCoAxiom, pprCoAxBranch, pprCoAxBranchLHS, pprCoAxBranchUser, tidyCoAxBndrsForUser, etaExpandCoAxBranch, -- * Tidying tidyCo, tidyCos, -- * Other promoteCoercion, buildCoercion, multToCo, hasCoercionHoleTy, hasCoercionHoleCo, hasThisCoercionHoleTy, setCoHoleType ) where import {-# SOURCE #-} GHC.CoreToIface (toIfaceTyCon, tidyToIfaceTcArgs) import GHC.Prelude import GHC.Iface.Type import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Core.TyCo.Ppr import 
GHC.Core.TyCo.Subst import GHC.Core.TyCo.Tidy import GHC.Core.Type import GHC.Core.TyCon import GHC.Core.TyCon.RecWalk import GHC.Core.Coercion.Axiom import {-# SOURCE #-} GHC.Core.Utils ( mkFunctionType ) import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name hiding ( varName ) import GHC.Types.Basic import GHC.Types.Unique import GHC.Data.Pair import GHC.Types.SrcLoc import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Data.List.SetOps import GHC.Data.Maybe import GHC.Types.Unique.FM import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Control.Monad (foldM, zipWithM) import Data.Function ( on ) import Data.Char( isDigit ) import qualified Data.Monoid as Monoid {- %************************************************************************ %* * -- The coercion arguments always *precisely* saturate -- arity of (that branch of) the CoAxiom. If there are -- any left over, we use AppCo. See -- See [Coercion axioms applied to coercions] in GHC.Core.TyCo.Rep \subsection{Coercion variables} %* * %************************************************************************ -} coVarName :: CoVar -> Name coVarName = varName setCoVarUnique :: CoVar -> Unique -> CoVar setCoVarUnique = setVarUnique setCoVarName :: CoVar -> Name -> CoVar setCoVarName = setVarName {- %************************************************************************ %* * Pretty-printing CoAxioms %* * %************************************************************************ Defined here to avoid module loops. CoAxiom is loaded very early on. 
-}

etaExpandCoAxBranch :: CoAxBranch -> ([TyVar], [Type], Type)
-- Return the (tvs,lhs,rhs) after eta-expanding,
-- to the way in which the axiom was originally written
-- See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom
etaExpandCoAxBranch (CoAxBranch { cab_tvs = tvs
                                , cab_eta_tvs = eta_tvs
                                , cab_lhs = lhs
                                , cab_rhs = rhs })
  -- ToDo: what about eta_cvs?
  = (tvs ++ eta_tvs, lhs ++ eta_tys, mkAppTys rhs eta_tys)
  where
    eta_tys = mkTyVarTys eta_tvs

pprCoAxiom :: CoAxiom br -> SDoc
-- Used in debug-printing only
pprCoAxiom ax@(CoAxiom { co_ax_tc = tc, co_ax_branches = branches })
  = hang (text "axiom" <+> ppr ax)
       2 (braces $ vcat (map (pprCoAxBranchUser tc) (fromBranches branches)))

pprCoAxBranchUser :: TyCon -> CoAxBranch -> SDoc
-- Used when printing injectivity errors (FamInst.reportInjectivityErrors)
-- and inaccessible branches (GHC.Tc.Validity.inaccessibleCoAxBranch)
-- This happens in error messages: don't print the RHS of a data
-- family axiom, which is meaningless to a user
pprCoAxBranchUser tc br
  | isDataFamilyTyCon tc = pprCoAxBranchLHS tc br
  | otherwise            = pprCoAxBranch    tc br

pprCoAxBranchLHS :: TyCon -> CoAxBranch -> SDoc
-- Print the family-instance equation when reporting
--   a conflict between equations (FamInst.conflictInstErr)
-- For type families the RHS is important; for data families not so.
-- Indeed for data families the RHS is a mysterious internal
--   type constructor, so we suppress it (#14179)
-- See FamInstEnv Note [Family instance overlap conflicts]
pprCoAxBranchLHS = ppr_co_ax_branch pp_rhs
  where
    pp_rhs _ _ = empty   -- Suppress the RHS entirely

pprCoAxBranch :: TyCon -> CoAxBranch -> SDoc
pprCoAxBranch = ppr_co_ax_branch ppr_rhs
  where
    ppr_rhs env rhs = equals <+> pprPrecTypeX env topPrec rhs

-- Shared worker for 'pprCoAxBranch' and 'pprCoAxBranchLHS'; the first
-- argument says how to print the (tidied) right-hand side.
ppr_co_ax_branch :: (TidyEnv -> Type -> SDoc) -> TyCon -> CoAxBranch -> SDoc
ppr_co_ax_branch ppr_rhs fam_tc branch
  = foldr1 (flip hangNotEmpty 2)
    [ pprUserForAll (mkTyCoVarBinders Inferred bndrs')
         -- See Note [Printing foralls in type family instances] in GHC.Iface.Type
    , pp_lhs <+> ppr_rhs tidy_env ee_rhs
    , vcat [ text "-- Defined" <+> pp_loc
           , ppUnless (null incomps) $ whenPprDebug $
             text "-- Incomps:" <+> vcat (map (pprCoAxBranch fam_tc) incomps) ]
    ]
  where
    incomps = coAxBranchIncomps branch
    loc     = coAxBranchSpan branch
    pp_loc | isGoodSrcSpan loc = text "at" <+> ppr (srcSpanStart loc)
           | otherwise         = text "in" <+> ppr loc

    -- Eta-expand LHS and RHS types, because sometimes data family
    -- instances are eta-reduced.
    -- See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom.
    (ee_tvs, ee_lhs, ee_rhs) = etaExpandCoAxBranch branch

    pp_lhs = pprIfaceTypeApp topPrec (toIfaceTyCon fam_tc)
                             (tidyToIfaceTcArgs tidy_env fam_tc ee_lhs)

    (tidy_env, bndrs') = tidyCoAxBndrsForUser emptyTidyEnv ee_tvs

tidyCoAxBndrsForUser :: TidyEnv -> [Var] -> (TidyEnv, [Var])
-- Tidy wildcards "_1", "_2" to "_", and do not return them
-- in the list of binders to be printed
-- This is so that in error messages we see
--    forall a. F _ [a] _ = ...
-- rather than
--    forall a _1 _2. F _1 [a] _2 = ...
--
-- This is a rather disgusting function
-- See Note [Wildcard names] in GHC.Tc.Gen.HsType
tidyCoAxBndrsForUser init_env tcvs
  = (tidy_env, reverse tidy_bndrs)
  where
    (tidy_env, tidy_bndrs) = foldl tidy_one (init_env, []) tcvs

    -- Wildcard binders are dropped from the output list but mapped
    -- to a "_"-named variable in the substitution, so occurrences
    -- in the body still print as "_".
    tidy_one (env@(occ_env, subst), rev_bndrs') bndr
      | is_wildcard bndr = (env_wild, rev_bndrs')
      | otherwise        = (env',     bndr' : rev_bndrs')
      where
        (env', bndr') = tidyVarBndr env bndr
        env_wild  = (occ_env, extendVarEnv subst bndr wild_bndr)
        wild_bndr = setVarName bndr $
                    tidyNameOcc (varName bndr) (mkTyVarOcc "_")
                    -- Tidy the binder to "_"

    is_wildcard :: Var -> Bool
    is_wildcard tv = case occNameString (getOccName tv) of
                       ('_' : rest) -> all isDigit rest
                       _            -> False

{- *********************************************************************
*                                                                      *
                   MCoercion
*                                                                      *
********************************************************************* -}

coToMCo :: Coercion -> MCoercion
-- Convert a coercion to a MCoercion,
-- It's not clear whether or not isReflexiveCo would be better here
-- See #19815 for a bit of data and discussion on this point
coToMCo co | isReflCo co = MRefl
           | otherwise   = MCo co

checkReflexiveMCo :: MCoercion -> MCoercion
-- Unlike 'coToMCo', this uses the deep (and potentially expensive)
-- 'isReflexiveCo' check before collapsing to MRefl
checkReflexiveMCo MRefl = MRefl
checkReflexiveMCo (MCo co)
  | isReflexiveCo co = MRefl
  | otherwise        = MCo co

-- | Tests if this MCoercion is obviously generalized reflexive
-- Guaranteed to work very quickly.
isGReflMCo :: MCoercion -> Bool
isGReflMCo MRefl = True
isGReflMCo (MCo co) | isGReflCo co = True
isGReflMCo _ = False

-- | Make a generalized reflexive coercion
mkGReflCo :: Role -> Type -> MCoercionN -> Coercion
mkGReflCo r ty mco
  | isGReflMCo mco = if r == Nominal then Refl ty
                     else GRefl r ty MRefl
  | otherwise = GRefl r ty mco

-- | Compose two MCoercions via transitivity
mkTransMCo :: MCoercion -> MCoercion -> MCoercion
mkTransMCo MRefl     co2       = co2
mkTransMCo co1       MRefl     = co1
mkTransMCo (MCo co1) (MCo co2) = MCo (mkTransCo co1 co2)

mkTransMCoL :: MCoercion -> Coercion -> MCoercion
mkTransMCoL MRefl     co2 = coToMCo co2
mkTransMCoL (MCo co1) co2 = MCo (mkTransCo co1 co2)

mkTransMCoR :: Coercion -> MCoercion -> MCoercion
mkTransMCoR co1 MRefl     = coToMCo co1
mkTransMCoR co1 (MCo co2) = MCo (mkTransCo co1 co2)

-- | Get the reverse of an 'MCoercion'
mkSymMCo :: MCoercion -> MCoercion
mkSymMCo MRefl    = MRefl
mkSymMCo (MCo co) = MCo (mkSymCo co)

-- | Cast a type by an 'MCoercion'
mkCastTyMCo :: Type -> MCoercion -> Type
mkCastTyMCo ty MRefl    = ty
mkCastTyMCo ty (MCo co) = ty `mkCastTy` co

mkHomoForAllMCo :: TyCoVar -> MCoercion -> MCoercion
mkHomoForAllMCo _   MRefl    = MRefl
mkHomoForAllMCo tcv (MCo co) = MCo (mkHomoForAllCos [tcv] co)

mkPiMCos :: [Var] -> MCoercion -> MCoercion
mkPiMCos _  MRefl    = MRefl
mkPiMCos vs (MCo co) = MCo (mkPiCos Representational vs co)

mkFunResMCo :: Scaled Type -> MCoercionR -> MCoercionR
mkFunResMCo _      MRefl    = MRefl
mkFunResMCo arg_ty (MCo co) = MCo (mkFunResCo Representational arg_ty co)

mkGReflLeftMCo :: Role -> Type -> MCoercionN -> Coercion
mkGReflLeftMCo r ty MRefl    = mkReflCo r ty
mkGReflLeftMCo r ty (MCo co) = mkGReflLeftCo r ty co

mkGReflRightMCo :: Role -> Type -> MCoercionN -> Coercion
mkGReflRightMCo r ty MRefl    = mkReflCo r ty
mkGReflRightMCo r ty (MCo co) = mkGReflRightCo r ty co

-- | Like 'mkCoherenceRightCo', but with an 'MCoercion'
mkCoherenceRightMCo :: Role -> Type -> MCoercionN -> Coercion -> Coercion
mkCoherenceRightMCo _ _  MRefl    co2 = co2
mkCoherenceRightMCo r ty (MCo co) co2 = mkCoherenceRightCo r ty co co2

isReflMCo :: MCoercion -> Bool
isReflMCo MRefl = True
isReflMCo _     = False

{-
%************************************************************************
%*                                                                      *
            Destructing coercions
%*                                                                      *
%************************************************************************

Note [Function coercions]
~~~~~~~~~~~~~~~~~~~~~~~~~
Remember that
  (->) :: forall {r1} {r2}. TYPE r1 -> TYPE r2 -> TYPE LiftedRep
whose `RuntimeRep' arguments are intentionally marked inferred to
avoid type application.

Hence
  FunCo r mult co1 co2 :: (s1->t1) ~r (s2->t2)
is short for
  TyConAppCo (->) mult co_rep1 co_rep2 co1 co2
where co_rep1, co_rep2 are the coercions on the representations.
-}

-- | This breaks a 'Coercion' with type @T A B C ~ T D E F@ into
-- a list of 'Coercion's of kinds @A ~ D@, @B ~ E@ and @C ~ F@. Hence:
--
-- > decomposeCo 3 c [r1, r2, r3] = [nth r1 0 c, nth r2 1 c, nth r3 2 c]
decomposeCo :: Arity -> Coercion
            -> [Role]  -- the roles of the output coercions
                       -- this must have at least as many
                       -- entries as the Arity provided
            -> [Coercion]
decomposeCo arity co rs
  = [mkNthCo r n co | (n,r) <- [0..(arity-1)] `zip` rs ]
           -- Remember, Nth is zero-indexed

decomposeFunCo :: HasDebugCallStack
               => Role      -- Role of the input coercion
               -> Coercion  -- Input coercion
               -> (CoercionN, Coercion, Coercion)
-- Expects co :: (s1 -> t1) ~ (s2 -> t2)
-- Returns (co1 :: s1~s2, co2 :: t1~t2)
-- See Note [Function coercions] for the "3" and "4"
decomposeFunCo _ (FunCo _ w co1 co2) = (w, co1, co2)
  -- Short-circuits the calls to mkNthCo

decomposeFunCo r co = assertPpr all_ok (ppr co)
                      (mkNthCo Nominal 0 co, mkNthCo r 3 co, mkNthCo r 4 co)
  where
    Pair s1t1 s2t2 = coercionKind co
    all_ok = isFunTy s1t1 && isFunTy s2t2

{- Note [Pushing a coercion into a pi-type]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have this:
    (f |> co) t1 .. tn
Then we want to push the coercion into the arguments, so as to make
progress.
For example of why you might want to do so, see Note
[Respecting definitional equality] in GHC.Core.TyCo.Rep.

This is done by decomposePiCos.  Specifically, if
    decomposePiCos co [t1,..,tn] = ([co1,...,cok], cor)
then
    (f |> co) t1 .. tn   =   (f (t1 |> co1) ... (tk |> cok)) |> cor) t(k+1) ... tn

Notes:

* k can be smaller than n! That is decomposePiCos can return *fewer*
  coercions than there are arguments (ie k < n), if the kind provided
  doesn't have enough binders.

* If there is a type error, we might see
       (f |> co) t1
  where co :: (forall a. ty) ~ (ty1 -> ty2)
  Here 'co' is insoluble, but we don't want to crash in decomposePiCos.
  So decomposePiCos carefully tests both sides of the coercion to check
  they are both foralls or both arrows.  Not doing this caused #15343.
-}

decomposePiCos :: HasDebugCallStack
               => CoercionN -> Pair Type  -- Coercion and its kind
               -> [Type]
               -> ([CoercionN], CoercionN)
-- See Note [Pushing a coercion into a pi-type]
decomposePiCos orig_co (Pair orig_k1 orig_k2) orig_args
  = go [] (orig_subst,orig_k1) orig_co (orig_subst,orig_k2) orig_args
  where
    orig_subst = mkEmptyTCvSubst $ mkInScopeSet $
                 tyCoVarsOfTypes orig_args `unionVarSet` tyCoVarsOfCo orig_co

    go :: [CoercionN]      -- accumulator for argument coercions, reversed
       -> (TCvSubst,Kind)  -- Lhs kind of coercion
       -> CoercionN        -- coercion originally applied to the function
       -> (TCvSubst,Kind)  -- Rhs kind of coercion
       -> [Type]           -- Arguments to that function
       -> ([CoercionN], Coercion)
    -- Invariant:  co :: subst1(k1) ~ subst2(k2)

    go acc_arg_cos (subst1,k1) co (subst2,k2) (ty:tys)
      | Just (a, t1) <- splitForAllTyCoVar_maybe k1
      , Just (b, t2) <- splitForAllTyCoVar_maybe k2
        -- know     co :: (forall a:s1.t1) ~ (forall b:s2.t2)
        --    function :: forall a:s1.t1   (the function is not passed to decomposePiCos)
        --           a :: s1
        --           b :: s2
        --          ty :: s2
        -- need arg_co :: s2 ~ s1
        --      res_co :: t1[ty |> arg_co / a] ~ t2[ty / b]
      = let arg_co  = mkNthCo Nominal 0 (mkSymCo co)
            res_co  = mkInstCo co (mkGReflLeftCo Nominal ty arg_co)
            subst1' = extendTCvSubst subst1 a (ty `CastTy` arg_co)
            subst2' = extendTCvSubst subst2 b ty
        in
        go (arg_co : acc_arg_cos) (subst1', t1) res_co (subst2', t2) tys

        -- BUGFIX: the two pattern guards previously both bound the name
        -- _w1, which is an illegal duplicate binding within a single
        -- guard sequence; the second binder is now _w2.
      | Just (_w1, _s1, t1) <- splitFunTy_maybe k1
      , Just (_w2, _s2, t2) <- splitFunTy_maybe k2
        -- know     co :: (s1 -> t1) ~ (s2 -> t2)
        --    function :: s1 -> t1
        --          ty :: s2
        -- need arg_co :: s2 ~ s1
        --      res_co :: t1 ~ t2
      = let (_, sym_arg_co, res_co) = decomposeFunCo Nominal co
            -- It should be fine to ignore the multiplicity bit of the coercion
            -- for a Nominal coercion.
            arg_co = mkSymCo sym_arg_co
        in
        go (arg_co : acc_arg_cos) (subst1,t1) res_co (subst2,t2) tys

      | not (isEmptyTCvSubst subst1) || not (isEmptyTCvSubst subst2)
        -- Flush the pending substitutions into the kinds and retry with
        -- the same argument list.
        -- BUGFIX: k2 must be substituted with subst2, not subst1; using
        -- subst1 breaks the invariant co :: subst1(k1) ~ subst2(k2)
        -- stated above, because subst2 is zapped at the same time.
      = go acc_arg_cos (zapTCvSubst subst1, substTy subst1 k1)
                       co
                       (zapTCvSubst subst2, substTy subst2 k2)
                       (ty:tys)

    -- tys might not be empty, if the left-hand type of the original coercion
    -- didn't have enough binders
    go acc_arg_cos _ki1 co _ki2 _tys = (reverse acc_arg_cos, co)

-- | Attempts to obtain the type variable underlying a 'Coercion'
getCoVar_maybe :: Coercion -> Maybe CoVar
getCoVar_maybe (CoVarCo cv) = Just cv
getCoVar_maybe _            = Nothing

-- | Attempts to tease a coercion apart into a type constructor and the application
-- of a number of coercion arguments to that constructor
splitTyConAppCo_maybe :: Coercion -> Maybe (TyCon, [Coercion])
splitTyConAppCo_maybe co
  | Just (ty, r) <- isReflCo_maybe co
  = do { (tc, tys) <- splitTyConApp_maybe ty
       ; let args = zipWith mkReflCo (tyConRolesX r tc) tys
       ; return (tc, args) }
splitTyConAppCo_maybe (TyConAppCo _ tc cos) = Just (tc, cos)
splitTyConAppCo_maybe (FunCo _ w arg res)   = Just (funTyCon, cos)
  where cos = [w, mkRuntimeRepCo arg, mkRuntimeRepCo res, arg, res]
            -- See Note [Function coercions]: a FunCo stands for a
            -- saturated (->) application with the rep coercions filled in
splitTyConAppCo_maybe _ = Nothing

multToCo :: Mult -> Coercion
multToCo r = mkNomReflCo r

-- first result has role equal to input; third result is Nominal
splitAppCo_maybe :: Coercion -> Maybe (Coercion, Coercion)
-- ^ Attempt to take a coercion application apart.
splitAppCo_maybe (AppCo co arg) = Just (co, arg)
splitAppCo_maybe (TyConAppCo r tc args)
  | args `lengthExceeds` tyConArity tc
  , Just (args', arg') <- snocView args
  = Just ( mkTyConAppCo r tc args', arg' )

  | not (mustBeSaturated tc)
    -- Never create unsaturated type family apps!
  , Just (args', arg') <- snocView args
  , Just arg'' <- setNominalRole_maybe (nthRole r tc (length args')) arg'
  = Just ( mkTyConAppCo r tc args', arg'' )
       -- Use mkTyConAppCo to preserve the invariant
       --  that identity coercions are always represented by Refl

splitAppCo_maybe co
  | Just (ty, r) <- isReflCo_maybe co
  , Just (ty1, ty2) <- splitAppTy_maybe ty
  = Just (mkReflCo r ty1, mkNomReflCo ty2)
splitAppCo_maybe _ = Nothing

-- Only used in specialise/Rules
splitFunCo_maybe :: Coercion -> Maybe (Coercion, Coercion)
splitFunCo_maybe (FunCo _ _ arg res) = Just (arg, res)
splitFunCo_maybe _ = Nothing

splitForAllCo_maybe :: Coercion -> Maybe (TyCoVar, Coercion, Coercion)
splitForAllCo_maybe (ForAllCo tv k_co co) = Just (tv, k_co, co)
splitForAllCo_maybe _ = Nothing

-- | Like 'splitForAllCo_maybe', but only returns Just for tyvar binder
splitForAllCo_ty_maybe :: Coercion -> Maybe (TyVar, Coercion, Coercion)
splitForAllCo_ty_maybe (ForAllCo tv k_co co)
  | isTyVar tv = Just (tv, k_co, co)
splitForAllCo_ty_maybe _ = Nothing

-- | Like 'splitForAllCo_maybe', but only returns Just for covar binder
splitForAllCo_co_maybe :: Coercion -> Maybe (CoVar, Coercion, Coercion)
splitForAllCo_co_maybe (ForAllCo cv k_co co)
  | isCoVar cv = Just (cv, k_co, co)
splitForAllCo_co_maybe _ = Nothing

-------------------------------------------------------
-- and some coercion kind stuff

coVarLType, coVarRType :: HasDebugCallStack => CoVar -> Type
coVarLType cv | (_, _, ty1, _, _) <- coVarKindsTypesRole cv = ty1
coVarRType cv | (_, _, _, ty2, _) <- coVarKindsTypesRole cv = ty2

coVarTypes :: HasDebugCallStack => CoVar -> Pair Type
coVarTypes cv
  | (_, _, ty1, ty2, _) <- coVarKindsTypesRole cv
  = Pair ty1 ty2

-- Decompose the type of a coercion variable, which must be a saturated
-- application of (~#) or (~R#): returns the two kinds, the two types,
-- and the role; panics otherwise.
coVarKindsTypesRole :: HasDebugCallStack => CoVar -> (Kind,Kind,Type,Type,Role)
coVarKindsTypesRole cv
  | Just (tc, [k1,k2,ty1,ty2]) <- splitTyConApp_maybe (varType cv)
  = (k1, k2, ty1, ty2, eqTyConRole tc)
  | otherwise
  = pprPanic "coVarKindsTypesRole, non coercion variable"
             (ppr cv $$ ppr (varType cv))

coVarKind :: CoVar -> Type
coVarKind cv
  = assert (isCoVar cv )
    varType cv

coVarRole :: CoVar -> Role
coVarRole cv
  = eqTyConRole (case tyConAppTyCon_maybe (varType cv) of
                   Just tc0 -> tc0
                   Nothing  -> pprPanic "coVarRole: not tyconapp" (ppr cv))

eqTyConRole :: TyCon -> Role
-- Given (~#) or (~R#) return the Nominal or Representational respectively
eqTyConRole tc
  | tc `hasKey` eqPrimTyConKey
  = Nominal
  | tc `hasKey` eqReprPrimTyConKey
  = Representational
  | otherwise
  = pprPanic "eqTyConRole: unknown tycon" (ppr tc)

-- | Given a coercion @co1 :: (a :: TYPE r1) ~ (b :: TYPE r2)@,
-- produce a coercion @rep_co :: r1 ~ r2@.
mkRuntimeRepCo :: HasDebugCallStack => Coercion -> Coercion
mkRuntimeRepCo co
  = mkNthCo Nominal 0 kind_co
  where
    kind_co = mkKindCo co  -- kind_co :: TYPE r1 ~ TYPE r2
                           -- (up to silliness with Constraint)

isReflCoVar_maybe :: Var -> Maybe Coercion
-- If cv :: t~t then isReflCoVar_maybe cv = Just (Refl t)
-- Works on all kinds of Vars, not just CoVars
isReflCoVar_maybe cv
  | isCoVar cv
  , Pair ty1 ty2 <- coVarTypes cv
  , ty1 `eqType` ty2
  = Just (mkReflCo (coVarRole cv) ty1)
  | otherwise
  = Nothing

-- | Tests if this coercion is obviously a generalized reflexive coercion.
-- Guaranteed to work very quickly.
isGReflCo :: Coercion -> Bool
isGReflCo (GRefl{}) = True
isGReflCo (Refl{})  = True -- Refl ty == GRefl N ty MRefl
isGReflCo _         = False

-- | Tests if this coercion is obviously reflexive. Guaranteed to work
-- very quickly. Sometimes a coercion can be reflexive, but not obviously
-- so. c.f. 'isReflexiveCo'
isReflCo :: Coercion -> Bool
isReflCo (Refl{}) = True
isReflCo (GRefl _ _ mco) | isGReflMCo mco = True
isReflCo _ = False

-- | Returns the type coerced if this coercion is a generalized reflexive
-- coercion. Guaranteed to work very quickly.
isGReflCo_maybe :: Coercion -> Maybe (Type, Role)
isGReflCo_maybe (GRefl r ty _) = Just (ty, r)
isGReflCo_maybe (Refl ty)      = Just (ty, Nominal)
isGReflCo_maybe _ = Nothing

-- | Returns the type coerced if this coercion is reflexive. Guaranteed
-- to work very quickly. Sometimes a coercion can be reflexive, but not
-- obviously so. c.f. 'isReflexiveCo_maybe'
isReflCo_maybe :: Coercion -> Maybe (Type, Role)
isReflCo_maybe (Refl ty) = Just (ty, Nominal)
isReflCo_maybe (GRefl r ty mco) | isGReflMCo mco = Just (ty, r)
isReflCo_maybe _ = Nothing

-- | Slowly checks if the coercion is reflexive. Don't call this in a loop,
-- as it walks over the entire coercion.
isReflexiveCo :: Coercion -> Bool
isReflexiveCo = isJust . isReflexiveCo_maybe

-- | Extracts the coerced type from a reflexive coercion. This potentially
-- walks over the entire coercion, so avoid doing this in a loop.
isReflexiveCo_maybe :: Coercion -> Maybe (Type, Role)
isReflexiveCo_maybe (Refl ty) = Just (ty, Nominal)
isReflexiveCo_maybe (GRefl r ty mco) | isGReflMCo mco = Just (ty, r)
isReflexiveCo_maybe co
  | ty1 `eqType` ty2
  = Just (ty1, r)
  | otherwise
  = Nothing
  where (Pair ty1 ty2, r) = coercionKindRole co

{-
%************************************************************************
%*                                                                      *
                   Building coercions
%*                                                                      *
%************************************************************************

These "smart constructors" maintain the invariants listed in the definition
of Coercion, and they perform very basic optimizations.

Note [Role twiddling functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are a plethora of functions for twiddling roles:

mkSubCo: Requires a nominal input coercion and always produces a
representational output.
This is used when you (the programmer) are sure you
know exactly that role you have and what you want.

downgradeRole_maybe: This function takes both the input role and the output
role as parameters. (The *output* role comes first!) It can only *downgrade*
a role -- that is, change it from N to R or P, or from R to P. This one-way
behavior is why there is the "_maybe". If an upgrade is requested, this
function produces Nothing.

This is used when you need to change the role of a coercion, but you're not
sure (as you're writing the code) of which roles are involved.

This function could have been written using coercionRole to ascertain the role
of the input. But, that function is recursive, and the caller of
downgradeRole_maybe often knows the input role. So, this is more efficient.

downgradeRole: This is just like downgradeRole_maybe, but it panics if the
conversion isn't a downgrade.

setNominalRole_maybe: This is the only function that can *upgrade* a coercion.
The result (if it exists) is always Nominal. The input can be at any role. It
works on a "best effort" basis, as it should never be strictly necessary to
upgrade a coercion during compilation. It is currently only used within GHC in
splitAppCo_maybe. In order to be a proper inverse of mkAppCo, the second
coercion that splitAppCo_maybe returns must be nominal. But, it's conceivable
that splitAppCo_maybe is operating over a TyConAppCo that uses a
representational coercion. Hence the need for setNominalRole_maybe.
splitAppCo_maybe, in turn, is used only within coercion optimization -- thus,
it is not absolutely critical that setNominalRole_maybe be complete.

Note that setNominalRole_maybe will never upgrade a phantom UnivCo. Phantom
UnivCos are perfectly type-safe, whereas representational and nominal ones are
not. (Nominal ones are no worse than representational ones, so this function
*will* change a UnivCo Representational to a UnivCo Nominal.)

Conal Elliott also came across a need for this function while working with the
GHC API, as he was decomposing Core casts. The Core casts use representational
coercions, as they must, but his use case required nominal coercions (he was
building a GADT). So, that's why this function is exported from this module.

One might ask: shouldn't downgradeRole_maybe just use setNominalRole_maybe as
appropriate? I (Richard E.) have decided not to do this, because upgrading a
role is bizarre and a caller should have to ask for this behavior explicitly.
-}

-- | Make a reflexive coercion
mkReflCo :: Role -> Type -> Coercion
mkReflCo Nominal ty = Refl ty
mkReflCo r       ty = GRefl r ty MRefl

-- | Make a representational reflexive coercion
mkRepReflCo :: Type -> Coercion
mkRepReflCo ty = GRefl Representational ty MRefl

-- | Make a nominal reflexive coercion
mkNomReflCo :: Type -> Coercion
mkNomReflCo = Refl

-- | Apply a type constructor to a list of coercions. It is the
-- caller's responsibility to get the roles correct on argument coercions.
mkTyConAppCo :: HasDebugCallStack => Role -> TyCon -> [Coercion] -> Coercion
mkTyConAppCo r tc cos
  | [w, _rep1, _rep2, co1, co2] <- cos   -- See Note [Function coercions]
  , isFunTyCon tc
  = -- (a :: TYPE ra) -> (b :: TYPE rb)  ~  (c :: TYPE rc) -> (d :: TYPE rd)
    -- rep1 :: ra ~ rc  rep2 :: rb ~ rd
    -- co1 :: a ~ c  co2 :: b ~ d
    mkFunCo r w co1 co2

  -- Expand type synonyms
  | Just (tv_co_prs, rhs_ty, leftover_cos) <- expandSynTyCon_maybe tc cos
  = mkAppCos (liftCoSubst r (mkLiftingContext tv_co_prs) rhs_ty) leftover_cos

  | Just tys_roles <- traverse isReflCo_maybe cos
  = mkReflCo r (mkTyConApp tc (map fst tys_roles))
    -- See Note [Refl invariant]

  | otherwise = TyConAppCo r tc cos

-- | Build a function 'Coercion' from two other 'Coercion's. That is,
-- given @co1 :: a ~ b@ and @co2 :: x ~ y@ produce @co :: (a -> x) ~ (b -> y)@.
mkFunCo :: Role -> CoercionN -> Coercion -> Coercion -> Coercion
mkFunCo r w co1 co2
    -- See Note [Refl invariant]
  | Just (ty1, _) <- isReflCo_maybe co1
  , Just (ty2, _) <- isReflCo_maybe co2
  , Just (w, _) <- isReflCo_maybe w
    -- NB: this last pattern deliberately shadows the parameter 'w':
    -- the bound 'w' is the multiplicity *type* extracted from the
    -- reflexive multiplicity coercion, as needed by mkVisFunTy
  = mkReflCo r (mkVisFunTy w ty1 ty2)
  | otherwise = FunCo r w co1 co2

-- | Apply a 'Coercion' to another 'Coercion'.
-- The second coercion must be Nominal, unless the first is Phantom.
-- If the first is Phantom, then the second can be either Phantom or Nominal.
mkAppCo :: Coercion     -- ^ :: t1 ~r t2
        -> Coercion     -- ^ :: s1 ~N s2, where s1 :: k1, s2 :: k2
        -> Coercion     -- ^ :: t1 s1 ~r t2 s2
mkAppCo co arg
  | Just (ty1, r) <- isReflCo_maybe co
  , Just (ty2, _) <- isReflCo_maybe arg
  = mkReflCo r (mkAppTy ty1 ty2)

  | Just (ty1, r) <- isReflCo_maybe co
  , Just (tc, tys) <- splitTyConApp_maybe ty1
    -- Expand type synonyms; a TyConAppCo can't have a type synonym (#9102)
  = mkTyConAppCo r tc (zip_roles (tyConRolesX r tc) tys)
  where
    zip_roles (r1:_)  []        = [downgradeRole r1 Nominal arg]
    zip_roles (r1:rs) (ty1:tys) = mkReflCo r1 ty1 : zip_roles rs tys
    zip_roles _       _         = panic "zip_roles" -- but the roles are infinite...

mkAppCo (TyConAppCo r tc args) arg
  = case r of
      Nominal          -> mkTyConAppCo Nominal tc (args ++ [arg])
      Representational -> mkTyConAppCo Representational tc (args ++ [arg'])
        where new_role = (tyConRolesRepresentational tc) !! (length args)
              arg'     = downgradeRole new_role Nominal arg
      Phantom          -> mkTyConAppCo Phantom tc (args ++ [toPhantomCo arg])
mkAppCo co arg = AppCo co arg
-- Note, mkAppCo is careful to maintain invariants regarding
-- where Refl constructors appear; see the comments in the definition
-- of Coercion and the Note [Refl invariant] in GHC.Core.TyCo.Rep.

-- | Applies multiple 'Coercion's to another 'Coercion', from left to right.
-- See also 'mkAppCo'.
mkAppCos :: Coercion -> [Coercion] -> Coercion
mkAppCos co1 cos = foldl' mkAppCo co1 cos

{- Note [Unused coercion variable in ForAllCo]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See Note [Unused coercion variable in ForAllTy] in GHC.Core.TyCo.Rep for the
motivation for checking coercion variable in types.
To lift the design choice to (ForAllCo cv kind_co body_co), we have two options:

(1) In mkForAllCo, we check whether cv is a coercion variable
    and whether it is not used in body_co. If so we construct a FunCo.
(2) We don't do this check in mkForAllCo.
    In coercionKind, we use mkTyCoForAllTy to perform the check and construct
    a FunTy when necessary.

We chose (2) for two reasons:

* for a coercion, all that matters is its kind, So ForAllCo or FunCo does not
  make a difference.
* even if cv occurs in body_co, it is possible that cv does not occur in the
  kind of body_co. Therefore the check in coercionKind is inevitable.

The last wrinkle is that there are restrictions around the use of the cv in the
coercion, as described in Section 5.8.5.2 of Richard's thesis. The idea is that
we cannot prove that the type system is consistent with unrestricted use of
this cv; the consistency proof uses an untyped rewrite relation that works over
types with all coercions and casts removed. So, we can allow the cv to appear
only in positions that are erased. As an approximation of this (and keeping
close to the published theory), we currently allow the cv only within the type
in a Refl node and under a GRefl node (including in the Coercion stored in a
GRefl). It's possible other places are OK, too, but this is a safe
approximation.

Sadly, with heterogeneous equality, this restriction might be able to be
violated; Richard's thesis is unable to prove that it isn't. Specifically, the
liftCoSubst function might create an invalid coercion. Because a violation of
the restriction might lead to a program that "goes wrong", it is checked all
the time, even in a production compiler and without -dcore-lint. We *have*
proved that the problem does not occur with homogeneous equality, so this check
can be dropped once ~# is made to be homogeneous.
-}

-- | Make a Coercion from a tycovar, a kind coercion, and a body coercion.
-- The kind of the tycovar should be the left-hand kind of the kind coercion.
-- See Note [Unused coercion variable in ForAllCo]
mkForAllCo :: TyCoVar -> CoercionN -> Coercion -> Coercion
mkForAllCo v kind_co co
  | assert (varType v `eqType` (pFst $ coercionKind kind_co)) True
  , assert (isTyVar v || almostDevoidCoVarOfCo v co) True
  , Just (ty, r) <- isReflCo_maybe co
  , isGReflCo kind_co
  = mkReflCo r (mkTyCoInvForAllTy v ty)
  | otherwise
  = ForAllCo v kind_co co

-- | Like 'mkForAllCo', but the inner coercion shouldn't be an obvious
-- reflexive coercion. For example, it is guaranteed in 'mkForAllCos'.
-- The kind of the tycovar should be the left-hand kind of the kind coercion.
mkForAllCo_NoRefl :: TyCoVar -> CoercionN -> Coercion -> Coercion
mkForAllCo_NoRefl v kind_co co
  | assert (varType v `eqType` (pFst $ coercionKind kind_co)) True
  , assert (isTyVar v || almostDevoidCoVarOfCo v co) True
  , assert (not (isReflCo co)) True
  , isCoVar v
  , not (v `elemVarSet` tyCoVarsOfCo co)
    -- See Note [Unused coercion variable in ForAllCo]
  = FunCo (coercionRole co) (multToCo Many) kind_co co
      -- Functions from coercions are always unrestricted
  | otherwise
  = ForAllCo v kind_co co

-- | Make nested ForAllCos
mkForAllCos :: [(TyCoVar, CoercionN)] -> Coercion -> Coercion
mkForAllCos bndrs co
  | Just (ty, r ) <- isReflCo_maybe co
  -- Peel off the trailing reflexive binders and fold them into one
  -- reflexive forall-type, keeping the Refl invariant
  = let (refls_rev'd, non_refls_rev'd) = span (isReflCo . snd) (reverse bndrs) in
    foldl' (flip $ uncurry mkForAllCo_NoRefl)
           (mkReflCo r (mkTyCoInvForAllTys (reverse (map fst refls_rev'd)) ty))
           non_refls_rev'd
  | otherwise
  = foldr (uncurry mkForAllCo_NoRefl) co bndrs

-- | Make a Coercion quantified over a type/coercion variable;
-- the variable has the same type in both sides of the coercion
mkHomoForAllCos :: [TyCoVar] -> Coercion -> Coercion
mkHomoForAllCos vs co
  | Just (ty, r) <- isReflCo_maybe co
  = mkReflCo r (mkTyCoInvForAllTys vs ty)
  | otherwise
  = mkHomoForAllCos_NoRefl vs co

-- | Like 'mkHomoForAllCos', but the inner coercion shouldn't be an obvious
-- reflexive coercion. For example, it is guaranteed in 'mkHomoForAllCos'.
mkHomoForAllCos_NoRefl :: [TyCoVar] -> Coercion -> Coercion
mkHomoForAllCos_NoRefl vs orig_co
  = assert (not (isReflCo orig_co))
    foldr go orig_co vs
  where
    go v co = mkForAllCo_NoRefl v (mkNomReflCo (varType v)) co

mkCoVarCo :: CoVar -> Coercion
-- cv :: s ~# t
-- See Note [mkCoVarCo]
mkCoVarCo cv = CoVarCo cv

mkCoVarCos :: [CoVar] -> [Coercion]
mkCoVarCos = map mkCoVarCo

{- Note [mkCoVarCo]
~~~~~~~~~~~~~~~~~~~
In the past, mkCoVarCo optimised (c :: t~t) to (Refl t).  That is
valid (although see Note [Unbound RULE binders] in GHC.Core.Rules), but
it's a relatively expensive test and perhaps better done in
optCoercion.  Not a big deal either way.
-}

-- | Extract a covar, if possible. This check is dirty. Be ashamed
-- of yourself. (It's dirty because it cares about the structure of
-- a coercion, which is morally reprehensible.)
isCoVar_maybe :: Coercion -> Maybe CoVar
isCoVar_maybe (CoVarCo cv) = Just cv
isCoVar_maybe _            = Nothing

mkAxInstCo :: Role -> CoAxiom br -> BranchIndex -> [Type] -> [Coercion]
           -> Coercion
-- mkAxInstCo can legitimately be called over-saturated;
-- i.e.
-- with more type arguments than the coercion requires
mkAxInstCo role ax index tys cos
  | arity == n_tys = downgradeRole role ax_role $
                     mkAxiomInstCo ax_br index (rtys `chkAppend` cos)
  | otherwise      = assert (arity < n_tys) $
                     downgradeRole role ax_role $
                     mkAppCos (mkAxiomInstCo ax_br index
                                             (ax_args `chkAppend` cos))
                              leftover_args
  where
    n_tys     = length tys
    ax_br     = toBranchedAxiom ax
    branch    = coAxiomNthBranch ax_br index
    tvs       = coAxBranchTyVars branch
    arity     = length tvs
    arg_roles = coAxBranchRoles branch
    rtys      = zipWith mkReflCo (arg_roles ++ repeat Nominal) tys
    (ax_args, leftover_args)
              = splitAt arity rtys
    ax_role   = coAxiomRole ax

-- worker function
mkAxiomInstCo :: CoAxiom Branched -> BranchIndex -> [Coercion] -> Coercion
mkAxiomInstCo ax index args
  = assert (args `lengthIs` coAxiomArity ax index) $
    AxiomInstCo ax index args

-- to be used only with unbranched axioms
mkUnbranchedAxInstCo :: Role -> CoAxiom Unbranched
                     -> [Type] -> [Coercion] -> Coercion
mkUnbranchedAxInstCo role ax tys cos
  = mkAxInstCo role ax 0 tys cos

mkAxInstRHS :: CoAxiom br -> BranchIndex -> [Type] -> [Coercion] -> Type
-- Instantiate the axiom with specified types,
-- returning the instantiated RHS
-- A companion to mkAxInstCo:
--    mkAxInstRhs ax index tys = snd (coercionKind (mkAxInstCo ax index tys))
mkAxInstRHS ax index tys cos
  = assert (tvs `equalLength` tys1) $
    mkAppTys rhs' tys2
  where
    branch       = coAxiomNthBranch ax index
    tvs          = coAxBranchTyVars branch
    cvs          = coAxBranchCoVars branch
    (tys1, tys2) = splitAtList tvs tys
    rhs'         = substTyWith tvs tys1 $
                   substTyWithCoVars cvs cos $
                   coAxBranchRHS branch

mkUnbranchedAxInstRHS :: CoAxiom Unbranched -> [Type] -> [Coercion] -> Type
mkUnbranchedAxInstRHS ax = mkAxInstRHS ax 0

-- | Return the left-hand type of the axiom, when the axiom is instantiated
-- at the types given.
mkAxInstLHS :: CoAxiom br -> BranchIndex -> [Type] -> [Coercion] -> Type
mkAxInstLHS ax index tys cos
  = assert (tvs `equalLength` tys1) $
    mkTyConApp fam_tc (lhs_tys `chkAppend` tys2)
  where
    branch       = coAxiomNthBranch ax index
    tvs          = coAxBranchTyVars branch
    cvs          = coAxBranchCoVars branch
    (tys1, tys2) = splitAtList tvs tys
    lhs_tys      = substTysWith tvs tys1 $
                   substTysWithCoVars cvs cos $
                   coAxBranchLHS branch
    fam_tc       = coAxiomTyCon ax

-- | Instantiate the left-hand side of an unbranched axiom
mkUnbranchedAxInstLHS :: CoAxiom Unbranched -> [Type] -> [Coercion] -> Type
mkUnbranchedAxInstLHS ax = mkAxInstLHS ax 0

-- | Make a coercion from a coercion hole
mkHoleCo :: CoercionHole -> Coercion
mkHoleCo h = HoleCo h

-- | Make a universal coercion between two arbitrary types.
mkUnivCo :: UnivCoProvenance
         -> Role       -- ^ role of the built coercion, "r"
         -> Type       -- ^ t1 :: k1
         -> Type       -- ^ t2 :: k2
         -> Coercion   -- ^ :: t1 ~r t2
mkUnivCo prov role ty1 ty2
  | ty1 `eqType` ty2 = mkReflCo role ty1
  | otherwise        = UnivCo prov role ty1 ty2

-- | Create a symmetric version of the given 'Coercion' that asserts
--   equality between the same types but in the other "direction", so
--   a kind of @t1 ~ t2@ becomes the kind @t2 ~ t1@.
mkSymCo :: Coercion -> Coercion
-- Do a few simple optimizations, but don't bother pushing occurrences
-- of symmetry to the leaves; the optimizer will take care of that.
mkSymCo co | isReflCo co      = co
mkSymCo    (SymCo co)         = co
mkSymCo    (SubCo (SymCo co)) = SubCo co
mkSymCo co                    = SymCo co

-- | Create a new 'Coercion' by composing the two given 'Coercion's
-- transitively.
--   (co1 ; co2)
mkTransCo :: Coercion -> Coercion -> Coercion
-- Reflexive coercions are identities for transitivity; adjacent GRefl
-- kind-casts are merged by composing their kind coercions.
mkTransCo co1 co2 | isReflCo co1 = co2
                  | isReflCo co2 = co1
mkTransCo (GRefl r t1 (MCo co1)) (GRefl _ _ (MCo co2))
  = GRefl r t1 (MCo $ mkTransCo co1 co2)
mkTransCo co1 co2 = TransCo co1 co2

mkNthCo :: HasDebugCallStack
        => Role  -- The role of the coercion you're creating
        -> Int   -- Zero-indexed
        -> Coercion
        -> Coercion
mkNthCo r n co
  = assertPpr good_call bad_call_msg $
    go n co
  where
    Pair ty1 ty2 = coercionKind co

    -- Projection 0 out of a reflexive forall-coercion is the (reflexive)
    -- kind of the bound variable
    go 0 co
      | Just (ty, _) <- isReflCo_maybe co
      , Just (tv, _) <- splitForAllTyCoVar_maybe ty
      = -- works for both tyvar and covar
        assert (r == Nominal) $
        mkNomReflCo (varType tv)

    -- Projection out of a reflexive TyConApp coercion is reflexivity
    -- at the n'th argument
    go n co
      | Just (ty, r0) <- isReflCo_maybe co
      , let tc = tyConAppTyCon ty
      = assertPpr (ok_tc_app ty n) (ppr n $$ ppr ty) $
        assert (nthRole r0 tc n == r) $
        mkReflCo r (tyConAppArgN n ty)
      where ok_tc_app :: Type -> Int -> Bool
            ok_tc_app ty n
              | Just (_, tys) <- splitTyConApp_maybe ty
              = tys `lengthExceeds` n
              | isForAllTy ty  -- nth:0 pulls out a kind coercion from a hetero forall
              = n == 0
              | otherwise
              = False

    go 0 (ForAllCo _ kind_co _)
      = assert (r == Nominal)
        kind_co
      -- If co :: (forall a1:k1. t1) ~ (forall a2:k2. t2)
      -- then (nth 0 co :: k1 ~N k2)
      -- If co :: (forall a1:t1 ~ t2. t1) ~ (forall a2:t3 ~ t4. t2)
      -- then (nth 0 co :: (t1 ~ t2) ~N (t3 ~ t4))

    go n (FunCo _ w arg res)
      = mkNthCoFunCo n w arg res

    go n (TyConAppCo r0 tc arg_cos) = assertPpr (r == nthRole r0 tc n)
                                        (vcat [ ppr tc
                                              , ppr arg_cos
                                              , ppr r0
                                              , ppr n
                                              , ppr r ]) $
                                      arg_cos `getNth` n

    go n (SymCo co)  -- Recurse, hoping to get to a TyConAppCo or FunCo
      = mkSymCo (go n co)

    go n co
      = NthCo r n co

    -- Assertion checking
    bad_call_msg = vcat [ text "Coercion =" <+> ppr co
                        , text "LHS ty =" <+> ppr ty1
                        , text "RHS ty =" <+> ppr ty2
                        , text "n =" <+> ppr n, text "r =" <+> ppr r
                        , text "coercion role =" <+> ppr (coercionRole co) ]

    good_call
      -- If the Coercion passed in is between forall-types, then the Int must
      -- be 0 and the role must be Nominal.
      | Just (_tv1, _) <- splitForAllTyCoVar_maybe ty1
      , Just (_tv2, _) <- splitForAllTyCoVar_maybe ty2
      = n == 0 && r == Nominal

      -- If the Coercion passed in is between T tys and T tys', then the Int
      -- must be less than the length of tys/tys' (which must be the same
      -- lengths).
      --
      -- If the role of the Coercion is nominal, then the role passed in must
      -- be nominal. If the role of the Coercion is representational, then the
      -- role passed in must be tyConRolesRepresentational T !! n. If the role
      -- of the Coercion is Phantom, then the role passed in must be Phantom.
      --
      -- See also Note [NthCo Cached Roles] if you're wondering why it's
      -- blaringly obvious that we should be *computing* this role instead of
      -- passing it in.
      | Just (tc1, tys1) <- splitTyConApp_maybe ty1
      , Just (tc2, tys2) <- splitTyConApp_maybe ty2
      , tc1 == tc2
      = let len1 = length tys1
            len2 = length tys2
            good_role = case coercionRole co of
                          Nominal          -> r == Nominal
                          Representational -> r == (tyConRolesRepresentational tc1 !! n)
                          Phantom          -> r == Phantom
        in len1 == len2 && n < len1 && good_role

      | otherwise
      = True

-- | Extract the nth field of a FunCo
mkNthCoFunCo :: Int        -- ^ "n"
             -> CoercionN  -- ^ multiplicity coercion
             -> Coercion   -- ^ argument coercion
             -> Coercion   -- ^ result coercion
             -> Coercion   -- ^ nth coercion from a FunCo
-- See Note [Function coercions]
-- If FunCo _ mult arg_co res_co ::   (s1:TYPE sk1 :mult-> s2:TYPE sk2)
--                                  ~ (t1:TYPE tk1 :mult-> t2:TYPE tk2)
-- Then we want to behave as if co was
--    TyConAppCo mult argk_co resk_co arg_co res_co
-- where
--    argk_co :: sk1 ~ tk1  =  mkNthCo 0 (mkKindCo arg_co)
--    resk_co :: sk2 ~ tk2  =  mkNthCo 0 (mkKindCo res_co)
--    i.e. mkRuntimeRepCo
mkNthCoFunCo n w co1 co2 = case n of
  0 -> w
  1 -> mkRuntimeRepCo co1
  2 -> mkRuntimeRepCo co2
  3 -> co1
  4 -> co2
  _ -> pprPanic "mkNthCo(FunCo)" (ppr n $$ ppr w $$ ppr co1 $$ ppr co2)

-- | If you're about to call @mkNthCo r n co@, then @r@ should be
-- whatever @nthCoRole n co@ returns.
nthCoRole :: Int -> Coercion -> Role
nthCoRole n co
  | Just (tc, _) <- splitTyConApp_maybe lty
  = nthRole r tc n

  | Just _ <- splitForAllTyCoVar_maybe lty
  = Nominal

  | otherwise
  = pprPanic "nthCoRole" (ppr co)

  where
    lty = coercionLKind co
    r   = coercionRole co

mkLRCo :: LeftOrRight -> Coercion -> Coercion
-- Projects the left or right component of an AppTy coercion;
-- reflexive input yields a reflexive projection.
mkLRCo lr co
  | Just (ty, eq) <- isReflCo_maybe co
  = mkReflCo eq (pickLR lr (splitAppTy ty))
  | otherwise
  = LRCo lr co

-- | Instantiates a 'Coercion'.
mkInstCo :: Coercion -> Coercion -> Coercion
-- When instantiating a ForAllCo with a reflexive argument, we can
-- substitute directly into the body instead of building an InstCo.
mkInstCo (ForAllCo tcv _kind_co body_co) co
  | Just (arg, _) <- isReflCo_maybe co
      -- works for both tyvar and covar
  = substCoUnchecked (zipTCvSubst [tcv] [arg]) body_co
mkInstCo co arg = InstCo co arg

-- | Given @ty :: k1@, @co :: k1 ~ k2@,
-- produces @co' :: ty ~r (ty |> co)@
mkGReflRightCo :: Role -> Type -> CoercionN -> Coercion
mkGReflRightCo r ty co
  | isGReflCo co = mkReflCo r ty
    -- the kinds of @k1@ and @k2@ are the same, thus @isGReflCo@
    -- instead of @isReflCo@
  | otherwise = GRefl r ty (MCo co)

-- | Given @ty :: k1@, @co :: k1 ~ k2@,
-- produces @co' :: (ty |> co) ~r ty@
mkGReflLeftCo :: Role -> Type -> CoercionN -> Coercion
mkGReflLeftCo r ty co
  | isGReflCo co = mkReflCo r ty
    -- the kinds of @k1@ and @k2@ are the same, thus @isGReflCo@
    -- instead of @isReflCo@
  | otherwise    = mkSymCo $ GRefl r ty (MCo co)

-- | Given @ty :: k1@, @co :: k1 ~ k2@, @co2:: ty ~r ty'@,
-- produces @co' :: (ty |> co) ~r ty'
-- It is not only a utility function, but it saves allocation when co
-- is a GRefl coercion.
mkCoherenceLeftCo :: Role -> Type -> CoercionN -> Coercion -> Coercion
mkCoherenceLeftCo r ty co co2
  | isGReflCo co = co2
  | otherwise    = (mkSymCo $ GRefl r ty (MCo co)) `mkTransCo` co2

-- | Given @ty :: k1@, @co :: k1 ~ k2@, @co2:: ty' ~r ty@,
-- produces @co' :: ty' ~r (ty |> co)
-- It is not only a utility function, but it saves allocation when co
-- is a GRefl coercion.
mkCoherenceRightCo :: Role -> Type -> CoercionN -> Coercion -> Coercion
mkCoherenceRightCo r ty co co2
  | isGReflCo co = co2
  | otherwise    = co2 `mkTransCo` GRefl r ty (MCo co)

-- | Given @co :: (a :: k) ~ (b :: k')@ produce @co' :: k ~ k'@.
mkKindCo :: Coercion -> Coercion
mkKindCo co | Just (ty, _) <- isReflCo_maybe co = Refl (typeKind ty)
mkKindCo (GRefl _ _ (MCo co))              = co
mkKindCo (UnivCo (PhantomProv h) _ _ _)    = h
mkKindCo (UnivCo (ProofIrrelProv h) _ _ _) = h
mkKindCo co
  | Pair ty1 ty2 <- coercionKind co
       -- generally, calling coercionKind during coercion creation is a bad idea,
       -- as it can lead to exponential behavior. But, we don't have nested mkKindCos,
       -- so it's OK here.
  , let tk1 = typeKind ty1
        tk2 = typeKind ty2
  , tk1 `eqType` tk2
  = Refl tk1
  | otherwise
  = KindCo co

mkSubCo :: HasDebugCallStack => Coercion -> Coercion
-- Input coercion is Nominal, result is Representational
-- see also Note [Role twiddling functions]
mkSubCo (Refl ty) = GRefl Representational ty MRefl
mkSubCo (GRefl Nominal ty co) = GRefl Representational ty co
mkSubCo (TyConAppCo Nominal tc cos)
  = TyConAppCo Representational tc (applyRoles tc cos)
mkSubCo (FunCo Nominal w arg res)
  = FunCo Representational w
          (downgradeRole Representational Nominal arg)
          (downgradeRole Representational Nominal res)
mkSubCo co = assertPpr (coercionRole co == Nominal) (ppr co <+> ppr (coercionRole co)) $
             SubCo co

-- | Changes a role, but only a downgrade.
-- See Note [Role twiddling functions]
downgradeRole_maybe :: Role   -- ^ desired role
                    -> Role   -- ^ current role
                    -> Coercion -> Maybe Coercion
-- In (downgradeRole_maybe dr cr co) it's a precondition that
--                                   cr = coercionRole co
-- Returns Nothing when the change would be an upgrade
-- (Nominal < Representational < Phantom).
downgradeRole_maybe Nominal          Nominal          co = Just co
downgradeRole_maybe Nominal          _                _  = Nothing

downgradeRole_maybe Representational Nominal          co = Just (mkSubCo co)
downgradeRole_maybe Representational Representational co = Just co
downgradeRole_maybe Representational Phantom          _  = Nothing

downgradeRole_maybe Phantom          Phantom          co = Just co
downgradeRole_maybe Phantom          _                co = Just (toPhantomCo co)

-- | Like 'downgradeRole_maybe', but panics if the change isn't a downgrade.
-- See Note [Role twiddling functions]
downgradeRole :: Role  -- desired role
              -> Role  -- current role
              -> Coercion -> Coercion
downgradeRole r1 r2 co
  = case downgradeRole_maybe r1 r2 co of
      Just co' -> co'
      Nothing  -> pprPanic "downgradeRole" (ppr co)

mkAxiomRuleCo :: CoAxiomRule -> [Coercion] -> Coercion
mkAxiomRuleCo = AxiomRuleCo

-- | Make a "coercion between coercions".
mkProofIrrelCo :: Role       -- ^ role of the created coercion, "r"
               -> CoercionN  -- ^ :: phi1 ~N phi2
               -> Coercion   -- ^ g1 :: phi1
               -> Coercion   -- ^ g2 :: phi2
               -> Coercion   -- ^ :: g1 ~r g2

-- if the two coercion prove the same fact, I just don't care what
-- the individual coercions are.
mkProofIrrelCo r co g _ | isGReflCo co = mkReflCo r (mkCoercionTy g)
  -- kco is a kind coercion, thus @isGReflCo@ rather than @isReflCo@
mkProofIrrelCo r kco g1 g2 = mkUnivCo (ProofIrrelProv kco) r
                                      (mkCoercionTy g1) (mkCoercionTy g2)

{-
%************************************************************************
%*                                                                      *
                   Roles
%*                                                                      *
%************************************************************************
-}

-- | Converts a coercion to be nominal, if possible.
-- See Note [Role twiddling functions]
setNominalRole_maybe :: Role -- of input coercion
                     -> Coercion -> Maybe Coercion
setNominalRole_maybe r co
  | r == Nominal = Just co
  | otherwise = setNominalRole_maybe_helper co
  where
    -- The helper assumes its argument is *not* already Nominal
    setNominalRole_maybe_helper (SubCo co)  = Just co
    setNominalRole_maybe_helper co@(Refl _) = Just co
    setNominalRole_maybe_helper (GRefl _ ty co) = Just $ GRefl Nominal ty co
    setNominalRole_maybe_helper (TyConAppCo Representational tc cos)
      = do { cos' <- zipWithM setNominalRole_maybe (tyConRolesX Representational tc) cos
           ; return $ TyConAppCo Nominal tc cos' }
    setNominalRole_maybe_helper (FunCo Representational w co1 co2)
      = do { co1' <- setNominalRole_maybe Representational co1
           ; co2' <- setNominalRole_maybe Representational co2
           ; return $ FunCo Nominal w co1' co2'
           }
    setNominalRole_maybe_helper (SymCo co)
      = SymCo <$> setNominalRole_maybe_helper co
    setNominalRole_maybe_helper (TransCo co1 co2)
      = TransCo <$> setNominalRole_maybe_helper co1 <*> setNominalRole_maybe_helper co2
    setNominalRole_maybe_helper (AppCo co1 co2)
      = AppCo <$> setNominalRole_maybe_helper co1 <*> pure co2
    setNominalRole_maybe_helper (ForAllCo tv kind_co co)
      = ForAllCo tv kind_co <$> setNominalRole_maybe_helper co
    setNominalRole_maybe_helper (NthCo _r n co)
      -- NB, this case recurses via setNominalRole_maybe, not
      -- setNominalRole_maybe_helper!
      = NthCo Nominal n <$> setNominalRole_maybe (coercionRole co) co
    setNominalRole_maybe_helper (InstCo co arg)
      = InstCo <$> setNominalRole_maybe_helper co <*> pure arg
    setNominalRole_maybe_helper (UnivCo prov _ co1 co2)
      | case prov of PhantomProv _    -> False  -- should always be phantom
                     ProofIrrelProv _ -> True   -- it's always safe
                     PluginProv _     -> False  -- who knows? This choice is conservative.
                     CorePrepProv _   -> True
      = Just $ UnivCo prov Nominal co1 co2
    setNominalRole_maybe_helper _ = Nothing

-- | Make a phantom coercion between two types. The coercion passed
-- in must be a nominal coercion between the kinds of the
-- types.
mkPhantomCo :: Coercion -> Type -> Type -> Coercion
mkPhantomCo h t1 t2
  = mkUnivCo (PhantomProv h) Phantom t1 t2

-- takes any coercion and turns it into a Phantom coercion
toPhantomCo :: Coercion -> Coercion
toPhantomCo co
  = mkPhantomCo (mkKindCo co) ty1 ty2
  where Pair ty1 ty2 = coercionKind co

-- Convert args to a TyConAppCo Nominal to the same TyConAppCo Representational
applyRoles :: TyCon -> [Coercion] -> [Coercion]
applyRoles tc cos
  = zipWith (\r -> downgradeRole r Nominal) (tyConRolesRepresentational tc) cos

-- the Role parameter is the Role of the TyConAppCo
-- defined here because this is intimately concerned with the implementation
-- of TyConAppCo
-- Always returns an infinite list (with a infinite tail of Nominal)
tyConRolesX :: Role -> TyCon -> [Role]
tyConRolesX Representational tc = tyConRolesRepresentational tc
tyConRolesX role             _  = repeat role

-- Returns the roles of the parameters of a tycon, with an infinite tail
-- of Nominal
tyConRolesRepresentational :: TyCon -> [Role]
tyConRolesRepresentational tc = tyConRoles tc ++ repeat Nominal

nthRole :: Role -> TyCon -> Int -> Role
nthRole Nominal _ _ = Nominal
nthRole Phantom _ _ = Phantom
nthRole Representational tc n
  = (tyConRolesRepresentational tc) `getNth` n

ltRole :: Role -> Role -> Bool
-- Is one role "less" than another?
--     Nominal < Representational < Phantom
ltRole Phantom          _       = False
ltRole Representational Phantom = True
ltRole Representational _       = False
ltRole Nominal          Nominal = False
ltRole Nominal          _       = True

-------------------------------

-- | like mkKindCo, but aggressively & recursively optimizes to avoid using
-- a KindCo constructor. The output role is nominal.
promoteCoercion :: Coercion -> CoercionN

-- First cases handles anything that should yield refl.
promoteCoercion co = case co of

    _ | ki1 `eqType` ki2
      -> mkNomReflCo (typeKind ty1)
     -- no later branch should return refl
     --    The assert (False )s throughout
     -- are these cases explicitly, but they should never fire.

    Refl _ -> assert False $
              mkNomReflCo ki1

    GRefl _ _ MRefl -> assert False $
                       mkNomReflCo ki1

    GRefl _ _ (MCo co) -> co

    TyConAppCo _ tc args
      | Just co' <- instCoercions (mkNomReflCo (tyConKind tc)) args
      -> co'
      | otherwise
      -> mkKindCo co

    AppCo co1 arg
      | Just co' <- instCoercion (coercionKind (mkKindCo co1))
                                 (promoteCoercion co1) arg
      -> co'
      | otherwise
      -> mkKindCo co

    ForAllCo tv _ g
      | isTyVar tv
      -> promoteCoercion g

    ForAllCo _ _ _
      -> assert False $
         mkNomReflCo liftedTypeKind
      -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep

    FunCo _ _ _ _
      -> assert False $
         mkNomReflCo liftedTypeKind

    CoVarCo {}     -> mkKindCo co
    HoleCo {}      -> mkKindCo co
    AxiomInstCo {} -> mkKindCo co
    AxiomRuleCo {} -> mkKindCo co

    UnivCo (PhantomProv kco)    _ _ _ -> kco
    UnivCo (ProofIrrelProv kco) _ _ _ -> kco
    UnivCo (PluginProv _)       _ _ _ -> mkKindCo co
    UnivCo (CorePrepProv _)     _ _ _ -> mkKindCo co

    SymCo g
      -> mkSymCo (promoteCoercion g)

    TransCo co1 co2
      -> mkTransCo (promoteCoercion co1) (promoteCoercion co2)

    NthCo _ n co1
      | Just (_, args) <- splitTyConAppCo_maybe co1
      , args `lengthExceeds` n
      -> promoteCoercion (args !! n)

      | Just _ <- splitForAllCo_maybe co
      , n == 0
      -> assert False $
         mkNomReflCo liftedTypeKind

      | otherwise
      -> mkKindCo co

    LRCo lr co1
      | Just (lco, rco) <- splitAppCo_maybe co1
      -> case lr of
           CLeft  -> promoteCoercion lco
           CRight -> promoteCoercion rco

      | otherwise
      -> mkKindCo co

    InstCo g _
      | isForAllTy_ty ty1
      -> assert (isForAllTy_ty ty2) $
         promoteCoercion g

      | otherwise
      -> assert False $
         mkNomReflCo liftedTypeKind
      -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep

    KindCo _
      -> assert False $
         mkNomReflCo liftedTypeKind

    SubCo g
      -> promoteCoercion g

  where
    Pair ty1 ty2 = coercionKind co
    ki1 = typeKind ty1
    ki2 = typeKind ty2

-- | say @g = promoteCoercion h@. Then, @instCoercion g w@ yields @Just g'@,
-- where @g' = promoteCoercion (h w)@.
-- fails if this is not possible, if @g@ coerces between a forall and an ->
-- or if second parameter has a representational role and can't be used
-- with an InstCo.
instCoercion :: Pair Type  -- g :: lty ~ rty
             -> CoercionN  -- ^ must be nominal
             -> Coercion
             -> Maybe CoercionN
instCoercion (Pair lty rty) g w
  | (isForAllTy_ty lty && isForAllTy_ty rty)
  || (isForAllTy_co lty && isForAllTy_co rty)
  , Just w' <- setNominalRole_maybe (coercionRole w) w
    -- g :: (forall t1. t2) ~ (forall t1. t3)
    -- w :: s1 ~ s2
    -- returns mkInstCo g w' :: t2 [t1 |-> s1 ] ~ t3 [t1 |-> s2]
  = Just $ mkInstCo g w'
  | isFunTy lty && isFunTy rty
    -- g :: (t1 -> t2) ~ (t3 -> t4)
    -- returns t2 ~ t4
  = Just $ mkNthCo Nominal 4 g -- extract result type, which is the 5th argument to (->)
  | otherwise -- one forall, one funty...
  = Nothing

-- | Repeated use of 'instCoercion'
instCoercions :: CoercionN -> [Coercion] -> Maybe CoercionN
instCoercions g ws
  = let arg_ty_pairs = map coercionKind ws in
    snd <$> foldM go (coercionKind g, g) (zip arg_ty_pairs ws)
  where
    -- Threads the (kind pair, accumulated coercion) through each argument
    go :: (Pair Type, Coercion) -> (Pair Type, Coercion)
       -> Maybe (Pair Type, Coercion)
    go (g_tys, g) (w_tys, w)
      = do { g' <- instCoercion g_tys g w
           ; return (piResultTy <$> g_tys <*> w_tys, g') }

-- | Creates a new coercion with both of its types casted by different casts
-- @castCoercionKind2 g r t1 t2 h1 h2@, where @g :: t1 ~r t2@,
-- has type @(t1 |> h1) ~r (t2 |> h2)@.
-- @h1@ and @h2@ must be nominal.
castCoercionKind2 :: Coercion -> Role -> Type -> Type
                  -> CoercionN -> CoercionN -> Coercion
castCoercionKind2 g r t1 t2 h1 h2
  = mkCoherenceRightCo r t2 h2 (mkCoherenceLeftCo r t1 h1 g)

-- | @castCoercionKind1 g r t1 t2 h@ = @coercionKind g r t1 t2 h h@
-- That is, it's a specialised form of castCoercionKind, where the two
--          kind coercions are identical
-- @castCoercionKind1 g r t1 t2 h@, where @g :: t1 ~r t2@,
-- has type @(t1 |> h) ~r (t2 |> h)@.
-- @h@ must be nominal.
-- See Note [castCoercionKind1]
castCoercionKind1 :: Coercion -> Role -> Type -> Type
                  -> CoercionN -> Coercion
castCoercionKind1 g r t1 t2 h
  = case g of
      Refl {} -> assert (r == Nominal) $ -- Refl is always Nominal
                 mkNomReflCo (mkCastTy t2 h)
      GRefl _ _ mco -> case mco of
           MRefl       -> mkReflCo r (mkCastTy t2 h)
           MCo kind_co -> GRefl r (mkCastTy t1 h) $
                          MCo (mkSymCo h `mkTransCo` kind_co `mkTransCo` h)
      _ -> castCoercionKind2 g r t1 t2 h h

-- | Creates a new coercion with both of its types casted by different casts
-- @castCoercionKind g h1 h2@, where @g :: t1 ~r t2@,
-- has type @(t1 |> h1) ~r (t2 |> h2)@.
-- @h1@ and @h2@ must be nominal.
-- It calls @coercionKindRole@, so it's quite inefficient (which 'I' stands for)
-- Use @castCoercionKind2@ instead if @t1@, @t2@, and @r@ are known beforehand.
castCoercionKind :: Coercion -> CoercionN -> CoercionN -> Coercion
castCoercionKind g h1 h2
  = castCoercionKind2 g r t1 t2 h1 h2
  where (Pair t1 t2, r) = coercionKindRole g

mkFamilyTyConAppCo :: TyCon -> [CoercionN] -> CoercionN
-- ^ Given a family instance 'TyCon' and its arg 'Coercion's, return the
-- corresponding family 'Coercion'.  E.g:
--
-- > data family T a
-- > data instance T (Maybe b) = MkT b
--
-- Where the instance 'TyCon' is :RTL, so:
--
-- > mkFamilyTyConAppCo :RTL (co :: a ~# Int) = T (Maybe a) ~# T (Maybe Int)
--
-- cf. 'mkFamilyTyConApp'
mkFamilyTyConAppCo tc cos
  | Just (fam_tc, fam_tys) <- tyConFamInst_maybe tc
  , let tvs = tyConTyVars tc
        fam_cos = assertPpr (tvs `equalLength` cos) (ppr tc <+> ppr cos) $
                  map (liftCoSubstWith Nominal tvs cos) fam_tys
  = mkTyConAppCo Nominal fam_tc fam_cos
  | otherwise
  = mkTyConAppCo Nominal tc cos

-- See Note [Newtype coercions] in GHC.Core.TyCon
mkPiCos :: Role -> [Var] -> Coercion -> Coercion
mkPiCos r vs co = foldr (mkPiCo r) co vs

-- | Make a forall 'Coercion', where both types related by the coercion
-- are quantified over the same variable.
mkPiCo :: Role -> Var -> Coercion -> Coercion
mkPiCo r v co | isTyVar v = mkHomoForAllCos [v] co
              | isCoVar v = assert (not (v `elemVarSet` tyCoVarsOfCo co)) $
                  -- We didn't call mkForAllCo here because if v does not appear
                  -- in co, the argument coercion will be nominal. But here we
                  -- want it to be r. It is only called in 'mkPiCos', which is
                  -- only used in GHC.Core.Opt.Simplify.Utils, where we are sure for
                  -- now (Aug 2018) v won't occur in co.
                            mkFunResCo r scaled_ty co
              | otherwise = mkFunResCo r scaled_ty co
              where
                scaled_ty = Scaled (varMult v) (varType v)

mkFunResCo :: Role -> Scaled Type -> Coercion -> Coercion
-- Given res_co :: res1 -> res2,
--   mkFunResCo r m arg res_co :: (arg -> res1) ~r (arg -> res2)
-- Reflexive in the multiplicity argument
mkFunResCo role (Scaled mult arg_ty) res_co
  = mkFunCo role (multToCo mult) (mkReflCo role arg_ty) res_co

-- mkCoCast (c :: s1 ~?r t1) (g :: (s1 ~?r t1) ~#R (s2 ~?r t2)) :: s2 ~?r t2
-- The first coercion might be lifted or unlifted; thus the ~? above
-- Lifted and unlifted equalities take different numbers of arguments,
-- so we have to make sure to supply the right parameter to decomposeCo.
-- Also, note that the role of the first coercion is the same as the role of
-- the equalities related by the second coercion. The second coercion is
-- itself always representational.
mkCoCast :: Coercion -> CoercionR -> Coercion
mkCoCast c g
  | (g2:g1:_) <- reverse co_list
  = mkSymCo g1 `mkTransCo` c `mkTransCo` g2

  | otherwise
  = pprPanic "mkCoCast" (ppr g $$ ppr (coercionKind g))
  where
    -- g  :: (s1 ~# t1) ~# (s2 ~# t2)
    -- g1 :: s1 ~# s2
    -- g2 :: t1 ~# t2
    (tc, _) = splitTyConApp (coercionLKind g)
    co_list = decomposeCo (tyConArity tc) g (tyConRolesRepresentational tc)

{- Note [castCoercionKind1]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
castCoercionKind1 deals with the very important special case of castCoercionKind2
where the two kind coercions are identical.  In that case we can exploit the
situation where the main coercion is reflexive, via the special cases for Refl
and GRefl.

This is important when rewriting (ty |> co). We rewrite ty, yielding
   fco :: ty ~ ty'
and now we want a coercion xco between
   xco :: (ty |> co) ~ (ty' |> co)
That's exactly what castCoercionKind1 does.  And it's very very common for
fco to be Refl.  In that case we do NOT want to get some terrible composition
of mkLeftCoherenceCo and mkRightCoherenceCo, which is what castCoercionKind2
has to do in its full generality.  See #18413.
-}

{-
%************************************************************************
%*                                                                      *
            Newtypes
%*                                                                      *
%************************************************************************
-}

-- | If `instNewTyCon_maybe T ts = Just (rep_ty, co)`
--   then `co :: T ts ~R# rep_ty`
--
-- Checks for a newtype, and for being saturated
instNewTyCon_maybe :: TyCon -> [Type] -> Maybe (Type, Coercion)
instNewTyCon_maybe tc tys
  | Just (tvs, ty, co_tc) <- unwrapNewTyConEtad_maybe tc  -- Check for newtype
  , tvs `leLength` tys                                    -- Check saturated enough
  = Just (applyTysX tvs ty tys, mkUnbranchedAxInstCo Representational co_tc tys [])
  | otherwise
  = Nothing

{-
************************************************************************
*                                                                      *
         Type normalisation
*                                                                      *
************************************************************************
-}

-- | A function to check if we can reduce a type by one step. Used
-- with 'topNormaliseTypeX'.
type NormaliseStepper ev = RecTcChecker
                         -> TyCon     -- tc
                         -> [Type]    -- tys
                         -> NormaliseStepResult ev

-- | The result of stepping in a normalisation function.
-- See 'topNormaliseTypeX'.
data NormaliseStepResult ev
  = NS_Done   -- ^ Nothing more to do
  | NS_Abort  -- ^ Utter failure. The outer function should fail too.
  | NS_Step RecTcChecker Type ev    -- ^ We stepped, yielding new bits;
                                    -- ^ ev is evidence;
                                    -- Usually a co :: old type ~ new type

instance Outputable ev => Outputable (NormaliseStepResult ev) where
  ppr NS_Done           = text "NS_Done"
  ppr NS_Abort          = text "NS_Abort"
  ppr (NS_Step _ ty ev) = sep [text "NS_Step", ppr ty, ppr ev]

mapStepResult :: (ev1 -> ev2)
              -> NormaliseStepResult ev1 -> NormaliseStepResult ev2
mapStepResult f (NS_Step rec_nts ty ev) = NS_Step rec_nts ty (f ev)
mapStepResult _ NS_Done                 = NS_Done
mapStepResult _ NS_Abort                = NS_Abort

-- | Try one stepper and then try the next, if the first doesn't make
-- progress.
-- So if it returns NS_Done, it means that both steppers are satisfied
composeSteppers :: NormaliseStepper ev -> NormaliseStepper ev
                -> NormaliseStepper ev
composeSteppers step1 step2 rec_nts tc tys
  = case step1 rec_nts tc tys of
      success@(NS_Step {}) -> success
      NS_Done              -> step2 rec_nts tc tys
      NS_Abort             -> NS_Abort

-- | A 'NormaliseStepper' that unwraps newtypes, careful not to fall into
-- a loop. If it would fall into a loop, it produces 'NS_Abort'.
unwrapNewTypeStepper :: NormaliseStepper Coercion
unwrapNewTypeStepper rec_nts tc tys
  | Just (ty', co) <- instNewTyCon_maybe tc tys
  = -- pprTrace "unNS" (ppr tc <+> ppr (getUnique tc) <+> ppr tys $$ ppr ty' $$ ppr rec_nts) $
    case checkRecTc rec_nts tc of
      Just rec_nts' -> NS_Step rec_nts' ty' co
      Nothing       -> NS_Abort

  | otherwise
  = NS_Done

-- | A general function for normalising the top-level of a type. It continues
-- to use the provided 'NormaliseStepper' until that function fails, and then
-- this function returns. The roles of the coercions produced by the
-- 'NormaliseStepper' must all be the same, which is the role returned from
-- the call to 'topNormaliseTypeX'.
--
-- Typically ev is Coercion.
--
-- If topNormaliseTypeX step plus ty = Just (ev, ty')
-- then ty ~ev1~ t1 ~ev2~ t2 ... ~evn~ ty'
-- and ev = ev1 `plus` ev2 `plus` ...
-- `plus` evn
-- If it returns Nothing then no newtype unwrapping could happen
topNormaliseTypeX :: NormaliseStepper ev -> (ev -> ev -> ev)
                  -> Type -> Maybe (ev, Type)
topNormaliseTypeX stepper plus ty
 | Just (tc, tys) <- splitTyConApp_maybe ty
   -- SPJ: The default threshold for initRecTc is 100 which is extremely dangerous
   -- for certain type synonyms, we should think about reducing it (see #20990)
 , NS_Step rec_nts ty' ev <- stepper initRecTc tc tys
 = go rec_nts ev ty'
 | otherwise
 = Nothing
 where
    -- Keep stepping until the stepper reports NS_Done (success) or
    -- NS_Abort (overall failure), accumulating evidence with `plus`
    go rec_nts ev ty
      | Just (tc, tys) <- splitTyConApp_maybe ty
      = case stepper rec_nts tc tys of
          NS_Step rec_nts' ty' ev' -> go rec_nts' (ev `plus` ev') ty'
          NS_Done                  -> Just (ev, ty)
          NS_Abort                 -> Nothing

      | otherwise
      = Just (ev, ty)

topNormaliseNewType_maybe :: Type -> Maybe (Coercion, Type)
-- ^ Sometimes we want to look through a @newtype@ and get its associated coercion.
-- This function strips off @newtype@ layers enough to reveal something that isn't
-- a @newtype@.  Specifically, here's the invariant:
--
-- > topNormaliseNewType_maybe rec_nts ty = Just (co, ty')
--
-- then (a)  @co : ty ~ ty'@.
--      (b)  ty' is not a newtype.
--
-- The function returns @Nothing@ for non-@newtypes@,
-- or unsaturated applications
--
-- This function does *not* look through type families, because it has no access to
-- the type family environment.
-- If you do have that at hand, consider using
-- topNormaliseType_maybe, which should be a drop-in replacement for
-- topNormaliseNewType_maybe
-- If topNormaliseNewType_maybe ty = Just (co, ty'), then co : ty ~R ty'
topNormaliseNewType_maybe ty
  = topNormaliseTypeX unwrapNewTypeStepper mkTransCo ty

{-
%************************************************************************
%*                                                                      *
                   Comparison of coercions
%*                                                                      *
%************************************************************************
-}

-- | Syntactic equality of coercions
eqCoercion :: Coercion -> Coercion -> Bool
eqCoercion = eqType `on` coercionType

-- | Compare two 'Coercion's, with respect to an RnEnv2
eqCoercionX :: RnEnv2 -> Coercion -> Coercion -> Bool
eqCoercionX env = eqTypeX env `on` coercionType

{-
%************************************************************************
%*                                                                      *
                   "Lifting" substitution
           [(TyCoVar,Coercion)] -> Type -> Coercion
%*                                                                      *
%************************************************************************

Note [Lifting coercions over types: liftCoSubst]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The KPUSH rule deals with this situation
   data T a = K (a -> Maybe a)
   g :: T t1 ~ T t2
   x :: t1 -> Maybe t1

   case (K @t1 x) |> g of
     K (y:t2 -> Maybe t2) -> rhs

We want to push the coercion inside the constructor application.
So we do this

   g' :: t1~t2  =  Nth 0 g
   case K @t2 (x |> g' -> Maybe g') of
     K (y:t2 -> Maybe t2) -> rhs

The crucial operation is that we
  * take the type of K's argument: a -> Maybe a
  * and substitute g' for a
thus giving *coercion*.  This is what liftCoSubst does.

In the presence of kind coercions, this is a bit
of a hairy operation. So, we refer you to the paper introducing kind coercions,
available at www.cis.upenn.edu/~sweirich/papers/fckinds-extended.pdf

Note [extendLiftingContextEx]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider we have datatype
  K :: \/k. \/a::k. P -> T k  -- P be some type
  g :: T k1 ~ T k2

  case (K @k1 @t1 x) |> g of
    K y -> rhs

We want to push the coercion inside the constructor application.
We first get the coercion mapped by the universal type variable k:
   lc = k |-> Nth 0 g :: k1~k2

Here, the important point is that the kind of a is coerced, and P might be
dependent on the existential type variable a.
Thus we first get the coercion of a's kind
   g2 = liftCoSubst lc k :: k1 ~ k2

Then we store a new mapping into the lifting context
   lc2 = a |-> (t1 ~ t1 |> g2), lc

So later when we can correctly deal with the argument type P
   liftCoSubst lc2 P :: P [k|->k1][a|->t1] ~ P[k|->k2][a |-> (t1|>g2)]

This is exactly what extendLiftingContextEx does.
* For each (tyvar:k, ty) pair, we produce the mapping
    tyvar |-> (ty ~ ty |> (liftCoSubst lc k))
* For each (covar:s1~s2, ty) pair, we produce the mapping
    covar |-> (co ~ co')
    co' = Sym (liftCoSubst lc s1) ;; covar ;; liftCoSubst lc s2 :: s1'~s2'

This follows the lifting context extension definition in the
"FC with Explicit Kind Equality" paper.
-}

-- ----------------------------------------------------
-- See Note [Lifting coercions over types: liftCoSubst]
-- ----------------------------------------------------

data LiftingContext = LC TCvSubst LiftCoEnv
  -- in optCoercion, we need to lift when optimizing InstCo.
  -- See Note [Optimising InstCo] in GHC.Core.Coercion.Opt
  -- We thus propagate the substitution from GHC.Core.Coercion.Opt here.

instance Outputable LiftingContext where
  ppr (LC _ env) = hang (text "LiftingContext:") 2 (ppr env)

type LiftCoEnv = VarEnv Coercion
     -- Maps *type variables* to *coercions*.
     -- That's the whole point of this function!
     -- Also maps coercion variables to ProofIrrelCos.
-- like liftCoSubstWith, but allows for existentially-bound types as well
liftCoSubstWithEx :: Role         -- desired role for output coercion
                  -> [TyVar]      -- universally quantified tyvars
                  -> [Coercion]   -- coercions to substitute for those
                  -> [TyCoVar]    -- existentially quantified tycovars
                  -> [Type]       -- types and coercions to be bound to ex vars
                  -> (Type -> Coercion, [Type]) -- (lifting function, converted ex args)
liftCoSubstWithEx role univs omegas exs rhos
  = let theta = mkLiftingContext (zipEqual "liftCoSubstWithExU" univs omegas)
        psi   = extendLiftingContextEx theta (zipEqual "liftCoSubstWithExX" exs rhos)
    in (ty_co_subst psi role, substTys (lcSubstRight psi) (mkTyCoVarTys exs))

liftCoSubstWith :: Role -> [TyCoVar] -> [Coercion] -> Type -> Coercion
liftCoSubstWith r tvs cos ty
  = liftCoSubst r (mkLiftingContext $ zipEqual "liftCoSubstWith" tvs cos) ty

-- | @liftCoSubst role lc ty@ produces a coercion (at role @role@)
-- that coerces between @lc_left(ty)@ and @lc_right(ty)@, where
-- @lc_left@ is a substitution mapping type variables to the left-hand
-- types of the mapped coercions in @lc@, and similar for @lc_right@.
liftCoSubst :: HasDebugCallStack => Role -> LiftingContext -> Type -> Coercion
{-# INLINE liftCoSubst #-}
-- Inlining this function is worth 2% of allocation in T9872d,
liftCoSubst r lc@(LC subst env) ty
  | isEmptyVarEnv env = mkReflCo r (substTy subst ty)
  | otherwise         = ty_co_subst lc r ty

emptyLiftingContext :: InScopeSet -> LiftingContext
emptyLiftingContext in_scope = LC (mkEmptyTCvSubst in_scope) emptyVarEnv

mkLiftingContext :: [(TyCoVar,Coercion)] -> LiftingContext
mkLiftingContext pairs
  = LC (mkEmptyTCvSubst $ mkInScopeSet $ tyCoVarsOfCos (map snd pairs))
       (mkVarEnv pairs)

mkSubstLiftingContext :: TCvSubst -> LiftingContext
mkSubstLiftingContext subst = LC subst emptyVarEnv

-- | Extend a lifting context with a new mapping.
extendLiftingContext :: LiftingContext  -- ^ original LC
                     -> TyCoVar         -- ^ new variable to map...
                     -> Coercion        -- ^ ...to this lifted version
                     -> LiftingContext
    -- mappings to reflexive coercions are just substitutions
extendLiftingContext (LC subst env) tv arg
  | Just (ty, _) <- isReflCo_maybe arg
  = LC (extendTCvSubst subst tv ty) env
  | otherwise
  = LC subst (extendVarEnv env tv arg)

-- | Extend a lifting context with a new mapping, and extend the in-scope set
extendLiftingContextAndInScope :: LiftingContext  -- ^ Original LC
                               -> TyCoVar         -- ^ new variable to map...
                               -> Coercion        -- ^ to this coercion
                               -> LiftingContext
extendLiftingContextAndInScope (LC subst env) tv co
  = extendLiftingContext (LC (extendTCvInScopeSet subst (tyCoVarsOfCo co)) env) tv co

-- | Extend a lifting context with existential-variable bindings.
-- See Note [extendLiftingContextEx]
extendLiftingContextEx :: LiftingContext    -- ^ original lifting context
                       -> [(TyCoVar,Type)]  -- ^ ex. var / value pairs
                       -> LiftingContext
-- Note that this is more involved than extendLiftingContext. That function
-- takes a coercion to extend with, so it's assumed that the caller has taken
-- into account any of the kind-changing stuff worried about here.
extendLiftingContextEx lc [] = lc
extendLiftingContextEx lc@(LC subst env) ((v,ty):rest)
-- This function adds bindings for *Nominal* coercions. Why? Because it
-- works with existentially bound variables, which are considered to have
-- nominal roles.
  | isTyVar v
  = let lc' = LC (subst `extendTCvInScopeSet` tyCoVarsOfType ty)
                 (extendVarEnv env v $
                  mkGReflRightCo Nominal
                                 ty
                                 (ty_co_subst lc Nominal (tyVarKind v)))
    in extendLiftingContextEx lc' rest
  | CoercionTy co <- ty
  = -- co      :: s1 ~r s2
    -- lift_s1 :: s1 ~r s1'
    -- lift_s2 :: s2 ~r s2'
    -- kco     :: (s1 ~r s2) ~N (s1' ~r s2')
    assert (isCoVar v) $
    let (_, _, s1, s2, r) = coVarKindsTypesRole v
        lift_s1 = ty_co_subst lc r s1
        lift_s2 = ty_co_subst lc r s2
        kco = mkTyConAppCo Nominal (equalityTyCon r)
                           [ mkKindCo lift_s1, mkKindCo lift_s2
                           , lift_s1
                           , lift_s2 ]
        lc' = LC (subst `extendTCvInScopeSet` tyCoVarsOfCo co)
                 (extendVarEnv env v
                    (mkProofIrrelCo Nominal kco co $
                      (mkSymCo lift_s1) `mkTransCo` co `mkTransCo` lift_s2))
    in extendLiftingContextEx lc' rest
  | otherwise
  = pprPanic "extendLiftingContextEx" (ppr v <+> text "|->" <+> ppr ty)

-- | Erase the environments in a lifting context
zapLiftingContext :: LiftingContext -> LiftingContext
zapLiftingContext (LC subst _) = LC (zapTCvSubst subst) emptyVarEnv

-- | Like 'substForAllCoBndr', but works on a lifting context
substForAllCoBndrUsingLC :: Bool
                         -> (Coercion -> Coercion)
                         -> LiftingContext -> TyCoVar -> Coercion
                         -> (LiftingContext, TyCoVar, Coercion)
substForAllCoBndrUsingLC sym sco (LC subst lc_env) tv co
  = (LC subst' lc_env, tv', co')
  where
    (subst', tv', co') = substForAllCoBndrUsing sym sco subst tv co

-- | The \"lifting\" operation which substitutes coercions for type
--   variables in a type to produce a coercion.
--
-- For the inverse operation, see 'liftCoMatch'
ty_co_subst :: LiftingContext -> Role -> Type -> Coercion
ty_co_subst !lc role ty
    -- !lc: making this function strict in lc allows callers to
    -- pass its two components separately, rather than boxing them.
    -- Unfortunately, Boxity Analysis concludes that we need lc boxed
    -- because it's used that way in liftCoSubstTyVarBndrUsing.
  = go role ty
  where
    go :: Role -> Type -> Coercion
    go r ty | Just ty' <- coreView ty
            = go r ty'
    go Phantom ty          = lift_phantom ty
    go r (TyVarTy tv)      = expectJust "ty_co_subst bad roles" $
                             liftCoSubstTyVar lc r tv
    go r (AppTy ty1 ty2)   = mkAppCo (go r ty1) (go Nominal ty2)
    go r (TyConApp tc tys) = mkTyConAppCo r tc (zipWith go (tyConRolesX r tc) tys)
    go r (FunTy _ w ty1 ty2) = mkFunCo r (go Nominal w) (go r ty1) (go r ty2)
    go r t@(ForAllTy (Bndr v _) ty)
       = let (lc', v', h) = liftCoSubstVarBndr lc v
             body_co = ty_co_subst lc' r ty in
         if isTyVar v' || almostDevoidCoVarOfCo v' body_co
           -- Lifting a ForAllTy over a coercion variable could fail as ForAllCo
           -- imposes an extra restriction on where a covar can appear. See last
           -- wrinkle in Note [Unused coercion variable in ForAllCo].
           -- We specifically check for this and panic because we know that
           -- there's a hole in the type system here, and we'd rather panic than
           -- fall into it.
         then mkForAllCo v' h body_co
         else pprPanic "ty_co_subst: covar is not almost devoid" (ppr t)
    go r ty@(LitTy {})     = assert (r == Nominal) $
                             mkNomReflCo ty
    go r (CastTy ty co)    = castCoercionKind (go r ty) (substLeftCo lc co)
                                              (substRightCo lc co)
    go r (CoercionTy co)   = mkProofIrrelCo r kco (substLeftCo lc co)
                                                  (substRightCo lc co)
      where kco = go Nominal (coercionType co)

    lift_phantom ty = mkPhantomCo (go Nominal (typeKind ty))
                                  (substTy (lcSubstLeft  lc) ty)
                                  (substTy (lcSubstRight lc) ty)

{-
Note [liftCoSubstTyVar]
~~~~~~~~~~~~~~~~~~~~~~~~~
This function can fail if a coercion in the environment is of too low a role.

liftCoSubstTyVar is called from two places: in liftCoSubst (naturally), and
also in matchAxiom in GHC.Core.Coercion.Opt. From liftCoSubst, the so-called lifting
lemma guarantees that the roles work out. If we fail in this
case, we really should panic -- something is deeply wrong. But, in matchAxiom,
failing is fine. matchAxiom is trying to find a set of coercions
that match, but it may fail, and this is healthy behavior.
-}

-- See Note [liftCoSubstTyVar]
liftCoSubstTyVar :: LiftingContext -> Role -> TyVar -> Maybe Coercion
liftCoSubstTyVar (LC subst env) r v
  | Just co_arg <- lookupVarEnv env v
    -- Nothing if co_arg's role is too low for the requested role r;
    -- see Note [liftCoSubstTyVar]
  = downgradeRole_maybe r (coercionRole co_arg) co_arg
  | otherwise
    -- Not mapped by the lifting environment: plain (reflexive) substitution.
  = Just $ mkReflCo r (substTyVar subst v)

{- Note [liftCoSubstVarBndr]
~~~~~~~~~~~~~~~~~~~~~~~~~
callback:
  'liftCoSubstVarBndrUsing' needs to be general enough to work in two
  situations:
    - in this module, which manipulates 'Coercion's, and
    - in GHC.Core.FamInstEnv, where we work with 'Reduction's, which contain a
      coercion as well as a type.
  To achieve this, we require that the return type of the 'callback' function
  contain a coercion within it. This is witnessed by the first argument to
  'liftCoSubstVarBndrUsing': a getter, which allows us to retrieve the
  coercion inside the return type. Thus:
    - in this module, we simply pass 'id' as the getter,
    - in GHC.Core.FamInstEnv, we pass 'reductionCoercion' as the getter.

liftCoSubstTyVarBndrUsing:
  Given
    forall tv:k. t
  We want to get
    forall (tv:k1) (kind_co :: k1 ~ k2) body_co
  We lift the kind k to get the kind_co
    kind_co = ty_co_subst k :: k1 ~ k2
  Now in the LiftingContext, we add the new mapping
    tv |-> (tv :: k1) ~ ((tv |> kind_co) :: k2)

liftCoSubstCoVarBndrUsing:
  Given
    forall cv:(s1 ~ s2). t
  We want to get
    forall (cv:s1'~s2') (kind_co :: (s1'~s2') ~ (t1 ~ t2)) body_co
  We lift s1 and s2 respectively to get
    eta1 :: s1' ~ t1
    eta2 :: s2' ~ t2
  And
    kind_co = TyConAppCo Nominal (~#) eta1 eta2
  Now in the liftingContext, we add the new mapping
    cv |-> (cv :: s1' ~ s2') ~ ((sym eta1;cv;eta2) :: t1 ~ t2)
-}

-- See Note [liftCoSubstVarBndr]
liftCoSubstVarBndr :: LiftingContext -> TyCoVar
                   -> (LiftingContext, TyCoVar, Coercion)
liftCoSubstVarBndr lc tv
  = liftCoSubstVarBndrUsing id callback lc tv
  where
    callback lc' ty' = ty_co_subst lc' Nominal ty'
      -- the callback must produce a nominal coercion

liftCoSubstVarBndrUsing :: (r -> CoercionN)              -- ^ coercion getter
                        -> (LiftingContext -> Type -> r) -- ^ callback
                        -> LiftingContext -> TyCoVar
                        -> (LiftingContext, TyCoVar, r)
liftCoSubstVarBndrUsing view_co fun lc old_var
  | isTyVar old_var = liftCoSubstTyVarBndrUsing view_co fun lc old_var
  | otherwise       = liftCoSubstCoVarBndrUsing view_co fun lc old_var

-- Works for tyvar binder
liftCoSubstTyVarBndrUsing :: (r -> CoercionN)              -- ^ coercion getter
                          -> (LiftingContext -> Type -> r) -- ^ callback
                          -> LiftingContext -> TyVar
                          -> (LiftingContext, TyVar, r)
liftCoSubstTyVarBndrUsing view_co fun lc@(LC subst cenv) old_var
  = assert (isTyVar old_var) $
    ( LC (subst `extendTCvInScope` new_var) new_cenv
    , new_var, stuff )
  where
    old_kind = tyVarKind old_var
    stuff    = fun lc old_kind
    eta      = view_co stuff
    k1       = coercionLKind eta
    new_var  = uniqAway (getTCvInScope subst) (setVarType old_var k1)

    lifted   = mkGReflRightCo Nominal (TyVarTy new_var) eta
               -- :: new_var ~ new_var |> eta
    new_cenv = extendVarEnv cenv old_var lifted

-- Works for covar binder
liftCoSubstCoVarBndrUsing :: (r -> CoercionN)              -- ^ coercion getter
                          -> (LiftingContext -> Type -> r) -- ^ callback
                          -> LiftingContext -> CoVar
                          -> (LiftingContext, CoVar, r)
liftCoSubstCoVarBndrUsing view_co fun lc@(LC subst cenv) old_var
  = assert (isCoVar old_var) $
    ( LC (subst `extendTCvInScope` new_var) new_cenv
    , new_var, stuff )
  where
    old_kind = coVarKind old_var
    stuff    = fun lc old_kind
    eta      = view_co stuff
    k1       = coercionLKind eta
    new_var  = uniqAway (getTCvInScope subst) (setVarType old_var k1)

    -- old_var :: s1  ~r s2
    -- eta     :: (s1' ~r s2') ~N (t1 ~r t2)
    -- eta1    :: s1' ~r t1
    -- eta2    :: s2' ~r t2
    -- co1     :: s1' ~r s2'
    -- co2     :: t1  ~r t2
    -- lifted  :: co1 ~N co2
    role   = coVarRole old_var
    eta'   = downgradeRole role Nominal eta
    eta1   = mkNthCo role 2 eta'
    eta2   = mkNthCo role 3 eta'

    co1    = mkCoVarCo new_var
    co2    = mkSymCo eta1 `mkTransCo` co1 `mkTransCo` eta2
    lifted = mkProofIrrelCo Nominal eta co1 co2

    new_cenv = extendVarEnv cenv old_var lifted

-- | Is a var in the domain of a lifting context?
isMappedByLC :: TyCoVar -> LiftingContext -> Bool
isMappedByLC tv (LC _ env) = tv `elemVarEnv` env

-- If [a |-> g] is in the substitution and g :: t1 ~ t2, substitute a for t1
-- If [a |-> (g1, g2)] is in the substitution, substitute a for g1
substLeftCo :: LiftingContext -> Coercion -> Coercion
substLeftCo lc co
  = substCo (lcSubstLeft lc) co

-- Ditto, but for t2 and g2
substRightCo :: LiftingContext -> Coercion -> Coercion
substRightCo lc co
  = substCo (lcSubstRight lc) co

-- | Apply "sym" to all coercions in a 'LiftCoEnv'
swapLiftCoEnv :: LiftCoEnv -> LiftCoEnv
swapLiftCoEnv = mapVarEnv mkSymCo

-- | The substitution mapping each lifted variable to the left-hand
-- type of its coercion.
lcSubstLeft :: LiftingContext -> TCvSubst
lcSubstLeft (LC subst lc_env) = liftEnvSubstLeft subst lc_env

-- | The substitution mapping each lifted variable to the right-hand
-- type of its coercion.
lcSubstRight :: LiftingContext -> TCvSubst
lcSubstRight (LC subst lc_env) = liftEnvSubstRight subst lc_env

liftEnvSubstLeft :: TCvSubst -> LiftCoEnv -> TCvSubst
liftEnvSubstLeft = liftEnvSubst pFst

liftEnvSubstRight :: TCvSubst -> LiftCoEnv -> TCvSubst
liftEnvSubstRight = liftEnvSubst pSnd

-- Project one side (left or right) of every coercion in the LiftCoEnv,
-- yielding an ordinary substitution composed with the base substitution.
liftEnvSubst :: (forall a. Pair a -> a) -> TCvSubst -> LiftCoEnv -> TCvSubst
liftEnvSubst selector subst lc_env
  = composeTCvSubst (TCvSubst emptyInScopeSet tenv cenv) subst
  where
    pairs            = nonDetUFMToList lc_env
                       -- It's OK to use nonDetUFMToList here because we
                       -- immediately forget the ordering by creating
                       -- a VarEnv
    (tpairs, cpairs) = partitionWith ty_or_co pairs
    tenv             = mkVarEnv_Directly tpairs
    cenv             = mkVarEnv_Directly cpairs

    ty_or_co :: (Unique, Coercion) -> Either (Unique, Type) (Unique, Coercion)
    ty_or_co (u, co)
      | Just equality_co <- isCoercionTy_maybe equality_ty
      = Right (u, equality_co)
      | otherwise
      = Left (u, equality_ty)
      where
        equality_ty = selector (coercionKind co)

-- | Extract the underlying substitution from the LiftingContext
lcTCvSubst :: LiftingContext -> TCvSubst
lcTCvSubst (LC subst _) = subst

-- | Get the 'InScopeSet' from a 'LiftingContext'
lcInScopeSet :: LiftingContext -> InScopeSet
lcInScopeSet (LC subst _) = getTCvInScope subst

{-
%************************************************************************
%*                                                                      *
                   Sequencing on coercions
%*                                                                      *
%************************************************************************
-}

-- | Force an 'MCoercion' deeply.
seqMCo :: MCoercion -> ()
seqMCo MRefl    = ()
seqMCo (MCo co) = seqCo co

-- | Force a 'Coercion' deeply, evaluating every sub-structure.
seqCo :: Coercion -> ()
seqCo (Refl ty)                 = seqType ty
seqCo (GRefl r ty mco)          = r `seq` seqType ty `seq` seqMCo mco
seqCo (TyConAppCo r tc cos)     = r `seq` tc `seq` seqCos cos
seqCo (AppCo co1 co2)           = seqCo co1 `seq` seqCo co2
seqCo (ForAllCo tv k co)        = seqType (varType tv) `seq` seqCo k `seq` seqCo co
seqCo (FunCo r w co1 co2)       = r `seq` seqCo w `seq` seqCo co1 `seq` seqCo co2
seqCo (CoVarCo cv)              = cv `seq` ()
seqCo (HoleCo h)                = coHoleCoVar h `seq` ()
seqCo (AxiomInstCo con ind cos) = con `seq` ind `seq` seqCos cos
seqCo (UnivCo p r t1 t2)        = seqProv p `seq` r `seq` seqType t1 `seq` seqType t2
seqCo (SymCo co)                = seqCo co
seqCo (TransCo co1 co2)         = seqCo co1 `seq` seqCo co2
seqCo (NthCo r n co)            = r `seq` n `seq` seqCo co
seqCo (LRCo lr co)              = lr `seq` seqCo co
seqCo (InstCo co arg)           = seqCo co `seq` seqCo arg
seqCo (KindCo co)               = seqCo co
seqCo (SubCo co)                = seqCo co
seqCo (AxiomRuleCo _ cs)        = seqCos cs

-- | Force a 'UnivCoProvenance' deeply.
seqProv :: UnivCoProvenance -> ()
seqProv (PhantomProv co)    = seqCo co
seqProv (ProofIrrelProv co) = seqCo co
seqProv (PluginProv _)      = ()
seqProv (CorePrepProv _)    = ()

-- | Force a list of 'Coercion's deeply.
seqCos :: [Coercion] -> ()
seqCos []       = ()
seqCos (co:cos) = seqCo co `seq` seqCos cos

{-
%************************************************************************
%*                                                                      *
             The kind of a type, and of a coercion
%*                                                                      *
%************************************************************************
-}

-- | Apply 'coercionKind' to multiple 'Coercion's
coercionKinds :: [Coercion] -> Pair [Type]
coercionKinds tys = sequenceA $ map coercionKind tys

-- | Get a coercion's kind and role.
coercionKindRole :: Coercion -> (Pair Type, Role)
coercionKindRole co = (coercionKind co, coercionRole co)

-- | The type of a coercion: an equality predicate between its two kinds,
-- at the coercion's role.
coercionType :: Coercion -> Type
coercionType co = case coercionKindRole co of
  (Pair ty1 ty2, r) -> mkCoercionType r ty1 ty2

------------------
-- | If it is the case that
--
-- > c :: (t1 ~ t2)
--
-- i.e. the kind of @c@ relates @t1@ and @t2@, then @coercionKind c = Pair t1 t2@.
coercionKind :: Coercion -> Pair Type
coercionKind co = Pair (coercionLKind co) (coercionRKind co)

-- | The left-hand type of a coercion's kind.
coercionLKind :: Coercion -> Type
coercionLKind co
  = go co
  where
    go (Refl ty)                = ty
    go (GRefl _ ty _)           = ty
    go (TyConAppCo _ tc cos)    = mkTyConApp tc (map go cos)
    go (AppCo co1 co2)          = mkAppTy (go co1) (go co2)
    go (ForAllCo tv1 _ co1)     = mkTyCoInvForAllTy tv1 (go co1)
    go (FunCo _ w co1 co2)      = mkFunctionType (go w) (go co1) (go co2)
    go (CoVarCo cv)             = coVarLType cv
    go (HoleCo h)               = coVarLType (coHoleCoVar h)
    go (UnivCo _ _ ty1 _)       = ty1
    go (SymCo co)               = coercionRKind co
    go (TransCo co1 _)          = go co1
    go (LRCo lr co)             = pickLR lr (splitAppTy (go co))
    go (InstCo aco arg)         = go_app aco [go arg]
    go (KindCo co)              = typeKind (go co)
    go (SubCo co)               = go co
    go (NthCo _ d co)           = go_nth d (go co)
    go (AxiomInstCo ax ind cos) = go_ax_inst ax ind (map go cos)
    go (AxiomRuleCo ax cos)     = pFst $ expectJust "coercionKind" $
                                  coaxrProves ax $ map coercionKind cos

    -- LHS of an axiom instantiation: substitute the (lifted) arguments
    -- into the branch's patterns.
    go_ax_inst ax ind tys
      | CoAxBranch { cab_tvs = tvs, cab_cvs = cvs
                   , cab_lhs = lhs } <- coAxiomNthBranch ax ind
      , let (tys1, cotys1) = splitAtList tvs tys
            cos1           = map stripCoercionTy cotys1
      = assert (tys `equalLength` (tvs ++ cvs)) $
                  -- Invariant of AxiomInstCo: cos should
                  -- exactly saturate the axiom branch
        substTyWith tvs tys1       $
        substTyWithCoVars cvs cos1 $
        mkTyConApp (coAxiomTyCon ax) lhs

    go_app :: Coercion -> [Type] -> Type
    -- Collect up all the arguments and apply all at once
    -- See Note [Nested InstCos]
    go_app (InstCo co arg) args = go_app co (go arg:args)
    go_app co              args = piResultTys (go co) args

    -- Project the d'th argument of a tycon application, or the binder
    -- kind of a forall (d must be 0 in that case).
    go_nth :: Int -> Type -> Type
    go_nth d ty
      | Just args <- tyConAppArgs_maybe ty
      = assert (args `lengthExceeds` d) $
        args `getNth` d

      | d == 0
      , Just (tv,_) <- splitForAllTyCoVar_maybe ty
      = tyVarKind tv

      | otherwise
      = pprPanic "coercionLKind:nth" (ppr d <+> ppr ty)

-- | The right-hand type of a coercion's kind.
coercionRKind :: Coercion -> Type
coercionRKind co
  = go co
  where
    go (Refl ty)                = ty
    go (GRefl _ ty MRefl)       = ty
    go (GRefl _ ty (MCo co1))   = mkCastTy ty co1
    go (TyConAppCo _ tc cos)    = mkTyConApp tc (map go cos)
    go (AppCo co1 co2)          = mkAppTy (go co1) (go co2)
    go (CoVarCo cv)             = coVarRType cv
    go (HoleCo h)               = coVarRType (coHoleCoVar h)
    go (FunCo _ w co1 co2)      = mkFunctionType (go w) (go co1) (go co2)
    go (UnivCo _ _ _ ty2)       = ty2
    go (SymCo co)               = coercionLKind co
    go (TransCo _ co2)          = go co2
    go (LRCo lr co)             = pickLR lr (splitAppTy (go co))
    go (InstCo aco arg)         = go_app aco [go arg]
    go (KindCo co)              = typeKind (go co)
    go (SubCo co)               = go co
    go (NthCo _ d co)           = go_nth d (go co)
    go (AxiomInstCo ax ind cos) = go_ax_inst ax ind (map go cos)
    go (AxiomRuleCo ax cos)     = pSnd $ expectJust "coercionKind" $
                                  coaxrProves ax $ map coercionKind cos

    go co@(ForAllCo tv1 k_co co1) -- works for both tyvar and covar
       | isGReflCo k_co = mkTyCoInvForAllTy tv1 (go co1)
         -- kind_co always has kind @Type@, thus @isGReflCo@
       | otherwise      = go_forall empty_subst co
       where
         empty_subst = mkEmptyTCvSubst (mkInScopeSet $ tyCoVarsOfCo co)

    -- RHS of an axiom instantiation: substitute the (lifted) arguments
    -- into the branch's right-hand side.
    go_ax_inst ax ind tys
      | CoAxBranch { cab_tvs = tvs, cab_cvs = cvs
                   , cab_rhs = rhs } <- coAxiomNthBranch ax ind
      , let (tys2, cotys2) = splitAtList tvs tys
            cos2           = map stripCoercionTy cotys2
      = assert (tys `equalLength` (tvs ++ cvs)) $
                  -- Invariant of AxiomInstCo: cos should
                  -- exactly saturate the axiom branch
        substTyWith tvs tys2       $
        substTyWithCoVars cvs cos2
        rhs

    go_app :: Coercion -> [Type] -> Type
    -- Collect up all the arguments and apply all at once
    -- See Note [Nested InstCos]
    go_app (InstCo co arg) args = go_app co (go arg:args)
    go_app co              args = piResultTys (go co) args

    go_forall subst (ForAllCo tv1 k_co co)
      -- See Note [Nested ForAllCos]
      | isTyVar tv1
      = mkInfForAllTy tv2 (go_forall subst' co)
      where
        k2  = coercionRKind k_co
        tv2 = setTyVarKind tv1 (substTy subst k2)
        subst' | isGReflCo k_co = extendTCvInScope subst tv1
                 -- kind_co always has kind @Type@, thus @isGReflCo@
               | otherwise = extendTvSubst (extendTCvInScope subst tv2) tv1 $
                             TyVarTy tv2 `mkCastTy` mkSymCo k_co

    go_forall subst (ForAllCo cv1 k_co co)
      | isCoVar cv1
      = mkTyCoInvForAllTy cv2 (go_forall subst' co)
      where
        k2    = coercionRKind k_co
        r     = coVarRole cv1
        eta1  = mkNthCo r 2 (downgradeRole r Nominal k_co)
        eta2  = mkNthCo r 3 (downgradeRole r Nominal k_co)
        -- k_co :: (t1 ~r t2) ~N (s1 ~r s2)
        -- k1    = t1 ~r t2
        -- k2    = s1 ~r s2
        -- cv1 :: t1 ~r t2
        -- cv2 :: s1 ~r s2
        -- eta1 :: t1 ~r s1
        -- eta2 :: t2 ~r s2
        -- n_subst  = (eta1 ; cv2 ; sym eta2) :: t1 ~r t2
        cv2     = setVarType cv1 (substTy subst k2)
        n_subst = eta1 `mkTransCo` (mkCoVarCo cv2) `mkTransCo` (mkSymCo eta2)
        subst'  | isReflCo k_co = extendTCvInScope subst cv1
                | otherwise     = extendCvSubst (extendTCvInScope subst cv2)
                                                cv1 n_subst

    go_forall subst other_co
      -- when other_co is not a ForAllCo
      = substTy subst (go other_co)

{-
Note [Nested ForAllCos]
~~~~~~~~~~~~~~~~~~~~~~~
Suppose we need `coercionKind (ForAllCo a1 (ForAllCo a2 ... (ForAllCo an
co)...) )`.   We do not want to perform `n` single-type-variable
substitutions over the kind of `co`; rather we want to do one substitution
which substitutes for all of `a1`, `a2` ... simultaneously.  If we do one
at a time we get the performance hole reported in #11735.

Solution: gather up the type variables for nested `ForAllCos`, and
substitute for them all at once.  Remarkably, for #11735 this single
change reduces /total/ compile time by a factor of more than ten.
-}

-- | Retrieve the role from a coercion.
coercionRole :: Coercion -> Role
coercionRole = go
  where
    go (Refl _)             = Nominal
    go (GRefl r _ _)        = r
    go (TyConAppCo r _ _)   = r
    go (AppCo co1 _)        = go co1
    go (ForAllCo _ _ co)    = go co
    go (FunCo r _ _ _)      = r
    go (CoVarCo cv)         = coVarRole cv
    go (HoleCo h)           = coVarRole (coHoleCoVar h)
    go (AxiomInstCo ax _ _) = coAxiomRole ax
    go (UnivCo _ r _ _)     = r
    go (SymCo co)           = go co
    go (TransCo co1 _co2)   = go co1
    go (NthCo r _d _co)     = r
    go (LRCo {})            = Nominal
    go (InstCo co _)        = go co
    go (KindCo {})          = Nominal
    go (SubCo _)            = Representational
    go (AxiomRuleCo ax _)   = coaxrRole ax

{-
Note [Nested InstCos]
~~~~~~~~~~~~~~~~~~~~~
In #5631 we found that 70% of the entire compilation time was
being spent in coercionKind!
The reason was that we had
   (g @ ty1 @ ty2 .. @ ty100)    -- The "@s" are InstCos
where
   g :: forall a1 a2 .. a100. phi
If we deal with the InstCos one at a time, we'll do this:
   1.  Find the kind of (g @ ty1 .. @ ty99) : forall a100. phi'
   2.  Substitute phi'[ ty100/a100 ], a single tyvar->type subst
But this is a *quadratic* algorithm, and this blew up #5631.
So it's very important to do the substitution simultaneously;
cf Type.piResultTys (which in fact we call here).
-}

-- | Makes a coercion type from two types: the types whose equality
-- is proven by the relevant 'Coercion'
mkCoercionType :: Role -> Type -> Type -> Type
mkCoercionType Nominal          = mkPrimEqPred
mkCoercionType Representational = mkReprPrimEqPred
mkCoercionType Phantom          = \ty1 ty2 ->
  let ki1 = typeKind ty1
      ki2 = typeKind ty2
  in
  TyConApp eqPhantPrimTyCon [ki1, ki2, ty1, ty2]

-- | Like 'mkCoercionType', but with the kinds of both sides given
-- explicitly (so the sides may be heterogeneously kinded).
mkHeteroCoercionType :: Role -> Kind -> Kind -> Type -> Type -> Type
mkHeteroCoercionType Nominal          = mkHeteroPrimEqPred
mkHeteroCoercionType Representational = mkHeteroReprPrimEqPred
mkHeteroCoercionType Phantom          = panic "mkHeteroCoercionType"

-- | Creates a primitive type equality predicate.
-- Invariant: the types are not Coercions
mkPrimEqPred :: Type -> Type -> Type
mkPrimEqPred ty1 ty2
  = mkTyConApp eqPrimTyCon [k1, k2, ty1, ty2]
  where
    k1 = typeKind ty1
    k2 = typeKind ty2

-- | Makes a lifted equality predicate at the given role
mkPrimEqPredRole :: Role -> Type -> Type -> PredType
mkPrimEqPredRole Nominal          = mkPrimEqPred
mkPrimEqPredRole Representational = mkReprPrimEqPred
mkPrimEqPredRole Phantom          = panic "mkPrimEqPredRole phantom"

-- | Creates a primitive type equality predicate with explicit kinds
mkHeteroPrimEqPred :: Kind -> Kind -> Type -> Type -> Type
mkHeteroPrimEqPred k1 k2 ty1 ty2
  = mkTyConApp eqPrimTyCon [k1, k2, ty1, ty2]

-- | Creates a primitive representational type equality predicate
-- with explicit kinds
mkHeteroReprPrimEqPred :: Kind -> Kind -> Type -> Type -> Type
mkHeteroReprPrimEqPred k1 k2 ty1 ty2
  = mkTyConApp eqReprPrimTyCon [k1, k2, ty1, ty2]

-- | Creates a primitive representational type equality predicate.
mkReprPrimEqPred :: Type -> Type -> Type
mkReprPrimEqPred ty1 ty2
  = mkTyConApp eqReprPrimTyCon [k1, k2, ty1, ty2]
  where
    k1 = typeKind ty1
    k2 = typeKind ty2

-- | Assuming that two types are the same, ignoring coercions, find
-- a nominal coercion between the types. This is useful when optimizing
-- transitivity over coercion applications, where splitting two
-- AppCos might yield different kinds. See Note [EtaAppCo] in
-- "GHC.Core.Coercion.Opt".
buildCoercion :: Type -> Type -> CoercionN
buildCoercion orig_ty1 orig_ty2 = go orig_ty1 orig_ty2
  where
    go ty1 ty2 | Just ty1' <- coreView ty1 = go ty1' ty2
               | Just ty2' <- coreView ty2 = go ty1 ty2'

    go (CastTy ty1 co) ty2
      = let co' = go ty1 ty2
            r = coercionRole co'
        in  mkCoherenceLeftCo r ty1 co co'

    go ty1 (CastTy ty2 co)
      = let co' = go ty1 ty2
            r = coercionRole co'
        in mkCoherenceRightCo r ty2 co co'

    go ty1@(TyVarTy tv1) _tyvarty
      = assert (case _tyvarty of
                  { TyVarTy tv2 -> tv1 == tv2
                  ; _           -> False      }) $
        mkNomReflCo ty1

    go (FunTy { ft_mult = w1, ft_arg = arg1, ft_res = res1 })
       (FunTy { ft_mult = w2, ft_arg = arg2, ft_res = res2 })
      = mkFunCo Nominal (go w1 w2) (go arg1 arg2) (go res1 res2)

    go (TyConApp tc1 args1) (TyConApp tc2 args2)
      = assert (tc1 == tc2) $
        mkTyConAppCo Nominal tc1 (zipWith go args1 args2)

    go (AppTy ty1a ty1b) ty2
      | Just (ty2a, ty2b) <- repSplitAppTy_maybe ty2
      = mkAppCo (go ty1a ty2a) (go ty1b ty2b)

    go ty1 (AppTy ty2a ty2b)
      | Just (ty1a, ty1b) <- repSplitAppTy_maybe ty1
      = mkAppCo (go ty1a ty2a) (go ty1b ty2b)

    go (ForAllTy (Bndr tv1 _flag1) ty1) (ForAllTy (Bndr tv2 _flag2) ty2)
      | isTyVar tv1
      = assert (isTyVar tv2) $
        mkForAllCo tv1 kind_co (go ty1 ty2')
      where kind_co  = go (tyVarKind tv1) (tyVarKind tv2)
            in_scope = mkInScopeSet $ tyCoVarsOfType ty2 `unionVarSet` tyCoVarsOfCo kind_co
            ty2'     = substTyWithInScope in_scope [tv2]
                         [mkTyVarTy tv1 `mkCastTy` kind_co]
                         ty2

    go (ForAllTy (Bndr cv1 _flag1) ty1) (ForAllTy (Bndr cv2 _flag2) ty2)
      = assert (isCoVar cv1 && isCoVar cv2) $
        mkForAllCo cv1 kind_co (go ty1 ty2')
      where s1 = varType cv1
            s2 = varType cv2
            kind_co = go s1 s2

            -- s1  = t1 ~r t2
            -- s2  = t3 ~r t4
            -- kind_co :: (t1 ~r t2) ~N (t3 ~r t4)
            -- eta1 :: t1 ~r t3
            -- eta2 :: t2 ~r t4

            r    = coVarRole cv1
            kind_co' = downgradeRole r Nominal kind_co
            eta1 = mkNthCo r 2 kind_co'
            eta2 = mkNthCo r 3 kind_co'

            subst = mkEmptyTCvSubst $ mkInScopeSet $
                      tyCoVarsOfType ty2 `unionVarSet` tyCoVarsOfCo kind_co
            ty2'  = substTy (extendCvSubst subst cv2 $ mkSymCo eta1 `mkTransCo`
                                                       mkCoVarCo cv1 `mkTransCo`
                                                       eta2)
                            ty2

    go ty1@(LitTy lit1) _lit2
      = assert (case _lit2 of
                  { LitTy lit2 -> lit1 == lit2
                  ; _          -> False        }) $
        mkNomReflCo ty1

    go (CoercionTy co1) (CoercionTy co2)
      = mkProofIrrelCo Nominal kind_co co1 co2
      where
        kind_co = go (coercionType co1) (coercionType co2)

    go ty1 ty2
      = pprPanic "buildKindCoercion" (vcat [ ppr orig_ty1, ppr orig_ty2
                                           , ppr ty1, ppr ty2 ])

{-
%************************************************************************
%*                                                                      *
       Coercion holes
%*                                                                      *
%************************************************************************
-}

-- Fold over a type/coercion, accumulating whether a coercion hole occurs
-- anywhere inside it.
has_co_hole_ty :: Type     -> Monoid.Any
has_co_hole_co :: Coercion -> Monoid.Any
(has_co_hole_ty, _, has_co_hole_co, _)
  = foldTyCo folder ()
  where
    folder = TyCoFolder { tcf_view  = noView
                        , tcf_tyvar = const2 (Monoid.Any False)
                        , tcf_covar = const2 (Monoid.Any False)
                        , tcf_hole  = const2 (Monoid.Any True)
                        , tcf_tycobinder = const2
                        }

-- | Is there a coercion hole in this type?
hasCoercionHoleTy :: Type -> Bool
hasCoercionHoleTy = Monoid.getAny . has_co_hole_ty

-- | Is there a coercion hole in this coercion?
hasCoercionHoleCo :: Coercion -> Bool
hasCoercionHoleCo = Monoid.getAny . has_co_hole_co

-- | Is the given coercion hole (identified by its unique) present in the type?
hasThisCoercionHoleTy :: Type -> CoercionHole -> Bool
hasThisCoercionHoleTy ty hole = Monoid.getAny (f ty)
  where
    (f, _, _, _) = foldTyCo folder ()
    folder = TyCoFolder { tcf_view  = noView
                        , tcf_tyvar = const2 (Monoid.Any False)
                        , tcf_covar = const2 (Monoid.Any False)
                        , tcf_hole  = \ _ h -> Monoid.Any (getUnique h == getUnique hole)
                        , tcf_tycobinder = const2
                        }

-- | Set the type of a 'CoercionHole'
setCoHoleType :: CoercionHole -> Type -> CoercionHole
setCoHoleType h t = setCoHoleCoVar h (setVarType (coHoleCoVar h) t)
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Coercion/Axiom.hs0000644000000000000000000005616414472400112021710 0ustar0000000000000000{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE RoleAnnotations #-}
{-# LANGUAGE ScopedTypeVariables #-}

-- (c) The University of Glasgow 2012

-- | Module for coercion axioms, used to represent type family instances
-- and newtypes
module GHC.Core.Coercion.Axiom (
       BranchFlag, Branched, Unbranched, BranchIndex, Branches(..),
       manyBranches, unbranched,
       fromBranches, numBranches,
       mapAccumBranches,

       CoAxiom(..), CoAxBranch(..),

       toBranchedAxiom, toUnbranchedAxiom,
       coAxiomName, coAxiomArity, coAxiomBranches,
       coAxiomTyCon, isImplicitCoAxiom, coAxiomNumPats,
       coAxiomNthBranch, coAxiomSingleBranch_maybe, coAxiomRole,
       coAxiomSingleBranch, coAxBranchTyVars, coAxBranchCoVars,
       coAxBranchRoles,
       coAxBranchLHS, coAxBranchRHS, coAxBranchSpan, coAxBranchIncomps,
       placeHolderIncomps,

       Role(..), fsFromRole,

       CoAxiomRule(..), TypeEqn,
       BuiltInSynFamily(..), trivialBuiltInFamily
       ) where

import GHC.Prelude

import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type )
import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType, pprTyVar )
import {-# SOURCE #-} GHC.Core.TyCon ( TyCon )
import GHC.Utils.Outputable
import GHC.Data.FastString
import GHC.Types.Name
import GHC.Types.Unique
import GHC.Types.Var
import GHC.Utils.Misc
import GHC.Utils.Binary
import GHC.Utils.Panic
import GHC.Utils.Panic.Plain import GHC.Data.Pair import GHC.Types.Basic import Data.Typeable ( Typeable ) import GHC.Types.SrcLoc import qualified Data.Data as Data import Data.Array import Data.List ( mapAccumL ) {- Note [Coercion axiom branches] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to allow closed type families, an axiom needs to contain an ordered list of alternatives, called branches. The kind of the coercion built from an axiom is determined by which index is used when building the coercion from the axiom. For example, consider the axiom derived from the following declaration: type family F a where F [Int] = Bool F [a] = Double F (a b) = Char This will give rise to this axiom: axF :: { F [Int] ~ Bool ; forall (a :: *). F [a] ~ Double ; forall (k :: *) (a :: k -> *) (b :: k). F (a b) ~ Char } The axiom is used with the AxiomInstCo constructor of Coercion. If we wish to have a coercion showing that F (Maybe Int) ~ Char, it will look like axF[2] <*> :: F (Maybe Int) ~ Char -- or, written using concrete-ish syntax -- AxiomInstCo axF 2 [Refl *, Refl Maybe, Refl Int] Note that the index is 0-based. For type-checking, it is also necessary to check that no previous pattern can unify with the supplied arguments. After all, it is possible that some of the type arguments are lambda-bound type variables whose instantiation may cause an earlier match among the branches. We wish to prohibit this behavior, so the type checker rules out the choice of a branch where a previous branch can unify. See also [Apartness] in GHC.Core.FamInstEnv. For example, the following is malformed, where 'a' is a lambda-bound type variable: axF[2] <*> :: F (a Bool) ~ Char Why? Because a might be instantiated with [], meaning that branch 1 should apply, not branch 2. This is a vital consistency check; without it, we could derive Int ~ Bool, and that is a Bad Thing. 
Note [Branched axioms]
~~~~~~~~~~~~~~~~~~~~~~
Although a CoAxiom has the capacity to store many branches, in certain cases,
we want only one. These cases are in data/newtype family instances, newtype
coercions, and type family instances.
Furthermore, these unbranched axioms are used in a
variety of places throughout GHC, and it would be difficult to generalize all
of that code to deal with branched axioms, especially when the code can be
sure of the fact that an axiom is indeed a singleton. At the same time, it
seems dangerous to assume singlehood in various places through GHC.

The solution to this is to label a CoAxiom with a phantom type variable
declaring whether it is known to be a singleton or not. The branches
are stored using a special datatype, declared below, that ensures that the
type variable is accurate.

************************************************************************
*                                                                      *
                    Branches
*                                                                      *
************************************************************************
-}

type BranchIndex = Int  -- The index of the branch in the list of branches
                        -- Counting from zero

-- promoted data type
data BranchFlag = Branched | Unbranched
type Branched = 'Branched
type Unbranched = 'Unbranched
-- By using type synonyms for the promoted constructors, we avoid needing
-- DataKinds and the promotion quote in client modules. This also means that
-- we don't need to export the term-level constructors, which should never be used.

-- | An array of branches, tagged (by the phantom @br@) with whether it is
-- known to contain exactly one branch.  See Note [Branched axioms].
newtype Branches (br :: BranchFlag)
  = MkBranches { unMkBranches :: Array BranchIndex CoAxBranch }
type role Branches nominal

-- | Package a non-empty list of branches.  Panics (via the assertion) on [].
manyBranches :: [CoAxBranch] -> Branches Branched
manyBranches brs = assert (snd bnds >= fst bnds )
                   MkBranches (listArray bnds brs)
  where
    bnds = (0, length brs - 1)

-- | Package a single branch.
unbranched :: CoAxBranch -> Branches Unbranched
unbranched br = MkBranches (listArray (0, 0) [br])

-- | Forget singlehood: any 'Branches' can be viewed as 'Branched'.
toBranched :: Branches br -> Branches Branched
toBranched = MkBranches . unMkBranches

-- | Assert singlehood; the assertion checks there is exactly one branch.
toUnbranched :: Branches br -> Branches Unbranched
toUnbranched (MkBranches arr) = assert (bounds arr == (0,0) )
                                MkBranches arr

fromBranches :: Branches br -> [CoAxBranch]
fromBranches = elems . unMkBranches

branchesNth :: Branches br -> BranchIndex -> CoAxBranch
branchesNth (MkBranches arr) n = arr ! n

numBranches :: Branches br -> Int
numBranches (MkBranches arr) = snd (bounds arr) + 1

-- | The @[CoAxBranch]@ passed into the mapping function is a list of
-- all previous branches, reversed
mapAccumBranches :: ([CoAxBranch] -> CoAxBranch -> CoAxBranch)
                  -> Branches br -> Branches br
mapAccumBranches f (MkBranches arr)
  = MkBranches (listArray (bounds arr) (snd $ mapAccumL go [] (elems arr)))
  where
    go :: [CoAxBranch] -> CoAxBranch -> ([CoAxBranch], CoAxBranch)
    go prev_branches cur_branch = ( cur_branch : prev_branches
                                  , f prev_branches cur_branch )

{-
************************************************************************
*                                                                      *
                    Coercion axioms
*                                                                      *
************************************************************************

Note [Storing compatibility]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
During axiom application, we need to be aware of which branches are compatible
with which others. The full explanation is in Note [Compatibility] in
GHC.Core.FamInstEnv. (The code is placed there to avoid a dependency from
GHC.Core.Coercion.Axiom on the unification algorithm.) Although we could
theoretically compute compatibility on the fly, this is silly, so we store it
in a CoAxiom.

Specifically, each branch refers to all other branches with which it is
incompatible. This list might well be empty, and it will always be for the
first branch of any axiom.

CoAxBranches that do not (yet) belong to a CoAxiom should have a panic thunk
stored in cab_incomps. The incompatibilities are properly a property of the
axiom as a whole, and they are computed only when the final axiom is built.

During serialization, the list is converted into a list of the indices
of the branches.

Note [CoAxioms are homogeneous]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All axioms must be *homogeneous*, meaning that the kind of the LHS must
match the kind of the RHS. In practice, this means:

  Given a CoAxiom { co_ax_tc = ax_tc },
  for every branch CoAxBranch { cab_lhs = lhs, cab_rhs = rhs }:
    typeKind (mkTyConApp ax_tc lhs) `eqType` typeKind rhs

This is checked in FamInstEnv.mkCoAxBranch.
-}

-- | A 'CoAxiom' is a \"coercion constructor\", i.e. a named equality axiom.

-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint
data CoAxiom br
  = CoAxiom                   -- Type equality axiom.
    { co_ax_unique   :: Unique        -- Unique identifier
    , co_ax_name     :: Name          -- Name for pretty-printing
    , co_ax_role     :: Role          -- Role of the axiom's equality
    , co_ax_tc       :: TyCon         -- The head of the LHS patterns
                                      -- e.g. the newtype or family tycon
    , co_ax_branches :: Branches br   -- The branches that form this axiom
    , co_ax_implicit :: Bool          -- True <=> the axiom is "implicit"
                                      -- See Note [Implicit axioms]
         -- INVARIANT: co_ax_implicit == True implies length co_ax_branches == 1.
    }

data CoAxBranch
  = CoAxBranch
    { cab_loc      :: SrcSpan       -- Location of the defining equation
                                    -- See Note [CoAxiom locations]
    , cab_tvs      :: [TyVar]       -- Bound type variables; not necessarily fresh
                                    -- See Note [CoAxBranch type variables]
    , cab_eta_tvs  :: [TyVar]       -- Eta-reduced tyvars
                                    -- cab_tvs and cab_lhs may be eta-reduced; see
                                    -- Note [Eta reduction for data families]
    , cab_cvs      :: [CoVar]       -- Bound coercion variables
                                    -- Always empty, for now.
                                    -- See Note [Constraints in patterns]
                                    -- in GHC.Tc.TyCl
    , cab_roles    :: [Role]        -- See Note [CoAxBranch roles]
    , cab_lhs      :: [Type]        -- Type patterns to match against
    , cab_rhs      :: Type          -- Right-hand side of the equality
                                    -- See Note [CoAxioms are homogeneous]
    , cab_incomps  :: [CoAxBranch]  -- The previous incompatible branches
                                    -- See Note [Storing compatibility]
    }
  deriving Data.Data

toBranchedAxiom :: CoAxiom br -> CoAxiom Branched
toBranchedAxiom (CoAxiom unique name role tc branches implicit)
  = CoAxiom unique name role tc (toBranched branches) implicit

toUnbranchedAxiom :: CoAxiom br -> CoAxiom Unbranched
toUnbranchedAxiom (CoAxiom unique name role tc branches implicit)
  = CoAxiom unique name role tc (toUnbranched branches) implicit

-- | Number of LHS patterns, read off the first branch.
coAxiomNumPats :: CoAxiom br -> Int
coAxiomNumPats = length . coAxBranchLHS . (flip coAxiomNthBranch 0)

coAxiomNthBranch :: CoAxiom br -> BranchIndex -> CoAxBranch
coAxiomNthBranch (CoAxiom { co_ax_branches = bs }) index
  = branchesNth bs index

-- | Number of variables (type and coercion) bound by the indicated branch.
coAxiomArity :: CoAxiom br -> BranchIndex -> Arity
coAxiomArity ax index
  = length tvs + length cvs
  where
    CoAxBranch { cab_tvs = tvs, cab_cvs = cvs } = coAxiomNthBranch ax index

coAxiomName :: CoAxiom br -> Name
coAxiomName = co_ax_name

coAxiomRole :: CoAxiom br -> Role
coAxiomRole = co_ax_role

coAxiomBranches :: CoAxiom br -> Branches br
coAxiomBranches = co_ax_branches

coAxiomSingleBranch_maybe :: CoAxiom br -> Maybe CoAxBranch
coAxiomSingleBranch_maybe (CoAxiom { co_ax_branches = MkBranches arr })
  | snd (bounds arr) == 0
  = Just $ arr ! 0
  | otherwise
  = Nothing

coAxiomSingleBranch :: CoAxiom Unbranched -> CoAxBranch
coAxiomSingleBranch (CoAxiom { co_ax_branches = MkBranches arr })
  = arr ! 0

coAxiomTyCon :: CoAxiom br -> TyCon
coAxiomTyCon = co_ax_tc

coAxBranchTyVars :: CoAxBranch -> [TyVar]
coAxBranchTyVars = cab_tvs

coAxBranchCoVars :: CoAxBranch -> [CoVar]
coAxBranchCoVars = cab_cvs

coAxBranchLHS :: CoAxBranch -> [Type]
coAxBranchLHS = cab_lhs

coAxBranchRHS :: CoAxBranch -> Type
coAxBranchRHS = cab_rhs

coAxBranchRoles :: CoAxBranch -> [Role]
coAxBranchRoles = cab_roles

coAxBranchSpan :: CoAxBranch -> SrcSpan
coAxBranchSpan = cab_loc

isImplicitCoAxiom :: CoAxiom br -> Bool
isImplicitCoAxiom = co_ax_implicit

coAxBranchIncomps :: CoAxBranch -> [CoAxBranch]
coAxBranchIncomps = cab_incomps

-- See Note [Compatibility] in GHC.Core.FamInstEnv
placeHolderIncomps :: [CoAxBranch]
placeHolderIncomps = panic "placeHolderIncomps"

{-
Note [CoAxBranch type variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case of a CoAxBranch of an associated type-family instance,
we use the *same* type variables in cab_tvs (where possible) as the
enclosing class or instance.  Consider

  instance C Int [z] where
     type F Int [z] = ...   -- Second param must be [z]

In the CoAxBranch in the instance decl (F Int [z]) we use the
same 'z', so that it's easy to check that that type is the same
as that in the instance header.

However, I believe that the cab_tvs of any CoAxBranch are distinct
from the cab_tvs of other CoAxBranches in the same CoAxiom. This is
important when checking for compatibility and apartness; e.g. see
GHC.Core.FamInstEnv.compatibleBranches. (The story seems a bit wobbly
here, but it seems to work.)

Note [CoAxBranch roles]
~~~~~~~~~~~~~~~~~~~~~~~
Consider this code:

  newtype Age = MkAge Int
  newtype Wrap a = MkWrap a

  convert :: Wrap Age -> Int
  convert (MkWrap (MkAge i)) = i

We want this to compile to:

  NTCo:Wrap :: forall a. Wrap a ~R a
  NTCo:Age  :: Age ~R Int
  convert = \x -> x |> (NTCo:Wrap[0] NTCo:Age[0])

But, note that NTCo:Age is at role R. Thus, we need to be able to pass
coercions at role R into axioms.
However, we don't *always* want to be able to do this, as it would be disastrous with type families. The solution is to annotate the arguments to the axiom with roles, much like we annotate tycon tyvars. Where do these roles get set? Newtype axioms inherit their roles from the newtype tycon; family axioms are all at role N. Note [CoAxiom locations] ~~~~~~~~~~~~~~~~~~~~~~~~ The source location of a CoAxiom is stored in two places in the datatype tree. * The first is in the location info buried in the Name of the CoAxiom. This span includes all of the branches of a branched CoAxiom. * The second is in the cab_loc fields of the CoAxBranches. In the case of a single branch, we can extract the source location of the branch from the name of the CoAxiom. In other cases, we need an explicit SrcSpan to correctly store the location of the equation giving rise to the FamInstBranch. Note [Implicit axioms] ~~~~~~~~~~~~~~~~~~~~~~ See also Note [Implicit TyThings] in GHC.Types.TyThing * A CoAxiom arising from data/type family instances is not "implicit". That is, it has its own IfaceAxiom declaration in an interface file * The CoAxiom arising from a newtype declaration *is* "implicit". That is, it does not have its own IfaceAxiom declaration in an interface file; instead the CoAxiom is generated by type-checking the newtype declaration Note [Eta reduction for data families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this data family T a b :: * newtype instance T Int a = MkT (IO a) deriving( Monad ) We'd like this to work. From the 'newtype instance' you might think we'd get: newtype TInt a = MkT (IO a) axiom ax1 a :: T Int a ~ TInt a -- The newtype-instance part axiom ax2 a :: TInt a ~ IO a -- The newtype part But now what can we do? We have this problem Given: d :: Monad IO Wanted: d' :: Monad (T Int) = d |> ???? What coercion can we use for the ??? 
Solution: eta-reduce both axioms, thus: axiom ax1 :: T Int ~ TInt axiom ax2 :: TInt ~ IO Now d' = d |> Monad (sym (ax2 ; ax1)) ----- Bottom line ------ For a CoAxBranch for a data family instance with representation TyCon rep_tc: - cab_tvs (of its CoAxiom) may be shorter than tyConTyVars of rep_tc. - cab_lhs may be shorter than tyConArity of the family tycon i.e. LHS is unsaturated - cab_rhs will be (rep_tc cab_tvs) i.e. RHS is un-saturated - This eta reduction happens for data instances as well as newtype instances. Here we want to eta-reduce the data family axiom. - This eta-reduction is done in GHC.Tc.TyCl.Instance.tcDataFamInstDecl. But for a /type/ family - cab_lhs has the exact arity of the family tycon There are certain situations (e.g., pretty-printing) where it is necessary to deal with eta-expanded data family instances. For these situations, the cab_eta_tvs field records the stuff that has been eta-reduced away. So if we have axiom forall a b. F [a->b] = D b a and cab_eta_tvs is [p,q], then the original user-written definition looked like axiom forall a b p q. F [a->b] p q = D b a p q (See #9692, #14179, and #15845 for examples of what can go wrong if we don't eta-expand when showing things to the user.) See also: * Note [Newtype eta] in GHC.Core.TyCon. This is notionally separate and deals with the axiom connecting a newtype with its representation type; but it too is eta-reduced. * Note [Implementing eta reduction for data families] in "GHC.Tc.TyCl.Instance". This describes the implementation details of this eta reduction happen. * Note [RoughMap and rm_empty] for how this complicates the RoughMap implementation slightly. 
-} {- ********************************************************************* * * Instances, especially pretty-printing * * ********************************************************************* -} instance Eq (CoAxiom br) where a == b = getUnique a == getUnique b a /= b = getUnique a /= getUnique b instance Uniquable (CoAxiom br) where getUnique = co_ax_unique instance NamedThing (CoAxiom br) where getName = co_ax_name instance Typeable br => Data.Data (CoAxiom br) where -- don't traverse? toConstr _ = abstractConstr "CoAxiom" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "CoAxiom" instance Outputable (CoAxiom br) where -- You may want GHC.Core.Coercion.pprCoAxiom instead ppr = ppr . getName instance Outputable CoAxBranch where -- This instance doesn't know the name of the type family -- If possible, use GHC.Core.Coercion.pprCoAxBranch instead ppr (CoAxBranch { cab_tvs = tvs, cab_cvs = cvs , cab_lhs = lhs_tys, cab_rhs = rhs, cab_incomps = incomps }) = text "CoAxBranch" <+> braces payload where payload = hang (text "forall" <+> pprWithCommas pprTyVar (tvs ++ cvs) <> dot) 2 (vcat [ text "" <+> sep (map pprType lhs_tys) , nest 2 (text "=" <+> ppr rhs) , ppUnless (null incomps) $ text "incomps:" <+> vcat (map ppr incomps) ]) {- ************************************************************************ * * Roles * * ************************************************************************ Roles are defined here to avoid circular dependencies. -} -- See Note [Roles] in GHC.Core.Coercion -- defined here to avoid cyclic dependency with GHC.Core.Coercion -- -- Order of constructors matters: the Ord instance coincides with the *super*typing -- relation on roles. data Role = Nominal | Representational | Phantom deriving (Eq, Ord, Data.Data) -- These names are slurped into the parser code. Changing these strings -- will change the **surface syntax** that GHC accepts! If you want to -- change only the pretty-printing, do some replumbing. 
See -- mkRoleAnnotDecl in GHC.Parser.PostProcess fsFromRole :: Role -> FastString fsFromRole Nominal = fsLit "nominal" fsFromRole Representational = fsLit "representational" fsFromRole Phantom = fsLit "phantom" instance Outputable Role where ppr = ftext . fsFromRole instance Binary Role where put_ bh Nominal = putByte bh 1 put_ bh Representational = putByte bh 2 put_ bh Phantom = putByte bh 3 get bh = do tag <- getByte bh case tag of 1 -> return Nominal 2 -> return Representational 3 -> return Phantom _ -> panic ("get Role " ++ show tag) {- ************************************************************************ * * CoAxiomRule Rules for building Evidence * * ************************************************************************ Conditional axioms. The general idea is that a `CoAxiomRule` looks like this: forall as. (r1 ~ r2, s1 ~ s2) => t1 ~ t2 My intention is to reuse these for both (~) and (~#). The short-term plan is to use this datatype to represent the type-nat axioms. In the longer run, it may be good to unify this and `CoAxiom`, as `CoAxiom` is the special case when there are no assumptions. -} -- | A more explicit representation for `t1 ~ t2`. type TypeEqn = Pair Type -- | For now, we work only with nominal equality. data CoAxiomRule = CoAxiomRule { coaxrName :: FastString , coaxrAsmpRoles :: [Role] -- roles of parameter equations , coaxrRole :: Role -- role of resulting equation , coaxrProves :: [TypeEqn] -> Maybe TypeEqn -- ^ coaxrProves returns @Nothing@ when it doesn't like -- the supplied arguments. When this happens in a coercion -- that means that the coercion is ill-formed, and Core Lint -- checks for that. } instance Data.Data CoAxiomRule where -- don't traverse? toConstr _ = abstractConstr "CoAxiomRule" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "CoAxiomRule" instance Uniquable CoAxiomRule where getUnique = getUnique . 
coaxrName instance Eq CoAxiomRule where x == y = coaxrName x == coaxrName y instance Ord CoAxiomRule where -- we compare lexically to avoid non-deterministic output when sets of rules -- are printed compare x y = lexicalCompareFS (coaxrName x) (coaxrName y) instance Outputable CoAxiomRule where ppr = ppr . coaxrName -- Type checking of built-in families data BuiltInSynFamily = BuiltInSynFamily { sfMatchFam :: [Type] -> Maybe (CoAxiomRule, [Type], Type) -- Does this reduce on the given arguments? -- If it does, returns (CoAxiomRule, types to instantiate the rule at, rhs type) -- That is: mkAxiomRuleCo coax (zipWith mkReflCo (coaxrAsmpRoles coax) ts) -- :: F tys ~r rhs, -- where the r in the output is coaxrRole of the rule. It is up to the -- caller to ensure that this role is appropriate. , sfInteractTop :: [Type] -> Type -> [TypeEqn] -- If given these type arguments and RHS, returns the equalities that -- are guaranteed to hold. , sfInteractInert :: [Type] -> Type -> [Type] -> Type -> [TypeEqn] -- If given one set of arguments and result, and another set of arguments -- and result, returns the equalities that are guaranteed to hold. } -- Provides default implementations that do nothing. trivialBuiltInFamily :: BuiltInSynFamily trivialBuiltInFamily = BuiltInSynFamily { sfMatchFam = \_ -> Nothing , sfInteractTop = \_ _ -> [] , sfInteractInert = \_ _ _ _ -> [] } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Coercion/Opt.hs0000644000000000000000000012655214472400112021375 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE CPP #-} module GHC.Core.Coercion.Opt ( optCoercion , checkAxInstCo , OptCoercionOpts (..) 
) where import GHC.Prelude import GHC.Tc.Utils.TcType ( exactTyCoVarsOfType ) import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Subst import GHC.Core.Coercion import GHC.Core.Type as Type hiding( substTyVarBndr, substTy ) import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Core.Unify import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Data.Pair import GHC.Data.List.SetOps ( getNth ) import GHC.Utils.Outputable import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Trace import Control.Monad ( zipWithM ) {- %************************************************************************ %* * Optimising coercions %* * %************************************************************************ Note [Optimising coercion optimisation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Looking up a coercion's role or kind is linear in the size of the coercion. Thus, doing this repeatedly during the recursive descent of coercion optimisation is disastrous. We must be careful to avoid doing this if at all possible. Because it is generally easy to know a coercion's components' roles from the role of the outer coercion, we pass down the known role of the input in the algorithm below. We also keep functions opt_co2 and opt_co3 separate from opt_co4, so that the former two do Phantom checks that opt_co4 can avoid. This is a big win because Phantom coercions rarely appear within non-phantom coercions -- only in some TyConAppCos and some AxiomInstCos. We handle these cases specially by calling opt_co2. Note [Optimising InstCo] ~~~~~~~~~~~~~~~~~~~~~~~~ (1) tv is a type variable When we have (InstCo (ForAllCo tv h g) g2), we want to optimise. Let's look at the typing rules. 
h : k1 ~ k2 tv:k1 |- g : t1 ~ t2 ----------------------------- ForAllCo tv h g : (all tv:k1.t1) ~ (all tv:k2.t2[tv |-> tv |> sym h]) g1 : (all tv:k1.t1') ~ (all tv:k2.t2') g2 : s1 ~ s2 -------------------- InstCo g1 g2 : t1'[tv |-> s1] ~ t2'[tv |-> s2] We thus want some coercion proving this: (t1[tv |-> s1]) ~ (t2[tv |-> s2 |> sym h]) If we substitute the *type* tv for the *coercion* (g2 ; t2 ~ t2 |> sym h) in g, we'll get this result exactly. This is bizarre, though, because we're substituting a type variable with a coercion. However, this operation already exists: it's called *lifting*, and defined in GHC.Core.Coercion. We just need to enhance the lifting operation to be able to deal with an ambient substitution, which is why a LiftingContext stores a TCvSubst. (2) cv is a coercion variable Now consider we have (InstCo (ForAllCo cv h g) g2), we want to optimise. h : (t1 ~r t2) ~N (t3 ~r t4) cv : t1 ~r t2 |- g : t1' ~r2 t2' n1 = nth r 2 (downgradeRole r N h) :: t1 ~r t3 n2 = nth r 3 (downgradeRole r N h) :: t2 ~r t4 ------------------------------------------------ ForAllCo cv h g : (all cv:t1 ~r t2. t1') ~r2 (all cv:t3 ~r t4. t2'[cv |-> n1 ; cv ; sym n2]) g1 : (all cv:t1 ~r t2. t1') ~ (all cv: t3 ~r t4. t2') g2 : h1 ~N h2 h1 : t1 ~r t2 h2 : t3 ~r t4 ------------------------------------------------ InstCo g1 g2 : t1'[cv |-> h1] ~ t2'[cv |-> h2] We thus want some coercion proving this: t1'[cv |-> h1] ~ t2'[cv |-> n1 ; h2; sym n2] So we substitute the coercion variable c for the coercion (h1 ~N (n1; h2; sym n2)) in g. 
-} -- | Coercion optimisation options newtype OptCoercionOpts = OptCoercionOpts { optCoercionEnabled :: Bool -- ^ Enable coercion optimisation (reduce its size) } optCoercion :: OptCoercionOpts -> TCvSubst -> Coercion -> NormalCo -- ^ optCoercion applies a substitution to a coercion, -- *and* optimises it to reduce its size optCoercion opts env co | optCoercionEnabled opts = optCoercion' env co | otherwise = substCo env co optCoercion' :: TCvSubst -> Coercion -> NormalCo optCoercion' env co | debugIsOn = let out_co = opt_co1 lc False co (Pair in_ty1 in_ty2, in_role) = coercionKindRole co (Pair out_ty1 out_ty2, out_role) = coercionKindRole out_co in assertPpr (substTyUnchecked env in_ty1 `eqType` out_ty1 && substTyUnchecked env in_ty2 `eqType` out_ty2 && in_role == out_role) ( text "optCoercion changed types!" $$ hang (text "in_co:") 2 (ppr co) $$ hang (text "in_ty1:") 2 (ppr in_ty1) $$ hang (text "in_ty2:") 2 (ppr in_ty2) $$ hang (text "out_co:") 2 (ppr out_co) $$ hang (text "out_ty1:") 2 (ppr out_ty1) $$ hang (text "out_ty2:") 2 (ppr out_ty2) $$ hang (text "subst:") 2 (ppr env)) out_co | otherwise = opt_co1 lc False co where lc = mkSubstLiftingContext env type NormalCo = Coercion -- Invariants: -- * The substitution has been fully applied -- * For trans coercions (co1 `trans` co2) -- co1 is not a trans, and neither co1 nor co2 is identity type NormalNonIdCo = NormalCo -- Extra invariant: not the identity -- | Do we apply a @sym@ to the result? type SymFlag = Bool -- | Do we force the result to be representational? type ReprFlag = Bool -- | Optimize a coercion, making no assumptions. All coercions in -- the lifting context are already optimized (and sym'd if nec'y) opt_co1 :: LiftingContext -> SymFlag -> Coercion -> NormalCo opt_co1 env sym co = opt_co2 env sym (coercionRole co) co -- See Note [Optimising coercion optimisation] -- | Optimize a coercion, knowing the coercion's role. No other assumptions. 
opt_co2 :: LiftingContext -> SymFlag -> Role -- ^ The role of the input coercion -> Coercion -> NormalCo opt_co2 env sym Phantom co = opt_phantom env sym co opt_co2 env sym r co = opt_co3 env sym Nothing r co -- See Note [Optimising coercion optimisation] -- | Optimize a coercion, knowing the coercion's non-Phantom role. opt_co3 :: LiftingContext -> SymFlag -> Maybe Role -> Role -> Coercion -> NormalCo opt_co3 env sym (Just Phantom) _ co = opt_phantom env sym co opt_co3 env sym (Just Representational) r co = opt_co4_wrap env sym True r co -- if mrole is Just Nominal, that can't be a downgrade, so we can ignore opt_co3 env sym _ r co = opt_co4_wrap env sym False r co -- See Note [Optimising coercion optimisation] -- | Optimize a non-phantom coercion. opt_co4, opt_co4_wrap :: LiftingContext -> SymFlag -> ReprFlag -> Role -> Coercion -> NormalCo opt_co4_wrap = opt_co4 {- opt_co4_wrap env sym rep r co = pprTrace "opt_co4_wrap {" ( vcat [ text "Sym:" <+> ppr sym , text "Rep:" <+> ppr rep , text "Role:" <+> ppr r , text "Co:" <+> ppr co ]) $ assert (r == coercionRole co ) let result = opt_co4 env sym rep r co in pprTrace "opt_co4_wrap }" (ppr co $$ text "---" $$ ppr result) $ result -} opt_co4 env _ rep r (Refl ty) = assertPpr (r == Nominal) (text "Expected role:" <+> ppr r $$ text "Found role:" <+> ppr Nominal $$ text "Type:" <+> ppr ty) $ liftCoSubst (chooseRole rep r) env ty opt_co4 env _ rep r (GRefl _r ty MRefl) = assertPpr (r == _r) (text "Expected role:" <+> ppr r $$ text "Found role:" <+> ppr _r $$ text "Type:" <+> ppr ty) $ liftCoSubst (chooseRole rep r) env ty opt_co4 env sym rep r (GRefl _r ty (MCo co)) = assertPpr (r == _r) (text "Expected role:" <+> ppr r $$ text "Found role:" <+> ppr _r $$ text "Type:" <+> ppr ty) $ if isGReflCo co || isGReflCo co' then liftCoSubst r' env ty else wrapSym sym $ mkCoherenceRightCo r' ty' co' (liftCoSubst r' env ty) where r' = chooseRole rep r ty' = substTy (lcSubstLeft env) ty co' = opt_co4 env False False Nominal co opt_co4 
env sym rep r (SymCo co) = opt_co4_wrap env (not sym) rep r co -- surprisingly, we don't have to do anything to the env here. This is -- because any "lifting" substitutions in the env are tied to ForAllCos, -- which treat their left and right sides differently. We don't want to -- exchange them. opt_co4 env sym rep r g@(TyConAppCo _r tc cos) = assert (r == _r) $ case (rep, r) of (True, Nominal) -> mkTyConAppCo Representational tc (zipWith3 (opt_co3 env sym) (map Just (tyConRolesRepresentational tc)) (repeat Nominal) cos) (False, Nominal) -> mkTyConAppCo Nominal tc (map (opt_co4_wrap env sym False Nominal) cos) (_, Representational) -> -- must use opt_co2 here, because some roles may be P -- See Note [Optimising coercion optimisation] mkTyConAppCo r tc (zipWith (opt_co2 env sym) (tyConRolesRepresentational tc) -- the current roles cos) (_, Phantom) -> pprPanic "opt_co4 sees a phantom!" (ppr g) opt_co4 env sym rep r (AppCo co1 co2) = mkAppCo (opt_co4_wrap env sym rep r co1) (opt_co4_wrap env sym False Nominal co2) opt_co4 env sym rep r (ForAllCo tv k_co co) = case optForAllCoBndr env sym tv k_co of (env', tv', k_co') -> mkForAllCo tv' k_co' $ opt_co4_wrap env' sym rep r co -- Use the "mk" functions to check for nested Refls opt_co4 env sym rep r (FunCo _r cow co1 co2) = assert (r == _r) $ if rep then mkFunCo Representational cow' co1' co2' else mkFunCo r cow' co1' co2' where co1' = opt_co4_wrap env sym rep r co1 co2' = opt_co4_wrap env sym rep r co2 cow' = opt_co1 env sym cow opt_co4 env sym rep r (CoVarCo cv) | Just co <- lookupCoVar (lcTCvSubst env) cv = opt_co4_wrap (zapLiftingContext env) sym rep r co | ty1 `eqType` ty2 -- See Note [Optimise CoVarCo to Refl] = mkReflCo (chooseRole rep r) ty1 | otherwise = assert (isCoVar cv1 ) wrapRole rep r $ wrapSym sym $ CoVarCo cv1 where Pair ty1 ty2 = coVarTypes cv1 cv1 = case lookupInScope (lcInScopeSet env) cv of Just cv1 -> cv1 Nothing -> warnPprTrace True "opt_co: not in scope" (ppr cv $$ ppr env) cv -- cv1 might have a 
substituted kind! opt_co4 _ _ _ _ (HoleCo h) = pprPanic "opt_univ fell into a hole" (ppr h) opt_co4 env sym rep r (AxiomInstCo con ind cos) -- Do *not* push sym inside top-level axioms -- e.g. if g is a top-level axiom -- g a : f a ~ a -- then (sym (g ty)) /= g (sym ty) !! = assert (r == coAxiomRole con ) wrapRole rep (coAxiomRole con) $ wrapSym sym $ -- some sub-cos might be P: use opt_co2 -- See Note [Optimising coercion optimisation] AxiomInstCo con ind (zipWith (opt_co2 env False) (coAxBranchRoles (coAxiomNthBranch con ind)) cos) -- Note that the_co does *not* have sym pushed into it opt_co4 env sym rep r (UnivCo prov _r t1 t2) = assert (r == _r ) opt_univ env sym prov (chooseRole rep r) t1 t2 opt_co4 env sym rep r (TransCo co1 co2) -- sym (g `o` h) = sym h `o` sym g | sym = opt_trans in_scope co2' co1' | otherwise = opt_trans in_scope co1' co2' where co1' = opt_co4_wrap env sym rep r co1 co2' = opt_co4_wrap env sym rep r co2 in_scope = lcInScopeSet env opt_co4 env _sym rep r (NthCo _r n co) | Just (ty, _) <- isReflCo_maybe co , Just (_tc, args) <- assert (r == _r ) splitTyConApp_maybe ty = liftCoSubst (chooseRole rep r) env (args `getNth` n) | Just (ty, _) <- isReflCo_maybe co , n == 0 , Just (tv, _) <- splitForAllTyCoVar_maybe ty -- works for both tyvar and covar = liftCoSubst (chooseRole rep r) env (varType tv) opt_co4 env sym rep r (NthCo r1 n (TyConAppCo _ _ cos)) = assert (r == r1 ) opt_co4_wrap env sym rep r (cos `getNth` n) -- see the definition of GHC.Builtin.Types.Prim.funTyCon opt_co4 env sym rep r (NthCo r1 n (FunCo _r2 w co1 co2)) = assert (r == r1 ) opt_co4_wrap env sym rep r (mkNthCoFunCo n w co1 co2) opt_co4 env sym rep r (NthCo _r n (ForAllCo _ eta _)) -- works for both tyvar and covar = assert (r == _r ) assert (n == 0 ) opt_co4_wrap env sym rep Nominal eta opt_co4 env sym rep r (NthCo _r n co) | Just nth_co <- case co' of TyConAppCo _ _ cos -> Just (cos `getNth` n) FunCo _ w co1 co2 -> Just (mkNthCoFunCo n w co1 co2) ForAllCo _ eta _ -> Just 
eta _ -> Nothing = if rep && (r == Nominal) -- keep propagating the SubCo then opt_co4_wrap (zapLiftingContext env) False True Nominal nth_co else nth_co | otherwise = wrapRole rep r $ NthCo r n co' where co' = opt_co1 env sym co opt_co4 env sym rep r (LRCo lr co) | Just pr_co <- splitAppCo_maybe co = assert (r == Nominal ) opt_co4_wrap env sym rep Nominal (pick_lr lr pr_co) | Just pr_co <- splitAppCo_maybe co' = assert (r == Nominal) $ if rep then opt_co4_wrap (zapLiftingContext env) False True Nominal (pick_lr lr pr_co) else pick_lr lr pr_co | otherwise = wrapRole rep Nominal $ LRCo lr co' where co' = opt_co4_wrap env sym False Nominal co pick_lr CLeft (l, _) = l pick_lr CRight (_, r) = r -- See Note [Optimising InstCo] opt_co4 env sym rep r (InstCo co1 arg) -- forall over type... | Just (tv, kind_co, co_body) <- splitForAllCo_ty_maybe co1 = opt_co4_wrap (extendLiftingContext env tv (mkCoherenceRightCo Nominal t2 (mkSymCo kind_co) sym_arg)) -- mkSymCo kind_co :: k1 ~ k2 -- sym_arg :: (t1 :: k1) ~ (t2 :: k2) -- tv |-> (t1 :: k1) ~ (((t2 :: k2) |> (sym kind_co)) :: k1) sym rep r co_body -- forall over coercion... | Just (cv, kind_co, co_body) <- splitForAllCo_co_maybe co1 , CoercionTy h1 <- t1 , CoercionTy h2 <- t2 = let new_co = mk_new_co cv (opt_co4_wrap env sym False Nominal kind_co) h1 h2 in opt_co4_wrap (extendLiftingContext env cv new_co) sym rep r co_body -- See if it is a forall after optimization -- If so, do an inefficient one-variable substitution, then re-optimize -- forall over type... | Just (tv', kind_co', co_body') <- splitForAllCo_ty_maybe co1' = opt_co4_wrap (extendLiftingContext (zapLiftingContext env) tv' (mkCoherenceRightCo Nominal t2' (mkSymCo kind_co') arg')) False False r' co_body' -- forall over coercion... 
| Just (cv', kind_co', co_body') <- splitForAllCo_co_maybe co1' , CoercionTy h1' <- t1' , CoercionTy h2' <- t2' = let new_co = mk_new_co cv' kind_co' h1' h2' in opt_co4_wrap (extendLiftingContext (zapLiftingContext env) cv' new_co) False False r' co_body' | otherwise = InstCo co1' arg' where co1' = opt_co4_wrap env sym rep r co1 r' = chooseRole rep r arg' = opt_co4_wrap env sym False Nominal arg sym_arg = wrapSym sym arg' -- Performance note: don't be alarmed by the two calls to coercionKind -- here, as only one call to coercionKind is actually demanded per guard. -- t1/t2 are used when checking if co1 is a forall, and t1'/t2' are used -- when checking if co1' (i.e., co1 post-optimization) is a forall. -- -- t1/t2 must come from sym_arg, not arg', since it's possible that arg' -- might have an extra Sym at the front (after being optimized) that co1 -- lacks, so we need to use sym_arg to balance the number of Syms. (#15725) Pair t1 t2 = coercionKind sym_arg Pair t1' t2' = coercionKind arg' mk_new_co cv kind_co h1 h2 = let -- h1 :: (t1 ~ t2) -- h2 :: (t3 ~ t4) -- kind_co :: (t1 ~ t2) ~ (t3 ~ t4) -- n1 :: t1 ~ t3 -- n2 :: t2 ~ t4 -- new_co = (h1 :: t1 ~ t2) ~ ((n1;h2;sym n2) :: t1 ~ t2) r2 = coVarRole cv kind_co' = downgradeRole r2 Nominal kind_co n1 = mkNthCo r2 2 kind_co' n2 = mkNthCo r2 3 kind_co' in mkProofIrrelCo Nominal (Refl (coercionType h1)) h1 (n1 `mkTransCo` h2 `mkTransCo` (mkSymCo n2)) opt_co4 env sym _rep r (KindCo co) = assert (r == Nominal) $ let kco' = promoteCoercion co in case kco' of KindCo co' -> promoteCoercion (opt_co1 env sym co') _ -> opt_co4_wrap env sym False Nominal kco' -- This might be able to be optimized more to do the promotion -- and substitution/optimization at the same time opt_co4 env sym _ r (SubCo co) = assert (r == Representational) $ opt_co4_wrap env sym True Nominal co -- This could perhaps be optimized more. 
opt_co4 env sym rep r (AxiomRuleCo co cs) = assert (r == coaxrRole co) $ wrapRole rep r $ wrapSym sym $ AxiomRuleCo co (zipWith (opt_co2 env False) (coaxrAsmpRoles co) cs) {- Note [Optimise CoVarCo to Refl] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we have (c :: t~t) we can optimise it to Refl. That increases the chances of floating the Refl upwards; e.g. Maybe c --> Refl (Maybe t) We do so here in optCoercion, not in mkCoVarCo; see Note [mkCoVarCo] in GHC.Core.Coercion. -} ------------- -- | Optimize a phantom coercion. The input coercion may not necessarily -- be a phantom, but the output sure will be. opt_phantom :: LiftingContext -> SymFlag -> Coercion -> NormalCo opt_phantom env sym co = opt_univ env sym (PhantomProv (mkKindCo co)) Phantom ty1 ty2 where Pair ty1 ty2 = coercionKind co {- Note [Differing kinds] ~~~~~~~~~~~~~~~~~~~~~~ The two types may not have the same kind (although that would be very unusual). But even if they have the same kind, and the same type constructor, the number of arguments in a `CoTyConApp` can differ. Consider Any :: forall k. k Any * Int :: * Any (*->*) Maybe Int :: * Hence the need to compare argument lengths; see #13658 Note [opt_univ needs injectivity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If opt_univ sees a coercion between `T a1 a2` and `T b1 b2` it will optimize it by producing a TyConAppCo for T, and pushing the UnivCo into the arguments. But this works only if T is injective. Otherwise we can have something like type family F x where F Int = Int F Bool = Int where `UnivCo :: F Int ~ F Bool` is reasonable (it is effectively just an alternative representation for a couple of uses of AxiomInstCos) but we do not want to produce `F (UnivCo :: Int ~ Bool)` where the inner coercion is clearly inconsistent. Hence the opt_univ case for TyConApps checks isInjectiveTyCon. See #19509. 
-} opt_univ :: LiftingContext -> SymFlag -> UnivCoProvenance -> Role -> Type -> Type -> Coercion opt_univ env sym (PhantomProv h) _r ty1 ty2 | sym = mkPhantomCo h' ty2' ty1' | otherwise = mkPhantomCo h' ty1' ty2' where h' = opt_co4 env sym False Nominal h ty1' = substTy (lcSubstLeft env) ty1 ty2' = substTy (lcSubstRight env) ty2 opt_univ env sym prov role oty1 oty2 | Just (tc1, tys1) <- splitTyConApp_maybe oty1 , Just (tc2, tys2) <- splitTyConApp_maybe oty2 , tc1 == tc2 , isInjectiveTyCon tc1 role -- see Note [opt_univ needs injectivity] , equalLength tys1 tys2 -- see Note [Differing kinds] -- NB: prov must not be the two interesting ones (ProofIrrel & Phantom); -- Phantom is already taken care of, and ProofIrrel doesn't relate tyconapps = let roles = tyConRolesX role tc1 arg_cos = zipWith3 (mkUnivCo prov') roles tys1 tys2 arg_cos' = zipWith (opt_co4 env sym False) roles arg_cos in mkTyConAppCo role tc1 arg_cos' -- can't optimize the AppTy case because we can't build the kind coercions. | Just (tv1, ty1) <- splitForAllTyVar_maybe oty1 , Just (tv2, ty2) <- splitForAllTyVar_maybe oty2 -- NB: prov isn't interesting here either = let k1 = tyVarKind tv1 k2 = tyVarKind tv2 eta = mkUnivCo prov' Nominal k1 k2 -- eta gets opt'ed soon, but not yet. ty2' = substTyWith [tv2] [TyVarTy tv1 `mkCastTy` eta] ty2 (env', tv1', eta') = optForAllCoBndr env sym tv1 eta in mkForAllCo tv1' eta' (opt_univ env' sym prov' role ty1 ty2') | Just (cv1, ty1) <- splitForAllCoVar_maybe oty1 , Just (cv2, ty2) <- splitForAllCoVar_maybe oty2 -- NB: prov isn't interesting here either = let k1 = varType cv1 k2 = varType cv2 r' = coVarRole cv1 eta = mkUnivCo prov' Nominal k1 k2 eta_d = downgradeRole r' Nominal eta -- eta gets opt'ed soon, but not yet. 
n_co = (mkSymCo $ mkNthCo r' 2 eta_d) `mkTransCo` (mkCoVarCo cv1) `mkTransCo` (mkNthCo r' 3 eta_d) ty2' = substTyWithCoVars [cv2] [n_co] ty2 (env', cv1', eta') = optForAllCoBndr env sym cv1 eta in mkForAllCo cv1' eta' (opt_univ env' sym prov' role ty1 ty2') | otherwise = let ty1 = substTyUnchecked (lcSubstLeft env) oty1 ty2 = substTyUnchecked (lcSubstRight env) oty2 (a, b) | sym = (ty2, ty1) | otherwise = (ty1, ty2) in mkUnivCo prov' role a b where prov' = case prov of #if __GLASGOW_HASKELL__ < 901 -- This alt is redundant with the first match of the FunDef PhantomProv kco -> PhantomProv $ opt_co4_wrap env sym False Nominal kco #endif ProofIrrelProv kco -> ProofIrrelProv $ opt_co4_wrap env sym False Nominal kco PluginProv _ -> prov CorePrepProv _ -> prov ------------- opt_transList :: HasDebugCallStack => InScopeSet -> [NormalCo] -> [NormalCo] -> [NormalCo] opt_transList is = zipWithEqual "opt_transList" (opt_trans is) -- The input lists must have identical length. opt_trans :: InScopeSet -> NormalCo -> NormalCo -> NormalCo opt_trans is co1 co2 | isReflCo co1 = co2 -- optimize when co1 is a Refl Co | otherwise = opt_trans1 is co1 co2 opt_trans1 :: InScopeSet -> NormalNonIdCo -> NormalCo -> NormalCo -- First arg is not the identity opt_trans1 is co1 co2 | isReflCo co2 = co1 -- optimize when co2 is a Refl Co | otherwise = opt_trans2 is co1 co2 opt_trans2 :: InScopeSet -> NormalNonIdCo -> NormalNonIdCo -> NormalCo -- Neither arg is the identity opt_trans2 is (TransCo co1a co1b) co2 -- Don't know whether the sub-coercions are the identity = opt_trans is co1a (opt_trans is co1b co2) opt_trans2 is co1 co2 | Just co <- opt_trans_rule is co1 co2 = co opt_trans2 is co1 (TransCo co2a co2b) | Just co1_2a <- opt_trans_rule is co1 co2a = if isReflCo co1_2a then co2b else opt_trans1 is co1_2a co2b opt_trans2 _ co1 co2 = mkTransCo co1 co2 ------ -- Optimize coercions with a top-level use of transitivity. 
-- | Try to apply one top-level transitivity optimisation rule to
-- @co1 ; co2@.  Returns @Nothing@ when no rule fires.
opt_trans_rule :: InScopeSet -> NormalNonIdCo -> NormalNonIdCo -> Maybe NormalCo

opt_trans_rule is in_co1@(GRefl r1 t1 (MCo co1)) in_co2@(GRefl r2 _ (MCo co2))
  = assert (r1 == r2) $
    fireTransRule "GRefl" in_co1 in_co2 $
    mkGReflRightCo r1 t1 (opt_trans is co1 co2)

-- Push transitivity through matching destructors
opt_trans_rule is in_co1@(NthCo r1 d1 co1) in_co2@(NthCo r2 d2 co2)
  | d1 == d2
  , coercionRole co1 == coercionRole co2
  , co1 `compatible_co` co2
  = assert (r1 == r2) $
    fireTransRule "PushNth" in_co1 in_co2 $
    mkNthCo r1 d1 (opt_trans is co1 co2)

opt_trans_rule is in_co1@(LRCo d1 co1) in_co2@(LRCo d2 co2)
  | d1 == d2
  , co1 `compatible_co` co2
  = fireTransRule "PushLR" in_co1 in_co2 $
    mkLRCo d1 (opt_trans is co1 co2)

-- Push transitivity inside instantiation
opt_trans_rule is in_co1@(InstCo co1 ty1) in_co2@(InstCo co2 ty2)
  | ty1 `eqCoercion` ty2
  , co1 `compatible_co` co2
  = fireTransRule "TrPushInst" in_co1 in_co2 $
    mkInstCo (opt_trans is co1 co2) ty1

opt_trans_rule is in_co1@(UnivCo p1 r1 tyl1 _tyr1)
                  in_co2@(UnivCo p2 r2 _tyl2 tyr2)
  | Just prov' <- opt_trans_prov p1 p2
  = assert (r1 == r2) $
    fireTransRule "UnivCo" in_co1 in_co2 $
    mkUnivCo prov' r1 tyl1 tyr2
  where
    -- if the provenances are different, opt'ing will be very confusing
    opt_trans_prov (PhantomProv kco1) (PhantomProv kco2)
      = Just $ PhantomProv $ opt_trans is kco1 kco2
    opt_trans_prov (ProofIrrelProv kco1) (ProofIrrelProv kco2)
      = Just $ ProofIrrelProv $ opt_trans is kco1 kco2
    opt_trans_prov (PluginProv str1) (PluginProv str2)
      | str1 == str2
      = Just p1
    opt_trans_prov _ _ = Nothing

-- Push transitivity down through matching top-level constructors.
opt_trans_rule is in_co1@(TyConAppCo r1 tc1 cos1) in_co2@(TyConAppCo r2 tc2 cos2)
  | tc1 == tc2
  = assert (r1 == r2) $
    fireTransRule "PushTyConApp" in_co1 in_co2 $
    mkTyConAppCo r1 tc1 (opt_transList is cos1 cos2)

opt_trans_rule is in_co1@(FunCo r1 w1 co1a co1b) in_co2@(FunCo r2 w2 co2a co2b)
  = assert (r1 == r2) $   -- Just like the TyConAppCo/TyConAppCo case
    fireTransRule "PushFun" in_co1 in_co2 $
    mkFunCo r1 (opt_trans is w1 w2) (opt_trans is co1a co2a) (opt_trans is co1b co2b)

opt_trans_rule is in_co1@(AppCo co1a co1b) in_co2@(AppCo co2a co2b)
  -- Must call opt_trans_rule_app; see Note [EtaAppCo]
  = opt_trans_rule_app is in_co1 in_co2 co1a [co1b] co2a [co2b]

-- Eta rules
opt_trans_rule is co1@(TyConAppCo r tc cos1) co2
  | Just cos2 <- etaTyConAppCo_maybe tc co2
  = fireTransRule "EtaCompL" co1 co2 $
    mkTyConAppCo r tc (opt_transList is cos1 cos2)

opt_trans_rule is co1 co2@(TyConAppCo r tc cos2)
  | Just cos1 <- etaTyConAppCo_maybe tc co1
  = fireTransRule "EtaCompR" co1 co2 $
    mkTyConAppCo r tc (opt_transList is cos1 cos2)

opt_trans_rule is co1@(AppCo co1a co1b) co2
  | Just (co2a,co2b) <- etaAppCo_maybe co2
  = opt_trans_rule_app is co1 co2 co1a [co1b] co2a [co2b]

opt_trans_rule is co1 co2@(AppCo co2a co2b)
  | Just (co1a,co1b) <- etaAppCo_maybe co1
  = opt_trans_rule_app is co1 co2 co1a [co1b] co2a [co2b]

-- Push transitivity inside forall
-- forall over types.
opt_trans_rule is co1 co2
  | Just (tv1, eta1, r1) <- splitForAllCo_ty_maybe co1
  , Just (tv2, eta2, r2) <- etaForAllCo_ty_maybe co2
  = push_trans tv1 eta1 r1 tv2 eta2 r2

  | Just (tv2, eta2, r2) <- splitForAllCo_ty_maybe co2
  , Just (tv1, eta1, r1) <- etaForAllCo_ty_maybe co1
  = push_trans tv1 eta1 r1 tv2 eta2 r2

  where
  push_trans tv1 eta1 r1 tv2 eta2 r2
    -- Given:
    --   co1 = /\ tv1 : eta1. r1
    --   co2 = /\ tv2 : eta2. r2
    -- Wanted:
    --   /\tv1 : (eta1;eta2).
    --   (r1; r2[tv2 |-> tv1 |> eta1])
    = fireTransRule "EtaAllTy_ty" co1 co2 $
      mkForAllCo tv1 (opt_trans is eta1 eta2) (opt_trans is' r1 r2')
    where
      is' = is `extendInScopeSet` tv1
      r2' = substCoWithUnchecked [tv2] [mkCastTy (TyVarTy tv1) eta1] r2

-- Push transitivity inside forall
-- forall over coercions.
opt_trans_rule is co1 co2
  | Just (cv1, eta1, r1) <- splitForAllCo_co_maybe co1
  , Just (cv2, eta2, r2) <- etaForAllCo_co_maybe co2
  = push_trans cv1 eta1 r1 cv2 eta2 r2

  | Just (cv2, eta2, r2) <- splitForAllCo_co_maybe co2
  , Just (cv1, eta1, r1) <- etaForAllCo_co_maybe co1
  = push_trans cv1 eta1 r1 cv2 eta2 r2

  where
  push_trans cv1 eta1 r1 cv2 eta2 r2
    -- Given:
    --   co1 = /\ cv1 : eta1. r1
    --   co2 = /\ cv2 : eta2. r2
    -- Wanted:
    --   n1 = nth 2 eta1
    --   n2 = nth 3 eta1
    --   nco = /\ cv1 : (eta1;eta2). (r1; r2[cv2 |-> (sym n1);cv1;n2])
    = fireTransRule "EtaAllTy_co" co1 co2 $
      mkForAllCo cv1 (opt_trans is eta1 eta2) (opt_trans is' r1 r2')
    where
      is'   = is `extendInScopeSet` cv1
      role  = coVarRole cv1
      eta1' = downgradeRole role Nominal eta1
      n1    = mkNthCo role 2 eta1'
      n2    = mkNthCo role 3 eta1'
      r2'   = substCo (zipCvSubst [cv2]
                                  [(mkSymCo n1) `mkTransCo`
                                   (mkCoVarCo cv1) `mkTransCo` n2])
                      r2

-- Push transitivity inside axioms
opt_trans_rule is co1 co2

  -- See Note [Why call checkAxInstCo during optimisation]
  -- TrPushSymAxR
  | Just (sym, con, ind, cos1) <- co1_is_axiom_maybe
  , True <- sym
  , Just cos2 <- matchAxiom sym con ind co2
  , let newAxInst = AxiomInstCo con ind (opt_transList is (map mkSymCo cos2) cos1)
  , Nothing <- checkAxInstCo newAxInst
  = fireTransRule "TrPushSymAxR" co1 co2 $ SymCo newAxInst

  -- TrPushAxR
  | Just (sym, con, ind, cos1) <- co1_is_axiom_maybe
  , False <- sym
  , Just cos2 <- matchAxiom sym con ind co2
  , let newAxInst = AxiomInstCo con ind (opt_transList is cos1 cos2)
  , Nothing <- checkAxInstCo newAxInst
  = fireTransRule "TrPushAxR" co1 co2 newAxInst

  -- TrPushSymAxL
  | Just (sym, con, ind, cos2) <- co2_is_axiom_maybe
  , True <- sym
  , Just cos1 <- matchAxiom (not sym) con ind co1
  , let newAxInst = AxiomInstCo con ind (opt_transList is cos2 (map mkSymCo cos1))
  , Nothing <- checkAxInstCo newAxInst
  = fireTransRule "TrPushSymAxL" co1 co2 $ SymCo newAxInst

  -- TrPushAxL
  | Just (sym, con, ind, cos2) <- co2_is_axiom_maybe
  , False <- sym
  , Just cos1 <- matchAxiom (not sym) con ind co1
  , let newAxInst = AxiomInstCo con ind (opt_transList is cos1 cos2)
  , Nothing <- checkAxInstCo newAxInst
  = fireTransRule "TrPushAxL" co1 co2 newAxInst

  -- TrPushAxSym/TrPushSymAx
  | Just (sym1, con1, ind1, cos1) <- co1_is_axiom_maybe
  , Just (sym2, con2, ind2, cos2) <- co2_is_axiom_maybe
  , con1 == con2
  , ind1 == ind2
  , sym1 == not sym2
  , let branch = coAxiomNthBranch con1 ind1
        qtvs = coAxBranchTyVars branch ++ coAxBranchCoVars branch
        lhs  = coAxNthLHS con1 ind1
        rhs  = coAxBranchRHS branch
        pivot_tvs = exactTyCoVarsOfType (if sym2 then rhs else lhs)
  , all (`elemVarSet` pivot_tvs) qtvs
  = fireTransRule "TrPushAxSym" co1 co2 $
    if sym2
       -- TrPushAxSym
    then liftCoSubstWith role qtvs (opt_transList is cos1 (map mkSymCo cos2)) lhs
       -- TrPushSymAx
    else liftCoSubstWith role qtvs (opt_transList is (map mkSymCo cos1) cos2) rhs
  where
    co1_is_axiom_maybe = isAxiom_maybe co1
    co2_is_axiom_maybe = isAxiom_maybe co2
    role = coercionRole co1 -- should be the same as coercionRole co2!
opt_trans_rule _ co1 co2 -- Identity rule | let ty1 = coercionLKind co1 r = coercionRole co1 ty2 = coercionRKind co2 , ty1 `eqType` ty2 = fireTransRule "RedTypeDirRefl" co1 co2 $ mkReflCo r ty2 opt_trans_rule _ _ _ = Nothing -- See Note [EtaAppCo] opt_trans_rule_app :: InScopeSet -> Coercion -- original left-hand coercion (printing only) -> Coercion -- original right-hand coercion (printing only) -> Coercion -- left-hand coercion "function" -> [Coercion] -- left-hand coercion "args" -> Coercion -- right-hand coercion "function" -> [Coercion] -- right-hand coercion "args" -> Maybe Coercion opt_trans_rule_app is orig_co1 orig_co2 co1a co1bs co2a co2bs | AppCo co1aa co1ab <- co1a , Just (co2aa, co2ab) <- etaAppCo_maybe co2a = opt_trans_rule_app is orig_co1 orig_co2 co1aa (co1ab:co1bs) co2aa (co2ab:co2bs) | AppCo co2aa co2ab <- co2a , Just (co1aa, co1ab) <- etaAppCo_maybe co1a = opt_trans_rule_app is orig_co1 orig_co2 co1aa (co1ab:co1bs) co2aa (co2ab:co2bs) | otherwise = assert (co1bs `equalLength` co2bs) $ fireTransRule ("EtaApps:" ++ show (length co1bs)) orig_co1 orig_co2 $ let rt1a = coercionRKind co1a lt2a = coercionLKind co2a rt2a = coercionRole co2a rt1bs = map coercionRKind co1bs lt2bs = map coercionLKind co2bs rt2bs = map coercionRole co2bs kcoa = mkKindCo $ buildCoercion lt2a rt1a kcobs = map mkKindCo $ zipWith buildCoercion lt2bs rt1bs co2a' = mkCoherenceLeftCo rt2a lt2a kcoa co2a co2bs' = zipWith3 mkGReflLeftCo rt2bs lt2bs kcobs co2bs'' = zipWith mkTransCo co2bs' co2bs in mkAppCos (opt_trans is co1a co2a') (zipWith (opt_trans is) co1bs co2bs'') fireTransRule :: String -> Coercion -> Coercion -> Coercion -> Maybe Coercion fireTransRule _rule _co1 _co2 res = Just res {- Note [Conflict checking with AxiomInstCo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following type family and axiom: type family Equal (a :: k) (b :: k) :: Bool type instance where Equal a a = True Equal a b = False -- Equal :: forall k::*. k -> k -> Bool axEqual :: { forall k::*. 
             forall a::k. Equal k a a ~ True
           ; forall k::*. forall a::k. forall b::k. Equal k a b ~ False }

We wish to disallow (axEqual[1] <*> <Int> <Int>). (Recall that the index is
0-based, so this is the second branch of the axiom.) The problem is that, on
the surface, it seems that (axEqual[1] <*> <Int> <Int>) ::
(Equal * Int Int ~ False) and that all is OK. But, all is not OK: we want to
use the first branch of the axiom in this case, not the second. The problem is
that the parameters of the first branch can unify with the supplied
coercions, thus meaning that the first branch should be taken. See also
Note [Apartness] in "GHC.Core.FamInstEnv".

Note [Why call checkAxInstCo during optimisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible that otherwise-good-looking optimisations meet with disaster
in the presence of axioms with multiple equations. Consider

type family Equal (a :: *) (b :: *) :: Bool where
  Equal a a = True
  Equal a b = False
type family Id (a :: *) :: * where
  Id a = a

axEq :: { [a::*].                    Equal a a ~ True
        ; [a::*, b::*].              Equal a b ~ False }
axId :: [a::*]. Id a ~ a

  co1 = Equal (axId[0] Int) (axId[0] Bool)
      :: Equal (Id Int) (Id Bool) ~ Equal Int Bool
  co2 = axEq[1] <Int> <Bool>
      :: Equal Int Bool ~ False

We wish to optimise (co1 ; co2). We end up in rule TrPushAxL, noting that
co2 is an axiom and that matchAxiom succeeds when looking at co1. But, what
happens when we push the coercions inside? We get

  co3 = axEq[1] (axId[0] Int) (axId[0] Bool)
      :: Equal (Id Int) (Id Bool) ~ False

which is bogus! This is because the type system isn't smart enough to know
that (Id Int) and (Id Bool) are Surely Apart, as they're headed by type
families. At the time of writing, I (Richard Eisenberg) couldn't think of
a way of detecting this any more efficient than just building the optimised
coercion and checking.

Note [EtaAppCo]
~~~~~~~~~~~~~~~
Suppose we're trying to optimize (co1a co1b ; co2a co2b). Ideally, we'd
like to rewrite this to (co1a ; co2a) (co1b ; co2b). The problem is that
the resultant coercions might not be well kinded.
Here is an example (things labeled with x don't matter in this example):

  k1 :: Type
  k2 :: Type

  a :: k1 -> Type
  b :: k1

  h :: k1 ~ k2

  co1a :: x1 ~ (a |> (h -> <>))
  co1b :: x2 ~ (b |> h)

  co2a :: a ~ x3
  co2b :: b ~ x4

First, convince yourself of the following:

  co1a co1b :: x1 x2 ~ (a |> (h -> <>)) (b |> h)
  co2a co2b :: a b   ~ x3 x4

  (a |> (h -> <>)) (b |> h) `eqType` a b

That last fact is due to Note [Non-trivial definitional equality] in
GHC.Core.TyCo.Rep, where we ignore coercions in types as long as two
types' kinds are the same. In our case, we meet this last condition,
because

  (a |> (h -> <>)) (b |> h) :: Type
    and
  a b :: Type

So the input coercion (co1a co1b ; co2a co2b) is well-formed. But the
suggested output coercions (co1a ; co2a) and (co1b ; co2b) are not -- the
kinds don't match up.

The solution here is to twiddle the kinds in the output coercions. First, we
need to find coercions

  ak :: kind(a |> (h -> <>)) ~ kind(a)
  bk :: kind(b |> h)         ~ kind(b)

This can be done with mkKindCo and buildCoercion. The latter assumes two
types are identical modulo casts and builds a coercion between them.

Then, we build (co1a ; co2a |> sym ak) and (co1b ; co2b |> sym bk) as the
output coercions. These are well-kinded.

Also, note that all of this is done after accumulating any nested AppCo
parameters. This step is to avoid quadratic behavior in calling coercionKind.

The problem described here was first found in dependent/should_compile/dynamic-paper.
-}

-- | Check to make sure that an AxInstCo is internally consistent.
-- Returns the conflicting branch, if it exists -- See Note [Conflict checking with AxiomInstCo] checkAxInstCo :: Coercion -> Maybe CoAxBranch -- defined here to avoid dependencies in GHC.Core.Coercion -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint checkAxInstCo (AxiomInstCo ax ind cos) = let branch = coAxiomNthBranch ax ind tvs = coAxBranchTyVars branch cvs = coAxBranchCoVars branch incomps = coAxBranchIncomps branch (tys, cotys) = splitAtList tvs (map coercionLKind cos) co_args = map stripCoercionTy cotys subst = zipTvSubst tvs tys `composeTCvSubst` zipCvSubst cvs co_args target = Type.substTys subst (coAxBranchLHS branch) in_scope = mkInScopeSet $ unionVarSets (map (tyCoVarsOfTypes . coAxBranchLHS) incomps) flattened_target = flattenTys in_scope target in check_no_conflict flattened_target incomps where check_no_conflict :: [Type] -> [CoAxBranch] -> Maybe CoAxBranch check_no_conflict _ [] = Nothing check_no_conflict flat (b@CoAxBranch { cab_lhs = lhs_incomp } : rest) -- See Note [Apartness] in GHC.Core.FamInstEnv | SurelyApart <- tcUnifyTysFG alwaysBindFun flat lhs_incomp = check_no_conflict flat rest | otherwise = Just b checkAxInstCo _ = Nothing ----------- wrapSym :: SymFlag -> Coercion -> Coercion wrapSym sym co | sym = mkSymCo co | otherwise = co -- | Conditionally set a role to be representational wrapRole :: ReprFlag -> Role -- ^ current role -> Coercion -> Coercion wrapRole False _ = id wrapRole True current = downgradeRole Representational current -- | If we require a representational role, return that. Otherwise, -- return the "default" role provided. 
chooseRole :: ReprFlag -> Role -- ^ "default" role -> Role chooseRole True _ = Representational chooseRole _ r = r ----------- isAxiom_maybe :: Coercion -> Maybe (Bool, CoAxiom Branched, Int, [Coercion]) isAxiom_maybe (SymCo co) | Just (sym, con, ind, cos) <- isAxiom_maybe co = Just (not sym, con, ind, cos) isAxiom_maybe (AxiomInstCo con ind cos) = Just (False, con, ind, cos) isAxiom_maybe _ = Nothing matchAxiom :: Bool -- True = match LHS, False = match RHS -> CoAxiom br -> Int -> Coercion -> Maybe [Coercion] matchAxiom sym ax@(CoAxiom { co_ax_tc = tc }) ind co | CoAxBranch { cab_tvs = qtvs , cab_cvs = [] -- can't infer these, so fail if there are any , cab_roles = roles , cab_lhs = lhs , cab_rhs = rhs } <- coAxiomNthBranch ax ind , Just subst <- liftCoMatch (mkVarSet qtvs) (if sym then (mkTyConApp tc lhs) else rhs) co , all (`isMappedByLC` subst) qtvs = zipWithM (liftCoSubstTyVar subst) roles qtvs | otherwise = Nothing ------------- compatible_co :: Coercion -> Coercion -> Bool -- Check whether (co1 . co2) will be well-kinded compatible_co co1 co2 = x1 `eqType` x2 where x1 = coercionRKind co1 x2 = coercionLKind co2 ------------- {- etaForAllCo ~~~~~~~~~~~~~~~~~ (1) etaForAllCo_ty_maybe Suppose we have g : all a1:k1.t1 ~ all a2:k2.t2 but g is *not* a ForAllCo. We want to eta-expand it. So, we do this: g' = all a1:(ForAllKindCo g).(InstCo g (a1 ~ a1 |> ForAllKindCo g)) Call the kind coercion h1 and the body coercion h2. We can see that h2 : t1 ~ t2[a2 |-> (a1 |> h1)] According to the typing rule for ForAllCo, we get that g' : all a1:k1.t1 ~ all a1:k2.(t2[a2 |-> (a1 |> h1)][a1 |-> a1 |> sym h1]) or g' : all a1:k1.t1 ~ all a1:k2.(t2[a2 |-> a1]) as desired. (2) etaForAllCo_co_maybe Suppose we have g : all c1:(s1~s2). t1 ~ all c2:(s3~s4). t2 Similarly, we do this g' = all c1:h1. h2 : all c1:(s1~s2). t1 ~ all c1:(s3~s4). 
t2[c2 |-> (sym eta1;c1;eta2)] [c1 |-> eta1;c1;sym eta2] Here, h1 = mkNthCo Nominal 0 g :: (s1~s2)~(s3~s4) eta1 = mkNthCo r 2 h1 :: (s1 ~ s3) eta2 = mkNthCo r 3 h1 :: (s2 ~ s4) h2 = mkInstCo g (cv1 ~ (sym eta1;c1;eta2)) -} etaForAllCo_ty_maybe :: Coercion -> Maybe (TyVar, Coercion, Coercion) -- Try to make the coercion be of form (forall tv:kind_co. co) etaForAllCo_ty_maybe co | Just (tv, kind_co, r) <- splitForAllCo_ty_maybe co = Just (tv, kind_co, r) | Pair ty1 ty2 <- coercionKind co , Just (tv1, _) <- splitForAllTyVar_maybe ty1 , isForAllTy_ty ty2 , let kind_co = mkNthCo Nominal 0 co = Just ( tv1, kind_co , mkInstCo co (mkGReflRightCo Nominal (TyVarTy tv1) kind_co)) | otherwise = Nothing etaForAllCo_co_maybe :: Coercion -> Maybe (CoVar, Coercion, Coercion) -- Try to make the coercion be of form (forall cv:kind_co. co) etaForAllCo_co_maybe co | Just (cv, kind_co, r) <- splitForAllCo_co_maybe co = Just (cv, kind_co, r) | Pair ty1 ty2 <- coercionKind co , Just (cv1, _) <- splitForAllCoVar_maybe ty1 , isForAllTy_co ty2 = let kind_co = mkNthCo Nominal 0 co r = coVarRole cv1 l_co = mkCoVarCo cv1 kind_co' = downgradeRole r Nominal kind_co r_co = (mkSymCo (mkNthCo r 2 kind_co')) `mkTransCo` l_co `mkTransCo` (mkNthCo r 3 kind_co') in Just ( cv1, kind_co , mkInstCo co (mkProofIrrelCo Nominal kind_co l_co r_co)) | otherwise = Nothing etaAppCo_maybe :: Coercion -> Maybe (Coercion,Coercion) -- If possible, split a coercion -- g :: t1a t1b ~ t2a t2b -- into a pair of coercions (left g, right g) etaAppCo_maybe co | Just (co1,co2) <- splitAppCo_maybe co = Just (co1,co2) | (Pair ty1 ty2, Nominal) <- coercionKindRole co , Just (_,t1) <- splitAppTy_maybe ty1 , Just (_,t2) <- splitAppTy_maybe ty2 , let isco1 = isCoercionTy t1 , let isco2 = isCoercionTy t2 , isco1 == isco2 = Just (LRCo CLeft co, LRCo CRight co) | otherwise = Nothing etaTyConAppCo_maybe :: TyCon -> Coercion -> Maybe [Coercion] -- If possible, split a coercion -- g :: T s1 .. sn ~ T t1 .. 
tn -- into [ Nth 0 g :: s1~t1, ..., Nth (n-1) g :: sn~tn ] etaTyConAppCo_maybe tc (TyConAppCo _ tc2 cos2) = assert (tc == tc2) $ Just cos2 etaTyConAppCo_maybe tc co | not (mustBeSaturated tc) , (Pair ty1 ty2, r) <- coercionKindRole co , Just (tc1, tys1) <- splitTyConApp_maybe ty1 , Just (tc2, tys2) <- splitTyConApp_maybe ty2 , tc1 == tc2 , isInjectiveTyCon tc r -- See Note [NthCo and newtypes] in GHC.Core.TyCo.Rep , let n = length tys1 , tys2 `lengthIs` n -- This can fail in an erroneous program -- E.g. T a ~# T a b -- #14607 = assert (tc == tc1) $ Just (decomposeCo n co (tyConRolesX r tc1)) -- NB: n might be <> tyConArity tc -- e.g. data family T a :: * -> * -- g :: T a b ~ T c d | otherwise = Nothing {- Note [Eta for AppCo] ~~~~~~~~~~~~~~~~~~~~ Suppose we have g :: s1 t1 ~ s2 t2 Then we can't necessarily make left g :: s1 ~ s2 right g :: t1 ~ t2 because it's possible that s1 :: * -> * t1 :: * s2 :: (*->*) -> * t2 :: * -> * and in that case (left g) does not have the same kind on either side. It's enough to check that kind t1 = kind t2 because if g is well-kinded then kind (s1 t2) = kind (s2 t2) and these two imply kind s1 = kind s2 -} optForAllCoBndr :: LiftingContext -> Bool -> TyCoVar -> Coercion -> (LiftingContext, TyCoVar, Coercion) optForAllCoBndr env sym = substForAllCoBndrUsingLC sym (opt_co4_wrap env sym False Nominal) env ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/ConLike.hs0000644000000000000000000001745614472400112020420 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[ConLike]{@ConLike@: Constructor-like things} -} module GHC.Core.ConLike ( ConLike(..) 
, isVanillaConLike , conLikeArity , conLikeFieldLabels , conLikeInstOrigArgTys , conLikeUserTyVarBinders , conLikeExTyCoVars , conLikeName , conLikeStupidTheta , conLikeImplBangs , conLikeFullSig , conLikeResTy , conLikeFieldType , conLikesWithFields , conLikeIsInfix , conLikeHasBuilder ) where import GHC.Prelude import GHC.Core.DataCon import GHC.Core.PatSyn import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Utils.Misc import GHC.Types.Name import GHC.Types.Basic import GHC.Core.TyCo.Rep (Type, ThetaType) import GHC.Types.Var import GHC.Core.Type(mkTyConApp) import GHC.Core.Multiplicity import Data.Maybe( isJust ) import qualified Data.Data as Data {- ************************************************************************ * * \subsection{Constructor-like things} * * ************************************************************************ -} -- | A constructor-like thing data ConLike = RealDataCon DataCon | PatSynCon PatSyn -- | Is this a \'vanilla\' constructor-like thing -- (no existentials, no provided constraints)? isVanillaConLike :: ConLike -> Bool isVanillaConLike (RealDataCon con) = isVanillaDataCon con isVanillaConLike (PatSynCon ps ) = isVanillaPatSyn ps {- ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Eq ConLike where (==) = eqConLike eqConLike :: ConLike -> ConLike -> Bool eqConLike x y = getUnique x == getUnique y -- There used to be an Ord ConLike instance here that used Unique for ordering. -- It was intentionally removed to prevent determinism problems. -- See Note [Unique Determinism] in GHC.Types.Unique. 
instance Uniquable ConLike where getUnique (RealDataCon dc) = getUnique dc getUnique (PatSynCon ps) = getUnique ps instance NamedThing ConLike where getName (RealDataCon dc) = getName dc getName (PatSynCon ps) = getName ps instance Outputable ConLike where ppr (RealDataCon dc) = ppr dc ppr (PatSynCon ps) = ppr ps instance OutputableBndr ConLike where pprInfixOcc (RealDataCon dc) = pprInfixOcc dc pprInfixOcc (PatSynCon ps) = pprInfixOcc ps pprPrefixOcc (RealDataCon dc) = pprPrefixOcc dc pprPrefixOcc (PatSynCon ps) = pprPrefixOcc ps instance Data.Data ConLike where -- don't traverse? toConstr _ = abstractConstr "ConLike" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "ConLike" -- | Number of arguments conLikeArity :: ConLike -> Arity conLikeArity (RealDataCon data_con) = dataConSourceArity data_con conLikeArity (PatSynCon pat_syn) = patSynArity pat_syn -- | Names of fields used for selectors conLikeFieldLabels :: ConLike -> [FieldLabel] conLikeFieldLabels (RealDataCon data_con) = dataConFieldLabels data_con conLikeFieldLabels (PatSynCon pat_syn) = patSynFieldLabels pat_syn -- | Returns just the instantiated /value/ argument types of a 'ConLike', -- (excluding dictionary args) conLikeInstOrigArgTys :: ConLike -> [Type] -> [Scaled Type] conLikeInstOrigArgTys (RealDataCon data_con) tys = dataConInstOrigArgTys data_con tys conLikeInstOrigArgTys (PatSynCon pat_syn) tys = map unrestricted $ patSynInstArgTys pat_syn tys -- | 'TyVarBinder's for the type variables of the 'ConLike'. For pattern -- synonyms, this will always consist of the universally quantified variables -- followed by the existentially quantified type variables. For data -- constructors, the situation is slightly more complicated—see -- @Note [DataCon user type variable binders]@ in "GHC.Core.DataCon". 
conLikeUserTyVarBinders :: ConLike -> [InvisTVBinder] conLikeUserTyVarBinders (RealDataCon data_con) = dataConUserTyVarBinders data_con conLikeUserTyVarBinders (PatSynCon pat_syn) = patSynUnivTyVarBinders pat_syn ++ patSynExTyVarBinders pat_syn -- The order here is because of the order in `GHC.Tc.TyCl.PatSyn`. -- | Existentially quantified type/coercion variables conLikeExTyCoVars :: ConLike -> [TyCoVar] conLikeExTyCoVars (RealDataCon dcon1) = dataConExTyCoVars dcon1 conLikeExTyCoVars (PatSynCon psyn1) = patSynExTyVars psyn1 conLikeName :: ConLike -> Name conLikeName (RealDataCon data_con) = dataConName data_con conLikeName (PatSynCon pat_syn) = patSynName pat_syn -- | The \"stupid theta\" of the 'ConLike', such as @data Eq a@ in: -- -- > data Eq a => T a = ... -- It is empty for `PatSynCon` as they do not allow such contexts. -- See @Note [The stupid context]@ in "GHC.Core.DataCon". conLikeStupidTheta :: ConLike -> ThetaType conLikeStupidTheta (RealDataCon data_con) = dataConStupidTheta data_con conLikeStupidTheta (PatSynCon {}) = [] -- | 'conLikeHasBuilder' returns True except for -- uni-directional pattern synonyms, which have no builder conLikeHasBuilder :: ConLike -> Bool conLikeHasBuilder (RealDataCon {}) = True conLikeHasBuilder (PatSynCon pat_syn) = isJust (patSynBuilder pat_syn) -- | Returns the strictness information for each constructor conLikeImplBangs :: ConLike -> [HsImplBang] conLikeImplBangs (RealDataCon data_con) = dataConImplBangs data_con conLikeImplBangs (PatSynCon pat_syn) = replicate (patSynArity pat_syn) HsLazy -- | Returns the type of the whole pattern conLikeResTy :: ConLike -> [Type] -> Type conLikeResTy (RealDataCon con) tys = mkTyConApp (dataConTyCon con) tys conLikeResTy (PatSynCon ps) tys = patSynInstResTy ps tys -- | The \"full signature\" of the 'ConLike' returns, in order: -- -- 1) The universally quantified type variables -- -- 2) The existentially quantified type/coercion variables -- -- 3) The equality specification -- -- 4) The 
provided theta (the constraints provided by a match) -- -- 5) The required theta (the constraints required for a match) -- -- 6) The original argument types (i.e. before -- any change of the representation of the type) -- -- 7) The original result type conLikeFullSig :: ConLike -> ([TyVar], [TyCoVar], [EqSpec] -- Why tyvars for universal but tycovars for existential? -- See Note [Existential coercion variables] in GHC.Core.DataCon , ThetaType, ThetaType, [Scaled Type], Type) conLikeFullSig (RealDataCon con) = let (univ_tvs, ex_tvs, eq_spec, theta, arg_tys, res_ty) = dataConFullSig con -- Required theta is empty as normal data cons require no additional -- constraints for a match in (univ_tvs, ex_tvs, eq_spec, theta, [], arg_tys, res_ty) conLikeFullSig (PatSynCon pat_syn) = let (univ_tvs, req, ex_tvs, prov, arg_tys, res_ty) = patSynSig pat_syn -- eqSpec is empty in (univ_tvs, ex_tvs, [], prov, req, arg_tys, res_ty) -- | Extract the type for any given labelled field of the 'ConLike' conLikeFieldType :: ConLike -> FieldLabelString -> Type conLikeFieldType (PatSynCon ps) label = patSynFieldType ps label conLikeFieldType (RealDataCon dc) label = dataConFieldType dc label -- | The ConLikes that have *all* the given fields conLikesWithFields :: [ConLike] -> [FieldLabelString] -> [ConLike] conLikesWithFields con_likes lbls = filter has_flds con_likes where has_flds dc = all (has_fld dc) lbls has_fld dc lbl = any (\ fl -> flLabel fl == lbl) (conLikeFieldLabels dc) conLikeIsInfix :: ConLike -> Bool conLikeIsInfix (RealDataCon dc) = dataConIsInfix dc conLikeIsInfix (PatSynCon ps) = patSynIsInfix ps ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/DataCon.hs0000644000000000000000000021367314472400112020404 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[DataCon]{@DataCon@: Data Constructors} -} {-# LANGUAGE DeriveDataTypeable #-} module GHC.Core.DataCon ( -- * Main data types DataCon, DataConRep(..), 
SrcStrictness(..), SrcUnpackedness(..), HsSrcBang(..), HsImplBang(..), StrictnessMark(..), ConTag, DataConEnv, -- ** Equality specs EqSpec, mkEqSpec, eqSpecTyVar, eqSpecType, eqSpecPair, eqSpecPreds, substEqSpec, filterEqSpec, -- ** Field labels FieldLabel(..), FieldLabelString, -- ** Type construction mkDataCon, fIRST_TAG, -- ** Type deconstruction dataConRepType, dataConInstSig, dataConFullSig, dataConName, dataConIdentity, dataConTag, dataConTagZ, dataConTyCon, dataConOrigTyCon, dataConWrapperType, dataConNonlinearType, dataConDisplayType, dataConUnivTyVars, dataConExTyCoVars, dataConUnivAndExTyCoVars, dataConUserTyVars, dataConUserTyVarBinders, dataConEqSpec, dataConTheta, dataConStupidTheta, dataConOtherTheta, dataConInstArgTys, dataConOrigArgTys, dataConOrigResTy, dataConInstOrigArgTys, dataConRepArgTys, dataConInstUnivs, dataConFieldLabels, dataConFieldType, dataConFieldType_maybe, dataConSrcBangs, dataConSourceArity, dataConRepArity, dataConIsInfix, dataConWorkId, dataConWrapId, dataConWrapId_maybe, dataConImplicitTyThings, dataConRepStrictness, dataConImplBangs, dataConBoxer, splitDataProductType_maybe, -- ** Predicates on DataCons isNullarySrcDataCon, isNullaryRepDataCon, isTupleDataCon, isBoxedTupleDataCon, isUnboxedTupleDataCon, isUnboxedSumDataCon, isVanillaDataCon, isNewDataCon, classDataCon, dataConCannotMatch, dataConUserTyVarsArePermuted, isBanged, isMarkedStrict, cbvFromStrictMark, eqHsBang, isSrcStrict, isSrcUnpacked, specialPromotedDc, -- ** Promotion related functions promoteDataCon ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Id.Make ( DataConBoxer ) import GHC.Core.Type as Type import GHC.Core.Coercion import GHC.Core.Unify import GHC.Core.TyCon import GHC.Core.TyCo.Subst import GHC.Core.Multiplicity import {-# SOURCE #-} GHC.Types.TyThing import GHC.Types.FieldLabel import GHC.Types.SourceText import GHC.Core.Class import GHC.Types.Name import GHC.Builtin.Names import GHC.Core.Predicate import GHC.Types.Var import 
GHC.Types.Var.Env import GHC.Types.Basic import GHC.Data.FastString import GHC.Unit.Types import GHC.Unit.Module.Name import GHC.Utils.Binary import GHC.Types.Unique.FM ( UniqFM ) import GHC.Types.Unique.Set import GHC.Builtin.Uniques( mkAlphaTyVarUnique ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.ByteString (ByteString) import qualified Data.ByteString.Builder as BSB import qualified Data.ByteString.Lazy as LBS import qualified Data.Data as Data import Data.Char import Data.List( find ) {- Note [Data constructor representation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following Haskell data type declaration data T = T !Int ![Int] Using the strictness annotations, GHC will represent this as data T = T Int# [Int] That is, the Int has been unboxed. Furthermore, the Haskell source construction T e1 e2 is translated to case e1 of { I# x -> case e2 of { r -> T x r }} That is, the first argument is unboxed, and the second is evaluated. Finally, pattern matching is translated too: case e of { T a b -> ... } becomes case e of { T a' b -> let a = I# a' in ... } To keep ourselves sane, we name the different versions of the data constructor differently, as follows. Note [Data Constructor Naming] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each data constructor C has two, and possibly up to four, Names associated with it: OccName Name space Name of Notes --------------------------------------------------------------------------- The "data con itself" C DataName DataCon In dom( GlobalRdrEnv ) The "worker data con" C VarName Id The worker The "wrapper data con" $WC VarName Id The wrapper The "newtype coercion" :CoT TcClsName TyCon EVERY data constructor (incl for newtypes) has the former two (the data con itself, and its worker. But only some data constructors have a wrapper (see Note [The need for a wrapper]). Each of these three has a distinct Unique. 
The "data con itself" name appears in the output of the renamer, and names the Haskell-source data constructor. The type checker translates it into either the wrapper Id (if it exists) or worker Id (otherwise). The data con has one or two Ids associated with it: The "worker Id", is the actual data constructor. * Every data constructor (newtype or data type) has a worker * The worker is very like a primop, in that it has no binding. * For a *data* type, the worker *is* the data constructor; it has no unfolding * For a *newtype*, the worker has a compulsory unfolding which does a cast, e.g. newtype T = MkT Int The worker for MkT has unfolding \\(x:Int). x `cast` sym CoT Here CoT is the type constructor, witnessing the FC axiom axiom CoT : T = Int The "wrapper Id", \$WC, goes as follows * Its type is exactly what it looks like in the source program. * It is an ordinary function, and it gets a top-level binding like any other function. * The wrapper Id isn't generated for a data type if there is nothing for the wrapper to do. That is, if its defn would be \$wC = C Note [Data constructor workers and wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Algebraic data types - Always have a worker, with no unfolding - May or may not have a wrapper; see Note [The need for a wrapper] * Newtypes - Always have a worker, which has a compulsory unfolding (just a cast) - May or may not have a wrapper; see Note [The need for a wrapper] * INVARIANT: the dictionary constructor for a class never has a wrapper. * See Note [Data Constructor Naming] for how the worker and wrapper are named * Neither_ the worker _nor_ the wrapper take the dcStupidTheta dicts as arguments * The wrapper (if it exists) takes dcOrigArgTys as its arguments. The worker takes dataConRepArgTys as its arguments If the wrapper is absent, dataConRepArgTys is the same as dcOrigArgTys * The 'NoDataConRep' case of DataConRep is important. 
Not only is it efficient, but it also ensures that the wrapper is replaced by the worker (because it *is* the worker) even when there are no args. E.g. in f (:) x the (:) *is* the worker. This is really important in rule matching, (We could match on the wrappers, but that makes it less likely that rules will match when we bring bits of unfoldings together.) Note [The need for a wrapper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why might the wrapper have anything to do? The full story is in wrapper_reqd in GHC.Types.Id.Make.mkDataConRep. * Unboxing strict fields (with -funbox-strict-fields) data T = MkT !(Int,Int) \$wMkT :: (Int,Int) -> T \$wMkT (x,y) = MkT x y Notice that the worker has two fields where the wapper has just one. That is, the worker has type MkT :: Int -> Int -> T * Equality constraints for GADTs data T a where { MkT :: a -> T [a] } The worker gets a type with explicit equality constraints, thus: MkT :: forall a b. (a=[b]) => b -> T a The wrapper has the programmer-specified type: \$wMkT :: a -> T [a] \$wMkT a x = MkT [a] a [a] x The third argument is a coercion [a] :: [a]~[a] * Data family instances may do a cast on the result * Type variables may be permuted; see MkId Note [Data con wrappers and GADT syntax] Note [The stupid context] ~~~~~~~~~~~~~~~~~~~~~~~~~ Data types can have a context: data (Eq a, Ord b) => T a b = T1 a b | T2 a And that makes the constructors have a context too. A constructor's context isn't necessarily the same as the data type's context, however. Per the Haskell98 Report, the part of the datatype context that is used in a data constructor is the largest subset of the datatype context that constrains only the type variables free in the data constructor's field types. For example, here are the types of T1 and T2: T1 :: (Eq a, Ord b) => a -> b -> T a b T2 :: (Eq a) => a -> T a b Notice that T2's context is "thinned". 
Since its field is of type `a`, only the part of the datatype context that mentions `a`—that is, `Eq a`—is included in T2's context. On the other hand, T1's fields mention both `a` and `b`, so T1's context includes all of the datatype context. Furthermore, this context pops up when pattern matching (though GHC hasn't implemented this, but it is in H98, and I've fixed GHC so that it now does): f (T2 x) = x gets inferred type f :: Eq a => T a b -> a I say the context is "stupid" because the dictionaries passed are immediately discarded -- they do nothing and have no benefit. (See Note [Instantiating stupid theta] in GHC.Tc.Gen.Head.) It's a flaw in the language. GHC has made some efforts to correct this flaw. In GHC, datatype contexts are not available by default. Instead, one must explicitly opt in to them by using the DatatypeContexts extension. To discourage their use, GHC has deprecated DatatypeContexts. Some other notes about stupid contexts: * Stupid contexts can interact badly with `deriving`. For instance, it's unclear how to make this derived Functor instance typecheck: data Eq a => T a = MkT a deriving Functor This is because the derived instance would need to look something like `instance Functor T where ...`, but there is nowhere to mention the requisite `Eq a` constraint. For this reason, GHC will throw an error if a user attempts to derive an instance for Functor (or a Functor-like class) where the last type variable is used in a datatype context. For Generic(1), the requirements are even harsher, as stupid contexts are not allowed at all in derived Generic(1) instances. (We could consider relaxing this requirement somewhat, although no one has asked for this yet.) Stupid contexts are permitted when deriving instances of non-Functor-like classes, or when deriving instances of Functor-like classes where the last type variable isn't mentioned in the stupid context. 
For example, the following is permitted: data Show a => T a = MkT deriving Eq Note that because of the "thinning" behavior mentioned above, the generated Eq instance should not mention `Show a`, as the type of MkT doesn't require it. That is, the following should be generated (#20501): instance Eq (T a) where (MkT == MkT) = True * It's not obvious how stupid contexts should interact with GADTs. For this reason, GHC disallows combining datatype contexts with GADT syntax. As a result, dcStupidTheta is always empty for data types defined using GADT syntax. ************************************************************************ * * \subsection{Data constructors} * * ************************************************************************ -} -- | A data constructor -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose','GHC.Parser.Annotation.AnnComma' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data DataCon = MkData { dcName :: Name, -- This is the name of the *source data con* -- (see "Note [Data Constructor Naming]" above) dcUnique :: Unique, -- Cached from Name dcTag :: ConTag, -- ^ Tag, used for ordering 'DataCon's -- Running example: -- -- *** As declared by the user -- data T a b c where -- MkT :: forall c y x b. (x~y,Ord x) => x -> y -> T (x,y) b c -- *** As represented internally -- data T a b c where -- MkT :: forall a b c. forall x y. (a~(x,y),x~y,Ord x) -- => x -> y -> T a b c -- -- The next six fields express the type of the constructor, in pieces -- e.g. -- -- dcUnivTyVars = [a,b,c] -- dcExTyCoVars = [x,y] -- dcUserTyVarBinders = [c,y,x,b] -- dcEqSpec = [a~(x,y)] -- dcOtherTheta = [x~y, Ord x] -- dcOrigArgTys = [x,y] -- dcRepTyCon = T -- In general, the dcUnivTyVars are NOT NECESSARILY THE SAME AS THE -- TYVARS FOR THE PARENT TyCon. 
(This is a change (Oct05): previously, -- vanilla datacons guaranteed to have the same type variables as their -- parent TyCon, but that seems ugly.) They can be different in the case -- where a GADT constructor uses different names for the universal -- tyvars than does the tycon. For example: -- -- data H a where -- MkH :: b -> H b -- -- Here, the tyConTyVars of H will be [a], but the dcUnivTyVars of MkH -- will be [b]. dcVanilla :: Bool, -- True <=> This is a vanilla Haskell 98 data constructor -- Its type is of form -- forall a1..an . t1 -> ... tm -> T a1..an -- No existentials, no coercions, nothing. -- That is: dcExTyCoVars = dcEqSpec = dcOtherTheta = [] -- NB 1: newtypes always have a vanilla data con -- NB 2: a vanilla constructor can still be declared in GADT-style -- syntax, provided its type looks like the above. -- The declaration format is held in the TyCon (algTcGadtSyntax) -- Universally-quantified type vars [a,b,c] -- INVARIANT: length matches arity of the dcRepTyCon -- INVARIANT: result type of data con worker is exactly (T a b c) -- COROLLARY: The dcUnivTyVars are always in one-to-one correspondence with -- the tyConTyVars of the parent TyCon dcUnivTyVars :: [TyVar], -- Existentially-quantified type and coercion vars [x,y] -- For an example involving coercion variables, -- Why tycovars? See Note [Existential coercion variables] dcExTyCoVars :: [TyCoVar], -- INVARIANT: the UnivTyVars and ExTyCoVars all have distinct OccNames -- Reason: less confusing, and easier to generate Iface syntax -- The type/coercion vars in the order the user wrote them [c,y,x,b] -- INVARIANT: the set of tyvars in dcUserTyVarBinders is exactly the set -- of tyvars (*not* covars) of dcExTyCoVars unioned with the -- set of dcUnivTyVars whose tyvars do not appear in dcEqSpec -- See Note [DataCon user type variable binders] dcUserTyVarBinders :: [InvisTVBinder], dcEqSpec :: [EqSpec], -- Equalities derived from the result type, -- _as written by the programmer_. 
-- Only non-dependent GADT equalities (dependent -- GADT equalities are in the covars of -- dcExTyCoVars). -- This field allows us to move conveniently between the two ways -- of representing a GADT constructor's type: -- MkT :: forall a b. (a ~ [b]) => b -> T a -- MkT :: forall b. b -> T [b] -- Each equality is of the form (a ~ ty), where 'a' is one of -- the universally quantified type variables. Moreover, the -- only place in the DataCon where this 'a' will occur is in -- dcUnivTyVars. See [The dcEqSpec domain invariant]. -- The next two fields give the type context of the data constructor -- (aside from the GADT constraints, -- which are given by the dcExpSpec) -- In GADT form, this is *exactly* what the programmer writes, even if -- the context constrains only universally quantified variables -- MkT :: forall a b. (a ~ b, Ord b) => a -> T a b dcOtherTheta :: ThetaType, -- The other constraints in the data con's type -- other than those in the dcEqSpec dcStupidTheta :: ThetaType, -- The context of the data type declaration -- data Eq a => T a = ... -- or, rather, a "thinned" version thereof -- "Thinned", because the Report says -- to eliminate any constraints that don't mention -- tyvars free in the arg types for this constructor. -- See Note [The stupid context]. -- -- INVARIANT: the free tyvars of dcStupidTheta are a subset of dcUnivTyVars -- Reason: dcStupidTeta is gotten by thinning the stupid theta from the tycon -- -- "Stupid", because the dictionaries aren't used for anything. -- Indeed, [as of March 02] they are no longer in the type of -- the wrapper Id, because that makes it harder to use the wrap-id -- to rebuild values after record selection or in generics. dcOrigArgTys :: [Scaled Type], -- Original argument types -- (before unboxing and flattening of strict fields) dcOrigResTy :: Type, -- Original result type, as seen by the user -- NB: for a data instance, the original user result type may -- differ from the DataCon's representation TyCon. 
Example -- data instance T [a] where MkT :: a -> T [a] -- The dcOrigResTy is T [a], but the dcRepTyCon might be R:TList -- Now the strictness annotations and field labels of the constructor dcSrcBangs :: [HsSrcBang], -- See Note [Bangs on data constructor arguments] -- -- The [HsSrcBang] as written by the programmer. -- -- Matches 1-1 with dcOrigArgTys -- Hence length = dataConSourceArity dataCon dcFields :: [FieldLabel], -- Field labels for this constructor, in the -- same order as the dcOrigArgTys; -- length = 0 (if not a record) or dataConSourceArity. -- The curried worker function that corresponds to the constructor: -- It doesn't have an unfolding; the code generator saturates these Ids -- and allocates a real constructor when it finds one. dcWorkId :: Id, -- Constructor representation dcRep :: DataConRep, -- Cached; see Note [DataCon arities] -- INVARIANT: dcRepArity == length dataConRepArgTys + count isCoVar (dcExTyCoVars) -- INVARIANT: dcSourceArity == length dcOrigArgTys dcRepArity :: Arity, dcSourceArity :: Arity, -- Result type of constructor is T t1..tn dcRepTyCon :: TyCon, -- Result tycon, T dcRepType :: Type, -- Type of the constructor -- forall a x y. (a~(x,y), x~y, Ord x) => -- x -> y -> T a -- (this is *not* of the constructor wrapper Id: -- see Note [Data con representation] below) -- Notice that the existential type parameters come *second*. -- Reason: in a case expression we may find: -- case (e :: T t) of -- MkT x y co1 co2 (d:Ord x) (v:r) (w:F s) -> ... -- It's convenient to apply the rep-type of MkT to 't', to get -- forall x y. (t~(x,y), x~y, Ord x) => x -> y -> T t -- and use that to check the pattern. Mind you, this is really only -- used in GHC.Core.Lint. 
dcInfix :: Bool, -- True <=> declared infix -- Used for Template Haskell and 'deriving' only -- The actual fixity is stored elsewhere dcPromoted :: TyCon -- The promoted TyCon -- See Note [Promoted data constructors] in GHC.Core.TyCon } {- Note [TyVarBinders in DataCons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For the TyVarBinders in a DataCon and PatSyn: * Each argument flag is Inferred or Specified. None are Required. (A DataCon is a term-level function; see Note [No Required TyCoBinder in terms] in GHC.Core.TyCo.Rep.) Why do we need the TyVarBinders, rather than just the TyVars? So that we can construct the right type for the DataCon with its foralls attributed the correct visibility. That in turn governs whether you can use visible type application at a call of the data constructor. See also [DataCon user type variable binders] for an extended discussion on the order in which TyVarBinders appear in a DataCon. Note [Existential coercion variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For now (Aug 2018) we can't write coercion quantifications in source Haskell, but we can in Core. Consider having: data T :: forall k. k -> k -> Constraint where MkT :: forall k (a::k) (b::k). forall k' (c::k') (co::k'~k). (b~(c|>co)) => T k a b dcUnivTyVars = [k,a,b] dcExTyCoVars = [k',c,co] dcUserTyVarBinders = [k,a,k',c] dcEqSpec = [b~(c|>co)] dcOtherTheta = [] dcOrigArgTys = [] dcRepTyCon = T Function call 'dataConKindEqSpec' returns [k'~k] Note [DataCon arities] ~~~~~~~~~~~~~~~~~~~~~~ A `DataCon`'s source arity and core representation arity may differ: `dcSourceArity` does not take constraints into account, but `dcRepArity` does. The additional arguments taken into account by `dcRepArity` include quantified dictionaries and coercion arguments, lifted and unlifted (despite the unlifted coercion arguments having a zero-width runtime representation). 
For example: MkT :: Ord a => a -> T a dcSourceArity = 1 dcRepArity = 2 MkU :: (b ~ '[]) => U b dcSourceArity = 0 dcRepArity = 1 Note [DataCon user type variable binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In System FC, data constructor type signatures always quantify over all of their universal type variables, followed by their existential type variables. Normally, this isn't a problem, as most datatypes naturally quantify their type variables in this order anyway. For example: data T a b = forall c. MkT b c Here, we have `MkT :: forall {k} (a :: k) (b :: *) (c :: *). b -> c -> T a b`, where k, a, and b are universal and c is existential. (The inferred variable k isn't available for TypeApplications, hence why it's in braces.) This is a perfectly reasonable order to use, as the syntax of H98-style datatypes (+ ExistentialQuantification) suggests it. Things become more complicated when GADT syntax enters the picture. Consider this example: data X a where MkX :: forall b a. b -> Proxy a -> X a If we adopt the earlier approach of quantifying all the universal variables followed by all the existential ones, GHC would come up with this type signature for MkX: MkX :: forall {k} (a :: k) (b :: *). b -> Proxy a -> X a But this is not what we want at all! After all, if a user were to use TypeApplications on MkX, they would expect to instantiate `b` before `a`, as that's the order in which they were written in the `forall`. (See #11721.) Instead, we'd like GHC to come up with this type signature: MkX :: forall {k} (b :: *) (a :: k). b -> Proxy a -> X a In fact, even if we left off the explicit forall: data X a where MkX :: b -> Proxy a -> X a Then a user should still expect `b` to be quantified before `a`, since according to the rules of TypeApplications, in the absence of `forall` GHC performs a stable topological sort on the type variables in the user-written type signature, which would place `b` before `a`. 
But as noted above, enacting this behavior is not entirely trivial, as System FC demands the variables go in universal-then-existential order under the hood. Our solution is thus to equip DataCon with two different sets of type variables: * dcUnivTyVars and dcExTyCoVars, for the universal type variable and existential type/coercion variables, respectively. Their order is irrelevant for the purposes of TypeApplications, and as a consequence, they do not come equipped with visibilities (that is, they are TyVars/TyCoVars instead of TyCoVarBinders). * dcUserTyVarBinders, for the type variables binders in the order in which they originally arose in the user-written type signature. Their order *does* matter for TypeApplications, so they are full TyVarBinders, complete with visibilities. This encoding has some redundancy. The set of tyvars in dcUserTyVarBinders consists precisely of: * The set of tyvars in dcUnivTyVars whose type variables do not appear in dcEqSpec, unioned with: * The set of tyvars (*not* covars) in dcExTyCoVars No covars here because because they're not user-written The word "set" is used above because the order in which the tyvars appear in dcUserTyVarBinders can be completely different from the order in dcUnivTyVars or dcExTyCoVars. That is, the tyvars in dcUserTyVarBinders are a permutation of (tyvars of dcExTyCoVars + a subset of dcUnivTyVars). But aside from the ordering, they in fact share the same type variables (with the same Uniques). We sometimes refer to this as "the dcUserTyVarBinders invariant". dcUserTyVarBinders, as the name suggests, is the one that users will see most of the time. It's used when computing the type signature of a data constructor wrapper (see dataConWrapperType), and as a result, it's what matters from a TypeApplications perspective. 
Note [The dcEqSpec domain invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this example of a GADT constructor: data Y a where MkY :: Bool -> Y Bool The user-written type of MkY is `Bool -> Y Bool`, but what is the underlying Core type for MkY? There are two conceivable possibilities: 1. MkY :: forall a. (a ~# Bool) => Bool -> Y a 2. MkY :: forall a. (a ~# Bool) => a -> Y a In practice, GHC picks (1) as the Core type for MkY. This is because we maintain an invariant that the type variables in the domain of dcEqSpec will only ever appear in the dcUnivTyVars. As a consequence, the type variables in the domain of dcEqSpec will /never/ appear in the dcExTyCoVars, dcOtherTheta, dcOrigArgTys, or dcOrigResTy; these can only ever mention variables from dcUserTyVarBinders, which excludes things in the domain of dcEqSpec. (See Note [DataCon user type variable binders].) This explains why GHC would not pick (2) as the Core type, since the argument type `a` mentions a type variable in the dcEqSpec. There are certain parts of the codebase where it is convenient to apply the substitution arising from the dcEqSpec to the dcUnivTyVars in order to obtain the user-written return type of a GADT constructor. A consequence of the dcEqSpec domain invariant is that you /never/ need to apply the substitution to any other part of the constructor type, as they don't require it. 
-} -- | Data Constructor Representation -- See Note [Data constructor workers and wrappers] data DataConRep = -- NoDataConRep means that the data con has no wrapper NoDataConRep -- DCR means that the data con has a wrapper | DCR { dcr_wrap_id :: Id -- Takes src args, unboxes/flattens, -- and constructs the representation , dcr_boxer :: DataConBoxer , dcr_arg_tys :: [Scaled Type] -- Final, representation argument types, -- after unboxing and flattening, -- and *including* all evidence args , dcr_stricts :: [StrictnessMark] -- 1-1 with dcr_arg_tys -- See also Note [Data-con worker strictness] , dcr_bangs :: [HsImplBang] -- The actual decisions made (including failures) -- about the original arguments; 1-1 with orig_arg_tys -- See Note [Bangs on data constructor arguments] } type DataConEnv a = UniqFM DataCon a -- Keyed by DataCon ------------------------- -- | Haskell Source Bang -- -- Bangs on data constructor arguments as the user wrote them in the -- source code. -- -- @(HsSrcBang _ SrcUnpack SrcLazy)@ and -- @(HsSrcBang _ SrcUnpack NoSrcStrict)@ (without StrictData) makes no sense, we -- emit a warning (in checkValidDataCon) and treat it like -- @(HsSrcBang _ NoSrcUnpack SrcLazy)@ data HsSrcBang = HsSrcBang SourceText -- Note [Pragma source text] in GHC.Types.SourceText SrcUnpackedness SrcStrictness deriving Data.Data -- | Haskell Implementation Bang -- -- Bangs of data constructor arguments as generated by the compiler -- after consulting HsSrcBang, flags, etc. data HsImplBang = HsLazy -- ^ Lazy field, or one with an unlifted type | HsStrict -- ^ Strict but not unpacked field | HsUnpack (Maybe Coercion) -- ^ Strict and unpacked field -- co :: arg-ty ~ product-ty HsBang deriving Data.Data -- | Source Strictness -- -- What strictness annotation the user wrote data SrcStrictness = SrcLazy -- ^ Lazy, ie '~' | SrcStrict -- ^ Strict, ie '!' 
| NoSrcStrict -- ^ no strictness annotation deriving (Eq, Data.Data) -- | Source Unpackedness -- -- What unpackedness the user requested data SrcUnpackedness = SrcUnpack -- ^ {-# UNPACK #-} specified | SrcNoUnpack -- ^ {-# NOUNPACK #-} specified | NoSrcUnpack -- ^ no unpack pragma deriving (Eq, Data.Data) ------------------------- -- StrictnessMark is used to indicate strictness -- of the DataCon *worker* fields data StrictnessMark = MarkedStrict | NotMarkedStrict deriving Eq -- | An 'EqSpec' is a tyvar/type pair representing an equality made in -- rejigging a GADT constructor data EqSpec = EqSpec TyVar Type -- | Make a non-dependent 'EqSpec' mkEqSpec :: TyVar -> Type -> EqSpec mkEqSpec tv ty = EqSpec tv ty eqSpecTyVar :: EqSpec -> TyVar eqSpecTyVar (EqSpec tv _) = tv eqSpecType :: EqSpec -> Type eqSpecType (EqSpec _ ty) = ty eqSpecPair :: EqSpec -> (TyVar, Type) eqSpecPair (EqSpec tv ty) = (tv, ty) eqSpecPreds :: [EqSpec] -> ThetaType eqSpecPreds spec = [ mkPrimEqPred (mkTyVarTy tv) ty | EqSpec tv ty <- spec ] -- | Substitute in an 'EqSpec'. Precondition: if the LHS of the EqSpec -- is mapped in the substitution, it is mapped to a type variable, not -- a full type. substEqSpec :: TCvSubst -> EqSpec -> EqSpec substEqSpec subst (EqSpec tv ty) = EqSpec tv' (substTy subst ty) where tv' = getTyVar "substEqSpec" (substTyVar subst tv) -- | Filter out any 'TyVar's mentioned in an 'EqSpec'. filterEqSpec :: [EqSpec] -> [TyVar] -> [TyVar] filterEqSpec eq_spec = filter not_in_eq_spec where not_in_eq_spec var = all (not . (== var) . eqSpecTyVar) eq_spec instance Outputable EqSpec where ppr (EqSpec tv ty) = ppr (tv, ty) {- Note [Data-con worker strictness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notice that we do *not* say the worker Id is strict even if the data constructor is declared strict e.g. data T = MkT ![Int] Bool Even though most often the evals are done by the *wrapper* $WMkT, there are situations in which tag inference will re-insert evals around the worker. 
So for all intents and purposes the *worker* MkT is strict, too! Unfortunately, if we exposed accurate strictness of DataCon workers, we'd see the following transformation: f xs = case xs of xs' { __DEFAULT -> ... case MkT xs b of x { __DEFAULT -> [x] } } -- DmdAnal: Strict in xs ==> { drop-seq, binder swap on xs' } f xs = case MkT xs b of x { __DEFAULT -> [x] } -- DmdAnal: Still strict in xs ==> { case-to-let } f xs = let x = MkT xs' b in [x] -- DmdAnal: No longer strict in xs! I.e., we are ironically losing strictness in `xs` by dropping the eval on `xs` and then doing case-to-let. The issue is that `exprIsHNF` currently says that every DataCon worker app is a value. The implicit assumption is that surrounding evals will have evaluated strict fields like `xs` before! But now that we had just dropped the eval on `xs`, that assumption is no longer valid. Long story short: By keeping the demand signature lazy, the Simplifier will not drop the eval on `xs` and using `exprIsHNF` to decide case-to-let and others remains sound. Similarly, during demand analysis in dmdTransformDataConSig, we bump up the field demand with `C_01`, *not* `C_11`, because the latter exposes too much strictness that will drop the eval on `xs` above. This issue is discussed at length in "Failed idea: no wrappers for strict data constructors" in #21497 and #22475. Note [Bangs on data constructor arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = MkT !Int {-# UNPACK #-} !Int Bool When compiling the module, GHC will decide how to represent MkT, depending on the optimisation level, and settings of flags like -funbox-small-strict-fields. 
Terminology: * HsSrcBang: What the user wrote Constructors: HsSrcBang * HsImplBang: What GHC decided Constructors: HsLazy, HsStrict, HsUnpack * If T was defined in this module, MkT's dcSrcBangs field records the [HsSrcBang] of what the user wrote; in the example [ HsSrcBang _ NoSrcUnpack SrcStrict , HsSrcBang _ SrcUnpack SrcStrict , HsSrcBang _ NoSrcUnpack NoSrcStrictness] * However, if T was defined in an imported module, the importing module must follow the decisions made in the original module, regardless of the flag settings in the importing module. Also see Note [Bangs on imported data constructors] in GHC.Types.Id.Make * The dcr_bangs field of the dcRep field records the [HsImplBang] If T was defined in this module, Without -O the dcr_bangs might be [HsStrict, HsStrict, HsLazy] With -O it might be [HsStrict, HsUnpack _, HsLazy] With -funbox-small-strict-fields it might be [HsUnpack, HsUnpack _, HsLazy] With -XStrictData it might be [HsStrict, HsUnpack _, HsStrict] Note [Data con representation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The dcRepType field contains the type of the representation of a constructor This may differ from the type of the constructor *Id* (built by MkId.mkDataConId) for two reasons: a) the constructor Id may be overloaded, but the dictionary isn't stored e.g. data Eq a => T a = MkT a a b) the constructor may store an unboxed version of a strict field. So whenever this module talks about the representation of a data constructor what it means is the DataCon with all Unpacking having been applied. We can think of this as the Core representation. Here's an example illustrating the Core representation: data Ord a => T a = MkT Int! a Void# Here T :: Ord a => Int -> a -> Void# -> T a but the rep type is Trep :: Int# -> a -> Void# -> T a Actually, the unboxed part isn't implemented yet! Note that this representation is still *different* from runtime representation. (Which is what STG uses after unarise). 
This is how T would end up being used in STG post-unarise: let x = T 1# y in ... case x of T int a -> ... The Void# argument is dropped and the boxed int is replaced by an unboxed one. In essence we only generate binders for runtime relevant values. We also flatten out unboxed tuples in this process. See the unarise pass for details on how this is done. But as an example consider `data S = MkS Bool (# Bool | Char #)` which when matched on would result in an alternative with three binders like this MkS bool tag tpl_field -> See Note [Translating unboxed sums to unboxed tuples] and Note [Unarisation] for the details of this transformation. ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Eq DataCon where a == b = getUnique a == getUnique b a /= b = getUnique a /= getUnique b instance Uniquable DataCon where getUnique = dcUnique instance NamedThing DataCon where getName = dcName instance Outputable DataCon where ppr con = ppr (dataConName con) instance OutputableBndr DataCon where pprInfixOcc con = pprInfixName (dataConName con) pprPrefixOcc con = pprPrefixName (dataConName con) instance Data.Data DataCon where -- don't traverse? toConstr _ = abstractConstr "DataCon" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "DataCon" instance Outputable HsSrcBang where ppr (HsSrcBang _ prag mark) = ppr prag <+> ppr mark instance Outputable HsImplBang where ppr HsLazy = text "Lazy" ppr (HsUnpack Nothing) = text "Unpacked" ppr (HsUnpack (Just co)) = text "Unpacked" <> parens (ppr co) ppr HsStrict = text "StrictNotUnpacked" instance Outputable SrcStrictness where ppr SrcLazy = char '~' ppr SrcStrict = char '!' 
ppr NoSrcStrict = empty instance Outputable SrcUnpackedness where ppr SrcUnpack = text "{-# UNPACK #-}" ppr SrcNoUnpack = text "{-# NOUNPACK #-}" ppr NoSrcUnpack = empty instance Outputable StrictnessMark where ppr MarkedStrict = text "!" ppr NotMarkedStrict = empty instance Binary StrictnessMark where put_ bh NotMarkedStrict = putByte bh 0 put_ bh MarkedStrict = putByte bh 1 get bh = do h <- getByte bh case h of 0 -> return NotMarkedStrict 1 -> return MarkedStrict _ -> panic "Invalid binary format" instance Binary SrcStrictness where put_ bh SrcLazy = putByte bh 0 put_ bh SrcStrict = putByte bh 1 put_ bh NoSrcStrict = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return SrcLazy 1 -> return SrcStrict _ -> return NoSrcStrict instance Binary SrcUnpackedness where put_ bh SrcNoUnpack = putByte bh 0 put_ bh SrcUnpack = putByte bh 1 put_ bh NoSrcUnpack = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return SrcNoUnpack 1 -> return SrcUnpack _ -> return NoSrcUnpack -- | Compare strictness annotations eqHsBang :: HsImplBang -> HsImplBang -> Bool eqHsBang HsLazy HsLazy = True eqHsBang HsStrict HsStrict = True eqHsBang (HsUnpack Nothing) (HsUnpack Nothing) = True eqHsBang (HsUnpack (Just c1)) (HsUnpack (Just c2)) = eqType (coercionType c1) (coercionType c2) eqHsBang _ _ = False isBanged :: HsImplBang -> Bool isBanged (HsUnpack {}) = True isBanged (HsStrict {}) = True isBanged HsLazy = False isSrcStrict :: SrcStrictness -> Bool isSrcStrict SrcStrict = True isSrcStrict _ = False isSrcUnpacked :: SrcUnpackedness -> Bool isSrcUnpacked SrcUnpack = True isSrcUnpacked _ = False isMarkedStrict :: StrictnessMark -> Bool isMarkedStrict NotMarkedStrict = False isMarkedStrict _ = True -- All others are strict cbvFromStrictMark :: StrictnessMark -> CbvMark cbvFromStrictMark NotMarkedStrict = NotMarkedCbv cbvFromStrictMark MarkedStrict = MarkedCbv {- ********************************************************************* * * \subsection{Construction} * * 
********************************************************************* -} -- | Build a new data constructor mkDataCon :: Name -> Bool -- ^ Is the constructor declared infix? -> TyConRepName -- ^ TyConRepName for the promoted TyCon -> [HsSrcBang] -- ^ Strictness/unpack annotations, from user -> [FieldLabel] -- ^ Field labels for the constructor, -- if it is a record, otherwise empty -> [TyVar] -- ^ Universals. -> [TyCoVar] -- ^ Existentials. -> [InvisTVBinder] -- ^ User-written 'TyVarBinder's. -- These must be Inferred/Specified. -- See @Note [TyVarBinders in DataCons]@ -> [EqSpec] -- ^ GADT equalities -> KnotTied ThetaType -- ^ Theta-type occurring before the arguments proper -> [KnotTied (Scaled Type)] -- ^ Original argument types -> KnotTied Type -- ^ Original result type -> RuntimeRepInfo -- ^ See comments on 'GHC.Core.TyCon.RuntimeRepInfo' -> KnotTied TyCon -- ^ Representation type constructor -> ConTag -- ^ Constructor tag -> ThetaType -- ^ The "stupid theta", context of the data -- declaration e.g. @data Eq a => T a ...@ -> Id -- ^ Worker Id -> DataConRep -- ^ Representation -> DataCon -- Can get the tag from the TyCon mkDataCon name declared_infix prom_info arg_stricts -- Must match orig_arg_tys 1-1 fields univ_tvs ex_tvs user_tvbs eq_spec theta orig_arg_tys orig_res_ty rep_info rep_tycon tag stupid_theta work_id rep -- Warning: mkDataCon is not a good place to check certain invariants. -- If the programmer writes the wrong result type in the decl, thus: -- data T a where { MkT :: S } -- then it's possible that the univ_tvs may hit an assertion failure -- if you pull on univ_tvs. This case is checked by checkValidDataCon, -- so the error is detected properly... it's just that assertions here -- are a little dodgy. 
= con where is_vanilla = null ex_tvs && null eq_spec && null theta con = MkData {dcName = name, dcUnique = nameUnique name, dcVanilla = is_vanilla, dcInfix = declared_infix, dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs, dcUserTyVarBinders = user_tvbs, dcEqSpec = eq_spec, dcOtherTheta = theta, dcStupidTheta = stupid_theta, dcOrigArgTys = orig_arg_tys, dcOrigResTy = orig_res_ty, dcRepTyCon = rep_tycon, dcSrcBangs = arg_stricts, dcFields = fields, dcTag = tag, dcRepType = rep_ty, dcWorkId = work_id, dcRep = rep, dcSourceArity = length orig_arg_tys, dcRepArity = length rep_arg_tys + count isCoVar ex_tvs, dcPromoted = promoted } -- The 'arg_stricts' passed to mkDataCon are simply those for the -- source-language arguments. We add extra ones for the -- dictionary arguments right here. rep_arg_tys = dataConRepArgTys con rep_ty = case rep of -- If the DataCon has no wrapper, then the worker's type *is* the -- user-facing type, so we can simply use dataConWrapperType. NoDataConRep -> dataConWrapperType con -- If the DataCon has a wrapper, then the worker's type is never seen -- by the user. The visibilities we pick do not matter here. DCR{} -> mkInfForAllTys univ_tvs $ mkTyCoInvForAllTys ex_tvs $ mkVisFunTys rep_arg_tys $ mkTyConApp rep_tycon (mkTyVarTys univ_tvs) -- See Note [Promoted data constructors] in GHC.Core.TyCon prom_tv_bndrs = [ mkNamedTyConBinder (Invisible spec) tv | Bndr tv spec <- user_tvbs ] fresh_names = freshNames (map getName user_tvbs) -- fresh_names: make sure that the "anonymous" tyvars don't -- clash in name or unique with the universal/existential ones. -- Tiresome! 
And unnecessary because these tyvars are never looked at prom_theta_bndrs = [ mkAnonTyConBinder InvisArg (mkTyVar n t) {- Invisible -} | (n,t) <- fresh_names `zip` theta ] prom_arg_bndrs = [ mkAnonTyConBinder VisArg (mkTyVar n t) {- Visible -} | (n,t) <- dropList theta fresh_names `zip` map scaledThing orig_arg_tys ] prom_bndrs = prom_tv_bndrs ++ prom_theta_bndrs ++ prom_arg_bndrs prom_res_kind = orig_res_ty promoted = mkPromotedDataCon con name prom_info prom_bndrs prom_res_kind roles rep_info roles = map (\tv -> if isTyVar tv then Nominal else Phantom) (univ_tvs ++ ex_tvs) ++ map (const Representational) (theta ++ map scaledThing orig_arg_tys) freshNames :: [Name] -> [Name] -- Make an infinite list of Names whose Uniques and OccNames -- differ from those in the 'avoid' list freshNames avoids = [ mkSystemName uniq occ | n <- [0..] , let uniq = mkAlphaTyVarUnique n occ = mkTyVarOccFS (mkFastString ('x' : show n)) , not (uniq `elementOfUniqSet` avoid_uniqs) , not (occ `elemOccSet` avoid_occs) ] where avoid_uniqs :: UniqSet Unique avoid_uniqs = mkUniqSet (map getUnique avoids) avoid_occs :: OccSet avoid_occs = mkOccSet (map getOccName avoids) -- | The 'Name' of the 'DataCon', giving it a unique, rooted identification dataConName :: DataCon -> Name dataConName = dcName -- | The tag used for ordering 'DataCon's dataConTag :: DataCon -> ConTag dataConTag = dcTag dataConTagZ :: DataCon -> ConTagZ dataConTagZ con = dataConTag con - fIRST_TAG -- | The type constructor that we are building via this data constructor dataConTyCon :: DataCon -> TyCon dataConTyCon = dcRepTyCon -- | The original type constructor used in the definition of this data -- constructor. In case of a data family instance, that will be the family -- type constructor. dataConOrigTyCon :: DataCon -> TyCon dataConOrigTyCon dc | Just (tc, _) <- tyConFamInst_maybe (dcRepTyCon dc) = tc | otherwise = dcRepTyCon dc -- | The representation type of the data constructor, i.e. 
the sort -- type that will represent values of this type at runtime dataConRepType :: DataCon -> Type dataConRepType = dcRepType -- | Should the 'DataCon' be presented infix? dataConIsInfix :: DataCon -> Bool dataConIsInfix = dcInfix -- | The universally-quantified type variables of the constructor dataConUnivTyVars :: DataCon -> [TyVar] dataConUnivTyVars (MkData { dcUnivTyVars = tvbs }) = tvbs -- | The existentially-quantified type/coercion variables of the constructor -- including dependent (kind-) GADT equalities dataConExTyCoVars :: DataCon -> [TyCoVar] dataConExTyCoVars (MkData { dcExTyCoVars = tvbs }) = tvbs -- | Both the universal and existential type/coercion variables of the constructor dataConUnivAndExTyCoVars :: DataCon -> [TyCoVar] dataConUnivAndExTyCoVars (MkData { dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs }) = univ_tvs ++ ex_tvs -- See Note [DataCon user type variable binders] -- | The type variables of the constructor, in the order the user wrote them dataConUserTyVars :: DataCon -> [TyVar] dataConUserTyVars (MkData { dcUserTyVarBinders = tvbs }) = binderVars tvbs -- See Note [DataCon user type variable binders] -- | 'InvisTVBinder's for the type variables of the constructor, in the order the -- user wrote them dataConUserTyVarBinders :: DataCon -> [InvisTVBinder] dataConUserTyVarBinders = dcUserTyVarBinders -- | Equalities derived from the result type of the data constructor, as written -- by the programmer in any GADT declaration. This includes *all* GADT-like -- equalities, including those written in by hand by the programmer. 
dataConEqSpec :: DataCon -> [EqSpec] dataConEqSpec con@(MkData { dcEqSpec = eq_spec, dcOtherTheta = theta }) = dataConKindEqSpec con ++ eq_spec ++ [ spec -- heterogeneous equality | Just (tc, [_k1, _k2, ty1, ty2]) <- map splitTyConApp_maybe theta , tc `hasKey` heqTyConKey , spec <- case (getTyVar_maybe ty1, getTyVar_maybe ty2) of (Just tv1, _) -> [mkEqSpec tv1 ty2] (_, Just tv2) -> [mkEqSpec tv2 ty1] _ -> [] ] ++ [ spec -- homogeneous equality | Just (tc, [_k, ty1, ty2]) <- map splitTyConApp_maybe theta , tc `hasKey` eqTyConKey , spec <- case (getTyVar_maybe ty1, getTyVar_maybe ty2) of (Just tv1, _) -> [mkEqSpec tv1 ty2] (_, Just tv2) -> [mkEqSpec tv2 ty1] _ -> [] ] -- | Dependent (kind-level) equalities in a constructor. -- There are extracted from the existential variables. -- See Note [Existential coercion variables] dataConKindEqSpec :: DataCon -> [EqSpec] dataConKindEqSpec (MkData {dcExTyCoVars = ex_tcvs}) -- It is used in 'dataConEqSpec' (maybe also 'dataConFullSig' in the future), -- which are frequently used functions. -- For now (Aug 2018) this function always return empty set as we don't really -- have coercion variables. -- In the future when we do, we might want to cache this information in DataCon -- so it won't be computed every time when aforementioned functions are called. = [ EqSpec tv ty | cv <- ex_tcvs , isCoVar cv , let (_, _, ty1, ty, _) = coVarKindsTypesRole cv tv = getTyVar "dataConKindEqSpec" ty1 ] -- | The *full* constraints on the constructor type, including dependent GADT -- equalities. dataConTheta :: DataCon -> ThetaType dataConTheta con@(MkData { dcEqSpec = eq_spec, dcOtherTheta = theta }) = eqSpecPreds (dataConKindEqSpec con ++ eq_spec) ++ theta -- | Get the Id of the 'DataCon' worker: a function that is the "actual" -- constructor and has no top level binding in the program. The type may -- be different from the obvious one written in the source program. 
Panics -- if there is no such 'Id' for this 'DataCon' dataConWorkId :: DataCon -> Id dataConWorkId dc = dcWorkId dc -- | Get the Id of the 'DataCon' wrapper: a function that wraps the "actual" -- constructor so it has the type visible in the source program: c.f. -- 'dataConWorkId'. -- Returns Nothing if there is no wrapper, which occurs for an algebraic data -- constructor and also for a newtype (whose constructor is inlined -- compulsorily) dataConWrapId_maybe :: DataCon -> Maybe Id dataConWrapId_maybe dc = case dcRep dc of NoDataConRep -> Nothing DCR { dcr_wrap_id = wrap_id } -> Just wrap_id -- | Returns an Id which looks like the Haskell-source constructor by using -- the wrapper if it exists (see 'dataConWrapId_maybe') and failing over to -- the worker (see 'dataConWorkId') dataConWrapId :: DataCon -> Id dataConWrapId dc = case dcRep dc of NoDataConRep-> dcWorkId dc -- worker=wrapper DCR { dcr_wrap_id = wrap_id } -> wrap_id -- | Find all the 'Id's implicitly brought into scope by the data constructor. 
Currently, -- the union of the 'dataConWorkId' and the 'dataConWrapId' dataConImplicitTyThings :: DataCon -> [TyThing] dataConImplicitTyThings (MkData { dcWorkId = work, dcRep = rep }) = [mkAnId work] ++ wrap_ids where wrap_ids = case rep of NoDataConRep -> [] DCR { dcr_wrap_id = wrap } -> [mkAnId wrap] -- | The labels for the fields of this particular 'DataCon' dataConFieldLabels :: DataCon -> [FieldLabel] dataConFieldLabels = dcFields -- | Extract the type for any given labelled field of the 'DataCon' dataConFieldType :: DataCon -> FieldLabelString -> Type dataConFieldType con label = case dataConFieldType_maybe con label of Just (_, ty) -> ty Nothing -> pprPanic "dataConFieldType" (ppr con <+> ppr label) -- | Extract the label and type for any given labelled field of the -- 'DataCon', or return 'Nothing' if the field does not belong to it dataConFieldType_maybe :: DataCon -> FieldLabelString -> Maybe (FieldLabel, Type) dataConFieldType_maybe con label = find ((== label) . flLabel . fst) (dcFields con `zip` (scaledThing <$> dcOrigArgTys con)) -- | Strictness/unpack annotations, from user; or, for imported -- DataCons, from the interface file -- The list is in one-to-one correspondence with the arity of the 'DataCon' dataConSrcBangs :: DataCon -> [HsSrcBang] dataConSrcBangs = dcSrcBangs -- | Source-level arity of the data constructor dataConSourceArity :: DataCon -> Arity dataConSourceArity (MkData { dcSourceArity = arity }) = arity -- | Gives the number of value arguments (including zero-width coercions) -- stored by the given `DataCon`'s worker in its Core representation. 
This may -- differ from the number of arguments that appear in the source code; see also -- Note [DataCon arities] dataConRepArity :: DataCon -> Arity dataConRepArity (MkData { dcRepArity = arity }) = arity -- | Return whether there are any argument types for this 'DataCon's original source type -- See Note [DataCon arities] isNullarySrcDataCon :: DataCon -> Bool isNullarySrcDataCon dc = dataConSourceArity dc == 0 -- | Return whether this `DataCon`'s worker, in its Core representation, takes -- any value arguments. -- -- In particular, remember that we include coercion arguments in the arity of -- the Core representation of the `DataCon` -- both lifted and unlifted -- coercions, despite the latter having zero-width runtime representation. -- -- See also Note [DataCon arities]. isNullaryRepDataCon :: DataCon -> Bool isNullaryRepDataCon dc = dataConRepArity dc == 0 dataConRepStrictness :: DataCon -> [StrictnessMark] -- ^ Give the demands on the arguments of a -- Core constructor application (Con dc args) dataConRepStrictness dc = case dcRep dc of NoDataConRep -> [NotMarkedStrict | _ <- dataConRepArgTys dc] DCR { dcr_stricts = strs } -> strs dataConImplBangs :: DataCon -> [HsImplBang] -- The implementation decisions about the strictness/unpack of each -- source program argument to the data constructor dataConImplBangs dc = case dcRep dc of NoDataConRep -> replicate (dcSourceArity dc) HsLazy DCR { dcr_bangs = bangs } -> bangs dataConBoxer :: DataCon -> Maybe DataConBoxer dataConBoxer (MkData { dcRep = DCR { dcr_boxer = boxer } }) = Just boxer dataConBoxer _ = Nothing dataConInstSig :: DataCon -> [Type] -- Instantiate the *universal* tyvars with these types -> ([TyCoVar], ThetaType, [Type]) -- Return instantiated existentials -- theta and arg tys -- ^ Instantiate the universal tyvars of a data con, -- returning -- ( instantiated existentials -- , instantiated constraints including dependent GADT equalities -- which are *also* listed in the instantiated existentials -- , 
instantiated args) dataConInstSig con@(MkData { dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs , dcOrigArgTys = arg_tys }) univ_tys = ( ex_tvs' , substTheta subst (dataConTheta con) , substTys subst (map scaledThing arg_tys)) where univ_subst = zipTvSubst univ_tvs univ_tys (subst, ex_tvs') = Type.substVarBndrs univ_subst ex_tvs -- | The \"full signature\" of the 'DataCon' returns, in order: -- -- 1) The result of 'dataConUnivTyVars' -- -- 2) The result of 'dataConExTyCoVars' -- -- 3) The non-dependent GADT equalities. -- Dependent GADT equalities are implied by coercion variables in -- return value (2). -- -- 4) The other constraints of the data constructor type, excluding GADT -- equalities -- -- 5) The original argument types to the 'DataCon' (i.e. before -- any change of the representation of the type) with linearity -- annotations -- -- 6) The original result type of the 'DataCon' dataConFullSig :: DataCon -> ([TyVar], [TyCoVar], [EqSpec], ThetaType, [Scaled Type], Type) dataConFullSig (MkData {dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs, dcEqSpec = eq_spec, dcOtherTheta = theta, dcOrigArgTys = arg_tys, dcOrigResTy = res_ty}) = (univ_tvs, ex_tvs, eq_spec, theta, arg_tys, res_ty) dataConOrigResTy :: DataCon -> Type dataConOrigResTy dc = dcOrigResTy dc -- | The \"stupid theta\" of the 'DataCon', such as @data Eq a@ in: -- -- > data Eq a => T a = ... -- -- See @Note [The stupid context]@. dataConStupidTheta :: DataCon -> ThetaType dataConStupidTheta dc = dcStupidTheta dc {- Note [Displaying linear fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A constructor with a linear field can be written either as MkT :: a %1 -> T a (with -XLinearTypes) or MkT :: a -> T a (with -XNoLinearTypes) There are three different methods to retrieve a type of a datacon. They differ in how linear fields are handled. 1. dataConWrapperType: The type of the wrapper in Core. For example, dataConWrapperType for Maybe is a %1 -> Just a. 2. 
dataConNonlinearType: The type of the constructor, with linear arrows replaced by unrestricted ones. Used when we don't want to introduce linear types to user (in holes and in types in hie used by haddock). 3. dataConDisplayType (takes a boolean indicating if -XLinearTypes is enabled): The type we'd like to show in error messages, :info and -ddump-types. Ideally, it should reflect the type written by the user; the function returns a type with arrows that would be required to write this constructor under the current setting of -XLinearTypes. In principle, this type can be different from the user's source code when the value of -XLinearTypes has changed, but we don't expect this to cause much trouble. Due to internal plumbing in checkValidDataCon, we can't just return a Doc. The multiplicity of arrows returned by dataConDisplayType and dataConDisplayType is used only for pretty-printing. -} dataConWrapperType :: DataCon -> Type -- ^ The user-declared type of the data constructor -- in the nice-to-read form: -- -- > T :: forall a b. a -> b -> T [a] -- -- rather than: -- -- > T :: forall a c. forall b. (c~[a]) => a -> b -> T c -- -- The type variables are quantified in the order that the user wrote them. -- See @Note [DataCon user type variable binders]@. -- -- NB: If the constructor is part of a data instance, the result type -- mentions the family tycon, not the internal one. 
dataConWrapperType (MkData { dcUserTyVarBinders = user_tvbs, dcOtherTheta = theta, dcOrigArgTys = arg_tys, dcOrigResTy = res_ty }) = mkInvisForAllTys user_tvbs $ mkInvisFunTysMany theta $ mkVisFunTys arg_tys $ res_ty dataConNonlinearType :: DataCon -> Type -- Just like dataConWrapperType, but with the -- linearity on the arguments all zapped to Many dataConNonlinearType (MkData { dcUserTyVarBinders = user_tvbs, dcOtherTheta = theta, dcOrigArgTys = arg_tys, dcOrigResTy = res_ty, dcStupidTheta = stupid_theta }) = mkInvisForAllTys user_tvbs $ mkInvisFunTysMany (stupid_theta ++ theta) $ mkVisFunTys arg_tys' $ res_ty where arg_tys' = map (\(Scaled w t) -> Scaled (case w of One -> Many; _ -> w) t) arg_tys dataConDisplayType :: Bool -> DataCon -> Type dataConDisplayType show_linear_types dc = if show_linear_types then dataConWrapperType dc else dataConNonlinearType dc -- | Finds the instantiated types of the arguments required to construct a -- 'DataCon' representation -- NB: these INCLUDE any dictionary args -- but EXCLUDE the data-declaration context, which is discarded -- It's all post-flattening etc; this is a representation type dataConInstArgTys :: DataCon -- ^ A datacon with no existentials or equality constraints -- However, it can have a dcTheta (notably it can be a -- class dictionary, with superclasses) -> [Type] -- ^ Instantiated at these types -> [Scaled Type] dataConInstArgTys dc@(MkData {dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs}) inst_tys = assertPpr (univ_tvs `equalLength` inst_tys) (text "dataConInstArgTys" <+> ppr dc $$ ppr univ_tvs $$ ppr inst_tys) $ assertPpr (null ex_tvs) (ppr dc) $ map (mapScaledType (substTyWith univ_tvs inst_tys)) (dataConRepArgTys dc) -- | Returns just the instantiated /value/ argument types of a 'DataCon', -- (excluding dictionary args) dataConInstOrigArgTys :: DataCon -- Works for any DataCon -> [Type] -- Includes existential tyvar args, but NOT -- equality constraints or dicts -> [Scaled Type] -- For vanilla datacons, 
it's all quite straightforward -- But for the call in GHC.HsToCore.Match.Constructor, we really do want just -- the value args dataConInstOrigArgTys dc@(MkData {dcOrigArgTys = arg_tys, dcUnivTyVars = univ_tvs, dcExTyCoVars = ex_tvs}) inst_tys = assertPpr (tyvars `equalLength` inst_tys) (text "dataConInstOrigArgTys" <+> ppr dc $$ ppr tyvars $$ ppr inst_tys) $ substScaledTys subst arg_tys where tyvars = univ_tvs ++ ex_tvs subst = zipTCvSubst tyvars inst_tys -- | Given a data constructor @dc@ with /n/ universally quantified type -- variables @a_{1}@, @a_{2}@, ..., @a_{n}@, and given a list of argument -- types @dc_args@ of length /m/ where /m/ <= /n/, then: -- -- @ -- dataConInstUnivs dc dc_args -- @ -- -- Will return: -- -- @ -- [dc_arg_{1}, dc_arg_{2}, ..., dc_arg_{m}, a_{m+1}, ..., a_{n}] -- @ -- -- That is, return the list of universal type variables with -- @a_{1}@, @a_{2}@, ..., @a_{m}@ instantiated with -- @dc_arg_{1}@, @dc_arg_{2}@, ..., @dc_arg_{m}@. It is possible for @m@ to -- be less than @n@, in which case the remaining @n - m@ elements will simply -- be universal type variables (with their kinds possibly instantiated). -- -- Examples: -- -- * Given the data constructor @D :: forall a b. Foo a b@ and -- @dc_args@ @[Int, Bool]@, then @dataConInstUnivs D dc_args@ will return -- @[Int, Bool]@. -- -- * Given the data constructor @D :: forall a b. Foo a b@ and -- @dc_args@ @[Int]@, then @@dataConInstUnivs D dc_args@ will return -- @[Int, b]@. -- -- * Given the data constructor @E :: forall k (a :: k). Bar k a@ and -- @dc_args@ @[Type]@, then @@dataConInstUnivs D dc_args@ will return -- @[Type, (a :: Type)]@. -- -- This is primarily used in @GHC.Tc.Deriv.*@ in service of instantiating data -- constructors' field types. -- See @Note [Instantiating field types in stock deriving]@ for a notable -- example of this. 
dataConInstUnivs :: DataCon -> [Type] -> [Type] dataConInstUnivs dc dc_args = chkAppend dc_args $ map mkTyVarTy dc_args_suffix where (dc_univs_prefix, dc_univs_suffix) = -- Assert that m <= n assertPpr (dc_args `leLength` dataConUnivTyVars dc) (text "dataConInstUnivs" <+> ppr dc_args <+> ppr (dataConUnivTyVars dc)) $ splitAt (length dc_args) $ dataConUnivTyVars dc (_, dc_args_suffix) = substTyVarBndrs prefix_subst dc_univs_suffix prefix_subst = mkTvSubst prefix_in_scope prefix_env prefix_in_scope = mkInScopeSet $ tyCoVarsOfTypes dc_args prefix_env = zipTyEnv dc_univs_prefix dc_args -- | Returns the argument types of the wrapper, excluding all dictionary arguments -- and without substituting for any type variables dataConOrigArgTys :: DataCon -> [Scaled Type] dataConOrigArgTys dc = dcOrigArgTys dc -- | Returns constraints in the wrapper type, other than those in the dataConEqSpec dataConOtherTheta :: DataCon -> ThetaType dataConOtherTheta dc = dcOtherTheta dc -- | Returns the arg types of the worker, including *all* non-dependent -- evidence, after any flattening has been done and without substituting for -- any type variables dataConRepArgTys :: DataCon -> [Scaled Type] dataConRepArgTys (MkData { dcRep = rep , dcEqSpec = eq_spec , dcOtherTheta = theta , dcOrigArgTys = orig_arg_tys }) = case rep of NoDataConRep -> assert (null eq_spec) $ (map unrestricted theta) ++ orig_arg_tys DCR { dcr_arg_tys = arg_tys } -> arg_tys -- | The string @package:module.name@ identifying a constructor, which is attached -- to its info table and used by the GHCi debugger and the heap profiler dataConIdentity :: DataCon -> ByteString -- We want this string to be UTF-8, so we get the bytes directly from the FastStrings. 
dataConIdentity dc = LBS.toStrict $ BSB.toLazyByteString $ mconcat [ BSB.shortByteString $ fastStringToShortByteString $ unitFS $ moduleUnit mod , BSB.int8 $ fromIntegral (ord ':') , BSB.shortByteString $ fastStringToShortByteString $ moduleNameFS $ moduleName mod , BSB.int8 $ fromIntegral (ord '.') , BSB.shortByteString $ fastStringToShortByteString $ occNameFS $ nameOccName name ] where name = dataConName dc mod = assert (isExternalName name) $ nameModule name isTupleDataCon :: DataCon -> Bool isTupleDataCon (MkData {dcRepTyCon = tc}) = isTupleTyCon tc isBoxedTupleDataCon :: DataCon -> Bool isBoxedTupleDataCon (MkData {dcRepTyCon = tc}) = isBoxedTupleTyCon tc isUnboxedTupleDataCon :: DataCon -> Bool isUnboxedTupleDataCon (MkData {dcRepTyCon = tc}) = isUnboxedTupleTyCon tc isUnboxedSumDataCon :: DataCon -> Bool isUnboxedSumDataCon (MkData {dcRepTyCon = tc}) = isUnboxedSumTyCon tc -- | Vanilla 'DataCon's are those that are nice boring Haskell 98 constructors isVanillaDataCon :: DataCon -> Bool isVanillaDataCon dc = dcVanilla dc -- | Is this the 'DataCon' of a newtype? isNewDataCon :: DataCon -> Bool isNewDataCon dc = isNewTyCon (dataConTyCon dc) -- | Should this DataCon be allowed in a type even without -XDataKinds? -- Currently, only Lifted & Unlifted specialPromotedDc :: DataCon -> Bool specialPromotedDc = isKindTyCon . 
dataConTyCon classDataCon :: Class -> DataCon classDataCon clas = case tyConDataCons (classTyCon clas) of (dict_constr:no_more) -> assert (null no_more) dict_constr [] -> panic "classDataCon" dataConCannotMatch :: [Type] -> DataCon -> Bool -- Returns True iff the data con *definitely cannot* match a -- scrutinee of type (T tys) -- where T is the dcRepTyCon for the data con dataConCannotMatch tys con -- See (U6) in Note [Implementing unsafeCoerce] -- in base:Unsafe.Coerce | dataConName con == unsafeReflDataConName = False | null inst_theta = False -- Common | all isTyVarTy tys = False -- Also common | otherwise = typesCantMatch (concatMap predEqs inst_theta) where (_, inst_theta, _) = dataConInstSig con tys -- TODO: could gather equalities from superclasses too predEqs pred = case classifyPredType pred of EqPred NomEq ty1 ty2 -> [(ty1, ty2)] ClassPred eq args | eq `hasKey` eqTyConKey , [_, ty1, ty2] <- args -> [(ty1, ty2)] | eq `hasKey` heqTyConKey , [_, _, ty1, ty2] <- args -> [(ty1, ty2)] _ -> [] -- | Were the type variables of the data con written in a different order -- than the regular order (universal tyvars followed by existential tyvars)? -- -- This is not a cheap test, so we minimize its use in GHC as much as possible. -- Currently, its only call site in the GHC codebase is in 'mkDataConRep' in -- "MkId", and so 'dataConUserTyVarsArePermuted' is only called at most once -- during a data constructor's lifetime. -- See Note [DataCon user type variable binders], as well as -- Note [Data con wrappers and GADT syntax] for an explanation of what -- mkDataConRep is doing with this function. 
dataConUserTyVarsArePermuted :: DataCon -> Bool dataConUserTyVarsArePermuted (MkData { dcUnivTyVars = univ_tvs , dcExTyCoVars = ex_tvs, dcEqSpec = eq_spec , dcUserTyVarBinders = user_tvbs }) = (filterEqSpec eq_spec univ_tvs ++ ex_tvs) /= binderVars user_tvbs {- %************************************************************************ %* * Promoting of data types to the kind level * * ************************************************************************ -} promoteDataCon :: DataCon -> TyCon promoteDataCon (MkData { dcPromoted = tc }) = tc {- ************************************************************************ * * \subsection{Splitting products} * * ************************************************************************ -} -- | Extract the type constructor, type argument, data constructor and it's -- /representation/ argument types from a type if it is a product type. -- -- Precisely, we return @Just@ for any data type that is all of: -- -- * Concrete (i.e. constructors visible) -- * Single-constructor -- * ... which has no existentials -- -- Whether the type is a @data@ type or a @newtype@. splitDataProductType_maybe :: Type -- ^ A product type, perhaps -> Maybe (TyCon, -- The type constructor [Type], -- Type args of the tycon DataCon, -- The data constructor [Scaled Type]) -- Its /representation/ arg types -- Rejecting existentials means we don't have to worry about -- freshening and substituting type variables -- (See "GHC.Type.Id.Make.dataConArgUnpack") splitDataProductType_maybe ty | Just (tycon, ty_args) <- splitTyConApp_maybe ty , Just con <- tyConSingleDataCon_maybe tycon , null (dataConExTyCoVars con) -- no existentials! 
See above = Just (tycon, ty_args, con, dataConInstArgTys con ty_args) | otherwise = Nothing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/FVs.hs0000644000000000000000000007612014472400112017563 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Taken quite directly from the Peyton Jones/Lester paper. -} {-# LANGUAGE TypeFamilies #-} -- | A module concerned with finding the free variables of an expression. module GHC.Core.FVs ( -- * Free variables of expressions and binding groups exprFreeVars, exprsFreeVars, exprFreeVarsDSet, exprFreeVarsList, exprsFreeVarsList, exprFreeIds, exprsFreeIds, exprFreeIdsDSet, exprsFreeIdsDSet, exprFreeIdsList, exprsFreeIdsList, bindFreeVars, -- * Selective free variables of expressions InterestingVarFun, exprSomeFreeVars, exprsSomeFreeVars, exprSomeFreeVarsList, exprsSomeFreeVarsList, -- * Free variables of Rules, Vars and Ids varTypeTyCoVars, varTypeTyCoFVs, idUnfoldingVars, idFreeVars, dIdFreeVars, bndrRuleAndUnfoldingVarsDSet, bndrRuleAndUnfoldingIds, idFVs, idRuleVars, stableUnfoldingVars, ruleFreeVars, rulesFreeVars, rulesFreeVarsDSet, mkRuleInfo, ruleLhsFreeIds, ruleLhsFreeIdsList, ruleRhsFreeVars, rulesRhsFreeIds, expr_fvs, -- * Orphan names orphNamesOfType, orphNamesOfCo, orphNamesOfAxiom, orphNamesOfTypes, orphNamesOfCoCon, exprsOrphNames, orphNamesOfFamInst, -- * Core syntax tree annotation with free variables FVAnn, -- annotation, abstract CoreExprWithFVs, -- = AnnExpr Id FVAnn CoreExprWithFVs', -- = AnnExpr' Id FVAnn CoreBindWithFVs, -- = AnnBind Id FVAnn CoreAltWithFVs, -- = AnnAlt Id FVAnn freeVars, -- CoreExpr -> CoreExprWithFVs freeVarsBind, -- CoreBind -> DVarSet -> (DVarSet, CoreBindWithFVs) freeVarsOf, -- CoreExprWithFVs -> DIdSet freeVarsOfAnn ) where import GHC.Prelude import GHC.Core import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Name.Set import GHC.Types.Name import GHC.Types.Tickish import GHC.Types.Var.Set import GHC.Types.Var 
import GHC.Core.Type import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Core.FamInstEnv import GHC.Builtin.Types( unrestrictedFunTyConName ) import GHC.Builtin.Types.Prim( funTyConName ) import GHC.Data.Maybe( orElse ) import GHC.Utils.FV as FV import GHC.Utils.Misc import GHC.Utils.Panic.Plain {- ************************************************************************ * * \section{Finding the free variables of an expression} * * ************************************************************************ This function simply finds the free variables of an expression. So far as type variables are concerned, it only finds tyvars that are * free in type arguments, * free in the type of a binder, but not those that are free in the type of variable occurrence. -} -- | Find all locally-defined free Ids or type variables in an expression -- returning a non-deterministic set. exprFreeVars :: CoreExpr -> VarSet exprFreeVars = fvVarSet . exprFVs -- | Find all locally-defined free Ids or type variables in an expression -- returning a composable FV computation. See Note [FV naming conventions] in "GHC.Utils.FV" -- for why export it. exprFVs :: CoreExpr -> FV exprFVs = filterFV isLocalVar . expr_fvs -- | Find all locally-defined free Ids or type variables in an expression -- returning a deterministic set. exprFreeVarsDSet :: CoreExpr -> DVarSet exprFreeVarsDSet = fvDVarSet . exprFVs -- | Find all locally-defined free Ids or type variables in an expression -- returning a deterministically ordered list. exprFreeVarsList :: CoreExpr -> [Var] exprFreeVarsList = fvVarList . 
exprFVs -- | Find all locally-defined free Ids in an expression exprFreeIds :: CoreExpr -> IdSet -- Find all locally-defined free Ids exprFreeIds = exprSomeFreeVars isLocalId exprsFreeIds :: [CoreExpr] -> IdSet -- Find all locally-defined free Ids exprsFreeIds = exprsSomeFreeVars isLocalId -- | Find all locally-defined free Ids in an expression -- returning a deterministic set. exprFreeIdsDSet :: CoreExpr -> DIdSet -- Find all locally-defined free Ids exprFreeIdsDSet = exprSomeFreeVarsDSet isLocalId -- | Find all locally-defined free Ids in an expression -- returning a deterministically ordered list. exprFreeIdsList :: CoreExpr -> [Id] -- Find all locally-defined free Ids exprFreeIdsList = exprSomeFreeVarsList isLocalId -- | Find all locally-defined free Ids in several expressions -- returning a deterministic set. exprsFreeIdsDSet :: [CoreExpr] -> DIdSet -- Find all locally-defined free Ids exprsFreeIdsDSet = exprsSomeFreeVarsDSet isLocalId -- | Find all locally-defined free Ids in several expressions -- returning a deterministically ordered list. exprsFreeIdsList :: [CoreExpr] -> [Id] -- Find all locally-defined free Ids exprsFreeIdsList = exprsSomeFreeVarsList isLocalId -- | Find all locally-defined free Ids or type variables in several expressions -- returning a non-deterministic set. exprsFreeVars :: [CoreExpr] -> VarSet exprsFreeVars = fvVarSet . exprsFVs -- | Find all locally-defined free Ids or type variables in several expressions -- returning a composable FV computation. See Note [FV naming conventions] in "GHC.Utils.FV" -- for why export it. exprsFVs :: [CoreExpr] -> FV exprsFVs exprs = mapUnionFV exprFVs exprs -- | Find all locally-defined free Ids or type variables in several expressions -- returning a deterministically ordered list. exprsFreeVarsList :: [CoreExpr] -> [Var] exprsFreeVarsList = fvVarList . 
exprsFVs -- | Find all locally defined free Ids in a binding group bindFreeVars :: CoreBind -> VarSet bindFreeVars (NonRec b r) = fvVarSet $ filterFV isLocalVar $ rhs_fvs (b,r) bindFreeVars (Rec prs) = fvVarSet $ filterFV isLocalVar $ addBndrs (map fst prs) (mapUnionFV rhs_fvs prs) -- | Finds free variables in an expression selected by a predicate exprSomeFreeVars :: InterestingVarFun -- ^ Says which 'Var's are interesting -> CoreExpr -> VarSet exprSomeFreeVars fv_cand e = fvVarSet $ filterFV fv_cand $ expr_fvs e -- | Finds free variables in an expression selected by a predicate -- returning a deterministically ordered list. exprSomeFreeVarsList :: InterestingVarFun -- ^ Says which 'Var's are interesting -> CoreExpr -> [Var] exprSomeFreeVarsList fv_cand e = fvVarList $ filterFV fv_cand $ expr_fvs e -- | Finds free variables in an expression selected by a predicate -- returning a deterministic set. exprSomeFreeVarsDSet :: InterestingVarFun -- ^ Says which 'Var's are interesting -> CoreExpr -> DVarSet exprSomeFreeVarsDSet fv_cand e = fvDVarSet $ filterFV fv_cand $ expr_fvs e -- | Finds free variables in several expressions selected by a predicate exprsSomeFreeVars :: InterestingVarFun -- Says which 'Var's are interesting -> [CoreExpr] -> VarSet exprsSomeFreeVars fv_cand es = fvVarSet $ filterFV fv_cand $ mapUnionFV expr_fvs es -- | Finds free variables in several expressions selected by a predicate -- returning a deterministically ordered list. exprsSomeFreeVarsList :: InterestingVarFun -- Says which 'Var's are interesting -> [CoreExpr] -> [Var] exprsSomeFreeVarsList fv_cand es = fvVarList $ filterFV fv_cand $ mapUnionFV expr_fvs es -- | Finds free variables in several expressions selected by a predicate -- returning a deterministic set. 
exprsSomeFreeVarsDSet :: InterestingVarFun -- ^ Says which 'Var's are interesting -> [CoreExpr] -> DVarSet exprsSomeFreeVarsDSet fv_cand e = fvDVarSet $ filterFV fv_cand $ mapUnionFV expr_fvs e -- Comment about obsolete code -- We used to gather the free variables the RULES at a variable occurrence -- with the following cryptic comment: -- "At a variable occurrence, add in any free variables of its rule rhss -- Curiously, we gather the Id's free *type* variables from its binding -- site, but its free *rule-rhs* variables from its usage sites. This -- is a little weird. The reason is that the former is more efficient, -- but the latter is more fine grained, and a makes a difference when -- a variable mentions itself one of its own rule RHSs" -- Not only is this "weird", but it's also pretty bad because it can make -- a function seem more recursive than it is. Suppose -- f = ...g... -- g = ... -- RULE g x = ...f... -- Then f is not mentioned in its own RHS, and needn't be a loop breaker -- (though g may be). But if we collect the rule fvs from g's occurrence, -- it looks as if f mentions itself. (This bites in the eftInt/eftIntFB -- code in GHC.Enum.) -- -- Anyway, it seems plain wrong. The RULE is like an extra RHS for the -- function, so its free variables belong at the definition site. -- -- Deleted code looked like -- foldVarSet add_rule_var var_itself_set (idRuleVars var) -- add_rule_var var set | keep_it fv_cand in_scope var = extendVarSet set var -- | otherwise = set -- SLPJ Feb06 addBndr :: CoreBndr -> FV -> FV addBndr bndr fv fv_cand in_scope acc = (varTypeTyCoFVs bndr `unionFV` -- Include type variables in the binder's type -- (not just Ids; coercion variables too!) 
FV.delFV bndr fv) fv_cand in_scope acc addBndrs :: [CoreBndr] -> FV -> FV addBndrs bndrs fv = foldr addBndr fv bndrs expr_fvs :: CoreExpr -> FV expr_fvs (Type ty) fv_cand in_scope acc = tyCoFVsOfType ty fv_cand in_scope acc expr_fvs (Coercion co) fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc expr_fvs (Var var) fv_cand in_scope acc = FV.unitFV var fv_cand in_scope acc expr_fvs (Lit _) fv_cand in_scope acc = emptyFV fv_cand in_scope acc expr_fvs (Tick t expr) fv_cand in_scope acc = (tickish_fvs t `unionFV` expr_fvs expr) fv_cand in_scope acc expr_fvs (App fun arg) fv_cand in_scope acc = (expr_fvs fun `unionFV` expr_fvs arg) fv_cand in_scope acc expr_fvs (Lam bndr body) fv_cand in_scope acc = addBndr bndr (expr_fvs body) fv_cand in_scope acc expr_fvs (Cast expr co) fv_cand in_scope acc = (expr_fvs expr `unionFV` tyCoFVsOfCo co) fv_cand in_scope acc expr_fvs (Case scrut bndr ty alts) fv_cand in_scope acc = (expr_fvs scrut `unionFV` tyCoFVsOfType ty `unionFV` addBndr bndr (mapUnionFV alt_fvs alts)) fv_cand in_scope acc where alt_fvs (Alt _ bndrs rhs) = addBndrs bndrs (expr_fvs rhs) expr_fvs (Let (NonRec bndr rhs) body) fv_cand in_scope acc = (rhs_fvs (bndr, rhs) `unionFV` addBndr bndr (expr_fvs body)) fv_cand in_scope acc expr_fvs (Let (Rec pairs) body) fv_cand in_scope acc = addBndrs (map fst pairs) (mapUnionFV rhs_fvs pairs `unionFV` expr_fvs body) fv_cand in_scope acc --------- rhs_fvs :: (Id, CoreExpr) -> FV rhs_fvs (bndr, rhs) = expr_fvs rhs `unionFV` bndrRuleAndUnfoldingFVs bndr -- Treat any RULES as extra RHSs of the binding --------- exprs_fvs :: [CoreExpr] -> FV exprs_fvs exprs = mapUnionFV expr_fvs exprs tickish_fvs :: CoreTickish -> FV tickish_fvs (Breakpoint _ _ ids) = FV.mkFVs ids tickish_fvs _ = emptyFV {- ************************************************************************ * * \section{Free names} * * ************************************************************************ -} -- | Finds the free /external/ names of an expression, notably 
-- including the names of type constructors (which of course do not show -- up in 'exprFreeVars'). exprOrphNames :: CoreExpr -> NameSet -- There's no need to delete local binders, because they will all -- be /internal/ names. exprOrphNames e = go e where go (Var v) | isExternalName n = unitNameSet n | otherwise = emptyNameSet where n = idName v go (Lit _) = emptyNameSet go (Type ty) = orphNamesOfType ty -- Don't need free tyvars go (Coercion co) = orphNamesOfCo co go (App e1 e2) = go e1 `unionNameSet` go e2 go (Lam v e) = go e `delFromNameSet` idName v go (Tick _ e) = go e go (Cast e co) = go e `unionNameSet` orphNamesOfCo co go (Let (NonRec _ r) e) = go e `unionNameSet` go r go (Let (Rec prs) e) = exprsOrphNames (map snd prs) `unionNameSet` go e go (Case e _ ty as) = go e `unionNameSet` orphNamesOfType ty `unionNameSet` unionNameSets (map go_alt as) go_alt (Alt _ _ r) = go r -- | Finds the free /external/ names of several expressions: see 'exprOrphNames' for details exprsOrphNames :: [CoreExpr] -> NameSet exprsOrphNames es = foldr (unionNameSet . 
exprOrphNames) emptyNameSet es {- ********************************************************************** %* * orphNamesXXX %* * %********************************************************************* -} orphNamesOfTyCon :: TyCon -> NameSet orphNamesOfTyCon tycon = unitNameSet (getName tycon) `unionNameSet` case tyConClass_maybe tycon of Nothing -> emptyNameSet Just cls -> unitNameSet (getName cls) orphNamesOfType :: Type -> NameSet orphNamesOfType ty | Just ty' <- coreView ty = orphNamesOfType ty' -- Look through type synonyms (#4912) orphNamesOfType (TyVarTy _) = emptyNameSet orphNamesOfType (LitTy {}) = emptyNameSet orphNamesOfType (TyConApp tycon tys) = func `unionNameSet` orphNamesOfTyCon tycon `unionNameSet` orphNamesOfTypes tys where func = case tys of arg:_ | tycon == funTyCon -> orph_names_of_fun_ty_con arg _ -> emptyNameSet orphNamesOfType (ForAllTy bndr res) = orphNamesOfType (binderType bndr) `unionNameSet` orphNamesOfType res orphNamesOfType (FunTy _ w arg res) = orph_names_of_fun_ty_con w `unionNameSet` unitNameSet funTyConName `unionNameSet` orphNamesOfType w `unionNameSet` orphNamesOfType arg `unionNameSet` orphNamesOfType res orphNamesOfType (AppTy fun arg) = orphNamesOfType fun `unionNameSet` orphNamesOfType arg orphNamesOfType (CastTy ty co) = orphNamesOfType ty `unionNameSet` orphNamesOfCo co orphNamesOfType (CoercionTy co) = orphNamesOfCo co orphNamesOfThings :: (a -> NameSet) -> [a] -> NameSet orphNamesOfThings f = foldr (unionNameSet . 
f) emptyNameSet orphNamesOfTypes :: [Type] -> NameSet orphNamesOfTypes = orphNamesOfThings orphNamesOfType orphNamesOfMCo :: MCoercion -> NameSet orphNamesOfMCo MRefl = emptyNameSet orphNamesOfMCo (MCo co) = orphNamesOfCo co orphNamesOfCo :: Coercion -> NameSet orphNamesOfCo (Refl ty) = orphNamesOfType ty orphNamesOfCo (GRefl _ ty mco) = orphNamesOfType ty `unionNameSet` orphNamesOfMCo mco orphNamesOfCo (TyConAppCo _ tc cos) = unitNameSet (getName tc) `unionNameSet` orphNamesOfCos cos orphNamesOfCo (AppCo co1 co2) = orphNamesOfCo co1 `unionNameSet` orphNamesOfCo co2 orphNamesOfCo (ForAllCo _ kind_co co) = orphNamesOfCo kind_co `unionNameSet` orphNamesOfCo co orphNamesOfCo (FunCo _ co_mult co1 co2) = orphNamesOfCo co_mult `unionNameSet` orphNamesOfCo co1 `unionNameSet` orphNamesOfCo co2 orphNamesOfCo (CoVarCo _) = emptyNameSet orphNamesOfCo (AxiomInstCo con _ cos) = orphNamesOfCoCon con `unionNameSet` orphNamesOfCos cos orphNamesOfCo (UnivCo p _ t1 t2) = orphNamesOfProv p `unionNameSet` orphNamesOfType t1 `unionNameSet` orphNamesOfType t2 orphNamesOfCo (SymCo co) = orphNamesOfCo co orphNamesOfCo (TransCo co1 co2) = orphNamesOfCo co1 `unionNameSet` orphNamesOfCo co2 orphNamesOfCo (NthCo _ _ co) = orphNamesOfCo co orphNamesOfCo (LRCo _ co) = orphNamesOfCo co orphNamesOfCo (InstCo co arg) = orphNamesOfCo co `unionNameSet` orphNamesOfCo arg orphNamesOfCo (KindCo co) = orphNamesOfCo co orphNamesOfCo (SubCo co) = orphNamesOfCo co orphNamesOfCo (AxiomRuleCo _ cs) = orphNamesOfCos cs orphNamesOfCo (HoleCo _) = emptyNameSet orphNamesOfProv :: UnivCoProvenance -> NameSet orphNamesOfProv (PhantomProv co) = orphNamesOfCo co orphNamesOfProv (ProofIrrelProv co) = orphNamesOfCo co orphNamesOfProv (PluginProv _) = emptyNameSet orphNamesOfProv (CorePrepProv _) = emptyNameSet orphNamesOfCos :: [Coercion] -> NameSet orphNamesOfCos = orphNamesOfThings orphNamesOfCo orphNamesOfCoCon :: CoAxiom br -> NameSet orphNamesOfCoCon (CoAxiom { co_ax_tc = tc, co_ax_branches = branches }) = 
orphNamesOfTyCon tc `unionNameSet` orphNamesOfCoAxBranches branches orphNamesOfAxiom :: CoAxiom br -> NameSet orphNamesOfAxiom axiom = orphNamesOfTypes (concatMap coAxBranchLHS $ fromBranches $ coAxiomBranches axiom) `extendNameSet` getName (coAxiomTyCon axiom) orphNamesOfCoAxBranches :: Branches br -> NameSet orphNamesOfCoAxBranches = foldr (unionNameSet . orphNamesOfCoAxBranch) emptyNameSet . fromBranches orphNamesOfCoAxBranch :: CoAxBranch -> NameSet orphNamesOfCoAxBranch (CoAxBranch { cab_lhs = lhs, cab_rhs = rhs }) = orphNamesOfTypes lhs `unionNameSet` orphNamesOfType rhs -- | orphNamesOfAxiom collects the names of the concrete types and -- type constructors that make up the LHS of a type family instance, -- including the family name itself. -- -- For instance, given `type family Foo a b`: -- `type instance Foo (F (G (H a))) b = ...` would yield [Foo,F,G,H] -- -- Used in the implementation of ":info" in GHCi. orphNamesOfFamInst :: FamInst -> NameSet orphNamesOfFamInst fam_inst = orphNamesOfAxiom (famInstAxiom fam_inst) -- Detect FUN 'Many as an application of (->), so that :i (->) works as expected -- (see #8535) Issue #16475 describes a more robust solution orph_names_of_fun_ty_con :: Mult -> NameSet orph_names_of_fun_ty_con Many = unitNameSet unrestrictedFunTyConName orph_names_of_fun_ty_con _ = emptyNameSet {- ************************************************************************ * * \section[freevars-everywhere]{Attaching free variables to every sub-expression} * * ************************************************************************ -} data RuleFVsFrom = LhsOnly | RhsOnly | BothSides -- | Those locally-defined variables free in the left and/or right hand sides -- of the rule, depending on the first argument. Returns an 'FV' computation. 
ruleFVs :: RuleFVsFrom -> CoreRule -> FV ruleFVs !_ (BuiltinRule {}) = emptyFV ruleFVs from (Rule { ru_fn = _do_not_include -- See Note [Rule free var hack] , ru_bndrs = bndrs , ru_rhs = rhs, ru_args = args }) = filterFV isLocalVar $ addBndrs bndrs (exprs_fvs exprs) where exprs = case from of LhsOnly -> args RhsOnly -> [rhs] BothSides -> rhs:args -- | Those locally-defined variables free in the left and/or right hand sides -- from several rules, depending on the first argument. -- Returns an 'FV' computation. rulesFVs :: RuleFVsFrom -> [CoreRule] -> FV rulesFVs from = mapUnionFV (ruleFVs from) -- | Those variables free in the right hand side of a rule returned as a -- non-deterministic set ruleRhsFreeVars :: CoreRule -> VarSet ruleRhsFreeVars = fvVarSet . ruleFVs RhsOnly -- | Those locally-defined free 'Id's in the right hand side of several rules -- returned as a non-deterministic set rulesRhsFreeIds :: [CoreRule] -> VarSet rulesRhsFreeIds = fvVarSet . filterFV isLocalId . rulesFVs RhsOnly ruleLhsFreeIds :: CoreRule -> VarSet -- ^ This finds all locally-defined free Ids on the left hand side of a rule -- and returns them as a non-deterministic set ruleLhsFreeIds = fvVarSet . filterFV isLocalId . ruleFVs LhsOnly ruleLhsFreeIdsList :: CoreRule -> [Var] -- ^ This finds all locally-defined free Ids on the left hand side of a rule -- and returns them as a deterministically ordered list ruleLhsFreeIdsList = fvVarList . filterFV isLocalId . ruleFVs LhsOnly -- | Those variables free in the both the left right hand sides of a rule -- returned as a non-deterministic set ruleFreeVars :: CoreRule -> VarSet ruleFreeVars = fvVarSet . 
ruleFVs BothSides -- | Those variables free in the both the left right hand sides of rules -- returned as a deterministic set rulesFreeVarsDSet :: [CoreRule] -> DVarSet rulesFreeVarsDSet rules = fvDVarSet $ rulesFVs BothSides rules -- | Those variables free in both the left right hand sides of several rules rulesFreeVars :: [CoreRule] -> VarSet rulesFreeVars rules = fvVarSet $ rulesFVs BothSides rules -- | Make a 'RuleInfo' containing a number of 'CoreRule's, suitable -- for putting into an 'IdInfo' mkRuleInfo :: [CoreRule] -> RuleInfo mkRuleInfo rules = RuleInfo rules (rulesFreeVarsDSet rules) {- Note [Rule free var hack] (Not a hack any more) ~~~~~~~~~~~~~~~~~~~~~~~~~ We used not to include the Id in its own rhs free-var set. Otherwise the occurrence analyser makes bindings recursive: f x y = x+y RULE: f (f x y) z ==> f x (f y z) However, the occurrence analyser distinguishes "non-rule loop breakers" from "rule-only loop breakers" (see BasicTypes.OccInfo). So it will put this 'f' in a Rec block, but will mark the binding as a non-rule loop breaker, which is perfectly inlinable. -} {- ************************************************************************ * * \section[freevars-everywhere]{Attaching free variables to every sub-expression} * * ************************************************************************ The free variable pass annotates every node in the expression with its NON-GLOBAL free variables and type variables. -} type FVAnn = DVarSet -- See Note [The FVAnn invariant] {- Note [The FVAnn invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Invariant: a FVAnn, say S, is closed: That is: if v is in S, then freevars( v's type/kind ) is also in S -} -- | Every node in a binding group annotated with its -- (non-global) free variables, both Ids and TyVars, and type. type CoreBindWithFVs = AnnBind Id FVAnn -- | Every node in an expression annotated with its -- (non-global) free variables, both Ids and TyVars, and type. 
-- NB: see Note [The FVAnn invariant] type CoreExprWithFVs = AnnExpr Id FVAnn type CoreExprWithFVs' = AnnExpr' Id FVAnn -- | Every node in an expression annotated with its -- (non-global) free variables, both Ids and TyVars, and type. type CoreAltWithFVs = AnnAlt Id FVAnn freeVarsOf :: CoreExprWithFVs -> DIdSet -- ^ Inverse function to 'freeVars' freeVarsOf (fvs, _) = fvs -- | Extract the vars reported in a FVAnn freeVarsOfAnn :: FVAnn -> DIdSet freeVarsOfAnn fvs = fvs aFreeVar :: Var -> DVarSet aFreeVar = unitDVarSet unionFVs :: DVarSet -> DVarSet -> DVarSet unionFVs = unionDVarSet unionFVss :: [DVarSet] -> DVarSet unionFVss = unionDVarSets delBindersFV :: [Var] -> DVarSet -> DVarSet delBindersFV bs fvs = foldr delBinderFV fvs bs delBinderFV :: Var -> DVarSet -> DVarSet -- This way round, so we can do it multiple times using foldr -- (b `delBinderFV` s) -- * removes the binder b from the free variable set s, -- * AND *adds* to s the free variables of b's type -- -- This is really important for some lambdas: -- In (\x::a -> x) the only mention of "a" is in the binder. -- -- Also in -- let x::a = b in ... -- we should really note that "a" is free in this expression. -- It'll be pinned inside the /\a by the binding for b, but -- it seems cleaner to make sure that a is in the free-var set -- when it is mentioned. -- -- This also shows up in recursive bindings. Consider: -- /\a -> letrec x::a = x in E -- Now, there are no explicit free type variables in the RHS of x, -- but nevertheless "a" is free in its definition. So we add in -- the free tyvars of the types of the binders, and include these in the -- free vars of the group, attached to the top level of each RHS. -- -- This actually happened in the defn of errorIO in IOBase.hs: -- errorIO (ST io) = case (errorIO# io) of -- _ -> bottom -- where -- bottom = bottom -- Never evaluated delBinderFV b s = (s `delDVarSet` b) `unionFVs` dVarTypeTyCoVars b -- Include coercion variables too! 
varTypeTyCoVars :: Var -> TyCoVarSet -- Find the type/kind variables free in the type of the id/tyvar varTypeTyCoVars var = fvVarSet $ varTypeTyCoFVs var dVarTypeTyCoVars :: Var -> DTyCoVarSet -- Find the type/kind/coercion variables free in the type of the id/tyvar dVarTypeTyCoVars var = fvDVarSet $ varTypeTyCoFVs var varTypeTyCoFVs :: Var -> FV -- Find the free variables of a binder. -- In the case of ids, don't forget the multiplicity field! varTypeTyCoFVs var = tyCoFVsOfType (varType var) `unionFV` mult_fvs where mult_fvs = case varMultMaybe var of Just mult -> tyCoFVsOfType mult Nothing -> emptyFV idFreeVars :: Id -> VarSet idFreeVars id = assert (isId id) $ fvVarSet $ idFVs id dIdFreeVars :: Id -> DVarSet dIdFreeVars id = fvDVarSet $ idFVs id idFVs :: Id -> FV -- Type variables, rule variables, and inline variables idFVs id = assert (isId id) $ varTypeTyCoFVs id `unionFV` bndrRuleAndUnfoldingFVs id bndrRuleAndUnfoldingVarsDSet :: Id -> DVarSet bndrRuleAndUnfoldingVarsDSet id = fvDVarSet $ bndrRuleAndUnfoldingFVs id bndrRuleAndUnfoldingIds :: Id -> IdSet bndrRuleAndUnfoldingIds id = fvVarSet $ filterFV isId $ bndrRuleAndUnfoldingFVs id bndrRuleAndUnfoldingFVs :: Id -> FV bndrRuleAndUnfoldingFVs id | isId id = idRuleFVs id `unionFV` idUnfoldingFVs id | otherwise = emptyFV idRuleVars ::Id -> VarSet -- Does *not* include CoreUnfolding vars idRuleVars id = fvVarSet $ idRuleFVs id idRuleFVs :: Id -> FV idRuleFVs id = assert (isId id) $ FV.mkFVs (dVarSetElems $ ruleInfoFreeVars (idSpecialisation id)) idUnfoldingVars :: Id -> VarSet -- Produce free vars for an unfolding, but NOT for an ordinary -- (non-inline) unfolding, since it is a dup of the rhs -- and we'll get exponential behaviour if we look at both unf and rhs! 
-- But do look at the *real* unfolding, even for loop breakers, else -- we might get out-of-scope variables idUnfoldingVars id = fvVarSet $ idUnfoldingFVs id idUnfoldingFVs :: Id -> FV idUnfoldingFVs id = stableUnfoldingFVs (realIdUnfolding id) `orElse` emptyFV stableUnfoldingVars :: Unfolding -> Maybe VarSet stableUnfoldingVars unf = fvVarSet `fmap` stableUnfoldingFVs unf stableUnfoldingFVs :: Unfolding -> Maybe FV stableUnfoldingFVs unf = case unf of CoreUnfolding { uf_tmpl = rhs, uf_src = src } | isStableSource src -> Just (filterFV isLocalVar $ expr_fvs rhs) DFunUnfolding { df_bndrs = bndrs, df_args = args } -> Just (filterFV isLocalVar $ FV.delFVs (mkVarSet bndrs) $ exprs_fvs args) -- DFuns are top level, so no fvs from types of bndrs _other -> Nothing {- ************************************************************************ * * \subsection{Free variables (and types)} * * ************************************************************************ -} freeVarsBind :: CoreBind -> DVarSet -- Free vars of scope of binding -> (CoreBindWithFVs, DVarSet) -- Return free vars of binding + scope freeVarsBind (NonRec binder rhs) body_fvs = ( AnnNonRec binder rhs2 , freeVarsOf rhs2 `unionFVs` body_fvs2 `unionFVs` bndrRuleAndUnfoldingVarsDSet binder ) where rhs2 = freeVars rhs body_fvs2 = binder `delBinderFV` body_fvs freeVarsBind (Rec binds) body_fvs = ( AnnRec (binders `zip` rhss2) , delBindersFV binders all_fvs ) where (binders, rhss) = unzip binds rhss2 = map freeVars rhss rhs_body_fvs = foldr (unionFVs . freeVarsOf) body_fvs rhss2 binders_fvs = fvDVarSet $ mapUnionFV bndrRuleAndUnfoldingFVs binders -- See Note [The FVAnn invariant] all_fvs = rhs_body_fvs `unionFVs` binders_fvs -- The "delBinderFV" happens after adding the idSpecVars, -- since the latter may add some of the binders as fvs freeVars :: CoreExpr -> CoreExprWithFVs -- ^ Annotate a 'CoreExpr' with its (non-global) free type -- and value variables at every tree node. 
freeVars = go where go :: CoreExpr -> CoreExprWithFVs go (Var v) | isLocalVar v = (aFreeVar v `unionFVs` ty_fvs `unionFVs` mult_vars, AnnVar v) | otherwise = (emptyDVarSet, AnnVar v) where mult_vars = tyCoVarsOfTypeDSet (varMult v) ty_fvs = dVarTypeTyCoVars v -- See Note [The FVAnn invariant] go (Lit lit) = (emptyDVarSet, AnnLit lit) go (Lam b body) = ( b_fvs `unionFVs` (b `delBinderFV` body_fvs) , AnnLam b body' ) where body'@(body_fvs, _) = go body b_ty = idType b b_fvs = tyCoVarsOfTypeDSet b_ty -- See Note [The FVAnn invariant] go (App fun arg) = ( freeVarsOf fun' `unionFVs` freeVarsOf arg' , AnnApp fun' arg' ) where fun' = go fun arg' = go arg go (Case scrut bndr ty alts) = ( (bndr `delBinderFV` alts_fvs) `unionFVs` freeVarsOf scrut2 `unionFVs` tyCoVarsOfTypeDSet ty -- Don't need to look at (idType bndr) -- because that's redundant with scrut , AnnCase scrut2 bndr ty alts2 ) where scrut2 = go scrut (alts_fvs_s, alts2) = mapAndUnzip fv_alt alts alts_fvs = unionFVss alts_fvs_s fv_alt (Alt con args rhs) = (delBindersFV args (freeVarsOf rhs2), (AnnAlt con args rhs2)) where rhs2 = go rhs go (Let bind body) = (bind_fvs, AnnLet bind2 body2) where (bind2, bind_fvs) = freeVarsBind bind (freeVarsOf body2) body2 = go body go (Cast expr co) = ( freeVarsOf expr2 `unionFVs` cfvs , AnnCast expr2 (cfvs, co) ) where expr2 = go expr cfvs = tyCoVarsOfCoDSet co go (Tick tickish expr) = ( tickishFVs tickish `unionFVs` freeVarsOf expr2 , AnnTick tickish expr2 ) where expr2 = go expr tickishFVs (Breakpoint _ _ ids) = mkDVarSet ids tickishFVs _ = emptyDVarSet go (Type ty) = (tyCoVarsOfTypeDSet ty, AnnType ty) go (Coercion co) = (tyCoVarsOfCoDSet co, AnnCoercion co) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/FamInstEnv.hs0000644000000000000000000017711114472400112021101 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- (c) The 
University of Glasgow 2006 -- -- FamInstEnv: Type checked family instance declarations module GHC.Core.FamInstEnv ( FamInst(..), FamFlavor(..), famInstAxiom, famInstTyCon, famInstRHS, famInstsRepTyCons, famInstRepTyCon_maybe, dataFamInstRepTyCon, pprFamInst, pprFamInsts, mkImportedFamInst, FamInstEnvs, FamInstEnv, emptyFamInstEnv, emptyFamInstEnvs, unionFamInstEnv, extendFamInstEnv, extendFamInstEnvList, famInstEnvElts, famInstEnvSize, familyInstances, familyNameInstances, -- * CoAxioms mkCoAxBranch, mkBranchedCoAxiom, mkUnbranchedCoAxiom, mkSingleCoAxiom, mkNewTypeCoAxiom, FamInstMatch(..), lookupFamInstEnv, lookupFamInstEnvConflicts, lookupFamInstEnvByTyCon, isDominatedBy, apartnessCheck, compatibleBranches, -- Injectivity InjectivityCheckResult(..), lookupFamInstEnvInjectivityConflicts, injectiveBranches, -- Normalisation topNormaliseType, topNormaliseType_maybe, normaliseType, normaliseTcApp, topReduceTyFamApp_maybe, reduceTyFamApp_maybe ) where import GHC.Prelude import GHC.Core.Unify import GHC.Core.Type as Type import GHC.Core.TyCo.Rep import GHC.Core.TyCon import GHC.Core.Coercion import GHC.Core.Coercion.Axiom import GHC.Core.Reduction import GHC.Core.RoughMap import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Types.Name import GHC.Data.Maybe import GHC.Types.Var import GHC.Types.SrcLoc import Control.Monad import Data.List( mapAccumL ) import Data.Array( Array, assocs ) import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.Bag {- ************************************************************************ * * Type checked family instance heads * * ************************************************************************ Note [FamInsts and CoAxioms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * CoAxioms and FamInsts are just like DFunIds and ClsInsts * A CoAxiom is a System-FC thing: it can relate any two types * A FamInst is a Haskell source-language thing, corresponding to a type/data family instance 
declaration. - The FamInst contains a CoAxiom, which is the evidence for the instance - The LHS of the CoAxiom is always of form F ty1 .. tyn where F is a type family -} data FamInst -- See Note [FamInsts and CoAxioms] = FamInst { fi_axiom :: CoAxiom Unbranched -- The new coercion axiom -- introduced by this family -- instance -- INVARIANT: apart from freshening (see below) -- fi_tvs = cab_tvs of the (single) axiom branch -- fi_cvs = cab_cvs ...ditto... -- fi_tys = cab_lhs ...ditto... -- fi_rhs = cab_rhs ...ditto... , fi_flavor :: FamFlavor -- Everything below here is a redundant, -- cached version of the two things above -- except that the TyVars are freshened , fi_fam :: Name -- Family name -- Used for "rough matching"; same idea as for class instances -- See Note [Rough matching in class and family instances] -- in GHC.Core.Unify , fi_tcs :: [RoughMatchTc] -- Top of type args -- INVARIANT: fi_tcs = roughMatchTcs fi_tys -- Used for "proper matching"; ditto , fi_tvs :: [TyVar] -- Template tyvars for full match , fi_cvs :: [CoVar] -- Template covars for full match -- Like ClsInsts, these variables are always fresh -- See Note [Template tyvars are fresh] in GHC.Core.InstEnv , fi_tys :: [Type] -- The LHS type patterns -- May be eta-reduced; see Note [Eta reduction for data families] -- in GHC.Core.Coercion.Axiom , fi_rhs :: Type -- the RHS, with its freshened vars } data FamFlavor = SynFamilyInst -- A synonym family | DataFamilyInst TyCon -- A data family, with its representation TyCon {- Note [Arity of data families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Data family instances might legitimately be over- or under-saturated. Under-saturation has two potential causes: U1) Eta reduction. See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. U2) When the user has specified a return kind instead of written out patterns. Example: data family Sing (a :: k) data instance Sing :: Bool -> Type The data family tycon Sing has an arity of 2, the k and the a. 
But the data instance has only one pattern, Bool (standing in for k). This instance is equivalent to `data instance Sing (a :: Bool)`, but without the last pattern, we have an under-saturated data family instance. On its own, this example is not compelling enough to add support for under-saturation, but U1 makes this feature more compelling. Over-saturation is also possible: O1) If the data family's return kind is a type variable (see also #12369), an instance might legitimately have more arguments than the family. Example: data family Fix :: (Type -> k) -> k data instance Fix f = MkFix1 (f (Fix f)) data instance Fix f x = MkFix2 (f (Fix f x) x) In the first instance here, the k in the data family kind is chosen to be Type. In the second, it's (Type -> Type). However, we require that any over-saturation is eta-reducible. That is, we require that any extra patterns be bare unrepeated type variables; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. Accordingly, the FamInst is never over-saturated. Why can we allow such flexibility for data families but not for type families? Because data families can be decomposed -- that is, they are generative and injective. A Type family is neither and so always must be applied to all its arguments. -} -- Obtain the axiom of a family instance famInstAxiom :: FamInst -> CoAxiom Unbranched famInstAxiom = fi_axiom -- Split the left-hand side of the FamInst famInstSplitLHS :: FamInst -> (TyCon, [Type]) famInstSplitLHS (FamInst { fi_axiom = axiom, fi_tys = lhs }) = (coAxiomTyCon axiom, lhs) -- Get the RHS of the FamInst famInstRHS :: FamInst -> Type famInstRHS = fi_rhs -- Get the family TyCon of the FamInst famInstTyCon :: FamInst -> TyCon famInstTyCon = coAxiomTyCon . 
famInstAxiom -- Return the representation TyCons introduced by data family instances, if any famInstsRepTyCons :: [FamInst] -> [TyCon] famInstsRepTyCons fis = [tc | FamInst { fi_flavor = DataFamilyInst tc } <- fis] -- Extracts the TyCon for this *data* (or newtype) instance famInstRepTyCon_maybe :: FamInst -> Maybe TyCon famInstRepTyCon_maybe fi = case fi_flavor fi of DataFamilyInst tycon -> Just tycon SynFamilyInst -> Nothing dataFamInstRepTyCon :: FamInst -> TyCon dataFamInstRepTyCon fi = case fi_flavor fi of DataFamilyInst tycon -> tycon SynFamilyInst -> pprPanic "dataFamInstRepTyCon" (ppr fi) {- ************************************************************************ * * Pretty printing * * ************************************************************************ -} instance NamedThing FamInst where getName = coAxiomName . fi_axiom instance Outputable FamInst where ppr = pprFamInst pprFamInst :: FamInst -> SDoc -- Prints the FamInst as a family instance declaration -- NB: This function, FamInstEnv.pprFamInst, is used only for internal, -- debug printing. See GHC.Types.TyThing.Ppr.pprFamInst for printing for the user pprFamInst (FamInst { fi_flavor = flavor, fi_axiom = ax , fi_tvs = tvs, fi_tys = tys, fi_rhs = rhs }) = hang (ppr_tc_sort <+> text "instance" <+> pprCoAxBranchUser (coAxiomTyCon ax) (coAxiomSingleBranch ax)) 2 (whenPprDebug debug_stuff) where ppr_tc_sort = case flavor of SynFamilyInst -> text "type" DataFamilyInst tycon | isDataTyCon tycon -> text "data" | isNewTyCon tycon -> text "newtype" | isAbstractTyCon tycon -> text "data" | otherwise -> text "WEIRD" <+> ppr tycon debug_stuff = vcat [ text "Coercion axiom:" <+> ppr ax , text "Tvs:" <+> ppr tvs , text "LHS:" <+> ppr tys , text "RHS:" <+> ppr rhs ] pprFamInsts :: [FamInst] -> SDoc pprFamInsts finsts = vcat (map pprFamInst finsts) {- Note [Lazy axiom match] ~~~~~~~~~~~~~~~~~~~~~~~ It is Vitally Important that mkImportedFamInst is *lazy* in its axiom parameter. 
The axiom is loaded lazily, via a forkM, in GHC.IfaceToCore. Sometime later, mkImportedFamInst is called using that axiom. However, the axiom may itself depend on entities which are not yet loaded as of the time of the mkImportedFamInst. Thus, if mkImportedFamInst eagerly looks at the axiom, a dependency loop spontaneously appears and GHC hangs. The solution is simply for mkImportedFamInst never, ever to look inside of the axiom until everything else is good and ready to do so. We can assume that this readiness has been achieved when some other code pulls on the axiom in the FamInst. Thus, we pattern match on the axiom lazily (in the where clause, not in the parameter list) and we assert the consistency of names there also. -} -- Make a family instance representation from the information found in an -- interface file. In particular, we get the rough match info from the iface -- (instead of computing it here). mkImportedFamInst :: Name -- Name of the family -> [RoughMatchTc] -- Rough match info -> CoAxiom Unbranched -- Axiom introduced -> FamInst -- Resulting family instance mkImportedFamInst fam mb_tcs axiom = FamInst { fi_fam = fam, fi_tcs = mb_tcs, fi_tvs = tvs, fi_cvs = cvs, fi_tys = tys, fi_rhs = rhs, fi_axiom = axiom, fi_flavor = flavor } where -- See Note [Lazy axiom match] ~(CoAxBranch { cab_lhs = tys , cab_tvs = tvs , cab_cvs = cvs , cab_rhs = rhs }) = coAxiomSingleBranch axiom -- Derive the flavor for an imported FamInst rather disgustingly -- Maybe we should store it in the IfaceFamInst? flavor = case splitTyConApp_maybe rhs of Just (tc, _) | Just ax' <- tyConFamilyCoercion_maybe tc , ax' == axiom -> DataFamilyInst tc _ -> SynFamilyInst {- ************************************************************************ * * FamInstEnv * * ************************************************************************ Note [FamInstEnv] ~~~~~~~~~~~~~~~~~ A FamInstEnv is a RoughMap of instance heads. 
Specifically, the keys are formed by the family name and the instance arguments. That is, an instance: type instance Fam (Maybe Int) a would insert into the instance environment an instance with a key of the form [RM_KnownTc Fam, RM_KnownTc Maybe, RM_WildCard] See Note [RoughMap] in GHC.Core.RoughMap. The same FamInstEnv includes both 'data family' and 'type family' instances. Type families are reduced during type inference, but not data families; the user explains when to use a data family instance by using constructors and pattern matching. Nevertheless it is still useful to have data families in the FamInstEnv: - For finding overlaps and conflicts - For finding the representation type...see FamInstEnv.topNormaliseType and its call site in GHC.Core.Opt.Simplify - In standalone deriving instance Eq (T [Int]) we need to find the representation type for T [Int] Note [Varying number of patterns for data family axioms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For data families, the number of patterns may vary between instances. For example data family T a b data instance T Int a = T1 a | T2 data instance T Bool [a] = T3 a Then we get a data type for each instance, and an axiom: data TInt a = T1 a | T2 data TBoolList a = T3 a axiom ax7 :: T Int ~ TInt -- Eta-reduced axiom ax8 a :: T Bool [a] ~ TBoolList a These two axioms for T, one with one pattern, one with two; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom Note [FamInstEnv determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We turn FamInstEnvs into a list in some places that don't directly affect the ABI. That happens in family consistency checks and when producing output for `:info`. Unfortunately that nondeterminism is nonlocal and it's hard to tell what it affects without following a chain of functions. It's also easy to accidentally make that nondeterminism affect the ABI. Furthermore the envs should be relatively small, so it should be free to use deterministic maps here. 
Testing with nofib and validate detected no difference between UniqFM and UniqDFM. See Note [Deterministic UniqFM]. -} type FamInstEnvs = (FamInstEnv, FamInstEnv) -- External package inst-env, Home-package inst-env data FamInstEnv = FamIE !Int -- The number of instances, used to choose the smaller environment -- when checking type family consistnecy of home modules. !(RoughMap FamInst) -- See Note [FamInstEnv] -- See Note [FamInstEnv determinism] instance Outputable FamInstEnv where ppr (FamIE _ fs) = text "FamIE" <+> vcat (map ppr $ elemsRM fs) famInstEnvSize :: FamInstEnv -> Int famInstEnvSize (FamIE sz _) = sz -- | Create a 'FamInstEnv' from 'Name' indices. -- INVARIANTS: -- * The fs_tvs are distinct in each FamInst -- of a range value of the map (so we can safely unify them) emptyFamInstEnvs :: (FamInstEnv, FamInstEnv) emptyFamInstEnvs = (emptyFamInstEnv, emptyFamInstEnv) emptyFamInstEnv :: FamInstEnv emptyFamInstEnv = FamIE 0 emptyRM famInstEnvElts :: FamInstEnv -> [FamInst] famInstEnvElts (FamIE _ rm) = elemsRM rm -- See Note [FamInstEnv determinism] -- It's OK to use nonDetStrictFoldUDFM here since we're just computing the -- size. familyInstances :: (FamInstEnv, FamInstEnv) -> TyCon -> [FamInst] familyInstances envs tc = familyNameInstances envs (tyConName tc) familyNameInstances :: (FamInstEnv, FamInstEnv) -> Name -> [FamInst] familyNameInstances (pkg_fie, home_fie) fam = get home_fie ++ get pkg_fie where get :: FamInstEnv -> [FamInst] get (FamIE _ env) = lookupRM [RML_KnownTc fam] env -- | Makes no particular effort to detect conflicts. 
unionFamInstEnv :: FamInstEnv -> FamInstEnv -> FamInstEnv
unionFamInstEnv (FamIE sa a) (FamIE sb b) = FamIE (sa + sb) (a `unionRM` b)
  -- The cached sizes are simply added; any instance present in both
  -- environments is therefore counted twice.

extendFamInstEnvList :: FamInstEnv -> [FamInst] -> FamInstEnv
extendFamInstEnvList inst_env fis = foldl' extendFamInstEnv inst_env fis

-- | Insert a single instance, bumping the cached size and indexing the
-- instance by its family name followed by its rough argument shapes.
-- See Note [FamInstEnv].
extendFamInstEnv :: FamInstEnv -> FamInst -> FamInstEnv
extendFamInstEnv (FamIE s inst_env)
                 ins_item@(FamInst {fi_fam = cls_nm})
  = FamIE (s+1) $ insertRM rough_tmpl ins_item inst_env
  where
    rough_tmpl = RM_KnownTc cls_nm : fi_tcs ins_item

{-
************************************************************************
*                                                                      *
                Compatibility
*                                                                      *
************************************************************************

Note [Apartness]
~~~~~~~~~~~~~~~~
In dealing with closed type families, we must be able to check that one type
will never reduce to another. This check is called /apartness/. The check
is always between a target (which may be an arbitrary type) and a pattern.
Here is how we do it:

apart(target, pattern) = not (unify(flatten(target), pattern))

where flatten (implemented in flattenTys, below) converts all type-family
applications into fresh variables. (See
Note [Flattening type-family applications when matching instances]
in GHC.Core.Unify.)

Note [Compatibility]
~~~~~~~~~~~~~~~~~~~~
Two patterns are /compatible/ if either of the following conditions hold:
1) The patterns are apart.
2) The patterns unify with a substitution S, and their right hand sides
equal under that substitution.

For open type families, only compatible instances are allowed. For closed
type families, the story is slightly more complicated. Consider the following:

type family F a where
  F Int = Bool
  F a   = Int

g :: Show a => a -> F a
g x = length (show x)

Should that type-check? No. We need to allow for the possibility that 'a'
might be Int and therefore 'F a' should be Bool. We can simplify 'F a' to Int
only when we can be sure that 'a' is not Int.
To achieve this, after finding a possible match within the equations, we have to go back to all previous equations and check that, under the substitution induced by the match, other branches are surely apart. (See Note [Apartness].) This is similar to what happens with class instance selection, when we need to guarantee that there is only a match and no unifiers. The exact algorithm is different here because the potentially-overlapping group is closed. As another example, consider this: type family G x where G Int = Bool G a = Double type family H y -- no instances Now, we want to simplify (G (H Char)). We can't, because (H Char) might later simplify to be Int. So, (G (H Char)) is stuck, for now. While everything above is quite sound, it isn't as expressive as we'd like. Consider this: type family J a where J Int = Int J a = a Can we simplify (J b) to b? Sure we can. Yes, the first equation matches if b is instantiated with Int, but the RHSs coincide there, so it's all OK. So, the rule is this: when looking up a branch in a closed type family, we find a branch that matches the target, but then we make sure that the target is apart from every previous *incompatible* branch. We don't check the branches that are compatible with the matching branch, because they are either irrelevant (clause 1 of compatible) or benign (clause 2 of compatible). Note [Compatibility of eta-reduced axioms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In newtype instances of data families we eta-reduce the axioms, See Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. This means that we sometimes need to test compatibility of two axioms that were eta-reduced to different degrees, e.g.: data family D a b c newtype instance D a Int c = DInt (Maybe a) -- D a Int ~ Maybe -- lhs = [a, Int] newtype instance D Bool Int Char = DIntChar Float -- D Bool Int Char ~ Float -- lhs = [Bool, Int, Char] These are obviously incompatible. 
We could detect this by saturating (eta-expanding) the shorter LHS with fresh
tyvars until the lists are of equal length, but instead we can just remove
the tail of the longer list, as those types will simply unify with the freshly
introduced tyvars.

By doing this, in case the LHS are unifiable, the yielded substitution won't
mention the tyvars that appear in the tail we dropped off, and we might try
to test equality RHSes of different kinds, but that's fine since this case
occurs only for data families, where the RHS is a unique tycon and the
equality fails anyway.
-}

-- See Note [Compatibility]
compatibleBranches :: CoAxBranch -> CoAxBranch -> Bool
compatibleBranches (CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 })
                   (CoAxBranch { cab_lhs = lhs2, cab_rhs = rhs2 })
  = case tcUnifyTysFG alwaysBindFun commonlhs1 commonlhs2 of
      -- Here we need the cab_tvs of the two branches to be distinct.
      -- See Note [CoAxBranch type variables] in GHC.Core.Coercion.Axiom.
      SurelyApart   -> True
      MaybeApart {} -> False
      Unifiable subst -> Type.substTyAddInScope subst rhs1
                         `eqType`
                         Type.substTyAddInScope subst rhs2
  where
    -- Truncate both LHS lists to the shorter length.
    -- See Note [Compatibility of eta-reduced axioms]
    (commonlhs1, commonlhs2) = zipAndUnzip lhs1 lhs2

-- | Result of testing two type family equations for injectivity.
data InjectivityCheckResult
  = InjectivityAccepted
    -- ^ Either RHSs are distinct or unification of RHSs leads to unification
    -- of LHSs
  | InjectivityUnified CoAxBranch CoAxBranch
    -- ^ RHSs unify but LHSs don't unify under that substitution. Relevant for
    -- closed type families where equation after unification might be
    -- overlapped (in which case it is OK if they don't unify). Constructor
    -- stores axioms after unification.

-- | Check whether two type family axioms don't violate injectivity annotation.
injectiveBranches :: [Bool] -> CoAxBranch -> CoAxBranch -> InjectivityCheckResult injectiveBranches injectivity ax1@(CoAxBranch { cab_lhs = lhs1, cab_rhs = rhs1 }) ax2@(CoAxBranch { cab_lhs = lhs2, cab_rhs = rhs2 }) -- See Note [Verifying injectivity annotation], case 1. = let getInjArgs = filterByList injectivity in case tcUnifyTyWithTFs True rhs1 rhs2 of -- True = two-way pre-unification Nothing -> InjectivityAccepted -- RHS are different, so equations are injective. -- This is case 1A from Note [Verifying injectivity annotation] Just subst -> -- RHS unify under a substitution let lhs1Subst = Type.substTys subst (getInjArgs lhs1) lhs2Subst = Type.substTys subst (getInjArgs lhs2) -- If LHSs are equal under the substitution used for RHSs then this pair -- of equations does not violate injectivity annotation. If LHSs are not -- equal under that substitution then this pair of equations violates -- injectivity annotation, but for closed type families it still might -- be the case that one LHS after substitution is unreachable. in if eqTypes lhs1Subst lhs2Subst -- check case 1B1 from Note. then InjectivityAccepted else InjectivityUnified ( ax1 { cab_lhs = Type.substTys subst lhs1 , cab_rhs = Type.substTy subst rhs1 }) ( ax2 { cab_lhs = Type.substTys subst lhs2 , cab_rhs = Type.substTy subst rhs2 }) -- payload of InjectivityUnified used only for check 1B2, only -- for closed type families -- takes a CoAxiom with unknown branch incompatibilities and computes -- the compatibilities -- See Note [Storing compatibility] in GHC.Core.Coercion.Axiom computeAxiomIncomps :: [CoAxBranch] -> [CoAxBranch] computeAxiomIncomps branches = snd (mapAccumL go [] branches) where go :: [CoAxBranch] -> CoAxBranch -> ([CoAxBranch], CoAxBranch) go prev_brs cur_br = (new_br : prev_brs, new_br) where new_br = cur_br { cab_incomps = mk_incomps prev_brs cur_br } mk_incomps :: [CoAxBranch] -> CoAxBranch -> [CoAxBranch] mk_incomps prev_brs cur_br = filter (not . 
compatibleBranches cur_br) prev_brs {- ************************************************************************ * * Constructing axioms These functions are here because tidyType / tcUnifyTysFG are not available in GHC.Core.Coercion.Axiom Also computeAxiomIncomps is too sophisticated for CoAxiom * * ************************************************************************ Note [Tidy axioms when we build them] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Like types and classes, we build axioms fully quantified over all their variables, and tidy them when we build them. For example, we print out axioms and don't want to print stuff like F k k a b = ... Instead we must tidy those kind variables. See #7524. We could instead tidy when we print, but that makes it harder to get things like injectivity errors to come out right. Danger of Type family equation violates injectivity annotation. Kind variable ‘k’ cannot be inferred from the right-hand side. In the type family equation: PolyKindVars @[k1] @[k2] ('[] @k1) = '[] @k2 Note [Always number wildcard types in CoAxBranch] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following example (from the DataFamilyInstanceLHS test case): data family Sing (a :: k) data instance Sing (_ :: MyKind) where SingA :: Sing A SingB :: Sing B If we're not careful during tidying, then when this program is compiled with -ddump-types, we'll get the following information: COERCION AXIOMS axiom DataFamilyInstanceLHS.D:R:SingMyKind_0 :: Sing _ = DataFamilyInstanceLHS.R:SingMyKind_ _ It's misleading to have a wildcard type appearing on the RHS like that. To avoid this issue, when building a CoAxiom (which is what eventually gets printed above), we tidy all the variables in an env that already contains '_'. Thus, any variable named '_' will be renamed, giving us the nicer output here: COERCION AXIOMS axiom DataFamilyInstanceLHS.D:R:SingMyKind_0 :: Sing _1 = DataFamilyInstanceLHS.R:SingMyKind_ _1 Which is at least legal syntax. 
See also Note [CoAxBranch type variables] in GHC.Core.Coercion.Axiom; note that we are tidying (changing OccNames only), not freshening, in accordance with that Note. -} -- all axiom roles are Nominal, as this is only used with type families mkCoAxBranch :: [TyVar] -- original, possibly stale, tyvars -> [TyVar] -- Extra eta tyvars -> [CoVar] -- possibly stale covars -> [Type] -- LHS patterns -> Type -- RHS -> [Role] -> SrcSpan -> CoAxBranch mkCoAxBranch tvs eta_tvs cvs lhs rhs roles loc = CoAxBranch { cab_tvs = tvs' , cab_eta_tvs = eta_tvs' , cab_cvs = cvs' , cab_lhs = tidyTypes env lhs , cab_roles = roles , cab_rhs = tidyType env rhs , cab_loc = loc , cab_incomps = placeHolderIncomps } where (env1, tvs') = tidyVarBndrs init_tidy_env tvs (env2, eta_tvs') = tidyVarBndrs env1 eta_tvs (env, cvs') = tidyVarBndrs env2 cvs -- See Note [Tidy axioms when we build them] -- See also Note [CoAxBranch type variables] in GHC.Core.Coercion.Axiom init_occ_env = initTidyOccEnv [mkTyVarOcc "_"] init_tidy_env = mkEmptyTidyEnv init_occ_env -- See Note [Always number wildcard types in CoAxBranch] -- all of the following code is here to avoid mutual dependencies with -- Coercion mkBranchedCoAxiom :: Name -> TyCon -> [CoAxBranch] -> CoAxiom Branched mkBranchedCoAxiom ax_name fam_tc branches = CoAxiom { co_ax_unique = nameUnique ax_name , co_ax_name = ax_name , co_ax_tc = fam_tc , co_ax_role = Nominal , co_ax_implicit = False , co_ax_branches = manyBranches (computeAxiomIncomps branches) } mkUnbranchedCoAxiom :: Name -> TyCon -> CoAxBranch -> CoAxiom Unbranched mkUnbranchedCoAxiom ax_name fam_tc branch = CoAxiom { co_ax_unique = nameUnique ax_name , co_ax_name = ax_name , co_ax_tc = fam_tc , co_ax_role = Nominal , co_ax_implicit = False , co_ax_branches = unbranched (branch { cab_incomps = [] }) } mkSingleCoAxiom :: Role -> Name -> [TyVar] -> [TyVar] -> [CoVar] -> TyCon -> [Type] -> Type -> CoAxiom Unbranched -- Make a single-branch CoAxiom, including making the branch itself -- Used for 
both type family (Nominal) and data family (Representational) -- axioms, hence passing in the Role mkSingleCoAxiom role ax_name tvs eta_tvs cvs fam_tc lhs_tys rhs_ty = CoAxiom { co_ax_unique = nameUnique ax_name , co_ax_name = ax_name , co_ax_tc = fam_tc , co_ax_role = role , co_ax_implicit = False , co_ax_branches = unbranched (branch { cab_incomps = [] }) } where branch = mkCoAxBranch tvs eta_tvs cvs lhs_tys rhs_ty (map (const Nominal) tvs) (getSrcSpan ax_name) -- | Create a coercion constructor (axiom) suitable for the given -- newtype 'TyCon'. The 'Name' should be that of a new coercion -- 'CoAxiom', the 'TyVar's the arguments expected by the @newtype@ and -- the type the appropriate right hand side of the @newtype@, with -- the free variables a subset of those 'TyVar's. mkNewTypeCoAxiom :: Name -> TyCon -> [TyVar] -> [Role] -> Type -> CoAxiom Unbranched mkNewTypeCoAxiom name tycon tvs roles rhs_ty = CoAxiom { co_ax_unique = nameUnique name , co_ax_name = name , co_ax_implicit = True -- See Note [Implicit axioms] in GHC.Core.TyCon , co_ax_role = Representational , co_ax_tc = tycon , co_ax_branches = unbranched (branch { cab_incomps = [] }) } where branch = mkCoAxBranch tvs [] [] (mkTyVarTys tvs) rhs_ty roles (getSrcSpan name) {- ************************************************************************ * * Looking up a family instance * * ************************************************************************ @lookupFamInstEnv@ looks up in a @FamInstEnv@, using a one-way match. Multiple matches are only possible in case of type families (not data families), and then, it doesn't matter which match we choose (as the instances are guaranteed confluent). We return the matching family instances and the type instance at which it matches. For example, if we lookup 'T [Int]' and have a family instance data instance T [a] = .. desugared to data :R42T a = .. coe :Co:R42T a :: T [a] ~ :R42T a we return the matching instance '(FamInst{.., fi_tycon = :R42T}, Int)'. 
-}

-- when matching a type family application, we get a FamInst,
-- and the list of types the axiom should be applied to
data FamInstMatch = FamInstMatch { fim_instance :: FamInst
                                 , fim_tys      :: [Type]
                                 , fim_cos      :: [Coercion]
                                 }
  -- See Note [Over-saturated matches]

instance Outputable FamInstMatch where
  ppr (FamInstMatch { fim_instance = inst
                    , fim_tys      = tys
                    , fim_cos      = cos })
    = text "match with" <+> parens (ppr inst) <+> ppr tys <+> ppr cos

-- | All instances of the given family 'TyCon', gathered from both the
-- external-package and home-package environments.
lookupFamInstEnvByTyCon :: FamInstEnvs -> TyCon -> [FamInst]
lookupFamInstEnvByTyCon (pkg_ie, home_ie) fam_tc
  = get pkg_ie ++ get home_ie
  where
    get (FamIE _ rm) = lookupRM [RML_KnownTc (tyConName fam_tc)] rm

lookupFamInstEnv
    :: FamInstEnvs
    -> TyCon -> [Type]          -- What we are looking for
    -> [FamInstMatch]           -- Successful matches
-- Precondition: the tycon is saturated (or over-saturated)
lookupFamInstEnv = lookup_fam_inst_env WantMatches

lookupFamInstEnvConflicts
    :: FamInstEnvs
    -> FamInst          -- Putative new instance
    -> [FamInst]        -- Conflicting matches (don't look at the fim_tys field)
-- E.g. when we are about to add
--    f : type instance F [a] = a->a
-- we do (lookupFamInstConflicts f [b])
-- to find conflicting matches
--
-- Precondition: the tycon is saturated (or over-saturated)
lookupFamInstEnvConflicts envs fam_inst
  = lookup_fam_inst_env (WantConflicts fam_inst) envs fam tys
  where
    (fam, tys) = famInstSplitLHS fam_inst

--------------------------------------------------------------------------------
--                 Type family injectivity checking bits                      --
--------------------------------------------------------------------------------

{- Note [Verifying injectivity annotation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Injectivity means that the RHS of a type family uniquely determines the LHS (see
Note [Type inference for type families with injectivity]). The user informs us
about injectivity using an injectivity annotation and it is GHC's task to verify
that this annotation is correct w.r.t. type family equations.
Whenever we see a new equation of a type family we need to make sure that adding this equation to the already known equations of a type family does not violate the injectivity annotation supplied by the user (see Note [Injectivity annotation]). Of course if the type family has no injectivity annotation then no check is required. But if a type family has injectivity annotation we need to make sure that the following conditions hold: 1. For each pair of *different* equations of a type family, one of the following conditions holds: A: RHSs are different. (Check done in GHC.Core.FamInstEnv.injectiveBranches) B1: OPEN TYPE FAMILIES: If the RHSs can be unified under some substitution then it must be possible to unify the LHSs under the same substitution. Example: type family FunnyId a = r | r -> a type instance FunnyId Int = Int type instance FunnyId a = a RHSs of these two equations unify under [ a |-> Int ] substitution. Under this substitution LHSs are equal therefore these equations don't violate injectivity annotation. (Check done in GHC.Core.FamInstEnv.injectiveBranches) B2: CLOSED TYPE FAMILIES: If the RHSs can be unified under some substitution then either the LHSs unify under the same substitution or the LHS of the latter equation is overlapped by earlier equations. Example 1: type family SwapIntChar a = r | r -> a where SwapIntChar Int = Char SwapIntChar Char = Int SwapIntChar a = a Say we are checking the last two equations. RHSs unify under [ a |-> Int ] substitution but LHSs don't. So we apply the substitution to LHS of last equation and check whether it is overlapped by any of previous equations. Since it is overlapped by the first equation we conclude that pair of last two equations does not violate injectivity annotation. (Check done in GHC.Tc.Validity.checkValidCoAxiom#gather_conflicts) A special case of B is when RHSs unify with an empty substitution ie. they are identical. 
If any of the above two conditions holds we conclude that the pair of equations does not violate injectivity annotation. But if we find a pair of equations where neither of the above holds we report that this pair violates injectivity annotation because for a given RHS we don't have a unique LHS. (Note that (B) actually implies (A).) Note that we only take into account these LHS patterns that were declared as injective. 2. If an RHS of a type family equation is a bare type variable then all LHS variables (including implicit kind variables) also have to be bare. In other words, this has to be a sole equation of that type family and it has to cover all possible patterns. So for example this definition will be rejected: type family W1 a = r | r -> a type instance W1 [a] = a If it were accepted we could call `W1 [W1 Int]`, which would reduce to `W1 Int` and then by injectivity we could conclude that `[W1 Int] ~ Int`, which is bogus. Checked FamInst.bareTvInRHSViolated. 3. If the RHS of a type family equation is a type family application then the type family is rejected as not injective. This is checked by FamInst.isTFHeaded. 4. If a LHS type variable that is declared as injective is not mentioned in an injective position in the RHS then the type family is rejected as not injective. "Injective position" means either an argument to a type constructor or argument to a type family on injective position. There are subtleties here. See Note [Coverage condition for injective type families] in GHC.Tc.Instance.Family. Check (1) must be done for all family instances (transitively) imported. Other checks (2-4) should be done just for locally written equations, as they are checks involving just a single equation, not about interactions. Doing the other checks for imported equations led to #17405, as the behavior of check (4) depends on -XUndecidableInstances (see Note [Coverage condition for injective type families] in FamInst), which may vary between modules. 
See also Note [Injective type families] in GHC.Core.TyCon
-}

-- | Check whether an open type family equation can be added to already existing
-- instance environment without causing conflicts with supplied injectivity
-- annotations.  Returns list of conflicting axioms (type instance
-- declarations).
lookupFamInstEnvInjectivityConflicts
    :: [Bool]         -- injectivity annotation for this type family instance
                      -- INVARIANT: list contains at least one True value
    -> FamInstEnvs    -- all type instances seen so far
    -> FamInst        -- new type instance that we're checking
    -> [CoAxBranch]   -- conflicting instance declarations
lookupFamInstEnvInjectivityConflicts injList fam_inst_envs
                             fam_inst@(FamInst { fi_axiom = new_axiom })
  -- Injectivity is only checked for open families; closed families are
  -- handled elsewhere.
  | not $ isOpenFamilyTyCon fam
  = []

  | otherwise
  -- See Note [Verifying injectivity annotation]. This function implements
  -- check (1.B1) for open type families described there.
  = map (coAxiomSingleBranch . fi_axiom) $
    filter isInjConflict $
    familyInstances fam_inst_envs fam
    where
      fam        = famInstTyCon fam_inst
      new_branch = coAxiomSingleBranch new_axiom

      -- filtering function used by `lookup_inj_fam_conflicts` to check whether
      -- a pair of equations conflicts with the injectivity annotation.
      isInjConflict (FamInst { fi_axiom = old_axiom })
        | InjectivityAccepted <-
            injectiveBranches injList (coAxiomSingleBranch old_axiom) new_branch
        = False -- no conflict
        | otherwise = True


--------------------------------------------------------------------------------
--                    Type family overlap checking bits                       --
--------------------------------------------------------------------------------

{-
Note [Family instance overlap conflicts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- In the case of data family instances, any overlap is fundamentally a
  conflict (as these instances imply injective type mappings).

- In the case of type family instances, overlap is admitted as long as
  the right-hand sides of the overlapping rules coincide under the
  overlap substitution.
eg type instance F a Int = a type instance F Int b = b These two overlap on (F Int Int) but then both RHSs are Int, so all is well. We require that they are syntactically equal; anything else would be difficult to test for at this stage. -} ------------------------------------------------------------ -- Might be a one-way match or a unifier data FamInstLookupMode a where -- The FamInst we are trying to find conflicts against WantConflicts :: FamInst -> FamInstLookupMode FamInst WantMatches :: FamInstLookupMode FamInstMatch lookup_fam_inst_env' -- The worker, local to this module :: forall a . FamInstLookupMode a -> FamInstEnv -> TyCon -> [Type] -- What we are looking for -> [a] lookup_fam_inst_env' lookup_mode (FamIE _ ie) fam match_tys | isOpenFamilyTyCon fam , let xs = rm_fun (lookupRM' rough_tmpl ie) -- The common case -- Avoid doing any of the allocation below if there are no instances to look at. , not $ null xs = mapMaybe' check_fun xs | otherwise = [] where rough_tmpl :: [RoughMatchLookupTc] rough_tmpl = RML_KnownTc (tyConName fam) : map typeToRoughMatchLookupTc match_tys rm_fun :: (Bag FamInst, [FamInst]) -> [FamInst] (rm_fun, check_fun) = case lookup_mode of WantConflicts fam_inst -> (snd, unify_fun fam_inst) WantMatches -> (bagToList . 
fst, match_fun) -- Function used for finding unifiers unify_fun orig_fam_inst item@(FamInst { fi_axiom = old_axiom, fi_tys = tpl_tys, fi_tvs = tpl_tvs }) = assertPpr (tyCoVarsOfTypes tys `disjointVarSet` mkVarSet tpl_tvs) ((ppr fam <+> ppr tys) $$ (ppr tpl_tvs <+> ppr tpl_tys)) $ -- Unification will break badly if the variables overlap -- They shouldn't because we allocate separate uniques for them if compatibleBranches (coAxiomSingleBranch old_axiom) new_branch then Nothing else Just item -- See Note [Family instance overlap conflicts] where new_branch = coAxiomSingleBranch (famInstAxiom orig_fam_inst) (fam, tys) = famInstSplitLHS orig_fam_inst -- Function used for checking matches match_fun item@(FamInst { fi_tvs = tpl_tvs, fi_cvs = tpl_cvs , fi_tys = tpl_tys }) = do subst <- tcMatchTys tpl_tys match_tys1 return (FamInstMatch { fim_instance = item , fim_tys = substTyVars subst tpl_tvs `chkAppend` match_tys2 , fim_cos = assert (all (isJust . lookupCoVar subst) tpl_cvs) $ substCoVars subst tpl_cvs }) where (match_tys1, match_tys2) = split_tys tpl_tys -- Precondition: the tycon is saturated (or over-saturated) -- Deal with over-saturation -- See Note [Over-saturated matches] split_tys tpl_tys | isTypeFamilyTyCon fam = pre_rough_split_tys | otherwise = let (match_tys1, match_tys2) = splitAtList tpl_tys match_tys in (match_tys1, match_tys2) (pre_match_tys1, pre_match_tys2) = splitAt (tyConArity fam) match_tys pre_rough_split_tys = (pre_match_tys1, pre_match_tys2) lookup_fam_inst_env -- The worker, local to this module :: FamInstLookupMode a -> FamInstEnvs -> TyCon -> [Type] -- What we are looking for -> [a] -- Successful matches -- Precondition: the tycon is saturated (or over-saturated) lookup_fam_inst_env match_fun (pkg_ie, home_ie) fam tys = lookup_fam_inst_env' match_fun home_ie fam tys ++ lookup_fam_inst_env' match_fun pkg_ie fam tys {- Note [Over-saturated matches] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's ok to look up an over-saturated type constructor. E.g. 
type family F a :: * -> * type instance F (a,b) = Either (a->b) The type instance gives rise to a newtype TyCon (at a higher kind which you can't do in Haskell!): newtype FPair a b = FP (Either (a->b)) Then looking up (F (Int,Bool) Char) will return a FamInstMatch (FPair, [Int,Bool,Char]) The "extra" type argument [Char] just stays on the end. We handle data families and type families separately here: * For type families, all instances of a type family must have the same arity, so we can precompute the split between the match_tys and the overflow tys. This is done in pre_rough_split_tys. * For data family instances, though, we need to re-split for each instance, because the breakdown might be different for each instance. Why? Because of eta reduction; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom. -} -- checks if one LHS is dominated by a list of other branches -- in other words, if an application would match the first LHS, it is guaranteed -- to match at least one of the others. The RHSs are ignored. -- This algorithm is conservative: -- True -> the LHS is definitely covered by the others -- False -> no information -- It is currently (Oct 2012) used only for generating errors for -- inaccessible branches. If these errors go unreported, no harm done. 
-- This is defined here to avoid a dependency from CoAxiom to Unify isDominatedBy :: CoAxBranch -> [CoAxBranch] -> Bool isDominatedBy branch branches = or $ map match branches where lhs = coAxBranchLHS branch match (CoAxBranch { cab_lhs = tys }) = isJust $ tcMatchTys tys lhs {- ************************************************************************ * * Choosing an axiom application * * ************************************************************************ The lookupFamInstEnv function does a nice job for *open* type families, but we also need to handle closed ones when normalising a type: -} reduceTyFamApp_maybe :: FamInstEnvs -> Role -- Desired role of result coercion -> TyCon -> [Type] -> Maybe Reduction -- Attempt to do a *one-step* reduction of a type-family application -- but *not* newtypes -- Works on type-synonym families always; data-families only if -- the role we seek is representational -- It does *not* normalise the type arguments first, so this may not -- go as far as you want. If you want normalised type arguments, -- use topReduceTyFamApp_maybe -- -- The TyCon can be oversaturated. -- Works on both open and closed families -- -- Always returns a *homogeneous* coercion -- type family reductions are always -- homogeneous reduceTyFamApp_maybe envs role tc tys | Phantom <- role = Nothing | case role of Representational -> isOpenFamilyTyCon tc _ -> isOpenTypeFamilyTyCon tc -- If we seek a representational coercion -- (e.g. 
the call in topNormaliseType_maybe) then we can -- unwrap data families as well as type-synonym families; -- otherwise only type-synonym families , FamInstMatch { fim_instance = FamInst { fi_axiom = ax } , fim_tys = inst_tys , fim_cos = inst_cos } : _ <- lookupFamInstEnv envs tc tys -- NB: Allow multiple matches because of compatible overlap = let co = mkUnbranchedAxInstCo role ax inst_tys inst_cos in Just $ coercionRedn co | Just ax <- isClosedSynFamilyTyConWithAxiom_maybe tc , Just (ind, inst_tys, inst_cos) <- chooseBranch ax tys = let co = mkAxInstCo role ax ind inst_tys inst_cos in Just $ coercionRedn co | Just ax <- isBuiltInSynFamTyCon_maybe tc , Just (coax,ts,ty) <- sfMatchFam ax tys , role == coaxrRole coax = let co = mkAxiomRuleCo coax (zipWith mkReflCo (coaxrAsmpRoles coax) ts) in Just $ mkReduction co ty | otherwise = Nothing -- The axiom can be oversaturated. (Closed families only.) chooseBranch :: CoAxiom Branched -> [Type] -> Maybe (BranchIndex, [Type], [Coercion]) -- found match, with args chooseBranch axiom tys = do { let num_pats = coAxiomNumPats axiom (target_tys, extra_tys) = splitAt num_pats tys branches = coAxiomBranches axiom ; (ind, inst_tys, inst_cos) <- findBranch (unMkBranches branches) target_tys ; return ( ind, inst_tys `chkAppend` extra_tys, inst_cos ) } -- The axiom must *not* be oversaturated findBranch :: Array BranchIndex CoAxBranch -> [Type] -> Maybe (BranchIndex, [Type], [Coercion]) -- coercions relate requested types to returned axiom LHS at role N findBranch branches target_tys = foldr go Nothing (assocs branches) where go :: (BranchIndex, CoAxBranch) -> Maybe (BranchIndex, [Type], [Coercion]) -> Maybe (BranchIndex, [Type], [Coercion]) go (index, branch) other = let (CoAxBranch { cab_tvs = tpl_tvs, cab_cvs = tpl_cvs , cab_lhs = tpl_lhs , cab_incomps = incomps }) = branch in_scope = mkInScopeSet (unionVarSets $ map (tyCoVarsOfTypes . 
coAxBranchLHS) incomps) -- See Note [Flattening type-family applications when matching instances] -- in GHC.Core.Unify flattened_target = flattenTys in_scope target_tys in case tcMatchTys tpl_lhs target_tys of Just subst -- matching worked. now, check for apartness. | apartnessCheck flattened_target branch -> -- matching worked & we're apart from all incompatible branches. -- success assert (all (isJust . lookupCoVar subst) tpl_cvs) $ Just (index, substTyVars subst tpl_tvs, substCoVars subst tpl_cvs) -- failure. keep looking _ -> other -- | Do an apartness check, as described in the "Closed Type Families" paper -- (POPL '14). This should be used when determining if an equation -- ('CoAxBranch') of a closed type family can be used to reduce a certain target -- type family application. apartnessCheck :: [Type] -- ^ /flattened/ target arguments. Make sure they're flattened! See -- Note [Flattening type-family applications when matching instances] -- in GHC.Core.Unify. -> CoAxBranch -- ^ the candidate equation we wish to use -- Precondition: this matches the target -> Bool -- ^ True <=> equation can fire apartnessCheck flattened_target (CoAxBranch { cab_incomps = incomps }) = all (isSurelyApart . tcUnifyTysFG alwaysBindFun flattened_target . coAxBranchLHS) incomps where isSurelyApart SurelyApart = True isSurelyApart _ = False {- ************************************************************************ * * Looking up a family instance * * ************************************************************************ Note [Normalising types] ~~~~~~~~~~~~~~~~~~~~~~~~ The topNormaliseType function removes all occurrences of type families and newtypes from the top-level structure of a type. normaliseTcApp does the type family lookup and is fairly straightforward. normaliseType is a little more involved. The complication comes from the fact that a type family might be used in the kind of a variable bound in a forall. 
We wish to remove this type family application, but that means coming up with a fresh variable (with the new kind). Thus, we need a substitution to be built up as we recur through the type. However, an ordinary TCvSubst just won't do: when we hit a type variable whose kind has changed during normalisation, we need both the new type variable *and* the coercion. We could conjure up a new VarEnv with just this property, but a usable substitution environment already exists: LiftingContexts from the liftCoSubst family of functions, defined in GHC.Core.Coercion. A LiftingContext maps a type variable to a coercion and a coercion variable to a pair of coercions. Let's ignore coercion variables for now. Because the coercion a type variable maps to contains the destination type (via coercionKind), we don't need to store that destination type separately. Thus, a LiftingContext has what we need: a map from type variables to (Coercion, Type) pairs. We also benefit because we can piggyback on the liftCoSubstVarBndr function to deal with binders. However, I had to modify that function to work with this application. Thus, we now have liftCoSubstVarBndrUsing, which takes a function used to process the kind of the binder. We don't wish to lift the kind, but instead normalise it. So, we pass in a callback function that processes the kind of the binder. After that brilliant explanation of all this, I'm sure you've forgotten the dangling reference to coercion variables. What do we do with those? Nothing at all. The point of normalising types is to remove type family applications, but there's no sense in removing these from coercions. We would just get back a new coercion witnessing the equality between the same types as the original coercion. Because coercions are irrelevant anyway, there is no point in doing this. So, whenever we encounter a coercion, we just say that it won't change. That's what the CoercionTy case is doing within normalise_type. 
Note [Normalisation and type synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need to be a bit careful about normalising in the presence of type synonyms (#13035). Suppose S is a type synonym, and we have S t1 t2 If S is family-free (on its RHS) we can just normalise t1 and t2 and reconstruct (S t1' t2'). Expanding S could not reveal any new redexes because type families are saturated. But if S has a type family on its RHS we expand /before/ normalising the args t1, t2. If we normalise t1, t2 first, we'll re-normalise them after expansion, and that can lead to /exponential/ behaviour; see #13035. Notice, though, that expanding first can in principle duplicate t1,t2, which might contain redexes. I'm sure you could conjure up an exponential case by that route too, but it hasn't happened in practice yet! -} topNormaliseType :: FamInstEnvs -> Type -> Type topNormaliseType env ty = case topNormaliseType_maybe env ty of Just redn -> reductionReducedType redn Nothing -> ty topNormaliseType_maybe :: FamInstEnvs -> Type -> Maybe Reduction -- ^ Get rid of *outermost* (or toplevel) -- * type function redex -- * data family redex -- * newtypes -- returning an appropriate Representational coercion. Specifically, if -- topNormaliseType_maybe env ty = Just (co, ty') -- then -- (a) co :: ty ~R ty' -- (b) ty' is not a newtype, and is not a type-family or data-family redex -- -- However, ty' can be something like (Maybe (F ty)), where -- (F ty) is a redex. -- -- Always operates homogeneously: the returned type has the same kind as the -- original type, and the returned coercion is always homogeneous. 
topNormaliseType_maybe env ty = do { ((co, mkind_co), nty) <- topNormaliseTypeX stepper combine ty ; let hredn = mkHetReduction (mkReduction co nty) mkind_co ; return $ homogeniseHetRedn Representational hredn } where stepper = unwrapNewTypeStepper' `composeSteppers` tyFamStepper combine (c1, mc1) (c2, mc2) = (c1 `mkTransCo` c2, mc1 `mkTransMCo` mc2) unwrapNewTypeStepper' :: NormaliseStepper (Coercion, MCoercionN) unwrapNewTypeStepper' rec_nts tc tys = mapStepResult (, MRefl) $ unwrapNewTypeStepper rec_nts tc tys -- second coercion below is the kind coercion relating the original type's kind -- to the normalised type's kind tyFamStepper :: NormaliseStepper (Coercion, MCoercionN) tyFamStepper rec_nts tc tys -- Try to step a type/data family = case topReduceTyFamApp_maybe env tc tys of Just (HetReduction (Reduction co rhs) res_co) -> NS_Step rec_nts rhs (co, res_co) _ -> NS_Done --------------- normaliseTcApp :: FamInstEnvs -> Role -> TyCon -> [Type] -> Reduction -- See comments on normaliseType for the arguments of this function normaliseTcApp env role tc tys = initNormM env role (tyCoVarsOfTypes tys) $ normalise_tc_app tc tys -- See Note [Normalising types] about the LiftingContext normalise_tc_app :: TyCon -> [Type] -> NormM Reduction normalise_tc_app tc tys | Just (tenv, rhs, tys') <- expandSynTyCon_maybe tc tys , not (isFamFreeTyCon tc) -- Expand and try again = -- A synonym with type families in the RHS -- Expand and try again -- See Note [Normalisation and type synonyms] normalise_type (mkAppTys (substTy (mkTvSubstPrs tenv) rhs) tys') | isFamilyTyCon tc = -- A type-family application do { env <- getEnv ; role <- getRole ; ArgsReductions redns@(Reductions args_cos ntys) res_co <- normalise_tc_args tc tys ; case reduceTyFamApp_maybe env role tc ntys of Just redn1 -> do { redn2 <- normalise_reduction redn1 ; let redn3 = mkTyConAppCo role tc args_cos `mkTransRedn` redn2 ; return $ assemble_result role redn3 res_co } _ -> -- No unique matching family instance 
exists; -- we do not do anything return $ assemble_result role (mkTyConAppRedn role tc redns) res_co } | otherwise = -- A synonym with no type families in the RHS; or data type etc -- Just normalise the arguments and rebuild do { ArgsReductions redns res_co <- normalise_tc_args tc tys ; role <- getRole ; return $ assemble_result role (mkTyConAppRedn role tc redns) res_co } where assemble_result :: Role -- r, ambient role in NormM monad -> Reduction -- orig_ty ~r nty, possibly heterogeneous (nty possibly of changed kind) -> MCoercionN -- typeKind(orig_ty) ~N typeKind(nty) -> Reduction -- orig_ty ~r nty_casted -- where nty_casted has same kind as orig_ty assemble_result r redn kind_co = mkCoherenceRightMRedn r redn (mkSymMCo kind_co) --------------- -- | Try to simplify a type-family application, by *one* step -- If topReduceTyFamApp_maybe env r F tys = Just (HetReduction (Reduction co rhs) res_co) -- then co :: F tys ~R# rhs -- res_co :: typeKind(F tys) ~ typeKind(rhs) -- Type families and data families; always Representational role topReduceTyFamApp_maybe :: FamInstEnvs -> TyCon -> [Type] -> Maybe HetReduction topReduceTyFamApp_maybe envs fam_tc arg_tys | isFamilyTyCon fam_tc -- type families and data families , Just redn <- reduceTyFamApp_maybe envs role fam_tc ntys = Just $ mkHetReduction (mkTyConAppCo role fam_tc args_cos `mkTransRedn` redn) res_co | otherwise = Nothing where role = Representational ArgsReductions (Reductions args_cos ntys) res_co = initNormM envs role (tyCoVarsOfTypes arg_tys) $ normalise_tc_args fam_tc arg_tys normalise_tc_args :: TyCon -> [Type] -> NormM ArgsReductions normalise_tc_args tc tys = do { role <- getRole ; normalise_args (tyConKind tc) (tyConRolesX role tc) tys } --------------- normaliseType :: FamInstEnvs -> Role -- desired role of coercion -> Type -> Reduction normaliseType env role ty = initNormM env role (tyCoVarsOfType ty) $ normalise_type ty normalise_type :: Type -> NormM Reduction -- Normalise the input type, by 
eliminating *all* type-function redexes -- but *not* newtypes (which are visible to the programmer) -- Returns with Refl if nothing happens -- Does nothing to newtypes -- The returned coercion *must* be *homogeneous* -- See Note [Normalising types] -- Try not to disturb type synonyms if possible normalise_type ty = go ty where go :: Type -> NormM Reduction go (TyConApp tc tys) = normalise_tc_app tc tys go ty@(LitTy {}) = do { r <- getRole ; return $ mkReflRedn r ty } go (AppTy ty1 ty2) = go_app_tys ty1 [ty2] go (FunTy { ft_af = vis, ft_mult = w, ft_arg = ty1, ft_res = ty2 }) = do { arg_redn <- go ty1 ; res_redn <- go ty2 ; w_redn <- withRole Nominal $ go w ; r <- getRole ; return $ mkFunRedn r vis w_redn arg_redn res_redn } go (ForAllTy (Bndr tcvar vis) ty) = do { (lc', tv', k_redn) <- normalise_var_bndr tcvar ; redn <- withLC lc' $ normalise_type ty ; return $ mkForAllRedn vis tv' k_redn redn } go (TyVarTy tv) = normalise_tyvar tv go (CastTy ty co) = do { redn <- go ty ; lc <- getLC ; let co' = substRightCo lc co ; return $ mkCastRedn2 Nominal ty co redn co' -- ^^^^^^^^^^^ uses castCoercionKind2 } go (CoercionTy co) = do { lc <- getLC ; r <- getRole ; let kco = liftCoSubst Nominal lc (coercionType co) co' = substRightCo lc co ; return $ mkProofIrrelRedn r kco co co' } go_app_tys :: Type -- function -> [Type] -- args -> NormM Reduction -- cf. GHC.Tc.Solver.Rewrite.rewrite_app_ty_args go_app_tys (AppTy ty1 ty2) tys = go_app_tys ty1 (ty2 : tys) go_app_tys fun_ty arg_tys = do { fun_redn@(Reduction fun_co nfun) <- go fun_ty ; case tcSplitTyConApp_maybe nfun of Just (tc, xis) -> do { redn <- go (mkTyConApp tc (xis ++ arg_tys)) -- rewrite_app_ty_args avoids redundantly processing the xis, -- but that's a much more performance-sensitive function. -- This type normalisation is not called in a loop. 
; return $ mkAppCos fun_co (map mkNomReflCo arg_tys) `mkTransRedn` redn } Nothing -> do { ArgsReductions redns res_co <- normalise_args (typeKind nfun) (repeat Nominal) arg_tys ; role <- getRole ; return $ mkCoherenceRightMRedn role (mkAppRedns fun_redn redns) (mkSymMCo res_co) } } normalise_args :: Kind -- of the function -> [Role] -- roles at which to normalise args -> [Type] -- args -> NormM ArgsReductions -- returns ArgsReductions (Reductions cos xis) res_co, -- where each xi is the normalised version of the corresponding type, -- each co is orig_arg ~ xi, and res_co :: kind(f orig_args) ~ kind(f xis). -- NB: The xis might *not* have the same kinds as the input types, -- but the resulting application *will* be well-kinded -- cf. GHC.Tc.Solver.Rewrite.rewrite_args_slow normalise_args fun_ki roles args = do { normed_args <- zipWithM normalise1 roles args ; return $ simplifyArgsWorker ki_binders inner_ki fvs roles normed_args } where (ki_binders, inner_ki) = splitPiTys fun_ki fvs = tyCoVarsOfTypes args normalise1 role ty = withRole role $ normalise_type ty normalise_tyvar :: TyVar -> NormM Reduction normalise_tyvar tv = assert (isTyVar tv) $ do { lc <- getLC ; r <- getRole ; return $ case liftCoSubstTyVar lc r tv of Just co -> coercionRedn co Nothing -> mkReflRedn r (mkTyVarTy tv) } normalise_reduction :: Reduction -> NormM Reduction normalise_reduction (Reduction co ty) = do { redn' <- normalise_type ty ; return $ co `mkTransRedn` redn' } normalise_var_bndr :: TyCoVar -> NormM (LiftingContext, TyCoVar, Reduction) normalise_var_bndr tcvar -- works for both tvar and covar = do { lc1 <- getLC ; env <- getEnv ; let callback lc ki = runNormM (normalise_type ki) env lc Nominal ; return $ liftCoSubstVarBndrUsing reductionCoercion callback lc1 tcvar } -- | a monad for the normalisation functions, reading 'FamInstEnvs', -- a 'LiftingContext', and a 'Role'. 
newtype NormM a = NormM { runNormM :: FamInstEnvs -> LiftingContext -> Role -> a } deriving (Functor) initNormM :: FamInstEnvs -> Role -> TyCoVarSet -- the in-scope variables -> NormM a -> a initNormM env role vars (NormM thing_inside) = thing_inside env lc role where in_scope = mkInScopeSet vars lc = emptyLiftingContext in_scope getRole :: NormM Role getRole = NormM (\ _ _ r -> r) getLC :: NormM LiftingContext getLC = NormM (\ _ lc _ -> lc) getEnv :: NormM FamInstEnvs getEnv = NormM (\ env _ _ -> env) withRole :: Role -> NormM a -> NormM a withRole r thing = NormM $ \ envs lc _old_r -> runNormM thing envs lc r withLC :: LiftingContext -> NormM a -> NormM a withLC lc thing = NormM $ \ envs _old_lc r -> runNormM thing envs lc r instance Monad NormM where ma >>= fmb = NormM $ \env lc r -> let a = runNormM ma env lc r in runNormM (fmb a) env lc r instance Applicative NormM where pure x = NormM $ \ _ _ _ -> x (<*>) = ap ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/InstEnv.hs0000644000000000000000000015753314472400112020463 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[InstEnv]{Utilities for typechecking instance declarations} The bits common to GHC.Tc.TyCl.Instance and GHC.Tc.Deriv. 
-} {-# LANGUAGE DeriveDataTypeable #-} module GHC.Core.InstEnv ( DFunId, InstMatch, ClsInstLookupResult, PotentialUnifiers(..), getPotentialUnifiers, nullUnifiers, OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe, ClsInst(..), DFunInstType, pprInstance, pprInstanceHdr, pprInstances, instanceHead, instanceSig, mkLocalInstance, mkImportedInstance, instanceDFunId, updateClsInstDFuns, updateClsInstDFun, fuzzyClsInstCmp, orphNamesOfClsInst, InstEnvs(..), VisibleOrphanModules, InstEnv, mkInstEnv, emptyInstEnv, unionInstEnv, extendInstEnv, filterInstEnv, deleteFromInstEnv, deleteDFunFromInstEnv, anyInstEnv, identicalClsInstHead, extendInstEnvList, lookupUniqueInstEnv, lookupInstEnv, instEnvElts, instEnvClasses, mapInstEnv, memberInstEnv, instIsVisible, classInstances, instanceBindFun, classNameInstances, instanceCantMatch, roughMatchTcs, isOverlappable, isOverlapping, isIncoherent ) where import GHC.Prelude import GHC.Tc.Utils.TcType -- InstEnv is really part of the type checker, -- and depends on TcType in many ways import GHC.Core ( IsOrphan(..), isOrphan, chooseOrphanAnchor ) import GHC.Core.RoughMap import GHC.Unit.Module.Env import GHC.Unit.Types import GHC.Core.Class import GHC.Types.Var import GHC.Types.Unique.DSet import GHC.Types.Var.Set import GHC.Types.Name import GHC.Types.Name.Set import GHC.Core.Unify import GHC.Types.Basic import GHC.Types.Id import Data.Data ( Data ) import Data.Maybe ( isJust ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.Semigroup {- ************************************************************************ * * ClsInst: the data type for type-class instances * * ************************************************************************ -} -- | A type-class instance. Note that there is some tricky laziness at work -- here. See Note [ClsInst laziness and the rough-match fields] for more -- details. 
data ClsInst = ClsInst { -- Used for "rough matching"; see -- Note [ClsInst laziness and the rough-match fields] -- INVARIANT: is_tcs = KnownTc is_cls_nm : roughMatchTcs is_tys is_cls_nm :: Name -- ^ Class name , is_tcs :: [RoughMatchTc] -- ^ Top of type args -- The class itself is always -- the first element of this list -- | @is_dfun_name = idName . is_dfun@. -- -- We use 'is_dfun_name' for the visibility check, -- 'instIsVisible', which needs to know the 'Module' which the -- dictionary is defined in. However, we cannot use the 'Module' -- attached to 'is_dfun' since doing so would mean we would -- potentially pull in an entire interface file unnecessarily. -- This was the cause of #12367. , is_dfun_name :: Name -- Used for "proper matching"; see Note [Proper-match fields] , is_tvs :: [TyVar] -- Fresh template tyvars for full match -- See Note [Template tyvars are fresh] , is_cls :: Class -- The real class , is_tys :: [Type] -- Full arg types (mentioning is_tvs) -- INVARIANT: is_dfun Id has type -- forall is_tvs. (...) => is_cls is_tys -- (modulo alpha conversion) , is_dfun :: DFunId -- See Note [Haddock assumptions] , is_flag :: OverlapFlag -- See detailed comments with -- the decl of BasicTypes.OverlapFlag , is_orphan :: IsOrphan } deriving Data -- | A fuzzy comparison function for class instances, intended for sorting -- instances before displaying them to the user. 
fuzzyClsInstCmp :: ClsInst -> ClsInst -> Ordering fuzzyClsInstCmp x y = foldMap cmp (zip (is_tcs x) (is_tcs y)) where cmp (RM_WildCard, RM_WildCard) = EQ cmp (RM_WildCard, RM_KnownTc _) = LT cmp (RM_KnownTc _, RM_WildCard) = GT cmp (RM_KnownTc x, RM_KnownTc y) = stableNameCmp x y isOverlappable, isOverlapping, isIncoherent :: ClsInst -> Bool isOverlappable i = hasOverlappableFlag (overlapMode (is_flag i)) isOverlapping i = hasOverlappingFlag (overlapMode (is_flag i)) isIncoherent i = hasIncoherentFlag (overlapMode (is_flag i)) {- Note [ClsInst laziness and the rough-match fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we load 'instance A.C B.T' from A.hi, but suppose that the type B.T is otherwise unused in the program. Then it's stupid to load B.hi, the data type declaration for B.T -- and perhaps further instance declarations! We avoid this as follows: * is_cls_nm, is_tcs, is_dfun_name are all Names. We can poke them to our heart's content. * Proper-match fields. is_dfun, and its related fields is_tvs, is_cls, is_tys contain TyVars, Class, Type, Class etc, and so are all lazy thunks. When we poke any of these fields we'll typecheck the DFunId declaration, and hence pull in interfaces that it refers to. See Note [Proper-match fields]. * Rough-match fields. During instance lookup, we use the is_cls_nm :: Name and is_tcs :: [RoughMatchTc] fields to perform a "rough match", *without* poking inside the DFunId. The rough-match fields allow us to say "definitely does not match", based only on Names. See GHC.Core.Unify Note [Rough matching in class and family instances] This laziness is very important; see #12367. Try hard to avoid pulling on the structured fields unless you really need the instance. * Another place to watch is InstEnv.instIsVisible, which needs the module to which the ClsInst belongs. We can get this from is_dfun_name. 
-} {- Note [Template tyvars are fresh] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The is_tvs field of a ClsInst has *completely fresh* tyvars. That is, they are * distinct from any other ClsInst * distinct from any tyvars free in predicates that may be looked up in the class instance environment Reason for freshness: we use unification when checking for overlap etc, and that requires the tyvars to be distinct. The invariant is checked by the ASSERT in lookupInstEnv'. Note [Proper-match fields] ~~~~~~~~~~~~~~~~~~~~~~~~~ The is_tvs, is_cls, is_tys fields are simply cached values, pulled out (lazily) from the dfun id. They are cached here simply so that we don't need to decompose the DFunId each time we want to match it. The hope is that the rough-match fields mean that we often never poke the proper-match fields. However, note that: * is_tvs must be a superset of the free vars of is_tys * is_tvs, is_tys may be alpha-renamed compared to the ones in the dfun Id Note [Haddock assumptions] ~~~~~~~~~~~~~~~~~~~~~~~~~~ For normal user-written instances, Haddock relies on * the SrcSpan of * the Name of * the is_dfun of * an Instance being equal to * the SrcSpan of * the instance head type of * the InstDecl used to construct the Instance. 
-} instanceDFunId :: ClsInst -> DFunId instanceDFunId = is_dfun updateClsInstDFun :: (DFunId -> DFunId) -> ClsInst -> ClsInst updateClsInstDFun tidy_dfun ispec = ispec { is_dfun = tidy_dfun (is_dfun ispec) } updateClsInstDFuns :: (DFunId -> DFunId) -> InstEnv -> InstEnv updateClsInstDFuns tidy_dfun (InstEnv rm) = InstEnv $ fmap (updateClsInstDFun tidy_dfun) rm instance NamedThing ClsInst where getName ispec = getName (is_dfun ispec) instance Outputable ClsInst where ppr = pprInstance pprInstance :: ClsInst -> SDoc -- Prints the ClsInst as an instance declaration pprInstance ispec = hang (pprInstanceHdr ispec) 2 (vcat [ text "--" <+> pprDefinedAt (getName ispec) , whenPprDebug (ppr (is_dfun ispec)) ]) -- * pprInstanceHdr is used in VStudio to populate the ClassView tree pprInstanceHdr :: ClsInst -> SDoc -- Prints the ClsInst as an instance declaration pprInstanceHdr (ClsInst { is_flag = flag, is_dfun = dfun }) = text "instance" <+> ppr flag <+> pprSigmaType (idType dfun) pprInstances :: [ClsInst] -> SDoc pprInstances ispecs = vcat (map pprInstance ispecs) instanceHead :: ClsInst -> ([TyVar], Class, [Type]) -- Returns the head, using the fresh tyvars from the ClsInst instanceHead (ClsInst { is_tvs = tvs, is_tys = tys, is_dfun = dfun }) = (tvs, cls, tys) where (_, _, cls, _) = tcSplitDFunTy (idType dfun) -- | Collects the names of concrete types and type constructors that make -- up the head of a class instance. For instance, given `class Foo a b`: -- -- `instance Foo (Either (Maybe Int) a) Bool` would yield -- [Either, Maybe, Int, Bool] -- -- Used in the implementation of ":info" in GHCi. -- -- The 'tcSplitSigmaTy' is because of -- instance Foo a => Baz T where ... 
-- The decl is an orphan if Baz and T are both not locally defined, -- even if Foo *is* locally defined orphNamesOfClsInst :: ClsInst -> NameSet orphNamesOfClsInst (ClsInst { is_cls_nm = cls_nm, is_tys = tys }) = orphNamesOfTypes tys `unionNameSet` unitNameSet cls_nm instanceSig :: ClsInst -> ([TyVar], [Type], Class, [Type]) -- Decomposes the DFunId instanceSig ispec = tcSplitDFunTy (idType (is_dfun ispec)) mkLocalInstance :: DFunId -> OverlapFlag -> [TyVar] -> Class -> [Type] -> ClsInst -- Used for local instances, where we can safely pull on the DFunId. -- Consider using newClsInst instead; this will also warn if -- the instance is an orphan. mkLocalInstance dfun oflag tvs cls tys = ClsInst { is_flag = oflag, is_dfun = dfun , is_tvs = tvs , is_dfun_name = dfun_name , is_cls = cls, is_cls_nm = cls_name , is_tys = tys, is_tcs = RM_KnownTc cls_name : roughMatchTcs tys , is_orphan = orph } where cls_name = className cls dfun_name = idName dfun this_mod = assert (isExternalName dfun_name) $ nameModule dfun_name is_local name = nameIsLocalOrFrom this_mod name -- Compute orphanhood. See Note [Orphans] in GHC.Core.InstEnv (cls_tvs, fds) = classTvsFds cls arg_names = [filterNameSet is_local (orphNamesOfType ty) | ty <- tys] -- See Note [When exactly is an instance decl an orphan?] 
orph | is_local cls_name = NotOrphan (nameOccName cls_name) | all notOrphan mb_ns = assert (not (null mb_ns)) $ head mb_ns | otherwise = IsOrphan notOrphan NotOrphan{} = True notOrphan _ = False mb_ns :: [IsOrphan] -- One for each fundep; a locally-defined name -- that is not in the "determined" arguments mb_ns | null fds = [choose_one arg_names] | otherwise = map do_one fds do_one (_ltvs, rtvs) = choose_one [ns | (tv,ns) <- cls_tvs `zip` arg_names , not (tv `elem` rtvs)] choose_one nss = chooseOrphanAnchor (unionNameSets nss) mkImportedInstance :: Name -- ^ the name of the class -> [RoughMatchTc] -- ^ the rough match signature of the instance -> Name -- ^ the 'Name' of the dictionary binding -> DFunId -- ^ the 'Id' of the dictionary. -> OverlapFlag -- ^ may this instance overlap? -> IsOrphan -- ^ is this instance an orphan? -> ClsInst -- Used for imported instances, where we get the rough-match stuff -- from the interface file -- The bound tyvars of the dfun are guaranteed fresh, because -- the dfun has been typechecked out of the same interface file mkImportedInstance cls_nm mb_tcs dfun_name dfun oflag orphan = ClsInst { is_flag = oflag, is_dfun = dfun , is_tvs = tvs, is_tys = tys , is_dfun_name = dfun_name , is_cls_nm = cls_nm, is_cls = cls , is_tcs = RM_KnownTc cls_nm : mb_tcs , is_orphan = orphan } where (tvs, _, cls, tys) = tcSplitDFunTy (idType dfun) {- Note [When exactly is an instance decl an orphan?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (see GHC.Iface.Make.instanceToIfaceInst, which implements this) Roughly speaking, an instance is an orphan if its head (after the =>) mentions nothing defined in this module. Functional dependencies complicate the situation though. Consider module M where { class C a b | a -> b } and suppose we are compiling module X: module X where import M data T = ... instance C Int T where ... 
This instance is an orphan, because when compiling a third module Y we might get a constraint (C Int v), and we'd want to improve v to T. So we must make sure X's instances are loaded, even if we do not directly use anything from X. More precisely, an instance is an orphan iff If there are no fundeps, then at least of the names in the instance head is locally defined. If there are fundeps, then for every fundep, at least one of the names free in a *non-determined* part of the instance head is defined in this module. (Note that these conditions hold trivially if the class is locally defined.) ************************************************************************ * * InstEnv, ClsInstEnv * * ************************************************************************ A @ClsInstEnv@ all the instances of that class. The @Id@ inside a ClsInstEnv mapping is the dfun for that instance. If class C maps to a list containing the item ([a,b], [t1,t2,t3], dfun), then forall a b, C t1 t2 t3 can be constructed by dfun or, to put it another way, we have instance (...) => C t1 t2 t3, witnessed by dfun -} --------------------------------------------------- {- Note [InstEnv determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~~ We turn InstEnvs into a list in some places that don't directly affect the ABI. That happens when we create output for `:info`. Unfortunately that nondeterminism is nonlocal and it's hard to tell what it affects without following a chain of functions. It's also easy to accidentally make that nondeterminism affect the ABI. Furthermore the envs should be relatively small, so it should be free to use deterministic maps here. Testing with nofib and validate detected no difference between UniqFM and UniqDFM. See also Note [Deterministic UniqFM] -} -- Internally it's safe to indexable this map by -- by @Class@, the classes @Name@, the classes @TyCon@ -- or it's @Unique@. 
-- This is since: -- getUnique cls == getUnique (className cls) == getUnique (classTyCon cls) -- -- We still use Class as key type as it's both the common case -- and conveys the meaning better. But the implementation of --InstEnv is a bit more lax internally. newtype InstEnv = InstEnv (RoughMap ClsInst) -- Maps Class to instances for that class -- See Note [InstEnv determinism] instance Outputable InstEnv where ppr (InstEnv rm) = pprInstances $ elemsRM rm -- | 'InstEnvs' represents the combination of the global type class instance -- environment, the local type class instance environment, and the set of -- transitively reachable orphan modules (according to what modules have been -- directly imported) used to test orphan instance visibility. data InstEnvs = InstEnvs { ie_global :: InstEnv, -- External-package instances ie_local :: InstEnv, -- Home-package instances ie_visible :: VisibleOrphanModules -- Set of all orphan modules transitively -- reachable from the module being compiled -- See Note [Instance lookup and orphan instances] } -- | Set of visible orphan modules, according to what modules have been directly -- imported. This is based off of the dep_orphs field, which records -- transitively reachable orphan modules (modules that define orphan instances). type VisibleOrphanModules = ModuleSet -- INVARIANTS: -- * The is_tvs are distinct in each ClsInst -- of a ClsInstEnv (so we can safely unify them) -- Thus, the @ClsInstEnv@ for @Eq@ might contain the following entry: -- [a] ===> dfun_Eq_List :: forall a. Eq a => Eq [a] -- The "a" in the pattern must be one of the forall'd variables in -- the dfun type. 
emptyInstEnv :: InstEnv emptyInstEnv = InstEnv emptyRM mkInstEnv :: [ClsInst] -> InstEnv mkInstEnv = extendInstEnvList emptyInstEnv instEnvElts :: InstEnv -> [ClsInst] instEnvElts (InstEnv rm) = elemsRM rm -- See Note [InstEnv determinism] instEnvEltsForClass :: InstEnv -> Name -> [ClsInst] instEnvEltsForClass (InstEnv rm) cls_nm = lookupRM [RML_KnownTc cls_nm] rm -- N.B. this is not particularly efficient but used only by GHCi. instEnvClasses :: InstEnv -> UniqDSet Class instEnvClasses ie = mkUniqDSet $ map is_cls (instEnvElts ie) -- | Test if an instance is visible, by checking that its origin module -- is in 'VisibleOrphanModules'. -- See Note [Instance lookup and orphan instances] instIsVisible :: VisibleOrphanModules -> ClsInst -> Bool instIsVisible vis_mods ispec -- NB: Instances from the interactive package always are visible. We can't -- add interactive modules to the set since we keep creating new ones -- as a GHCi session progresses. = case nameModule_maybe (is_dfun_name ispec) of Nothing -> True Just mod | isInteractiveModule mod -> True | IsOrphan <- is_orphan ispec -> mod `elemModuleSet` vis_mods | otherwise -> True classInstances :: InstEnvs -> Class -> [ClsInst] classInstances envs cls = classNameInstances envs (className cls) classNameInstances :: InstEnvs -> Name -> [ClsInst] classNameInstances (InstEnvs { ie_global = pkg_ie, ie_local = home_ie, ie_visible = vis_mods }) cls = get home_ie ++ get pkg_ie where get :: InstEnv -> [ClsInst] get ie = filter (instIsVisible vis_mods) (instEnvEltsForClass ie cls) -- | Checks for an exact match of ClsInst in the instance environment. 
-- We use this when we do signature checking in "GHC.Tc.Module" memberInstEnv :: InstEnv -> ClsInst -> Bool memberInstEnv (InstEnv rm) ins_item@(ClsInst { is_tcs = tcs } ) = any (identicalDFunType ins_item) (fst $ lookupRM' (map roughMatchTcToLookup tcs) rm) where identicalDFunType cls1 cls2 = eqType (varType (is_dfun cls1)) (varType (is_dfun cls2)) -- | Makes no particular effort to detect conflicts. unionInstEnv :: InstEnv -> InstEnv -> InstEnv unionInstEnv (InstEnv a) (InstEnv b) = InstEnv (a `unionRM` b) extendInstEnvList :: InstEnv -> [ClsInst] -> InstEnv extendInstEnvList inst_env ispecs = foldl' extendInstEnv inst_env ispecs extendInstEnv :: InstEnv -> ClsInst -> InstEnv extendInstEnv (InstEnv rm) ins_item@(ClsInst { is_tcs = tcs }) = InstEnv $ insertRM tcs ins_item rm filterInstEnv :: (ClsInst -> Bool) -> InstEnv -> InstEnv filterInstEnv pred (InstEnv rm) = InstEnv $ filterRM pred rm anyInstEnv :: (ClsInst -> Bool) -> InstEnv -> Bool anyInstEnv pred (InstEnv rm) = foldRM (\x rest -> pred x || rest) False rm mapInstEnv :: (ClsInst -> ClsInst) -> InstEnv -> InstEnv mapInstEnv f (InstEnv rm) = InstEnv (f <$> rm) deleteFromInstEnv :: InstEnv -> ClsInst -> InstEnv deleteFromInstEnv (InstEnv rm) ins_item@(ClsInst { is_tcs = tcs }) = InstEnv $ filterMatchingRM (not . identicalClsInstHead ins_item) tcs rm deleteDFunFromInstEnv :: InstEnv -> DFunId -> InstEnv -- Delete a specific instance fron an InstEnv deleteDFunFromInstEnv (InstEnv rm) dfun = InstEnv $ filterMatchingRM (not . same_dfun) [RM_KnownTc (className cls)] rm where (_, _, cls, _) = tcSplitDFunTy (idType dfun) same_dfun (ClsInst { is_dfun = dfun' }) = dfun == dfun' identicalClsInstHead :: ClsInst -> ClsInst -> Bool -- ^ True when when the instance heads are the same -- e.g. 
both are Eq [(a,b)] -- Used for overriding in GHCi -- Obviously should be insensitive to alpha-renaming identicalClsInstHead (ClsInst { is_tcs = rough1, is_tys = tys1 }) (ClsInst { is_tcs = rough2, is_tys = tys2 }) = not (instanceCantMatch rough1 rough2) -- Fast check for no match, uses the "rough match" fields; -- also accounts for class name. && isJust (tcMatchTys tys1 tys2) && isJust (tcMatchTys tys2 tys1) {- ************************************************************************ * * Looking up an instance * * ************************************************************************ @lookupInstEnv@ looks up in a @InstEnv@, using a one-way match. Since the env is kept ordered, the first match must be the only one. The thing we are looking up can have an arbitrary "flexi" part. Note [Instance lookup and orphan instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we are compiling a module M, and we have a zillion packages loaded, and we are looking up an instance for C (T W). If we find a match in module 'X' from package 'p', should be "in scope"; that is, is p:X in the transitive closure of modules imported from M? The difficulty is that the "zillion packages" might include ones loaded through earlier invocations of the GHC API, or earlier module loads in GHCi. They might not be in the dependencies of M itself; and if not, the instances in them should not be visible. #2182, #8427. There are two cases: * If the instance is *not an orphan*, then module X defines C, T, or W. And in order for those types to be involved in typechecking M, it must be that X is in the transitive closure of M's imports. So we can use the instance. * If the instance *is an orphan*, the above reasoning does not apply. So we keep track of the set of orphan modules transitively below M; this is the ie_visible field of InstEnvs, of type VisibleOrphanModules. If module p:X is in this set, then we can use the instance, otherwise we can't. 
Note [Rules for instance lookup] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These functions implement the carefully-written rules in the user manual section on "overlapping instances". At risk of duplication, here are the rules. If the rules change, change this text and the user manual simultaneously. The link may be this: http://www.haskell.org/ghc/docs/latest/html/users_guide/glasgow_exts.html#instance-overlap The willingness to be overlapped or incoherent is a property of the instance declaration itself, controlled as follows: * An instance is "incoherent" if it has an INCOHERENT pragma, or if it appears in a module compiled with -XIncoherentInstances. * An instance is "overlappable" if it has an OVERLAPPABLE or OVERLAPS pragma, or if it appears in a module compiled with -XOverlappingInstances, or if the instance is incoherent. * An instance is "overlapping" if it has an OVERLAPPING or OVERLAPS pragma, or if it appears in a module compiled with -XOverlappingInstances, or if the instance is incoherent. compiled with -XOverlappingInstances. Now suppose that, in some client module, we are searching for an instance of the target constraint (C ty1 .. tyn). The search works like this. * Find all instances `I` that *match* the target constraint; that is, the target constraint is a substitution instance of `I`. These instance declarations are the *candidates*. * Eliminate any candidate `IX` for which both of the following hold: - There is another candidate `IY` that is strictly more specific; that is, `IY` is a substitution instance of `IX` but not vice versa. - Either `IX` is *overlappable*, or `IY` is *overlapping*. (This "either/or" design, rather than a "both/and" design, allow a client to deliberately override an instance from a library, without requiring a change to the library.) - If exactly one non-incoherent candidate remains, select it. If all remaining candidates are incoherent, select an arbitrary one. Otherwise the search fails (i.e. 
when more than one surviving candidate is not incoherent). - If the selected candidate (from the previous step) is incoherent, the search succeeds, returning that candidate. - If not, find all instances that *unify* with the target constraint, but do not *match* it. Such non-candidate instances might match when the target constraint is further instantiated. If all of them are incoherent, the search succeeds, returning the selected candidate; if not, the search fails. Notice that these rules are not influenced by flag settings in the client module, where the instances are *used*. These rules make it possible for a library author to design a library that relies on overlapping instances without the client having to know. Note [Overlapping instances] (NB: these notes are quite old) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Overlap is permitted, but only in such a way that one can make a unique choice when looking up. That is, overlap is only permitted if one template matches the other, or vice versa. So this is ok: [a] [Int] but this is not (Int,a) (b,Int) If overlap is permitted, the list is kept most specific first, so that the first lookup is the right choice. For now we just use association lists. \subsection{Avoiding a problem with overlapping} Consider this little program: \begin{pseudocode} class C a where c :: a class C a => D a where d :: a instance C Int where c = 17 instance D Int where d = 13 instance C a => C [a] where c = [c] instance ({- C [a], -} D a) => D [a] where d = c instance C [Int] where c = [37] main = print (d :: [Int]) \end{pseudocode} What do you think `main' prints (assuming we have overlapping instances, and all that turned on)? Well, the instance for `D' at type `[a]' is defined to be `c' at the same type, and we've got an instance of `C' at `[Int]', so the answer is `[37]', right? (the generic `C [a]' instance shouldn't apply because the `C [Int]' instance is more specific). Ghc-4.04 gives `[37]', while ghc-4.06 gives `[17]', so 4.06 is wrong. 
That was easy ;-) Let's just consult hugs for good measure. Wait - if I use old hugs (pre-September99), I get `[17]', and stranger yet, if I use hugs98, it doesn't even compile! What's going on!? What hugs complains about is the `D [a]' instance decl. \begin{pseudocode} ERROR "mj.hs" (line 10): Cannot build superclass instance *** Instance : D [a] *** Context supplied : D a *** Required superclass : C [a] \end{pseudocode} You might wonder what hugs is complaining about. It's saying that you need to add `C [a]' to the context of the `D [a]' instance (as appears in comments). But there's that `C [a]' instance decl one line above that says that I can reduce the need for a `C [a]' instance to the need for a `C a' instance, and in this case, I already have the necessary `C a' instance (since we have `D a' explicitly in the context, and `C' is a superclass of `D'). Unfortunately, the above reasoning indicates a premature commitment to the generic `C [a]' instance. I.e., it prematurely rules out the more specific instance `C [Int]'. This is the mistake that ghc-4.06 makes. The fix is to add the context that hugs suggests (uncomment the `C [a]'), effectively deferring the decision about which instance to use. Now, interestingly enough, 4.04 has this same bug, but it's covered up in this case by a little known `optimization' that was disabled in 4.06. Ghc-4.04 silently inserts any missing superclass context into an instance declaration. In this case, it silently inserts the `C [a]', and everything happens to work out. (See `GHC.Types.Id.Make.mkDictFunId' for the code in question. Search for `Mark Jones', although Mark claims no credit for the `optimization' in question, and would rather it stopped being called the `Mark Jones optimization' ;-) So, what's the fix? I think hugs has it right. Here's why. Let's try something else out with ghc-4.04. 
Let's add the following line: d' :: D a => [a] d' = c Everyone raise their hand who thinks that `d :: [Int]' should give a different answer from `d' :: [Int]'. Well, in ghc-4.04, it does. The `optimization' only applies to instance decls, not to regular bindings, giving inconsistent behavior. Old hugs had this same bug. Here's how we fixed it: like GHC, the list of instances for a given class is ordered, so that more specific instances come before more generic ones. For example, the instance list for C might contain: ..., C Int, ..., C a, ... When we go to look for a `C Int' instance we'll get that one first. But what if we go looking for a `C b' (`b' is unconstrained)? We'll pass the `C Int' instance, and keep going. But if `b' is unconstrained, then we don't know yet if the more specific instance will eventually apply. GHC keeps going, and matches on the generic `C a'. The fix is to, at each step, check to see if there's a reverse match, and if so, abort the search. This prevents hugs from prematurely choosing a generic instance when a more specific one exists. --Jeff BUT NOTE [Nov 2001]: we must actually *unify* not reverse-match in this test. Suppose the instance envt had ..., forall a b. C a a b, ..., forall a b c. C a b c, ... (still most specific first) Now suppose we are looking for (C x y Int), where x and y are unconstrained. C x y Int doesn't match the template {a,b} C a a b but neither does C a a b match the template {x,y} C x y Int But still x and y might subsequently be unified so they *do* match. Simple story: unify, don't match. 
-} type DFunInstType = Maybe Type -- Just ty => Instantiate with this type -- Nothing => Instantiate with any type of this tyvar's kind -- See Note [DFunInstType: instantiating types] type InstMatch = (ClsInst, [DFunInstType]) type ClsInstLookupResult = ( [InstMatch] -- Successful matches , PotentialUnifiers -- These don't match but do unify , [InstMatch] ) -- Unsafe overlapped instances under Safe Haskell -- (see Note [Safe Haskell Overlapping Instances] in -- GHC.Tc.Solver). {- Note [DFunInstType: instantiating types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A successful match is a ClsInst, together with the types at which the dfun_id in the ClsInst should be instantiated The instantiating types are (Either TyVar Type)s because the dfun might have some tyvars that *only* appear in arguments dfun :: forall a b. C a b, Ord b => D [a] When we match this against D [ty], we return the instantiating types [Just ty, Nothing] where the 'Nothing' indicates that 'b' can be freely instantiated. (The caller instantiates it to a flexi type variable, which will presumably later become fixed via functional dependencies.) Note [Infinitary substitution in lookup] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider class C a b instance C c c instance C d (Maybe d) [W] C e (Maybe e) You would think we could just use the second instance, because the first doesn't unify. But that's just ever so slightly wrong. The reason we check for unifiers along with matchers is that we don't want the possibility that a type variable instantiation could cause an instance choice to change. Yet if we have type family M = Maybe M and choose (e |-> M), then both instances match. This is absurd, but we cannot rule it out. Yet, worrying about this case is awfully inconvenient to users, and so we pretend the problem doesn't exist, by considering a lookup that runs into this occurs-check issue to indicate that an instance surely does not apply (i.e. is like the SurelyApart case). 
In the brief time that we didn't treat infinitary substitutions specially, two tickets were filed: #19044 and #19052, both trying to do Real Work. Why don't we just exclude any instances that are MaybeApart? Because we might have a [W] C e (F e), where F is a type family. The second instance above does not match, but it should be included as a future possibility. Unification will return MaybeApart MARTypeFamily in this case. What can go wrong with this design choice? We might get incoherence -- but not loss of type safety. In particular, if we have [W] C M M (for the M type family above), then GHC might arbitrarily choose either instance, depending on how M reduces (or doesn't). For type families, we can't just ignore the problem (as we essentially do here), because doing so would give us a hole in the type safety proof (as explored in Section 6 of "Closed Type Families with Overlapping Equations", POPL'14). This possibility of an infinitary substitution manifests as closed type families that look like they should reduce, but don't. Users complain: #9082 and #17311. For open type families, we actually can have unsoundness if we don't take infinitary substitutions into account: #8162. But, luckily, for class instances, we just risk coherence -- not great, but it seems better to give users what they likely want. (Also, note that this problem existed for the entire decade of 201x without anyone noticing, so it's manifestly not ruining anyone's day.) -} -- |Look up an instance in the given instance environment. The given class application must match exactly -- one instance and the match may not contain any flexi type variables. If the lookup is unsuccessful, -- yield 'Left errorMessage'. 
lookupUniqueInstEnv :: InstEnvs -> Class -> [Type] -> Either SDoc (ClsInst, [Type]) lookupUniqueInstEnv instEnv cls tys = case lookupInstEnv False instEnv cls tys of ([(inst, inst_tys)], _, _) | noFlexiVar -> Right (inst, inst_tys') | otherwise -> Left $ text "flexible type variable:" <+> (ppr $ mkTyConApp (classTyCon cls) tys) where inst_tys' = [ty | Just ty <- inst_tys] noFlexiVar = all isJust inst_tys _other -> Left $ text "instance not found" <+> (ppr $ mkTyConApp (classTyCon cls) tys) data PotentialUnifiers = NoUnifiers | OneOrMoreUnifiers [ClsInst] -- This list is lazy as we only look at all the unifiers when -- printing an error message. It can be expensive to compute all -- the unifiers because if you are matching something like C a[sk] then -- all instances will unify. instance Outputable PotentialUnifiers where ppr NoUnifiers = text "NoUnifiers" ppr xs = ppr (getPotentialUnifiers xs) instance Semigroup PotentialUnifiers where NoUnifiers <> u = u u <> NoUnifiers = u u1 <> u2 = OneOrMoreUnifiers (getPotentialUnifiers u1 ++ getPotentialUnifiers u2) instance Monoid PotentialUnifiers where mempty = NoUnifiers getPotentialUnifiers :: PotentialUnifiers -> [ClsInst] getPotentialUnifiers NoUnifiers = [] getPotentialUnifiers (OneOrMoreUnifiers cls) = cls nullUnifiers :: PotentialUnifiers -> Bool nullUnifiers NoUnifiers = True nullUnifiers _ = False lookupInstEnv' :: InstEnv -- InstEnv to look in -> VisibleOrphanModules -- But filter against this -> Class -> [Type] -- What we are looking for -> ([InstMatch], -- Successful matches PotentialUnifiers) -- These don't match but do unify -- (no incoherent ones in here) -- The second component of the result pair happens when we look up -- Foo [a] -- in an InstEnv that has entries for -- Foo [Int] -- Foo [b] -- Then which we choose would depend on the way in which 'a' -- is instantiated. So we report that Foo [b] is a match (mapping b->a) -- but Foo [Int] is a unifier. 
This gives the caller a better chance of -- giving a suitable error message lookupInstEnv' (InstEnv rm) vis_mods cls tys = (foldr check_match [] rough_matches, check_unifier rough_unifiers) where (rough_matches, rough_unifiers) = lookupRM' rough_tcs rm rough_tcs = RML_KnownTc (className cls) : roughMatchTcsLookup tys -------------- check_match :: ClsInst -> [InstMatch] -> [InstMatch] check_match item@(ClsInst { is_tvs = tpl_tvs, is_tys = tpl_tys }) acc | not (instIsVisible vis_mods item) = acc -- See Note [Instance lookup and orphan instances] | Just subst <- tcMatchTys tpl_tys tys = ((item, map (lookupTyVar subst) tpl_tvs) : acc) | otherwise = acc check_unifier :: [ClsInst] -> PotentialUnifiers check_unifier [] = NoUnifiers check_unifier (item@ClsInst { is_tvs = tpl_tvs, is_tys = tpl_tys }:items) | not (instIsVisible vis_mods item) = check_unifier items -- See Note [Instance lookup and orphan instances] | Just {} <- tcMatchTys tpl_tys tys = check_unifier items -- Does not match, so next check whether the things unify -- See Note [Overlapping instances] -- Ignore ones that are incoherent: Note [Incoherent instances] | isIncoherent item = check_unifier items | otherwise = assertPpr (tys_tv_set `disjointVarSet` tpl_tv_set) ((ppr cls <+> ppr tys) $$ (ppr tpl_tvs <+> ppr tpl_tys)) $ -- Unification will break badly if the variables overlap -- They shouldn't because we allocate separate uniques for them -- See Note [Template tyvars are fresh] case tcUnifyTysFG instanceBindFun tpl_tys tys of -- We consider MaybeApart to be a case where the instance might -- apply in the future. This covers an instance like C Int and -- a target like [W] C (F a), where F is a type family. 
SurelyApart -> check_unifier items -- See Note [Infinitary substitution in lookup] MaybeApart MARInfinite _ -> check_unifier items _ -> OneOrMoreUnifiers (item: getPotentialUnifiers (check_unifier items)) where tpl_tv_set = mkVarSet tpl_tvs tys_tv_set = tyCoVarsOfTypes tys --------------- -- This is the common way to call this function. lookupInstEnv :: Bool -- Check Safe Haskell overlap restrictions -> InstEnvs -- External and home package inst-env -> Class -> [Type] -- What we are looking for -> ClsInstLookupResult -- ^ See Note [Rules for instance lookup] -- ^ See Note [Safe Haskell Overlapping Instances] in "GHC.Tc.Solver" -- ^ See Note [Safe Haskell Overlapping Instances Implementation] in "GHC.Tc.Solver" lookupInstEnv check_overlap_safe (InstEnvs { ie_global = pkg_ie , ie_local = home_ie , ie_visible = vis_mods }) cls tys = (final_matches, final_unifs, unsafe_overlapped) where (home_matches, home_unifs) = lookupInstEnv' home_ie vis_mods cls tys (pkg_matches, pkg_unifs) = lookupInstEnv' pkg_ie vis_mods cls tys all_matches = home_matches ++ pkg_matches all_unifs = home_unifs `mappend` pkg_unifs final_matches = pruneOverlappedMatches all_matches -- Even if the unifs is non-empty (an error situation) -- we still prune the matches, so that the error message isn't -- misleading (complaining of multiple matches when some should be -- overlapped away) unsafe_overlapped = case final_matches of [match] -> check_safe match _ -> [] -- If the selected match is incoherent, discard all unifiers final_unifs = case final_matches of (m:_) | isIncoherent (fst m) -> NoUnifiers _ -> all_unifs -- NOTE [Safe Haskell isSafeOverlap] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- We restrict code compiled in 'Safe' mode from overriding code -- compiled in any other mode. The rationale is that code compiled -- in 'Safe' mode is code that is untrusted by the ghc user. 
So -- we shouldn't let that code change the behaviour of code the -- user didn't compile in 'Safe' mode since that's the code they -- trust. So 'Safe' instances can only overlap instances from the -- same module. A same instance origin policy for safe compiled -- instances. check_safe (inst,_) = case check_overlap_safe && unsafeTopInstance inst of -- make sure it only overlaps instances from the same module True -> go [] all_matches -- most specific is from a trusted location. False -> [] where go bad [] = bad go bad (i@(x,_):unchecked) = if inSameMod x || isOverlappable x then go bad unchecked else go (i:bad) unchecked inSameMod b = let na = getName $ getName inst la = isInternalName na nb = getName $ getName b lb = isInternalName nb in (la && lb) || (nameModule na == nameModule nb) -- We consider the most specific instance unsafe when it both: -- (1) Comes from a module compiled as `Safe` -- (2) Is an orphan instance, OR, an instance for a MPTC unsafeTopInstance inst = isSafeOverlap (is_flag inst) && (isOrphan (is_orphan inst) || classArity (is_cls inst) > 1) --------------- {- Note [Instance overlap and guards] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is to find all instances that /match/ the constraint we are trying to solve. Next, using pruneOverlapped Matches, we eliminate from that list of instances any instances that are overlapped. For example: (A) instance C [a] where ... (B) instance {-# OVERLAPPING #-} C [[a] where ... (C) instance C (Maybe a) where Suppose we are trying to solve C [[Bool]]. The lookup will return a list [A,B] of the first two instances, since both match. (The Maybe instance doesn't match, so the lookup won't return (C).) Then pruneOverlappedMatches removes (A), since (B) is more specific. So we end up with just one match, (B). However pruneOverlappedMatches is a bit more subtle than you might think (#20946). Recall how we go about eliminating redundant instances, as described in Note [Rules for instance lookup]. 
- When instance I1 is more specific than instance I2, - and either I1 is overlapping or I2 is overlappable, then we can discard I2 in favour of I1. Note however that, as part of the instance resolution process, we don't want to immediately discard I2, as it can still be useful. For example, suppose we are trying to solve C [[Int]], and have instances: I1: instance C [[Int]] I2: instance {-# OVERLAPS #-} C [[a]] Both instances match. I2 is both overlappable and overlapping (that's what `OVERLAPS` means). Now I1 is more specific than I2, and I2 is overlappable, so we can discard I2. However, we should still keep I2 around when looking up instances, because it is overlapping and `I1` isn't: this means it can be used to eliminate other instances that I1 can't, such as: I3: instance C [a] I3 is more general than both I1 and I2, but it is not overlappable, and I1 is not overlapping. This means that we must use I2 to discard I3. To do this, in 'insert_overlapping', on top of keeping track of matching instances, we also keep track of /guards/, which are instances like I2 which we will discard in the end (because we have a more specific match that overrides it) but might still be useful for eliminating other instances (like I3 in this example). (A) Definition of guarding instances (guards). To add a matching instance G as a guard, it must satisfy the following conditions: A1. G is overlapped by a more specific match, M, A2. M is not overlapping, A3. G is overlapping. This means that we eliminate G from the set of matches (it is overriden by M), but we keep it around until we are done with instance resolution because it might still be useful to eliminate other matches. (B) Guards eliminate matches. There are two situations in which guards can eliminate a match: B1. We want to add a new instance, but it is overriden by a guard. We can immediately discard the instance. 
Example for B1: Suppose we want to solve C [[Int]], with instances: J1: instance C [[Int]] J2: instance {-# OVERLAPS #-} C [[a]] J3: instance C [a] Processing them in order: we add J1 as a match, then J2 as a guard. Now, when we come across J3, we can immediately discard it because it is overriden by the guard J2. B2. We have found a new guard. We must use it to discard matches we have already found. This is necessary because we must obtain the same result whether we process the instance or the guard first. Example for B2: Suppose we want to solve C [[Int]], with instances: K1: instance C [[Int]] K2: instance C [a] K3: instance {-# OVERLAPS #-} C [[a]] We start by considering K1 and K2. Neither has any overlapping flag set, so we end up with two matches, {K1, K2}. Next we look at K3: it is overriden by K1, but as K1 is not overlapping this means K3 should function as a guard. We must then ensure we eliminate K2 from the list of matches, as K3 guards against it. (C) Adding guards. When we already have collected some guards, and have come across a new guard, we can simply add it to the existing list of guards. We don't need to keep the set of guards minimal, as they will simply be thrown away at the end: we are only interested in the matches. Not having a minimal set of guards does not harm us, but it makes the code simpler. -} -- | Collect class instance matches, including matches that we know -- are overridden but might still be useful to override other instances -- (which we call "guards"). -- -- See Note [Instance overlap and guards]. data InstMatches = InstMatches { -- | Minimal matches: we have knocked out all strictly more general -- matches that are overlapped by a match in this list. instMatches :: [InstMatch] -- | Guards: matches that we know we won't pick in the end, -- but might still be useful for ruling out other instances, -- as per #20946. See Note [Instance overlap and guards], (A). 
, instGuards :: [ClsInst] } instance Outputable InstMatches where ppr (InstMatches { instMatches = matches, instGuards = guards }) = text "InstMatches" <+> braces (vcat [ text "instMatches:" <+> ppr matches , text "instGuards:" <+> ppr guards ]) noMatches :: InstMatches noMatches = InstMatches { instMatches = [], instGuards = [] } pruneOverlappedMatches :: [InstMatch] -> [InstMatch] -- ^ Remove from the argument list any InstMatches for which another -- element of the list is more specific, and overlaps it, using the -- rules of Nove [Rules for instance lookup] pruneOverlappedMatches all_matches = instMatches $ foldr insert_overlapping noMatches all_matches -- | Computes whether the first class instance overrides the second, -- i.e. the first is more specific and can overlap the second. -- -- More precisely, @instA `overrides` instB@ returns 'True' precisely when: -- -- - @instA@ is more specific than @instB@, -- - @instB@ is not more specific than @instA@, -- - @instA@ is overlapping OR @instB@ is overlappable. overrides :: ClsInst -> ClsInst -> Bool new_inst `overrides` old_inst = (new_inst `more_specific_than` old_inst) && (not $ old_inst `more_specific_than` new_inst) && (isOverlapping new_inst || isOverlappable old_inst) -- Overlap permitted if either the more specific instance -- is marked as overlapping, or the more general one is -- marked as overlappable. -- Latest change described in: #9242. -- Previous change: #3877, Dec 10. where -- `instB` can be instantiated to match `instA` -- or the two are equal instA `more_specific_than` instB = isJust (tcMatchTys (is_tys instB) (is_tys instA)) insert_overlapping :: InstMatch -> InstMatches -> InstMatches -- ^ Add a new solution, knocking out strictly less specific ones -- See Note [Rules for instance lookup] and Note [Instance overlap and guards]. -- -- /Property/: the order of insertion doesn't matter, i.e. 
-- @insert_overlapping inst1 (insert_overlapping inst2 matches)@ -- gives the same result as @insert_overlapping inst2 (insert_overlapping inst1 matches)@. insert_overlapping new_item@(new_inst,_) old@(InstMatches { instMatches = old_items, instGuards = guards }) -- If any of the "guarding" instances override this item, discard it. -- See Note [Instance overlap and guards], (B1). | any (`overrides` new_inst) guards = old | otherwise = insert_overlapping_new_item old_items where insert_overlapping_new_item :: [InstMatch] -> InstMatches insert_overlapping_new_item [] = InstMatches { instMatches = [new_item], instGuards = guards } insert_overlapping_new_item all_old_items@(old_item@(old_inst,_) : old_items) -- New strictly overrides old: throw out the old from the list of matches, -- but potentially keep it around as a guard if it can still be used -- to eliminate other instances. | new_inst `overrides` old_inst , InstMatches { instMatches = final_matches , instGuards = prev_guards } <- insert_overlapping_new_item old_items = if isOverlapping new_inst || not (isOverlapping old_inst) -- We're adding "new_inst" as a match. -- If "new_inst" is not overlapping but "old_inst" is, we should -- keep "old_inst" around as a guard. -- See Note [Instance overlap and guards], (A). then InstMatches { instMatches = final_matches , instGuards = prev_guards } else InstMatches { instMatches = final_matches , instGuards = old_inst : prev_guards } -- ^^^^^^^^^^^^^^^^^^^^^^ -- See Note [Instance overlap and guards], (C). -- Old strictly overrides new: throw it out from the list of matches, -- but potentially keep it around as a guard if it can still be used -- to eliminate other instances. | old_inst `overrides` new_inst = if isOverlapping old_inst || not (isOverlapping new_inst) -- We're discarding "new_inst", as it is overridden by "old_inst". -- However, it might still be useful as a guard if "old_inst" is not overlapping -- but "new_inst" is. 
-- See Note [Instance overlap and guards], (A). then InstMatches { instMatches = all_old_items , instGuards = guards } else InstMatches -- We're adding "new_inst" as a guard, so we must prune out -- any matches it overrides. -- See Note [Instance overlap and guards], (B2) { instMatches = filter (\(old_inst,_) -> not (new_inst `overrides` old_inst)) all_old_items -- See Note [Instance overlap and guards], (C) , instGuards = new_inst : guards } -- Discard incoherent instances; see Note [Incoherent instances] | isIncoherent old_inst -- Old is incoherent; discard it = insert_overlapping_new_item old_items | isIncoherent new_inst -- New is incoherent; discard it = InstMatches { instMatches = all_old_items , instGuards = guards } -- Equal or incomparable, and neither is incoherent; keep both | otherwise , InstMatches { instMatches = final_matches , instGuards = final_guards } <- insert_overlapping_new_item old_items = InstMatches { instMatches = old_item : final_matches , instGuards = final_guards } {- Note [Incoherent instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ For some classes, the choice of a particular instance does not matter, any one is good. E.g. consider class D a b where { opD :: a -> b -> String } instance D Int b where ... instance D a Int where ... g (x::Int) = opD x x -- Wanted: D Int Int For such classes this should work (without having to add an "instance D Int Int", and using -XOverlappingInstances, which would then work). This is what -XIncoherentInstances is for: Telling GHC "I don't care which instance you use; if you can use one, use it." Should this logic only work when *all* candidates have the incoherent flag, or even when all but one have it? 
The right choice is the latter, which can be justified by comparing the behaviour with how -XIncoherentInstances worked when it was only about the unify-check (Note [Overlapping instances]): Example: class C a b c where foo :: (a,b,c) instance C [a] b Int instance [incoherent] [Int] b c instance [incoherent] C a Int c Thanks to the incoherent flags, [Wanted] C [a] b Int works: Only instance one matches, the others just unify, but are marked incoherent. So I can write (foo :: ([a],b,Int)) :: ([Int], Int, Int). but if that works then I really want to be able to write foo :: ([Int], Int, Int) as well. Now all three instances from above match. None is more specific than another, so none is ruled out by the normal overlapping rules. One of them is not incoherent, but we still want this to compile. Hence the "all-but-one-logic". The implementation is in insert_overlapping, where we remove matching incoherent instances as long as there are others. ************************************************************************ * * Binding decisions * * ************************************************************************ -} instanceBindFun :: BindFun instanceBindFun tv _rhs_ty | isOverlappableTyVar tv = Apart | otherwise = BindMe -- Note [Binding when looking up instances] {- Note [Binding when looking up instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When looking up in the instance environment, or family-instance environment, we are careful about multiple matches, as described above in Note [Overlapping instances] The target tys can contain skolem constants. For existentials and instance variables, we can guarantee that those are never going to be instantiated to anything, so we should not involve them in the unification test. These are called "super skolems". Example: class Foo a where { op :: a -> Int } instance Foo a => Foo [a] -- NB overlap instance Foo [Int] -- NB overlap data T = forall a. 
Foo a => MkT a f :: T -> Int f (MkT x) = op [x,x] The op [x,x] means we need (Foo [a]). This `a` will never be instantiated, and so it is a super skolem. (See the use of tcInstSuperSkolTyVarsX in GHC.Tc.Gen.Pat.tcDataConPat.) Super skolems respond True to isOverlappableTyVar, and the use of Apart in instanceBindFun, above, means that these will be treated as fresh constants in the unification algorithm during instance lookup. Without this treatment, GHC would complain, saying that the choice of instance depended on the instantiation of 'a'; but of course it isn't *going* to be instantiated. Note that it is necessary that the unification algorithm returns SurelyApart for these super-skolems for GHC to be able to commit to another instance. We do this only for super skolems. For example we reject g :: forall a => [a] -> Int g x = op x on the grounds that the correct instance depends on the instantiation of 'a' -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Lint.hs0000644000000000000000000042245514472400112020001 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE ScopedTypeVariables #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 A ``lint'' pass to check for Core correctness. See Note [Core Lint guarantee]. 
-} module GHC.Core.Lint ( lintCoreBindings, lintUnfolding, lintPassResult, lintInteractiveExpr, lintExpr, lintAnnots, lintAxioms, interactiveInScope, -- ** Debug output endPass, endPassIO, displayLintResults, dumpPassResult ) where import GHC.Prelude import GHC.Driver.Session import GHC.Driver.Ppr import GHC.Driver.Env import GHC.Driver.Config.Diagnostic import GHC.Tc.Utils.TcType ( isFloatingPrimTy, isTyFamFree ) import GHC.Unit.Module.ModGuts import GHC.Runtime.Context import GHC.Core import GHC.Core.FVs import GHC.Core.Utils import GHC.Core.Stats ( coreBindsStats ) import GHC.Core.Opt.Monad import GHC.Core.DataCon import GHC.Core.Ppr import GHC.Core.Coercion import GHC.Core.Type as Type import GHC.Core.Multiplicity import GHC.Core.UsageEnv import GHC.Core.TyCo.Rep -- checks validity of types/coercions import GHC.Core.TyCo.Subst import GHC.Core.TyCo.FVs import GHC.Core.TyCo.Ppr ( pprTyVar, pprTyVars ) import GHC.Core.TyCon as TyCon import GHC.Core.Coercion.Axiom import GHC.Core.FamInstEnv( compatibleBranches ) import GHC.Core.InstEnv ( instanceDFunId, instEnvElts ) import GHC.Core.Coercion.Opt ( checkAxInstCo ) import GHC.Core.Opt.Arity ( typeArity ) import GHC.Types.Literal import GHC.Types.Var as Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Unique.Set( nonDetEltsUniqSet ) import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.SrcLoc import GHC.Types.Tickish import GHC.Types.RepType import GHC.Types.Basic import GHC.Types.Demand ( splitDmdSig, isDeadEndDiv ) import GHC.Types.TypeEnv import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Builtin.Types ( multiplicityTy ) import GHC.Data.Bag import GHC.Data.List.SetOps import GHC.Utils.Monad import GHC.Utils.Outputable as Outputable import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Misc import GHC.Utils.Trace import GHC.Utils.Error import qualified GHC.Utils.Error as Err import GHC.Utils.Logger 
import Control.Monad import Data.Foldable ( toList ) import Data.List.NonEmpty ( NonEmpty(..), groupWith ) import Data.List ( partition ) import Data.Maybe import GHC.Data.Pair import qualified GHC.LanguageExtensions as LangExt {- Note [Core Lint guarantee] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Core Lint is the type-checker for Core. Using it, we get the following guarantee: If all of: 1. Core Lint passes, 2. there are no unsafe coercions (i.e. unsafeEqualityProof), 3. all plugin-supplied coercions (i.e. PluginProv) are valid, and 4. all case-matches are complete then running the compiled program will not seg-fault, assuming no bugs downstream (e.g. in the code generator). This guarantee is quite powerful, in that it allows us to decouple the safety of the resulting program from the type inference algorithm. However, do note point (4) above. Core Lint does not check for incomplete case-matches; see Note [Case expression invariants] in GHC.Core, invariant (4). As explained there, an incomplete case-match might slip by Core Lint and cause trouble at runtime. Note [GHC Formalism] ~~~~~~~~~~~~~~~~~~~~ This file implements the type-checking algorithm for System FC, the "official" name of the Core language. Type safety of FC is heart of the claim that executables produced by GHC do not have segmentation faults. Thus, it is useful to be able to reason about System FC independently of reading the code. To this purpose, there is a document core-spec.pdf built in docs/core-spec that contains a formalism of the types and functions dealt with here. If you change just about anything in this file or you change other types/functions throughout the Core language (all signposted to this note), you should update that formalism. See docs/core-spec/README for more info about how to do so. Note [check vs lint] ~~~~~~~~~~~~~~~~~~~~ This file implements both a type checking algorithm and also general sanity checking. 
For example, the "sanity checking" checks for TyConApp on the left of an AppTy, which should never happen. These sanity checks don't really affect any notion of type soundness. Yet, it is convenient to do the sanity checks at the same time as the type checks. So, we use the following naming convention: - Functions that begin with 'lint'... are involved in type checking. These functions might also do some sanity checking. - Functions that begin with 'check'... are *not* involved in type checking. They exist only for sanity checking. Issues surrounding variable naming, shadowing, and such are considered *not* to be part of type checking, as the formalism omits these details. Summary of checks ~~~~~~~~~~~~~~~~~ Checks that a set of core bindings is well-formed. The PprStyle and String just control what we print in the event of an error. The Bool value indicates whether we have done any specialisation yet (in which case we do some extra checks). We check for (a) type errors (b) Out-of-scope type variables (c) Out-of-scope local variables (d) Ill-kinded types (e) Incorrect unsafe coercions If we have done specialisation the we check that there are (a) No top-level bindings of primitive (unboxed type) Note [Linting function types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described in Note [Representation of function types], all saturated applications of funTyCon are represented with the FunTy constructor. We check this invariant in lintType. Note [Linting type lets] ~~~~~~~~~~~~~~~~~~~~~~~~ In the desugarer, it's very very convenient to be able to say (in effect) let a = Type Bool in let x::a = True in That is, use a type let. See Note [Core type and coercion invariant] in "GHC.Core". One place it is used is in mkWwBodies; see Note [Join points and beta-redexes] in GHC.Core.Opt.WorkWrap.Utils. (Maybe there are other "clients" of this feature; I'm not sure). * Hence when linting we need to remember that a=Int, else we might reject a correct program. 
So we carry a type substitution (in this example [a -> Bool]) and apply this substitution before comparing types. In effect, in Lint, type equality is always equality-modulo-le-subst. This is in the le_subst field of LintEnv. But nota bene: (SI1) The le_subst substitution is applied to types and coercions only (SI2) The result of that substitution is used only to check for type equality, to check well-typed-ness, /but is then discarded/. The result of substittion does not outlive the CoreLint pass. (SI3) The InScopeSet of le_subst includes only TyVar and CoVar binders. * The function lintInTy :: Type -> LintM (Type, Kind) returns a substituted type. * When we encounter a binder (like x::a) we must apply the substitution to the type of the binding variable. lintBinders does this. * Clearly we need to clone tyvar binders as we go. * But take care (#17590)! We must also clone CoVar binders: let a = TYPE (ty |> cv) in \cv -> blah blindly substituting for `a` might capture `cv`. * Alas, when cloning a coercion variable we might choose a unique that happens to clash with an inner Id, thus \cv_66 -> let wild_X7 = blah in blah We decide to clone `cv_66` because it's already in scope. Fine, choose a new unique. Aha, X7 looks good. So we check the lambda body with le_subst of [cv_66 :-> cv_X7] This is all fine, even though we use the same unique as wild_X7. As (SI2) says, we do /not/ return a new lambda (\cv_X7 -> let wild_X7 = blah in ...) We simply use the le_subst substitution in types/coercions only, when checking for equality. * We still need to check that Id occurrences are bound by some enclosing binding. We do /not/ use the InScopeSet for the le_subst for this purpose -- it contains only TyCoVars. Instead we have a separate le_ids for the in-scope Id binders. Sigh. We might want to explore getting rid of type-let! 
Note [Bad unsafe coercion] ~~~~~~~~~~~~~~~~~~~~~~~~~~ For discussion see https://gitlab.haskell.org/ghc/ghc/wikis/bad-unsafe-coercions Linter introduces additional rules that checks improper coercion between different types, called bad coercions. Following coercions are forbidden: (a) coercions between boxed and unboxed values; (b) coercions between unlifted values of the different sizes, here active size is checked, i.e. size of the actual value but not the space allocated for value; (c) coercions between floating and integral boxed values, this check is not yet supported for unboxed tuples, as no semantics were specified for that; (d) coercions from / to vector type (e) If types are unboxed tuples then tuple (# A_1,..,A_n #) can be coerced to (# B_1,..,B_m #) if n=m and for each pair A_i, B_i rules (a-e) holds. Note [Join points] ~~~~~~~~~~~~~~~~~~ We check the rules listed in Note [Invariants on join points] in GHC.Core. The only one that causes any difficulty is the first: All occurrences must be tail calls. To this end, along with the in-scope set, we remember in le_joins the subset of in-scope Ids that are valid join ids. For example: join j x = ... in case e of A -> jump j y -- good B -> case (jump j z) of -- BAD C -> join h = jump j w in ... -- good D -> let x = jump j v in ... -- BAD A join point remains valid in case branches, so when checking the A branch, j is still valid. When we check the scrutinee of the inner case, however, we set le_joins to empty, and catch the error. Similarly, join points can occur free in RHSes of other join points but not the RHSes of value bindings (thunks and functions). ************************************************************************ * * Beginning and ending passes * * ************************************************************************ These functions are not CoreM monad stuff, but they probably ought to be, and it makes a convenient place for them. 
They print out stuff before and after core passes, and do Core Lint when necessary. -} endPass :: CoreToDo -> CoreProgram -> [CoreRule] -> CoreM () endPass pass binds rules = do { hsc_env <- getHscEnv ; print_unqual <- getPrintUnqualified ; liftIO $ endPassIO hsc_env print_unqual pass binds rules } endPassIO :: HscEnv -> PrintUnqualified -> CoreToDo -> CoreProgram -> [CoreRule] -> IO () -- Used by the IO-is CorePrep too endPassIO hsc_env print_unqual pass binds rules = do { dumpPassResult logger dump_core_sizes print_unqual mb_flag (showSDoc dflags (ppr pass)) (pprPassDetails pass) binds rules ; lintPassResult hsc_env pass binds } where dump_core_sizes = not (gopt Opt_SuppressCoreSizes dflags) logger = hsc_logger hsc_env dflags = hsc_dflags hsc_env mb_flag = case coreDumpFlag pass of Just flag | logHasDumpFlag logger flag -> Just flag | logHasDumpFlag logger Opt_D_verbose_core2core -> Just flag _ -> Nothing dumpPassResult :: Logger -> Bool -- dump core sizes? -> PrintUnqualified -> Maybe DumpFlag -- Just df => show details in a file whose -- name is specified by df -> String -- Header -> SDoc -- Extra info to appear after header -> CoreProgram -> [CoreRule] -> IO () dumpPassResult logger dump_core_sizes unqual mb_flag hdr extra_info binds rules = do { forM_ mb_flag $ \flag -> do logDumpFile logger (mkDumpStyle unqual) flag hdr FormatCore dump_doc -- Report result size -- This has the side effect of forcing the intermediate to be evaluated -- if it's not already forced by a -ddump flag. 
; Err.debugTraceMsg logger 2 size_doc } where size_doc = sep [text "Result size of" <+> text hdr, nest 2 (equals <+> ppr (coreBindsStats binds))] dump_doc = vcat [ nest 2 extra_info , size_doc , blankLine , if dump_core_sizes then pprCoreBindingsWithSize binds else pprCoreBindings binds , ppUnless (null rules) pp_rules ] pp_rules = vcat [ blankLine , text "------ Local rules for imported ids --------" , pprRules rules ] coreDumpFlag :: CoreToDo -> Maybe DumpFlag coreDumpFlag (CoreDoSimplify {}) = Just Opt_D_verbose_core2core coreDumpFlag (CoreDoPluginPass {}) = Just Opt_D_verbose_core2core coreDumpFlag CoreDoFloatInwards = Just Opt_D_verbose_core2core coreDumpFlag (CoreDoFloatOutwards {}) = Just Opt_D_verbose_core2core coreDumpFlag CoreLiberateCase = Just Opt_D_verbose_core2core coreDumpFlag CoreDoStaticArgs = Just Opt_D_verbose_core2core coreDumpFlag CoreDoCallArity = Just Opt_D_dump_call_arity coreDumpFlag CoreDoExitify = Just Opt_D_dump_exitify coreDumpFlag CoreDoDemand = Just Opt_D_dump_stranal coreDumpFlag CoreDoCpr = Just Opt_D_dump_cpranal coreDumpFlag CoreDoWorkerWrapper = Just Opt_D_dump_worker_wrapper coreDumpFlag CoreDoSpecialising = Just Opt_D_dump_spec coreDumpFlag CoreDoSpecConstr = Just Opt_D_dump_spec coreDumpFlag CoreCSE = Just Opt_D_dump_cse coreDumpFlag CoreDesugar = Just Opt_D_dump_ds_preopt coreDumpFlag CoreDesugarOpt = Just Opt_D_dump_ds coreDumpFlag CoreTidy = Just Opt_D_dump_simpl coreDumpFlag CorePrep = Just Opt_D_dump_prep coreDumpFlag CoreAddLateCcs = Just Opt_D_dump_late_cc coreDumpFlag CoreAddCallerCcs = Nothing coreDumpFlag CoreOccurAnal = Nothing coreDumpFlag CoreDoPrintCore = Nothing coreDumpFlag (CoreDoRuleCheck {}) = Nothing coreDumpFlag CoreDoNothing = Nothing coreDumpFlag (CoreDoPasses {}) = Nothing {- ************************************************************************ * * Top-level interfaces * * ************************************************************************ -} lintPassResult :: HscEnv -> CoreToDo -> CoreProgram 
-> IO () lintPassResult hsc_env pass binds | not (gopt Opt_DoCoreLinting dflags) = return () | otherwise = do { let warns_and_errs = lintCoreBindings dflags pass (interactiveInScope $ hsc_IC hsc_env) binds ; Err.showPass logger ("Core Linted result of " ++ showPpr dflags pass) ; displayLintResults logger (showLintWarnings pass) (ppr pass) (pprCoreBindings binds) warns_and_errs } where dflags = hsc_dflags hsc_env logger = hsc_logger hsc_env displayLintResults :: Logger -> Bool -- ^ If 'True', display linter warnings. -- If 'False', ignore linter warnings. -> SDoc -- ^ The source of the linted program -> SDoc -- ^ The linted program, pretty-printed -> WarnsAndErrs -> IO () displayLintResults logger display_warnings pp_what pp_pgm (warns, errs) | not (isEmptyBag errs) = do { logMsg logger Err.MCDump noSrcSpan $ withPprStyle defaultDumpStyle (vcat [ lint_banner "errors" pp_what, Err.pprMessageBag errs , text "*** Offending Program ***" , pp_pgm , text "*** End of Offense ***" ]) ; Err.ghcExit logger 1 } | not (isEmptyBag warns) , log_enable_debug (logFlags logger) , display_warnings -- If the Core linter encounters an error, output to stderr instead of -- stdout (#13342) = logMsg logger Err.MCInfo noSrcSpan $ withPprStyle defaultDumpStyle (lint_banner "warnings" pp_what $$ Err.pprMessageBag (mapBag ($$ blankLine) warns)) | otherwise = return () lint_banner :: String -> SDoc -> SDoc lint_banner string pass = text "*** Core Lint" <+> text string <+> text ": in result of" <+> pass <+> text "***" showLintWarnings :: CoreToDo -> Bool -- Disable Lint warnings on the first simplifier pass, because -- there may be some INLINE knots still tied, which is tiresomely noisy showLintWarnings (CoreDoSimplify _ (SimplMode { sm_phase = InitialPhase })) = False showLintWarnings _ = True lintInteractiveExpr :: SDoc -- ^ The source of the linted expression -> HscEnv -> CoreExpr -> IO () lintInteractiveExpr what hsc_env expr | not (gopt Opt_DoCoreLinting dflags) = return () | Just err <- 
lintExpr dflags (interactiveInScope $ hsc_IC hsc_env) expr = displayLintResults logger False what (pprCoreExpr expr) (emptyBag, err) | otherwise = return () where dflags = hsc_dflags hsc_env logger = hsc_logger hsc_env interactiveInScope :: InteractiveContext -> [Var] -- In GHCi we may lint expressions, or bindings arising from 'deriving' -- clauses, that mention variables bound in the interactive context. -- These are Local things (see Note [Interactively-bound Ids in GHCi] in GHC.Runtime.Context). -- So we have to tell Lint about them, lest it reports them as out of scope. -- -- We do this by find local-named things that may appear free in interactive -- context. This function is pretty revolting and quite possibly not quite right. -- When we are not in GHCi, the interactive context (hsc_IC hsc_env) is empty -- so this is a (cheap) no-op. -- -- See #8215 for an example interactiveInScope ictxt = tyvars ++ ids where -- C.f. GHC.Tc.Module.setInteractiveContext, Desugar.deSugarExpr (cls_insts, _fam_insts) = ic_instances ictxt te1 = mkTypeEnvWithImplicits (ic_tythings ictxt) te = extendTypeEnvWithIds te1 (map instanceDFunId $ instEnvElts cls_insts) ids = typeEnvIds te tyvars = tyCoVarsOfTypesList $ map idType ids -- Why the type variables? How can the top level envt have free tyvars? -- I think it's because of the GHCi debugger, which can bind variables -- f :: [t] -> [t] -- where t is a RuntimeUnk (see TcType) -- | Type-check a 'CoreProgram'. See Note [Core Lint guarantee]. 
lintCoreBindings :: DynFlags -> CoreToDo -> [Var] -> CoreProgram -> WarnsAndErrs -- Returns (warnings, errors) -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintCoreBindings dflags pass local_in_scope binds = initL dflags flags local_in_scope $ addLoc TopLevelBindings $ do { checkL (null dups) (dupVars dups) ; checkL (null ext_dups) (dupExtVars ext_dups) ; lintRecBindings TopLevel all_pairs $ \_ -> return () } where all_pairs = flattenBinds binds -- Put all the top-level binders in scope at the start -- This is because rewrite rules can bring something -- into use 'unexpectedly'; see Note [Glomming] in "GHC.Core.Opt.OccurAnal" binders = map fst all_pairs flags = (defaultLintFlags dflags) { lf_check_global_ids = check_globals , lf_check_inline_loop_breakers = check_lbs , lf_check_static_ptrs = check_static_ptrs , lf_check_linearity = check_linearity , lf_check_fixed_rep = check_fixed_rep } -- In the output of the desugarer, before optimisation, -- we have eta-expanded data constructors with representation-polymorphic -- bindings; so we switch off the representation-polymorphism checks. -- The very simple optimiser will beta-reduce them away. -- See Note [Checking for representation-polymorphic built-ins] -- in GHC.HsToCore.Expr. 
check_fixed_rep = case pass of CoreDesugar -> False _ -> True -- See Note [Checking for global Ids] check_globals = case pass of CoreTidy -> False CorePrep -> False _ -> True -- See Note [Checking for INLINE loop breakers] check_lbs = case pass of CoreDesugar -> False CoreDesugarOpt -> False _ -> True -- See Note [Checking StaticPtrs] check_static_ptrs | not (xopt LangExt.StaticPointers dflags) = AllowAnywhere | otherwise = case pass of CoreDoFloatOutwards _ -> AllowAtTopLevel CoreTidy -> RejectEverywhere CorePrep -> AllowAtTopLevel _ -> AllowAnywhere -- See Note [Linting linearity] check_linearity = gopt Opt_DoLinearCoreLinting dflags || ( case pass of CoreDesugar -> True _ -> False) (_, dups) = removeDups compare binders -- dups_ext checks for names with different uniques -- but the same External name M.n. We don't -- allow this at top level: -- M.n{r3} = ... -- M.n{r29} = ... -- because they both get the same linker symbol ext_dups = snd (removeDups ord_ext (map Var.varName binders)) ord_ext n1 n2 | Just m1 <- nameModule_maybe n1 , Just m2 <- nameModule_maybe n2 = compare (m1, nameOccName n1) (m2, nameOccName n2) | otherwise = LT {- ************************************************************************ * * \subsection[lintUnfolding]{lintUnfolding} * * ************************************************************************ Note [Linting Unfoldings from Interfaces] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use this to check all top-level unfoldings that come in from interfaces (it is very painful to catch errors otherwise). We do not need to call lintUnfolding on unfoldings that are nested within top-level unfoldings; they are linted when we lint the top-level unfolding; hence the `TopLevelFlag` on `tcPragExpr` in GHC.IfaceToCore. 
-} lintUnfolding :: Bool -- ^ True <=> is a compulsory unfolding -> DynFlags -> SrcLoc -> VarSet -- ^ Treat these as in scope -> CoreExpr -> Maybe (Bag SDoc) -- Nothing => OK lintUnfolding is_compulsory dflags locn var_set expr | isEmptyBag errs = Nothing | otherwise = Just errs where vars = nonDetEltsUniqSet var_set (_warns, errs) = initL dflags (defaultLintFlags dflags) vars $ if is_compulsory -- See Note [Checking for representation polymorphism] then noFixedRuntimeRepChecks linter else linter linter = addLoc (ImportedUnfolding locn) $ lintCoreExpr expr lintExpr :: DynFlags -> [Var] -- Treat these as in scope -> CoreExpr -> Maybe (Bag SDoc) -- Nothing => OK lintExpr dflags vars expr | isEmptyBag errs = Nothing | otherwise = Just errs where (_warns, errs) = initL dflags (defaultLintFlags dflags) vars linter linter = addLoc TopLevelBindings $ lintCoreExpr expr {- ************************************************************************ * * \subsection[lintCoreBinding]{lintCoreBinding} * * ************************************************************************ Check a core binding, returning the list of variables bound. 
-} -- Returns a UsageEnv because this function is called in lintCoreExpr for -- Let lintRecBindings :: TopLevelFlag -> [(Id, CoreExpr)] -> ([LintedId] -> LintM a) -> LintM (a, [UsageEnv]) lintRecBindings top_lvl pairs thing_inside = lintIdBndrs top_lvl bndrs $ \ bndrs' -> do { ues <- zipWithM lint_pair bndrs' rhss ; a <- thing_inside bndrs' ; return (a, ues) } where (bndrs, rhss) = unzip pairs lint_pair bndr' rhs = addLoc (RhsOf bndr') $ do { (rhs_ty, ue) <- lintRhs bndr' rhs -- Check the rhs ; lintLetBind top_lvl Recursive bndr' rhs rhs_ty ; return ue } lintLetBody :: [LintedId] -> CoreExpr -> LintM (LintedType, UsageEnv) lintLetBody bndrs body = do { (body_ty, body_ue) <- addLoc (BodyOfLetRec bndrs) (lintCoreExpr body) ; mapM_ (lintJoinBndrType body_ty) bndrs ; return (body_ty, body_ue) } lintLetBind :: TopLevelFlag -> RecFlag -> LintedId -> CoreExpr -> LintedType -> LintM () -- Binder's type, and the RHS, have already been linted -- This function checks other invariants lintLetBind top_lvl rec_flag binder rhs rhs_ty = do { let binder_ty = idType binder ; ensureEqTys binder_ty rhs_ty (mkRhsMsg binder (text "RHS") rhs_ty) -- If the binding is for a CoVar, the RHS should be (Coercion co) -- See Note [Core type and coercion invariant] in GHC.Core ; checkL (not (isCoVar binder) || isCoArg rhs) (mkLetErr binder rhs) -- Check the let/app invariant -- See Note [Core let/app invariant] in GHC.Core ; checkL ( isJoinId binder || mightBeLiftedType binder_ty || (isNonRec rec_flag && exprOkForSpeculation rhs) || isDataConWorkId binder || isDataConWrapId binder -- until #17521 is fixed || exprIsTickedString rhs) (badBndrTyMsg binder (text "unlifted")) -- Check that if the binder is at the top level and has type Addr#, -- that it is a string literal. -- See Note [Core top-level string literals]. 
; checkL (not (isTopLevel top_lvl && binder_ty `eqType` addrPrimTy) || exprIsTickedString rhs) (mkTopNonLitStrMsg binder) ; flags <- getLintFlags -- Check that a join-point binder has a valid type -- NB: lintIdBinder has checked that it is not top-level bound ; case isJoinId_maybe binder of Nothing -> return () Just arity -> checkL (isValidJoinPointType arity binder_ty) (mkInvalidJoinPointMsg binder binder_ty) ; when (lf_check_inline_loop_breakers flags && isStableUnfolding (realIdUnfolding binder) && isStrongLoopBreaker (idOccInfo binder) && isInlinePragma (idInlinePragma binder)) (addWarnL (text "INLINE binder is (non-rule) loop breaker:" <+> ppr binder)) -- Only non-rule loop breakers inhibit inlining -- We used to check that the dmdTypeDepth of a demand signature never -- exceeds idArity, but that is an unnecessary complication, see -- Note [idArity varies independently of dmdTypeDepth] in GHC.Core.Opt.DmdAnal -- Check that the binder's arity is within the bounds imposed by -- the type and the strictness signature. See Note [exprArity invariant] -- and Note [Trimming arity] ; checkL (typeArity (idType binder) `lengthAtLeast` idArity binder) (text "idArity" <+> ppr (idArity binder) <+> text "exceeds typeArity" <+> ppr (length (typeArity (idType binder))) <> colon <+> ppr binder) ; case splitDmdSig (idDmdSig binder) of (demands, result_info) | isDeadEndDiv result_info -> checkL (demands `lengthAtLeast` idArity binder) (text "idArity" <+> ppr (idArity binder) <+> text "exceeds arity imposed by the strictness signature" <+> ppr (idDmdSig binder) <> colon <+> ppr binder) _ -> return () ; addLoc (RuleOf binder) $ mapM_ (lintCoreRule binder binder_ty) (idCoreRules binder) ; addLoc (UnfoldingOf binder) $ lintIdUnfolding binder binder_ty (idUnfolding binder) ; return () } -- We should check the unfolding, if any, but this is tricky because -- the unfolding is a SimplifiableCoreExpr. Give up for now. -- | Checks the RHS of bindings. 
It only differs from 'lintCoreExpr' -- in that it doesn't reject occurrences of the function 'makeStatic' when they -- appear at the top level and @lf_check_static_ptrs == AllowAtTopLevel@, and -- for join points, it skips the outer lambdas that take arguments to the -- join point. -- -- See Note [Checking StaticPtrs]. lintRhs :: Id -> CoreExpr -> LintM (LintedType, UsageEnv) -- NB: the Id can be Linted or not -- it's only used for -- its OccInfo and join-pointer-hood lintRhs bndr rhs | Just arity <- isJoinId_maybe bndr = lintJoinLams arity (Just bndr) rhs | AlwaysTailCalled arity <- tailCallInfo (idOccInfo bndr) = lintJoinLams arity Nothing rhs -- Allow applications of the data constructor @StaticPtr@ at the top -- but produce errors otherwise. lintRhs _bndr rhs = fmap lf_check_static_ptrs getLintFlags >>= go where -- Allow occurrences of 'makeStatic' at the top-level but produce errors -- otherwise. go :: StaticPtrCheck -> LintM (OutType, UsageEnv) go AllowAtTopLevel | (binders0, rhs') <- collectTyBinders rhs , Just (fun, t, info, e) <- collectMakeStaticArgs rhs' = markAllJoinsBad $ foldr -- imitate @lintCoreExpr (Lam ...)@ lintLambda -- imitate @lintCoreExpr (App ...)@ (do fun_ty_ue <- lintCoreExpr fun lintCoreArgs fun_ty_ue [Type t, info, e] ) binders0 go _ = markAllJoinsBad $ lintCoreExpr rhs -- | Lint the RHS of a join point with expected join arity of @n@ (see Note -- [Join points] in "GHC.Core"). 
lintJoinLams :: JoinArity -> Maybe Id -> CoreExpr -> LintM (LintedType, UsageEnv) lintJoinLams join_arity enforce rhs = go join_arity rhs where go 0 expr = lintCoreExpr expr go n (Lam var body) = lintLambda var $ go (n-1) body go n expr | Just bndr <- enforce -- Join point with too few RHS lambdas = failWithL $ mkBadJoinArityMsg bndr join_arity n rhs | otherwise -- Future join point, not yet eta-expanded = markAllJoinsBad $ lintCoreExpr expr -- Body of lambda is not a tail position lintIdUnfolding :: Id -> Type -> Unfolding -> LintM () lintIdUnfolding bndr bndr_ty uf | isStableUnfolding uf , Just rhs <- maybeUnfoldingTemplate uf = do { ty <- fst <$> (if isCompulsoryUnfolding uf then noFixedRuntimeRepChecks $ lintRhs bndr rhs -- ^^^^^^^^^^^^^^^^^^^^^^^ -- See Note [Checking for representation polymorphism] else lintRhs bndr rhs) ; ensureEqTys bndr_ty ty (mkRhsMsg bndr (text "unfolding") ty) } lintIdUnfolding _ _ _ = return () -- Do not Lint unstable unfoldings, because that leads -- to exponential behaviour; c.f. GHC.Core.FVs.idUnfoldingVars {- Note [Checking for INLINE loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's very suspicious if a strong loop breaker is marked INLINE. However, the desugarer generates instance methods with INLINE pragmas that form a mutually recursive group. Only after a round of simplification are they unravelled. So we suppress the test for the desugarer. Note [Checking for representation polymorphism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We ordinarily want to check for bad representation polymorphism. See Note [Representation polymorphism invariants] in GHC.Core. However, we do *not* want to do this in a compulsory unfolding. Compulsory unfoldings arise only internally, for things like newtype wrappers, dictionaries, and (notably) unsafeCoerce#. 
These might legitimately be representation-polymorphic; indeed representation-polymorphic unfoldings are a primary reason for the very existence of compulsory unfoldings (we can't compile code for the original, representation-polymorphic, binding). It is vitally important that we do representation polymorphism checks *after* performing the unfolding, but not beforehand. This is all safe because we will check any unfolding after it has been unfolded; checking the unfolding beforehand is merely an optimization, and one that actively hurts us here. Note [Linting of runRW#] ~~~~~~~~~~~~~~~~~~~~~~~~ runRW# has some very special behavior (see Note [runRW magic] in GHC.CoreToStg.Prep) which CoreLint must accommodate, by allowing join points in its argument. For example, this is fine: join j x = ... in runRW# (\s. case v of A -> j 3 B -> j 4) Usually those calls to the join point 'j' would not be valid tail calls, because they occur in a function argument. But in the case of runRW# they are fine, because runRW# (\s.e) behaves operationally just like e. (runRW# is ultimately inlined in GHC.CoreToStg.Prep.) In the case that the continuation is /not/ a lambda we simply disable this special behaviour. For example, this is /not/ fine: join j = ... in runRW# @r @ty (jump j) ************************************************************************ * * \subsection[lintCoreExpr]{lintCoreExpr} * * ************************************************************************ -} -- Linted things: substitution applied, and type is linted type LintedType = Type type LintedKind = Kind type LintedCoercion = Coercion type LintedTyCoVar = TyCoVar type LintedId = Id -- | Lint an expression cast through the given coercion, returning the type -- resulting from the cast. 
lintCastExpr :: CoreExpr -> LintedType -> Coercion -> LintM LintedType lintCastExpr expr expr_ty co = do { co' <- lintCoercion co ; let (Pair from_ty to_ty, role) = coercionKindRole co' ; checkValueType to_ty $ text "target of cast" <+> quotes (ppr co') ; lintRole co' Representational role ; ensureEqTys from_ty expr_ty (mkCastErr expr co' from_ty expr_ty) ; return to_ty } lintCoreExpr :: CoreExpr -> LintM (LintedType, UsageEnv) -- The returned type has the substitution from the monad -- already applied to it: -- lintCoreExpr e subst = exprType (subst e) -- -- The returned "type" can be a kind, if the expression is (Type ty) -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintCoreExpr (Var var) = do var_pair@(var_ty, _) <- lintIdOcc var 0 checkCanEtaExpand (Var var) [] var_ty return var_pair lintCoreExpr (Lit lit) = return (literalType lit, zeroUE) lintCoreExpr (Cast expr co) = do (expr_ty, ue) <- markAllJoinsBad $ lintCoreExpr expr to_ty <- lintCastExpr expr expr_ty co return (to_ty, ue) lintCoreExpr (Tick tickish expr) = do case tickish of Breakpoint _ _ ids -> forM_ ids $ \id -> do checkDeadIdOcc id lookupIdInScope id _ -> return () markAllJoinsBadIf block_joins $ lintCoreExpr expr where block_joins = not (tickish `tickishScopesLike` SoftScope) -- TODO Consider whether this is the correct rule. It is consistent with -- the simplifier's behaviour - cost-centre-scoped ticks become part of -- the continuation, and thus they behave like part of an evaluation -- context, but soft-scoped and non-scoped ticks simply wrap the result -- (see Simplify.simplTick). 
lintCoreExpr (Let (NonRec tv (Type ty)) body)
  | isTyVar tv
  =     -- See Note [Linting type lets]
    do  { ty' <- lintType ty
        ; lintTyBndr tv              $ \ tv' ->
    do  { addLoc (RhsOf tv) $ lintTyKind tv' ty'
                -- Now extend the substitution so we
                -- take advantage of it in the body
        ; extendTvSubstL tv ty'        $
          addLoc (BodyOfLetRec [tv]) $
          lintCoreExpr body } }

lintCoreExpr (Let (NonRec bndr rhs) body)
  | isId bndr
  = do { -- First Lint the RHS, before bringing the binder into scope
         (rhs_ty, let_ue) <- lintRhs bndr rhs

         -- See Note [Multiplicity of let binders] in Var
         -- Now lint the binder
       ; lintBinder LetBind bndr $ \bndr' ->
    do { lintLetBind NotTopLevel NonRecursive bndr' rhs rhs_ty
       ; addAliasUE bndr let_ue (lintLetBody [bndr'] body) } }

  | otherwise
  = failWithL (mkLetErr bndr rhs)       -- Not quite accurate

lintCoreExpr e@(Let (Rec pairs) body)
  = do  { -- Check that the list of pairs is non-empty
          checkL (not (null pairs)) (emptyRec e)

          -- Check that there are no duplicated binders
        ; let (_, dups) = removeDups compare bndrs
        ; checkL (null dups) (dupVars dups)

          -- Check that either all the binders are joins, or none
        ; checkL (all isJoinId bndrs || all (not . isJoinId) bndrs) $
          mkInconsistentRecMsg bndrs

        -- See Note [Multiplicity of let binders] in Var
        ; ((body_type, body_ue), ues) <-
            lintRecBindings NotTopLevel pairs $ \ bndrs' ->
            lintLetBody bndrs' body
        ; return (body_type, body_ue `addUE` scaleUE Many (foldr1 addUE ues)) }
  where
    bndrs = map fst pairs

lintCoreExpr e@(App _ _)
  | Var fun <- fun
  , fun `hasKey` runRWKey
    -- N.B. we may have an over-saturated application of the form:
    --   runRW (\s -> \x -> ...) y
  , ty_arg1 : ty_arg2 : arg3 : rest <- args
  = do { fun_pair1 <- lintCoreArg (idType fun, zeroUE) ty_arg1
       ; (fun_ty2, ue2) <- lintCoreArg fun_pair1 ty_arg2
         -- See Note [Linting of runRW#]
       ; let lintRunRWCont :: CoreArg -> LintM (LintedType, UsageEnv)
             lintRunRWCont expr@(Lam _ _) =
                lintJoinLams 1 (Just fun) expr
             lintRunRWCont other = markAllJoinsBad $ lintCoreExpr other
             -- TODO: Look through ticks?
       ; (arg3_ty, ue3) <- lintRunRWCont arg3
       ; app_ty <- lintValApp arg3 fun_ty2 arg3_ty ue2 ue3
       ; lintCoreArgs app_ty rest }

  | otherwise
  = do { fun_pair <- lintCoreFun fun (length args)
       ; app_pair@(app_ty, _) <- lintCoreArgs fun_pair args
       ; checkCanEtaExpand fun args app_ty
       ; return app_pair}
  where
    (fun, args, _source_ticks) = collectArgsTicks tickishFloatable e
    -- We must look through source ticks to avoid #21152, for example:
    --
    -- reallyUnsafePtrEquality
    --   = \ @a ->
    --       (src reallyUnsafePtrEquality#)
    --         @Lifted @a @Lifted @a
    --
    -- To do this, we use `collectArgsTicks tickishFloatable` to match
    -- the eta expansion behaviour, as per Note [Eta expansion and source notes]
    -- in GHC.Core.Opt.Arity.

lintCoreExpr (Lam var expr)
  = markAllJoinsBad $
    lintLambda var $ lintCoreExpr expr

lintCoreExpr (Case scrut var alt_ty alts)
  = lintCaseExpr scrut var alt_ty alts

-- This case can't happen; linting types in expressions gets routed through
-- lintCoreArgs
lintCoreExpr (Type ty)
  = failWithL (text "Type found as expression" <+> ppr ty)

lintCoreExpr (Coercion co)
  = do { co' <- addLoc (InCo co) $ lintCoercion co
       ; return (coercionType co', zeroUE) }

----------------------
lintIdOcc :: Var -> Int -- Number of arguments (type or value) being passed
          -> LintM (LintedType, UsageEnv) -- returns type of the *variable*
lintIdOcc var nargs
  = addLoc (OccOf var) $
    do  { checkL (isNonCoVarId var)
                 (text "Non term variable" <+> ppr var)
                 -- See GHC.Core Note [Variable occurrences in Core]

        -- Check that the type of the occurrence is the same
        -- as the type of the binding site.  The inScopeIds are
        -- /un-substituted/, so this checks that the occurrence type
        -- is identical to the binder type.
        -- This makes things much easier for things like:
        --    /\a. \(x::Maybe a). /\a. ...(x::Maybe a)...
        -- The "::Maybe a" on the occurrence is referring to the /outer/ a.
        -- If we compared /substituted/ types we'd risk comparing
        -- (Maybe a) from the binding site with bogus (Maybe a1) from
        -- the occurrence site.  Comparing un-substituted types finesses
        -- this altogether
        ; (bndr, linted_bndr_ty) <- lookupIdInScope var
        ; let occ_ty  = idType var
              bndr_ty = idType bndr
        ; ensureEqTys occ_ty bndr_ty $
          mkBndrOccTypeMismatchMsg bndr var bndr_ty occ_ty

          -- Check for a nested occurrence of the StaticPtr constructor.
          -- See Note [Checking StaticPtrs].
        ; lf <- getLintFlags
        ; when (nargs /= 0 && lf_check_static_ptrs lf /= AllowAnywhere) $
            checkL (idName var /= makeStaticName) $
              text "Found makeStatic nested in an expression"

        ; checkDeadIdOcc var
        ; checkJoinOcc var nargs

        ; usage <- varCallSiteUsage var

        ; return (linted_bndr_ty, usage) }

lintCoreFun :: CoreExpr
            -> Int                          -- Number of arguments (type or val) being passed
            -> LintM (LintedType, UsageEnv) -- Returns type of the *function*
lintCoreFun (Var var) nargs
  = lintIdOcc var nargs

lintCoreFun (Lam var body) nargs
  -- Act like lintCoreExpr of Lam, but *don't* call markAllJoinsBad;
  -- See Note [Beta redexes]
  | nargs /= 0
  = lintLambda var $ lintCoreFun body (nargs - 1)

lintCoreFun expr nargs
  = markAllJoinsBadIf (nargs /= 0) $
      -- See Note [Join points are less general than the paper]
    lintCoreExpr expr

------------------
-- Lint a lambda: lint the binder, lint the body in the extended scope,
-- then check (and remove) the binder's usage via checkLinearity.
lintLambda :: Var -> LintM (Type, UsageEnv) -> LintM (Type, UsageEnv)
lintLambda var lintBody =
    addLoc (LambdaBodyOf var) $
    lintBinder LambdaBind var $ \ var' ->
    do { (body_ty, ue) <- lintBody
       ; ue' <- checkLinearity ue var'
       ; return (mkLamType var' body_ty, ue') }

------------------
checkDeadIdOcc :: Id -> LintM ()
-- Occurrences of an Id should never be dead....
-- except when we are checking a case pattern
checkDeadIdOcc id
  | isDeadOcc (idOccInfo id)
  = do { in_case <- inCasePat
       ; checkL in_case
                (text "Occurrence of a dead Id" <+> ppr id) }
  | otherwise
  = return ()

------------------
lintJoinBndrType :: LintedType -- Type of the body
                 -> LintedId   -- Possibly a join Id
                 -> LintM ()
-- Checks that the return type of a join Id matches the body
-- E.g. join j x = rhs in body
--      The type of 'rhs' must be the same as the type of 'body'
lintJoinBndrType body_ty bndr
  | Just arity <- isJoinId_maybe bndr
  , let bndr_ty = idType bndr
  , (bndrs, res) <- splitPiTys bndr_ty
  = checkL (length bndrs >= arity
            && body_ty `eqType` mkPiTys (drop arity bndrs) res) $
    hang (text "Join point returns different type than body")
       2 (vcat [ text "Join bndr:" <+> ppr bndr <+> dcolon <+> ppr (idType bndr)
               , text "Join arity:" <+> ppr arity
               , text "Body type:" <+> ppr body_ty ])
  | otherwise
  = return ()

checkJoinOcc :: Id -> JoinArity -> LintM ()
-- Check that if the occurrence is a JoinId, then so is the
-- binding site, and it's a valid join Id
checkJoinOcc var n_args
  | Just join_arity_occ <- isJoinId_maybe var
  = do { mb_join_arity_bndr <- lookupJoinId var
       ; case mb_join_arity_bndr of {
           Nothing -> -- Binder is not a join point
             do { join_set <- getValidJoins
                ; addErrL (text "join set " <+> ppr join_set $$
                           invalidJoinOcc var) } ;

           Just join_arity_bndr ->

    do { checkL (join_arity_bndr == join_arity_occ) $
           -- Arity differs at binding site and occurrence
         mkJoinBndrOccMismatchMsg var join_arity_bndr join_arity_occ

       ; checkL (n_args == join_arity_occ) $
           -- Arity doesn't match #args
         mkBadJumpMsg var join_arity_occ n_args } } }

  | otherwise
  = return ()

-- | This function checks that we are able to perform eta expansion for
-- functions with no binding, in order to satisfy invariant I3
-- from Note [Representation polymorphism invariants] in GHC.Core.
checkCanEtaExpand :: CoreExpr   -- ^ the function (head of the application) we are checking
                  -> [CoreArg]  -- ^ the arguments to the application
                  -> LintedType -- ^ the instantiated type of the overall application
                  -> LintM ()
checkCanEtaExpand (Var fun_id) args app_ty
  | hasNoBinding fun_id
  = checkL (null bad_arg_tys) err_msg
  where
    arity :: Arity
    arity = idArity fun_id

    nb_val_args :: Int
    nb_val_args = count isValArg args

    -- Check the remaining argument types, past the
    -- given arguments and up to the arity of the 'Id'.
    -- Returns the types that couldn't be determined to have
    -- a fixed RuntimeRep.
    check_args :: [Type] -> [Type]
    check_args = go (nb_val_args + 1)
      where
        go :: Int    -- index of the argument (starting from 1)
           -> [Type] -- arguments
           -> [Type] -- value argument types that could not be
                     -- determined to have a fixed runtime representation
        go i _
          | i > arity
          = []
        go _ []
          -- The Arity of an Id should never exceed the number of value arguments
          -- that can be read off from the Id's type.
          -- See Note [Arity and function types] in GHC.Types.Id.Info.
          = pprPanic "checkCanEtaExpand: arity larger than number of value arguments apparent in type" $
              vcat [ text "fun_id =" <+> ppr fun_id
                   , text "arity =" <+> ppr arity
                   , text "app_ty =" <+> ppr app_ty
                   , text "args = " <+> ppr args
                   , text "nb_val_args =" <+> ppr nb_val_args ]
        go i (ty : bndrs)
          | typeHasFixedRuntimeRep ty
          = go (i+1) bndrs
          | otherwise
          = ty : go (i+1) bndrs

    bad_arg_tys :: [Type]
    bad_arg_tys = check_args . map fst $ getRuntimeArgTys app_ty
      -- We use 'getRuntimeArgTys' to find all the argument types,
      -- including those hidden under newtypes. For example,
      -- if `FunNT a b` is a newtype around `a -> b`, then
      -- when checking
      --
      --   foo :: forall r (a :: TYPE r) (b :: TYPE r) c. a -> FunNT b c
      --
      -- we should check that the instantiations of BOTH `a` AND `b`
      -- have a fixed runtime representation.

    err_msg :: SDoc
    err_msg = vcat [ text "Cannot eta expand" <+> quotes (ppr fun_id)
                   , text "The following type" <> plural bad_arg_tys
                     <+> doOrDoes bad_arg_tys
                     <+> text "not have a fixed runtime representation:"
                   , nest 2 $ vcat $ map ppr_ty_ki bad_arg_tys ]

    ppr_ty_ki :: Type -> SDoc
    ppr_ty_ki ty = bullet <+> ppr ty <+> dcolon <+> ppr (typeKind ty)
checkCanEtaExpand _ _ _ = return ()

-- Check that the usage of var is consistent with var itself, and pop the var
-- from the usage environment (this is important because of shadowing).
checkLinearity :: UsageEnv -> Var -> LintM UsageEnv
checkLinearity body_ue lam_var =
  case varMultMaybe lam_var of
    Just mult -> do ensureSubUsage lhs mult (err_msg mult)
                    return $ deleteUE body_ue lam_var
    Nothing   -> return body_ue -- A type variable
  where
    lhs = lookupUE body_ue lam_var
    err_msg mult = text "Linearity failure in lambda:" <+> ppr lam_var
                   $$ ppr lhs <+> text "⊈" <+> ppr mult

{- Note [No alternatives lint check]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Case expressions with no alternatives are odd beasts, and it would seem
that they would be worth looking at in the linter (cf #10180).  We
used to check two things:

* exprIsHNF is false: it would *seem* to be terribly wrong if
  the scrutinee was already in head normal form.

* exprIsDeadEnd is true: we should be able to see why GHC believes the
  scrutinee is diverging for sure.

It was already known that the second test was not entirely reliable.
Unfortunately (#13990), the first test turned out not to be reliable
either. Getting the checks right turns out to be somewhat complicated.

For example, suppose we have (comment 8)

  data T a where
    TInt :: T Int

  absurdTBool :: T Bool -> a
  absurdTBool v = case v of

  data Foo = Foo !(T Bool)

  absurdFoo :: Foo -> a
  absurdFoo (Foo x) = absurdTBool x

GHC initially accepts the empty case because of the GADT conditions.  But then
we inline absurdTBool, getting

  absurdFoo (Foo x) = case x of

x is in normal form (because the Foo constructor is strict) but the
case is empty.  To avoid this problem, GHC would have to recognize
that matching on Foo x is already absurd, which is not so easy.

More generally, we don't really know all the ways that GHC can lose
track of why an expression is bottom, so we shouldn't make too much
fuss when that happens.


Note [Beta redexes]
~~~~~~~~~~~~~~~~~~~
Consider:

  join j @x y z = ... in
  (\@x y z -> jump j @x y z) @t e1 e2

This is clearly ill-typed, since the jump is inside both an application and a
lambda, either of which is enough to disqualify it as a tail call (see Note
[Invariants on join points] in GHC.Core). However, strictly from a
lambda-calculus perspective, the term doesn't go wrong---after the two beta
reductions, the jump *is* a tail call and everything is fine.

Why would we want to allow this when we have let? One reason is that a compound
beta redex (that is, one with more than one argument) has different scoping
rules: naively reducing the above example using lets will capture any free
occurrence of y in e2. More fundamentally, type lets are tricky; many passes,
such as Float Out, tacitly assume that the incoming program's type lets have
all been dealt with by the simplifier. Thus we don't want to let-bind any types
in, say, GHC.Core.Subst.simpleOptPgm, which in some circumstances can run
immediately before Float Out.

All that said, currently GHC.Core.Subst.simpleOptPgm is the only thing using
this loophole, doing so to avoid re-traversing large functions (beta-reducing a
type lambda without introducing a type let requires a substitution). TODO:
Improve simpleOptPgm so that we can forget all this ever happened.

************************************************************************
*                                                                      *
\subsection[lintCoreArgs]{lintCoreArgs}
*                                                                      *
************************************************************************

The basic version of these functions checks that the argument is a
subtype of the required type, as one would expect.
-}

lintCoreArgs  :: (LintedType, UsageEnv) -> [CoreArg] -> LintM (LintedType, UsageEnv)
lintCoreArgs (fun_ty, fun_ue) args = foldM lintCoreArg (fun_ty, fun_ue) args

lintCoreArg  :: (LintedType, UsageEnv) -> CoreArg -> LintM (LintedType, UsageEnv)
lintCoreArg (fun_ty, ue) (Type arg_ty)
  = do { checkL (not (isCoercionTy arg_ty))
                (text "Unnecessary coercion-to-type injection:"
                  <+> ppr arg_ty)
       ; arg_ty' <- lintType arg_ty
       ; res <- lintTyApp fun_ty arg_ty'
       ; return (res, ue) }

lintCoreArg (fun_ty, fun_ue) arg
  = do { (arg_ty, arg_ue) <- markAllJoinsBad $ lintCoreExpr arg
           -- See Note [Representation polymorphism invariants] in GHC.Core
       ; flags <- getLintFlags
       ; when (lf_check_fixed_rep flags) $
         -- Only check that 'arg_ty' has a fixed RuntimeRep
         -- if 'lf_check_fixed_rep' is on.
         do { checkL (typeHasFixedRuntimeRep arg_ty)
                     (text "Argument does not have a fixed runtime representation"
                      <+> ppr arg <+> dcolon
                      <+> parens (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty)))
            ; checkL (mightBeLiftedType arg_ty || exprOkForSpeculation arg)
                     (mkLetAppMsg arg) }

       ; lintValApp arg fun_ty arg_ty fun_ue arg_ue }

-----------------
lintAltBinders :: UsageEnv
               -> Var         -- Case binder
               -> LintedType  -- Scrutinee type
               -> LintedType  -- Constructor type
               -> [(Mult, OutVar)]    -- Binders
               -> LintM UsageEnv
-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lintAltBinders rhs_ue _case_bndr scrut_ty con_ty []
  = do { ensureEqTys con_ty scrut_ty (mkBadPatMsg con_ty scrut_ty)
       ; return rhs_ue }
lintAltBinders rhs_ue case_bndr scrut_ty con_ty ((var_w, bndr):bndrs)
  | isTyVar bndr
  = do { con_ty' <- lintTyApp con_ty (mkTyVarTy bndr)
       ; lintAltBinders rhs_ue case_bndr scrut_ty con_ty' bndrs }
  | otherwise
  = do { (con_ty', _) <- lintValApp (Var bndr) con_ty (idType bndr) zeroUE zeroUE
         -- We can pass zeroUE to lintValApp because we ignore its usage
         -- calculation and compute it in the call for checkCaseLinearity below.
       ; rhs_ue' <- checkCaseLinearity rhs_ue case_bndr var_w bndr
       ; lintAltBinders rhs_ue' case_bndr scrut_ty con_ty' bndrs }

-- | Implements the case rules for linearity
checkCaseLinearity :: UsageEnv -> Var -> Mult -> Var -> LintM UsageEnv
checkCaseLinearity ue case_bndr var_w bndr = do
    ensureSubUsage lhs rhs err_msg
    lintLinearBinder (ppr bndr) (case_bndr_w `mkMultMul` var_w) (varMult bndr)
    return $ deleteUE ue bndr
  where
    lhs = bndr_usage `addUsage` (var_w `scaleUsage` case_bndr_usage)
    rhs = case_bndr_w `mkMultMul` var_w
    err_msg  = (text "Linearity failure in variable:" <+> ppr bndr
                $$ ppr lhs <+> text "⊈" <+> ppr rhs
                $$ text "Computed by:"
                <+> text "LHS:" <+> lhs_formula
                <+> text "RHS:" <+> rhs_formula)
    lhs_formula = ppr bndr_usage <+> text "+"
                                 <+> parens (ppr case_bndr_usage <+> text "*" <+> ppr var_w)
    rhs_formula = ppr case_bndr_w <+> text "*" <+> ppr var_w
    case_bndr_w = varMult case_bndr
    case_bndr_usage = lookupUE ue case_bndr
    bndr_usage = lookupUE ue bndr

-----------------
lintTyApp :: LintedType -> LintedType -> LintM LintedType
lintTyApp fun_ty arg_ty
  | Just (tv,body_ty) <- splitForAllTyCoVar_maybe fun_ty
  = do  { lintTyKind tv arg_ty
        ; in_scope <- getInScope
        -- substTy needs the set of tyvars in scope to avoid generating
        -- uniques that are already in scope.
        -- See Note [The substitution invariant] in GHC.Core.TyCo.Subst
        ; return (substTyWithInScope in_scope [tv] [arg_ty] body_ty) }

  | otherwise
  = failWithL (mkTyAppMsg fun_ty arg_ty)

-----------------

-- | @lintValApp arg fun_ty arg_ty@ lints an application of @fun arg@
-- where @fun :: fun_ty@ and @arg :: arg_ty@, returning the type of the
-- application.
lintValApp :: CoreExpr -> LintedType -> LintedType -> UsageEnv -> UsageEnv
           -> LintM (LintedType, UsageEnv)
lintValApp arg fun_ty arg_ty fun_ue arg_ue
  | Just (w, arg_ty', res_ty') <- splitFunTy_maybe fun_ty
  = do { ensureEqTys arg_ty' arg_ty (mkAppMsg arg_ty' arg_ty arg)
       ; let app_ue = addUE fun_ue (scaleUE w arg_ue)
       ; return (res_ty', app_ue) }
  | otherwise
  = failWithL err2
  where
    err2 = mkNonFunAppMsg fun_ty arg_ty arg

lintTyKind :: OutTyVar -> LintedType -> LintM ()
-- Both args have had substitution applied
-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lintTyKind tyvar arg_ty
  = unless (arg_kind `eqType` tyvar_kind) $
    addErrL (mkKindErrMsg tyvar arg_ty $$ (text "Linted Arg kind:" <+> ppr arg_kind))
  where
    tyvar_kind = tyVarKind tyvar
    arg_kind = typeKind arg_ty

{-
************************************************************************
*                                                                      *
\subsection[lintCoreAlts]{lintCoreAlts}
*                                                                      *
************************************************************************
-}

lintCaseExpr :: CoreExpr -> Id -> Type -> [CoreAlt] -> LintM (LintedType, UsageEnv)
lintCaseExpr scrut var alt_ty alts =
  do { let e = Case scrut var alt_ty alts   -- Just for error messages

     -- Check the scrutinee
     ; (scrut_ty, scrut_ue) <- markAllJoinsBad $ lintCoreExpr scrut
          -- See Note [Join points are less general than the paper]
          -- in GHC.Core
     ; let scrut_mult = varMult var

     ; alt_ty <- addLoc (CaseTy scrut) $ lintValueType alt_ty
     ; var_ty <- addLoc (IdTy var)     $ lintValueType (idType var)

     -- We used to try to check whether a case expression with no
     -- alternatives was legitimate, but this didn't work.
     -- See Note [No alternatives lint check] for details.

     -- Check that the scrutinee is not a floating-point type
     -- if there are any literal alternatives
     -- See GHC.Core Note [Case expression invariants] item (5)
     -- See Note [Rules for floating-point comparisons] in GHC.Core.Opt.ConstantFold
     ; let isLitPat (Alt (LitAlt _) _  _) = True
           isLitPat _                     = False
     ; checkL (not $ isFloatingPrimTy scrut_ty && any isLitPat alts)
         (text "Lint warning: Scrutinising floating-point expression with literal pattern in case analysis (see #9238)."
          $$ text "scrut" <+> ppr scrut)

     ; case tyConAppTyCon_maybe (idType var) of
         Just tycon
              | debugIsOn
              , isAlgTyCon tycon
              , not (isAbstractTyCon tycon)
              , null (tyConDataCons tycon)
              , not (exprIsDeadEnd scrut)
              -> pprTrace "Lint warning: case binder's type has no constructors"
                          (ppr var <+> ppr (idType var))
                          -- This can legitimately happen for type families
                      $ return ()
         _otherwise -> return ()

        -- Don't use lintIdBndr on var, because unboxed tuple is legitimate

     ; subst <- getTCvSubst
     ; ensureEqTys var_ty scrut_ty (mkScrutMsg var var_ty scrut_ty subst)
       -- See GHC.Core Note [Case expression invariants] item (7)

     ; lintBinder CaseBind var $ \_ ->
       do { -- Check the alternatives
          ; alt_ues <- mapM (lintCoreAlt var scrut_ty scrut_mult alt_ty) alts
          ; let case_ue = (scaleUE scrut_mult scrut_ue) `addUE` supUEs alt_ues
          ; checkCaseAlts e scrut_ty alts
          ; return (alt_ty, case_ue) } }

checkCaseAlts :: CoreExpr -> LintedType -> [CoreAlt] -> LintM ()
-- a) Check that the alts are non-empty
-- b1) Check that the DEFAULT comes first, if it exists
-- b2) Check that the others are in increasing order
-- c) Check that there's a default for infinite types
-- NB: Algebraic cases are not necessarily exhaustive, because
--     the simplifier correctly eliminates case that can't
--     possibly match.
checkCaseAlts e ty alts =
  do { checkL (all non_deflt con_alts) (mkNonDefltMsg e)
         -- See GHC.Core Note [Case expression invariants] item (2)

     ; checkL (increasing_tag con_alts) (mkNonIncreasingAltsMsg e)
         -- See GHC.Core Note [Case expression invariants] item (3)

          -- For types Int#, Word# with an infinite (well, large!) number of
          -- possible values, there should usually be a DEFAULT case
          -- But (see Note [Empty case alternatives] in GHC.Core) it's ok to
          -- have *no* case alternatives.
          -- In effect, this is a kind of partial test. I suppose it's possible
          -- that we might *know* that 'x' was 1 or 2, in which case
          --   case x of { 1 -> e1; 2 -> e2 }
          -- would be fine.
     ; checkL (isJust maybe_deflt || not is_infinite_ty || null alts)
              (nonExhaustiveAltsMsg e) }
  where
    (con_alts, maybe_deflt) = findDefault alts

        -- Check that successive alternatives have strictly increasing tags
    increasing_tag (alt1 : rest@( alt2 : _)) = alt1 `ltAlt` alt2 && increasing_tag rest
    increasing_tag _                         = True

    non_deflt (Alt DEFAULT _ _) = False
    non_deflt _                 = True

    is_infinite_ty = case tyConAppTyCon_maybe ty of
                        Nothing    -> False
                        Just tycon -> isPrimTyCon tycon

lintAltExpr :: CoreExpr -> LintedType -> LintM UsageEnv
lintAltExpr expr ann_ty
  = do { (actual_ty, ue) <- lintCoreExpr expr
       ; ensureEqTys actual_ty ann_ty (mkCaseAltMsg expr actual_ty ann_ty)
       ; return ue }
         -- See GHC.Core Note [Case expression invariants] item (6)

lintCoreAlt :: Var              -- Case binder
            -> LintedType       -- Type of scrutinee
            -> Mult             -- Multiplicity of scrutinee
            -> LintedType       -- Type of the alternative
            -> CoreAlt
            -> LintM UsageEnv
-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lintCoreAlt _ _ _ alt_ty (Alt DEFAULT args rhs) =
  do { lintL (null args) (mkDefaultArgsMsg args)
     ; lintAltExpr rhs alt_ty }

lintCoreAlt _case_bndr scrut_ty _ alt_ty (Alt (LitAlt lit) args rhs)
  | litIsLifted lit
  = failWithL integerScrutinisedMsg
  | otherwise
  = do { lintL (null args) (mkDefaultArgsMsg args)
       ; ensureEqTys lit_ty scrut_ty (mkBadPatMsg lit_ty scrut_ty)
       ; lintAltExpr rhs alt_ty }
  where
    lit_ty = literalType lit

lintCoreAlt case_bndr scrut_ty _scrut_mult alt_ty alt@(Alt (DataAlt con) args rhs)
  | isNewTyCon (dataConTyCon con)
  = zeroUE <$ addErrL (mkNewTyDataConAltMsg scrut_ty alt)
  | Just (tycon, tycon_arg_tys) <- splitTyConApp_maybe scrut_ty
  = addLoc (CaseAlt alt) $
    do  { -- First instantiate the universally quantified
          -- type variables of the data constructor
          -- We've already checked that the scrutinee's TyCon matches
          lintL (tycon == dataConTyCon con) (mkBadConMsg tycon con)
        ; let { con_payload_ty = piResultTys (dataConRepType con) tycon_arg_tys
              ; binderMult (Named _)   = Many
              ; binderMult (Anon _ st) = scaledMult st
                -- See Note [Validating multiplicities in a case]
              ; multiplicities = map binderMult $ fst $ splitPiTys con_payload_ty }

        -- And now bring the new binders into scope
        ; lintBinders CasePatBind args $ \ args' ->
    do { rhs_ue <- lintAltExpr rhs alt_ty
       ; rhs_ue' <- addLoc (CasePat alt) (lintAltBinders rhs_ue case_bndr scrut_ty con_payload_ty (zipEqual "lintCoreAlt" multiplicities args'))
       ; return $ deleteUE rhs_ue' case_bndr }
   }

  | otherwise   -- Scrut-ty is wrong shape
  = zeroUE <$ addErrL (mkBadAltMsg scrut_ty alt)

{- Note [Validating multiplicities in a case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose 'MkT :: a %m -> T m a'. If we are validating 'case (x :: T Many a) of
MkT y -> ...', we have to substitute m := Many in the type of MkT - in
particular, y can be used Many times and that expression would still be linear
in x. We do this by looking at con_payload_ty, which is the type of the datacon
applied to the surrounding arguments.
Testcase: linear/should_compile/MultConstructor

Data constructors containing existential tyvars will then have Named binders,
which are always multiplicity Many.
Testcase: indexed-types/should_compile/GADT1
-}

lintLinearBinder :: SDoc -> Mult -> Mult -> LintM ()
lintLinearBinder doc actual_usage described_usage
  = ensureSubMult actual_usage described_usage err_msg
    where
      err_msg = (text "Multiplicity of variable does not agree with its context"
                 $$ doc
                 $$ ppr actual_usage
                 $$ text "Annotation:" <+> ppr described_usage)

{-
************************************************************************
*                                                                      *
\subsection[lint-types]{Types}
*                                                                      *
************************************************************************
-}

-- When we lint binders, we (one at a time and in order):
--  1. Lint var types or kinds (possibly substituting)
--  2. Add the binder to the in scope set, and if its a coercion var,
--     we may extend the substitution to reflect its (possibly) new kind
lintBinders :: BindingSite -> [Var] -> ([Var] -> LintM a) -> LintM a
lintBinders _    []         linterF = linterF []
lintBinders site (var:vars) linterF = lintBinder site var $ \var' ->
                                      lintBinders site vars $ \ vars' ->
                                      linterF (var':vars')

-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lintBinder :: BindingSite -> Var -> (Var -> LintM a) -> LintM a
lintBinder site var linterF
  | isTyCoVar var = lintTyCoBndr var linterF
  | otherwise     = lintIdBndr NotTopLevel site var linterF

lintTyBndr :: TyVar -> (LintedTyCoVar -> LintM a) -> LintM a
lintTyBndr = lintTyCoBndr -- We could specialise it, I guess

-- lintCoBndr :: CoVar -> (LintedTyCoVar -> LintM a) -> LintM a
-- lintCoBndr = lintTyCoBndr -- We could specialise it, I guess

lintTyCoBndr :: TyCoVar -> (LintedTyCoVar -> LintM a) -> LintM a
lintTyCoBndr tcv thing_inside
  = do { subst <- getTCvSubst
       ; kind' <- lintType (varType tcv)
       ; let tcv' = uniqAway (getTCvInScope subst) $
                    setVarType tcv kind'
             subst' = extendTCvSubstWithClone subst tcv tcv'
       ; when (isCoVar tcv) $
         lintL (isCoVarType kind')
               (text "CoVar with non-coercion type:" <+> pprTyVar tcv)
       ; updateTCvSubst subst' (thing_inside tcv') }
lintIdBndrs :: forall a. TopLevelFlag -> [Id] -> ([LintedId] -> LintM a) -> LintM a
-- Lint a list of Ids as let-binders, left to right, bringing each
-- into scope for the remainder of the list and for the continuation.
lintIdBndrs top_lvl ids thing_inside
  = go ids thing_inside
  where
    go :: [Id] -> ([Id] -> LintM a) -> LintM a
    go []       thing_inside = thing_inside []
    go (id:ids) thing_inside = lintIdBndr top_lvl LetBind id $ \id' ->
                               go ids $ \ids' ->
                               thing_inside (id' : ids')

lintIdBndr :: TopLevelFlag -> BindingSite
           -> InVar -> (OutVar -> LintM a) -> LintM a
-- Do substitution on the type of a binder and add the var with this
-- new type to the in-scope set of the second argument
-- ToDo: lint its rules
lintIdBndr top_lvl bind_site id thing_inside
  = assertPpr (isId id) (ppr id) $
    do { flags <- getLintFlags
       ; checkL (not (lf_check_global_ids flags) || isLocalId id)
                (text "Non-local Id binder" <+> ppr id)
                -- See Note [Checking for global Ids]

       -- Check that if the binder is nested, it is not marked as exported
       ; checkL (not (isExportedId id) || is_top_lvl)
           (mkNonTopExportedMsg id)

       -- Check that if the binder is nested, it does not have an external name
       ; checkL (not (isExternalName (Var.varName id)) || is_top_lvl)
           (mkNonTopExternalNameMsg id)

       -- See Note [Representation polymorphism invariants] in GHC.Core
       ; lintL (isJoinId id || not (lf_check_fixed_rep flags)
                || typeHasFixedRuntimeRep id_ty) $
         text "Binder does not have a fixed runtime representation:" <+> ppr id <+> dcolon <+>
         parens (ppr id_ty <+> dcolon <+> ppr (typeKind id_ty))

       -- Check that a join-id is a not-top-level let-binding
       ; when (isJoinId id) $
         checkL (not is_top_lvl && is_let_bind) $
         mkBadJoinBindMsg id

       -- Check that the Id does not have type (t1 ~# t2) or (t1 ~R# t2);
       -- if so, it should be a CoVar, and checked by lintCoVarBndr
       ; lintL (not (isCoVarType id_ty))
               (text "Non-CoVar has coercion type" <+> ppr id <+> dcolon <+> ppr id_ty)

       -- Check that the lambda binder has no value or OtherCon unfolding.
       -- See #21496
       ; lintL (not (bind_site == LambdaBind && isEvaldUnfolding (idUnfolding id)))
               (text "Lambda binder with value or OtherCon unfolding.")

       ; linted_ty <- addLoc (IdTy id) (lintValueType id_ty)

       ; addInScopeId id linted_ty $
         thing_inside (setIdType id linted_ty) }
  where
    id_ty = idType id

    is_top_lvl  = isTopLevel top_lvl
    is_let_bind = case bind_site of
                    LetBind -> True
                    _       -> False

{-
%************************************************************************
%*                                                                      *
             Types
%*                                                                      *
%************************************************************************
-}

lintValueType :: Type -> LintM LintedType
-- Types only, not kinds
-- Check the type, and apply the substitution to it
-- See Note [Linting type lets]
lintValueType ty
  = addLoc (InType ty) $
    do  { ty' <- lintType ty
        ; let sk = typeKind ty'
        ; lintL (classifiesTypeWithValues sk) $
          hang (text "Ill-kinded type:" <+> ppr ty)
             2 (text "has kind:" <+> ppr sk)
        ; return ty' }

checkTyCon :: TyCon -> LintM ()
-- TcTyCons are a type-checker-only construct and must not survive into Core.
checkTyCon tc
  = checkL (not (isTcTyCon tc)) (text "Found TcTyCon:" <+> ppr tc)

-------------------
lintType :: Type -> LintM LintedType

-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lintType (TyVarTy tv)
  | not (isTyVar tv)
  = failWithL (mkBadTyVarMsg tv)
  | otherwise
  = do { subst <- getTCvSubst
       ; case lookupTyVar subst tv of
           Just linted_ty -> return linted_ty

           -- In GHCi we may lint an expression with a free
           -- type variable.  Then it won't be in the
           -- substitution, but it should be in scope
           Nothing | tv `isInScope` subst
                   -> return (TyVarTy tv)
                   | otherwise
                   -> failWithL $
                      hang (text "The type variable" <+> pprBndr LetBind tv)
                         2 (text "is out of scope")
     }

lintType ty@(AppTy t1 t2)
  | TyConApp {} <- t1
  = failWithL $ text "TyConApp to the left of AppTy:" <+> ppr ty
  | otherwise
  = do { t1' <- lintType t1
       ; t2' <- lintType t2
       ; lint_ty_app ty (typeKind t1') [t2']
       ; return (AppTy t1' t2') }

lintType ty@(TyConApp tc tys)
  | isTypeSynonymTyCon tc || isTypeFamilyTyCon tc
  = do { report_unsat <- lf_report_unsat_syns <$> getLintFlags
       ; lintTySynFamApp report_unsat ty tc tys }

  | isFunTyCon tc
  , tys `lengthIs` 5
    -- We should never see a saturated application of funTyCon; such
    -- applications should be represented with the FunTy constructor.
    -- See Note [Linting function types] and
    -- Note [Representation of function types].
  = failWithL (hang (text "Saturated application of (->)") 2 (ppr ty))

  | otherwise  -- Data types, data families, primitive types
  = do { checkTyCon tc
       ; tys' <- mapM lintType tys
       ; lint_ty_app ty (tyConKind tc) tys'
       ; return (TyConApp tc tys') }

-- arrows can relate *unlifted* kinds, so this has to be separate from
-- a dependent forall.
lintType ty@(FunTy af tw t1 t2)
  = do { t1' <- lintType t1
       ; t2' <- lintType t2
       ; tw' <- lintType tw
       ; lintArrow (text "type or kind" <+> quotes (ppr ty)) t1' t2' tw'
       ; return (FunTy af tw' t1' t2') }

lintType ty@(ForAllTy (Bndr tcv vis) body_ty)
  | not (isTyCoVar tcv)
  = failWithL (text "Non-Tyvar or Non-Covar bound in type:" <+> ppr ty)
  | otherwise
  = lintTyCoBndr tcv $ \tcv' ->
    do { body_ty' <- lintType body_ty
       ; lintForAllBody tcv' body_ty'

       ; when (isCoVar tcv) $
         lintL (tcv `elemVarSet` tyCoVarsOfType body_ty) $
         text "Covar does not occur in the body:" <+> (ppr tcv $$ ppr body_ty)
         -- See GHC.Core.TyCo.Rep Note [Unused coercion variable in ForAllTy]
         -- and cf GHC.Core.Coercion Note [Unused coercion variable in ForAllCo]

       ; return (ForAllTy (Bndr tcv' vis) body_ty') }

lintType ty@(LitTy l)
  = do { lintTyLit l; return ty }

lintType (CastTy ty co)
  = do { ty' <- lintType ty
       ; co' <- lintStarCoercion co
       ; let tyk = typeKind ty'
             cok = coercionLKind co'
       ; ensureEqTys tyk cok (mkCastTyErr ty co tyk cok)
       ; return (CastTy ty' co') }

lintType (CoercionTy co)
  = do { co' <- lintCoercion co
       ; return (CoercionTy co') }

-----------------
lintForAllBody :: LintedTyCoVar -> LintedType -> LintM ()
-- Do the checks for the body of a forall-type
lintForAllBody tcv body_ty
  = do { checkValueType body_ty (text "the body of forall:" <+> ppr body_ty)

        -- For type variables, check for skolem escape
        -- See Note [Phantom type variables in kinds] in GHC.Core.Type
        -- The kind of (forall cv. th) is liftedTypeKind, so no
        -- need to check for skolem-escape in the CoVar case
       ; let body_kind = typeKind body_ty
       ; when (isTyVar tcv) $
         case occCheckExpand [tcv] body_kind of
           Just {} -> return ()
           Nothing -> failWithL $
                      hang (text "Variable escape in forall:")
                         2 (vcat [ text "tyvar:" <+> ppr tcv
                                 , text "type:" <+> ppr body_ty
                                 , text "kind:" <+> ppr body_kind ])
       }

-----------------
lintTySynFamApp :: Bool -> InType -> TyCon -> [InType] -> LintM LintedType
-- The TyCon is a type synonym or a type family (not a data family)
-- See Note [Linting type synonym applications]
-- c.f. GHC.Tc.Validity.check_syn_tc_app
lintTySynFamApp report_unsat ty tc tys
  | report_unsat   -- Report unsaturated only if report_unsat is on
  , tys `lengthLessThan` tyConArity tc
  = failWithL (hang (text "Un-saturated type application") 2 (ppr ty))

  -- Deal with type synonyms
  | Just (tenv, rhs, tys') <- expandSynTyCon_maybe tc tys
  , let expanded_ty = mkAppTys (substTy (mkTvSubstPrs tenv) rhs) tys'
  = do { -- Kind-check the argument types, but without reporting
         -- un-saturated type families/synonyms
         tys' <- setReportUnsat False (mapM lintType tys)

       ; when report_unsat $
         do { _ <- lintType expanded_ty
            ; return () }

       ; lint_ty_app ty (tyConKind tc) tys'
       ; return (TyConApp tc tys') }

  -- Otherwise this must be a type family
  | otherwise
  = do { tys' <- mapM lintType tys
       ; lint_ty_app ty (tyConKind tc) tys'
       ; return (TyConApp tc tys') }

-----------------
-- Confirms that a type is really TYPE r or Constraint
checkValueType :: LintedType -> SDoc -> LintM ()
checkValueType ty doc
  = lintL (classifiesTypeWithValues kind)
          (text "Non-Type-like kind when Type-like expected:" <+> ppr kind $$
           text "when checking" <+> doc)
  where
    kind = typeKind ty

-----------------
lintArrow :: SDoc -> LintedType -> LintedType -> LintedType -> LintM ()
-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lintArrow what t1 t2 tw  -- Eg lintArrow "type or kind `blah'" k1 k2 kw
                         -- or lintArrow "coercion `blah'" k1 k2 kw
  = do { unless (classifiesTypeWithValues k1) (addErrL (msg (text "argument") k1))
       ; unless (classifiesTypeWithValues k2) (addErrL (msg (text "result")   k2))
       ; unless (isMultiplicityTy kw)         (addErrL (msg (text "multiplicity") kw)) }
  where
    k1 = typeKind t1
    k2 = typeKind t2
    kw = typeKind tw
    msg ar k
      = vcat [ hang (text "Ill-kinded" <+> ar)
                  2 (text "in" <+> what)
             , what <+> text "kind:" <+> ppr k ]

-----------------
lint_ty_app :: Type -> LintedKind -> [LintedType] -> LintM ()
lint_ty_app ty k tys
  = lint_app (text "type" <+> quotes (ppr ty)) k tys

----------------
lint_co_app :: Coercion -> LintedKind -> [LintedType] -> LintM ()
lint_co_app ty k tys
  = lint_app (text "coercion" <+> quotes (ppr ty)) k tys

----------------
lintTyLit :: TyLit -> LintM ()
lintTyLit (NumTyLit n)
  | n >= 0    = return ()
  | otherwise = failWithL msg
    where msg = text "Negative type literal:" <+> integer n
lintTyLit (StrTyLit _)  = return ()
lintTyLit (CharTyLit _) = return ()

lint_app :: SDoc -> LintedKind -> [LintedType] -> LintM ()
-- (lint_app d fun_kind arg_tys)
--    We have an application (f arg_ty1 .. arg_tyn),
--    where f :: fun_kind

-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism]
lint_app doc kfn arg_tys
    = do { in_scope <- getInScope
         -- We need the in_scope set to satisfy the invariant in
         -- Note [The substitution invariant] in GHC.Core.TyCo.Subst
         ; _ <- foldlM (go_app in_scope) kfn arg_tys
         ; return () }
  where
    fail_msg extra = vcat [ hang (text "Kind application error in") 2 doc
                          , nest 2 (text "Function kind =" <+> ppr kfn)
                          , nest 2 (text "Arg types =" <+> ppr arg_tys)
                          , extra ]

    go_app in_scope kfn ta
      | Just kfn' <- coreView kfn
      = go_app in_scope kfn' ta

    go_app _ fun_kind@(FunTy _ _ kfa kfb) ta
      = do { let ka = typeKind ta
           ; unless (ka `eqType` kfa) $
             addErrL (fail_msg (text "Fun:" <+> (ppr fun_kind $$ ppr ta <+> dcolon <+> ppr ka)))
           ; return kfb }

    go_app in_scope (ForAllTy (Bndr kv _vis) kfn) ta
      = do { let kv_kind = varType kv
                 ka      = typeKind ta
           ; unless (ka `eqType` kv_kind) $
             addErrL (fail_msg (text "Forall:" <+> (ppr kv $$ ppr kv_kind $$ ppr ta <+> dcolon <+> ppr ka)))
           ; return $ substTy (extendTCvSubst (mkEmptyTCvSubst in_scope) kv ta) kfn }

    go_app _ kfn ta
       = failWithL (fail_msg (text "Not a fun:" <+> (ppr kfn $$ ppr ta)))

{- *********************************************************************
*                                                                      *
        Linting rules
*                                                                      *
********************************************************************* -}

lintCoreRule :: OutVar -> LintedType -> CoreRule -> LintM ()
lintCoreRule _ _ (BuiltinRule {})
  = return ()  -- Don't bother

lintCoreRule fun fun_ty rule@(Rule { ru_name = name, ru_bndrs = bndrs
                                   , ru_args = args, ru_rhs = rhs })
  = lintBinders LambdaBind bndrs $ \ _ ->
    do { (lhs_ty, _) <- lintCoreArgs (fun_ty, zeroUE) args
       ; (rhs_ty, _) <- case isJoinId_maybe fun of
                     Just join_arity
                       -> do { checkL (args `lengthIs` join_arity) $
                                mkBadJoinPointRuleMsg fun join_arity rule
                               -- See Note [Rules for join points]
                             ; lintCoreExpr rhs }
                     _ -> markAllJoinsBad $ lintCoreExpr rhs
       ; ensureEqTys lhs_ty rhs_ty $
         (rule_doc <+> vcat [ text "lhs type:" <+> ppr lhs_ty
                            , text "rhs type:" <+> ppr rhs_ty
                            , text "fun_ty:" <+> ppr fun_ty ])
       ; let bad_bndrs = filter is_bad_bndr bndrs

       ; checkL (null bad_bndrs)
                (rule_doc <+> text "unbound" <+> ppr bad_bndrs)
            -- See Note [Linting rules]
    }
  where
    rule_doc = text "Rule" <+> doubleQuotes (ftext name) <> colon

    lhs_fvs = exprsFreeVars args
    rhs_fvs = exprFreeVars rhs

    is_bad_bndr :: Var -> Bool
    -- See Note [Unbound RULE binders] in GHC.Core.Rules
    is_bad_bndr bndr = not (bndr `elemVarSet` lhs_fvs)
                    && bndr `elemVarSet` rhs_fvs
                    && isNothing (isReflCoVar_maybe bndr)

{- Note [Linting rules]
~~~~~~~~~~~~~~~~~~~~~~~
It's very bad if simplifying a rule means that one of the template
variables (ru_bndrs) that /is/ mentioned on the RHS becomes
not-mentioned in the LHS (ru_args).  How can that happen?  Well, in #10602,
SpecConstr stupidly constructed a rule like

  forall x,c1,c2.
     f (x |> c1 |> c2) = ....

But simplExpr collapses those coercions into one.  (Indeed in #10602,
it collapsed to the identity and was removed altogether.)

We don't have a great story for what to do here, but at least
this check will nail it.

NB (#11643): it's possible that a variable listed in the
binders becomes not-mentioned on both LHS and RHS.  Here's a silly
example:
   RULE forall x y. f (g x y) = g (x+1) (y-1)
And suppose worker/wrapper decides that 'x' is Absent.  Then
we'll end up with
   RULE forall x y. f ($gw y) = $gw (x+1)
This seems sufficiently obscure that there isn't enough payoff to
try to trim the forall'd binder list.

Note [Rules for join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A join point cannot be partially applied. However, the left-hand side of a rule
for a join point is effectively a *pattern*, not a piece of code, so there's an
argument to be made for allowing a situation like this:

  join $sj :: Int -> Int -> String
       $sj n m = ...
       j :: forall a. Eq a => a -> a -> String
       {-# RULES "SPEC j" jump j @ Int $dEq = jump $sj #-}
       j @a $dEq x y = ...

Applying this rule can't turn a well-typed program into an ill-typed one, so
conceivably we could allow it. But we can always eta-expand such an
"undersaturated" rule (see 'GHC.Core.Opt.Arity.etaExpandToJoinPointRule'), and
in fact the simplifier would have to in order to deal with the RHS. So we take a
conservative view and don't allow undersaturated rules for join points. See
Note [Join points and unfoldings/rules] in "GHC.Core.Opt.OccurAnal" for further
discussion.
-}

{-
************************************************************************
*                                                                      *
         Linting coercions
*                                                                      *
************************************************************************
-}

{- Note [Asymptotic efficiency]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When linting coercions (and types actually) we return a linted
(substituted) coercion.  Then we often have to take the coercionKind of
that returned coercion.  If we get long chains, that can be asymptotically
inefficient, notably in
* TransCo
* InstCo
* NthCo (cf #9233)
* LRCo

But the code is simple.  And this is only Lint.  Let's wait to see if
the bad perf bites us in practice.

A solution would be to return the kind and role of the coercion,
as well as the linted coercion.  Or perhaps even *only* the kind and role,
which is what used to happen.   But that proved tricky and error prone
(#17923), so now we return the coercion.
-} -- lints a coercion, confirming that its lh kind and its rh kind are both * -- also ensures that the role is Nominal lintStarCoercion :: InCoercion -> LintM LintedCoercion lintStarCoercion g = do { g' <- lintCoercion g ; let Pair t1 t2 = coercionKind g' ; checkValueType t1 (text "the kind of the left type in" <+> ppr g) ; checkValueType t2 (text "the kind of the right type in" <+> ppr g) ; lintRole g Nominal (coercionRole g) ; return g' } lintCoercion :: InCoercion -> LintM LintedCoercion -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] lintCoercion (CoVarCo cv) | not (isCoVar cv) = failWithL (hang (text "Bad CoVarCo:" <+> ppr cv) 2 (text "With offending type:" <+> ppr (varType cv))) | otherwise = do { subst <- getTCvSubst ; case lookupCoVar subst cv of Just linted_co -> return linted_co ; Nothing | cv `isInScope` subst -> return (CoVarCo cv) | otherwise -> -- lintCoBndr always extends the substitition failWithL $ hang (text "The coercion variable" <+> pprBndr LetBind cv) 2 (text "is out of scope") } lintCoercion (Refl ty) = do { ty' <- lintType ty ; return (Refl ty') } lintCoercion (GRefl r ty MRefl) = do { ty' <- lintType ty ; return (GRefl r ty' MRefl) } lintCoercion (GRefl r ty (MCo co)) = do { ty' <- lintType ty ; co' <- lintCoercion co ; let tk = typeKind ty' tl = coercionLKind co' ; ensureEqTys tk tl $ hang (text "GRefl coercion kind mis-match:" <+> ppr co) 2 (vcat [ppr ty', ppr tk, ppr tl]) ; lintRole co' Nominal (coercionRole co') ; return (GRefl r ty' (MCo co')) } lintCoercion co@(TyConAppCo r tc cos) | tc `hasKey` funTyConKey , [_w, _rep1,_rep2,_co1,_co2] <- cos = failWithL (text "Saturated TyConAppCo (->):" <+> ppr co) -- All saturated TyConAppCos should be FunCos | Just {} <- synTyConDefn_maybe tc = failWithL (text "Synonym in TyConAppCo:" <+> ppr co) | otherwise = do { checkTyCon tc ; cos' <- mapM lintCoercion cos ; let (co_kinds, co_roles) = unzip (map coercionKindRole cos') ; lint_co_app co 
(tyConKind tc) (map pFst co_kinds) ; lint_co_app co (tyConKind tc) (map pSnd co_kinds) ; zipWithM_ (lintRole co) (tyConRolesX r tc) co_roles ; return (TyConAppCo r tc cos') } lintCoercion co@(AppCo co1 co2) | TyConAppCo {} <- co1 = failWithL (text "TyConAppCo to the left of AppCo:" <+> ppr co) | Just (TyConApp {}, _) <- isReflCo_maybe co1 = failWithL (text "Refl (TyConApp ...) to the left of AppCo:" <+> ppr co) | otherwise = do { co1' <- lintCoercion co1 ; co2' <- lintCoercion co2 ; let (Pair lk1 rk1, r1) = coercionKindRole co1' (Pair lk2 rk2, r2) = coercionKindRole co2' ; lint_co_app co (typeKind lk1) [lk2] ; lint_co_app co (typeKind rk1) [rk2] ; if r1 == Phantom then lintL (r2 == Phantom || r2 == Nominal) (text "Second argument in AppCo cannot be R:" $$ ppr co) else lintRole co Nominal r2 ; return (AppCo co1' co2') } ---------- lintCoercion co@(ForAllCo tcv kind_co body_co) | not (isTyCoVar tcv) = failWithL (text "Non tyco binder in ForAllCo:" <+> ppr co) | otherwise = do { kind_co' <- lintStarCoercion kind_co ; lintTyCoBndr tcv $ \tcv' -> do { body_co' <- lintCoercion body_co ; ensureEqTys (varType tcv') (coercionLKind kind_co') $ text "Kind mis-match in ForallCo" <+> ppr co -- Assuming kind_co :: k1 ~ k2 -- Need to check that -- (forall (tcv:k1). lty) and -- (forall (tcv:k2). rty[(tcv:k2) |> sym kind_co/tcv]) -- are both well formed. Easiest way is to call lintForAllBody -- for each; there is actually no need to do the funky substitution ; let Pair lty rty = coercionKind body_co' ; lintForAllBody tcv' lty ; lintForAllBody tcv' rty ; when (isCoVar tcv) $ lintL (almostDevoidCoVarOfCo tcv body_co) $ text "Covar can only appear in Refl and GRefl: " <+> ppr co -- See "last wrinkle" in GHC.Core.Coercion -- Note [Unused coercion variable in ForAllCo] -- and c.f. 
GHC.Core.TyCo.Rep Note [Unused coercion variable in ForAllTy] ; return (ForAllCo tcv' kind_co' body_co') } } lintCoercion co@(FunCo r cow co1 co2) = do { co1' <- lintCoercion co1 ; co2' <- lintCoercion co2 ; cow' <- lintCoercion cow ; let Pair lt1 rt1 = coercionKind co1 Pair lt2 rt2 = coercionKind co2 Pair ltw rtw = coercionKind cow ; lintArrow (text "coercion" <+> quotes (ppr co)) lt1 lt2 ltw ; lintArrow (text "coercion" <+> quotes (ppr co)) rt1 rt2 rtw ; lintRole co1 r (coercionRole co1) ; lintRole co2 r (coercionRole co2) ; ensureEqTys (typeKind ltw) multiplicityTy (text "coercion" <> quotes (ppr co)) ; ensureEqTys (typeKind rtw) multiplicityTy (text "coercion" <> quotes (ppr co)) ; let expected_mult_role = case r of Phantom -> Phantom _ -> Nominal ; lintRole cow expected_mult_role (coercionRole cow) ; return (FunCo r cow' co1' co2') } -- See Note [Bad unsafe coercion] lintCoercion co@(UnivCo prov r ty1 ty2) = do { ty1' <- lintType ty1 ; ty2' <- lintType ty2 ; let k1 = typeKind ty1' k2 = typeKind ty2' ; prov' <- lint_prov k1 k2 prov ; when (r /= Phantom && classifiesTypeWithValues k1 && classifiesTypeWithValues k2) (checkTypes ty1 ty2) ; return (UnivCo prov' r ty1' ty2') } where report s = hang (text $ "Unsafe coercion: " ++ s) 2 (vcat [ text "From:" <+> ppr ty1 , text " To:" <+> ppr ty2]) isUnBoxed :: PrimRep -> Bool isUnBoxed = not . 
isGcPtrRep -- see #9122 for discussion of these checks checkTypes t1 t2 | allow_ill_kinded_univ_co prov = return () -- Skip kind checks | otherwise = do { checkWarnL fixed_rep_1 (report "left-hand type does not have a fixed runtime representation") ; checkWarnL fixed_rep_2 (report "right-hand type does not have a fixed runtime representation") ; when (fixed_rep_1 && fixed_rep_2) $ do { checkWarnL (reps1 `equalLength` reps2) (report "between values with different # of reps") ; zipWithM_ validateCoercion reps1 reps2 }} where fixed_rep_1 = typeHasFixedRuntimeRep t1 fixed_rep_2 = typeHasFixedRuntimeRep t2 -- don't look at these unless lev_poly1/2 are False -- Otherwise, we get #13458 reps1 = typePrimRep t1 reps2 = typePrimRep t2 -- CorePrep deliberately makes ill-kinded casts -- e.g (case error @Int "blah" of {}) :: Int# -- ==> (error @Int "blah") |> Unsafe Int Int# -- See Note [Unsafe coercions] in GHC.Core.CoreToStg.Prep allow_ill_kinded_univ_co (CorePrepProv homo_kind) = not homo_kind allow_ill_kinded_univ_co _ = False validateCoercion :: PrimRep -> PrimRep -> LintM () validateCoercion rep1 rep2 = do { platform <- targetPlatform <$> getDynFlags ; checkWarnL (isUnBoxed rep1 == isUnBoxed rep2) (report "between unboxed and boxed value") ; checkWarnL (TyCon.primRepSizeB platform rep1 == TyCon.primRepSizeB platform rep2) (report "between unboxed values of different size") ; let fl = liftM2 (==) (TyCon.primRepIsFloat rep1) (TyCon.primRepIsFloat rep2) ; case fl of Nothing -> addWarnL (report "between vector types") Just False -> addWarnL (report "between float and integral values") _ -> return () } lint_prov k1 k2 (PhantomProv kco) = do { kco' <- lintStarCoercion kco ; lintRole co Phantom r ; check_kinds kco' k1 k2 ; return (PhantomProv kco') } lint_prov k1 k2 (ProofIrrelProv kco) = do { lintL (isCoercionTy ty1) (mkBadProofIrrelMsg ty1 co) ; lintL (isCoercionTy ty2) (mkBadProofIrrelMsg ty2 co) ; kco' <- lintStarCoercion kco ; check_kinds kco k1 k2 ; return (ProofIrrelProv 
kco') } lint_prov _ _ prov@(PluginProv _) = return prov lint_prov _ _ prov@(CorePrepProv _) = return prov check_kinds kco k1 k2 = do { let Pair k1' k2' = coercionKind kco ; ensureEqTys k1 k1' (mkBadUnivCoMsg CLeft co) ; ensureEqTys k2 k2' (mkBadUnivCoMsg CRight co) } lintCoercion (SymCo co) = do { co' <- lintCoercion co ; return (SymCo co') } lintCoercion co@(TransCo co1 co2) = do { co1' <- lintCoercion co1 ; co2' <- lintCoercion co2 ; let ty1b = coercionRKind co1' ty2a = coercionLKind co2' ; ensureEqTys ty1b ty2a (hang (text "Trans coercion mis-match:" <+> ppr co) 2 (vcat [ppr (coercionKind co1'), ppr (coercionKind co2')])) ; lintRole co (coercionRole co1) (coercionRole co2) ; return (TransCo co1' co2') } lintCoercion the_co@(NthCo r0 n co) = do { co' <- lintCoercion co ; let (Pair s t, r) = coercionKindRole co' ; case (splitForAllTyCoVar_maybe s, splitForAllTyCoVar_maybe t) of { (Just _, Just _) -- works for both tyvar and covar | n == 0 , (isForAllTy_ty s && isForAllTy_ty t) || (isForAllTy_co s && isForAllTy_co t) -> do { lintRole the_co Nominal r0 ; return (NthCo r0 n co') } ; _ -> case (splitTyConApp_maybe s, splitTyConApp_maybe t) of { (Just (tc_s, tys_s), Just (tc_t, tys_t)) | tc_s == tc_t , isInjectiveTyCon tc_s r -- see Note [NthCo and newtypes] in GHC.Core.TyCo.Rep , tys_s `equalLength` tys_t , tys_s `lengthExceeds` n -> do { lintRole the_co tr r0 ; return (NthCo r0 n co') } where tr = nthRole r tc_s n ; _ -> failWithL (hang (text "Bad getNth:") 2 (ppr the_co $$ ppr s $$ ppr t)) }}} lintCoercion the_co@(LRCo lr co) = do { co' <- lintCoercion co ; let Pair s t = coercionKind co' r = coercionRole co' ; lintRole co Nominal r ; case (splitAppTy_maybe s, splitAppTy_maybe t) of (Just _, Just _) -> return (LRCo lr co') _ -> failWithL (hang (text "Bad LRCo:") 2 (ppr the_co $$ ppr s $$ ppr t)) } lintCoercion (InstCo co arg) = do { co' <- lintCoercion co ; arg' <- lintCoercion arg ; let Pair t1 t2 = coercionKind co' Pair s1 s2 = coercionKind arg' ; lintRole arg 
Nominal (coercionRole arg') ; case (splitForAllTyVar_maybe t1, splitForAllTyVar_maybe t2) of -- forall over tvar { (Just (tv1,_), Just (tv2,_)) | typeKind s1 `eqType` tyVarKind tv1 , typeKind s2 `eqType` tyVarKind tv2 -> return (InstCo co' arg') | otherwise -> failWithL (text "Kind mis-match in inst coercion1" <+> ppr co) ; _ -> case (splitForAllCoVar_maybe t1, splitForAllCoVar_maybe t2) of -- forall over covar { (Just (cv1, _), Just (cv2, _)) | typeKind s1 `eqType` varType cv1 , typeKind s2 `eqType` varType cv2 , CoercionTy _ <- s1 , CoercionTy _ <- s2 -> return (InstCo co' arg') | otherwise -> failWithL (text "Kind mis-match in inst coercion2" <+> ppr co) ; _ -> failWithL (text "Bad argument of inst") }}} lintCoercion co@(AxiomInstCo con ind cos) = do { unless (0 <= ind && ind < numBranches (coAxiomBranches con)) (bad_ax (text "index out of range")) ; let CoAxBranch { cab_tvs = ktvs , cab_cvs = cvs , cab_roles = roles } = coAxiomNthBranch con ind ; unless (cos `equalLength` (ktvs ++ cvs)) $ bad_ax (text "lengths") ; cos' <- mapM lintCoercion cos ; subst <- getTCvSubst ; let empty_subst = zapTCvSubst subst ; _ <- foldlM check_ki (empty_subst, empty_subst) (zip3 (ktvs ++ cvs) roles cos') ; let fam_tc = coAxiomTyCon con ; case checkAxInstCo co of Just bad_branch -> bad_ax $ text "inconsistent with" <+> pprCoAxBranch fam_tc bad_branch Nothing -> return () ; return (AxiomInstCo con ind cos') } where bad_ax what = addErrL (hang (text "Bad axiom application" <+> parens what) 2 (ppr co)) check_ki (subst_l, subst_r) (ktv, role, arg') = do { let Pair s' t' = coercionKind arg' sk' = typeKind s' tk' = typeKind t' ; lintRole arg' role (coercionRole arg') ; let ktv_kind_l = substTy subst_l (tyVarKind ktv) ktv_kind_r = substTy subst_r (tyVarKind ktv) ; unless (sk' `eqType` ktv_kind_l) (bad_ax (text "check_ki1" <+> vcat [ ppr co, ppr sk', ppr ktv, ppr ktv_kind_l ] )) ; unless (tk' `eqType` ktv_kind_r) (bad_ax (text "check_ki2" <+> vcat [ ppr co, ppr tk', ppr ktv, ppr ktv_kind_r 
] )) ; return (extendTCvSubst subst_l ktv s', extendTCvSubst subst_r ktv t') } lintCoercion (KindCo co) = do { co' <- lintCoercion co ; return (KindCo co') } lintCoercion (SubCo co') = do { co' <- lintCoercion co' ; lintRole co' Nominal (coercionRole co') ; return (SubCo co') } lintCoercion this@(AxiomRuleCo ax cos) = do { cos' <- mapM lintCoercion cos ; lint_roles 0 (coaxrAsmpRoles ax) cos' ; case coaxrProves ax (map coercionKind cos') of Nothing -> err "Malformed use of AxiomRuleCo" [ ppr this ] Just _ -> return (AxiomRuleCo ax cos') } where err :: forall a. String -> [SDoc] -> LintM a err m xs = failWithL $ hang (text m) 2 $ vcat (text "Rule:" <+> ppr (coaxrName ax) : xs) lint_roles n (e : es) (co : cos) | e == coercionRole co = lint_roles (n+1) es cos | otherwise = err "Argument roles mismatch" [ text "In argument:" <+> int (n+1) , text "Expected:" <+> ppr e , text "Found:" <+> ppr (coercionRole co) ] lint_roles _ [] [] = return () lint_roles n [] rs = err "Too many coercion arguments" [ text "Expected:" <+> int n , text "Provided:" <+> int (n + length rs) ] lint_roles n es [] = err "Not enough coercion arguments" [ text "Expected:" <+> int (n + length es) , text "Provided:" <+> int n ] lintCoercion (HoleCo h) = do { addErrL $ text "Unfilled coercion hole:" <+> ppr h ; lintCoercion (CoVarCo (coHoleCoVar h)) } {- ************************************************************************ * * Axioms * * ************************************************************************ -} lintAxioms :: Logger -> DynFlags -> SDoc -- ^ The source of the linted axioms -> [CoAxiom Branched] -> IO () lintAxioms logger dflags what axioms = displayLintResults logger True what (vcat $ map pprCoAxiom axioms) $ initL dflags (defaultLintFlags dflags) [] $ do { mapM_ lint_axiom axioms ; let axiom_groups = groupWith coAxiomTyCon axioms ; mapM_ lint_axiom_group axiom_groups } lint_axiom :: CoAxiom Branched -> LintM () lint_axiom ax@(CoAxiom { co_ax_tc = tc, co_ax_branches = branches , 
co_ax_role = ax_role }) = addLoc (InAxiom ax) $ do { mapM_ (lint_branch tc) branch_list ; extra_checks } where branch_list = fromBranches branches extra_checks | isNewTyCon tc = do { CoAxBranch { cab_tvs = tvs , cab_eta_tvs = eta_tvs , cab_cvs = cvs , cab_roles = roles , cab_lhs = lhs_tys } <- case branch_list of [branch] -> return branch _ -> failWithL (text "multi-branch axiom with newtype") ; let ax_lhs = mkInfForAllTys tvs $ mkTyConApp tc lhs_tys nt_tvs = takeList tvs (tyConTyVars tc) -- axiom may be eta-reduced: Note [Newtype eta] in GHC.Core.TyCon nt_lhs = mkInfForAllTys nt_tvs $ mkTyConApp tc (mkTyVarTys nt_tvs) -- See Note [Newtype eta] in GHC.Core.TyCon ; lintL (ax_lhs `eqType` nt_lhs) (text "Newtype axiom LHS does not match newtype definition") ; lintL (null cvs) (text "Newtype axiom binds coercion variables") ; lintL (null eta_tvs) -- See Note [Eta reduction for data families] -- which is not about newtype axioms (text "Newtype axiom has eta-tvs") ; lintL (ax_role == Representational) (text "Newtype axiom role not representational") ; lintL (roles `equalLength` tvs) (text "Newtype axiom roles list is the wrong length." $$ text "roles:" <+> sep (map ppr roles)) ; lintL (roles == takeList roles (tyConRoles tc)) (vcat [ text "Newtype axiom roles do not match newtype tycon's." 
, text "axiom roles:" <+> sep (map ppr roles) , text "tycon roles:" <+> sep (map ppr (tyConRoles tc)) ]) } | isFamilyTyCon tc = do { if | isTypeFamilyTyCon tc -> lintL (ax_role == Nominal) (text "type family axiom is not nominal") | isDataFamilyTyCon tc -> lintL (ax_role == Representational) (text "data family axiom is not representational") | otherwise -> addErrL (text "A family TyCon is neither a type family nor a data family:" <+> ppr tc) ; mapM_ (lint_family_branch tc) branch_list } | otherwise = addErrL (text "Axiom tycon is neither a newtype nor a family.") lint_branch :: TyCon -> CoAxBranch -> LintM () lint_branch ax_tc (CoAxBranch { cab_tvs = tvs, cab_cvs = cvs , cab_lhs = lhs_args, cab_rhs = rhs }) = lintBinders LambdaBind (tvs ++ cvs) $ \_ -> do { let lhs = mkTyConApp ax_tc lhs_args ; lhs' <- lintType lhs ; rhs' <- lintType rhs ; let lhs_kind = typeKind lhs' rhs_kind = typeKind rhs' ; lintL (lhs_kind `eqType` rhs_kind) $ hang (text "Inhomogeneous axiom") 2 (text "lhs:" <+> ppr lhs <+> dcolon <+> ppr lhs_kind $$ text "rhs:" <+> ppr rhs <+> dcolon <+> ppr rhs_kind) } -- these checks do not apply to newtype axioms lint_family_branch :: TyCon -> CoAxBranch -> LintM () lint_family_branch fam_tc br@(CoAxBranch { cab_tvs = tvs , cab_eta_tvs = eta_tvs , cab_cvs = cvs , cab_roles = roles , cab_lhs = lhs , cab_incomps = incomps }) = do { lintL (isDataFamilyTyCon fam_tc || null eta_tvs) (text "Type family axiom has eta-tvs") ; lintL (all (`elemVarSet` tyCoVarsOfTypes lhs) tvs) (text "Quantified variable in family axiom unused in LHS") ; lintL (all isTyFamFree lhs) (text "Type family application on LHS of family axiom") ; lintL (all (== Nominal) roles) (text "Non-nominal role in family axiom" $$ text "roles:" <+> sep (map ppr roles)) ; lintL (null cvs) (text "Coercion variables bound in family axiom") ; forM_ incomps $ \ br' -> lintL (not (compatibleBranches br br')) $ hang (text "Incorrect incompatible branches:") 2 (vcat [text "Branch:" <+> ppr br, text "Bogus 
incomp:" <+> ppr br']) } lint_axiom_group :: NonEmpty (CoAxiom Branched) -> LintM () lint_axiom_group (_ :| []) = return () lint_axiom_group (ax :| axs) = do { lintL (isOpenFamilyTyCon tc) (text "Non-open-family with multiple axioms") ; let all_pairs = [ (ax1, ax2) | ax1 <- all_axs , ax2 <- all_axs ] ; mapM_ (lint_axiom_pair tc) all_pairs } where all_axs = ax : axs tc = coAxiomTyCon ax lint_axiom_pair :: TyCon -> (CoAxiom Branched, CoAxiom Branched) -> LintM () lint_axiom_pair tc (ax1, ax2) | Just br1@(CoAxBranch { cab_tvs = tvs1 , cab_lhs = lhs1 , cab_rhs = rhs1 }) <- coAxiomSingleBranch_maybe ax1 , Just br2@(CoAxBranch { cab_tvs = tvs2 , cab_lhs = lhs2 , cab_rhs = rhs2 }) <- coAxiomSingleBranch_maybe ax2 = lintL (compatibleBranches br1 br2) $ vcat [ hsep [ text "Axioms", ppr ax1, text "and", ppr ax2 , text "are incompatible" ] , text "tvs1 =" <+> pprTyVars tvs1 , text "lhs1 =" <+> ppr (mkTyConApp tc lhs1) , text "rhs1 =" <+> ppr rhs1 , text "tvs2 =" <+> pprTyVars tvs2 , text "lhs2 =" <+> ppr (mkTyConApp tc lhs2) , text "rhs2 =" <+> ppr rhs2 ] | otherwise = addErrL (text "Open type family axiom has more than one branch: either" <+> ppr ax1 <+> text "or" <+> ppr ax2) {- ************************************************************************ * * \subsection[lint-monad]{The Lint monad} * * ************************************************************************ -} -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] data LintEnv = LE { le_flags :: LintFlags -- Linting the result of this pass , le_loc :: [LintLocInfo] -- Locations , le_subst :: TCvSubst -- Current TyCo substitution -- See Note [Linting type lets] -- /Only/ substitutes for type variables; -- but might clone CoVars -- We also use le_subst to keep track of -- in-scope TyVars and CoVars (but not Ids) -- Range of the TCvSubst is LintedType/LintedCo , le_ids :: VarEnv (Id, LintedType) -- In-scope Ids -- Used to check that occurrences have an enclosing binder. 
-- The Id is /pre-substitution/, used to check that -- the occurrence has an identical type to the binder -- The LintedType is used to return the type of the occurrence, -- without having to lint it again. , le_joins :: IdSet -- Join points in scope that are valid -- A subset of the InScopeSet in le_subst -- See Note [Join points] , le_dynflags :: DynFlags -- DynamicFlags , le_ue_aliases :: NameEnv UsageEnv -- Assigns usage environments to the -- alias-like binders, as found in -- non-recursive lets. } data LintFlags = LF { lf_check_global_ids :: Bool -- See Note [Checking for global Ids] , lf_check_inline_loop_breakers :: Bool -- See Note [Checking for INLINE loop breakers] , lf_check_static_ptrs :: StaticPtrCheck -- ^ See Note [Checking StaticPtrs] , lf_report_unsat_syns :: Bool -- ^ See Note [Linting type synonym applications] , lf_check_linearity :: Bool -- ^ See Note [Linting linearity] , lf_check_fixed_rep :: Bool -- See Note [Checking for representation polymorphism] } -- See Note [Checking StaticPtrs] data StaticPtrCheck = AllowAnywhere -- ^ Allow 'makeStatic' to occur anywhere. | AllowAtTopLevel -- ^ Allow 'makeStatic' calls at the top-level only. | RejectEverywhere -- ^ Reject any 'makeStatic' occurrence. deriving Eq defaultLintFlags :: DynFlags -> LintFlags defaultLintFlags dflags = LF { lf_check_global_ids = False , lf_check_inline_loop_breakers = True , lf_check_static_ptrs = AllowAnywhere , lf_check_linearity = gopt Opt_DoLinearCoreLinting dflags , lf_report_unsat_syns = True , lf_check_fixed_rep = True } newtype LintM a = LintM { unLintM :: LintEnv -> WarnsAndErrs -> -- Warning and error messages so far (Maybe a, WarnsAndErrs) } -- Result and messages (if any) deriving (Functor) type WarnsAndErrs = (Bag SDoc, Bag SDoc) {- Note [Checking for global Ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before CoreTidy, all locally-bound Ids must be LocalIds, even top-level ones. See Note [Exported LocalIds] and #9857. 
Note [Checking StaticPtrs] ~~~~~~~~~~~~~~~~~~~~~~~~~~ See Note [Grand plan for static forms] in GHC.Iface.Tidy.StaticPtrTable for an overview. Every occurrence of the function 'makeStatic' should be moved to the top level by the FloatOut pass. It's vital that we don't have nested 'makeStatic' occurrences after CorePrep, because we populate the Static Pointer Table from the top-level bindings. See SimplCore Note [Grand plan for static forms]. The linter checks that no occurrence is left behind, nested within an expression. The check is enabled only after the FloatOut, CorePrep, and CoreTidy passes and only if the module uses the StaticPointers language extension. Checking more often doesn't help since the condition doesn't hold until after the first FloatOut pass. Note [Type substitution] ~~~~~~~~~~~~~~~~~~~~~~~~ Why do we need a type substitution? Consider /\(a:*). \(x:a). /\(a:*). id a x This is ill typed, because (renaming variables) it is really /\(a:*). \(x:a). /\(b:*). id b x Hence, when checking an application, we can't naively compare x's type (at its binding site) with its expected type (at a use site). So we rename type binders as we go, maintaining a substitution. The same substitution also supports let-type, current expressed as (/\(a:*). body) ty Here we substitute 'ty' for 'a' in 'body', on the fly. Note [Linting type synonym applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When linting a type-synonym, or type-family, application S ty1 .. tyn we behave as follows (#15057, #T15664): * If lf_report_unsat_syns = True, and S has arity < n, complain about an unsaturated type synonym or type family * Switch off lf_report_unsat_syns, and lint ty1 .. tyn. Reason: catch out of scope variables or other ill-kinded gubbins, even if S discards that argument entirely. E.g. (#15012): type FakeOut a = Int type family TF a type instance TF Int = FakeOut a Here 'a' is out of scope; but if we expand FakeOut, we conceal that out-of-scope error. 
Reason for switching off lf_report_unsat_syns: with LiberalTypeSynonyms, GHC allows unsaturated synonyms provided they are saturated when the type is expanded. Example type T f = f Int type S a = a -> a type Z = T S In Z's RHS, S appears unsaturated, but it is saturated when T is expanded. * If lf_report_unsat_syns is on, expand the synonym application and lint the result. Reason: want to check that synonyms are saturated when the type is expanded. Note [Linting linearity] ~~~~~~~~~~~~~~~~~~~~~~~~ There is one known optimisations that have not yet been updated to work with Linear Lint: * Optimisations can create a letrec which uses a variable linearly, e.g. letrec f True = f False f False = x in f True uses 'x' linearly, but this is not seen by the linter. Plan: make let-bound variables remember the usage environment. See ticket #18694. We plan to fix this issue in the very near future. For now, -dcore-lint enables only linting output of the desugarer, and full Linear Lint has to be enabled separately with -dlinear-core-lint. Ticket #19165 concerns enabling Linear Lint with -dcore-lint. Note [checkCanEtaExpand] ~~~~~~~~~~~~~~~~~~~~~~~~ The checkCanEtaExpand function is responsible for enforcing invariant I3 from Note [Representation polymorphism invariants] in GHC.Core: in any partial application `f e_1 .. e_n`, if `f` has no binding, we must be able to eta expand `f` to match the declared arity of `f`. Wrinkle 1: eta-expansion and newtypes Most of the time, when we have a partial application `f e_1 .. e_n` in which `f` is `hasNoBinding`, we eta-expand it up to its arity as follows: \ x_{n+1} ... x_arity -> f e_1 .. e_n x_{n+1} ... x_arity However, we might need to insert casts if some of the arguments that `f` takes are under a newtype. For example, suppose `f` `hasNoBinding`, has arity 1 and type f :: forall r (a :: TYPE r). Identity (a -> a) then we eta-expand the nullary application `f` to ( \ x -> f x ) |> co where co :: ( forall r (a :: TYPE r). 
a -> a ) ~# ( forall r (a :: TYPE r). Identity (a -> a) ) In this case we would have to perform a representation-polymorphism check on the instantiation of `a`. Wrinkle 2: 'hasNoBinding' and laziness It's important that we able to compute 'hasNoBinding' for an 'Id' without ever forcing the unfolding of the 'Id'. Otherwise, we could end up with a loop, as outlined in Note [Lazily checking Unfoldings] in GHC.IfaceToCore. -} instance Applicative LintM where pure x = LintM $ \ _ errs -> (Just x, errs) (<*>) = ap instance Monad LintM where m >>= k = LintM (\ env errs -> let (res, errs') = unLintM m env errs in case res of Just r -> unLintM (k r) env errs' Nothing -> (Nothing, errs')) instance MonadFail LintM where fail err = failWithL (text err) instance HasDynFlags LintM where getDynFlags = LintM (\ e errs -> (Just (le_dynflags e), errs)) data LintLocInfo = RhsOf Id -- The variable bound | OccOf Id -- Occurrence of id | LambdaBodyOf Id -- The lambda-binder | RuleOf Id -- Rules attached to a binder | UnfoldingOf Id -- Unfolding of a binder | BodyOfLetRec [Id] -- One of the binders | CaseAlt CoreAlt -- Case alternative | CasePat CoreAlt -- The *pattern* of the case alternative | CaseTy CoreExpr -- The type field of a case expression -- with this scrutinee | IdTy Id -- The type field of an Id binder | AnExpr CoreExpr -- Some expression | ImportedUnfolding SrcLoc -- Some imported unfolding (ToDo: say which) | TopLevelBindings | InType Type -- Inside a type | InCo Coercion -- Inside a coercion | InAxiom (CoAxiom Branched) -- Inside a CoAxiom initL :: DynFlags -> LintFlags -> [Var] -- ^ 'Id's that should be treated as being in scope -> LintM a -- ^ Action to run -> WarnsAndErrs initL dflags flags vars m = case unLintM m env (emptyBag, emptyBag) of (Just _, errs) -> errs (Nothing, errs@(_, e)) | not (isEmptyBag e) -> errs | otherwise -> pprPanic ("Bug in Lint: a failure occurred " ++ "without reporting an error message") empty where (tcvs, ids) = partition isTyCoVar vars env 
= LE { le_flags = flags , le_subst = mkEmptyTCvSubst (mkInScopeSet (mkVarSet tcvs)) , le_ids = mkVarEnv [(id, (id,idType id)) | id <- ids] , le_joins = emptyVarSet , le_loc = [] , le_dynflags = dflags , le_ue_aliases = emptyNameEnv } setReportUnsat :: Bool -> LintM a -> LintM a -- Switch off lf_report_unsat_syns setReportUnsat ru thing_inside = LintM $ \ env errs -> let env' = env { le_flags = (le_flags env) { lf_report_unsat_syns = ru } } in unLintM thing_inside env' errs -- See Note [Checking for representation polymorphism] noFixedRuntimeRepChecks :: LintM a -> LintM a noFixedRuntimeRepChecks thing_inside = LintM $ \env errs -> let env' = env { le_flags = (le_flags env) { lf_check_fixed_rep = False } } in unLintM thing_inside env' errs getLintFlags :: LintM LintFlags getLintFlags = LintM $ \ env errs -> (Just (le_flags env), errs) checkL :: Bool -> SDoc -> LintM () checkL True _ = return () checkL False msg = failWithL msg -- like checkL, but relevant to type checking lintL :: Bool -> SDoc -> LintM () lintL = checkL checkWarnL :: Bool -> SDoc -> LintM () checkWarnL True _ = return () checkWarnL False msg = addWarnL msg failWithL :: SDoc -> LintM a failWithL msg = LintM $ \ env (warns,errs) -> (Nothing, (warns, addMsg True env errs msg)) addErrL :: SDoc -> LintM () addErrL msg = LintM $ \ env (warns,errs) -> (Just (), (warns, addMsg True env errs msg)) addWarnL :: SDoc -> LintM () addWarnL msg = LintM $ \ env (warns,errs) -> (Just (), (addMsg False env warns msg, errs)) addMsg :: Bool -> LintEnv -> Bag SDoc -> SDoc -> Bag SDoc addMsg is_error env msgs msg = assertPpr (notNull loc_msgs) msg $ msgs `snocBag` mk_msg msg where loc_msgs :: [(SrcLoc, SDoc)] -- Innermost first loc_msgs = map dumpLoc (le_loc env) cxt_doc = vcat [ vcat $ reverse $ map snd loc_msgs , text "Substitution:" <+> ppr (le_subst env) ] context | is_error = cxt_doc | otherwise = whenPprDebug cxt_doc -- Print voluminous info for Lint errors -- but not for warnings msg_span = case [ span | (loc,_) 
<- loc_msgs , let span = srcLocSpan loc , isGoodSrcSpan span ] of [] -> noSrcSpan (s:_) -> s !diag_opts = initDiagOpts (le_dynflags env) mk_msg msg = mkLocMessage (mkMCDiagnostic diag_opts WarningWithoutFlag) msg_span (msg $$ context) addLoc :: LintLocInfo -> LintM a -> LintM a addLoc extra_loc m = LintM $ \ env errs -> unLintM m (env { le_loc = extra_loc : le_loc env }) errs inCasePat :: LintM Bool -- A slight hack; see the unique call site inCasePat = LintM $ \ env errs -> (Just (is_case_pat env), errs) where is_case_pat (LE { le_loc = CasePat {} : _ }) = True is_case_pat _other = False addInScopeId :: Id -> LintedType -> LintM a -> LintM a addInScopeId id linted_ty m = LintM $ \ env@(LE { le_ids = id_set, le_joins = join_set }) errs -> unLintM m (env { le_ids = extendVarEnv id_set id (id, linted_ty) , le_joins = add_joins join_set }) errs where add_joins join_set | isJoinId id = extendVarSet join_set id -- Overwrite with new arity | otherwise = delVarSet join_set id -- Remove any existing binding getInScopeIds :: LintM (VarEnv (Id,LintedType)) getInScopeIds = LintM (\env errs -> (Just (le_ids env), errs)) extendTvSubstL :: TyVar -> Type -> LintM a -> LintM a extendTvSubstL tv ty m = LintM $ \ env errs -> unLintM m (env { le_subst = Type.extendTvSubst (le_subst env) tv ty }) errs updateTCvSubst :: TCvSubst -> LintM a -> LintM a updateTCvSubst subst' m = LintM $ \ env errs -> unLintM m (env { le_subst = subst' }) errs markAllJoinsBad :: LintM a -> LintM a markAllJoinsBad m = LintM $ \ env errs -> unLintM m (env { le_joins = emptyVarSet }) errs markAllJoinsBadIf :: Bool -> LintM a -> LintM a markAllJoinsBadIf True m = markAllJoinsBad m markAllJoinsBadIf False m = m getValidJoins :: LintM IdSet getValidJoins = LintM (\ env errs -> (Just (le_joins env), errs)) getTCvSubst :: LintM TCvSubst getTCvSubst = LintM (\ env errs -> (Just (le_subst env), errs)) getUEAliases :: LintM (NameEnv UsageEnv) getUEAliases = LintM (\ env errs -> (Just (le_ue_aliases env), errs)) 
getInScope :: LintM InScopeSet
-- | The in-scope set of the current type/coercion substitution.
getInScope = LintM (\ env errs -> (Just (getTCvInScope $ le_subst env), errs))

lookupIdInScope :: Id -> LintM (Id, LintedType)
lookupIdInScope id_occ
  = do { in_scope_ids <- getInScopeIds
       ; case lookupVarEnv in_scope_ids id_occ of
           Just (id_bndr, linted_ty)
              -> do { checkL (not (bad_global id_bndr)) global_in_scope
                    ; return (id_bndr, linted_ty) }
           Nothing -> do { checkL (not is_local) local_out_of_scope
                         ; return (id_occ, idType id_occ) } }
                      -- We don't bother to lint the type
                      -- of global (i.e. imported) Ids
  where
    is_local = mustHaveLocalBinding id_occ
    local_out_of_scope = text "Out of scope:" <+> pprBndr LetBind id_occ
    global_in_scope = hang (text "Occurrence is GlobalId, but binding is LocalId")
                         2 (pprBndr LetBind id_occ)
    bad_global id_bnd = isGlobalId id_occ
                     && isLocalId id_bnd
                     && not (isWiredIn id_occ)
       -- 'bad_global' checks for the case where an /occurrence/ is
       -- a GlobalId, but there is an enclosing binding for a LocalId.
       -- NB: the in-scope variables are mostly LocalIds, checked by lintIdBndr,
       --     but GHCi adds GlobalIds from the interactive context.  These
       --     are fine; hence the test (isLocalId id == isLocalId v)
       -- NB: when compiling Control.Exception.Base, things like absentError
       --     are defined locally, but appear in expressions as (global)
       --     wired-in Ids after worker/wrapper
       --     So we simply disable the test in this case

lookupJoinId :: Id -> LintM (Maybe JoinArity)
-- Look up an Id which should be a join point, valid here
-- If so, return its arity, if not return Nothing
lookupJoinId id
  = do { join_set <- getValidJoins
       ; case lookupVarSet join_set id of
            Just id' -> return (isJoinId_maybe id')
            Nothing  -> return Nothing }

-- | Record a usage-environment alias for 'id' while running 'thing_inside';
-- looked up again by 'varCallSiteUsage'.
addAliasUE :: Id -> UsageEnv -> LintM a -> LintM a
addAliasUE id ue thing_inside = LintM $ \ env errs ->
  let new_ue_aliases = extendNameEnv (le_ue_aliases env) (getName id) ue
  in unLintM thing_inside (env { le_ue_aliases = new_ue_aliases }) errs

-- | The usage environment for an occurrence of 'id': its recorded alias
-- if there is one, otherwise a unit usage at multiplicity 'One'.
varCallSiteUsage :: Id -> LintM UsageEnv
varCallSiteUsage id =
  do m <- getUEAliases
     return $ case lookupNameEnv m (getName id) of
         Nothing -> unitUE id One
         Just id_ue -> id_ue

ensureEqTys :: LintedType -> LintedType -> SDoc -> LintM ()
-- check ty2 is subtype of ty1 (ie, has same structure but usage
-- annotations need only be consistent, not equal)
-- Assumes ty1,ty2 are have already had the substitution applied
ensureEqTys ty1 ty2 msg = lintL (ty1 `eqType` ty2) msg

ensureSubUsage :: Usage -> Mult -> SDoc -> LintM ()
ensureSubUsage Bottom _ _ = return ()
ensureSubUsage Zero described_mult err_msg = ensureSubMult Many described_mult err_msg
ensureSubUsage (MUsage m) described_mult err_msg = ensureSubMult m described_mult err_msg

ensureSubMult :: Mult -> Mult -> SDoc -> LintM ()
ensureSubMult actual_usage described_usage err_msg = do
    flags <- getLintFlags
    -- Only enforced when linearity checking is switched on
    when (lf_check_linearity flags) $ case actual_usage' `submult` described_usage' of
      Submult -> return ()
      Unknown -> case isMultMul actual_usage' of
                   Just (m1, m2) -> ensureSubMult m1 described_usage' err_msg >>
                                    ensureSubMult m2 described_usage' err_msg
                   Nothing -> when (not (actual_usage' `eqType`
described_usage')) (addErrL err_msg) where actual_usage' = normalize actual_usage described_usage' = normalize described_usage normalize :: Mult -> Mult normalize m = case isMultMul m of Just (m1, m2) -> mkMultMul (normalize m1) (normalize m2) Nothing -> m lintRole :: Outputable thing => thing -- where the role appeared -> Role -- expected -> Role -- actual -> LintM () lintRole co r1 r2 = lintL (r1 == r2) (text "Role incompatibility: expected" <+> ppr r1 <> comma <+> text "got" <+> ppr r2 $$ text "in" <+> ppr co) {- ************************************************************************ * * \subsection{Error messages} * * ************************************************************************ -} dumpLoc :: LintLocInfo -> (SrcLoc, SDoc) dumpLoc (RhsOf v) = (getSrcLoc v, text "In the RHS of" <+> pp_binders [v]) dumpLoc (OccOf v) = (getSrcLoc v, text "In an occurrence of" <+> pp_binder v) dumpLoc (LambdaBodyOf b) = (getSrcLoc b, text "In the body of lambda with binder" <+> pp_binder b) dumpLoc (RuleOf b) = (getSrcLoc b, text "In a rule attached to" <+> pp_binder b) dumpLoc (UnfoldingOf b) = (getSrcLoc b, text "In the unfolding of" <+> pp_binder b) dumpLoc (BodyOfLetRec []) = (noSrcLoc, text "In body of a letrec with no binders") dumpLoc (BodyOfLetRec bs@(_:_)) = ( getSrcLoc (head bs), text "In the body of letrec with binders" <+> pp_binders bs) dumpLoc (AnExpr e) = (noSrcLoc, text "In the expression:" <+> ppr e) dumpLoc (CaseAlt (Alt con args _)) = (noSrcLoc, text "In a case alternative:" <+> parens (ppr con <+> pp_binders args)) dumpLoc (CasePat (Alt con args _)) = (noSrcLoc, text "In the pattern of a case alternative:" <+> parens (ppr con <+> pp_binders args)) dumpLoc (CaseTy scrut) = (noSrcLoc, hang (text "In the result-type of a case with scrutinee:") 2 (ppr scrut)) dumpLoc (IdTy b) = (getSrcLoc b, text "In the type of a binder:" <+> ppr b) dumpLoc (ImportedUnfolding locn) = (locn, text "In an imported unfolding") dumpLoc TopLevelBindings = (noSrcLoc, 
Outputable.empty) dumpLoc (InType ty) = (noSrcLoc, text "In the type" <+> quotes (ppr ty)) dumpLoc (InCo co) = (noSrcLoc, text "In the coercion" <+> quotes (ppr co)) dumpLoc (InAxiom ax) = (getSrcLoc ax, hang (text "In the coercion axiom") 2 (pprCoAxiom ax)) pp_binders :: [Var] -> SDoc pp_binders bs = sep (punctuate comma (map pp_binder bs)) pp_binder :: Var -> SDoc pp_binder b | isId b = hsep [ppr b, dcolon, ppr (idType b)] | otherwise = hsep [ppr b, dcolon, ppr (tyVarKind b)] ------------------------------------------------------ -- Messages for case expressions mkDefaultArgsMsg :: [Var] -> SDoc mkDefaultArgsMsg args = hang (text "DEFAULT case with binders") 4 (ppr args) mkCaseAltMsg :: CoreExpr -> Type -> Type -> SDoc mkCaseAltMsg e ty1 ty2 = hang (text "Type of case alternatives not the same as the annotation on case:") 4 (vcat [ text "Actual type:" <+> ppr ty1, text "Annotation on case:" <+> ppr ty2, text "Alt Rhs:" <+> ppr e ]) mkScrutMsg :: Id -> Type -> Type -> TCvSubst -> SDoc mkScrutMsg var var_ty scrut_ty subst = vcat [text "Result binder in case doesn't match scrutinee:" <+> ppr var, text "Result binder type:" <+> ppr var_ty,--(idType var), text "Scrutinee type:" <+> ppr scrut_ty, hsep [text "Current TCv subst", ppr subst]] mkNonDefltMsg, mkNonIncreasingAltsMsg :: CoreExpr -> SDoc mkNonDefltMsg e = hang (text "Case expression with DEFAULT not at the beginning") 4 (ppr e) mkNonIncreasingAltsMsg e = hang (text "Case expression with badly-ordered alternatives") 4 (ppr e) nonExhaustiveAltsMsg :: CoreExpr -> SDoc nonExhaustiveAltsMsg e = hang (text "Case expression with non-exhaustive alternatives") 4 (ppr e) mkBadConMsg :: TyCon -> DataCon -> SDoc mkBadConMsg tycon datacon = vcat [ text "In a case alternative, data constructor isn't in scrutinee type:", text "Scrutinee type constructor:" <+> ppr tycon, text "Data con:" <+> ppr datacon ] mkBadPatMsg :: Type -> Type -> SDoc mkBadPatMsg con_result_ty scrut_ty = vcat [ text "In a case alternative, pattern 
result type doesn't match scrutinee type:", text "Pattern result type:" <+> ppr con_result_ty, text "Scrutinee type:" <+> ppr scrut_ty ] integerScrutinisedMsg :: SDoc integerScrutinisedMsg = text "In a LitAlt, the literal is lifted (probably Integer)" mkBadAltMsg :: Type -> CoreAlt -> SDoc mkBadAltMsg scrut_ty alt = vcat [ text "Data alternative when scrutinee is not a tycon application", text "Scrutinee type:" <+> ppr scrut_ty, text "Alternative:" <+> pprCoreAlt alt ] mkNewTyDataConAltMsg :: Type -> CoreAlt -> SDoc mkNewTyDataConAltMsg scrut_ty alt = vcat [ text "Data alternative for newtype datacon", text "Scrutinee type:" <+> ppr scrut_ty, text "Alternative:" <+> pprCoreAlt alt ] ------------------------------------------------------ -- Other error messages mkAppMsg :: Type -> Type -> CoreExpr -> SDoc mkAppMsg expected_arg_ty actual_arg_ty arg = vcat [text "Argument value doesn't match argument type:", hang (text "Expected arg type:") 4 (ppr expected_arg_ty), hang (text "Actual arg type:") 4 (ppr actual_arg_ty), hang (text "Arg:") 4 (ppr arg)] mkNonFunAppMsg :: Type -> Type -> CoreExpr -> SDoc mkNonFunAppMsg fun_ty arg_ty arg = vcat [text "Non-function type in function position", hang (text "Fun type:") 4 (ppr fun_ty), hang (text "Arg type:") 4 (ppr arg_ty), hang (text "Arg:") 4 (ppr arg)] mkLetErr :: TyVar -> CoreExpr -> SDoc mkLetErr bndr rhs = vcat [text "Bad `let' binding:", hang (text "Variable:") 4 (ppr bndr <+> dcolon <+> ppr (varType bndr)), hang (text "Rhs:") 4 (ppr rhs)] mkTyAppMsg :: Type -> Type -> SDoc mkTyAppMsg ty arg_ty = vcat [text "Illegal type application:", hang (text "Exp type:") 4 (ppr ty <+> dcolon <+> ppr (typeKind ty)), hang (text "Arg type:") 4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))] emptyRec :: CoreExpr -> SDoc emptyRec e = hang (text "Empty Rec binding:") 2 (ppr e) mkRhsMsg :: Id -> SDoc -> Type -> SDoc mkRhsMsg binder what ty = vcat [hsep [text "The type of this binder doesn't match the type of its" <+> what <> colon, ppr 
binder], hsep [text "Binder's type:", ppr (idType binder)], hsep [text "Rhs type:", ppr ty]] mkLetAppMsg :: CoreExpr -> SDoc mkLetAppMsg e = hang (text "This argument does not satisfy the let/app invariant:") 2 (ppr e) badBndrTyMsg :: Id -> SDoc -> SDoc badBndrTyMsg binder what = vcat [ text "The type of this binder is" <+> what <> colon <+> ppr binder , text "Binder's type:" <+> ppr (idType binder) ] mkNonTopExportedMsg :: Id -> SDoc mkNonTopExportedMsg binder = hsep [text "Non-top-level binder is marked as exported:", ppr binder] mkNonTopExternalNameMsg :: Id -> SDoc mkNonTopExternalNameMsg binder = hsep [text "Non-top-level binder has an external name:", ppr binder] mkTopNonLitStrMsg :: Id -> SDoc mkTopNonLitStrMsg binder = hsep [text "Top-level Addr# binder has a non-literal rhs:", ppr binder] mkKindErrMsg :: TyVar -> Type -> SDoc mkKindErrMsg tyvar arg_ty = vcat [text "Kinds don't match in type application:", hang (text "Type variable:") 4 (ppr tyvar <+> dcolon <+> ppr (tyVarKind tyvar)), hang (text "Arg type:") 4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))] mkCastErr :: CoreExpr -> Coercion -> Type -> Type -> SDoc mkCastErr expr = mk_cast_err "expression" "type" (ppr expr) mkCastTyErr :: Type -> Coercion -> Kind -> Kind -> SDoc mkCastTyErr ty = mk_cast_err "type" "kind" (ppr ty) mk_cast_err :: String -- ^ What sort of casted thing this is -- (\"expression\" or \"type\"). -> String -- ^ What sort of coercion is being used -- (\"type\" or \"kind\"). -> SDoc -- ^ The thing being casted. 
-> Coercion -> Type -> Type -> SDoc mk_cast_err thing_str co_str pp_thing co from_ty thing_ty = vcat [from_msg <+> text "of Cast differs from" <+> co_msg <+> text "of" <+> enclosed_msg, from_msg <> colon <+> ppr from_ty, text (capitalise co_str) <+> text "of" <+> enclosed_msg <> colon <+> ppr thing_ty, text "Actual" <+> enclosed_msg <> colon <+> pp_thing, text "Coercion used in cast:" <+> ppr co ] where co_msg, from_msg, enclosed_msg :: SDoc co_msg = text co_str from_msg = text "From-" <> co_msg enclosed_msg = text "enclosed" <+> text thing_str mkBadUnivCoMsg :: LeftOrRight -> Coercion -> SDoc mkBadUnivCoMsg lr co = text "Kind mismatch on the" <+> pprLeftOrRight lr <+> text "side of a UnivCo:" <+> ppr co mkBadProofIrrelMsg :: Type -> Coercion -> SDoc mkBadProofIrrelMsg ty co = hang (text "Found a non-coercion in a proof-irrelevance UnivCo:") 2 (vcat [ text "type:" <+> ppr ty , text "co:" <+> ppr co ]) mkBadTyVarMsg :: Var -> SDoc mkBadTyVarMsg tv = text "Non-tyvar used in TyVarTy:" <+> ppr tv <+> dcolon <+> ppr (varType tv) mkBadJoinBindMsg :: Var -> SDoc mkBadJoinBindMsg var = vcat [ text "Bad join point binding:" <+> ppr var , text "Join points can be bound only by a non-top-level let" ] mkInvalidJoinPointMsg :: Var -> Type -> SDoc mkInvalidJoinPointMsg var ty = hang (text "Join point has invalid type:") 2 (ppr var <+> dcolon <+> ppr ty) mkBadJoinArityMsg :: Var -> Int -> Int -> CoreExpr -> SDoc mkBadJoinArityMsg var ar n rhs = vcat [ text "Join point has too few lambdas", text "Join var:" <+> ppr var, text "Join arity:" <+> ppr ar, text "Number of lambdas:" <+> ppr (ar - n), text "Rhs = " <+> ppr rhs ] invalidJoinOcc :: Var -> SDoc invalidJoinOcc var = vcat [ text "Invalid occurrence of a join variable:" <+> ppr var , text "The binder is either not a join point, or not valid here" ] mkBadJumpMsg :: Var -> Int -> Int -> SDoc mkBadJumpMsg var ar nargs = vcat [ text "Join point invoked with wrong number of arguments", text "Join var:" <+> ppr var, text "Join 
arity:" <+> ppr ar, text "Number of arguments:" <+> int nargs ] mkInconsistentRecMsg :: [Var] -> SDoc mkInconsistentRecMsg bndrs = vcat [ text "Recursive let binders mix values and join points", text "Binders:" <+> hsep (map ppr_with_details bndrs) ] where ppr_with_details bndr = ppr bndr <> ppr (idDetails bndr) mkJoinBndrOccMismatchMsg :: Var -> JoinArity -> JoinArity -> SDoc mkJoinBndrOccMismatchMsg bndr join_arity_bndr join_arity_occ = vcat [ text "Mismatch in join point arity between binder and occurrence" , text "Var:" <+> ppr bndr , text "Arity at binding site:" <+> ppr join_arity_bndr , text "Arity at occurrence: " <+> ppr join_arity_occ ] mkBndrOccTypeMismatchMsg :: Var -> Var -> LintedType -> LintedType -> SDoc mkBndrOccTypeMismatchMsg bndr var bndr_ty var_ty = vcat [ text "Mismatch in type between binder and occurrence" , text "Binder:" <+> ppr bndr <+> dcolon <+> ppr bndr_ty , text "Occurrence:" <+> ppr var <+> dcolon <+> ppr var_ty , text " Before subst:" <+> ppr (idType var) ] mkBadJoinPointRuleMsg :: JoinId -> JoinArity -> CoreRule -> SDoc mkBadJoinPointRuleMsg bndr join_arity rule = vcat [ text "Join point has rule with wrong number of arguments" , text "Var:" <+> ppr bndr , text "Join arity:" <+> ppr join_arity , text "Rule:" <+> ppr rule ] pprLeftOrRight :: LeftOrRight -> SDoc pprLeftOrRight CLeft = text "left" pprLeftOrRight CRight = text "right" dupVars :: [NonEmpty Var] -> SDoc dupVars vars = hang (text "Duplicate variables brought into scope") 2 (ppr (map toList vars)) dupExtVars :: [NonEmpty Name] -> SDoc dupExtVars vars = hang (text "Duplicate top-level variables with the same qualified name") 2 (ppr (map toList vars)) {- ************************************************************************ * * \subsection{Annotation Linting} * * ************************************************************************ -} -- | This checks whether a pass correctly looks through debug -- annotations (@SourceNote@). 
This works a bit different from other -- consistency checks: We check this by running the given task twice, -- noting all differences between the results. lintAnnots :: SDoc -> (ModGuts -> CoreM ModGuts) -> ModGuts -> CoreM ModGuts lintAnnots pname pass guts = {-# SCC "lintAnnots" #-} do -- Run the pass as we normally would dflags <- getDynFlags logger <- getLogger when (gopt Opt_DoAnnotationLinting dflags) $ liftIO $ Err.showPass logger "Annotation linting - first run" nguts <- pass guts -- If appropriate re-run it without debug annotations to make sure -- that they made no difference. when (gopt Opt_DoAnnotationLinting dflags) $ do liftIO $ Err.showPass logger "Annotation linting - second run" nguts' <- withoutAnnots pass guts -- Finally compare the resulting bindings liftIO $ Err.showPass logger "Annotation linting - comparison" let binds = flattenBinds $ mg_binds nguts binds' = flattenBinds $ mg_binds nguts' (diffs,_) = diffBinds True (mkRnEnv2 emptyInScopeSet) binds binds' when (not (null diffs)) $ GHC.Core.Opt.Monad.putMsg $ vcat [ lint_banner "warning" pname , text "Core changes with annotations:" , withPprStyle defaultDumpStyle $ nest 2 $ vcat diffs ] -- Return actual new guts return nguts -- | Run the given pass without annotations. This means that we both -- set the debugLevel setting to 0 in the environment as well as all -- annotations from incoming modules. withoutAnnots :: (ModGuts -> CoreM ModGuts) -> ModGuts -> CoreM ModGuts withoutAnnots pass guts = do -- Remove debug flag from environment. dflags <- getDynFlags let removeFlag env = hscSetFlags (dflags { debugLevel = 0}) env withoutFlag corem = -- TODO: supply tag here as well ? liftIO =<< runCoreM <$> fmap removeFlag getHscEnv <*> getRuleBase <*> getUniqMask <*> getModule <*> getVisibleOrphanMods <*> getPrintUnqualified <*> getSrcSpanM <*> pure corem -- Nuke existing ticks in module. -- TODO: Ticks in unfoldings. Maybe change unfolding so it removes -- them in absence of debugLevel > 0. 
let nukeTicks = stripTicksE (not . tickishIsCode) nukeAnnotsBind :: CoreBind -> CoreBind nukeAnnotsBind bind = case bind of Rec bs -> Rec $ map (\(b,e) -> (b, nukeTicks e)) bs NonRec b e -> NonRec b $ nukeTicks e nukeAnnotsMod mg@ModGuts{mg_binds=binds} = mg{mg_binds = map nukeAnnotsBind binds} -- Perform pass with all changes applied fmap fst $ withoutFlag $ pass (nukeAnnotsMod guts) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Make.hs0000644000000000000000000013212014472400112017733 0ustar0000000000000000 {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Handy functions for creating much Core syntax module GHC.Core.Make ( -- * Constructing normal syntax mkCoreLet, mkCoreLets, mkCoreApp, mkCoreApps, mkCoreConApps, mkCoreLams, mkWildCase, mkIfThenElse, mkWildValBinder, mkWildEvBinder, mkSingleAltCase, sortQuantVars, castBottomExpr, -- * Constructing boxed literals mkLitRubbish, mkWordExpr, mkIntExpr, mkIntExprInt, mkUncheckedIntExpr, mkIntegerExpr, mkNaturalExpr, mkFloatExpr, mkDoubleExpr, mkCharExpr, mkStringExpr, mkStringExprFS, mkStringExprFSWith, MkStringIds (..), getMkStringIds, -- * Floats FloatBind(..), wrapFloat, wrapFloats, floatBindings, -- * Constructing small tuples mkCoreVarTupTy, mkCoreTup, mkCoreUbxTup, mkCoreUbxSum, mkCoreTupBoxity, unitExpr, -- * Constructing big tuples mkBigCoreVarTup, mkBigCoreVarTup1, mkBigCoreVarTupTy, mkBigCoreTupTy, mkBigCoreTup, -- * Deconstructing small tuples mkSmallTupleSelector, mkSmallTupleCase, -- * Deconstructing big tuples mkTupleSelector, mkTupleSelector1, mkTupleCase, -- * Constructing list expressions mkNilExpr, mkConsExpr, mkListExpr, mkFoldrExpr, mkBuildExpr, -- * Constructing non empty lists mkNonEmptyListExpr, -- * Constructing Maybe expressions mkNothingExpr, mkJustExpr, -- * Error Ids mkRuntimeErrorApp, mkImpossibleExpr, mkAbsentErrorApp, errorIds, rEC_CON_ERROR_ID, rUNTIME_ERROR_ID, nON_EXHAUSTIVE_GUARDS_ERROR_ID, nO_METHOD_BINDING_ERROR_ID, pAT_ERROR_ID, rEC_SEL_ERROR_ID, aBSENT_ERROR_ID, 
tYPE_ERROR_ID, aBSENT_SUM_FIELD_ERROR_ID ) where import GHC.Prelude import GHC.Platform import GHC.Types.Id import GHC.Types.Var ( EvVar, setTyVarUnique ) import GHC.Types.TyThing import GHC.Types.Id.Info import GHC.Types.Cpr import GHC.Types.Demand import GHC.Types.Name hiding ( varName ) import GHC.Types.Literal import GHC.Types.Unique.Supply import GHC.Core import GHC.Core.Utils ( exprType, needsCaseBinding, mkSingleAltCase, bindNonRec ) import GHC.Core.Type import GHC.Core.Coercion ( isCoVar ) import GHC.Core.DataCon ( DataCon, dataConWorkId ) import GHC.Core.Multiplicity import GHC.Hs.Utils ( mkChunkified, chunkify ) import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Builtin.Types.Prim import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.FastString import Data.List ( partition ) import Data.Char ( ord ) infixl 4 `mkCoreApp`, `mkCoreApps` {- ************************************************************************ * * \subsection{Basic GHC.Core construction} * * ************************************************************************ -} -- | Sort the variables, putting type and covars first, in scoped order, -- and then other Ids -- -- It is a deterministic sort, meaining it doesn't look at the values of -- Uniques. For explanation why it's important See Note [Unique Determinism] -- in GHC.Types.Unique. sortQuantVars :: [Var] -> [Var] sortQuantVars vs = sorted_tcvs ++ ids where (tcvs, ids) = partition (isTyVar <||> isCoVar) vs sorted_tcvs = scopedSort tcvs -- | Bind a binding group over an expression, using a @let@ or @case@ as -- appropriate (see "GHC.Core#let_app_invariant") mkCoreLet :: CoreBind -> CoreExpr -> CoreExpr mkCoreLet (NonRec bndr rhs) body -- See Note [Core let/app invariant] = bindNonRec bndr rhs body mkCoreLet bind body = Let bind body -- | Create a lambda where the given expression has a number of variables -- bound over it. 
The leftmost binder is that bound by the outermost -- lambda in the result mkCoreLams :: [CoreBndr] -> CoreExpr -> CoreExpr mkCoreLams = mkLams -- | Bind a list of binding groups over an expression. The leftmost binding -- group becomes the outermost group in the resulting expression mkCoreLets :: [CoreBind] -> CoreExpr -> CoreExpr mkCoreLets binds body = foldr mkCoreLet body binds -- | Construct an expression which represents the application of a number of -- expressions to that of a data constructor expression. The leftmost expression -- in the list is applied first mkCoreConApps :: DataCon -> [CoreExpr] -> CoreExpr mkCoreConApps con args = mkCoreApps (Var (dataConWorkId con)) args -- | Construct an expression which represents the application of a number of -- expressions to another. The leftmost expression in the list is applied first -- -- Respects the let/app invariant by building a case expression where necessary -- See Note [Core let/app invariant] in "GHC.Core" mkCoreApps :: CoreExpr -- ^ function -> [CoreExpr] -- ^ arguments -> CoreExpr mkCoreApps fun args = fst $ foldl' (mkCoreAppTyped doc_string) (fun, fun_ty) args where doc_string = ppr fun_ty $$ ppr fun $$ ppr args fun_ty = exprType fun -- | Construct an expression which represents the application of one expression -- to the other -- -- Respects the let/app invariant by building a case expression where necessary -- See Note [Core let/app invariant] in "GHC.Core" mkCoreApp :: SDoc -> CoreExpr -- ^ function -> CoreExpr -- ^ argument -> CoreExpr mkCoreApp s fun arg = fst $ mkCoreAppTyped s (fun, exprType fun) arg -- | Construct an expression which represents the application of one expression -- paired with its type to an argument. The result is paired with its type. This -- function is not exported and used in the definition of 'mkCoreApp' and -- 'mkCoreApps'. 
-- -- Respects the let/app invariant by building a case expression where necessary -- See Note [Core let/app invariant] in "GHC.Core" mkCoreAppTyped :: SDoc -> (CoreExpr, Type) -> CoreExpr -> (CoreExpr, Type) mkCoreAppTyped _ (fun, fun_ty) (Type ty) = (App fun (Type ty), piResultTy fun_ty ty) mkCoreAppTyped _ (fun, fun_ty) (Coercion co) = (App fun (Coercion co), funResultTy fun_ty) mkCoreAppTyped d (fun, fun_ty) arg = assertPpr (isFunTy fun_ty) (ppr fun $$ ppr arg $$ d) (mkValApp fun arg (Scaled mult arg_ty) res_ty, res_ty) where (mult, arg_ty, res_ty) = splitFunTy fun_ty -- | Build an application (e1 e2), -- or a strict binding (case e2 of x -> e1 x) -- using the latter when necessary to respect the let/app invariant -- See Note [Core let/app invariant] in GHC.Core mkValApp :: CoreExpr -> CoreExpr -> Scaled Type -> Type -> CoreExpr mkValApp fun arg (Scaled w arg_ty) res_ty | not (needsCaseBinding arg_ty arg) = App fun arg -- The vastly common case | otherwise = mkStrictApp fun arg (Scaled w arg_ty) res_ty {- ********************************************************************* * * Building case expressions * * ********************************************************************* -} mkWildEvBinder :: PredType -> EvVar mkWildEvBinder pred = mkWildValBinder Many pred -- | Make a /wildcard binder/. This is typically used when you need a binder -- that you expect to use only at a *binding* site. Do not use it at -- occurrence sites because it has a single, fixed unique, and it's very -- easy to get into difficulties with shadowing. That's why it is used so little. -- -- See Note [WildCard binders] in "GHC.Core.Opt.Simplify.Env" mkWildValBinder :: Mult -> Type -> Id mkWildValBinder w ty = mkLocalIdOrCoVar wildCardName w ty -- "OrCoVar" since a coercion can be a scrutinee with -fdefer-type-errors -- (e.g. see test T15695). Ticket #17291 covers fixing this problem. 
-- | Make a case expression whose case binder is unused
-- The alts and res_ty should not have any occurrences of WildId
mkWildCase :: CoreExpr -- ^ scrutinee
           -> Scaled Type
           -> Type -- ^ res_ty
           -> [CoreAlt] -- ^ alts
           -> CoreExpr
mkWildCase scrut (Scaled w scrut_ty) res_ty alts
  = Case scrut (mkWildValBinder w scrut_ty) res_ty alts

-- | Build a strict application (case e2 of x -> e1 x)
mkStrictApp :: CoreExpr -> CoreExpr -> Scaled Type -> Type -> CoreExpr
mkStrictApp fun arg (Scaled w arg_ty) res_ty
  = Case arg arg_id res_ty [Alt DEFAULT [] (App fun (Var arg_id))]
       -- mkDefaultCase looks attractive here, and would be sound.
       -- But it uses (exprType alt_rhs) to compute the result type,
       -- whereas here we already know that the result type is res_ty
  where
    arg_id = mkWildValBinder w arg_ty
        -- Lots of shadowing, but it doesn't matter,
        -- because 'fun' and 'res_ty' should not have a free wild-id
        --
        -- This is Dangerous.  But this is the only place we play this
        -- game, mkStrictApp returns an expression that does not have
        -- a free wild-id.  So the only way 'fun' could get a free wild-id
        -- would be if you take apart this case expression (or some other
        -- expression that uses mkWildValBinder, of which there are not
        -- many), and pass a fragment of it as the fun part of a 'mkStrictApp'.

mkIfThenElse :: CoreExpr -- ^ guard
             -> CoreExpr -- ^ then
             -> CoreExpr -- ^ else
             -> CoreExpr
mkIfThenElse guard then_expr else_expr
-- Not going to be refining, so okay to take the type of the "then" clause
  = mkWildCase guard (linear boolTy) (exprType then_expr)
         [ Alt (DataAlt falseDataCon) [] else_expr,       -- Increasing order of tag!
Alt (DataAlt trueDataCon) [] then_expr ] castBottomExpr :: CoreExpr -> Type -> CoreExpr -- (castBottomExpr e ty), assuming that 'e' diverges, -- return an expression of type 'ty' -- See Note [Empty case alternatives] in GHC.Core castBottomExpr e res_ty | e_ty `eqType` res_ty = e | otherwise = Case e (mkWildValBinder One e_ty) res_ty [] where e_ty = exprType e mkLitRubbish :: Type -> Maybe CoreExpr -- Make a rubbish-literal CoreExpr of the given type. -- Fail (returning Nothing) if -- * the RuntimeRep of the Type is not monomorphic; -- * the type is (a ~# b), the type of coercion -- See INVARIANT 1 and 2 of item (2) in Note [Rubbish literals] -- in GHC.Types.Literal mkLitRubbish ty | not (noFreeVarsOfType rep) = Nothing -- Satisfy INVARIANT 1 | isCoVarType ty = Nothing -- Satisfy INVARIANT 2 | otherwise = Just (Lit (LitRubbish rep) `mkTyApps` [ty]) where rep = getRuntimeRep ty {- ************************************************************************ * * \subsection{Making literals} * * ************************************************************************ -} -- | Create a 'CoreExpr' which will evaluate to the given @Int@ mkIntExpr :: Platform -> Integer -> CoreExpr -- Result = I# i :: Int mkIntExpr platform i = mkCoreConApps intDataCon [mkIntLit platform i] -- | Create a 'CoreExpr' which will evaluate to the given @Int@. 
Don't check
-- that the number is in the range of the target platform @Int@
mkUncheckedIntExpr :: Integer -> CoreExpr
-- Result = I# i :: Int
mkUncheckedIntExpr i = mkCoreConApps intDataCon [Lit (mkLitIntUnchecked i)]

-- | Create a 'CoreExpr' which will evaluate to the given @Int@
mkIntExprInt :: Platform -> Int -> CoreExpr
-- Result = I# i :: Int
mkIntExprInt platform i = mkCoreConApps intDataCon [mkIntLit platform (fromIntegral i)]

-- | Create a 'CoreExpr' which will evaluate to a @Word@ with the given value
mkWordExpr :: Platform -> Integer -> CoreExpr
mkWordExpr platform w = mkCoreConApps wordDataCon [mkWordLit platform w]

-- | Create a 'CoreExpr' which will evaluate to the given @Integer@
mkIntegerExpr :: Platform -> Integer -> CoreExpr  -- Result :: Integer
mkIntegerExpr platform i
  | platformInIntRange platform i = mkCoreConApps integerISDataCon [mkIntLit platform i]
  | i < 0     = mkCoreConApps integerINDataCon [Lit (mkLitBigNat (negate i))]
  | otherwise = mkCoreConApps integerIPDataCon [Lit (mkLitBigNat i)]

-- | Create a 'CoreExpr' which will evaluate to the given @Natural@
mkNaturalExpr :: Platform -> Integer -> CoreExpr
mkNaturalExpr platform w
  | platformInWordRange platform w = mkCoreConApps naturalNSDataCon [mkWordLit platform w]
  | otherwise                      = mkCoreConApps naturalNBDataCon [Lit (mkLitBigNat w)]

-- | Create a 'CoreExpr' which will evaluate to the given @Float@
mkFloatExpr :: Float -> CoreExpr
mkFloatExpr f = mkCoreConApps floatDataCon [mkFloatLitFloat f]

-- | Create a 'CoreExpr' which will evaluate to the given @Double@
mkDoubleExpr :: Double -> CoreExpr
mkDoubleExpr d = mkCoreConApps doubleDataCon [mkDoubleLitDouble d]

-- | Create a 'CoreExpr' which will evaluate to the given @Char@
mkCharExpr :: Char -> CoreExpr
-- Result = C# c :: Char
-- (the applied constructor is 'charDataCon', so the result is a boxed 'Char')
mkCharExpr c = mkCoreConApps charDataCon [mkCharLit c]

-- | Create a 'CoreExpr' which will evaluate to the given @String@
mkStringExpr :: MonadThings m => String -> m CoreExpr -- Result :: String
mkStringExpr str =
mkStringExprFS (mkFastString str) -- | Create a 'CoreExpr' which will evaluate to a string morally equivalent to the given @FastString@ mkStringExprFS :: MonadThings m => FastString -> m CoreExpr -- Result :: String mkStringExprFS = mkStringExprFSLookup lookupId mkStringExprFSLookup :: Monad m => (Name -> m Id) -> FastString -> m CoreExpr mkStringExprFSLookup lookupM str = do mk <- getMkStringIds lookupM pure (mkStringExprFSWith mk str) getMkStringIds :: Applicative m => (Name -> m Id) -> m MkStringIds getMkStringIds lookupM = MkStringIds <$> lookupM unpackCStringName <*> lookupM unpackCStringUtf8Name data MkStringIds = MkStringIds { unpackCStringId :: !Id , unpackCStringUtf8Id :: !Id } mkStringExprFSWith :: MkStringIds -> FastString -> CoreExpr mkStringExprFSWith ids str | nullFS str = mkNilExpr charTy | all safeChar chars = let !unpack_id = unpackCStringId ids in App (Var unpack_id) lit | otherwise = let !unpack_utf8_id = unpackCStringUtf8Id ids in App (Var unpack_utf8_id) lit where chars = unpackFS str safeChar c = ord c >= 1 && ord c <= 0x7F lit = Lit (LitString (bytesFS str)) {- ************************************************************************ * * \subsection{Tuple constructors} * * ************************************************************************ -} {- Creating tuples and their types for Core expressions @mkBigCoreVarTup@ builds a tuple; the inverse to @mkTupleSelector@. * If it has only one element, it is the identity function. * If there are more elements than a big tuple can have, it nests the tuples. Note [Flattening one-tuples] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This family of functions creates a tuple of variables/expressions/types. mkCoreTup [e1,e2,e3] = (e1,e2,e3) What if there is just one variable/expression/type in the argument? We could do one of two things: * Flatten it out, so that mkCoreTup [e1] = e1 * Build a one-tuple (see Note [One-tuples] in GHC.Builtin.Types) mkCoreTup1 [e1] = Solo e1 We use a suffix "1" to indicate this. 
Usually we want the former, but occasionally the latter. NB: The logic in tupleDataCon knows about () and Solo and (,), etc. Note [Don't flatten tuples from HsSyn] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we get an explicit 1-tuple from HsSyn somehow (likely: Template Haskell), we should treat it really as a 1-tuple, without flattening. Note that a 1-tuple and a flattened value have different performance and laziness characteristics, so should just do what we're asked. This arose from discussions in #16881. One-tuples that arise internally depend on the circumstance; often flattening is a good idea. Decisions are made on a case-by-case basis. -} -- | Build the type of a small tuple that holds the specified variables -- One-tuples are flattened; see Note [Flattening one-tuples] mkCoreVarTupTy :: [Id] -> Type mkCoreVarTupTy ids = mkBoxedTupleTy (map idType ids) -- | Build a small tuple holding the specified expressions -- One-tuples are flattened; see Note [Flattening one-tuples] mkCoreTup :: [CoreExpr] -> CoreExpr mkCoreTup [c] = c mkCoreTup cs = mkCoreTup1 cs -- non-1-tuples are uniform -- | Build a small tuple holding the specified expressions -- One-tuples are *not* flattened; see Note [Flattening one-tuples] -- See also Note [Don't flatten tuples from HsSyn] mkCoreTup1 :: [CoreExpr] -> CoreExpr mkCoreTup1 cs = mkCoreConApps (tupleDataCon Boxed (length cs)) (map (Type . exprType) cs ++ cs) -- | Build a small unboxed tuple holding the specified expressions, -- with the given types. The types must be the types of the expressions. -- Do not include the RuntimeRep specifiers; this function calculates them -- for you. -- Does /not/ flatten one-tuples; see Note [Flattening one-tuples] mkCoreUbxTup :: [Type] -> [CoreExpr] -> CoreExpr mkCoreUbxTup tys exps = assert (tys `equalLength` exps) $ mkCoreConApps (tupleDataCon Unboxed (length tys)) (map (Type . 
getRuntimeRep) tys ++ map Type tys ++ exps) -- | Make a core tuple of the given boxity; don't flatten 1-tuples mkCoreTupBoxity :: Boxity -> [CoreExpr] -> CoreExpr mkCoreTupBoxity Boxed exps = mkCoreTup1 exps mkCoreTupBoxity Unboxed exps = mkCoreUbxTup (map exprType exps) exps -- | Build an unboxed sum. -- -- Alternative number ("alt") starts from 1. mkCoreUbxSum :: Int -> Int -> [Type] -> CoreExpr -> CoreExpr mkCoreUbxSum arity alt tys exp = assert (length tys == arity) $ assert (alt <= arity) $ mkCoreConApps (sumDataCon alt arity) (map (Type . getRuntimeRep) tys ++ map Type tys ++ [exp]) -- | Build a big tuple holding the specified variables -- One-tuples are flattened; see Note [Flattening one-tuples] mkBigCoreVarTup :: [Id] -> CoreExpr mkBigCoreVarTup ids = mkBigCoreTup (map Var ids) mkBigCoreVarTup1 :: [Id] -> CoreExpr -- Same as mkBigCoreVarTup, but one-tuples are NOT flattened -- see Note [Flattening one-tuples] mkBigCoreVarTup1 [id] = mkCoreConApps (tupleDataCon Boxed 1) [Type (idType id), Var id] mkBigCoreVarTup1 ids = mkBigCoreTup (map Var ids) -- | Build the type of a big tuple that holds the specified variables -- One-tuples are flattened; see Note [Flattening one-tuples] mkBigCoreVarTupTy :: [Id] -> Type mkBigCoreVarTupTy ids = mkBigCoreTupTy (map idType ids) -- | Build a big tuple holding the specified expressions -- One-tuples are flattened; see Note [Flattening one-tuples] mkBigCoreTup :: [CoreExpr] -> CoreExpr mkBigCoreTup = mkChunkified mkCoreTup -- | Build the type of a big tuple that holds the specified type of thing -- One-tuples are flattened; see Note [Flattening one-tuples] mkBigCoreTupTy :: [Type] -> Type mkBigCoreTupTy = mkChunkified mkBoxedTupleTy -- | The unit expression unitExpr :: CoreExpr unitExpr = Var unitDataConId {- ************************************************************************ * * \subsection{Tuple destructors} * * ************************************************************************ -} -- | Builds a selector which 
scrutises the given -- expression and extracts the one name from the list given. -- If you want the no-shadowing rule to apply, the caller -- is responsible for making sure that none of these names -- are in scope. -- -- If there is just one 'Id' in the tuple, then the selector is -- just the identity. -- -- If necessary, we pattern match on a \"big\" tuple. -- -- A tuple selector is not linear in its argument. Consequently, the case -- expression built by `mkTupleSelector` must consume its scrutinee 'Many' -- times. And all the argument variables must have multiplicity 'Many'. mkTupleSelector, mkTupleSelector1 :: [Id] -- ^ The 'Id's to pattern match the tuple against -> Id -- ^ The 'Id' to select -> Id -- ^ A variable of the same type as the scrutinee -> CoreExpr -- ^ Scrutinee -> CoreExpr -- ^ Selector expression -- mkTupleSelector [a,b,c,d] b v e -- = case e of v { -- (p,q) -> case p of p { -- (a,b) -> b }} -- We use 'tpl' vars for the p,q, since shadowing does not matter. -- -- In fact, it's more convenient to generate it innermost first, getting -- -- case (case e of v -- (p,q) -> p) of p -- (a,b) -> b mkTupleSelector vars the_var scrut_var scrut = mk_tup_sel (chunkify vars) the_var where mk_tup_sel [vars] the_var = mkSmallTupleSelector vars the_var scrut_var scrut mk_tup_sel vars_s the_var = mkSmallTupleSelector group the_var tpl_v $ mk_tup_sel (chunkify tpl_vs) tpl_v where tpl_tys = [mkBoxedTupleTy (map idType gp) | gp <- vars_s] tpl_vs = mkTemplateLocals tpl_tys [(tpl_v, group)] = [(tpl,gp) | (tpl,gp) <- zipEqual "mkTupleSelector" tpl_vs vars_s, the_var `elem` gp ] -- ^ 'mkTupleSelector1' is like 'mkTupleSelector' -- but one-tuples are NOT flattened (see Note [Flattening one-tuples]) mkTupleSelector1 vars the_var scrut_var scrut | [_] <- vars = mkSmallTupleSelector1 vars the_var scrut_var scrut | otherwise = mkTupleSelector vars the_var scrut_var scrut -- | Like 'mkTupleSelector' but for tuples that are guaranteed -- never to be \"big\". 
-- -- > mkSmallTupleSelector [x] x v e = [| e |] -- > mkSmallTupleSelector [x,y,z] x v e = [| case e of v { (x,y,z) -> x } |] mkSmallTupleSelector, mkSmallTupleSelector1 :: [Id] -- The tuple args -> Id -- The selected one -> Id -- A variable of the same type as the scrutinee -> CoreExpr -- Scrutinee -> CoreExpr mkSmallTupleSelector [var] should_be_the_same_var _ scrut = assert (var == should_be_the_same_var) $ scrut -- Special case for 1-tuples mkSmallTupleSelector vars the_var scrut_var scrut = mkSmallTupleSelector1 vars the_var scrut_var scrut -- ^ 'mkSmallTupleSelector1' is like 'mkSmallTupleSelector' -- but one-tuples are NOT flattened (see Note [Flattening one-tuples]) mkSmallTupleSelector1 vars the_var scrut_var scrut = assert (notNull vars) $ Case scrut scrut_var (idType the_var) [Alt (DataAlt (tupleDataCon Boxed (length vars))) vars (Var the_var)] -- | A generalization of 'mkTupleSelector', allowing the body -- of the case to be an arbitrary expression. -- -- To avoid shadowing, we use uniques to invent new variables. -- -- If necessary we pattern match on a \"big\" tuple. mkTupleCase :: UniqSupply -- ^ For inventing names of intermediate variables -> [Id] -- ^ The tuple identifiers to pattern match on -> CoreExpr -- ^ Body of the case -> Id -- ^ A variable of the same type as the scrutinee -> CoreExpr -- ^ Scrutinee -> CoreExpr -- ToDo: eliminate cases where none of the variables are needed. 
-- -- mkTupleCase uniqs [a,b,c,d] body v e -- = case e of v { (p,q) -> -- case p of p { (a,b) -> -- case q of q { (c,d) -> -- body }}} mkTupleCase uniqs vars body scrut_var scrut = mk_tuple_case uniqs (chunkify vars) body where -- This is the case where don't need any nesting mk_tuple_case _ [vars] body = mkSmallTupleCase vars body scrut_var scrut -- This is the case where we must make nest tuples at least once mk_tuple_case us vars_s body = let (us', vars', body') = foldr one_tuple_case (us, [], body) vars_s in mk_tuple_case us' (chunkify vars') body' one_tuple_case chunk_vars (us, vs, body) = let (uniq, us') = takeUniqFromSupply us scrut_var = mkSysLocal (fsLit "ds") uniq Many (mkBoxedTupleTy (map idType chunk_vars)) body' = mkSmallTupleCase chunk_vars body scrut_var (Var scrut_var) in (us', scrut_var:vs, body') -- | As 'mkTupleCase', but for a tuple that is small enough to be guaranteed -- not to need nesting. mkSmallTupleCase :: [Id] -- ^ The tuple args -> CoreExpr -- ^ Body of the case -> Id -- ^ A variable of the same type as the scrutinee -> CoreExpr -- ^ Scrutinee -> CoreExpr mkSmallTupleCase [var] body _scrut_var scrut = bindNonRec var scrut body mkSmallTupleCase vars body scrut_var scrut -- One branch no refinement? = Case scrut scrut_var (exprType body) [Alt (DataAlt (tupleDataCon Boxed (length vars))) vars body] {- ************************************************************************ * * Floats * * ************************************************************************ -} data FloatBind = FloatLet CoreBind | FloatCase CoreExpr Id AltCon [Var] -- case e of y { C ys -> ... 
} -- See Note [Floating single-alternative cases] in GHC.Core.Opt.SetLevels instance Outputable FloatBind where ppr (FloatLet b) = text "LET" <+> ppr b ppr (FloatCase e b c bs) = hang (text "CASE" <+> ppr e <+> text "of" <+> ppr b) 2 (ppr c <+> ppr bs) wrapFloat :: FloatBind -> CoreExpr -> CoreExpr wrapFloat (FloatLet defns) body = Let defns body wrapFloat (FloatCase e b con bs) body = mkSingleAltCase e b con bs body -- | Applies the floats from right to left. That is @wrapFloats [b1, b2, …, bn] -- u = let b1 in let b2 in … in let bn in u@ wrapFloats :: [FloatBind] -> CoreExpr -> CoreExpr wrapFloats floats expr = foldr wrapFloat expr floats bindBindings :: CoreBind -> [Var] bindBindings (NonRec b _) = [b] bindBindings (Rec bnds) = map fst bnds floatBindings :: FloatBind -> [Var] floatBindings (FloatLet bnd) = bindBindings bnd floatBindings (FloatCase _ b _ bs) = b:bs {- ************************************************************************ * * \subsection{Common list manipulation expressions} * * ************************************************************************ Call the constructor Ids when building explicit lists, so that they interact well with rules. 
-} -- | Makes a list @[]@ for lists of the specified type mkNilExpr :: Type -> CoreExpr mkNilExpr ty = mkCoreConApps nilDataCon [Type ty] -- | Makes a list @(:)@ for lists of the specified type mkConsExpr :: Type -> CoreExpr -> CoreExpr -> CoreExpr mkConsExpr ty hd tl = mkCoreConApps consDataCon [Type ty, hd, tl] -- | Make a list containing the given expressions, where the list has the given type mkListExpr :: Type -> [CoreExpr] -> CoreExpr mkListExpr ty xs = foldr (mkConsExpr ty) (mkNilExpr ty) xs mkNonEmptyListExpr :: Type -> CoreExpr -> [CoreExpr] -> CoreExpr mkNonEmptyListExpr ty x xs = mkCoreConApps nonEmptyDataCon [Type ty, x, mkListExpr ty xs] -- | Make a fully applied 'foldr' expression mkFoldrExpr :: MonadThings m => Type -- ^ Element type of the list -> Type -- ^ Fold result type -> CoreExpr -- ^ "Cons" function expression for the fold -> CoreExpr -- ^ "Nil" expression for the fold -> CoreExpr -- ^ List expression being folded acress -> m CoreExpr mkFoldrExpr elt_ty result_ty c n list = do foldr_id <- lookupId foldrName return (Var foldr_id `App` Type elt_ty `App` Type result_ty `App` c `App` n `App` list) -- | Make a 'build' expression applied to a locally-bound worker function mkBuildExpr :: (MonadFail m, MonadThings m, MonadUnique m) => Type -- ^ Type of list elements to be built -> ((Id, Type) -> (Id, Type) -> m CoreExpr) -- ^ Function that, given information about the 'Id's -- of the binders for the build worker function, returns -- the body of that worker -> m CoreExpr mkBuildExpr elt_ty mk_build_inside = do n_tyvar <- newTyVar alphaTyVar let n_ty = mkTyVarTy n_tyvar c_ty = mkVisFunTysMany [elt_ty, n_ty] n_ty [c, n] <- sequence [mkSysLocalM (fsLit "c") Many c_ty, mkSysLocalM (fsLit "n") Many n_ty] build_inside <- mk_build_inside (c, c_ty) (n, n_ty) build_id <- lookupId buildName return $ Var build_id `App` Type elt_ty `App` mkLams [n_tyvar, c, n] build_inside where newTyVar tyvar_tmpl = do uniq <- getUniqueM return (setTyVarUnique tyvar_tmpl uniq) 
{- ************************************************************************ * * Manipulating Maybe data type * * ************************************************************************ -} -- | Makes a Nothing for the specified type mkNothingExpr :: Type -> CoreExpr mkNothingExpr ty = mkConApp nothingDataCon [Type ty] -- | Makes a Just from a value of the specified type mkJustExpr :: Type -> CoreExpr -> CoreExpr mkJustExpr ty val = mkConApp justDataCon [Type ty, val] {- ************************************************************************ * * Error expressions * * ************************************************************************ -} mkRuntimeErrorApp :: Id -- Should be of type (forall a. Addr# -> a) -- where Addr# points to a UTF8 encoded string -> Type -- The type to instantiate 'a' -> String -- The string to print -> CoreExpr mkRuntimeErrorApp err_id res_ty err_msg = mkApps (Var err_id) [ Type (getRuntimeRep res_ty) , Type res_ty, err_string ] where err_string = Lit (mkLitString err_msg) mkImpossibleExpr :: Type -> CoreExpr mkImpossibleExpr res_ty = mkRuntimeErrorApp rUNTIME_ERROR_ID res_ty "Impossible case alternative" {- ************************************************************************ * * Error Ids * * ************************************************************************ GHC randomly injects these into the code. @patError@ is just a version of @error@ for pattern-matching failures. It knows various ``codes'' which expand to longer strings---this saves space! @absentErr@ is a thing we put in for ``absent'' arguments. They jolly well shouldn't be yanked on, but if one is, then you will get a friendly message from @absentErr@ (rather than a totally random crash). 
-} errorIds :: [Id] errorIds = [ rUNTIME_ERROR_ID, nON_EXHAUSTIVE_GUARDS_ERROR_ID, nO_METHOD_BINDING_ERROR_ID, pAT_ERROR_ID, rEC_CON_ERROR_ID, rEC_SEL_ERROR_ID, aBSENT_ERROR_ID, aBSENT_SUM_FIELD_ERROR_ID, tYPE_ERROR_ID, -- Used with Opt_DeferTypeErrors, see #10284 rAISE_OVERFLOW_ID, rAISE_UNDERFLOW_ID, rAISE_DIVZERO_ID ] recSelErrorName, runtimeErrorName, absentErrorName :: Name recConErrorName, patErrorName :: Name nonExhaustiveGuardsErrorName, noMethodBindingErrorName :: Name typeErrorName :: Name absentSumFieldErrorName :: Name raiseOverflowName, raiseUnderflowName, raiseDivZeroName :: Name recSelErrorName = err_nm "recSelError" recSelErrorIdKey rEC_SEL_ERROR_ID runtimeErrorName = err_nm "runtimeError" runtimeErrorIdKey rUNTIME_ERROR_ID recConErrorName = err_nm "recConError" recConErrorIdKey rEC_CON_ERROR_ID patErrorName = err_nm "patError" patErrorIdKey pAT_ERROR_ID typeErrorName = err_nm "typeError" typeErrorIdKey tYPE_ERROR_ID noMethodBindingErrorName = err_nm "noMethodBindingError" noMethodBindingErrorIdKey nO_METHOD_BINDING_ERROR_ID nonExhaustiveGuardsErrorName = err_nm "nonExhaustiveGuardsError" nonExhaustiveGuardsErrorIdKey nON_EXHAUSTIVE_GUARDS_ERROR_ID err_nm :: String -> Unique -> Id -> Name err_nm str uniq id = mkWiredInIdName cONTROL_EXCEPTION_BASE (fsLit str) uniq id rEC_SEL_ERROR_ID, rUNTIME_ERROR_ID, rEC_CON_ERROR_ID :: Id pAT_ERROR_ID, nO_METHOD_BINDING_ERROR_ID, nON_EXHAUSTIVE_GUARDS_ERROR_ID :: Id tYPE_ERROR_ID, aBSENT_ERROR_ID, aBSENT_SUM_FIELD_ERROR_ID :: Id rAISE_OVERFLOW_ID, rAISE_UNDERFLOW_ID, rAISE_DIVZERO_ID :: Id rEC_SEL_ERROR_ID = mkRuntimeErrorId recSelErrorName rUNTIME_ERROR_ID = mkRuntimeErrorId runtimeErrorName rEC_CON_ERROR_ID = mkRuntimeErrorId recConErrorName pAT_ERROR_ID = mkRuntimeErrorId patErrorName nO_METHOD_BINDING_ERROR_ID = mkRuntimeErrorId noMethodBindingErrorName nON_EXHAUSTIVE_GUARDS_ERROR_ID = mkRuntimeErrorId nonExhaustiveGuardsErrorName tYPE_ERROR_ID = mkRuntimeErrorId typeErrorName -- Note 
[aBSENT_SUM_FIELD_ERROR_ID] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Unboxed sums are transformed into unboxed tuples in GHC.Stg.Unarise.mkUbxSum -- and fields that can't be reached are filled with rubbish values. -- For instance, consider the case of the program: -- -- f :: (# Int | Float# #) -> Int -- f = ... -- -- x = f (# | 2.0## #) -- -- Unarise will represent f's unboxed sum argument as a tuple (# Int#, Int, -- Float# #), where Int# is a tag. Consequently, `x` will be rewritten to: -- -- x = f (# 2#, ???, 2.0## #) -- -- We must come up with some rubbish literal to use in place of `???`. In the -- case of unboxed integer types this is easy: we can simply use 0 for -- Int#/Word# and 0.0 Float#/Double#. -- -- However, coming up with a rubbish pointer value is more delicate as the -- value must satisfy the following requirements: -- -- 1. it needs to be a valid closure pointer for the GC (not a NULL pointer) -- -- 2. it can't take arguments because it's used in unarise and applying an -- argument would require allocating a thunk, which is both difficult to -- do and costly. -- -- 3. it shouldn't be CAFfy since this would make otherwise non-CAFfy -- bindings CAFfy, incurring a cost in GC performance. Given that unboxed -- sums are intended to be used in performance-critical code, this is to -- We work-around this by declaring the absentSumFieldError as non-CAFfy, -- as described in Note [Wired-in exceptions are not CAFfy]. -- -- Getting this wrong causes hard-to-debug runtime issues, see #15038. -- -- 4. it can't be defined in `base` package. Afterall, not all code which -- uses unboxed sums uses depends upon `base`. Specifically, this became -- an issue when we wanted to use unboxed sums in boot libraries used by -- `base`, see #17791. -- -- To fill this role we define `ghc-prim:GHC.Prim.Panic.absentSumFieldError` -- with the type: -- -- absentSumFieldError :: forall a. 
a -- -- Note that this type is something of a lie since Unarise may use it at an -- unlifted type. However, this lie is benign as absent sum fields are examined -- only by the GC, which does not care about levity.. -- -- When entered, this closure calls `stg_panic#`, which immediately halts -- execution and cannot be caught. This is in contrast to most other runtime -- errors, which are thrown as proper Haskell exceptions. This design is -- intentional since entering an absent sum field is an indication that -- something has gone horribly wrong, very likely due to a compiler bug. -- -- Note [Wired-in exceptions are not CAFfy] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- GHC has logic wiring-in a small number of exceptions, which may be thrown in -- generated code. Specifically, these are implemented via closures (defined -- in `GHC.Prim.Exception` in `ghc-prim`) which, when entered, raise the desired -- exception. For instance, in the case of OverflowError we have -- -- raiseOverflow :: forall a. a -- raiseOverflow = runRW# (\s -> -- case raiseOverflow# s of -- (# _, _ #) -> let x = x in x) -- -- where `raiseOverflow#` is defined in the rts/Exception.cmm. -- -- Note that `raiseOverflow` and friends, being top-level thunks, are CAFs. -- Normally, this would be reflected in their IdInfo; however, as these -- functions are widely used and CAFfyness is transitive, we very much want to -- avoid declaring them as CAFfy. This is especially true in especially in -- performance-critical code like that using unboxed sums and -- absentSumFieldError. -- -- Consequently, `mkExceptionId` instead declares the exceptions to be -- non-CAFfy and rather ensure in the RTS (in `initBuiltinGcRoots` in -- rts/RtsStartup.c) that these closures remain reachable by creating a -- StablePtr to each. Note that we are using the StablePtr mechanism not -- because we need a StablePtr# object, but rather because the stable pointer -- table is a source of GC roots. 
-- -- At some point we could consider removing this optimisation as it is quite -- fragile, but we do want to be careful to avoid adding undue cost. Unboxed -- sums in particular are intended to be used in performance-critical contexts. -- -- See #15038, #21141. absentSumFieldErrorName = mkWiredInIdName gHC_PRIM_PANIC (fsLit "absentSumFieldError") absentSumFieldErrorIdKey aBSENT_SUM_FIELD_ERROR_ID absentErrorName = mkWiredInIdName gHC_PRIM_PANIC (fsLit "absentError") absentErrorIdKey aBSENT_ERROR_ID raiseOverflowName = mkWiredInIdName gHC_PRIM_EXCEPTION (fsLit "raiseOverflow") raiseOverflowIdKey rAISE_OVERFLOW_ID raiseUnderflowName = mkWiredInIdName gHC_PRIM_EXCEPTION (fsLit "raiseUnderflow") raiseUnderflowIdKey rAISE_UNDERFLOW_ID raiseDivZeroName = mkWiredInIdName gHC_PRIM_EXCEPTION (fsLit "raiseDivZero") raiseDivZeroIdKey rAISE_DIVZERO_ID aBSENT_SUM_FIELD_ERROR_ID = mkExceptionId absentSumFieldErrorName rAISE_OVERFLOW_ID = mkExceptionId raiseOverflowName rAISE_UNDERFLOW_ID = mkExceptionId raiseUnderflowName rAISE_DIVZERO_ID = mkExceptionId raiseDivZeroName -- | Exception with type \"forall a. a\" -- -- Any exceptions added via this function needs to be added to -- the RTS's initBuiltinGcRoots() function. mkExceptionId :: Name -> Id mkExceptionId name = mkVanillaGlobalWithInfo name (mkSpecForAllTys [alphaTyVar] (mkTyVarTy alphaTyVar)) -- forall a . a (divergingIdInfo [] `setCafInfo` NoCafRefs) -- See Note [Wired-in exceptions are not CAFfy] mkRuntimeErrorId :: Name -> Id -- Error function -- with type: forall (r:RuntimeRep) (a:TYPE r). Addr# -> a -- with arity: 1 -- which diverges after being given one argument -- The Addr# is expected to be the address of -- a UTF8-encoded error string mkRuntimeErrorId name = mkVanillaGlobalWithInfo name runtimeErrorTy (divergingIdInfo [evalDmd]) -- Do *not* mark them as NoCafRefs, because they can indeed have -- CAF refs. 
For example, pAT_ERROR_ID calls GHC.Err.untangle, -- which has some CAFs -- In due course we may arrange that these error-y things are -- regarded by the GC as permanently live, in which case we -- can give them NoCaf info. As it is, any function that calls -- any pc_bottoming_Id will itself have CafRefs, which bloats -- SRTs. runtimeErrorTy :: Type -- forall (rr :: RuntimeRep) (a :: rr). Addr# -> a -- See Note [Error and friends have an "open-tyvar" forall] runtimeErrorTy = mkSpecForAllTys [runtimeRep1TyVar, openAlphaTyVar] (mkVisFunTyMany addrPrimTy openAlphaTy) -- | An 'IdInfo' for an Id, such as 'aBSENT_ERROR_ID' or 'raiseOverflow', that -- throws an (imprecise) exception after being supplied one value arg for every -- argument 'Demand' in the list. The demands end up in the demand signature. -- -- 1. Sets the demand signature to unleash the given arg dmds 'botDiv' -- 2. Sets the arity info so that it matches the length of arg demands -- 3. Sets a bottoming CPR sig with the correct arity -- -- It's important that all 3 agree on the arity, which is what this defn ensures. divergingIdInfo :: [Demand] -> IdInfo divergingIdInfo arg_dmds = vanillaIdInfo `setArityInfo` arity `setDmdSigInfo` mkClosedDmdSig arg_dmds botDiv `setCprSigInfo` mkCprSig arity botCpr where arity = length arg_dmds {- Note [Error and friends have an "open-tyvar" forall] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'error' and 'undefined' have types error :: forall (v :: RuntimeRep) (a :: TYPE v). String -> a undefined :: forall (v :: RuntimeRep) (a :: TYPE v). a Notice the runtime-representation polymorphism. This ensures that "error" can be instantiated at unboxed as well as boxed types. This is OK because it never returns, so the return type is irrelevant. 
************************************************************************ * * aBSENT_ERROR_ID * * ************************************************************************ Note [aBSENT_ERROR_ID] ~~~~~~~~~~~~~~~~~~~~~~ We use aBSENT_ERROR_ID to build absent fillers for lifted types in workers. E.g. f x = (case x of (a,b) -> b) + 1::Int The demand analyser figures out that only the second component of x is used, and does a w/w split thus f x = case x of (a,b) -> $wf b $wf b = let a = absentError "blah" x = (a,b) in After some simplification, the (absentError "blah") thunk normally goes away. See also Note [Absent fillers] in GHC.Core.Opt.WorkWrap.Utils. Historical Note --------------- We used to have exprIsHNF respond True to absentError and *not* mark it as diverging. Here's the reason for the former. It doesn't apply anymore because we no longer say that `a` is absent (A). Instead it gets (head strict) demand 1A and we won't emit the absent error: #14285 had, roughly data T a = MkT a !a {-# INLINABLE f #-} f x = case x of MkT a b -> g (MkT b a) It turned out that g didn't use the second component, and hence f doesn't use the first. But the stable-unfolding for f looks like \x. case x of MkT a b -> g ($WMkT b a) where $WMkT is the wrapper for MkT that evaluates its arguments. We apply the same w/w split to this unfolding (see Note [Worker/wrapper for INLINABLE functions] in GHC.Core.Opt.WorkWrap) so the template ends up like \b. let a = absentError "blah" x = MkT a b in case x of MkT a b -> g ($WMkT b a) After doing case-of-known-constructor, and expanding $WMkT we get \b -> g (case absentError "blah" of a -> MkT b a) Yikes! That bogusly appears to evaluate the absentError! This is extremely tiresome. Another way to think of this is that, in Core, it is an invariant that a strict data constructor, like MkT, must be applied only to an argument in HNF. So (absentError "blah") had better be non-bottom. 
So the "solution" is to add a special case for absentError to exprIsHNFlike. This allows Simplify.rebuildCase, in the Note [Case to let transformation] branch, to convert the case on absentError into a let. We also make absentError *not* be diverging, unlike the other error-ids, so that we can be sure not to remove the case branches before converting the case to a let. If, by some bug or bizarre happenstance, we ever call absentError, we should throw an exception. This should never happen, of course, but we definitely can't return anything. e.g. if somehow we had case absentError "foo" of Nothing -> ... Just x -> ... then if we return, the case expression will select a field and continue. Seg fault city. Better to throw an exception. (Even though we've said it is in HNF :-) It might seem a bit surprising that seq on absentError is simply erased absentError "foo" `seq` x ==> x but that should be okay; since there's no pattern match we can't really be relying on anything from it. -} aBSENT_ERROR_ID -- See Note [aBSENT_ERROR_ID] = mkVanillaGlobalWithInfo absentErrorName absent_ty id_info where absent_ty = mkSpecForAllTys [alphaTyVar] (mkVisFunTyMany addrPrimTy alphaTy) -- Not runtime-rep polymorphic. aBSENT_ERROR_ID is only used for -- lifted-type things; see Note [Absent fillers] in GHC.Core.Opt.WorkWrap.Utils id_info = divergingIdInfo [evalDmd] -- NB: CAFFY! 
mkAbsentErrorApp :: Type -- The type to instantiate 'a' -> String -- The string to print -> CoreExpr mkAbsentErrorApp res_ty err_msg = mkApps (Var aBSENT_ERROR_ID) [ Type res_ty, err_string ] where err_string = Lit (mkLitString err_msg) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Map/Expr.hs0000644000000000000000000004325214472400112020520 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# OPTIONS_GHC -Wno-orphans #-} -- Eq (DeBruijn CoreExpr) and Eq (DeBruijn CoreAlt) module GHC.Core.Map.Expr ( -- * Maps over Core expressions CoreMap, emptyCoreMap, extendCoreMap, lookupCoreMap, foldCoreMap, -- * Alpha equality eqDeBruijnExpr, eqCoreExpr, -- * 'TrieMap' class reexports TrieMap(..), insertTM, deleteTM, lkDFreeVar, xtDFreeVar, lkDNamed, xtDNamed, (>.>), (|>), (|>>), ) where import GHC.Prelude import GHC.Data.TrieMap import GHC.Core.Map.Type import GHC.Core import GHC.Core.Type import GHC.Types.Tickish import GHC.Types.Var import GHC.Utils.Misc import GHC.Utils.Outputable import qualified Data.Map as Map import GHC.Types.Name.Env import Control.Monad( (>=>) ) {- This module implements TrieMaps over Core related data structures like CoreExpr or Type. It is built on the Tries from the TrieMap module. The code is very regular and boilerplate-like, but there is some neat handling of *binders*. In effect they are deBruijn numbered on the fly. -} ---------------------- -- Recall that -- Control.Monad.(>=>) :: (a -> Maybe b) -> (b -> Maybe c) -> a -> Maybe c -- The CoreMap makes heavy use of GenMap. However the CoreMap Types are not -- known when defining GenMap so we can only specialize them here. 
{-# SPECIALIZE lkG :: Key CoreMapX -> CoreMapG a -> Maybe a #-} {-# SPECIALIZE xtG :: Key CoreMapX -> XT a -> CoreMapG a -> CoreMapG a #-} {-# SPECIALIZE mapG :: (a -> b) -> CoreMapG a -> CoreMapG b #-} {-# SPECIALIZE fdG :: (a -> b -> b) -> CoreMapG a -> b -> b #-} {- ************************************************************************ * * CoreMap * * ************************************************************************ -} {- Note [Binders] ~~~~~~~~~~~~~~ * In general we check binders as late as possible because types are less likely to differ than expression structure. That's why cm_lam :: CoreMapG (TypeMapG a) rather than cm_lam :: TypeMapG (CoreMapG a) * We don't need to look at the type of some binders, notably - the case binder in (Case _ b _ _) - the binders in an alternative because they are totally fixed by the context Note [Empty case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For a key (Case e b ty (alt:alts)) we don't need to look the return type 'ty', because every alternative has that type. * For a key (Case e b ty []) we MUST look at the return type 'ty', because otherwise (Case (error () "urk") _ Int []) would compare equal to (Case (error () "urk") _ Bool []) which is utterly wrong (#6097) We could compare the return type regardless, but the wildly common case is that it's unnecessary, so we have two fields (cm_case and cm_ecase) for the two possibilities. Only cm_ecase looks at the type. See also Note [Empty case alternatives] in GHC.Core. -} -- | @CoreMap a@ is a map from 'CoreExpr' to @a@. If you are a client, this -- is the type you want. 
newtype CoreMap a = CoreMap (CoreMapG a) instance TrieMap CoreMap where type Key CoreMap = CoreExpr emptyTM = CoreMap emptyTM lookupTM k (CoreMap m) = lookupTM (deBruijnize k) m alterTM k f (CoreMap m) = CoreMap (alterTM (deBruijnize k) f m) foldTM k (CoreMap m) = foldTM k m mapTM f (CoreMap m) = CoreMap (mapTM f m) filterTM f (CoreMap m) = CoreMap (filterTM f m) -- | @CoreMapG a@ is a map from @DeBruijn CoreExpr@ to @a@. The extended -- key makes it suitable for recursive traversal, since it can track binders, -- but it is strictly internal to this module. If you are including a 'CoreMap' -- inside another 'TrieMap', this is the type you want. type CoreMapG = GenMap CoreMapX -- | @CoreMapX a@ is the base map from @DeBruijn CoreExpr@ to @a@, but without -- the 'GenMap' optimization. data CoreMapX a = CM { cm_var :: VarMap a , cm_lit :: LiteralMap a , cm_co :: CoercionMapG a , cm_type :: TypeMapG a , cm_cast :: CoreMapG (CoercionMapG a) , cm_tick :: CoreMapG (TickishMap a) , cm_app :: CoreMapG (CoreMapG a) , cm_lam :: CoreMapG (BndrMap a) -- Note [Binders] , cm_letn :: CoreMapG (CoreMapG (BndrMap a)) , cm_letr :: ListMap CoreMapG (CoreMapG (ListMap BndrMap a)) , cm_case :: CoreMapG (ListMap AltMap a) , cm_ecase :: CoreMapG (TypeMapG a) -- Note [Empty case alternatives] } instance Eq (DeBruijn CoreExpr) where (==) = eqDeBruijnExpr eqDeBruijnExpr :: DeBruijn CoreExpr -> DeBruijn CoreExpr -> Bool eqDeBruijnExpr (D env1 e1) (D env2 e2) = go e1 e2 where go (Var v1) (Var v2) = eqDeBruijnVar (D env1 v1) (D env2 v2) go (Lit lit1) (Lit lit2) = lit1 == lit2 -- See Note [Using tcView inside eqDeBruijnType] in GHC.Core.Map.Type go (Type t1) (Type t2) = eqDeBruijnType (D env1 t1) (D env2 t2) -- See Note [Alpha-equality for Coercion arguments] go (Coercion {}) (Coercion {}) = True go (Cast e1 co1) (Cast e2 co2) = D env1 co1 == D env2 co2 && go e1 e2 go (App f1 a1) (App f2 a2) = go f1 f2 && go a1 a2 go (Tick n1 e1) (Tick n2 e2) = eqDeBruijnTickish (D env1 n1) (D env2 n2) && go e1 
e2 go (Lam b1 e1) (Lam b2 e2) -- See Note [Using tcView inside eqDeBruijnType] in GHC.Core.Map.Type = eqDeBruijnType (D env1 (varType b1)) (D env2 (varType b2)) && D env1 (varMultMaybe b1) == D env2 (varMultMaybe b2) && eqDeBruijnExpr (D (extendCME env1 b1) e1) (D (extendCME env2 b2) e2) go (Let (NonRec v1 r1) e1) (Let (NonRec v2 r2) e2) = go r1 r2 -- See Note [Alpha-equality for let-bindings] && eqDeBruijnExpr (D (extendCME env1 v1) e1) (D (extendCME env2 v2) e2) go (Let (Rec ps1) e1) (Let (Rec ps2) e2) = equalLength ps1 ps2 -- See Note [Alpha-equality for let-bindings] && all2 (\b1 b2 -> -- See Note [Using tcView inside eqDeBruijnType] in -- GHC.Core.Map.Type eqDeBruijnType (D env1 (varType b1)) (D env2 (varType b2))) bs1 bs2 && D env1' rs1 == D env2' rs2 && eqDeBruijnExpr (D env1' e1) (D env2' e2) where (bs1,rs1) = unzip ps1 (bs2,rs2) = unzip ps2 env1' = extendCMEs env1 bs1 env2' = extendCMEs env2 bs2 go (Case e1 b1 t1 a1) (Case e2 b2 t2 a2) | null a1 -- See Note [Empty case alternatives] = null a2 && go e1 e2 && D env1 t1 == D env2 t2 | otherwise = go e1 e2 && D (extendCME env1 b1) a1 == D (extendCME env2 b2) a2 go _ _ = False eqDeBruijnTickish :: DeBruijn CoreTickish -> DeBruijn CoreTickish -> Bool eqDeBruijnTickish (D env1 t1) (D env2 t2) = go t1 t2 where go (Breakpoint lext lid lids) (Breakpoint rext rid rids) = lid == rid && D env1 lids == D env2 rids && lext == rext go l r = l == r -- Compares for equality, modulo alpha eqCoreExpr :: CoreExpr -> CoreExpr -> Bool eqCoreExpr e1 e2 = eqDeBruijnExpr (deBruijnize e1) (deBruijnize e2) {- Note [Alpha-equality for Coercion arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 'Coercion' constructor only appears in argument positions, and so, if the functions are equal, then the arguments must have equal types. Because the comparison for coercions (correctly) checks only their types, checking for alpha-equality of the coercions is redundant. 
-} {- Note [Alpha-equality for let-bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For /recursive/ let-bindings we need to check that the types of the binders are alpha-equivalent. Otherwise letrec (x : Bool) = x in x and letrec (y : Char) = y in y would be considered alpha-equivalent, which they are obviously not. For /non-recursive/ let-bindings, we do not have to check that the types of the binders are alpha-equivalent. When the RHSs (the expressions) of the non-recursive let-binders are well-formed and well-typed (which we assume they are at this point in the compiler), and the RHSs are alpha-equivalent, then the bindings must have the same type. In addition, it is also worth pointing out that letrec { x = e1; y = e2 } in b is NOT considered equal to letrec { y = e2; x = e1 } in b -} emptyE :: CoreMapX a emptyE = CM { cm_var = emptyTM, cm_lit = emptyTM , cm_co = emptyTM, cm_type = emptyTM , cm_cast = emptyTM, cm_app = emptyTM , cm_lam = emptyTM, cm_letn = emptyTM , cm_letr = emptyTM, cm_case = emptyTM , cm_ecase = emptyTM, cm_tick = emptyTM } instance TrieMap CoreMapX where type Key CoreMapX = DeBruijn CoreExpr emptyTM = emptyE lookupTM = lkE alterTM = xtE foldTM = fdE mapTM = mapE filterTM = ftE -------------------------- mapE :: (a->b) -> CoreMapX a -> CoreMapX b mapE f (CM { cm_var = cvar, cm_lit = clit , cm_co = cco, cm_type = ctype , cm_cast = ccast , cm_app = capp , cm_lam = clam, cm_letn = cletn , cm_letr = cletr, cm_case = ccase , cm_ecase = cecase, cm_tick = ctick }) = CM { cm_var = mapTM f cvar, cm_lit = mapTM f clit , cm_co = mapTM f cco, cm_type = mapTM f ctype , cm_cast = mapTM (mapTM f) ccast, cm_app = mapTM (mapTM f) capp , cm_lam = mapTM (mapTM f) clam, cm_letn = mapTM (mapTM (mapTM f)) cletn , cm_letr = mapTM (mapTM (mapTM f)) cletr, cm_case = mapTM (mapTM f) ccase , cm_ecase = mapTM (mapTM f) cecase, cm_tick = mapTM (mapTM f) ctick } ftE :: (a->Bool) -> CoreMapX a -> CoreMapX a ftE f (CM { cm_var = cvar, cm_lit = clit , cm_co = cco, 
cm_type = ctype , cm_cast = ccast , cm_app = capp , cm_lam = clam, cm_letn = cletn , cm_letr = cletr, cm_case = ccase , cm_ecase = cecase, cm_tick = ctick }) = CM { cm_var = filterTM f cvar, cm_lit = filterTM f clit , cm_co = filterTM f cco, cm_type = filterTM f ctype , cm_cast = mapTM (filterTM f) ccast, cm_app = mapTM (filterTM f) capp , cm_lam = mapTM (filterTM f) clam, cm_letn = mapTM (mapTM (filterTM f)) cletn , cm_letr = mapTM (mapTM (filterTM f)) cletr, cm_case = mapTM (filterTM f) ccase , cm_ecase = mapTM (filterTM f) cecase, cm_tick = mapTM (filterTM f) ctick } -------------------------- lookupCoreMap :: CoreMap a -> CoreExpr -> Maybe a lookupCoreMap cm e = lookupTM e cm extendCoreMap :: CoreMap a -> CoreExpr -> a -> CoreMap a extendCoreMap m e v = alterTM e (\_ -> Just v) m foldCoreMap :: (a -> b -> b) -> b -> CoreMap a -> b foldCoreMap k z m = foldTM k m z emptyCoreMap :: CoreMap a emptyCoreMap = emptyTM instance Outputable a => Outputable (CoreMap a) where ppr m = text "CoreMap elts" <+> ppr (foldTM (:) m []) ------------------------- fdE :: (a -> b -> b) -> CoreMapX a -> b -> b fdE k m = foldTM k (cm_var m) . foldTM k (cm_lit m) . foldTM k (cm_co m) . foldTM k (cm_type m) . foldTM (foldTM k) (cm_cast m) . foldTM (foldTM k) (cm_tick m) . foldTM (foldTM k) (cm_app m) . foldTM (foldTM k) (cm_lam m) . foldTM (foldTM (foldTM k)) (cm_letn m) . foldTM (foldTM (foldTM k)) (cm_letr m) . foldTM (foldTM k) (cm_case m) . 
foldTM (foldTM k) (cm_ecase m) -- lkE: lookup in trie for expressions lkE :: DeBruijn CoreExpr -> CoreMapX a -> Maybe a lkE (D env expr) cm = go expr cm where go (Var v) = cm_var >.> lkVar env v go (Lit l) = cm_lit >.> lookupTM l go (Type t) = cm_type >.> lkG (D env t) go (Coercion c) = cm_co >.> lkG (D env c) go (Cast e c) = cm_cast >.> lkG (D env e) >=> lkG (D env c) go (Tick tickish e) = cm_tick >.> lkG (D env e) >=> lkTickish tickish go (App e1 e2) = cm_app >.> lkG (D env e2) >=> lkG (D env e1) go (Lam v e) = cm_lam >.> lkG (D (extendCME env v) e) >=> lkBndr env v go (Let (NonRec b r) e) = cm_letn >.> lkG (D env r) >=> lkG (D (extendCME env b) e) >=> lkBndr env b go (Let (Rec prs) e) = let (bndrs,rhss) = unzip prs env1 = extendCMEs env bndrs in cm_letr >.> lkList (lkG . D env1) rhss >=> lkG (D env1 e) >=> lkList (lkBndr env1) bndrs go (Case e b ty as) -- See Note [Empty case alternatives] | null as = cm_ecase >.> lkG (D env e) >=> lkG (D env ty) | otherwise = cm_case >.> lkG (D env e) >=> lkList (lkA (extendCME env b)) as xtE :: DeBruijn CoreExpr -> XT a -> CoreMapX a -> CoreMapX a xtE (D env (Var v)) f m = m { cm_var = cm_var m |> xtVar env v f } xtE (D env (Type t)) f m = m { cm_type = cm_type m |> xtG (D env t) f } xtE (D env (Coercion c)) f m = m { cm_co = cm_co m |> xtG (D env c) f } xtE (D _ (Lit l)) f m = m { cm_lit = cm_lit m |> alterTM l f } xtE (D env (Cast e c)) f m = m { cm_cast = cm_cast m |> xtG (D env e) |>> xtG (D env c) f } xtE (D env (Tick t e)) f m = m { cm_tick = cm_tick m |> xtG (D env e) |>> xtTickish t f } xtE (D env (App e1 e2)) f m = m { cm_app = cm_app m |> xtG (D env e2) |>> xtG (D env e1) f } xtE (D env (Lam v e)) f m = m { cm_lam = cm_lam m |> xtG (D (extendCME env v) e) |>> xtBndr env v f } xtE (D env (Let (NonRec b r) e)) f m = m { cm_letn = cm_letn m |> xtG (D (extendCME env b) e) |>> xtG (D env r) |>> xtBndr env b f } xtE (D env (Let (Rec prs) e)) f m = m { cm_letr = let (bndrs,rhss) = unzip prs env1 = extendCMEs env bndrs in 
cm_letr m |> xtList (xtG . D env1) rhss |>> xtG (D env1 e) |>> xtList (xtBndr env1) bndrs f } xtE (D env (Case e b ty as)) f m | null as = m { cm_ecase = cm_ecase m |> xtG (D env e) |>> xtG (D env ty) f } | otherwise = m { cm_case = cm_case m |> xtG (D env e) |>> let env1 = extendCME env b in xtList (xtA env1) as f } -- TODO: this seems a bit dodgy, see 'eqTickish' type TickishMap a = Map.Map CoreTickish a lkTickish :: CoreTickish -> TickishMap a -> Maybe a lkTickish = lookupTM xtTickish :: CoreTickish -> XT a -> TickishMap a -> TickishMap a xtTickish = alterTM ------------------------ data AltMap a -- A single alternative = AM { am_deflt :: CoreMapG a , am_data :: DNameEnv (CoreMapG a) , am_lit :: LiteralMap (CoreMapG a) } instance TrieMap AltMap where type Key AltMap = CoreAlt emptyTM = AM { am_deflt = emptyTM , am_data = emptyDNameEnv , am_lit = emptyTM } lookupTM = lkA emptyCME alterTM = xtA emptyCME foldTM = fdA mapTM = mapA filterTM = ftA instance Eq (DeBruijn CoreAlt) where D env1 a1 == D env2 a2 = go a1 a2 where go (Alt DEFAULT _ rhs1) (Alt DEFAULT _ rhs2) = D env1 rhs1 == D env2 rhs2 go (Alt (LitAlt lit1) _ rhs1) (Alt (LitAlt lit2) _ rhs2) = lit1 == lit2 && D env1 rhs1 == D env2 rhs2 go (Alt (DataAlt dc1) bs1 rhs1) (Alt (DataAlt dc2) bs2 rhs2) = dc1 == dc2 && D (extendCMEs env1 bs1) rhs1 == D (extendCMEs env2 bs2) rhs2 go _ _ = False mapA :: (a->b) -> AltMap a -> AltMap b mapA f (AM { am_deflt = adeflt, am_data = adata, am_lit = alit }) = AM { am_deflt = mapTM f adeflt , am_data = mapTM (mapTM f) adata , am_lit = mapTM (mapTM f) alit } ftA :: (a->Bool) -> AltMap a -> AltMap a ftA f (AM { am_deflt = adeflt, am_data = adata, am_lit = alit }) = AM { am_deflt = filterTM f adeflt , am_data = mapTM (filterTM f) adata , am_lit = mapTM (filterTM f) alit } lkA :: CmEnv -> CoreAlt -> AltMap a -> Maybe a lkA env (Alt DEFAULT _ rhs) = am_deflt >.> lkG (D env rhs) lkA env (Alt (LitAlt lit) _ rhs) = am_lit >.> lookupTM lit >=> lkG (D env rhs) lkA env (Alt (DataAlt dc) 
bs rhs) = am_data >.> lkDNamed dc >=> lkG (D (extendCMEs env bs) rhs) xtA :: CmEnv -> CoreAlt -> XT a -> AltMap a -> AltMap a xtA env (Alt DEFAULT _ rhs) f m = m { am_deflt = am_deflt m |> xtG (D env rhs) f } xtA env (Alt (LitAlt l) _ rhs) f m = m { am_lit = am_lit m |> alterTM l |>> xtG (D env rhs) f } xtA env (Alt (DataAlt d) bs rhs) f m = m { am_data = am_data m |> xtDNamed d |>> xtG (D (extendCMEs env bs) rhs) f } fdA :: (a -> b -> b) -> AltMap a -> b -> b fdA k m = foldTM k (am_deflt m) . foldTM (foldTM k) (am_data m) . foldTM (foldTM k) (am_lit m) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Map/Type.hs0000644000000000000000000006225514472400112020527 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} module GHC.Core.Map.Type ( -- * Re-export generic interface TrieMap(..), XT, -- * Maps over 'Type's TypeMap, emptyTypeMap, extendTypeMap, lookupTypeMap, foldTypeMap, LooseTypeMap, -- ** With explicit scoping CmEnv, lookupCME, extendTypeMapWithScope, lookupTypeMapWithScope, mkDeBruijnContext, extendCME, extendCMEs, emptyCME, -- * Utilities for use by friends only TypeMapG, CoercionMapG, DeBruijn(..), deBruijnize, eqDeBruijnType, eqDeBruijnVar, BndrMap, xtBndr, lkBndr, VarMap, xtVar, lkVar, lkDFreeVar, xtDFreeVar, xtDNamed, lkDNamed ) where -- This module is separate from GHC.Core.Map.Expr to avoid a module loop -- between GHC.Core.Unify (which depends on this module) and GHC.Core import GHC.Prelude import GHC.Core.Type import GHC.Core.Coercion import GHC.Core.TyCo.Rep import GHC.Data.TrieMap import GHC.Data.FastString import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Unique.FM import GHC.Utils.Outputable import GHC.Utils.Panic import qualified Data.Map as Map import qualified Data.IntMap as IntMap import Control.Monad ( (>=>) ) import 
GHC.Data.Maybe -- NB: Be careful about RULES and type families (#5821). So we should make sure -- to specify @Key TypeMapX@ (and not @DeBruijn Type@, the reduced form) {-# SPECIALIZE lkG :: Key TypeMapX -> TypeMapG a -> Maybe a #-} {-# SPECIALIZE lkG :: Key CoercionMapX -> CoercionMapG a -> Maybe a #-} {-# SPECIALIZE xtG :: Key TypeMapX -> XT a -> TypeMapG a -> TypeMapG a #-} {-# SPECIALIZE xtG :: Key CoercionMapX -> XT a -> CoercionMapG a -> CoercionMapG a #-} {-# SPECIALIZE mapG :: (a -> b) -> TypeMapG a -> TypeMapG b #-} {-# SPECIALIZE mapG :: (a -> b) -> CoercionMapG a -> CoercionMapG b #-} {-# SPECIALIZE fdG :: (a -> b -> b) -> TypeMapG a -> b -> b #-} {-# SPECIALIZE fdG :: (a -> b -> b) -> CoercionMapG a -> b -> b #-} {- ************************************************************************ * * Coercions * * ************************************************************************ -} -- We should really never care about the contents of a coercion. Instead, -- just look up the coercion's type. 
-- | @CoercionMap a@ is the client-facing map keyed on 'Coercion'.
-- Keys are converted to deBruijn form ('deBruijnize', i.e. an empty
-- binder context) on every access, so the map is modulo alpha-renaming.
newtype CoercionMap a = CoercionMap (CoercionMapG a)

instance TrieMap CoercionMap where
   type Key CoercionMap = Coercion
   emptyTM                     = CoercionMap emptyTM
   lookupTM k  (CoercionMap m) = lookupTM (deBruijnize k) m
   alterTM k f (CoercionMap m) = CoercionMap (alterTM (deBruijnize k) f m)
   foldTM k    (CoercionMap m) = foldTM k m
   mapTM f     (CoercionMap m) = CoercionMap (mapTM f m)
   filterTM f  (CoercionMap m) = CoercionMap (filterTM f m)

-- | @CoercionMapG a@ is a map from @DeBruijn Coercion@ to @a@, adding
-- the 'GenMap' (empty/singleton) optimization on top of 'CoercionMapX'.
type CoercionMapG = GenMap CoercionMapX

-- | The base map for coercions.  Since we only ever care about a
-- coercion's type (see the comment above), this is simply a 'TypeMapX'
-- keyed on 'coercionType'.
newtype CoercionMapX a = CoercionMapX (TypeMapX a)

instance TrieMap CoercionMapX where
  type Key CoercionMapX = DeBruijn Coercion
  emptyTM = CoercionMapX emptyTM
  lookupTM = lkC
  alterTM  = xtC
  foldTM f (CoercionMapX core_tm) = foldTM f core_tm
  mapTM f (CoercionMapX core_tm)  = CoercionMapX (mapTM f core_tm)
  filterTM f (CoercionMapX core_tm) = CoercionMapX (filterTM f core_tm)

-- Two coercions are considered equal whenever their types are equal
-- (modulo alpha-renaming); the coercion proofs themselves are never
-- inspected.
instance Eq (DeBruijn Coercion) where
  D env1 co1 == D env2 co2
    = D env1 (coercionType co1) == D env2 (coercionType co2)

-- | Lookup in the coercion trie: delegates to the type trie, keyed on
-- the coercion's type.
lkC :: DeBruijn Coercion -> CoercionMapX a -> Maybe a
lkC (D env co) (CoercionMapX core_tm)
  = lkT (D env $ coercionType co) core_tm

-- | Alter the coercion trie: again keyed on the coercion's type.
xtC :: DeBruijn Coercion -> XT a -> CoercionMapX a -> CoercionMapX a
xtC (D env co) f (CoercionMapX m)
  = CoercionMapX (xtT (D env $ coercionType co) f m)

{-
************************************************************************
*                                                                      *
                   Types
*                                                                      *
************************************************************************
-}

-- | @TypeMapG a@ is a map from @DeBruijn Type@ to @a@.  The extended
-- key makes it suitable for recursive traversal, since it can track binders,
-- but it is strictly internal to this module.  If you are including a 'TypeMap'
-- inside another 'TrieMap', this is the type you want.  Note that this
-- lookup does not do a kind-check.  Thus, all keys in this map must have
-- the same kind.  Also note that this map respects the distinction between
-- @Type@ and @Constraint@, despite the fact that they are equivalent type
-- synonyms in Core.
type TypeMapG = GenMap TypeMapX -- | @TypeMapX a@ is the base map from @DeBruijn Type@ to @a@, but without the -- 'GenMap' optimization. See Note [Computing equality on types] in GHC.Core.Type. data TypeMapX a = TM { tm_var :: VarMap a , tm_app :: TypeMapG (TypeMapG a) -- Note [Equality on AppTys] in GHC.Core.Type , tm_tycon :: DNameEnv a -- only InvisArg arrows here , tm_funty :: TypeMapG (TypeMapG (TypeMapG a)) -- keyed on the argument, result rep, and result -- constraints are never linear-restricted and are always lifted -- See also Note [Equality on FunTys] in GHC.Core.TyCo.Rep , tm_forall :: TypeMapG (BndrMap a) -- See Note [Binders] in GHC.Core.Map.Expr , tm_tylit :: TyLitMap a , tm_coerce :: Maybe a } -- Note that there is no tyconapp case; see Note [Equality on AppTys] in GHC.Core.Type -- | Squeeze out any synonyms, and change TyConApps to nested AppTys. Why the -- last one? See Note [Equality on AppTys] in GHC.Core.Type -- -- Note, however, that we keep Constraint and Type apart here, despite the fact -- that they are both synonyms of TYPE 'LiftedRep (see #11715). -- -- We also keep (Eq a => a) as a FunTy, distinct from ((->) (Eq a) a). trieMapView :: Type -> Maybe Type trieMapView ty -- First check for TyConApps that need to be expanded to -- AppTy chains. | Just (tc, tys@(_:_)) <- tcSplitTyConApp_maybe ty = Just $ foldl' AppTy (mkTyConTy tc) tys -- Then resolve any remaining nullary synonyms. | Just ty' <- tcView ty = Just ty' trieMapView _ = Nothing instance TrieMap TypeMapX where type Key TypeMapX = DeBruijn Type emptyTM = emptyT lookupTM = lkT alterTM = xtT foldTM = fdT mapTM = mapT filterTM = filterT instance Eq (DeBruijn Type) where (==) = eqDeBruijnType {- Note [Using tcView inside eqDeBruijnType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `eqDeBruijnType` uses `tcView` and thus treats Type and Constraint as distinct -- see Note [coreView vs tcView] in GHC.Core.Type. 
We do that because `eqDeBruijnType` is used in TrieMaps, which are used
for instance for instance selection in the type checker.  [Or at least
will be soon.]

However, the odds that we have two expressions that are identical save
for the 'Type'/'Constraint' distinction are low.  (Not impossible to do.
But doubtful anyone has ever done so in the history of Haskell.)

And it's actually all OK: 'eqExpr' is conservative: if `eqExpr e1 e2`
returns 'True', then it must be that `e1` behaves identically to `e2` in
all contexts.  But if `eqExpr e1 e2` returns 'False', then we learn
nothing.  The use of 'tcView' where we expect 'coreView' means 'eqExpr'
returns 'False' a bit more often than it should.  This might, say, stop
a `RULE` from firing or CSE from optimizing an expression.  Stopping `RULE`
firing is good actually: `RULES` are written in Haskell, where
`Type /= Constraint`.  Stopping CSE is unfortunate, but tolerable.
-}

-- | An equality relation between two 'Type's (known below as @t1 :: k1@
-- and @t2 :: k2@)
data TypeEquality = TNEQ -- ^ @t1 /= t2@
                  | TEQ  -- ^ @t1 ~ t2@ and there are no casts in either,
                         -- therefore we can conclude @k1 ~ k2@
                  | TEQX -- ^ @t1 ~ t2@ yet one of the types contains a cast so
                         -- they may differ in kind

-- | Alpha-equality on 'Type's, each carried with the deBruijn numbering
-- ('CmEnv') of the binders in scope on its side.  If the types are equal
-- but one of them contains a cast ('TEQX'), we additionally compare the
-- kinds, since the cast may have changed them.
eqDeBruijnType :: DeBruijn Type -> DeBruijn Type -> Bool
eqDeBruijnType env_t1@(D env1 t1) env_t2@(D env2 t2) =
    -- See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep
    -- See Note [Computing equality on types]
    case go env_t1 env_t2 of
      TEQX  -> toBool (go (D env1 k1) (D env2 k2))
      ty_eq -> toBool ty_eq
  where
    k1 = typeKind t1
    k2 = typeKind t2

    toBool :: TypeEquality -> Bool
    toBool TNEQ = False
    toBool _    = True

    liftEquality :: Bool -> TypeEquality
    liftEquality False = TNEQ
    liftEquality _     = TEQ

    -- Record that a cast was crossed, downgrading TEQ to TEQX.
    hasCast :: TypeEquality -> TypeEquality
    hasCast TEQ = TEQX
    hasCast eq  = eq

    -- Conjunction: TNEQ is absorbing, TEQX is contagious.
    andEq :: TypeEquality -> TypeEquality -> TypeEquality
    andEq TNEQ _ = TNEQ
    andEq TEQX e = hasCast e
    andEq TEQ  e = e

    -- See Note [Comparing nullary type synonyms] in GHC.Core.Type
    go (D _ (TyConApp tc1 [])) (D _ (TyConApp tc2 []))
      | tc1 == tc2
      = TEQ

    go env_t@(D env t) env_t'@(D env' t')
      -- See Note [Using tcView inside eqDeBruijnType]
      | Just new_t  <- tcView t  = go (D env new_t) env_t'
      | Just new_t' <- tcView t' = go env_t (D env' new_t')
      | otherwise
      = case (t, t') of
          -- See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep
          -- BUG FIX: the right-hand type lives under env', not env, so it
          -- must keep its own environment when a cast is stripped off;
          -- every other case below consistently pairs primed types with env'.
          (CastTy t1 _, _)  -> hasCast (go (D env t1) (D env' t'))
          (_, CastTy t1' _) -> hasCast (go (D env t) (D env' t1'))
          (TyVarTy v, TyVarTy v')
            -> liftEquality $ eqDeBruijnVar (D env v) (D env' v')
          -- See Note [Equality on AppTys] in GHC.Core.Type
          (AppTy t1 t2, s)
            | Just (t1', t2') <- repSplitAppTy_maybe s
            -> go (D env t1) (D env' t1') `andEq` go (D env t2) (D env' t2')
          (s, AppTy t1' t2')
            | Just (t1, t2) <- repSplitAppTy_maybe s
            -> go (D env t1) (D env' t1') `andEq` go (D env t2) (D env' t2')
          (FunTy v1 w1 t1 t2, FunTy v1' w1' t1' t2')
            -> liftEquality (v1 == v1') `andEq`
               -- NB: eqDeBruijnType does the kind check requested by
               -- Note [Equality on FunTys] in GHC.Core.TyCo.Rep
               liftEquality (eqDeBruijnType (D env t1) (D env' t1')) `andEq`
               liftEquality (eqDeBruijnType (D env t2) (D env' t2')) `andEq`
               -- Comparing multiplicities last because the test is usually
               -- true.  BUG FIX: w1' belongs to the right-hand type and must
               -- be compared under env', not env.
               go (D env w1) (D env' w1')
          (TyConApp tc tys, TyConApp tc' tys')
            -> liftEquality (tc == tc') `andEq` gos env env' tys tys'
          (LitTy l, LitTy l')
            -> liftEquality (l == l')
          (ForAllTy (Bndr tv vis) ty, ForAllTy (Bndr tv' vis') ty')
            -> -- See Note [ForAllTy and typechecker equality] in
               -- GHC.Tc.Solver.Canonical for why we use `sameVis` here
               liftEquality (vis `sameVis` vis') `andEq`
               go (D env (varType tv)) (D env' (varType tv')) `andEq`
               go (D (extendCME env tv) ty) (D (extendCME env' tv') ty')
          (CoercionTy {}, CoercionTy {})
            -> TEQ
          _ -> TNEQ

    -- Element-wise comparison of two type lists under the given envs.
    gos _  _  []         []         = TEQ
    gos e1 e2 (ty1:tys1) (ty2:tys2) = go (D e1 ty1) (D e2 ty2) `andEq`
                                      gos e1 e2 tys1 tys2
    gos _  _  _          _          = TNEQ

instance Eq (DeBruijn Var) where
  (==) = eqDeBruijnVar

eqDeBruijnVar :: DeBruijn Var -> DeBruijn Var -> Bool
eqDeBruijnVar (D env1 v1) (D env2 v2) = case (lookupCME env1 v1, lookupCME env2 v2) of (Just b1, Just b2) -> b1 == b2 (Nothing, Nothing) -> v1 == v2 _ -> False instance {-# OVERLAPPING #-} Outputable a => Outputable (TypeMapG a) where ppr m = text "TypeMap elts" <+> ppr (foldTM (:) m []) emptyT :: TypeMapX a emptyT = TM { tm_var = emptyTM , tm_app = emptyTM , tm_tycon = emptyDNameEnv , tm_funty = emptyTM , tm_forall = emptyTM , tm_tylit = emptyTyLitMap , tm_coerce = Nothing } mapT :: (a->b) -> TypeMapX a -> TypeMapX b mapT f (TM { tm_var = tvar, tm_app = tapp, tm_tycon = ttycon , tm_funty = tfunty, tm_forall = tforall, tm_tylit = tlit , tm_coerce = tcoerce }) = TM { tm_var = mapTM f tvar , tm_app = mapTM (mapTM f) tapp , tm_tycon = mapTM f ttycon , tm_funty = mapTM (mapTM (mapTM f)) tfunty , tm_forall = mapTM (mapTM f) tforall , tm_tylit = mapTM f tlit , tm_coerce = fmap f tcoerce } ----------------- lkT :: DeBruijn Type -> TypeMapX a -> Maybe a lkT (D env ty) m = go ty m where go ty | Just ty' <- trieMapView ty = go ty' go (TyVarTy v) = tm_var >.> lkVar env v go (AppTy t1 t2) = tm_app >.> lkG (D env t1) >=> lkG (D env t2) go (TyConApp tc []) = tm_tycon >.> lkDNamed tc go ty@(TyConApp _ (_:_)) = pprPanic "lkT TyConApp" (ppr ty) go (LitTy l) = tm_tylit >.> lkTyLit l go (ForAllTy (Bndr tv _) ty) = tm_forall >.> lkG (D (extendCME env tv) ty) >=> lkBndr env tv go (FunTy InvisArg _ arg res) | Just res_rep <- getRuntimeRep_maybe res = tm_funty >.> lkG (D env arg) >=> lkG (D env res_rep) >=> lkG (D env res) go ty@(FunTy {}) = pprPanic "lkT FunTy" (ppr ty) go (CastTy t _) = go t go (CoercionTy {}) = tm_coerce ----------------- xtT :: DeBruijn Type -> XT a -> TypeMapX a -> TypeMapX a xtT (D env ty) f m | Just ty' <- trieMapView ty = xtT (D env ty') f m xtT (D env (TyVarTy v)) f m = m { tm_var = tm_var m |> xtVar env v f } xtT (D env (AppTy t1 t2)) f m = m { tm_app = tm_app m |> xtG (D env t1) |>> xtG (D env t2) f } xtT (D _ (TyConApp tc [])) f m = m { tm_tycon = tm_tycon m 
|> xtDNamed tc f } xtT (D env (FunTy InvisArg _ t1 t2)) f m = m { tm_funty = tm_funty m |> xtG (D env t1) |>> xtG (D env t2_rep) |>> xtG (D env t2) f } where t2_rep = expectJust "xtT FunTy InvisArg" (getRuntimeRep_maybe t2) xtT (D _ (LitTy l)) f m = m { tm_tylit = tm_tylit m |> xtTyLit l f } xtT (D env (CastTy t _)) f m = xtT (D env t) f m xtT (D _ (CoercionTy {})) f m = m { tm_coerce = tm_coerce m |> f } xtT (D env (ForAllTy (Bndr tv _) ty)) f m = m { tm_forall = tm_forall m |> xtG (D (extendCME env tv) ty) |>> xtBndr env tv f } xtT (D _ ty@(TyConApp _ (_:_))) _ _ = pprPanic "xtT TyConApp" (ppr ty) xtT (D _ ty@(FunTy {})) _ _ = pprPanic "xtT FunTy" (ppr ty) fdT :: (a -> b -> b) -> TypeMapX a -> b -> b fdT k m = foldTM k (tm_var m) . foldTM (foldTM k) (tm_app m) . foldTM k (tm_tycon m) . foldTM (foldTM (foldTM k)) (tm_funty m) . foldTM (foldTM k) (tm_forall m) . foldTyLit k (tm_tylit m) . foldMaybe k (tm_coerce m) filterT :: (a -> Bool) -> TypeMapX a -> TypeMapX a filterT f (TM { tm_var = tvar, tm_app = tapp, tm_tycon = ttycon , tm_funty = tfunty, tm_forall = tforall, tm_tylit = tlit , tm_coerce = tcoerce }) = TM { tm_var = filterTM f tvar , tm_app = mapTM (filterTM f) tapp , tm_tycon = filterTM f ttycon , tm_funty = mapTM (mapTM (filterTM f)) tfunty , tm_forall = mapTM (filterTM f) tforall , tm_tylit = filterTM f tlit , tm_coerce = filterMaybe f tcoerce } ------------------------ data TyLitMap a = TLM { tlm_number :: Map.Map Integer a , tlm_string :: UniqFM FastString a , tlm_char :: Map.Map Char a } instance TrieMap TyLitMap where type Key TyLitMap = TyLit emptyTM = emptyTyLitMap lookupTM = lkTyLit alterTM = xtTyLit foldTM = foldTyLit mapTM = mapTyLit filterTM = filterTyLit emptyTyLitMap :: TyLitMap a emptyTyLitMap = TLM { tlm_number = Map.empty, tlm_string = emptyUFM, tlm_char = Map.empty } mapTyLit :: (a->b) -> TyLitMap a -> TyLitMap b mapTyLit f (TLM { tlm_number = tn, tlm_string = ts, tlm_char = tc }) = TLM { tlm_number = Map.map f tn, tlm_string = mapUFM f 
ts, tlm_char = Map.map f tc } lkTyLit :: TyLit -> TyLitMap a -> Maybe a lkTyLit l = case l of NumTyLit n -> tlm_number >.> Map.lookup n StrTyLit n -> tlm_string >.> (`lookupUFM` n) CharTyLit n -> tlm_char >.> Map.lookup n xtTyLit :: TyLit -> XT a -> TyLitMap a -> TyLitMap a xtTyLit l f m = case l of NumTyLit n -> m { tlm_number = Map.alter f n (tlm_number m) } StrTyLit n -> m { tlm_string = alterUFM f (tlm_string m) n } CharTyLit n -> m { tlm_char = Map.alter f n (tlm_char m) } foldTyLit :: (a -> b -> b) -> TyLitMap a -> b -> b foldTyLit l m = flip (foldUFM l) (tlm_string m) . flip (Map.foldr l) (tlm_number m) . flip (Map.foldr l) (tlm_char m) filterTyLit :: (a -> Bool) -> TyLitMap a -> TyLitMap a filterTyLit f (TLM { tlm_number = tn, tlm_string = ts, tlm_char = tc }) = TLM { tlm_number = Map.filter f tn, tlm_string = filterUFM f ts, tlm_char = Map.filter f tc } ------------------------------------------------- -- | @TypeMap a@ is a map from 'Type' to @a@. If you are a client, this -- is the type you want. The keys in this map may have different kinds. newtype TypeMap a = TypeMap (TypeMapG (TypeMapG a)) lkTT :: DeBruijn Type -> TypeMap a -> Maybe a lkTT (D env ty) (TypeMap m) = lkG (D env $ typeKind ty) m >>= lkG (D env ty) xtTT :: DeBruijn Type -> XT a -> TypeMap a -> TypeMap a xtTT (D env ty) f (TypeMap m) = TypeMap (m |> xtG (D env $ typeKind ty) |>> xtG (D env ty) f) -- Below are some client-oriented functions which operate on 'TypeMap'. 
instance TrieMap TypeMap where
    type Key TypeMap = Type
    emptyTM = TypeMap emptyTM
    -- Client-facing operations deBruijnize the key (empty binder context)
    -- before consulting the kind-then-type nested trie ('lkTT'/'xtTT').
    lookupTM k m = lkTT (deBruijnize k) m
    alterTM k f m = xtTT (deBruijnize k) f m
    foldTM k (TypeMap m) = foldTM (foldTM k) m
    mapTM f (TypeMap m) = TypeMap (mapTM (mapTM f) m)
    -- NOTE(review): filtering only the inner maps may leave behind empty
    -- inner tries keyed by kind; harmless for lookups, but not pruned here.
    filterTM f (TypeMap m) = TypeMap (mapTM (filterTM f) m)

-- | Fold over every value in the map (traversal order is unspecified).
foldTypeMap :: (a -> b -> b) -> b -> TypeMap a -> b
foldTypeMap k z m = foldTM k m z

-- | The empty 'TypeMap'.
emptyTypeMap :: TypeMap a
emptyTypeMap = emptyTM

-- | Look up a 'Type', assuming no enclosing binders (an empty 'CmEnv').
lookupTypeMap :: TypeMap a -> Type -> Maybe a
lookupTypeMap cm t = lookupTM t cm

-- | Insert (or overwrite) a value keyed on the given 'Type'.
extendTypeMap :: TypeMap a -> Type -> a -> TypeMap a
extendTypeMap m t v = alterTM t (const (Just v)) m

-- | Like 'lookupTypeMap', but with an explicit deBruijn context ('CmEnv')
-- for the binders currently in scope.
lookupTypeMapWithScope :: TypeMap a -> CmEnv -> Type -> Maybe a
lookupTypeMapWithScope m cm t = lkTT (D cm t) m

-- | Extend a 'TypeMap' with a type in the given context.
-- @extendTypeMapWithScope m (mkDeBruijnContext [a,b,c]) t v@ is equivalent to
-- @extendTypeMap m (forall a b c. t) v@, but allows reuse of the context over
-- multiple insertions.
extendTypeMapWithScope :: TypeMap a -> CmEnv -> Type -> a -> TypeMap a
extendTypeMapWithScope m cm t v = xtTT (D cm t) (const (Just v)) m

-- | Construct a deBruijn environment with the given variables in scope.
-- e.g. @mkDeBruijnEnv [a,b,c]@ constructs a context @forall a b c.@
mkDeBruijnContext :: [Var] -> CmEnv
mkDeBruijnContext = extendCMEs emptyCME

-- | A 'LooseTypeMap' doesn't do a kind-check.  Thus, when looking up
-- @(t |> g)@, you'll find entries inserted under @t@, even if @g@ is
-- non-reflexive.
newtype LooseTypeMap a = LooseTypeMap (TypeMapG a)

instance TrieMap LooseTypeMap where
  type Key LooseTypeMap = Type
  emptyTM = LooseTypeMap emptyTM
  -- Keys are deBruijnized with an empty binder context; unlike 'TypeMap'
  -- there is no outer trie keyed on the kind.
  lookupTM k (LooseTypeMap m) = lookupTM (deBruijnize k) m
  alterTM k f (LooseTypeMap m) = LooseTypeMap (alterTM (deBruijnize k) f m)
  foldTM f (LooseTypeMap m) = foldTM f m
  mapTM f (LooseTypeMap m) = LooseTypeMap (mapTM f m)
  filterTM f (LooseTypeMap m) = LooseTypeMap (filterTM f m)

{-
************************************************************************
*                                                                      *
                   Variables
*                                                                      *
************************************************************************
-}

type BoundVar = Int  -- Bound variables are deBruijn numbered
type BoundVarMap a = IntMap.IntMap a

-- | A deBruijn-numbering context: maps each in-scope binder to the index
-- at which it was bound.
data CmEnv = CME { cme_next :: !BoundVar       -- Next index to hand out
                 , cme_env  :: VarEnv BoundVar -- Binder -> its index
                 }

-- | The empty context: no binders in scope.
emptyCME :: CmEnv
emptyCME = CME { cme_next = 0, cme_env = emptyVarEnv }

-- | Bring one more binder into scope, assigning it the next index.
extendCME :: CmEnv -> Var -> CmEnv
extendCME (CME { cme_next = bv, cme_env = env }) v
  = CME { cme_next = bv+1, cme_env = extendVarEnv env v bv }

-- | Bring a list of binders into scope, left to right.
extendCMEs :: CmEnv -> [Var] -> CmEnv
extendCMEs env vs = foldl' extendCME env vs

-- | 'Just' the variable's deBruijn index if it is bound in this context;
-- 'Nothing' if it is free.
lookupCME :: CmEnv -> Var -> Maybe BoundVar
lookupCME (CME { cme_env = env }) v = lookupVarEnv env v

-- | @DeBruijn a@ represents @a@ modulo alpha-renaming.  This is achieved
-- by equipping the value with a 'CmEnv', which tracks an on-the-fly deBruijn
-- numbering.  This allows us to define an 'Eq' instance for @DeBruijn a@,
-- even if this was not (easily) possible for @a@.  Note: we purposely don't
-- export the constructor.  Make a helper function if you find yourself
-- needing it.
data DeBruijn a = D CmEnv a

-- | Synthesizes a @DeBruijn a@ from an @a@, by assuming that there are no
-- bound binders (an empty 'CmEnv').  This is usually what you want if there
-- isn't already a 'CmEnv' in scope.
deBruijnize :: a -> DeBruijn a deBruijnize = D emptyCME instance Eq (DeBruijn a) => Eq (DeBruijn [a]) where D _ [] == D _ [] = True D env (x:xs) == D env' (x':xs') = D env x == D env' x' && D env xs == D env' xs' _ == _ = False instance Eq (DeBruijn a) => Eq (DeBruijn (Maybe a)) where D _ Nothing == D _ Nothing = True D env (Just x) == D env' (Just x') = D env x == D env' x' _ == _ = False --------- Variable binders ------------- -- | A 'BndrMap' is a 'TypeMapG' which allows us to distinguish between -- binding forms whose binders have different types. For example, -- if we are doing a 'TrieMap' lookup on @\(x :: Int) -> ()@, we should -- not pick up an entry in the 'TrieMap' for @\(x :: Bool) -> ()@: -- we can disambiguate this by matching on the type (or kind, if this -- a binder in a type) of the binder. -- -- We also need to do the same for multiplicity! Which, since multiplicities are -- encoded simply as a 'Type', amounts to have a Trie for a pair of types. Tries -- of pairs are composition. data BndrMap a = BndrMap (TypeMapG (MaybeMap TypeMapG a)) instance TrieMap BndrMap where type Key BndrMap = Var emptyTM = BndrMap emptyTM lookupTM = lkBndr emptyCME alterTM = xtBndr emptyCME foldTM = fdBndrMap mapTM = mapBndrMap filterTM = ftBndrMap mapBndrMap :: (a -> b) -> BndrMap a -> BndrMap b mapBndrMap f (BndrMap tm) = BndrMap (mapTM (mapTM f) tm) fdBndrMap :: (a -> b -> b) -> BndrMap a -> b -> b fdBndrMap f (BndrMap tm) = foldTM (foldTM f) tm -- We need to use 'BndrMap' for 'Coercion', 'CoreExpr' AND 'Type', since all -- of these data types have binding forms. lkBndr :: CmEnv -> Var -> BndrMap a -> Maybe a lkBndr env v (BndrMap tymap) = do multmap <- lkG (D env (varType v)) tymap lookupTM (D env <$> varMultMaybe v) multmap xtBndr :: forall a . 
CmEnv -> Var -> XT a -> BndrMap a -> BndrMap a xtBndr env v xt (BndrMap tymap) = BndrMap (tymap |> xtG (D env (varType v)) |>> (alterTM (D env <$> varMultMaybe v) xt)) ftBndrMap :: (a -> Bool) -> BndrMap a -> BndrMap a ftBndrMap f (BndrMap tm) = BndrMap (mapTM (filterTM f) tm) --------- Variable occurrence ------------- data VarMap a = VM { vm_bvar :: BoundVarMap a -- Bound variable , vm_fvar :: DVarEnv a } -- Free variable instance TrieMap VarMap where type Key VarMap = Var emptyTM = VM { vm_bvar = IntMap.empty, vm_fvar = emptyDVarEnv } lookupTM = lkVar emptyCME alterTM = xtVar emptyCME foldTM = fdVar mapTM = mapVar filterTM = ftVar mapVar :: (a->b) -> VarMap a -> VarMap b mapVar f (VM { vm_bvar = bv, vm_fvar = fv }) = VM { vm_bvar = mapTM f bv, vm_fvar = mapTM f fv } lkVar :: CmEnv -> Var -> VarMap a -> Maybe a lkVar env v | Just bv <- lookupCME env v = vm_bvar >.> lookupTM bv | otherwise = vm_fvar >.> lkDFreeVar v xtVar :: CmEnv -> Var -> XT a -> VarMap a -> VarMap a xtVar env v f m | Just bv <- lookupCME env v = m { vm_bvar = vm_bvar m |> alterTM bv f } | otherwise = m { vm_fvar = vm_fvar m |> xtDFreeVar v f } fdVar :: (a -> b -> b) -> VarMap a -> b -> b fdVar k m = foldTM k (vm_bvar m) . 
foldTM k (vm_fvar m) lkDFreeVar :: Var -> DVarEnv a -> Maybe a lkDFreeVar var env = lookupDVarEnv env var xtDFreeVar :: Var -> XT a -> DVarEnv a -> DVarEnv a xtDFreeVar v f m = alterDVarEnv f m v ftVar :: (a -> Bool) -> VarMap a -> VarMap a ftVar f (VM { vm_bvar = bv, vm_fvar = fv }) = VM { vm_bvar = filterTM f bv, vm_fvar = filterTM f fv } ------------------------------------------------- lkDNamed :: NamedThing n => n -> DNameEnv a -> Maybe a lkDNamed n env = lookupDNameEnv env (getName n) xtDNamed :: NamedThing n => n -> XT a -> DNameEnv a -> DNameEnv a xtDNamed tc f m = alterDNameEnv f m (getName tc) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Multiplicity.hs0000644000000000000000000003214314472400112021552 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} {-| This module defines the semi-ring of multiplicities, and associated functions. Multiplicities annotate arrow types to indicate the linearity of the arrow (in the sense of linear types). Mult is a type synonym for Type, used only when its kind is Multiplicity. To simplify dealing with multiplicities, functions such as mkMultMul perform simplifications such as Many * x = Many on the fly. -} module GHC.Core.Multiplicity ( Mult , pattern One , pattern Many , isMultMul , mkMultAdd , mkMultMul , mkMultSup , Scaled(..) , scaledMult , scaledThing , unrestricted , linear , tymult , irrelevantMult , mkScaled , scaledSet , scaleScaled , IsSubmult(..) , submult , mapScaledType) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Core.TyCo.Rep import {-# SOURCE #-} GHC.Builtin.Types ( multMulTyCon ) import GHC.Core.Type import GHC.Builtin.Names (multMulTyConKey) import GHC.Types.Unique (hasKey) {- Note [Linear types] ~~~~~~~~~~~~~~~~~~~ This module is the entry point for linear types. The detailed design is in the _Linear Haskell_ article [https://arxiv.org/abs/1710.09756]. 
Other important resources in the linear types implementation wiki page [https://gitlab.haskell.org/ghc/ghc/wikis/linear-types/implementation], and the proposal [https://github.com/ghc-proposals/ghc-proposals/pull/111] which describes the concrete design at length. For the busy developer, though, here is a high-level view of linear types is the following: - Function arrows are annotated with a multiplicity (as defined by type `Mult` and its smart constructors in this module) - Because, as a type constructor, the type of function now has an extra argument, the notation (->) is no longer suitable. We named the function type constructor `FUN`. - (->) retains its backward compatible meaning: `(->) a b = a -> b`. To achieve this, `(->)` is defined as a type synonym to `FUN Many` (see below). - Multiplicities can be reified in Haskell as types of kind `GHC.Types.Multiplicity` - Ground multiplicity (that is, without a variable) can be `One` or `Many` (`Many` is generally rendered as ω in the scientific literature). Functions whose type is annotated with `One` are linear functions, functions whose type is annotated with `Many` are regular functions, often called “unrestricted” to contrast them with linear functions. - A linear function is defined as a function such that *if* its result is consumed exactly once, *then* its argument is consumed exactly once. You can think of “consuming exactly once” as evaluating a value in normal form exactly once (though not necessarily in one go). The _Linear Haskell_ article (see infra) has a more precise definition of “consuming exactly once”. - Data types can have unrestricted fields (the canonical example being the `Unrestricted` data type), then these don't need to be consumed for a value to be consumed exactly once. So consuming a value of type `Unrestricted` exactly once means forcing it at least once. - Why “at least once”? Because if `case u of { C x y -> f (C x y) }` is linear (provided `f` is a linear function). 
So we might as well have done `case u of { !z -> f z }`. So, we can observe constructors as many times as we want, and we are actually allowed to force the same thing several times because laziness means that we are really forcing a the value once, and observing its constructor several times. The type checker and the linter recognise some (but not all) of these multiple forces as indeed linear. Mostly just enough to support variable patterns. - Multiplicities form a semiring. - Multiplicities can also be variables and we can universally quantify over these variables. This is referred to as “multiplicity polymorphism”. Furthermore, multiplicity can be formal semiring expressions combining variables. - Contrary to the paper, the sum of two multiplicities is always `Many`. This will have to change, however, if we want to add a multiplicity for 0. Whether we want to is still debated. - Case expressions have a multiplicity annotation too. A case expression with multiplicity `One`, consumes its scrutinee exactly once (provided the entire case expression is consumed exactly once); whereas a case expression with multiplicity `Many` can consume its scrutinee as many time as it wishes (no matter how much the case expression is consumed). Note [Usages] ~~~~~~~~~~~~~ In the _Linear Haskell_ paper, you'll find typing rules such as these: Γ ⊢ f : A #π-> B Δ ⊢ u : A --------------------------- Γ + kΔ ⊢ f u : B If you read this as a type-checking algorithm going from the bottom up, this reads as: the algorithm has to find a split of some input context Ξ into an appropriate Γ and a Δ such as Ξ = Γ + kΔ, *and the multiplicities are chosen to make f and u typecheck*. This could be achieved by letting the typechecking of `f` use exactly the variable it needs, then passing the remainder, as `Delta` to the typechecking of u. But what does that mean if `x` is bound with multiplicity `p` (a variable) and `f` consumes `x` once? `Delta` would have to contain `x` with multiplicity `p-1`. 
It's not really clear how to make that works. In summary: bottom-up multiplicity checking forgoes addition and multiplication in favour of subtraction and division. And variables make the latter hard. The alternative is to read multiplicities from the top down: as an *output* from the typechecking algorithm, rather than an input. We call these output multiplicities Usages, to distinguish them from the multiplicities which come, as input, from the types of functions. Usages are checked for compatibility with multiplicity annotations using an ordering relation. In other words, the usage of x in the expression u is the smallest multiplicity which can be ascribed to x for u to typecheck. Usages are usually group in a UsageEnv, as defined in the UsageEnv module. So, in our function application example, the typechecking algorithm would receive usage environements f_ue from the typechecking of f, and u_ue from the typechecking of u. Then the output would be f_ue + (k * u_ue). Addition and scaling of usage environment is the pointwise extension of the semiring operations on multiplicities. Note [Zero as a usage] ~~~~~~~~~~~~~~~~~~~~~~ In the current presentation usages are not exactly multiplicities, because they can contain 0, and multiplicities can't. Why do we need a 0 usage? A function which doesn't use its argument will be required to annotate it with `Many`: \(x % Many) -> 0 However, we cannot replace absence with Many when computing usages compositionally: in (x, True) We expect x to have usage 1. But when computing the usage of x in True we would find that x is absent, hence has multiplicity Many. The final multiplicity would be One+Many = Many. Oops! Hence there is a usage Zero for absent variables. Zero is characterised by being the neutral element to usage addition. We may decide to add Zero as a multiplicity in the future. In which case, this distinction will go away. 
Note [Joining usages] ~~~~~~~~~~~~~~~~~~~~~ The usage of a variable is defined, in Note [Usages], as the minimum usage which can be ascribed to a variable. So what is the usage of x in case … of { p1 -> u -- usage env: u_ue ; p2 -> v } -- usage env: v_ue It must be the least upper bound, or _join_, of u_ue(x) and v_ue(x). So, contrary to a declarative presentation where the correct usage of x can be conjured out of thin air, we need to be able to compute the join of two multiplicities. Join is extended pointwise on usage environments. Note [Bottom as a usage] ~~~~~~~~~~~~~~~~~~~~~~ What is the usage of x in case … of {} Per usual linear logic, as well as the _Linear Haskell_ article, x can have every multiplicity. So we need a minimum usage _bottom_, which is also the neutral element for join. In fact, this is not such as nice solution, because it is not clear how to define sum and multiplication with bottom. We give reasonable definitions, but they are not complete (they don't respect the semiring laws, and it's possible to come up with examples of Core transformation which are not well-typed) A better solution would probably be to annotate case expressions with a usage environment, just like they are annotated with a type. Which, probably not coincidentally, is also primarily for empty cases. A side benefit of this approach is that the linter would not need to join multiplicities, anymore; hence would be closer to the presentation in the article. That's because it could use the annotation as the multiplicity for each branch. Note [Data constructors are linear by default] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Data constructors defined without -XLinearTypes (as well as data constructors defined with the Haskell 98 in all circumstances) have all their fields linear. That is, in data Maybe a = Nothing | Just a We have Just :: a %1 -> Just a The goal is to maximise reuse of types between linear code and traditional code. 
This is argued at length in the proposal and the article (links in Note [Linear types]). Note [Polymorphisation of linear fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The choice in Note [Data constructors are linear by default] has an impact on backwards compatibility. Consider map Just We have map :: (a -> b) -> f a -> f b Just :: a %1 -> Just a Types don't match, we should get a type error. But this is legal Haskell 98 code! Bad! Bad! Bad! It could be solved with subtyping, but subtyping doesn't combine well with polymorphism. Instead, we generalise the type of Just, when used as term: Just :: forall {p}. a %p-> Just a This is solely a concern for higher-order code like this: when called fully applied linear constructors are more general than constructors with unrestricted fields. In particular, linear constructors can always be eta-expanded to their Haskell 98 type. This is explained in the paper (but there, we had a different strategy to resolve this type mismatch in higher-order code. It turned out to be insufficient, which is explained in the wiki page as well as the proposal). We only generalise linear fields this way: fields with multiplicity Many, or other multiplicity expressions are exclusive to -XLinearTypes, hence don't have backward compatibility implications. The implementation is described in Note [Typechecking data constructors] in GHC.Tc.Gen.Head. More details in the proposal. 
-} {- Note [Adding new multiplicities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To add a new multiplicity, you need to: * Add the new type with Multiplicity kind * Update cases in mkMultAdd, mkMultMul, mkMultSup, submult, tcSubMult * Check supUE function that computes sup of a multiplicity and Zero -} isMultMul :: Mult -> Maybe (Mult, Mult) isMultMul ty | Just (tc, [x, y]) <- splitTyConApp_maybe ty , tc `hasKey` multMulTyConKey = Just (x, y) | otherwise = Nothing {- Note [Overapproximating multiplicities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions mkMultAdd, mkMultMul, mkMultSup perform operations on multiplicities. They can return overapproximations: their result is merely guaranteed to be a submultiplicity of the actual value. They should be used only when an upper bound is acceptable. In most cases, they are used in usage environments (UsageEnv); in usage environments, replacing a usage with a larger one can only cause more programs to fail to typecheck. In future work, instead of approximating we might add type families and allow users to write types involving operations on multiplicities. In this case, we could enforce more invariants in Mult, for example, enforce that it is in the form of a sum of products, and even that the summands and factors are ordered somehow, to have more equalities. -} -- With only two multiplicities One and Many, we can always replace -- p + q by Many. See Note [Overapproximating multiplicities]. mkMultAdd :: Mult -> Mult -> Mult mkMultAdd _ _ = Many mkMultMul :: Mult -> Mult -> Mult mkMultMul One p = p mkMultMul p One = p mkMultMul Many _ = Many mkMultMul _ Many = Many mkMultMul p q = mkTyConApp multMulTyCon [p, q] scaleScaled :: Mult -> Scaled a -> Scaled a scaleScaled m' (Scaled m t) = Scaled (m' `mkMultMul` m) t -- See Note [Joining usages] -- | @mkMultSup w1 w2@ returns a multiplicity such that @mkMultSup w1 -- w2 >= w1@ and @mkMultSup w1 w2 >= w2@. See Note [Overapproximating multiplicities]. 
mkMultSup :: Mult -> Mult -> Mult mkMultSup = mkMultMul -- Note: If you are changing this logic, check 'supUE' in UsageEnv as well. -- -- * Multiplicity ordering -- data IsSubmult = Submult -- Definitely a submult | Unknown -- Could be a submult, need to ask the typechecker deriving (Show, Eq) instance Outputable IsSubmult where ppr = text . show -- | @submult w1 w2@ check whether a value of multiplicity @w1@ is allowed where a -- value of multiplicity @w2@ is expected. This is a partial order. submult :: Mult -> Mult -> IsSubmult submult _ Many = Submult submult One One = Submult -- The 1 <= p rule submult One _ = Submult submult _ _ = Unknown ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/Arity.hs0000644000000000000000000024235114472400112020720 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Arity and eta expansion -} {-# LANGUAGE CPP #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Arity and eta expansion module GHC.Core.Opt.Arity ( manifestArity, joinRhsArity, exprArity, typeArity , exprEtaExpandArity, findRhsArity , etaExpand, etaExpandAT , exprBotStrictness_maybe -- ** ArityType , ArityType(..), mkBotArityType, mkManifestArityType, expandableArityType , arityTypeArity, maxWithArity, idArityType -- ** Join points , etaExpandToJoinPoint, etaExpandToJoinPointRule -- ** Coercions and casts , pushCoArg, pushCoArgs, pushCoValArg, pushCoTyArg , pushCoercionIntoLambda, pushCoDataCon, collectBindersPushingCo ) where import GHC.Prelude import GHC.Driver.Session ( DynFlags, GeneralFlag(..), gopt ) import GHC.Core import GHC.Core.FVs import GHC.Core.Utils import GHC.Core.DataCon import GHC.Core.TyCon ( tyConArity ) import GHC.Core.TyCon.RecWalk ( initRecTc, checkRecTc ) import GHC.Core.Predicate ( isDictTy, isCallStackPredTy ) import GHC.Core.Multiplicity -- We have two sorts of substitution: -- GHC.Core.Subst.Subst, and GHC.Core.TyCo.TCvSubst -- Both have substTy, substCo Hence need 
for qualification import GHC.Core.Subst as Core import GHC.Core.Type as Type import GHC.Core.Coercion as Type import GHC.Types.Demand import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Id import GHC.Types.Basic import GHC.Types.Tickish import GHC.Builtin.Uniques import GHC.Data.FastString import GHC.Data.Pair import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Trace import GHC.Utils.Misc {- ************************************************************************ * * manifestArity and exprArity * * ************************************************************************ exprArity is a cheap-and-cheerful version of exprEtaExpandArity. It tells how many things the expression can be applied to before doing any work. It doesn't look inside cases, lets, etc. The idea is that exprEtaExpandArity will do the hard work, leaving something that's easy for exprArity to grapple with. In particular, Simplify uses exprArity to compute the ArityInfo for the Id. Originally I thought that it was enough just to look for top-level lambdas, but it isn't. I've seen this foo = PrelBase.timesInt We want foo to get arity 2 even though the eta-expander will leave it unchanged, in the expectation that it'll be inlined. But occasionally it isn't, because foo is blacklisted (used in a rule). Similarly, see the ok_note check in exprEtaExpandArity. So f = __inline_me (\x -> e) won't be eta-expanded. And in any case it seems more robust to have exprArity be a bit more intelligent. But note that (\x y z -> f x y z) should have arity 3, regardless of f's arity. 
-} manifestArity :: CoreExpr -> Arity -- ^ manifestArity sees how many leading value lambdas there are, -- after looking through casts manifestArity (Lam v e) | isId v = 1 + manifestArity e | otherwise = manifestArity e manifestArity (Tick t e) | not (tickishIsCode t) = manifestArity e manifestArity (Cast e _) = manifestArity e manifestArity _ = 0 joinRhsArity :: CoreExpr -> JoinArity -- Join points are supposed to have manifestly-visible -- lambdas at the top: no ticks, no casts, nothing -- Moreover, type lambdas count in JoinArity joinRhsArity (Lam _ e) = 1 + joinRhsArity e joinRhsArity _ = 0 --------------- exprArity :: CoreExpr -> Arity -- ^ An approximate, fast, version of 'exprEtaExpandArity' exprArity e = go e where go (Var v) = idArity v go (Lam x e) | isId x = go e + 1 | otherwise = go e go (Tick t e) | not (tickishIsCode t) = go e go (Cast e co) = trim_arity (go e) (coercionRKind co) -- See Note [exprArity invariant] go (App e (Type _)) = go e go (App f a) | exprIsTrivial a = (go f - 1) `max` 0 -- See Note [exprArity for applications] -- NB: coercions count as a value argument go _ = 0 trim_arity :: Arity -> Type -> Arity trim_arity arity ty = arity `min` length (typeArity ty) --------------- typeArity :: Type -> [OneShotInfo] -- How many value arrows are visible in the type? 
-- We look through foralls, and newtypes -- See Note [exprArity invariant] typeArity ty = go initRecTc ty where go rec_nts ty | Just (_, ty') <- splitForAllTyCoVar_maybe ty = go rec_nts ty' | Just (_,arg,res) <- splitFunTy_maybe ty = typeOneShot arg : go rec_nts res | Just (tc,tys) <- splitTyConApp_maybe ty , Just (ty', _) <- instNewTyCon_maybe tc tys , Just rec_nts' <- checkRecTc rec_nts tc -- See Note [Expanding newtypes and products] -- in GHC.Core.TyCon -- , not (isClassTyCon tc) -- Do not eta-expand through newtype classes -- -- See Note [Newtype classes and eta expansion] -- (no longer required) = go rec_nts' ty' -- Important to look through non-recursive newtypes, so that, eg -- (f x) where f has arity 2, f :: Int -> IO () -- Here we want to get arity 1 for the result! -- -- AND through a layer of recursive newtypes -- e.g. newtype Stream m a b = Stream (m (Either b (a, Stream m a b))) | otherwise = [] --------------- exprBotStrictness_maybe :: CoreExpr -> Maybe (Arity, DmdSig) -- A cheap and cheerful function that identifies bottoming functions -- and gives them a suitable strictness signatures. It's used during -- float-out exprBotStrictness_maybe e = case getBotArity (arityType botStrictnessArityEnv e) of Nothing -> Nothing Just ar -> Just (ar, sig ar) where sig ar = mkClosedDmdSig (replicate ar topDmd) botDiv {- Note [exprArity invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~ exprArity has the following invariants: (1) If typeArity (exprType e) = n, then manifestArity (etaExpand e n) = n That is, etaExpand can always expand as much as typeArity says So the case analysis in etaExpand and in typeArity must match (2) exprArity e <= typeArity (exprType e) (3) Hence if (exprArity e) = n, then manifestArity (etaExpand e n) = n That is, if exprArity says "the arity is n" then etaExpand really can get "n" manifest lambdas to the top. Why is this important? 
Because - In GHC.Iface.Tidy we use exprArity to fix the *final arity* of each top-level Id, and in - In CorePrep we use etaExpand on each rhs, so that the visible lambdas actually match that arity, which in turn means that the StgRhs has the right number of lambdas An alternative would be to do the eta-expansion in GHC.Iface.Tidy, at least for top-level bindings, in which case we would not need the trim_arity in exprArity. That is a less local change, so I'm going to leave it for today! Note [Newtype classes and eta expansion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NB: this nasty special case is no longer required, because for newtype classes we don't use the class-op rule mechanism at all. See Note [Single-method classes] in GHC.Tc.TyCl.Instance. SLPJ May 2013 -------- Old out of date comments, just for interest ----------- We have to be careful when eta-expanding through newtypes. In general it's a good idea, but annoyingly it interacts badly with the class-op rule mechanism. Consider class C a where { op :: a -> a } instance C b => C [b] where op x = ... These translate to co :: forall a. (a->a) ~ C a $copList :: C b -> [b] -> [b] $copList d x = ... $dfList :: C b -> C [b] {-# DFunUnfolding = [$copList] #-} $dfList d = $copList d |> co@[b] Now suppose we have: dCInt :: C Int blah :: [Int] -> [Int] blah = op ($dfList dCInt) Now we want the built-in op/$dfList rule will fire to give blah = $copList dCInt But with eta-expansion 'blah' might (and in #3772, which is slightly more complicated, does) turn into blah = op (\eta. ($dfList dCInt |> sym co) eta) and now it is *much* harder for the op/$dfList rule to fire, because exprIsConApp_maybe won't hold of the argument to op. I considered trying to *make* it hold, but it's tricky and I gave up. The test simplCore/should_compile/T3722 is an excellent example. 
-------- End of old out of date comments, just for interest ----------- Note [exprArity for applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we come to an application we check that the arg is trivial. eg f (fac x) does not have arity 2, even if f has arity 3! * We require that is trivial rather merely cheap. Suppose f has arity 2. Then f (Just y) has arity 0, because if we gave it arity 1 and then inlined f we'd get let v = Just y in \w. which has arity 0. And we try to maintain the invariant that we don't have arity decreases. * The `max 0` is important! (\x y -> f x) has arity 2, even if f is unknown, hence arity 0 ************************************************************************ * * Computing the "arity" of an expression * * ************************************************************************ Note [Definition of arity] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The "arity" of an expression 'e' is n if applying 'e' to *fewer* than n *value* arguments converges rapidly Or, to put it another way there is no work lost in duplicating the partial application (e x1 .. x(n-1)) In the divergent case, no work is lost by duplicating because if the thing is evaluated once, that's the end of the program. Or, to put it another way, in any context C C[ (\x1 .. xn. e x1 .. xn) ] is as efficient as C[ e ] It's all a bit more subtle than it looks: Note [One-shot lambdas] ~~~~~~~~~~~~~~~~~~~~~~~ Consider one-shot lambdas let x = expensive in \y z -> E We want this to have arity 1 if the \y-abstraction is a 1-shot lambda. Note [Dealing with bottom] ~~~~~~~~~~~~~~~~~~~~~~~~~~ A Big Deal with computing arities is expressions like f = \x -> case x of True -> \s -> e1 False -> \s -> e2 This happens all the time when f :: Bool -> IO () In this case we do eta-expand, in order to get that \s to the top, and give f arity 2. This isn't really right in the presence of seq. Consider (f bot) `seq` 1 This should diverge! But if we eta-expand, it won't. 
We ignore this "problem" (unless -fpedantic-bottoms is on), because being scrupulous would lose an important transformation for many programs. (See #5587 for an example.) Consider also f = \x -> error "foo" Here, arity 1 is fine. But if it is f = \x -> case x of True -> error "foo" False -> \y -> x+y then we want to get arity 2. Technically, this isn't quite right, because (f True) `seq` 1 should diverge, but it'll converge if we eta-expand f. Nevertheless, we do so; it improves some programs significantly, and increasing convergence isn't a bad thing. Hence the ABot/ATop in ArityType. So these two transformations aren't always the Right Thing, and we have several tickets reporting unexpected behaviour resulting from this transformation. So we try to limit it as much as possible: (1) Do NOT move a lambda outside a known-bottom case expression case undefined of { (a,b) -> \y -> e } This showed up in #5557 (2) Do NOT move a lambda outside a case unless (a) The scrutinee is ok-for-speculation, or (b) more liberally: the scrutinee is cheap (e.g. a variable), and -fpedantic-bottoms is not enforced (see #2915 for an example) Of course both (1) and (2) are readily defeated by disguising the bottoms. 4. Note [Newtype arity] ~~~~~~~~~~~~~~~~~~~~~~~~ Non-recursive newtypes are transparent, and should not get in the way. We do (currently) eta-expand recursive newtypes too. So if we have, say newtype T = MkT ([T] -> Int) Suppose we have e = coerce T f where f has arity 1. Then: etaExpandArity e = 1; that is, etaExpandArity looks through the coerce. When we eta-expand e to arity 1: eta_expand 1 e T we want to get: coerce T (\x::[T] -> (coerce ([T]->Int) e) x) HOWEVER, note that if you use coerce bogusly you can ge coerce Int negate And since negate has arity 2, you might try to eta expand. But you can't decompose Int to a function type. Hence the final case in eta_expand. 
Note [The state-transformer hack] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have f = e where e has arity n. Then, if we know from the context that f has a usage type like t1 -> ... -> tn -1-> t(n+1) -1-> ... -1-> tm -> ... then we can expand the arity to m. This usage type says that any application (x e1 .. en) will be applied to uniquely to (m-n) more args Consider f = \x. let y = in case x of True -> foo False -> \(s:RealWorld) -> e where foo has arity 1. Then we want the state hack to apply to foo too, so we can eta expand the case. Then we expect that if f is applied to one arg, it'll be applied to two (that's the hack -- we don't really know, and sometimes it's false) See also Id.isOneShotBndr. Note [State hack and bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's a terrible idea to use the state hack on a bottoming function. Here's what happens (#2861): f :: String -> IO T f = \p. error "..." Eta-expand, using the state hack: f = \p. (\s. ((error "...") |> g1) s) |> g2 g1 :: IO T ~ (S -> (S,T)) g2 :: (S -> (S,T)) ~ IO T Extrude the g2 f' = \p. \s. ((error "...") |> g1) s f = f' |> (String -> g2) Discard args for bottomming function f' = \p. \s. ((error "...") |> g1 |> g3 g3 :: (S -> (S,T)) ~ (S,T) Extrude g1.g3 f'' = \p. \s. (error "...") f' = f'' |> (String -> S -> g1.g3) And now we can repeat the whole loop. Aargh! The bug is in applying the state hack to a function which then swallows the argument. This arose in another guise in #3959. Here we had catch# (throw exn >> return ()) Note that (throw :: forall a e. Exn e => e -> a) is called with [a = IO ()]. After inlining (>>) we get catch# (\_. throw {IO ()} exn) We must *not* eta-expand to catch# (\_ _. throw {...} exn) because 'catch#' expects to get a (# _,_ #) after applying its argument to a State#, not another function! In short, we use the state hack to allow us to push let inside a lambda, but not to introduce a new lambda. 
Note [ArityType] ~~~~~~~~~~~~~~~~ ArityType is the result of a compositional analysis on expressions, from which we can decide the real arity of the expression (extracted with function exprEtaExpandArity). We use the following notation: at ::= \o1..on.div div ::= T | x | ⊥ o ::= ? | 1 And omit the \. if n = 0. Examples: \?11.T stands for @AT [NoOneShotInfo,OneShotLam,OneShotLam] topDiv@ ⊥ stands for @AT [] botDiv@ See the 'Outputable' instance for more information. It's pretty simple. Here is what the fields mean. If an arbitrary expression 'f' has ArityType 'at', then * If @at = AT [o1,..,on] botDiv@ (notation: \o1..on.⊥), then @f x1..xn@ definitely diverges. Partial applications to fewer than n args may *or may not* diverge. We allow ourselves to eta-expand bottoming functions, even if doing so may lose some `seq` sharing, let x = in \y. error (g x y) ==> \y. let x = in error (g x y) * If @at = AT [o1,..,on] topDiv@ (notation: \o1..on.T), then expanding 'f' to @\x1..xn. f x1..xn@ loses no sharing, assuming the calls of f respect the one-shot-ness o1..on of its definition. NB 'f' is an arbitrary expression, eg @f = g e1 e2@. This 'f' can have arity type @AT oss _@, with @length oss > 0@, only if e1 e2 are themselves cheap. * In both cases, @f@, @f x1@, ... @f x1 ... x(n-1)@ are definitely really functions, or bottom, but *not* casts from a data type, in at least one case branch. (If it's a function in one case branch but an unsafe cast from a data type in another, the program is bogus.) So eta expansion is dynamically ok; see Note [State hack and bottoming functions], the part about catch# Example: f = \x\y. let v = in \s(one-shot) \t(one-shot). blah 'f' has arity type \??11.T The one-shot-ness means we can, in effect, push that 'let' inside the \st. Suppose f = \xy. x+y Then f :: \??.T f v :: \?.T f :: T Note [Eta reduction in recursive RHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following recursive function: f = \x. ....g (\y. f y).... 
The recursive call of f in its own RHS seems like a fine opportunity for eta-reduction because f has arity 1. And often it is! Alas, that is unsound in general if the eta-reduction happens in a tail context. Making the arity visible in the RHS allows us to eta-reduce f = \x -> f x to f = f which means we optimise terminating programs like (f `seq` ()) into non-terminating ones. Nor is this problem just for tail calls. Consider f = id (\x -> f x) where we have (for some reason) not yet inlined `id`. We must not eta-reduce to f = id f because that will then simplify to `f = f` as before. An immediate idea might be to look at whether the called function is a local loopbreaker and refrain from eta-expanding. But that doesn't work for mutually recursive function like in #21652: f = g g* x = f x Here, g* is the loopbreaker but f isn't. What can we do? Fix 1: Zap `idArity` when analysing recursive RHSs and re-attach the info when entering the let body. Has the disadvantage that other transformations which make use of arity (such as dropping of `seq`s when arity > 0) will no longer work in the RHS. Plus it requires non-trivial refactorings to both the simple optimiser (in the way `subst_opt_bndr` is used) as well as the Simplifier (in the way `simplRecBndrs` and `simplRecJoinBndrs` is used), modifying the SimplEnv's substitution twice in the process. A very complicated stop-gap. Fix 2: Pass the set of enclosing recursive binders to `tryEtaReduce`; these are the ones we should not eta-reduce. All call-site must maintain this set. Example: rec { f1 = ....rec { g = ... (\x. g x)...(\y. f2 y)... }... ; f2 = ...f1... } when eta-reducing those inner lambdas, we need to know that we are in the rec group for {f1, f2, g}. This is very much like the solution in Note [Speculative evaluation] in GHC.CoreToStg.Prep. It is a bit tiresome to maintain this info, because it means another field in SimplEnv and SimpleOptEnv. 
We implement Fix (2) because of it isn't as complicated to maintain as (1). Plus, it is the correct fix to begin with. After all, the arity is correct, but doing the transformation isn't. The moving parts are: * A field `scRecIds` in `SimplEnv` tracks the enclosing recursive binders * We extend the `scRecIds` set in `GHC.Core.Opt.Simplify.simplRecBind` * We consult the set in `is_eta_reduction_sound` in `tryEtaReduce` The situation is very similar to Note [Speculative evaluation] which has the same fix. -} -- | The analysis lattice of arity analysis. It is isomorphic to -- -- @ -- data ArityType' -- = AEnd Divergence -- | ALam OneShotInfo ArityType' -- @ -- -- Which is easier to display the Hasse diagram for: -- -- @ -- ALam OneShotLam at -- | -- AEnd topDiv -- | -- ALam NoOneShotInfo at -- | -- AEnd exnDiv -- | -- AEnd botDiv -- @ -- -- where the @at@ fields of @ALam@ are inductively subject to the same order. -- That is, @ALam os at1 < ALam os at2@ iff @at1 < at2@. -- -- Why the strange Top element? -- See Note [Combining case branches: optimistic one-shot-ness] -- -- We rely on this lattice structure for fixed-point iteration in -- 'findRhsArity'. For the semantics of 'ArityType', see Note [ArityType]. data ArityType = AT ![OneShotInfo] !Divergence -- ^ @AT oss div@ means this value can safely be eta-expanded @length oss@ -- times, provided use sites respect the 'OneShotInfo's in @oss@. -- A 'OneShotLam' annotation can come from two sources: -- * The user annotated a lambda as one-shot with 'GHC.Exts.oneShot' -- * It's from a lambda binder of a type affected by `-fstate-hack`. -- See 'idStateHackOneShotInfo'. -- In both cases, 'OneShotLam' should win over 'NoOneShotInfo', see -- Note [Combining case branches]. -- -- If @div@ is dead-ending ('isDeadEndDiv'), then application to -- @length os@ arguments will surely diverge, similar to the situation -- with 'DmdType'. 
deriving Eq -- | This is the BNF of the generated output: -- -- @ -- @ -- -- We format -- @AT [o1,..,on] topDiv@ as @\o1..on.T@ and -- @AT [o1,..,on] botDiv@ as @\o1..on.⊥@, respectively. -- More concretely, @AT [NOI,OS,OS] topDiv@ is formatted as @\?11.T@. -- If the one-shot info is empty, we omit the leading @\.@. instance Outputable ArityType where ppr (AT oss div) | null oss = pp_div div | otherwise = char '\\' <> hcat (map pp_os oss) <> dot <> pp_div div where pp_div Diverges = char '⊥' pp_div ExnOrDiv = char 'x' pp_div Dunno = char 'T' pp_os OneShotLam = char '1' pp_os NoOneShotInfo = char '?' mkBotArityType :: [OneShotInfo] -> ArityType mkBotArityType oss = AT oss botDiv botArityType :: ArityType botArityType = mkBotArityType [] mkManifestArityType :: [Var] -> CoreExpr -> ArityType mkManifestArityType bndrs body = AT oss div where oss = [idOneShotInfo bndr | bndr <- bndrs, isId bndr] div | exprIsDeadEnd body = botDiv | otherwise = topDiv topArityType :: ArityType topArityType = AT [] topDiv -- | The number of value args for the arity type arityTypeArity :: ArityType -> Arity arityTypeArity (AT oss _) = length oss -- | True <=> eta-expansion will add at least one lambda expandableArityType :: ArityType -> Bool expandableArityType at = arityTypeArity at > 0 -- | See Note [Dead ends] in "GHC.Types.Demand". -- Bottom implies a dead end. isDeadEndArityType :: ArityType -> Bool isDeadEndArityType (AT _ div) = isDeadEndDiv div -- | Expand a non-bottoming arity type so that it has at least the given arity. maxWithArity :: ArityType -> Arity -> ArityType maxWithArity at@(AT oss div) !ar | isDeadEndArityType at = at | oss `lengthAtLeast` ar = at | otherwise = AT (take ar $ oss ++ repeat NoOneShotInfo) div -- | Trim an arity type so that it has at most the given arity. -- Any excess 'OneShotInfo's are truncated to 'topDiv', even if they end in -- 'ABot'. 
minWithArity :: ArityType -> Arity -> ArityType
minWithArity at@(AT oss _) ar
  | oss `lengthAtMost` ar = at
  | otherwise             = AT (take ar oss) topDiv

-- | Keep only the leading one-shot lambdas of an arity type.
-- If the input was a dead end, the truncated result must switch to 'topDiv';
-- see Note [Arity trimming] for why a trimmed dead end may not stay bottom.
takeWhileOneShot :: ArityType -> ArityType
takeWhileOneShot (AT oss div)
  | isDeadEndDiv div = AT (takeWhile isOneShotInfo oss) topDiv
  | otherwise        = AT (takeWhile isOneShotInfo oss) div

-- | The Arity returned is the number of value args the
-- expression can be applied to without doing much work
exprEtaExpandArity :: DynFlags -> CoreExpr -> ArityType
-- exprEtaExpandArity is used when eta expanding
--      e  ==>  \xy -> e x y
exprEtaExpandArity dflags e = arityType (findRhsArityEnv dflags) e

getBotArity :: ArityType -> Maybe Arity
-- Arity of a divergent function
getBotArity (AT oss div)
  | isDeadEndDiv div = Just $ length oss
  | otherwise        = Nothing

----------------------
findRhsArity :: DynFlags -> Id -> CoreExpr -> Arity -> ArityType
-- This implements the fixpoint loop for arity analysis
-- See Note [Arity analysis]
-- If findRhsArity e = (n, is_bot) then
--  (a) any application of e to <n arguments will not do much work,
--      so it is safe to expand e  ==>  (\x1..xn. e x1 .. xn)
--  (b) if is_bot=True, then e applied to n args is guaranteed bottom
findRhsArity dflags bndr rhs old_arity
  = go 0 botArityType
      -- We always do one step, but usually that produces a result equal to
      -- old_arity, and then we stop right away, because old_arity is assumed
      -- to be sound. In other words, arities should never decrease.
      -- Result: the common case is that there is just one iteration
  where
    go :: Int -> ArityType -> ArityType
    go !n cur_at@(AT oss div)
      | not (isDeadEndDiv div)           -- the "stop right away" case
      , length oss <= old_arity = cur_at -- from above
      | next_at == cur_at       = cur_at
      | otherwise =
         -- Warn if more than 2 iterations. Why 2? See Note [Exciting arity]
         warnPprTrace (debugIsOn && n > 2)
            "Exciting arity"
            (nest 2 (ppr bndr <+> ppr cur_at <+> ppr next_at $$ ppr rhs)) $
         go (n+1) next_at
      where
        next_at = step cur_at

    step :: ArityType -> ArityType
    step at = -- pprTrace "step" (ppr bndr <+> ppr at <+> ppr (arityType env rhs)) $
              arityType env rhs
      where
        -- Bind bndr to its current (optimistic) arity type while
        -- analysing its own RHS; this is the fixpoint step.
        env = extendSigEnv (findRhsArityEnv dflags) bndr at

{- Note [Arity analysis]
~~~~~~~~~~~~~~~~~~~~~
The motivating example for arity analysis is this:

  f = \x. let g = f (x+1)
          in \y. ...g...

What arity does f have?  Really it should have arity 2, but a naive
look at the RHS won't see that.  You need a fixpoint analysis which
says it has arity "infinity" the first time round.

This example happens a lot; it first showed up in Andy Gill's thesis,
fifteen years ago!  It also shows up in the code for 'rnf' on lists
in #4138.

We do the necessary, quite simple fixed-point iteration in 'findRhsArity',
which assumes for a single binding 'ABot' on the first run and iterates
until it finds a stable arity type. Two wrinkles

* We often have to ask (see the Case or Let case of 'arityType') whether some
  expression is cheap. In the case of an application, that depends on the arity
  of the application head! That's why we have our own version of 'exprIsCheap',
  'myExprIsCheap', that will integrate the optimistic arity types we have on f
  and g into the cheapness check.

* Consider this (#18793)

    go = \ds. case ds of
           []     -> id
           (x:ys) -> let acc = go ys in
                     case blah of
                       True  -> acc
                       False -> \ x1 -> acc (negate x1)

  We must propagate go's optimistically large arity to @acc@, so that the
  tail call to @acc@ in the True branch has sufficient arity.  This is done
  by the 'am_sigs' field in 'FindRhsArity', and 'lookupSigEnv' in the Var case
  of 'arityType'.

Note [Exciting arity]
~~~~~~~~~~~~~~~~~~~~~
The fixed-point iteration in 'findRhsArity' stabilises very quickly in almost
all cases.
To get notified of cases where we need an unusual number of iterations, we
emit a warning in debug mode, so that we can investigate and make sure that we
really can't do better. It's a gross hack, but catches real bugs (#18870).

Now, which number is "unusual"? We pick n > 2. Here's a pretty common and
expected example that takes two iterations and would ruin the specificity
of the warning (from T18937):

  f :: [Int] -> Int -> Int
  f []     = id
  f (x:xs) = let y = sum [0..x]
             in \z -> f xs (y + z)

Fixed-point iteration starts with arity type ⊥ for f. After the first
iteration, we get arity type \??.T, e.g. arity 2, because we unconditionally
'floatIn' the let-binding (see its bottom case).  After the second iteration,
we get arity type \?.T, e.g. arity 1, because now we are no longer allowed
to floatIn the non-cheap let-binding.  Which is all perfectly benign, but
means we do two iterations (well, actually 3 'step's to detect we are stable)
and don't want to emit the warning.

Note [Eta expanding through dictionaries]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If the experimental -fdicts-cheap flag is on, we eta-expand through
dictionary bindings.  This improves arities. Thereby, it also means
that full laziness is less prone to floating out the
application of a function to its dictionary arguments, which
can thereby lose opportunities for fusion.  Example:

        foo :: Ord a => a -> ...
        foo = /\a \(d:Ord a). let d' = ...d... in \(x:a). ....
                -- So foo has arity 1

        f = \x. foo dInt $ bar x

The (foo DInt) is floated out, and makes ineffective a RULE
        foo (bar x) = ...

One could go further and make exprIsCheap reply True to any
dictionary-typed expression, but that's more work.
-}

-- | Add one lambda (with the binder's state-hack-adjusted one-shot info)
-- in front of an arity type.
arityLam :: Id -> ArityType -> ArityType
arityLam id (AT oss div) = AT (idStateHackOneShotInfo id : oss) div

floatIn :: Bool -> ArityType -> ArityType
-- We have something like (let x = E in b),
-- where b has the given arity type.
floatIn cheap at
  | isDeadEndArityType at || cheap = at
  -- If E is not cheap, keep arity only for one-shots
  | otherwise                      = takeWhileOneShot at

arityApp :: ArityType -> Bool -> ArityType
-- Processing (fun arg) where at is the ArityType of fun,
-- Knock off an argument and behave like 'let'
arityApp (AT (_:oss) div) cheap = floatIn cheap (AT oss div)
arityApp at               _     = at

-- | Least upper bound in the 'ArityType' lattice.
-- See the haddocks on 'ArityType' for the lattice.
--
-- Used for branches of a @case@.
andArityType :: ArityEnv -> ArityType -> ArityType -> ArityType
andArityType env (AT (lam1:lams1) div1) (AT (lam2:lams2) div2)
  | AT lams' div' <- andArityType env (AT lams1 div1) (AT lams2 div2)
  = AT ((lam1 `and_lam` lam2) : lams') div'
  where
    (os1) `and_lam` (os2)
      = ( os1 `bestOneShot` os2)
      -- bestOneShot: see Note [Combining case branches: optimistic one-shot-ness]
andArityType env (AT [] div1) at2 = andWithTail env div1 at2
andArityType env at1 (AT [] div2) = andWithTail env div2 at1

-- | Combine a divergence (from a branch that has run out of lambdas) with the
-- other branch's arity type.  See Note [Combining case branches: andWithTail].
andWithTail :: ArityEnv -> Divergence -> ArityType -> ArityType
andWithTail env div1 at2
  | isDeadEndDiv div1    -- case x of { T -> error; F -> \y.e }
  = at2                  -- Note [ABot branches: max arity wins]

  | pedanticBottoms env  -- Note [Combining case branches: andWithTail]
  = AT [] topDiv

  | otherwise  -- case x of { T -> plusInt ; F -> \y.e }
  = takeWhileOneShot at2 -- We know div1 = topDiv
    -- See Note [Combining case branches: andWithTail]

{- Note [ABot branches: max arity wins]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider   case x of
             True  -> \x.  error "urk"
             False -> \xy. error "urk2"

Remember: \o1..on.⊥ means "if you apply to n args, it'll definitely diverge".
So we need \??.⊥ for the whole thing, the /max/ of both arities.
Note [Combining case branches: optimistic one-shot-ness]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When combining the ArityTypes for two case branches (with andArityType)
and both ArityTypes have ATLamInfo, then we just combine their
expensive-ness and one-shot info.  The tricky point is when we have

     case x of True  -> \x{one-shot}. blah1
               False -> \y. blah2

Since one-shot-ness is about the /consumer/ not the /producer/, we
optimistically assume that if either branch is one-shot, we combine the
best of the two branches, on the (slightly dodgy) basis that if we know
one branch is one-shot, then they all must be.  Surprisingly, this means
that the one-shot arity type is effectively the top element of the lattice.

Hence the call to `bestOneShot` in `andArityType`.

Note [Combining case branches: andWithTail]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When combining the ArityTypes for two case branches (with andArityType)
and one side or the other has run out of ATLamInfo; then we get
into `andWithTail`.

* If one branch is guaranteed bottom (isDeadEndDiv), we just take
  the other; see Note [ABot branches: max arity wins]

* Otherwise, if pedantic-bottoms is on, we just have to return
  AT [] topDiv.  E.g. if we have
    f x z = case x of True  -> \y. blah
                      False -> z
  then we can't eta-expand, because that would change the behaviour
  of (f False bottom).

* But if pedantic-bottoms is not on, we allow ourselves to push
  `z` under a lambda (much as we allow ourselves to put the `case x`
  under a lambda).  However we know nothing about the expensiveness
  or one-shot-ness of `z`, so we'd better assume it looks like
  (Expensive, NoOneShotInfo) all the way. Remembering
  Note [Combining case branches: optimistic one-shot-ness],
  we just add work to every ATLamInfo, keeping the one-shot-ness.

Here's an example:
  go = \x. let z = go e0
               go2 = \x. case x of
                           True  -> z
                           False -> \s{one-shot}.
                                     e1
           in go2 x

We *really* want to respect the one-shot annotation provided by the
user and eta-expand go and go2.
When combining the branches of the case we have
     T `andAT` \1.T
and we want to get \1.T.
But if the inner lambda wasn't one-shot (\?.T) we don't want to do this.
(We need a usage analysis to justify that.)

Unless we can conclude that **all** branches are safe to eta-expand then we
must pessimistically conclude that we can't eta-expand. See #21694 for where
this went wrong. We can do better in the long run, but for the 9.4/9.2
branches we choose to simply ignore oneshot annotations for the time being.

Note [Arity trimming]
~~~~~~~~~~~~~~~~~~~~~
Consider ((\x y. blah) |> co), where co :: (Int->Int->Int) ~ (Int -> F a) , and
F is some type family.

Because of Note [exprArity invariant], item (2), we must return with arity at
most 1, because typeArity (Int -> F a) = 1.  So we have to trim the result of
calling arityType on (\x y. blah).  Failing to do so, and hence breaking the
exprArity invariant, led to #5441.

How to trim?  If we end in topDiv, it's easy.  But we must take great care with
dead ends (i.e. botDiv).  Suppose the expression was (\x y. error "urk"),
we'll get \??.⊥.  We absolutely must not trim that to \?.⊥, because that
claims that ((\x y. error "urk") |> co) diverges when given one argument,
which it absolutely does not. And Bad Things happen if we think something
returns bottom when it doesn't (#16066).

So, if we need to trim a dead-ending arity type, switch (conservatively) to
topDiv.

Historical note: long ago, we unconditionally switched to topDiv when we
encountered a cast, but that is far too conservative: see #5475

Note [Eta expanding through CallStacks]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Just as it's good to eta-expand through dictionaries, so it is good to do so
through CallStacks.  #20103 is a case in point, where we got
  foo :: HasCallStack => Int -> Int
  foo = \(d::CallStack). let d2 = pushCallStack blah d in
        \(x:Int).
        blah

We really want to eta-expand this! #20103 is quite convincing!
We do this regardless of -fdicts-cheap; it's not really a dictionary.
-}

---------------------------

-- | Each of the entry-points of the analyser ('arityType') has different
-- requirements. The entry-points are
--
--   1. 'exprBotStrictness_maybe'
--   2. 'exprEtaExpandArity'
--   3. 'findRhsArity'
--
-- For each of the entry-points, there is a separate mode that governs
--
--   1. How pedantic we are wrt. ⊥, in 'pedanticBottoms'.
--   2. Whether we store arity signatures for non-recursive let-bindings,
--      accessed in 'extendSigEnv'/'lookupSigEnv'.
--      See Note [Arity analysis] why that's important.
--   3. Which expressions we consider cheap to float inside a lambda,
--      in 'myExprIsCheap'.
data AnalysisMode
  = BotStrictness
  -- ^ Used during 'exprBotStrictness_maybe'.
  | EtaExpandArity { am_ped_bot :: !Bool
                   , am_dicts_cheap :: !Bool }
  -- ^ Used for finding an expression's eta-expanding arity quickly, without
  -- fixed-point iteration ('exprEtaExpandArity').
  | FindRhsArity { am_ped_bot :: !Bool
                 , am_dicts_cheap :: !Bool
                 , am_sigs :: !(IdEnv ArityType) }
  -- ^ Used for regular, fixed-point arity analysis ('findRhsArity').
  --   See Note [Arity analysis] for details about fixed-point iteration.
  --   INVARIANT: Disjoint with 'ae_joins'.
  --   NOTE(review): 'ae_joins' is no longer a field of 'ArityEnv' here
  --   (it only appears in commented-out code below) — this invariant
  --   looks stale; confirm against upstream GHC.

data ArityEnv
  = AE
  { ae_mode :: !AnalysisMode
  -- ^ The analysis mode. See 'AnalysisMode'.
  }

-- | The @ArityEnv@ used by 'exprBotStrictness_maybe'. Pedantic about bottoms
-- and no application is ever considered cheap.
botStrictnessArityEnv :: ArityEnv
botStrictnessArityEnv = AE { ae_mode = BotStrictness }

{-
-- | The @ArityEnv@ used by 'exprEtaExpandArity'.
etaExpandArityEnv :: DynFlags -> ArityEnv
etaExpandArityEnv dflags
  = AE { ae_mode  = EtaExpandArity { am_ped_bot = gopt Opt_PedanticBottoms dflags
                                   , am_dicts_cheap = gopt Opt_DictsCheap dflags }
       , ae_joins = emptyVarSet }
-}

-- | The @ArityEnv@ used by 'findRhsArity'.
findRhsArityEnv :: DynFlags -> ArityEnv
findRhsArityEnv dflags
  = AE { ae_mode = FindRhsArity { am_ped_bot = gopt Opt_PedanticBottoms dflags
                                , am_dicts_cheap = gopt Opt_DictsCheap dflags
                                , am_sigs = emptyVarEnv } }

-- | True iff the environment was built by 'findRhsArityEnv' (i.e. we are in
-- the fixed-point analysis and keep a signature environment).
isFindRhsArity :: ArityEnv -> Bool
isFindRhsArity (AE { ae_mode = FindRhsArity {} }) = True
isFindRhsArity _                                  = False

-- First some internal functions in snake_case for deleting in certain VarEnvs
-- of the ArityType. Don't call these; call delInScope* instead!

-- | Apply @f@ to the signature environment, when there is one
-- ('FindRhsArity'); a no-op in the other modes.
modifySigEnv :: (IdEnv ArityType -> IdEnv ArityType) -> ArityEnv -> ArityEnv
modifySigEnv f env@AE { ae_mode = am@FindRhsArity{am_sigs = sigs} }
  = env { ae_mode = am { am_sigs = f sigs } }
modifySigEnv _ env = env
{-# INLINE modifySigEnv #-}

del_sig_env :: Id -> ArityEnv -> ArityEnv -- internal!
del_sig_env id = modifySigEnv (\sigs -> delVarEnv sigs id)
{-# INLINE del_sig_env #-}

del_sig_env_list :: [Id] -> ArityEnv -> ArityEnv -- internal!
del_sig_env_list ids = modifySigEnv (\sigs -> delVarEnvList sigs ids)
{-# INLINE del_sig_env_list #-}

-- end of internal deletion functions

-- | Record the 'ArityType' of a let-bound 'Id'; looked up again in the
-- Var case of 'arityType'.  See Note [Arity analysis].
extendSigEnv :: ArityEnv -> Id -> ArityType -> ArityEnv
extendSigEnv env id ar_ty
  = modifySigEnv (\sigs -> extendVarEnv sigs id ar_ty) env

delInScope :: ArityEnv -> Id -> ArityEnv
delInScope env id = del_sig_env id env

delInScopeList :: ArityEnv -> [Id] -> ArityEnv
delInScopeList env ids = del_sig_env_list ids env

lookupSigEnv :: ArityEnv -> Id -> Maybe ArityType
lookupSigEnv AE{ ae_mode = mode } id = case mode of
  BotStrictness                  -> Nothing
  EtaExpandArity{}               -> Nothing
  FindRhsArity{ am_sigs = sigs } -> lookupVarEnv sigs id

-- | Whether the analysis should be pedantic about bottoms.
-- 'exprBotStrictness_maybe' always is.
pedanticBottoms :: ArityEnv -> Bool
pedanticBottoms AE{ ae_mode = mode } = case mode of
  BotStrictness                          -> True
  EtaExpandArity{ am_ped_bot = ped_bot } -> ped_bot
  FindRhsArity{ am_ped_bot = ped_bot }   -> ped_bot

-- | A version of 'exprIsCheap' that considers results from arity analysis
-- and optionally the expression's type.
-- Under 'exprBotStrictness_maybe', no expressions are cheap.
myExprIsCheap :: ArityEnv -> CoreExpr -> Maybe Type -> Bool
myExprIsCheap AE{ae_mode = mode} e mb_ty = case mode of
  BotStrictness -> False
  _             -> cheap_dict || cheap_fun e
  where
    cheap_dict = case mb_ty of
      Nothing -> False
      Just ty -> (am_dicts_cheap mode && isDictTy ty)
                 || isCallStackPredTy ty
        -- See Note [Eta expanding through dictionaries]
        -- See Note [Eta expanding through CallStacks]

    cheap_fun e = case mode of
#if __GLASGOW_HASKELL__ <= 900
      BotStrictness                -> panic "impossible"
#endif
      EtaExpandArity{}             -> exprIsCheap e
      FindRhsArity{am_sigs = sigs} -> exprIsCheapX (myIsCheapApp sigs) e

-- | A version of 'isCheapApp' that considers results from arity analysis.
-- See Note [Arity analysis] for what's in the signature environment and why
-- it's important.
myIsCheapApp :: IdEnv ArityType -> CheapAppFun
myIsCheapApp sigs fn n_val_args = case lookupVarEnv sigs fn of
  -- Nothing means not a local function, fall back to regular
  -- 'GHC.Core.Utils.isCheapApp'
  Nothing -> isCheapApp fn n_val_args

  -- `Just at` means local function with `at` as current SafeArityType.
  -- NB the SafeArityType bit: that means we can ignore the cost flags
  --    in 'lams', and just consider the length
  -- Roughly approximate what 'isCheapApp' is doing.
  Just (AT oss div)
    | isDeadEndDiv div        -> True -- See Note [isCheapApp: bottoming functions] in GHC.Core.Utils
    | n_val_args < length oss -> True -- Essentially isWorkFreeApp
    | otherwise               -> False

----------------
arityType :: HasDebugCallStack => ArityEnv -> CoreExpr -> ArityType
-- Precondition: all the free join points of the expression
-- are bound by the ArityEnv
-- See Note [No free join points in arityType]

arityType env (Cast e co)
  = minWithArity (arityType env e) co_arity -- See Note [Arity trimming]
  where
    co_arity = length (typeArity (coercionRKind co))
    -- See Note [exprArity invariant] (2); must be true of
    -- arityType too, since that is how we compute the arity
    -- of variables, and they in turn affect result of exprArity
    -- #5441 is a nice demo

arityType env (Var v)
  | Just at <- lookupSigEnv env v -- Local binding
  = at
  | otherwise
  = assertPpr (not (isFindRhsArity env && isJoinId v)) (ppr v) $
    -- All join-point should be in the ae_sigs
    -- See Note [No free join points in arityType]
    idArityType v

        -- Lambdas; increase arity
arityType env (Lam x e)
  | isId x    = arityLam x (arityType env' e)
  | otherwise = arityType env' e
  where
    env' = delInScope env x

        -- Applications; decrease arity, except for types
arityType env (App fun (Type _))
  = arityType env fun
arityType env (App fun arg )
  = arityApp (arityType env fun) (myExprIsCheap env arg Nothing)

        -- Case/Let; keep arity if either the expression is cheap
        -- or it's a 1-shot lambda
        -- The former is not really right for Haskell
        --      f x = case x of { (a,b) -> \y. e }
        --  ===>
        --      f x y = case x of { (a,b) -> e }
        -- The difference is observable using 'seq'
        --
arityType env (Case scrut bndr _ alts)
  | exprIsDeadEnd scrut || null alts
  = botArityType    -- Do not eta expand. See (1) in Note [Dealing with bottom]
  | not (pedanticBottoms env)  -- See (2) in Note [Dealing with bottom]
  , myExprIsCheap env scrut (Just (idType bndr))
  = alts_type
  | exprOkForSpeculation scrut
  = alts_type

  | otherwise                  -- In the remaining cases we may not push
  = takeWhileOneShot alts_type -- evaluation of the scrutinee in
  where
    env' = delInScope env bndr
    arity_type_alt (Alt _con bndrs rhs) = arityType (delInScopeList env' bndrs) rhs
    alts_type = foldr1 (andArityType env) (map arity_type_alt alts)

arityType env (Let (NonRec b r) e)
  = -- See Note [arityType for let-bindings]
    floatIn cheap_rhs (arityType env' e)
  where
    cheap_rhs = myExprIsCheap env r (Just (idType b))
    env'      = extendSigEnv env b (arityType env r)

arityType env (Let (Rec prs) e)
  = floatIn (all is_cheap prs) (arityType env' e)
  where
    is_cheap (b,e) = myExprIsCheap env' e (Just (idType b))
    env'           = foldl extend_rec env prs
    extend_rec :: ArityEnv -> (Id,CoreExpr) -> ArityEnv
    extend_rec env (b,e) = extendSigEnv env b $
                           mkManifestArityType bndrs body
      where
        (bndrs, body) = collectBinders e
        -- We can't call arityType on the RHS, because it might mention
        -- join points bound in this very letrec, and we don't want to
        -- do a fixpoint calculation here.  So we make do with the
        -- manifest arity

arityType env (Tick t e)
  | not (tickishIsCode t) = arityType env e

arityType _ _ = topArityType

{- Note [No free join points in arityType]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we call arityType on this expression (EX1)
   \x . case x of True  -> \y. e
                  False -> $j 3
where $j is a join point.  It really makes no sense to talk of the arity
of this expression, because it has a free join point.  In particular, we
can't eta-expand the expression because we'd have do the same thing to the
binding of $j, and we can't see that binding.

If we had (EX2)
   \x. join $j y = blah
       case x of True  -> \y. e
                 False -> $j 3
then it would make perfect sense: we can determine $j's ArityType, and
propagate it to the usage site as usual.
But how can we get (EX1)?  It doesn't make much sense, because $j can't
be a join point under the \x anyway.  So we make it a precondition of
arityType that the argument has no free join-point Ids.  (This is checked
with an assert in the Var case of arityType.)

BUT the invariant risks being invalidated by one very narrow special case:
runRW#
   join $j y = blah
   runRW# (\s. case x of True  -> \y. e
                         False -> $j x)

We have special magic in OccurAnal, and Simplify to allow continuations to
move into the body of a runRW# call.

So we are careful never to attempt to eta-expand the (\s.blah) in the
argument to runRW#, at least not when there is a literal lambda there,
so that OccurAnal has seen it and allowed join points bound outside.
See Note [No eta-expansion in runRW#] in GHC.Core.Opt.Simplify.Iteration.

Note [arityType for let-bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For non-recursive let-bindings, we just get the arityType of the RHS,
and extend the environment.  That works nicely for things like this
(#18793):
  go = \ ds. case ds_a2CF of {
               []     -> id
               : y ys -> case y of { GHC.Types.I# x ->
                         let acc = go ys in
                         case x ># 42# of {
                           __DEFAULT -> acc
                           1# -> \x1. acc (negate x2)

Here we want to get a good arity for `acc`, based on the ArityType
of `go`.

All this is particularly important for join points. Consider this (#18328)

  f x = join j y = case y of
                      True  -> \a. blah
                      False -> \b. blah
        in case x of
              A -> j True
              B -> \c. blah
              C -> j False

and suppose the join point is too big to inline.  Now, what is the
arity of f?  If we inlined the join point, we'd definitely say "arity 2"
because we are prepared to push case-scrutinisation inside a lambda.
It's important that we extend the envt with j's ArityType, so that
we can use that information in the A/C branch of the case.

For /recursive/ bindings it's more difficult, to call arityType,
because we don't have an ArityType to put in the envt for the
recursively bound Ids.
So for non-join-point bindings we satisfy ourselves with mkManifestArityType.
Typically we'll have eta-expanded the binding (based on an earlier fixpoint
calculation in findRhsArity), so the manifest arity is good.

But for /recursive join points/ things are not so good.
See Note [Arity type for recursive join bindings]

Note [Arity type for recursive join bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
  f x = joinrec j 0 = \ a b c -> (a,x,b)
                j n = j (n-1)
        in j 20

Obviously `f` should get arity 4.  But the manifest arity of `j`
is 1.  Remember, we don't eta-expand join points; see
GHC.Core.Opt.Simplify.Utils Note [Do not eta-expand join points].
And the ArityInfo on `j` will be just 1 too; see GHC.Core
Note [Invariants on join points], item (2b).  So using
Note [ArityType for let-bindings] won't work well.

We could do a fixpoint iteration, but that's a heavy hammer
to use in arityType.

So we take advantage of it being a join point:

* Extend the ArityEnv to bind each of the recursive binders
  (all join points) to `botArityType`.  This means that any
  jump to the join point will return botArityType, which is
  unit for `andArityType`:
      botArityType `andArityType` at = at
  So it's almost as if those "jump" branches didn't exist.

* In this extended env, find the ArityType of each of the RHS, after
  stripping off the join-point binders.

* Use andArityType to combine all these RHS ArityTypes.

* Find the ArityType of the body, also in this strange extended
  environment

* And combine that into the result with andArityType.

In our example, the jump (j 20) will yield Bot, as will the jump
(j (n-1)). We'll 'and' those the ArityType of (\abc. blah).  Good!

In effect we are treating the RHSs as alternative bodies (like
in a case), and ignoring all jumps.  In this way we don't need
to take a fixpoint.  Tricky!

NB: we treat /non-recursive/ join points in the same way, but
actually it works fine to treat them uniformly with normal
let-bindings, and that takes less code.
-}

-- | The 'ArityType' of a variable, derived from its strictness signature
-- (when it has a useful one) or else from its recorded 'idArity'; in both
-- cases the one-shot info comes from the variable's type.
idArityType :: Id -> ArityType
idArityType v
  | strict_sig <- idDmdSig v
  , not $ isNopSig strict_sig
  , (ds, div) <- splitDmdSig strict_sig
  , let arity = length ds
  -- Every strictness signature admits an arity signature!
  = AT (take arity one_shots) div
  | otherwise
  = AT (take (idArity v) one_shots) topDiv
  where
    one_shots :: [OneShotInfo]  -- One-shot-ness derived from the type
    one_shots = typeArity (idType v)

{-
%************************************************************************
%*                                                                      *
              The main eta-expander
%*                                                                      *
%************************************************************************

We go for:
   f = \x1..xn -> N  ==>   f = \x1..xn y1..ym -> N y1..ym
                                 (n >= 0)

where (in both cases)

        * The xi can include type variables

        * The yi are all value variables

        * N is a NORMAL FORM (i.e. no redexes anywhere)
          wanting a suitable number of extra args.

The biggest reason for doing this is for cases like

        f = \x -> case x of
                    True  -> \y -> e1
                    False -> \y -> e2

Here we want to get the lambdas together.  A good example is the nofib
program fibheaps, which gets 25% more allocation if you don't do this
eta-expansion.

We may have to sandwich some coerces between the lambdas
to make the types work.   exprEtaExpandArity looks through coerces
when computing arity; and etaExpand adds the coerces as necessary when
actually computing the expansion.

Note [No crap in eta-expanded code]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The eta expander is careful not to introduce "crap".  In particular,
given a CoreExpr satisfying the 'CpeRhs' invariant (in CorePrep), it
returns a CoreExpr satisfying the same invariant. See Note [Eta
expansion and the CorePrep invariants] in CorePrep.

This means the eta-expander has to do a bit of on-the-fly
simplification but it's not too hard.  The alternative, of relying on
a subsequent clean-up phase of the Simplifier to de-crapify the result,
means you can't really use it in CorePrep, which is painful.
Note [Eta expansion for join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The no-crap rule is very tiresome to guarantee when we have join points. Consider eta-expanding let j :: Int -> Int -> Bool j x = e in b The simple way is \(y::Int). (let j x = e in b) y The no-crap way is \(y::Int). let j' :: Int -> Bool j' x = e y in b[j'/j] y where I have written to stress that j's type has changed. Note that (of course!) we have to push the application inside the RHS of the join as well as into the body. AND if j has an unfolding we have to push it into there too. AND j might be recursive... So for now I'm abandoning the no-crap rule in this case. I think that for the use in CorePrep it really doesn't matter; and if it does, then CoreToStg.myCollectArgs will fall over. (Moreover, I think that casts can make the no-crap rule fail too.) Note [Eta expansion and SCCs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that SCCs are not treated specially by etaExpand. If we have etaExpand 2 (\x -> scc "foo" e) = (\xy -> (scc "foo" e) y) So the costs of evaluating 'e' (not 'e y') are attributed to "foo" Note [Eta expansion and source notes] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CorePrep puts floatable ticks outside of value applications, but not type applications. As a result we might be trying to eta-expand an expression like (src<...> v) @a which we want to lead to code like \x -> src<...> v @a x This means that we need to look through type applications and be ready to re-add floats on the top. Note [Eta expansion with ArityType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The etaExpandAT function takes an ArityType (not just an Arity) to guide eta-expansion. Why? Because we want to preserve one-shot info. Consider foo = \x. case x of True -> (\s{os}. blah) |> co False -> wubble We'll get an ArityType for foo of \?1.T. Then we want to eta-expand to foo = \x. (\eta{os}. (case x of ...as before...) 
             eta) |> some_co

That 'eta' binder is fresh, and we really want it to have the
one-shot flag from the inner \s{os}.

By expanding with the ArityType gotten from analysing the RHS, we
achieve this neatly.

This makes a big difference to the one-shot monad trick;
see Note [The one-shot state monad trick] in GHC.Utils.Monad.
-}

-- | @etaExpand n e@ returns an expression with
-- the same meaning as @e@, but with arity @n@.
--
-- Given:
--
-- > e' = etaExpand n e
--
-- We should have that:
--
-- > ty = exprType e = exprType e'
etaExpand :: Arity -> CoreExpr -> CoreExpr
etaExpand n orig_expr
  = eta_expand in_scope (replicate n NoOneShotInfo) orig_expr
  where
    in_scope = {-#SCC "eta_expand:in-scopeX" #-}
               mkInScopeSet (exprFreeVars orig_expr)

etaExpandAT :: InScopeSet -> ArityType -> CoreExpr -> CoreExpr
-- See Note [Eta expansion with ArityType]
--
-- We pass in the InScopeSet from the simplifier to avoid recomputing
-- it here, which can be jolly expensive if the casts are big
-- In #18223 it took 10% of compile time just to do the exprFreeVars!
etaExpandAT in_scope (AT oss _) orig_expr
  = eta_expand in_scope oss orig_expr

-- etaExpand arity e = res
-- Then 'res' has at least 'arity' lambdas at the top
--    possibly with a cast wrapped around the outside
-- See Note [Eta expansion with ArityType]
--
-- etaExpand deals with for-alls. For example:
--    etaExpand 1 E
-- where  E :: forall a. a -> a
-- would return
--    (/\b. \y::a -> E b y)
eta_expand :: InScopeSet -> [OneShotInfo] -> CoreExpr -> CoreExpr
eta_expand in_scope one_shots (Cast expr co)
  = Cast (eta_expand in_scope one_shots expr) co

eta_expand in_scope one_shots orig_expr
  = go in_scope one_shots [] orig_expr
  where
    -- Strip off existing lambdas and casts before handing off to mkEtaWW
    -- This is mainly to avoid spending time cloning binders and substituting
    -- when there is actually nothing to do.  It's slightly awkward to deal
    -- with casts here, apart from the topmost one, and they are rare, so
    -- if we find one we just hand off to mkEtaWW anyway
    -- Note [Eta expansion and SCCs]
    go _ [] _ _ = orig_expr  -- Already has the specified arity; no-op

    go in_scope oss@(_:oss1) vs (Lam v body)
      | isTyVar v = go (in_scope `extendInScopeSet` v) oss  (v:vs) body
      | otherwise = go (in_scope `extendInScopeSet` v) oss1 (v:vs) body

    go in_scope oss rev_vs expr
      = -- pprTrace "ee" (vcat [ppr in_scope', ppr top_bndrs, ppr eis]) $
        retick $
        etaInfoAbs top_eis $
        etaInfoApp in_scope' sexpr eis
      where
        (in_scope', eis@(EI eta_bndrs mco))
                  = mkEtaWW oss (ppr orig_expr) in_scope (exprType expr)
        top_bndrs = reverse rev_vs
        top_eis   = EI (top_bndrs ++ eta_bndrs) (mkPiMCos top_bndrs mco)

        -- Find ticks behind type apps.
        -- See Note [Eta expansion and source notes]
        -- I don't really understand this code SLPJ May 21
        (expr', args)   = collectArgs expr
        (ticks, expr'') = stripTicksTop tickishFloatable expr'
        sexpr           = mkApps expr'' args
        retick expr     = foldr mkTick expr ticks

{- *********************************************************************
*                                                                      *
              The EtaInfo mechanism
          mkEtaWW, etaInfoAbs, etaInfoApp
*                                                                      *
********************************************************************* -}

{- Note [The EtaInfo mechanism]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have (e :: ty) and we want to eta-expand it to arity N.
This what eta_expand does.  We do it in two steps:

1.  mkEtaWW: from 'ty' and 'N' build a EtaInfo which describes
    the shape of the expansion necessary to expand to arity N.

2.  Build the term
       \ v1..vn.  e v1 .. vn
    where those abstractions and applications are described by
    the same EtaInfo.  Specifically we build the term

       etaInfoAbs etas (etaInfoApp in_scope e etas)

    where etas :: EtaInfo
          etaInfoAbs builds the lambdas
          etaInfoApp builds the applications

Note that the /same/ EtaInfo drives both etaInfoAbs and etaInfoApp

To a first approximation EtaInfo is just [Var].  But
casts complicate the question.
If we have
   newtype N a = MkN (S -> a)
   axN :: N a  ~  S -> a
and
   e :: N (N Int)
then the eta-expansion should look like
   (\(x::S) (y::S) -> e |> co x y) |> sym co
where
  co :: N (N Int) ~ S -> S -> Int
  co = axN @(N Int) ; (S -> axN @Int)

We want to get one cast, at the top, to account for all those
nested newtypes. This is expressed by the EtaInfo type:

   data EtaInfo = EI [Var] MCoercionR

Note [Check for reflexive casts in eta expansion]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It turns out that the casts created by the above mechanism are often Refl.
When casts are very deeply nested (as happens in #18223), the repetition of
types can make the overall term very large.  So there is a big payoff in
cancelling out casts aggressively wherever possible.
(See also Note [No crap in eta-expanded code].)

This matters particularly in etaInfoApp, where we
* Do beta-reduction on the fly
* Use getArg_maybe to get a cast out of the way,
  so that we can do beta reduction
Together this makes a big difference.  Consider when e is
   case x of
      True  -> (\x -> e1) |> c1
      False -> (\p -> e2) |> c2

When we eta-expand this to arity 1, say, etaInfoAbs will wrap
a (\eta) around the outside and use etaInfoApp to apply each
alternative to 'eta'.  We want to beta-reduce all that junk
away.

#18223 was a dramatic example in which the intermediate term was
grotesquely huge, even though the next Simplifier iteration squashed
it.  Better to kill it at birth.

The crucial spots in etaInfoApp are:
* `checkReflexiveMCo` in the (Cast e co) case of `go`
* `checkReflexiveMCo` in `pushCoArg`
* Less important: checkReflexiveMCo in the final case of `go`
Collectively these make a factor-of-5 difference to the total
allocation of T18223, so take care if you change this stuff!

Example:
   newtype N = MkN (Y->Z)
   f :: X -> N
   f = \(x::X). ((\(y::Y).
                 blah) |> fco)
   where
     fco :: (Y->Z) ~ N

mkEtaWW makes an EtaInfo of (EI [(eta1:X), (eta2:Y)] eta_co
  where
    eta_co :: (X->N) ~ (X->Y->Z)
    eta_co =  ( -> nco)
    nco :: N ~ (Y->Z)  -- Comes from topNormaliseNewType_maybe

Now, when we push that eta_co inward in etaInfoApp:
* In the (Cast e co) case, the 'fco' and 'nco' will meet, and
  should cancel.
* When we meet the (\y.e) we want no cast on the y.

-}

--------------
data EtaInfo = EI [Var] MCoercionR

-- (EI bs co) describes a particular eta-expansion, as follows:
--  Abstraction:  (\b1 b2 .. bn. []) |> sym co
--  Application:  ([] |> co) b1 b2 .. bn
--
--    e :: T    co :: T ~ (t1 -> t2 -> .. -> tn -> tr)
--    e = (\b1 b2 ... bn. (e |> co) b1 b2 .. bn) |> sym co

instance Outputable EtaInfo where
  ppr (EI vs mco) = text "EI" <+> ppr vs <+> parens (ppr mco)

etaInfoApp :: InScopeSet -> CoreExpr -> EtaInfo -> CoreExpr
-- (etaInfoApp s e (EI bs mco) returns something equivalent to
--             ((substExpr s e) |> mco b1 .. bn)
-- See Note [The EtaInfo mechanism]
--
-- NB: With very deeply nested casts, this function can be expensive
--     In T18223, this function alone costs 15% of allocation, all
--     spent in the calls to substExprSC and substBindSC
etaInfoApp in_scope expr eis
  = go (mkEmptySubst in_scope) expr eis
  where
    go :: Subst -> CoreExpr -> EtaInfo -> CoreExpr
    -- 'go' pushed down the eta-infos into the branch of a case
    -- and the body of a let; and does beta-reduction if possible
    --   go subst fun co [b1,..,bn]  returns  (subst(fun) |> co) b1 .. bn
    go subst (Tick t e) eis
      = Tick (substTickish subst t) (go subst e eis)

    go subst (Cast e co) (EI bs mco)
      = go subst e (EI bs mco')
      where
        mco' = checkReflexiveMCo (Core.substCo subst co `mkTransMCoR` mco)
               -- See Note [Check for reflexive casts in eta expansion]

    go subst (Case e b ty alts) eis
      = Case (Core.substExprSC subst e) b1 ty' alts'
      where
        (subst1, b1) = Core.substBndr subst b
        alts'        = map subst_alt alts
        ty'          = etaInfoAppTy (Core.substTy subst ty) eis
        subst_alt (Alt con bs rhs) = Alt con bs' (go subst2 rhs eis)
          where
            (subst2,bs') = Core.substBndrs subst1 bs

    go subst (Let b e) eis
      | not (isJoinBind b) -- See Note [Eta expansion for join points]
      = Let b' (go subst' e eis)
      where
        (subst', b') = Core.substBindSC subst b

    -- Beta-reduction if possible, pushing any intervening casts past
    -- the argument. See Note [The EtaInfo mechanism]
    go subst (Lam v e) (EI (b:bs) mco)
      | Just (arg,mco') <- pushMCoArg mco (varToCoreExpr b)
      = go (Core.extendSubst subst v arg) e (EI bs mco')

    -- Stop pushing down; just wrap the expression up
    -- See Note [Check for reflexive casts in eta expansion]
    go subst e (EI bs mco) = Core.substExprSC subst e
                             `mkCastMCo` checkReflexiveMCo mco
                             `mkVarApps` bs

--------------
etaInfoAppTy :: Type -> EtaInfo -> Type
-- If                    e :: ty
-- then   etaInfoApp e eis :: etaInfoApp ty eis
etaInfoAppTy ty (EI bs mco)
  = applyTypeToArgs (text "etaInfoAppTy") ty1 (map varToCoreExpr bs)
  where
    ty1 = case mco of
            MRefl  -> ty
            MCo co -> coercionRKind co

--------------
etaInfoAbs :: EtaInfo -> CoreExpr -> CoreExpr
-- See Note [The EtaInfo mechanism]
etaInfoAbs (EI bs mco) expr = (mkLams bs expr) `mkCastMCo` mkSymMCo mco

--------------
-- | @mkEtaWW n _ fvs ty@ will compute the 'EtaInfo' necessary for eta-expanding
-- an expression @e :: ty@ to take @n@ value arguments, where @fvs@ are the
-- free variables of @e@.
--
-- Note that this function is entirely unconcerned about cost centres and other
-- semantically-irrelevant source annotations, so call sites must take care to
-- preserve that info. See Note [Eta expansion and SCCs].
mkEtaWW
  :: [OneShotInfo]
  -- ^ How many value arguments to eta-expand
  -> SDoc
  -- ^ The pretty-printed original expression, for warnings.
  -> InScopeSet
  -- ^ A super-set of the free vars of the expression to eta-expand.
  -> Type
  -> (InScopeSet, EtaInfo)
  -- ^ The variables in 'EtaInfo' are fresh wrt. to the incoming 'InScopeSet'.
  -- The outgoing 'InScopeSet' extends the incoming 'InScopeSet' with the
  -- fresh variables in 'EtaInfo'.

mkEtaWW orig_oss ppr_orig_expr in_scope orig_ty
  = go 0 orig_oss empty_subst orig_ty
  where
    empty_subst = mkEmptyTCvSubst in_scope

    go :: Int                -- For fresh names
       -> [OneShotInfo]      -- Number of value args to expand to
       -> TCvSubst -> Type   -- We are really looking at subst(ty)
       -> (InScopeSet, EtaInfo)
    -- (go [o1,..,on] subst ty) = (in_scope, EI [b1,..,bn] co)
    --    co :: subst(ty) ~ b1_ty -> ... -> bn_ty -> tr

    go _ [] subst _       -- See Note [exprArity invariant]
       ----------- Done!  No more expansion needed
       = (getTCvInScope subst, EI [] MRefl)

    go n oss@(one_shot:oss1) subst ty       -- See Note [exprArity invariant]
       ----------- Forall types  (forall a. ty)
       | Just (tcv, ty') <- splitForAllTyCoVar_maybe ty
       , (subst', tcv')  <- Type.substVarBndr subst tcv
       , let oss' | isTyVar tcv = oss
                  | otherwise   = oss1
             -- A forall can bind a CoVar, in which case
             -- we consume one of the [OneShotInfo]
       , (in_scope, EI bs mco) <- go n oss' subst' ty'
       = (in_scope, EI (tcv' : bs) (mkHomoForAllMCo tcv' mco))

       ----------- Function types  (t1 -> t2)
       | Just (mult, arg_ty, res_ty) <- splitFunTy_maybe ty
       , typeHasFixedRuntimeRep arg_ty
          -- See Note [Representation polymorphism invariants] in GHC.Core
          -- See also test case typecheck/should_run/EtaExpandLevPoly

       , (subst', eta_id) <- freshEtaId n subst (Scaled mult arg_ty)
          -- Avoid free vars of the original expression

       , let eta_id' = eta_id `setIdOneShotInfo` one_shot
       , (in_scope, EI bs mco) <- go (n+1) oss1 subst' res_ty
       = (in_scope, EI (eta_id' : bs) (mkFunResMCo (idScaledType eta_id') mco))

       ----------- Newtypes
       -- Given this:
       --      newtype T = MkT ([T] -> Int)
       -- Consider eta-expanding this
       --      eta_expand 1 e T
       -- We want to get
       --      coerce T (\x::[T] -> (coerce ([T]->Int) e) x)
       | Just (co, ty') <- topNormaliseNewType_maybe ty
       , -- co :: ty ~ ty'
         let co' = Type.substCo subst co
             -- Remember to apply the substitution to co (#16979)
             -- (or we could have applied to ty, but then
             --  we'd have had to zap it for the recursive call)
       , (in_scope, EI bs mco) <- go n oss subst ty'
         -- mco :: subst(ty') ~ b1_ty -> ... -> bn_ty -> tr
       = (in_scope, EI bs (mkTransMCoR co' mco))

       | otherwise       -- We have an expression of arity > 0,
                         -- but its type isn't a function, or a binder
                         -- does not have a fixed runtime representation
       = warnPprTrace True "mkEtaWW" ((ppr orig_oss <+> ppr orig_ty) $$ ppr_orig_expr)
         (getTCvInScope subst, EI [] MRefl)
         -- This *can* legitimately happen:
         -- e.g.  coerce Int (\x. x)
         -- Essentially the programmer is playing fast and loose with types
         -- (Happy does this a lot).  So we simply decline to eta-expand.
         -- Otherwise we'd end up with an explicit lambda having a non-function type

{- *********************************************************************
*                                                                      *
              The "push rules"
*                                                                      *
************************************************************************

Here we implement the "push rules" from FC papers:

* The push-argument rules, where we can move a coercion past an argument.
  We have
      (fun |> co) arg
  and we want to transform it to
      (fun arg') |> co'
  for some suitable co' and transformed arg'.

* The PushK rule for data constructors.  We have
      (K e1 .. en) |> co
  and we want to transform to
      (K e1' .. en')
  by pushing the coercion into the arguments
-}

-- Push a coercion through a whole argument list, threading the residual
-- coercion left-to-right; stops early when the coercion becomes Refl
pushCoArgs :: CoercionR -> [CoreArg] -> Maybe ([CoreArg], MCoercion)
pushCoArgs co []         = return ([], MCo co)
pushCoArgs co (arg:args) = do { (arg', m_co1) <- pushCoArg co arg
                              ; case m_co1 of
                                  MCo co1 -> do { (args', m_co2) <- pushCoArgs co1 args
                                                ; return (arg':args', m_co2) }
                                  MRefl   -> return (arg':args, MRefl) }

-- MCoercion variant of pushCoArg; MRefl needs no pushing at all
pushMCoArg :: MCoercionR -> CoreArg -> Maybe (CoreArg, MCoercion)
pushMCoArg MRefl    arg = Just (arg, MRefl)
pushMCoArg (MCo co) arg = pushCoArg co arg

pushCoArg :: CoercionR -> CoreArg -> Maybe (CoreArg, MCoercion)
-- We have (fun |> co) arg, and we want to transform it to
--         (fun arg) |> co
-- This may fail, e.g. if (fun :: N) where N is a newtype
-- C.f.
-- simplCast in GHC.Core.Opt.Simplify
-- 'co' is always Representational
pushCoArg co arg
  | Type ty <- arg
  = do { (ty', m_co') <- pushCoTyArg co ty
       ; return (Type ty', m_co') }
  | otherwise
  = do { (arg_mco, m_co') <- pushCoValArg co
       ; let arg_mco' = checkReflexiveMCo arg_mco
             -- checkReflexiveMCo: see Note [Check for reflexive casts in eta expansion]
             -- The coercion is very often (arg_co -> res_co), but without
             -- the argument coercion actually being ReflCo
       ; return (arg `mkCastMCo` arg_mco', m_co') }

pushCoTyArg :: CoercionR -> Type -> Maybe (Type, MCoercionR)
-- We have (fun |> co) @ty
-- Push the coercion through to return
--         (fun @ty') |> co'
-- 'co' is always Representational
-- If the returned coercion is Nothing, then it would have been reflexive;
-- it's faster not to compute it, though.
pushCoTyArg co ty
  -- The following is inefficient - don't do `eqType` here, the coercion
  -- optimizer will take care of it. See #14737.
  -- -- | tyL `eqType` tyR
  -- -- = Just (ty, Nothing)

  | isReflCo co
  = Just (ty, MRefl)

  | isForAllTy_ty tyL
  = assertPpr (isForAllTy_ty tyR) (ppr co $$ ppr ty) $
    Just (ty `mkCastTy` co1, MCo co2)

  | otherwise
  = Nothing
  where
    Pair tyL tyR = coercionKind co
       -- co :: tyL ~ tyR
       -- tyL = forall (a1 :: k1). ty1
       -- tyR = forall (a2 :: k2). ty2

    co1 = mkSymCo (mkNthCo Nominal 0 co)
       -- co1 :: k2 ~N k1
       -- Note that NthCo can extract a Nominal equality between the
       -- kinds of the types related by a coercion between forall-types.
       -- See the NthCo case in GHC.Core.Lint.

    co2 = mkInstCo co (mkGReflLeftCo Nominal ty co1)
        -- co2 :: ty1[ (ty|>co1)/a1 ] ~ ty2[ ty/a2 ]
        -- Arg of mkInstCo is always nominal, hence mkNomReflCo
        -- NOTE(review): the code actually builds the argument with
        -- mkGReflLeftCo, not mkNomReflCo; the comment above may have
        -- drifted from the code -- verify against GHC.Core.Coercion.

pushCoValArg :: CoercionR -> Maybe (MCoercionR, MCoercionR)
-- We have (fun |> co) arg
-- Push the coercion through to return
--         (fun (arg |> co_arg)) |> co_res
-- 'co' is always Representational
-- If the second returned Coercion is actually Nothing, then no cast is necessary;
-- the returned coercion would have been reflexive.
pushCoValArg co
  -- The following is inefficient - don't do `eqType` here, the coercion
  -- optimizer will take care of it. See #14737.
  -- -- | tyL `eqType` tyR
  -- -- = Just (mkRepReflCo arg, Nothing)

  | isReflCo co
  = Just (MRefl, MRefl)

  | isFunTy tyL
  , (co_mult, co1, co2) <- decomposeFunCo Representational co
  , isReflexiveCo co_mult
    -- We can't push the coercion in the case where co_mult isn't reflexivity:
    -- it could be an unsafe axiom, and losing this information could yield
    -- ill-typed terms. For instance (fun x ::(1) Int -> (fun _ -> () |> co) x)
    -- with co :: (Int -> ()) ~ (Int %1 -> ()), would reduce to (fun x ::(1) Int
    -- -> (fun _ ::(Many) Int -> ()) x) which is ill-typed

    -- If   co :: (tyL1 -> tyL2) ~ (tyR1 -> tyR2)
    -- then co1 :: tyL1 ~ tyR1
    --      co2 :: tyL2 ~ tyR2
  = assertPpr (isFunTy tyR) (ppr co $$ ppr arg) $
    Just (coToMCo (mkSymCo co1), coToMCo co2)
    -- Critically, coToMCo checks for ReflCo; the whole coercion may not
    -- be reflexive, but either of its components might be
    -- We could use isReflexiveCo, but it's not clear if the benefit
    -- is worth the cost, and it makes no difference in #18223

  | otherwise
  = Nothing
  where
    arg = funArgTy tyR
    Pair tyL tyR = coercionKind co

pushCoercionIntoLambda
    :: HasDebugCallStack => InScopeSet -> Var -> CoreExpr -> CoercionR
    -> Maybe (Var, CoreExpr)
-- This implements the Push rule from the paper on coercions
--    (\x. e) |> co
-- ===>
--    (\x'. e |> co')
pushCoercionIntoLambda in_scope x e co
    | assert (not (isTyVar x) && not (isCoVar x)) True
    , Pair s1s2 t1t2 <- coercionKind co
    , Just (_, _s1, _s2) <- splitFunTy_maybe s1s2
    , Just (w1, t1, _t2) <- splitFunTy_maybe t1t2
    , (co_mult, co1, co2) <- decomposeFunCo Representational co
    , isReflexiveCo co_mult
      -- We can't push the coercion in the case where co_mult isn't
      -- reflexivity. See pushCoValArg for more details.
    = let
          -- Should we optimize the coercions here?
          -- Otherwise they might not match too well
          x' = x `setIdType` t1 `setIdMult` w1
          in_scope' = in_scope `extendInScopeSet` x'
          subst = extendIdSubst (mkEmptySubst in_scope') x
                                (mkCast (Var x') (mkSymCo co1))
            -- We substitute x' for x, except we need to preserve types.
            -- The types are as follows:
            --   x :: s1,  x' :: t1,  co1 :: s1 ~# t1,
            -- so we extend the substitution with x |-> (x' |> sym co1).
      in Just (x', substExpr subst e `mkCast` co2)
    | otherwise
      -- See #21555 / #21577 for a case where this trace fired but the cause was benign
    = -- pprTrace "exprIsLambda_maybe: Unexpected lambda in case" (ppr (Lam x e))
      Nothing

pushCoDataCon :: DataCon -> [CoreExpr] -> Coercion
              -> Maybe (DataCon
                       , [Type]      -- Universal type args
                       , [CoreExpr]) -- All other args incl existentials
-- Implement the KPush reduction rule as described in "Down with kinds"
-- The transformation applies iff we have
--      (C e1 ... en) `cast` co
-- where co :: (T t1 .. tn) ~ to_ty
-- The left-hand one must be a T, because exprIsConApp returned True
-- but the right-hand one might not be.  (Though it usually will.)
pushCoDataCon dc dc_args co
  | isReflCo co || from_ty `eqType` to_ty  -- try cheap test first
  , let (univ_ty_args, rest_args) = splitAtList (dataConUnivTyVars dc) dc_args
  = Just (dc, map exprToType univ_ty_args, rest_args)

  | Just (to_tc, to_tc_arg_tys) <- splitTyConApp_maybe to_ty
  , to_tc == dataConTyCon dc
        -- These two tests can fail; we might see
        --      (C x y) `cast` (g :: T a ~ S [a]),
        -- where S is a type function.  In fact, exprIsConApp
        -- will probably not be called in such circumstances,
        -- but there's nothing wrong with it

  = let
        tc_arity       = tyConArity to_tc
        dc_univ_tyvars = dataConUnivTyVars dc
        dc_ex_tcvars   = dataConExTyCoVars dc
        arg_tys        = dataConRepArgTys dc

        non_univ_args  = dropList dc_univ_tyvars dc_args
        (ex_args, val_args) = splitAtList dc_ex_tcvars non_univ_args

        -- Make the "Psi" from the paper
        omegas = decomposeCo tc_arity co (tyConRolesRepresentational to_tc)
        (psi_subst, to_ex_arg_tys)
          = liftCoSubstWithEx Representational
                              dc_univ_tyvars
                              omegas
                              dc_ex_tcvars
                              (map exprToType ex_args)

        -- Cast the value arguments (which include dictionaries)
        new_val_args = zipWith cast_arg (map scaledThing arg_tys) val_args
        cast_arg arg_ty arg = mkCast arg (psi_subst arg_ty)

        to_ex_args = map Type to_ex_arg_tys

        dump_doc = vcat [ppr dc,      ppr dc_univ_tyvars, ppr dc_ex_tcvars,
                         ppr arg_tys, ppr dc_args,
                         ppr ex_args, ppr val_args, ppr co, ppr from_ty,
                         ppr to_ty, ppr to_tc
                        , ppr $ mkTyConApp to_tc (map exprToType $ takeList dc_univ_tyvars dc_args) ]
    in
    assertPpr (eqType from_ty (mkTyConApp to_tc (map exprToType $ takeList dc_univ_tyvars dc_args))) dump_doc $
    assertPpr (equalLength val_args arg_tys) dump_doc $
    Just (dc, to_tc_arg_tys, to_ex_args ++ new_val_args)

  | otherwise
  = Nothing

  where
    Pair from_ty to_ty = coercionKind co

collectBindersPushingCo :: CoreExpr -> ([Var], CoreExpr)
-- Collect lambda binders, pushing coercions inside if possible
-- E.g.   (\x.e) |> g         g :: <ty> -> blah
--        = (\x. e |> Nth 1 g)
--
-- That is,
--
-- collectBindersPushingCo ((\x.e) |> g) === ([x], e |> Nth 1 g)
collectBindersPushingCo e
  = go [] e
  where
    -- Peel off lambdas until we hit a cast.
    go :: [Var] -> CoreExpr -> ([Var], CoreExpr)
    -- The accumulator is in reverse order
    go bs (Lam b e)   = go (b:bs) e
    go bs (Cast e co) = go_c bs e co
    go bs e           = (reverse bs, e)

    -- We are in a cast; peel off casts until we hit a lambda.
    go_c :: [Var] -> CoreExpr -> CoercionR -> ([Var], CoreExpr)
    -- (go_c bs e c) is same as (go bs e (e |> c))
    go_c bs (Cast e co1) co2 = go_c bs e (co1 `mkTransCo` co2)
    go_c bs (Lam b e)    co  = go_lam bs b e co
    go_c bs e            co  = (reverse bs, mkCast e co)

    -- We are in a lambda under a cast; peel off lambdas and build a
    -- new coercion for the body.
    go_lam :: [Var] -> Var -> CoreExpr -> CoercionR -> ([Var], CoreExpr)
    -- (go_lam bs b e c) is same as (go_c bs (\b.e) c)
    go_lam bs b e co
      | isTyVar b
      , let Pair tyL tyR = coercionKind co
      , assert (isForAllTy_ty tyL) $
        isForAllTy_ty tyR
      , isReflCo (mkNthCo Nominal 0 co)  -- See Note [collectBindersPushingCo]
      = go_c (b:bs) e (mkInstCo co (mkNomReflCo (mkTyVarTy b)))

      | isCoVar b
      , let Pair tyL tyR = coercionKind co
      , assert (isForAllTy_co tyL) $
        isForAllTy_co tyR
      , isReflCo (mkNthCo Nominal 0 co)  -- See Note [collectBindersPushingCo]
      , let cov = mkCoVarCo b
      = go_c (b:bs) e (mkInstCo co (mkNomReflCo (mkCoercionTy cov)))

      | isId b
      , let Pair tyL tyR = coercionKind co
      , assert (isFunTy tyL) $ isFunTy tyR
      , (co_mult, co_arg, co_res) <- decomposeFunCo Representational co
      , isReflCo co_mult -- See Note [collectBindersPushingCo]
      , isReflCo co_arg  -- See Note [collectBindersPushingCo]
      = go_c (b:bs) e co_res

      | otherwise = (reverse bs, mkCast (Lam b e) co)

{-

Note [collectBindersPushingCo]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We just look for coercions of form
   <ty> % w -> blah
(and similarly for foralls) to keep this function simple.  We could do
more elaborate stuff, but it'd involve substitution etc.

-}

{- *********************************************************************
*                                                                      *
                Join points
*                                                                      *
********************************************************************* -}

-------------------
-- | Split an expression into the given number of binders and a body,
-- eta-expanding if necessary. Counts value *and* type binders.
etaExpandToJoinPoint :: JoinArity -> CoreExpr -> ([CoreBndr], CoreExpr)
etaExpandToJoinPoint join_arity expr
  = go join_arity [] expr
  where
    -- Peel off existing lambdas first; only eta-expand the remainder
    go 0 rev_bs e         = (reverse rev_bs, e)
    go n rev_bs (Lam b e) = go (n-1) (b : rev_bs) e
    go n rev_bs e         = case etaBodyForJoinPoint n e of
                              (bs, e') -> (reverse rev_bs ++ bs, e')

etaExpandToJoinPointRule :: JoinArity -> CoreRule -> CoreRule
etaExpandToJoinPointRule _ rule@(BuiltinRule {})
  = warnPprTrace True "Can't eta-expand built-in rule:" (ppr rule)
      -- How did a local binding get a built-in rule anyway? Probably a plugin.
    rule
etaExpandToJoinPointRule join_arity rule@(Rule { ru_bndrs = bndrs, ru_rhs = rhs
                                               , ru_args  = args })
  | need_args == 0
  = rule
  | need_args < 0
  = pprPanic "etaExpandToJoinPointRule" (ppr join_arity $$ ppr rule)
    -- A rule with more args than the join arity would be unusable; panic
  | otherwise
  = rule { ru_bndrs = bndrs ++ new_bndrs, ru_args = args ++ new_args
         , ru_rhs = new_rhs }
  where
    need_args = join_arity - length args
    (new_bndrs, new_rhs) = etaBodyForJoinPoint need_args rhs
    new_args = varsToCoreExprs new_bndrs

-- Adds as many binders as asked for; assumes expr is not a lambda
etaBodyForJoinPoint :: Int -> CoreExpr -> ([CoreBndr], CoreExpr)
etaBodyForJoinPoint need_args body
  = go need_args (exprType body) (init_subst body) [] body
  where
    go 0 _  _     rev_bs e
      = (reverse rev_bs, e)
    go n ty subst rev_bs e
      | Just (tv, res_ty) <- splitForAllTyCoVar_maybe ty
      , let (subst', tv') = substVarBndr subst tv
      = go (n-1) res_ty subst' (tv' : rev_bs) (e `App` varToCoreExpr tv')
        -- The varToCoreExpr is important: `tv` might be a coercion variable

      | Just (mult, arg_ty, res_ty) <- splitFunTy_maybe ty
      , let (subst', b) = freshEtaId n subst (Scaled mult arg_ty)
      = go (n-1) res_ty subst' (b : rev_bs) (e `App` varToCoreExpr b)
        -- The varToCoreExpr is important: `b` might be a coercion variable

      | otherwise
      = pprPanic "etaBodyForJoinPoint" $ int need_args $$
                                         ppr body $$ ppr (exprType body)

    init_subst e = mkEmptyTCvSubst (mkInScopeSet (exprFreeVars e))

--------------
freshEtaId :: Int -> TCvSubst -> Scaled Type -> (TCvSubst, Id)
-- Make a fresh Id, with specified type (after applying substitution)
-- It should be "fresh" in the sense that it's not in the in-scope set
-- of the TvSubstEnv; and it should itself then be added to the in-scope
-- set of the TvSubstEnv
--
-- The Int is just a reasonable starting point for generating a unique;
-- it does not necessarily have to be unique itself.
freshEtaId n subst ty
  = (subst', eta_id')
  where
    Scaled mult' ty' = Type.substScaledTyUnchecked subst ty
    eta_id' = uniqAway (getTCvInScope subst) $
              mkSysLocalOrCoVar (fsLit "eta") (mkBuiltinUnique n) mult' ty'
              -- "OrCoVar" since this can be used to eta-expand
              -- coercion abstractions
    subst'  = extendTCvInScope subst eta_id'
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/CallerCC.hs0000644000000000000000000001534314472400112021237 0ustar0000000000000000
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE TupleSections #-}

-- | Adds cost-centers to call sites selected with the @-fprof-caller=...@
-- flag.
module GHC.Core.Opt.CallerCC
    ( addCallerCostCentres
    , CallerCcFilter(..)
    , NamePattern(..)
    , parseCallerCcFilter
    ) where

import Data.Word (Word8)
import Data.Maybe
import Control.Applicative
import GHC.Utils.Monad.State.Strict
import Data.Either
import Control.Monad
import qualified Text.ParserCombinators.ReadP as P

import GHC.Prelude
import GHC.Utils.Outputable as Outputable
import GHC.Driver.Session
import GHC.Driver.Ppr
import GHC.Types.CostCentre
import GHC.Types.CostCentre.State
import GHC.Types.Name hiding (varName)
import GHC.Types.Tickish
import GHC.Unit.Module.Name
import GHC.Unit.Module.ModGuts
import GHC.Types.SrcLoc
import GHC.Types.Var
import GHC.Unit.Types
import GHC.Data.FastString
import GHC.Core
import GHC.Core.Opt.Monad
import GHC.Utils.Panic
import qualified GHC.Utils.Binary as B
import Data.Char

-- | Re-write the module's bindings, wrapping a profiling tick around every
-- occurrence of a function that matches the @-fprof-caller@ filters held in
-- the session's DynFlags.
addCallerCostCentres :: ModGuts -> CoreM ModGuts
addCallerCostCentres guts = do
  dflags <- getDynFlags
  let filters = callerCcFilters dflags
  let env :: Env
      env = Env
        { thisModule = mg_module guts
        , ccState = newCostCentreState
        , dflags = dflags
        , revParents = []
        , filters = filters
        }
  let guts' = guts { mg_binds = doCoreProgram env (mg_binds guts) }
  return guts'

-- Walk every top-level binding, threading a fresh cost-centre state
doCoreProgram :: Env -> CoreProgram -> CoreProgram
doCoreProgram env binds = flip evalState newCostCentreState $ do
    mapM (doBind env) binds

-- Process one binding; the binder becomes a "parent" for its RHS
doBind :: Env -> CoreBind -> M CoreBind
doBind env (NonRec b rhs) = NonRec b <$> doExpr (addParent b env) rhs
doBind env (Rec bs) = Rec <$> mapM doPair bs
  where
    doPair (b, rhs) = (b,) <$> doExpr (addParent b env) rhs

-- Traverse an expression, inserting a ProfNote tick around each variable
-- occurrence that the filters select
doExpr :: Env -> CoreExpr -> M CoreExpr
doExpr env e@(Var v)
  | needsCallSiteCostCentre env v = do
    let nameDoc :: SDoc
        -- Cost-centre name: the chain of enclosing binders, then the callee
        nameDoc = withUserStyle alwaysQualify DefaultDepth $
          hcat (punctuate dot (map ppr (parents env))) <> parens (text "calling:" <> ppr v)

        ccName :: CcName
        ccName = mkFastString $ showSDoc (dflags env) nameDoc
    ccIdx <- getCCIndex' ccName
    let count = gopt Opt_ProfCountEntries (dflags env)
        -- Source location: taken from the innermost enclosing binder, if any
        span = case revParents env of
          top:_ -> nameSrcSpan $ varName top
          _     -> noSrcSpan
        cc = NormalCC (ExprCC ccIdx) ccName (thisModule env) span
        tick :: CoreTickish
        tick = ProfNote cc count True
    pure $ Tick tick e
  | otherwise = pure e
doExpr _env e@(Lit _)       = pure e
doExpr env (f `App` x)      = App <$> doExpr env f <*> doExpr env x
doExpr env (Lam b x)        = Lam b <$> doExpr env x
doExpr env (Let b rhs)      = Let <$> doBind env b <*> doExpr env rhs
doExpr env (Case scrut b ty alts) =
    Case <$> doExpr env scrut <*> pure b <*> pure ty <*> mapM doAlt alts
  where
    doAlt (Alt con bs rhs) = Alt con bs <$> doExpr env rhs
doExpr env (Cast expr co)   = Cast <$> doExpr env expr <*> pure co
doExpr env (Tick t e)       = Tick t <$> doExpr env e
doExpr _env e@(Type _)      = pure e
doExpr _env e@(Coercion _)  = pure e

type M = State CostCentreState

-- Allocate (or reuse) the index for a cost-centre name
getCCIndex' :: FastString -> M CostCentreIndex
getCCIndex' name = state (getCCIndex name)

data Env = Env
  { thisModule  :: Module             -- Module being compiled
  , dflags      :: DynFlags
  , ccState     :: CostCentreState
  , revParents  :: [Id]               -- Enclosing binders, innermost first
  , filters     :: [CallerCcFilter]   -- From -fprof-caller flags
  }

addParent :: Id -> Env -> Env
addParent i env = env { revParents = i : revParents env }

-- Enclosing binders, outermost first
parents :: Env -> [Id]
parents env = reverse (revParents env)

-- True iff any filter's module and function patterns both match this Id
needsCallSiteCostCentre :: Env -> Id -> Bool
needsCallSiteCostCentre env i =
    any matches (filters env)
  where
    matches :: CallerCcFilter -> Bool
    matches ccf = checkModule && checkFunc
      where
        checkModule =
          case ccfModuleName ccf of
            Just modFilt
              | Just iMod <- nameModule_maybe (varName i)
              -> moduleName iMod == modFilt
              | otherwise
              -> False
            Nothing -> True
        checkFunc =
          occNameMatches (ccfFuncName ccf) (getOccName i)

-- A glob-like pattern over occurrence names: literal chars and '*' wildcards
data NamePattern
    = PChar Char NamePattern
    | PWildcard NamePattern
    | PEnd

instance Outputable NamePattern where
  ppr (PChar c rest) = char c <> ppr rest
  ppr (PWildcard rest) = char '*' <> ppr rest
  ppr PEnd = Outputable.empty

instance B.Binary NamePattern where
  get bh = do
    tag <- B.get bh
    case tag :: Word8 of
      0 -> PChar <$> B.get bh <*> B.get bh
      1 -> PWildcard <$> B.get bh
      2 -> pure PEnd
      _ -> panic "Binary(NamePattern): Invalid tag"
  put_ bh (PChar x y) = B.put_ bh (0 :: Word8) >> B.put_ bh x >> B.put_ bh y
  put_ bh (PWildcard x) = B.put_ bh (1 :: Word8) >> B.put_ bh x
  put_ bh PEnd = B.put_ bh (2 :: Word8)

-- Match a NamePattern against an OccName; PWildcard matches any run of
-- characters by trying the empty match first, then consuming one char.
-- NOTE(review): the wildcard branch recurses via 'tail s'; if 's' is empty
-- and 'rest' fails, 'tail ""' would raise an exception.  OccName strings
-- are non-empty in practice -- confirm before relying on this elsewhere.
occNameMatches :: NamePattern -> OccName -> Bool
occNameMatches pat = go pat . occNameString
  where
    go :: NamePattern -> String -> Bool
    go PEnd "" = True
    go (PChar c rest) (d:s)
      = d == c && go rest s
    go (PWildcard rest) s
      = go rest s || go (PWildcard rest) (tail s)
    go _ _ = False

type Parser = P.ReadP

-- Grammar: "\*" is a literal '*'; bare '*' is a wildcard; any other
-- character matches itself; end of input is PEnd.
parseNamePattern :: Parser NamePattern
parseNamePattern = pattern
  where
    pattern = star P.<++ wildcard P.<++ char P.<++ end
    star = PChar '*' <$ P.string "\\*" <*> pattern
    wildcard = do
      void $ P.char '*'
      PWildcard <$> pattern
    char = PChar <$> P.get <*> pattern
    end = PEnd <$ P.eof

-- A single -fprof-caller filter: an optional module restriction plus a
-- pattern over function occurrence names
data CallerCcFilter
    = CallerCcFilter { ccfModuleName :: Maybe ModuleName
                     , ccfFuncName   :: NamePattern
                     }

instance Outputable CallerCcFilter where
  ppr ccf =
    maybe (char '*') ppr (ccfModuleName ccf)
    <> char '.'
    <> ppr (ccfFuncName ccf)

instance B.Binary CallerCcFilter where
  get bh = CallerCcFilter <$> B.get bh <*> B.get bh
  put_ bh (CallerCcFilter x y) = B.put_ bh x >> B.put_ bh y

-- Parse "Module.pattern" or "*.pattern"; the whole input must be consumed
parseCallerCcFilter :: String -> Either String CallerCcFilter
parseCallerCcFilter inp =
    case P.readP_to_S parseCallerCcFilter' inp of
      ((result, ""):_) -> Right result
      _ -> Left $ "parse error on " ++ inp

parseCallerCcFilter' :: Parser CallerCcFilter
parseCallerCcFilter' =
  CallerCcFilter
    <$> moduleFilter
    <*  P.char '.'
    <*> parseNamePattern
  where
    moduleFilter :: Parser (Maybe ModuleName)
    moduleFilter =
      (Just . mkModuleName <$> moduleName)
      <|>
      (Nothing <$ P.char '*')

    -- Dotted module name: each component starts with an upper-case letter
    moduleName :: Parser String
    moduleName = do
      c <- P.satisfy isUpper
      cs <- P.munch1 (\c -> isUpper c || isLower c || isDigit c || c == '_')
      rest <- optional $ P.char '.' >> fmap ('.':) moduleName
      return $ c : (cs ++ fromMaybe "" rest)
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/ConstantFold.hs0000644000000000000000000040540114472400112022223 0ustar0000000000000000
{-
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998

Conceptually, constant folding should be parameterized with the kind
of target machine to get identical behaviour during compilation time
and runtime. We cheat a little bit here...

ToDo:
   check boundaries before folding, e.g. we can fold the Float addition
   (i1 + i2) only if it results in a valid Float.
-}

{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiWayIf #-}
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE ViewPatterns #-}

{-# OPTIONS_GHC -optc-DNON_POSIX_SOURCE -Wno-incomplete-uni-patterns #-}

-- | Constant Folder
module GHC.Core.Opt.ConstantFold
   ( primOpRules
   , builtinRules
   , caseRules
   )
where

import GHC.Prelude

import GHC.Platform

import GHC.Types.Id.Make ( voidPrimId )
import GHC.Types.Id
import GHC.Types.Literal
import GHC.Types.Name.Occurrence ( occNameFS )
import GHC.Types.Tickish
import GHC.Types.Name ( Name, nameOccName )
import GHC.Types.Basic

import GHC.Core
import GHC.Core.Make
import GHC.Core.SimpleOpt ( exprIsConApp_maybe, exprIsLiteral_maybe )
import GHC.Core.DataCon ( DataCon, dataConTagZ, dataConTyCon, dataConWrapId, dataConWorkId )
import GHC.Core.Utils ( cheapEqExpr, exprIsHNF, exprType
                      , stripTicksTop, stripTicksTopT, mkTicks )
import GHC.Core.Multiplicity
import GHC.Core.Type
import GHC.Core.TyCon
   ( tyConDataCons_maybe, isAlgTyCon, isEnumerationTyCon
   , isNewTyCon, tyConDataCons
   , tyConFamilySize )
import GHC.Core.Map.Expr ( eqCoreExpr )

import GHC.Builtin.PrimOps ( PrimOp(..), tagToEnumKey )
import GHC.Builtin.PrimOps.Ids (primOpId)
import GHC.Builtin.Types
import GHC.Builtin.Types.Prim
import GHC.Builtin.Names

import
GHC.Data.FastString import GHC.Data.Maybe ( orElse ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Trace import Control.Applicative ( Alternative(..) ) import Control.Monad import Data.Functor (($>)) import qualified Data.ByteString as BS import Data.Ratio import Data.Word import Data.Maybe (fromMaybe, fromJust) {- Note [Constant folding] ~~~~~~~~~~~~~~~~~~~~~~~ primOpRules generates a rewrite rule for each primop These rules do what is often called "constant folding" E.g. the rules for +# might say 4 +# 5 = 9 Well, of course you'd need a lot of rules if you did it like that, so we use a BuiltinRule instead, so that we can match in any two literal values. So the rule is really more like (Lit x) +# (Lit y) = Lit (x+#y) where the (+#) on the rhs is done at compile time That is why these rules are built in here. -} primOpRules :: Name -> PrimOp -> Maybe CoreRule primOpRules nm = \case TagToEnumOp -> mkPrimOpRule nm 2 [ tagToEnumRule ] DataToTagOp -> mkPrimOpRule nm 2 [ dataToTagRule ] -- Int8 operations Int8AddOp -> mkPrimOpRule nm 2 [ binaryLit (int8Op2 (+)) , identity zeroI8 , addFoldingRules Int8AddOp int8Ops ] Int8SubOp -> mkPrimOpRule nm 2 [ binaryLit (int8Op2 (-)) , rightIdentity zeroI8 , equalArgs $> Lit zeroI8 , subFoldingRules Int8SubOp int8Ops ] Int8MulOp -> mkPrimOpRule nm 2 [ binaryLit (int8Op2 (*)) , zeroElem , identity oneI8 , mulFoldingRules Int8MulOp int8Ops ] Int8QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int8Op2 quot) , leftZero , rightIdentity oneI8 , equalArgs $> Lit oneI8 ] Int8RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int8Op2 rem) , leftZero , oneLit 1 $> Lit zeroI8 , equalArgs $> Lit zeroI8 ] Int8NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int8NegOp ] Int8SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt8 (const shiftL) , rightIdentity zeroI8 ] Int8SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt8 (const shiftR) , rightIdentity 
zeroI8 ] Int8SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt8 $ const $ shiftRightLogical @Word8 , rightIdentity zeroI8 ] -- Word8 operations Word8AddOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (+)) , identity zeroW8 , addFoldingRules Word8AddOp word8Ops ] Word8SubOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (-)) , rightIdentity zeroW8 , equalArgs $> Lit zeroW8 , subFoldingRules Word8SubOp word8Ops ] Word8MulOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (*)) , identity oneW8 , mulFoldingRules Word8MulOp word8Ops ] Word8QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word8Op2 quot) , rightIdentity oneW8 ] Word8RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word8Op2 rem) , leftZero , oneLit 1 $> Lit zeroW8 , equalArgs $> Lit zeroW8 ] Word8AndOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord8 0xFF) , sameArgIdempotentCommut Word8AndOp , andFoldingRules word8Ops ] Word8OrOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 (.|.)) , idempotent , identity zeroW8 , sameArgIdempotentCommut Word8OrOp , orFoldingRules word8Ops ] Word8XorOp -> mkPrimOpRule nm 2 [ binaryLit (word8Op2 xor) , identity zeroW8 , equalArgs $> Lit zeroW8 ] Word8NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word8NotOp ] Word8SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord8 (const shiftL) ] Word8SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord8 $ const $ shiftRightLogical @Word8 ] -- Int16 operations Int16AddOp -> mkPrimOpRule nm 2 [ binaryLit (int16Op2 (+)) , identity zeroI16 , addFoldingRules Int16AddOp int16Ops ] Int16SubOp -> mkPrimOpRule nm 2 [ binaryLit (int16Op2 (-)) , rightIdentity zeroI16 , equalArgs $> Lit zeroI16 , subFoldingRules Int16SubOp int16Ops ] Int16MulOp -> mkPrimOpRule nm 2 [ binaryLit (int16Op2 (*)) , zeroElem , identity oneI16 , mulFoldingRules Int16MulOp int16Ops ] Int16QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int16Op2 quot) , leftZero , rightIdentity oneI16 , equalArgs 
$> Lit oneI16 ] Int16RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int16Op2 rem) , leftZero , oneLit 1 $> Lit zeroI16 , equalArgs $> Lit zeroI16 ] Int16NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int16NegOp ] Int16SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt16 (const shiftL) , rightIdentity zeroI16 ] Int16SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt16 (const shiftR) , rightIdentity zeroI16 ] Int16SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt16 $ const $ shiftRightLogical @Word16 , rightIdentity zeroI16 ] -- Word16 operations Word16AddOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (+)) , identity zeroW16 , addFoldingRules Word16AddOp word16Ops ] Word16SubOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (-)) , rightIdentity zeroW16 , equalArgs $> Lit zeroW16 , subFoldingRules Word16SubOp word16Ops ] Word16MulOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (*)) , identity oneW16 , mulFoldingRules Word16MulOp word16Ops ] Word16QuotOp-> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word16Op2 quot) , rightIdentity oneW16 ] Word16RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word16Op2 rem) , leftZero , oneLit 1 $> Lit zeroW16 , equalArgs $> Lit zeroW16 ] Word16AndOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord16 0xFFFF) , sameArgIdempotentCommut Word16AndOp , andFoldingRules word16Ops ] Word16OrOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 (.|.)) , idempotent , identity zeroW16 , sameArgIdempotentCommut Word16OrOp , orFoldingRules word16Ops ] Word16XorOp -> mkPrimOpRule nm 2 [ binaryLit (word16Op2 xor) , identity zeroW16 , equalArgs $> Lit zeroW16 ] Word16NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word16NotOp ] Word16SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord16 (const shiftL) ] Word16SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord16 $ const $ shiftRightLogical @Word16 ] -- Int32 operations Int32AddOp -> mkPrimOpRule nm 2 [ binaryLit 
(int32Op2 (+)) , identity zeroI32 , addFoldingRules Int32AddOp int32Ops ] Int32SubOp -> mkPrimOpRule nm 2 [ binaryLit (int32Op2 (-)) , rightIdentity zeroI32 , equalArgs $> Lit zeroI32 , subFoldingRules Int32SubOp int32Ops ] Int32MulOp -> mkPrimOpRule nm 2 [ binaryLit (int32Op2 (*)) , zeroElem , identity oneI32 , mulFoldingRules Int32MulOp int32Ops ] Int32QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int32Op2 quot) , leftZero , rightIdentity oneI32 , equalArgs $> Lit oneI32 ] Int32RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int32Op2 rem) , leftZero , oneLit 1 $> Lit zeroI32 , equalArgs $> Lit zeroI32 ] Int32NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int32NegOp ] Int32SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt32 (const shiftL) , rightIdentity zeroI32 ] Int32SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt32 (const shiftR) , rightIdentity zeroI32 ] Int32SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt32 $ const $ shiftRightLogical @Word32 , rightIdentity zeroI32 ] -- Word32 operations Word32AddOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (+)) , identity zeroW32 , addFoldingRules Word32AddOp word32Ops ] Word32SubOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (-)) , rightIdentity zeroW32 , equalArgs $> Lit zeroW32 , subFoldingRules Word32SubOp word32Ops ] Word32MulOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (*)) , identity oneW32 , mulFoldingRules Word32MulOp word32Ops ] Word32QuotOp-> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word32Op2 quot) , rightIdentity oneW32 ] Word32RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word32Op2 rem) , leftZero , oneLit 1 $> Lit zeroW32 , equalArgs $> Lit zeroW32 ] Word32AndOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord32 0xFFFFFFFF) , sameArgIdempotentCommut Word32AndOp , andFoldingRules word32Ops ] Word32OrOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 (.|.)) , idempotent , identity zeroW32 , 
sameArgIdempotentCommut Word32OrOp , orFoldingRules word32Ops ] Word32XorOp -> mkPrimOpRule nm 2 [ binaryLit (word32Op2 xor) , identity zeroW32 , equalArgs $> Lit zeroW32 ] Word32NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word32NotOp ] Word32SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord32 (const shiftL) ] Word32SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord32 $ const $ shiftRightLogical @Word32 ] -- Int64 operations Int64AddOp -> mkPrimOpRule nm 2 [ binaryLit (int64Op2 (+)) , identity zeroI64 , addFoldingRules Int64AddOp int64Ops ] Int64SubOp -> mkPrimOpRule nm 2 [ binaryLit (int64Op2 (-)) , rightIdentity zeroI64 , equalArgs $> Lit zeroI64 , subFoldingRules Int64SubOp int64Ops ] Int64MulOp -> mkPrimOpRule nm 2 [ binaryLit (int64Op2 (*)) , zeroElem , identity oneI64 , mulFoldingRules Int64MulOp int64Ops ] Int64QuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int64Op2 quot) , leftZero , rightIdentity oneI64 , equalArgs $> Lit oneI64 ] Int64RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (int64Op2 rem) , leftZero , oneLit 1 $> Lit zeroI64 , equalArgs $> Lit zeroI64 ] Int64NegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp Int64NegOp ] Int64SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt64 (const shiftL) , rightIdentity zeroI64 ] Int64SraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt64 (const shiftR) , rightIdentity zeroI64 ] Int64SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt64 $ const $ shiftRightLogical @Word64 , rightIdentity zeroI64 ] -- Word64 operations Word64AddOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (+)) , identity zeroW64 , addFoldingRules Word64AddOp word64Ops ] Word64SubOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (-)) , rightIdentity zeroW64 , equalArgs $> Lit zeroW64 , subFoldingRules Word64SubOp word64Ops ] Word64MulOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (*)) , identity oneW64 , mulFoldingRules Word64MulOp word64Ops ] Word64QuotOp-> mkPrimOpRule nm 2 [ nonZeroLit 1 >> 
binaryLit (word64Op2 quot) , rightIdentity oneW64 ] Word64RemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (word64Op2 rem) , leftZero , oneLit 1 $> Lit zeroW64 , equalArgs $> Lit zeroW64 ] Word64AndOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (.&.)) , idempotent , zeroElem , identity (mkLitWord64 0xFFFFFFFFFFFFFFFF) , sameArgIdempotentCommut Word64AndOp , andFoldingRules word64Ops ] Word64OrOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 (.|.)) , idempotent , identity zeroW64 , sameArgIdempotentCommut Word64OrOp , orFoldingRules word64Ops ] Word64XorOp -> mkPrimOpRule nm 2 [ binaryLit (word64Op2 xor) , identity zeroW64 , equalArgs $> Lit zeroW64 ] Word64NotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp Word64NotOp ] Word64SllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord64 (const shiftL) ] Word64SrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord64 $ const $ shiftRightLogical @Word64 ] -- Int operations IntAddOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (+)) , identityPlatform zeroi , addFoldingRules IntAddOp intOps ] IntSubOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (-)) , rightIdentityPlatform zeroi , equalArgs >> retLit zeroi , subFoldingRules IntSubOp intOps ] IntAddCOp -> mkPrimOpRule nm 2 [ binaryLit (intOpC2 (+)) , identityCPlatform zeroi ] IntSubCOp -> mkPrimOpRule nm 2 [ binaryLit (intOpC2 (-)) , rightIdentityCPlatform zeroi , equalArgs >> retLitNoC zeroi ] IntMulOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (*)) , zeroElem , identityPlatform onei , mulFoldingRules IntMulOp intOps ] IntMul2Op -> mkPrimOpRule nm 2 [ do [Lit (LitNumber _ l1), Lit (LitNumber _ l2)] <- getArgs platform <- getPlatform let r = l1 * l2 pure $ mkCoreUbxTup [intPrimTy,intPrimTy,intPrimTy] [ Lit (if platformInIntRange platform r then zeroi platform else onei platform) , mkIntLitWrap platform (r `shiftR` platformWordSizeInBits platform) , mkIntLitWrap platform r ] , zeroElem >>= \z -> pure (mkCoreUbxTup [intPrimTy,intPrimTy,intPrimTy] [z,z,z]) -- timesInt2# 1# 
other -- ~~~> -- (# 0#, 0# -# (other >># (WORD_SIZE_IN_BITS-1)), other #) -- The second element is the sign bit -- repeated to fill a word. , identityPlatform onei >>= \other -> do platform <- getPlatform pure $ mkCoreUbxTup [intPrimTy,intPrimTy,intPrimTy] [ Lit (zeroi platform) , mkCoreApps (Var (primOpId IntSubOp)) [ Lit (zeroi platform) , mkCoreApps (Var (primOpId IntSrlOp)) [ other , mkIntLit platform (fromIntegral (platformWordSizeInBits platform - 1)) ] ] , other ] ] IntQuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (intOp2 quot) , leftZero , rightIdentityPlatform onei , equalArgs >> retLit onei ] IntRemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (intOp2 rem) , leftZero , oneLit 1 >> retLit zeroi , equalArgs >> retLit zeroi ] IntAndOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (.&.)) , idempotent , zeroElem , identityPlatform (\p -> mkLitInt p (-1)) , sameArgIdempotentCommut IntAndOp , andFoldingRules intOps ] IntOrOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 (.|.)) , idempotent , identityPlatform zeroi , sameArgIdempotentCommut IntOrOp , orFoldingRules intOps ] IntXorOp -> mkPrimOpRule nm 2 [ binaryLit (intOp2 xor) , identityPlatform zeroi , equalArgs >> retLit zeroi ] IntNotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp IntNotOp ] IntNegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp IntNegOp ] IntSllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt (const shiftL) , rightIdentityPlatform zeroi ] IntSraOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt (const shiftR) , rightIdentityPlatform zeroi ] IntSrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumInt shiftRightLogicalNative , rightIdentityPlatform zeroi ] -- Word operations WordAddOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (+)) , identityPlatform zerow , addFoldingRules WordAddOp wordOps ] WordSubOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (-)) , rightIdentityPlatform zerow , equalArgs >> retLit zerow , subFoldingRules WordSubOp wordOps ] WordAddCOp -> mkPrimOpRule nm 
2 [ binaryLit (wordOpC2 (+)) , identityCPlatform zerow ] WordSubCOp -> mkPrimOpRule nm 2 [ binaryLit (wordOpC2 (-)) , rightIdentityCPlatform zerow , equalArgs >> retLitNoC zerow ] WordMulOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (*)) , identityPlatform onew , mulFoldingRules WordMulOp wordOps ] WordQuotOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (wordOp2 quot) , rightIdentityPlatform onew ] WordRemOp -> mkPrimOpRule nm 2 [ nonZeroLit 1 >> binaryLit (wordOp2 rem) , leftZero , oneLit 1 >> retLit zerow , equalArgs >> retLit zerow ] WordAndOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (.&.)) , idempotent , zeroElem , identityPlatform (\p -> mkLitWord p (platformMaxWord p)) , sameArgIdempotentCommut WordAndOp , andFoldingRules wordOps ] WordOrOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 (.|.)) , idempotent , identityPlatform zerow , sameArgIdempotentCommut WordOrOp , orFoldingRules wordOps ] WordXorOp -> mkPrimOpRule nm 2 [ binaryLit (wordOp2 xor) , identityPlatform zerow , equalArgs >> retLit zerow ] WordNotOp -> mkPrimOpRule nm 1 [ unaryLit complementOp , semiInversePrimOp WordNotOp ] WordSllOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord (const shiftL) ] WordSrlOp -> mkPrimOpRule nm 2 [ shiftRule LitNumWord shiftRightLogicalNative ] PopCnt8Op -> mkPrimOpRule nm 1 [ pop_count @Word8 ] PopCnt16Op -> mkPrimOpRule nm 1 [ pop_count @Word16 ] PopCnt32Op -> mkPrimOpRule nm 1 [ pop_count @Word32 ] PopCnt64Op -> mkPrimOpRule nm 1 [ pop_count @Word64 ] PopCntOp -> mkPrimOpRule nm 1 [ getWordSize >>= \case PW4 -> pop_count @Word32 PW8 -> pop_count @Word64 ] Ctz8Op -> mkPrimOpRule nm 1 [ ctz @Word8 ] Ctz16Op -> mkPrimOpRule nm 1 [ ctz @Word16 ] Ctz32Op -> mkPrimOpRule nm 1 [ ctz @Word32 ] Ctz64Op -> mkPrimOpRule nm 1 [ ctz @Word64 ] CtzOp -> mkPrimOpRule nm 1 [ getWordSize >>= \case PW4 -> ctz @Word32 PW8 -> ctz @Word64 ] Clz8Op -> mkPrimOpRule nm 1 [ clz @Word8 ] Clz16Op -> mkPrimOpRule nm 1 [ clz @Word16 ] Clz32Op -> mkPrimOpRule nm 1 [ clz @Word32 ] Clz64Op -> 
mkPrimOpRule nm 1 [ clz @Word64 ] ClzOp -> mkPrimOpRule nm 1 [ getWordSize >>= \case PW4 -> clz @Word32 PW8 -> clz @Word64 ] -- coercions Int8ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] Int16ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] Int32ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] Int64ToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToIntLit ] IntToInt8Op -> mkPrimOpRule nm 1 [ liftLit narrowInt8Lit , narrowSubsumesAnd IntAndOp IntToInt8Op 8 ] IntToInt16Op -> mkPrimOpRule nm 1 [ liftLit narrowInt16Lit , narrowSubsumesAnd IntAndOp IntToInt16Op 16 ] IntToInt32Op -> mkPrimOpRule nm 1 [ liftLit narrowInt32Lit , narrowSubsumesAnd IntAndOp IntToInt32Op 32 ] IntToInt64Op -> mkPrimOpRule nm 1 [ liftLit narrowInt64Lit ] Word8ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit , extendNarrowPassthrough WordToWord8Op 0xFF ] Word16ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit , extendNarrowPassthrough WordToWord16Op 0xFFFF ] Word32ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit , extendNarrowPassthrough WordToWord32Op 0xFFFFFFFF ] Word64ToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform convertToWordLit ] WordToWord8Op -> mkPrimOpRule nm 1 [ liftLit narrowWord8Lit , narrowSubsumesAnd WordAndOp WordToWord8Op 8 ] WordToWord16Op -> mkPrimOpRule nm 1 [ liftLit narrowWord16Lit , narrowSubsumesAnd WordAndOp WordToWord16Op 16 ] WordToWord32Op -> mkPrimOpRule nm 1 [ liftLit narrowWord32Lit , narrowSubsumesAnd WordAndOp WordToWord32Op 32 ] WordToWord64Op -> mkPrimOpRule nm 1 [ liftLit narrowWord64Lit ] Word8ToInt8Op -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt8) ] Int8ToWord8Op -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord8) ] Word16ToInt16Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt16) ] Int16ToWord16Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord16) ] Word32ToInt32Op-> mkPrimOpRule nm 1 [ 
liftLitPlatform (litNumCoerce LitNumInt32) ] Int32ToWord32Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord32) ] Word64ToInt64Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt64) ] Int64ToWord64Op-> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord64) ] WordToIntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumInt) ] IntToWordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumCoerce LitNumWord) ] Narrow8IntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumInt8) , subsumedByPrimOp Narrow8IntOp , Narrow8IntOp `subsumesPrimOp` Narrow16IntOp , Narrow8IntOp `subsumesPrimOp` Narrow32IntOp , narrowSubsumesAnd IntAndOp Narrow8IntOp 8 ] Narrow16IntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumInt16) , subsumedByPrimOp Narrow8IntOp , subsumedByPrimOp Narrow16IntOp , Narrow16IntOp `subsumesPrimOp` Narrow32IntOp , narrowSubsumesAnd IntAndOp Narrow16IntOp 16 ] Narrow32IntOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumInt32) , subsumedByPrimOp Narrow8IntOp , subsumedByPrimOp Narrow16IntOp , subsumedByPrimOp Narrow32IntOp , removeOp32 , narrowSubsumesAnd IntAndOp Narrow32IntOp 32 ] Narrow8WordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumWord8) , subsumedByPrimOp Narrow8WordOp , Narrow8WordOp `subsumesPrimOp` Narrow16WordOp , Narrow8WordOp `subsumesPrimOp` Narrow32WordOp , narrowSubsumesAnd WordAndOp Narrow8WordOp 8 ] Narrow16WordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumWord16) , subsumedByPrimOp Narrow8WordOp , subsumedByPrimOp Narrow16WordOp , Narrow16WordOp `subsumesPrimOp` Narrow32WordOp , narrowSubsumesAnd WordAndOp Narrow16WordOp 16 ] Narrow32WordOp -> mkPrimOpRule nm 1 [ liftLitPlatform (litNumNarrow LitNumWord32) , subsumedByPrimOp Narrow8WordOp , subsumedByPrimOp Narrow16WordOp , subsumedByPrimOp Narrow32WordOp , removeOp32 , narrowSubsumesAnd WordAndOp Narrow32WordOp 32 ] OrdOp -> mkPrimOpRule nm 1 [ liftLit charToIntLit , 
semiInversePrimOp ChrOp ] ChrOp -> mkPrimOpRule nm 1 [ do [Lit lit] <- getArgs guard (litFitsInChar lit) liftLit intToCharLit , semiInversePrimOp OrdOp ] FloatToIntOp -> mkPrimOpRule nm 1 [ liftLit floatToIntLit ] IntToFloatOp -> mkPrimOpRule nm 1 [ liftLit intToFloatLit ] DoubleToIntOp -> mkPrimOpRule nm 1 [ liftLit doubleToIntLit ] IntToDoubleOp -> mkPrimOpRule nm 1 [ liftLit intToDoubleLit ] -- SUP: Not sure what the standard says about precision in the following 2 cases FloatToDoubleOp -> mkPrimOpRule nm 1 [ liftLit floatToDoubleLit ] DoubleToFloatOp -> mkPrimOpRule nm 1 [ liftLit doubleToFloatLit ] -- Float FloatAddOp -> mkPrimOpRule nm 2 [ binaryLit (floatOp2 (+)) , identity zerof ] FloatSubOp -> mkPrimOpRule nm 2 [ binaryLit (floatOp2 (-)) , rightIdentity zerof ] FloatMulOp -> mkPrimOpRule nm 2 [ binaryLit (floatOp2 (*)) , identity onef , strengthReduction twof FloatAddOp ] -- zeroElem zerof doesn't hold because of NaN FloatDivOp -> mkPrimOpRule nm 2 [ guardFloatDiv >> binaryLit (floatOp2 (/)) , rightIdentity onef ] FloatNegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp FloatNegOp ] FloatDecode_IntOp -> mkPrimOpRule nm 1 [ unaryLit floatDecodeOp ] -- Double DoubleAddOp -> mkPrimOpRule nm 2 [ binaryLit (doubleOp2 (+)) , identity zerod ] DoubleSubOp -> mkPrimOpRule nm 2 [ binaryLit (doubleOp2 (-)) , rightIdentity zerod ] DoubleMulOp -> mkPrimOpRule nm 2 [ binaryLit (doubleOp2 (*)) , identity oned , strengthReduction twod DoubleAddOp ] -- zeroElem zerod doesn't hold because of NaN DoubleDivOp -> mkPrimOpRule nm 2 [ guardDoubleDiv >> binaryLit (doubleOp2 (/)) , rightIdentity oned ] DoubleNegOp -> mkPrimOpRule nm 1 [ unaryLit negOp , semiInversePrimOp DoubleNegOp ] DoubleDecode_Int64Op -> mkPrimOpRule nm 1 [ unaryLit doubleDecodeOp ] -- Relational operators, equality Int8EqOp -> mkRelOpRule nm (==) [ litEq True ] Int8NeOp -> mkRelOpRule nm (/=) [ litEq False ] Int16EqOp -> mkRelOpRule nm (==) [ litEq True ] Int16NeOp -> mkRelOpRule nm (/=) [ litEq 
False ] Int32EqOp -> mkRelOpRule nm (==) [ litEq True ] Int32NeOp -> mkRelOpRule nm (/=) [ litEq False ] Int64EqOp -> mkRelOpRule nm (==) [ litEq True ] Int64NeOp -> mkRelOpRule nm (/=) [ litEq False ] IntEqOp -> mkRelOpRule nm (==) [ litEq True ] IntNeOp -> mkRelOpRule nm (/=) [ litEq False ] Word8EqOp -> mkRelOpRule nm (==) [ litEq True ] Word8NeOp -> mkRelOpRule nm (/=) [ litEq False ] Word16EqOp -> mkRelOpRule nm (==) [ litEq True ] Word16NeOp -> mkRelOpRule nm (/=) [ litEq False ] Word32EqOp -> mkRelOpRule nm (==) [ litEq True ] Word32NeOp -> mkRelOpRule nm (/=) [ litEq False ] Word64EqOp -> mkRelOpRule nm (==) [ litEq True ] Word64NeOp -> mkRelOpRule nm (/=) [ litEq False ] WordEqOp -> mkRelOpRule nm (==) [ litEq True ] WordNeOp -> mkRelOpRule nm (/=) [ litEq False ] CharEqOp -> mkRelOpRule nm (==) [ litEq True ] CharNeOp -> mkRelOpRule nm (/=) [ litEq False ] FloatEqOp -> mkFloatingRelOpRule nm (==) FloatNeOp -> mkFloatingRelOpRule nm (/=) DoubleEqOp -> mkFloatingRelOpRule nm (==) DoubleNeOp -> mkFloatingRelOpRule nm (/=) -- Relational operators, ordering Int8GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int8GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int8LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int8LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Int16GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int16GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int16LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int16LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Int32GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int32GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int32LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int32LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Int64GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Int64GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Int64LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Int64LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] IntGtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] IntGeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] IntLeOp -> mkRelOpRule nm (<=) [ 
boundsCmp Le ] IntLtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word8GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word8GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word8LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word8LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word16GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word16GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word16LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word16LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word32GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word32GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word32LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word32LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] Word64GtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] Word64GeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] Word64LeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] Word64LtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] WordGtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] WordGeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] WordLeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] WordLtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] CharGtOp -> mkRelOpRule nm (>) [ boundsCmp Gt ] CharGeOp -> mkRelOpRule nm (>=) [ boundsCmp Ge ] CharLeOp -> mkRelOpRule nm (<=) [ boundsCmp Le ] CharLtOp -> mkRelOpRule nm (<) [ boundsCmp Lt ] FloatGtOp -> mkFloatingRelOpRule nm (>) FloatGeOp -> mkFloatingRelOpRule nm (>=) FloatLeOp -> mkFloatingRelOpRule nm (<=) FloatLtOp -> mkFloatingRelOpRule nm (<) DoubleGtOp -> mkFloatingRelOpRule nm (>) DoubleGeOp -> mkFloatingRelOpRule nm (>=) DoubleLeOp -> mkFloatingRelOpRule nm (<=) DoubleLtOp -> mkFloatingRelOpRule nm (<) -- Misc AddrAddOp -> mkPrimOpRule nm 2 [ rightIdentityPlatform zeroi ] SeqOp -> mkPrimOpRule nm 4 [ seqRule ] SparkOp -> mkPrimOpRule nm 4 [ sparkRule ] _ -> Nothing {- ************************************************************************ * * \subsection{Doing the business} * * ************************************************************************ -} -- useful shorthands mkPrimOpRule :: Name -> Int -> [RuleM 
            CoreExpr] -> Maybe CoreRule
mkPrimOpRule nm arity rules = Just $ mkBasicRule nm arity (msum rules)

-- | Build the rule for a two-argument comparison primop: constant-fold
-- comparisons of literals, fold @x `cmp` x@ (which cannot depend on @x@),
-- and try any extra rules supplied by the caller.
mkRelOpRule :: Name -> (forall a . Ord a => a -> a -> Bool)
            -> [RuleM CoreExpr] -> Maybe CoreRule
mkRelOpRule nm cmp extra
  = mkPrimOpRule nm 2 $
    binaryCmpLit cmp : equal_rule : extra
  where
        -- x `cmp` x does not depend on x, so
        -- compute it for the arbitrary value 'True'
        -- and use that result
    equal_rule = do { equalArgs
                    ; platform <- getPlatform
                    ; return (if cmp True True
                              then trueValInt  platform
                              else falseValInt platform) }

{- Note [Rules for floating-point comparisons]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We need different rules for floating-point values because for floats
it is not true that x = x (for NaNs); so we do not want the equal_rule
rule that mkRelOpRule uses.

Note also that, in the case of equality/inequality, we do /not/
want to switch to a case-expression.  For example, we do not want
to convert
   case (eqFloat# x 3.8#) of
     True -> this
     False -> that
to
  case x of
    3.8#::Float# -> this
    _            -> that
See #9238.  Reason: comparing floating-point values for equality is
delicate, and we don't want to implement that delicacy in the code for
case expressions.  So we make it an invariant of Core that a case
expression never scrutinises a Float# or Double#.

This transformation is what the litEq rule does;
see Note [The litEq rule: converting equality to case].
So we /refrain/ from using litEq for mkFloatingRelOpRule.
-}

mkFloatingRelOpRule :: Name -> (forall a . Ord a => a -> a -> Bool)
                    -> Maybe CoreRule
-- See Note [Rules for floating-point comparisons]
mkFloatingRelOpRule nm cmp
  = mkPrimOpRule nm 2 [binaryCmpLit cmp]

-- common constants

-- Zero and one at the target's native Int#/Word# width.
zeroi, onei, zerow, onew :: Platform -> Literal
zeroi platform = mkLitInt  platform 0
onei  platform = mkLitInt  platform 1
zerow platform = mkLitWord platform 0
onew  platform = mkLitWord platform 1

zeroI8, oneI8, zeroW8, oneW8 :: Literal
zeroI8 = mkLitInt8  0
oneI8  = mkLitInt8  1
zeroW8 = mkLitWord8 0
oneW8  = mkLitWord8 1

zeroI16, oneI16, zeroW16, oneW16 :: Literal
zeroI16 = mkLitInt16  0
oneI16  = mkLitInt16  1
zeroW16 = mkLitWord16 0
oneW16  = mkLitWord16 1

zeroI32, oneI32, zeroW32, oneW32 :: Literal
zeroI32 = mkLitInt32  0
oneI32  = mkLitInt32  1
zeroW32 = mkLitWord32 0
oneW32  = mkLitWord32 1

zeroI64, oneI64, zeroW64, oneW64 :: Literal
zeroI64 = mkLitInt64  0
oneI64  = mkLitInt64  1
zeroW64 = mkLitWord64 0
oneW64  = mkLitWord64 1

zerof, onef, twof, zerod, oned, twod :: Literal
zerof = mkLitFloat  0.0
onef  = mkLitFloat  1.0
twof  = mkLitFloat  2.0
zerod = mkLitDouble 0.0
oned  = mkLitDouble 1.0
twod  = mkLitDouble 2.0

-- | Compare two literals with the given predicate, producing a Core
-- boolean ('trueValInt'/'falseValInt'); 'Nothing' if the literals are
-- not of the same comparable kind.
cmpOp :: Platform
      -> (forall a . Ord a => a -> a -> Bool)
      -> Literal -> Literal -> Maybe CoreExpr
cmpOp platform cmp = go
  where
    done True  = Just $ trueValInt  platform
    done False = Just $ falseValInt platform

    -- These compares are at different types
    go (LitChar i1)   (LitChar i2)   = done (i1 `cmp` i2)
    go (LitFloat i1)  (LitFloat i2)  = done (i1 `cmp` i2)
    go (LitDouble i1) (LitDouble i2) = done (i1 `cmp` i2)
    go (LitNumber nt1 i1) (LitNumber nt2 i2)
      -- only fold comparisons between numbers of the same numeric type
      | nt1 /= nt2 = Nothing
      | otherwise  = done (i1 `cmp` i2)
    go _ _ = Nothing

--------------------------

negOp :: RuleOpts -> Literal -> Maybe CoreExpr  -- Negate
negOp env = \case
  (LitFloat 0.0)  -> Nothing  -- can't represent -0.0 as a Rational
  (LitFloat f)    -> Just (mkFloatVal env (-f))
  (LitDouble 0.0) -> Nothing
  (LitDouble d)   -> Just (mkDoubleVal env (-d))
  (LitNumber nt i)
    -- only fold negation of signed numeric literals (see guard)
    | litNumIsSigned nt -> Just (Lit (mkLitNumberWrap (roPlatform env) nt (-i)))
  _ -> Nothing

complementOp :: RuleOpts -> Literal -> Maybe CoreExpr  -- Binary complement
complementOp env (LitNumber nt i) =
   Just (Lit (mkLitNumberWrap (roPlatform env) nt (complement i)))
complementOp _ _ = Nothing

-- Constant-fold a binary Int8# operation; the result is wrapped back
-- into Int8 range by 'int8Result'.
int8Op2 :: (Integral a, Integral b)
        => (a -> b -> Integer)
        -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
int8Op2 op _ (LitNumber LitNumInt8 i1) (LitNumber LitNumInt8 i2) =
  int8Result (fromInteger i1 `op` fromInteger i2)
int8Op2 _ _ _ _ = Nothing

-- Constant-fold a binary Int16# operation; result wrapped by 'int16Result'.
int16Op2 :: (Integral a, Integral b)
         => (a -> b -> Integer)
         -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
int16Op2 op _ (LitNumber LitNumInt16 i1) (LitNumber LitNumInt16 i2) =
  int16Result (fromInteger i1 `op` fromInteger i2)
int16Op2 _ _ _ _ = Nothing

-- Constant-fold a binary Int32# operation; result wrapped by 'int32Result'.
int32Op2 :: (Integral a, Integral b)
         => (a -> b -> Integer)
         -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
int32Op2 op _ (LitNumber LitNumInt32 i1) (LitNumber LitNumInt32 i2) =
  int32Result (fromInteger i1 `op` fromInteger i2)
int32Op2 _ _ _ _ = Nothing

-- Constant-fold a binary Int64# operation; result wrapped by 'int64Result'.
int64Op2 :: (Integral a, Integral b)
         => (a -> b -> Integer)
         -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
int64Op2 op _ (LitNumber LitNumInt64 i1) (LitNumber LitNumInt64 i2) =
  int64Result (fromInteger i1 `op` fromInteger i2)
int64Op2 _ _ _ _ = Nothing

-- Constant-fold a binary native Int# operation.
intOp2 :: (Integral a, Integral b)
       => (a -> b -> Integer)
       -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
intOp2 = intOp2' . const

-- Like 'intOp2', but the operation may consult the 'RuleOpts'.
intOp2' :: (Integral a, Integral b)
        => (RuleOpts -> a -> b -> Integer)
        -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
intOp2' op env (LitNumber LitNumInt i1) (LitNumber LitNumInt i2) =
  let o = op env
  in  intResult (roPlatform env) (fromInteger i1 `o` fromInteger i2)
intOp2' _ _ _ _ = Nothing

-- Constant-fold a carrying Int# operation; the result is the unboxed
-- (result, overflow-flag) pair built by 'intCResult'.
intOpC2 :: (Integral a, Integral b)
        => (a -> b -> Integer)
        -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
intOpC2 op env (LitNumber LitNumInt i1) (LitNumber LitNumInt i2) =
  intCResult (roPlatform env) (fromInteger i1 `op` fromInteger i2)
intOpC2 _ _ _ _ = Nothing

shiftRightLogical :: forall t. (Integral t, Bits t) => Integer -> Int -> Integer
shiftRightLogical x n = fromIntegral (fromInteger x `shiftR` n :: t)

-- | Shift right, putting zeros in rather than sign-propagating as
-- 'Bits.shiftR' would do. Do this by converting to the appropriate Word
-- and back. Obviously this won't work for too-big values, but its ok as
-- we use it here.
shiftRightLogicalNative :: Platform -> Integer -> Int -> Integer
shiftRightLogicalNative platform =
  case platformWordSize platform of
    PW4 -> shiftRightLogical @Word32
    PW8 -> shiftRightLogical @Word64

--------------------------

-- | Return the given platform-dependent literal as a Core expression.
retLit :: (Platform -> Literal) -> RuleM CoreExpr
retLit l = do platform <- getPlatform
              return $ Lit $ l platform

-- | Return an unboxed pair of the given literal and a zero flag
-- (cf 'retLit' for the single-result version).
retLitNoC :: (Platform -> Literal) -> RuleM CoreExpr
retLitNoC l = do platform <- getPlatform
                 let lit = l platform
                 let ty = literalType lit
                 return $ mkCoreUbxTup [ty, ty] [Lit lit, Lit (zeroi platform)]

-- Constant-fold a binary Word8# operation; result wrapped by 'word8Result'.
word8Op2 :: (Integral a, Integral b)
         => (a -> b -> Integer)
         -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
word8Op2 op _ (LitNumber LitNumWord8 i1) (LitNumber LitNumWord8 i2) =
  word8Result (fromInteger i1 `op` fromInteger i2)
word8Op2 _ _ _ _ = Nothing

-- Constant-fold a binary Word16# operation; result wrapped by 'word16Result'.
word16Op2 :: (Integral a, Integral b)
          => (a -> b -> Integer)
          -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
word16Op2 op _ (LitNumber LitNumWord16 i1) (LitNumber LitNumWord16 i2) =
  word16Result (fromInteger i1 `op` fromInteger i2)
word16Op2 _ _ _ _ = Nothing

-- Constant-fold a binary Word32# operation; result wrapped by 'word32Result'.
word32Op2 :: (Integral a, Integral b)
          => (a -> b -> Integer)
          -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
word32Op2 op _ (LitNumber LitNumWord32 i1) (LitNumber LitNumWord32 i2) =
  word32Result (fromInteger i1 `op` fromInteger i2)
word32Op2 _ _ _ _ = Nothing

-- Constant-fold a binary Word64# operation; result wrapped by 'word64Result'.
word64Op2 :: (Integral a, Integral b)
          => (a -> b -> Integer)
          -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
word64Op2 op _ (LitNumber LitNumWord64 i1) (LitNumber LitNumWord64 i2) =
  word64Result (fromInteger i1 `op` fromInteger i2)
word64Op2 _ _ _ _ = Nothing

-- Constant-fold a binary native Word# operation.
wordOp2 :: (Integral a, Integral b)
        => (a -> b -> Integer)
        -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
wordOp2 op env (LitNumber LitNumWord w1) (LitNumber LitNumWord w2)
    = wordResult (roPlatform env) (fromInteger w1 `op` fromInteger w2)
wordOp2 _ _ _ _ = Nothing

-- Constant-fold a carrying Word# operation; the result is the unboxed
-- (result, carry-flag) pair built by 'wordCResult'.
wordOpC2 :: (Integral a, Integral b)
         => (a -> b -> Integer)
         -> RuleOpts -> Literal -> Literal -> Maybe CoreExpr
wordOpC2 op env (LitNumber LitNumWord w1) (LitNumber LitNumWord w2) =
  wordCResult (roPlatform env) (fromInteger w1 `op` fromInteger w2)
wordOpC2 _ _ _ _ = Nothing

shiftRule :: LitNumType -> (Platform -> Integer -> Int -> Integer) -> RuleM CoreExpr
-- Shifts take an Int; hence third arg of op is Int
-- Used for shift primops
--    IntSllOp, IntSraOp, IntSrlOp :: Int# -> Int# -> Int#
--    SllOp, SrlOp                 :: Word# -> Int# -> Word#
shiftRule lit_num_ty shift_op = do
  platform <- getPlatform
  [e1, Lit (LitNumber LitNumInt shift_len)] <- getArgs
  bit_size <- case litNumBitSize platform lit_num_ty of
    Nothing -> mzero
    Just bs -> pure (toInteger bs)
  case e1 of
    -- a zero-bit shift is the identity
    _ | shift_len == 0 -> pure e1

    -- See Note [Guarding against silly shifts]
    _ | shift_len < 0 || shift_len > bit_size
      -> pure $ Lit $ mkLitNumberWrap platform lit_num_ty 0
         -- Be sure to use lit_num_ty here, so we get a correctly typed zero.
         -- See #18589

    Lit (LitNumber nt x)
      | 0 < shift_len && shift_len <= bit_size
      -> assert (nt == lit_num_ty) $
         let op = shift_op platform
             -- Do the shift at type Integer, but shift length is Int.
             -- Using host's Int is ok even if target's Int has a different size
             -- because we test that shift_len <= bit_size (which is at most 64)
             y  = x `op` fromInteger shift_len
         in  pure $ Lit $ mkLitNumberWrap platform nt y

    _ -> mzero

--------------------------
-- Constant-fold a binary Float# operation on two float literals.
floatOp2 :: (Rational -> Rational -> Rational)
         -> RuleOpts -> Literal -> Literal
         -> Maybe (Expr CoreBndr)
floatOp2 op env (LitFloat f1) (LitFloat f2)
  = Just (mkFloatVal env (f1 `op` f2))
floatOp2 _ _ _ _ = Nothing

--------------------------
-- Constant-fold decodeFloat# of a float literal into an unboxed
-- (mantissa, exponent) pair of Int#s.
floatDecodeOp :: RuleOpts -> Literal -> Maybe CoreExpr
floatDecodeOp env (LitFloat ((decodeFloat . fromRational @Float) -> (m, e)))
  = Just $ mkCoreUbxTup [intPrimTy, intPrimTy]
                        [ mkIntVal (roPlatform env) (toInteger m)
                        , mkIntVal (roPlatform env) (toInteger e) ]
floatDecodeOp _ _
  = Nothing

--------------------------
-- Constant-fold a binary Double# operation on two double literals.
doubleOp2 :: (Rational -> Rational -> Rational)
          -> RuleOpts -> Literal -> Literal
          -> Maybe (Expr CoreBndr)
doubleOp2 op env (LitDouble f1) (LitDouble f2)
  = Just (mkDoubleVal env (f1 `op` f2))
doubleOp2 _ _ _ _ = Nothing

--------------------------
-- Constant-fold decodeDouble# of a double literal into an unboxed
-- (Int64# mantissa, Int# exponent) pair.
doubleDecodeOp :: RuleOpts -> Literal -> Maybe CoreExpr
doubleDecodeOp env (LitDouble ((decodeFloat . fromRational @Double) -> (m, e)))
  = Just $ mkCoreUbxTup [int64PrimTy, intPrimTy]
                        [ Lit (mkLitInt64Wrap (toInteger m))
                        , mkIntVal platform (toInteger e) ]
  where
    platform = roPlatform env
doubleDecodeOp _ _
  = Nothing

--------------------------
{- Note [The litEq rule: converting equality to case]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This stuff turns
     n ==# 3#
into
     case n of
       3# -> True
       m  -> False

This is a Good Thing, because it allows case-of case things
to happen, and case-default absorption to happen.  For
example:

     if (n ==# 3#) || (n ==# 4#) then e1 else e2
will transform to
     case n of
       3# -> e1
       4# -> e1
       m  -> e2
(modulo the usual precautions to avoid duplicating e1)
-}

litEq :: Bool  -- True <=> equality, False <=> inequality
      -> RuleM CoreExpr
litEq is_eq = msum
  [ do [Lit lit, expr] <- getArgs
       platform <- getPlatform
       do_lit_eq platform lit expr
  , do [expr, Lit lit] <- getArgs
       platform <- getPlatform
       do_lit_eq platform lit expr ]
  where
    do_lit_eq platform lit expr = do
      guard (not (litIsLifted lit))
      return (mkWildCase expr (unrestricted $ literalType lit) intPrimTy
                    [ Alt DEFAULT      [] val_if_neq
                    , Alt (LitAlt lit) [] val_if_eq])
      where
        val_if_eq  | is_eq     = trueValInt  platform
                   | otherwise = falseValInt platform
        val_if_neq | is_eq     = falseValInt platform
                   | otherwise = trueValInt  platform

-- | Check if there is comparison with minBound or maxBound, that is
-- always true or false. For instance, an Int cannot be smaller than its
-- minBound, so we can replace such comparison with False.
boundsCmp :: Comparison -> RuleM CoreExpr
boundsCmp op = do
  platform <- getPlatform
  [a, b] <- getArgs
  liftMaybe $ mkRuleFn platform op a b

data Comparison = Gt | Ge | Lt | Le

-- Fold a bounds comparison to a constant boolean when one argument is a
-- literal equal to the type's minBound or maxBound.
mkRuleFn :: Platform -> Comparison -> CoreExpr -> CoreExpr -> Maybe CoreExpr
mkRuleFn platform Gt (Lit lit) _ | isMinBound platform lit = Just $ falseValInt platform
mkRuleFn platform Le (Lit lit) _ | isMinBound platform lit = Just $ trueValInt  platform
mkRuleFn platform Ge _ (Lit lit) | isMinBound platform lit = Just $ trueValInt  platform
mkRuleFn platform Lt _ (Lit lit) | isMinBound platform lit = Just $ falseValInt platform
mkRuleFn platform Ge (Lit lit) _ | isMaxBound platform lit = Just $ trueValInt  platform
mkRuleFn platform Lt (Lit lit) _ | isMaxBound platform lit = Just $ falseValInt platform
mkRuleFn platform Gt _ (Lit lit) | isMaxBound platform lit = Just $ falseValInt platform
mkRuleFn platform Le _ (Lit lit) | isMaxBound platform lit = Just $ trueValInt  platform
mkRuleFn _ _ _ _ = Nothing

-- | Create an Int literal expression while ensuring the given Integer is in the
-- target Int range
int8Result :: Integer -> Maybe CoreExpr
int8Result result = Just (int8Result' result)

int8Result' :: Integer -> CoreExpr
int8Result' result = Lit (mkLitInt8Wrap result)

-- | Create an Int literal expression while ensuring the given Integer is in the
-- target Int range
int16Result :: Integer -> Maybe CoreExpr
int16Result result = Just (int16Result' result)

int16Result' :: Integer -> CoreExpr
int16Result' result = Lit (mkLitInt16Wrap result)

-- | Create an Int literal expression while ensuring the given Integer is in the
-- target Int range
int32Result :: Integer -> Maybe CoreExpr
int32Result result = Just (int32Result' result)

int32Result' :: Integer -> CoreExpr
int32Result' result = Lit (mkLitInt32Wrap result)

intResult :: Platform -> Integer -> Maybe CoreExpr
intResult platform result = Just (intResult' platform result)

intResult' :: Platform -> Integer -> CoreExpr
intResult' platform result = Lit (mkLitIntWrap platform result)

-- | Create an unboxed pair of an Int literal expression, ensuring the given
-- Integer is in the target Int range and the corresponding overflow flag
-- (@0#@/@1#@) if it wasn't.
intCResult :: Platform -> Integer -> Maybe CoreExpr
intCResult platform result = Just (mkPair [Lit lit, Lit c])
  where
    mkPair = mkCoreUbxTup [intPrimTy, intPrimTy]
    (lit, b) = mkLitIntWrapC platform result
    c = if b then onei platform else zeroi platform

-- | Create a Word literal expression while ensuring the given Integer is in the
-- target Word range
word8Result :: Integer -> Maybe CoreExpr
word8Result result = Just (word8Result' result)

word8Result' :: Integer -> CoreExpr
word8Result' result = Lit (mkLitWord8Wrap result)

-- | Create a Word literal expression while ensuring the given Integer is in the
-- target Word range
word16Result :: Integer -> Maybe CoreExpr
word16Result result = Just (word16Result' result)

word16Result' :: Integer -> CoreExpr
word16Result' result = Lit (mkLitWord16Wrap result)

-- | Create a Word literal expression while ensuring the given Integer is in the
-- target Word range
word32Result :: Integer -> Maybe CoreExpr
word32Result result = Just (word32Result' result)

word32Result' :: Integer -> CoreExpr
word32Result' result = Lit (mkLitWord32Wrap result)

-- | Create a Word literal expression while ensuring the given Integer is in the
-- target Word range
wordResult :: Platform -> Integer -> Maybe CoreExpr
wordResult platform result = Just (wordResult' platform result)

wordResult' :: Platform -> Integer -> CoreExpr
wordResult' platform result = Lit (mkLitWordWrap platform result)

-- | Create an unboxed pair of a Word literal expression, ensuring the given
-- Integer is in the target Word range and the corresponding carry flag
-- (@0#@/@1#@) if it wasn't.
wordCResult :: Platform -> Integer -> Maybe CoreExpr
wordCResult platform result = Just (mkPair [Lit lit, Lit c])
  where
    mkPair = mkCoreUbxTup [wordPrimTy, intPrimTy]
    (lit, b) = mkLitWordWrapC platform result
    -- c is the carry flag: 1# iff the value had to be wrapped
    c = if b then onei platform else zeroi platform

-- | Create an Int64 literal expression, wrapping the given Integer into the
-- Int64 range.
int64Result :: Integer -> Maybe CoreExpr
int64Result result = Just (int64Result' result)

int64Result' :: Integer -> CoreExpr
int64Result' result = Lit (mkLitInt64Wrap result)

-- | Create a Word64 literal expression, wrapping the given Integer into the
-- Word64 range.
word64Result :: Integer -> Maybe CoreExpr
word64Result result = Just (word64Result' result)

word64Result' :: Integer -> CoreExpr
word64Result' result = Lit (mkLitWord64Wrap result)

-- | 'ambient (primop x) = x', but not necessarily 'primop (ambient x) = x'.
-- That is: the rule's own primop ("ambient") is a left inverse of the given
-- primop, so when the single argument is an application of that primop we
-- can return its argument unchanged.
semiInversePrimOp :: PrimOp -> RuleM CoreExpr
semiInversePrimOp primop = do
  [Var primop_id `App` e] <- getArgs
  matchPrimOpId primop primop_id
  return e

-- | When the single argument is an application of the primop 'that',
-- replace the whole expression by 'this' applied to that application's
-- argument (the outer/inner composition reduces to a single 'this').
subsumesPrimOp :: PrimOp -> PrimOp -> RuleM CoreExpr
this `subsumesPrimOp` that = do
  [Var primop_id `App` e] <- getArgs
  matchPrimOpId that primop_id
  return (Var (primOpId this) `App` e)

-- | When the single argument is an application of the given primop, the
-- surrounding operation is redundant: return the argument unchanged.
subsumedByPrimOp :: PrimOp -> RuleM CoreExpr
subsumedByPrimOp primop = do
  [e@(Var primop_id `App` _)] <- getArgs
  matchPrimOpId primop primop_id
  return e

-- | Transform `extendWordN (narrowWordN x)` into `x .&. 0xFF..FF`
extendNarrowPassthrough :: PrimOp -> Integer -> RuleM CoreExpr
extendNarrowPassthrough narrow_primop n = do
  [Var primop_id `App` x] <- getArgs
  matchPrimOpId narrow_primop primop_id
  return (Var (primOpId WordAndOp) `App` x `App` Lit (LitNumber LitNumWord n))

-- | narrow subsumes bitwise `and` with full mask (cf #16402):
--
--       narrowN (x .&. m)
--       m .&. (2^N-1) = 2^N-1
--       ==> narrowN x
--
-- e.g.  narrow16 (x .&. 0xFFFF)
--       ==> narrow16 x
--
narrowSubsumesAnd :: PrimOp -> PrimOp -> Int -> RuleM CoreExpr
narrowSubsumesAnd and_primop narrw n = do
  [Var primop_id `App` x `App` y] <- getArgs
  matchPrimOpId and_primop primop_id
  let mask = bit n - 1
      -- fires only when the literal operand covers all n low bits
      g v (Lit (LitNumber _ m)) = do
        guard (m .&. mask == mask)
        return (Var (primOpId narrw) `App` v)
      g _ _ = mzero
  -- `and` is commutative, so try the literal on either side
  g x y <|> g y x

-- | When both arguments are (cheaply recognisably) equal, return the first
-- one; attached to operations for which @op e e = e@ holds.
idempotent :: RuleM CoreExpr
idempotent = do
  [e1, e2] <- getArgs
  guard $ cheapEqExpr e1 e2
  return e1

-- | Match
--       (op (op v e) e)
--  or   (op e (op v e))
--  or   (op (op e v) e)
--  or   (op e (op e v))
--  and return the innermost (op v e) or (op e v).
sameArgIdempotentCommut :: PrimOp -> RuleM CoreExpr
sameArgIdempotentCommut op = do
  [a,b] <- getArgs
  case (a,b) of
    (is_binop op -> Just (e1,e2), e3)
      | cheapEqExpr e2 e3 -> return a
      | cheapEqExpr e1 e3 -> return a
    (e3, is_binop op -> Just (e1,e2))
      | cheapEqExpr e2 e3 -> return b
      | cheapEqExpr e1 e3 -> return b
    _ -> mzero

{- Note [Guarding against silly shifts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this code:

  import Data.Bits( (.|.), shiftL )
  chunkToBitmap :: [Bool] -> Word32
  chunkToBitmap chunk = foldr (.|.) 0 [ 1 `shiftL` n | (True,n) <- zip chunk [0..] ]

This optimises to:

Shift.$wgo = \ (w_sCS :: GHC.Prim.Int#) (w1_sCT :: [GHC.Types.Bool]) ->
    case w1_sCT of _ {
      [] -> 0##;
      : x_aAW xs_aAX ->
        case x_aAW of _ {
          GHC.Types.False ->
            case w_sCS of wild2_Xh {
              __DEFAULT -> Shift.$wgo (GHC.Prim.+# wild2_Xh 1) xs_aAX;
              9223372036854775807 -> 0## };
          GHC.Types.True ->
            case GHC.Prim.>=# w_sCS 64 of _ {
              GHC.Types.False ->
                case w_sCS of wild3_Xh {
                  __DEFAULT ->
                    case Shift.$wgo (GHC.Prim.+# wild3_Xh 1) xs_aAX of ww_sCW { __DEFAULT ->
                      GHC.Prim.or# (GHC.Prim.narrow32Word#
                                      (GHC.Prim.uncheckedShiftL# 1## wild3_Xh))
                                   ww_sCW };
                  9223372036854775807 ->
                    GHC.Prim.narrow32Word#
!!!!-->                  (GHC.Prim.uncheckedShiftL# 1## 9223372036854775807)
                };
              GHC.Types.True ->
                case w_sCS of wild3_Xh {
                  __DEFAULT -> Shift.$wgo (GHC.Prim.+# wild3_Xh 1) xs_aAX;
                  9223372036854775807 -> 0##
                } } } }

Note the massive shift on line "!!!!". It can't happen, because we've checked
that w < 64, but the optimiser didn't spot that. We DO NOT want to
constant-fold this!

Moreover, if the programmer writes (n `uncheckedShiftL` 9223372036854775807),
we can't constant fold it, but if it gets to the assembler we get
     Error: operand type mismatch for `shl'

So the best thing to do is to rewrite the shift with a call to error,
when the second arg is large. However, in general we cannot do this; consider
this case

    let x = I# (uncheckedIShiftL# n 80)
    in ...

Here x contains an invalid shift and consequently we would like to rewrite it
as follows:

    let x = I# (error "invalid shift")
    in ...

This was originally done in the fix to #16449 but this breaks the let/app
invariant (see Note [Core let/app invariant] in GHC.Core) as noted in #16742.
For the reasons discussed in Note [Checking versus non-checking primops] (in
the PrimOp module) there is no safe way to rewrite the argument of I# such that
it bottoms.

Consequently we instead take advantage of the fact that large shifts are
undefined behavior (see associated documentation in primops.txt.pp) and
transform the invalid shift into an "obviously incorrect" value.

There are two cases:

- Shifting fixed-width things: the primops IntSll, Sll, etc
  These are handled by shiftRule.
  We are happy to shift by any amount up to wordSize but no more.

- Shifting Bignums (Integer, Natural): these are handled by bignum_shift.
  Here we could in principle shift by any amount, but we arbitrarily limit
  the shift to 4 bits; in particular we do not want shift by a huge amount,
  which can happen in code like that above.

The two cases are more different in their code paths than is comfortable,
but that is only a historical accident.


************************************************************************
*                                                                      *
\subsection{Vaguely generic functions}
*                                                                      *
************************************************************************
-}

mkBasicRule :: Name -> Int -> RuleM CoreExpr -> CoreRule
-- Gives the Rule the same name as the primop itself
mkBasicRule op_name n_args rm
  = BuiltinRule { ru_name  = occNameFS (nameOccName op_name),
                  ru_fn    = op_name,
                  ru_nargs = n_args,
                  ru_try   = runRuleM rm }

-- | The rule-matching monad: a computation that may fail ('Maybe'), with
-- read-only access to the rule options, the in-scope environment, the 'Id'
-- of the function the rule is attached to, and the argument expressions of
-- the application being rewritten.
newtype RuleM r = RuleM
  { runRuleM :: RuleOpts -> InScopeEnv -> Id -> [CoreExpr] -> Maybe r }
  deriving (Functor)

instance Applicative RuleM where
    pure x = RuleM $ \_ _ _ _ -> Just x
    (<*>) = ap

instance Monad RuleM where
  RuleM f >>= g
    = RuleM $ \env iu fn args ->
        case f env iu fn args of
          Nothing -> Nothing
          Just r  -> runRuleM (g r) env iu fn args

instance MonadFail RuleM where
    -- a failed pattern match in a do-block simply means "rule doesn't fire"
    fail _ = mzero

instance Alternative RuleM where
  empty = RuleM $ \_ _ _ _ -> Nothing
  -- left-biased choice: try the first rule, fall back to the second
  RuleM f1 <|> RuleM f2 = RuleM $ \env iu fn args ->
    f1 env iu fn args <|> f2 env iu fn args

instance MonadPlus RuleM

getPlatform :: RuleM Platform
getPlatform = roPlatform <$> getRuleOpts

getWordSize :: RuleM PlatformWordSize
getWordSize = platformWordSize <$> getPlatform

getRuleOpts :: RuleM RuleOpts
getRuleOpts = RuleM $ \rule_opts _ _ _ -> Just rule_opts

-- | Lift a 'Maybe' into 'RuleM': 'Nothing' means the rule does not fire.
liftMaybe :: Maybe a -> RuleM a
liftMaybe Nothing = mzero
liftMaybe (Just x) = return x

-- | Fold a unary primop whose single argument is a literal, using a
-- platform-independent literal transformer.
liftLit :: (Literal -> Literal) -> RuleM CoreExpr
liftLit f = liftLitPlatform (const f)

liftLitPlatform :: (Platform -> Literal -> Literal) -> RuleM CoreExpr
liftLitPlatform f = do
  platform <- getPlatform
  [Lit lit] <- getArgs
  return $ Lit (f platform lit)

-- | Drop the (unary) operation, returning its argument unchanged, but only
-- on 32-bit targets; on 64-bit targets the rule does not fire.
removeOp32 :: RuleM CoreExpr
removeOp32 = do
  platform <- getPlatform
  case platformWordSize platform of
    PW4 -> do
      [e] <- getArgs
      return e
    PW8 ->
      mzero

getArgs :: RuleM [CoreExpr]
getArgs = RuleM $ \_ _ _ args -> Just args

getInScopeEnv :: RuleM InScopeEnv
getInScopeEnv = RuleM $ \_ iu _ _ -> Just iu

getFunction :: RuleM Id
getFunction = RuleM $ \_ _ fn _ -> Just fn

-- | Match an expression that evaluates to a literal (via
-- 'exprIsLiteral_maybe', which can look through unfoldings).
isLiteral :: CoreExpr -> RuleM Literal
isLiteral e = do
    env <- getInScopeEnv
    case exprIsLiteral_maybe env e of
        Nothing -> mzero
        Just l  -> pure l

-- | Match BigNat#, Integer and Natural literals
isBignumLiteral :: CoreExpr -> RuleM Integer
isBignumLiteral e = isNumberLiteral e <|> isIntegerLiteral e <|> isNaturalLiteral e

-- | Match numeric literals
isNumberLiteral :: CoreExpr -> RuleM Integer
isNumberLiteral e = isLiteral e >>= \case
  LitNumber _ x -> pure x
  _             -> mzero

-- | Match the application of a DataCon to a numeric literal.
--
-- Can be used to match e.g.:
--  IS 123#
--  IP bigNatLiteral
--  W# 123##
isLitNumConApp :: CoreExpr -> RuleM (DataCon,Integer)
isLitNumConApp e = do
  env <- getInScopeEnv
  case exprIsConApp_maybe env e of
    Just (_env,_fb,dc,_tys,[arg]) -> case exprIsLiteral_maybe env arg of
      Just (LitNumber _ i) -> pure (dc,i)
      _                    -> mzero
    _ -> mzero

-- | Match an Integer literal (IS/IN/IP constructor applied to a numeric
-- literal); the IN constructor carries the magnitude, hence the negate.
isIntegerLiteral :: CoreExpr -> RuleM Integer
isIntegerLiteral e = do
  (dc,i) <- isLitNumConApp e
  if | dc == integerISDataCon -> pure i
     | dc == integerINDataCon -> pure (negate i)
     | dc == integerIPDataCon -> pure i
     | otherwise              -> mzero

-- | As 'isIntegerLiteral' but only for the big (IN/IP) constructors, not
-- the small (IS) one.
isBigIntegerLiteral :: CoreExpr -> RuleM Integer
isBigIntegerLiteral e = do
  (dc,i) <- isLitNumConApp e
  if | dc == integerINDataCon -> pure (negate i)
     | dc == integerIPDataCon -> pure i
     | otherwise              -> mzero

-- | Match a Natural literal (NS/NB constructor applied to a numeric literal).
isNaturalLiteral :: CoreExpr -> RuleM Integer
isNaturalLiteral e = do
  (dc,i) <- isLitNumConApp e
  if | dc == naturalNSDataCon -> pure i
     | dc == naturalNBDataCon -> pure i
     | otherwise              -> mzero

-- return the n-th argument of this rule, if it is a literal
-- argument indices start from 0
getLiteral :: Int -> RuleM Literal
getLiteral n = RuleM $ \_ _ _ exprs -> case drop n exprs of
  (Lit l:_) -> Just l
  _ -> Nothing

-- | Fold a unary primop whose argument is a literal; the folding function
-- may itself decline by returning 'Nothing'.
unaryLit :: (RuleOpts -> Literal -> Maybe CoreExpr) -> RuleM CoreExpr
unaryLit op = do
  env <- getRuleOpts
  [Lit l] <- getArgs
  liftMaybe $ op env (convFloating env l)

-- | Fold a binary primop whose two arguments are both literals.
binaryLit :: (RuleOpts -> Literal -> Literal -> Maybe CoreExpr) -> RuleM CoreExpr
binaryLit op = do
  env <- getRuleOpts
  [Lit l1, Lit l2] <- getArgs
  liftMaybe $ op env (convFloating env l1) (convFloating env l2)

-- | Fold a binary comparison primop on two literals, via 'cmpOp'.
binaryCmpLit :: (forall a . Ord a => a -> a -> Bool) -> RuleM CoreExpr
binaryCmpLit op = do
  platform <- getPlatform
  binaryLit (\_ -> cmpOp platform op)

leftIdentity :: Literal -> RuleM CoreExpr
leftIdentity id_lit = leftIdentityPlatform (const id_lit)

rightIdentity :: Literal -> RuleM CoreExpr
rightIdentity id_lit = rightIdentityPlatform (const id_lit)

identity :: Literal -> RuleM CoreExpr
identity lit = leftIdentity lit `mplus` rightIdentity lit

-- | @id_lit `op` e2 == e2@: first argument is the identity literal, so the
-- whole application folds to the second argument.
leftIdentityPlatform :: (Platform -> Literal) -> RuleM CoreExpr
leftIdentityPlatform id_lit = do
  platform <- getPlatform
  [Lit l1, e2] <- getArgs
  guard $ l1 == id_lit platform
  return e2

-- | Left identity rule for PrimOps like 'IntAddC' and 'WordAddC', where, in
-- addition to the result, we have to indicate that no carry/overflow occurred.
leftIdentityCPlatform :: (Platform -> Literal) -> RuleM CoreExpr
leftIdentityCPlatform id_lit = do
  platform <- getPlatform
  [Lit l1, e2] <- getArgs
  guard $ l1 == id_lit platform
  let no_c = Lit (zeroi platform)
  return (mkCoreUbxTup [exprType e2, intPrimTy] [e2, no_c])

-- | @e1 `op` id_lit == e1@: second argument is the identity literal, so the
-- whole application folds to the first argument.
rightIdentityPlatform :: (Platform -> Literal) -> RuleM CoreExpr
rightIdentityPlatform id_lit = do
  platform <- getPlatform
  [e1, Lit l2] <- getArgs
  guard $ l2 == id_lit platform
  return e1

-- | Right identity rule for PrimOps like 'IntSubC' and 'WordSubC', where, in
-- addition to the result, we have to indicate that no carry/overflow occurred.
rightIdentityCPlatform :: (Platform -> Literal) -> RuleM CoreExpr
rightIdentityCPlatform id_lit = do
  platform <- getPlatform
  [e1, Lit l2] <- getArgs
  guard $ l2 == id_lit platform
  -- no carry/overflow occurred, so the flag component of the pair is 0#
  let no_c = Lit (zeroi platform)
  return (mkCoreUbxTup [exprType e1, intPrimTy] [e1, no_c])

identityPlatform :: (Platform -> Literal) -> RuleM CoreExpr
identityPlatform lit =
  leftIdentityPlatform lit `mplus` rightIdentityPlatform lit

-- | Identity rule for PrimOps like 'IntAddC' and 'WordAddC', where, in addition
-- to the result, we have to indicate that no carry/overflow occurred.
identityCPlatform :: (Platform -> Literal) -> RuleM CoreExpr
identityCPlatform lit =
  leftIdentityCPlatform lit `mplus` rightIdentityCPlatform lit

-- | First argument is a literal zero: fold to that zero literal.
leftZero :: RuleM CoreExpr
leftZero = do
  [Lit l1, _] <- getArgs
  guard $ isZeroLit l1
  return $ Lit l1

-- | Second argument is a literal zero: fold to that zero literal.
rightZero :: RuleM CoreExpr
rightZero = do
  [_, Lit l2] <- getArgs
  guard $ isZeroLit l2
  return $ Lit l2

-- | Either argument is a literal zero: fold to zero.
zeroElem :: RuleM CoreExpr
zeroElem = leftZero `mplus` rightZero

-- | Succeeds iff the two arguments are cheaply-recognisably equal.
equalArgs :: RuleM ()
equalArgs = do
  [e1, e2] <- getArgs
  guard $ e1 `cheapEqExpr` e2

-- | Succeeds iff the n-th argument (0-based) is a non-zero literal.
nonZeroLit :: Int -> RuleM ()
nonZeroLit n = getLiteral n >>= guard . not . isZeroLit

-- | Succeeds iff the n-th argument (0-based) is a literal one.
oneLit :: Int -> RuleM ()
oneLit n = getLiteral n >>= guard . isOneLit

-- | Fold a unary bit-counting primop applied to a numeric literal, computing
-- the result at type @a@ (which fixes the bit width used) and producing a
-- Word literal.
lift_bits_op :: forall a. (Num a, FiniteBits a) => (a -> Integer) -> RuleM CoreExpr
lift_bits_op op = do
  platform <- getPlatform
  [Lit (LitNumber _ l)] <- getArgs
  pure $ mkWordLit platform $ op (fromInteger l :: a)

pop_count :: forall a. (Num a, FiniteBits a) => RuleM CoreExpr
pop_count = lift_bits_op @a (fromIntegral . popCount)

ctz :: forall a. (Num a, FiniteBits a) => RuleM CoreExpr
ctz = lift_bits_op @a (fromIntegral . countTrailingZeros)

clz :: forall a. (Num a, FiniteBits a) => RuleM CoreExpr
clz = lift_bits_op @a (fromIntegral . countLeadingZeros)

-- When excess precision is not requested, cut down the precision of the
-- Rational value to that of Float/Double. We confuse host architecture
-- and target architecture here, but it's convenient (and wrong :-).
convFloating :: RuleOpts -> Literal -> Literal
convFloating env (LitFloat f) | not (roExcessRationalPrecision env) =
   LitFloat (toRational (fromRational f :: Float ))
convFloating env (LitDouble d) | not (roExcessRationalPrecision env) =
   LitDouble (toRational (fromRational d :: Double))
convFloating _ l = l

-- | Succeeds only when constant-folding a Float division is safe: the
-- divisor must be non-zero, and we must not lose a negative zero (see
-- Note [negative zero] below).
guardFloatDiv :: RuleM ()
guardFloatDiv = do
  [Lit (LitFloat f1), Lit (LitFloat f2)] <- getArgs
  guard $ (f1 /= 0 || f2 > 0) -- see Note [negative zero]
       && f2 /= 0            -- avoid NaN and Infinity/-Infinity

-- | As 'guardFloatDiv', but for Double literals.
guardDoubleDiv :: RuleM ()
guardDoubleDiv = do
  [Lit (LitDouble d1), Lit (LitDouble d2)] <- getArgs
  guard $ (d1 /= 0 || d2 > 0) -- see Note [negative zero]
       && d2 /= 0            -- avoid NaN and Infinity/-Infinity

-- Note [negative zero]
-- ~~~~~~~~~~~~~~~~~~~~
-- Avoid (0 / -d), otherwise 0/(-1) reduces to
-- zero, but we might want to preserve the negative zero here which
-- is representable in Float/Double but not in (normalised)
-- Rational. (#3676) Perhaps we should generate (0 :% (-1)) instead?

-- | Rewrite a multiplication by the given literal (on either side) into a
-- self-addition with the given primop; see Note [Strength reduction] below.
strengthReduction :: Literal -> PrimOp -> RuleM CoreExpr
strengthReduction two_lit add_op = do -- Note [Strength reduction]
  arg <- msum [ do [arg, Lit mult_lit] <- getArgs
                   guard (mult_lit == two_lit)
                   return arg
              , do [Lit mult_lit, arg] <- getArgs
                   guard (mult_lit == two_lit)
                   return arg ]
  return $ Var (primOpId add_op) `App` arg `App` arg

-- Note [Strength reduction]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~
-- This rule turns floating point multiplications of the form 2.0 * x and
-- x * 2.0 into x + x addition, because addition costs less than multiplication.
-- See #7116

-- Note [What's true and false]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- trueValInt and falseValInt represent true and false values returned by
-- comparison primops for Char, Int, Word, Integer, Double, Float and Addr.
-- True is represented as an unboxed 1# literal, while false is represented
-- as 0# literal.
-- We still need Bool data constructors (True and False) to use in a rule -- for constant folding of equal Strings trueValInt, falseValInt :: Platform -> Expr CoreBndr trueValInt platform = Lit $ onei platform -- see Note [What's true and false] falseValInt platform = Lit $ zeroi platform trueValBool, falseValBool :: Expr CoreBndr trueValBool = Var trueDataConId -- see Note [What's true and false] falseValBool = Var falseDataConId ltVal, eqVal, gtVal :: Expr CoreBndr ltVal = Var ordLTDataConId eqVal = Var ordEQDataConId gtVal = Var ordGTDataConId mkIntVal :: Platform -> Integer -> Expr CoreBndr mkIntVal platform i = Lit (mkLitInt platform i) mkFloatVal :: RuleOpts -> Rational -> Expr CoreBndr mkFloatVal env f = Lit (convFloating env (LitFloat f)) mkDoubleVal :: RuleOpts -> Rational -> Expr CoreBndr mkDoubleVal env d = Lit (convFloating env (LitDouble d)) matchPrimOpId :: PrimOp -> Id -> RuleM () matchPrimOpId op id = do op' <- liftMaybe $ isPrimOpId_maybe id guard $ op == op' {- ************************************************************************ * * \subsection{Special rules for seq, tagToEnum, dataToTag} * * ************************************************************************ Note [tagToEnum#] ~~~~~~~~~~~~~~~~~ Nasty check to ensure that tagToEnum# is applied to a type that is an enumeration TyCon. Unification may refine the type later, but this check won't see that, alas. It's crude but it works. Here's are two cases that should fail f :: forall a. a f = tagToEnum# 0 -- Can't do tagToEnum# at a type variable g :: Int g = tagToEnum# 0 -- Int is not an enumeration We used to make this check in the type inference engine, but it's quite ugly to do so, because the delayed constraint solving means that we don't really know what's going on until the end. It's very much a corner case because we don't expect the user to call tagToEnum# at all; we merely generate calls in derived instances of Enum. 
So we compromise: a rewrite rule rewrites a bad instance of tagToEnum# to an error call, and emits a warning. -} tagToEnumRule :: RuleM CoreExpr -- If data T a = A | B | C -- then tagToEnum# (T ty) 2# --> B ty tagToEnumRule = do [Type ty, Lit (LitNumber LitNumInt i)] <- getArgs case splitTyConApp_maybe ty of Just (tycon, tc_args) | isEnumerationTyCon tycon -> do let tag = fromInteger i correct_tag dc = (dataConTagZ dc) == tag (dc:rest) <- return $ filter correct_tag (tyConDataCons_maybe tycon `orElse` []) massert (null rest) return $ mkTyApps (Var (dataConWorkId dc)) tc_args -- See Note [tagToEnum#] _ -> warnPprTrace True "tagToEnum# on non-enumeration type" (ppr ty) $ return $ mkRuntimeErrorApp rUNTIME_ERROR_ID ty "tagToEnum# on non-enumeration type" ------------------------------ dataToTagRule :: RuleM CoreExpr -- See Note [dataToTag# magic]. dataToTagRule = a `mplus` b where -- dataToTag (tagToEnum x) ==> x a = do [Type ty1, Var tag_to_enum `App` Type ty2 `App` tag] <- getArgs guard $ tag_to_enum `hasKey` tagToEnumKey guard $ ty1 `eqType` ty2 return tag -- dataToTag (K e1 e2) ==> tag-of K -- This also works (via exprIsConApp_maybe) for -- dataToTag x -- where x's unfolding is a constructor application b = do dflags <- getPlatform [_, val_arg] <- getArgs in_scope <- getInScopeEnv (_,floats, dc,_,_) <- liftMaybe $ exprIsConApp_maybe in_scope val_arg massert (not (isNewTyCon (dataConTyCon dc))) return $ wrapFloats floats (mkIntVal dflags (toInteger (dataConTagZ dc))) {- Note [dataToTag# magic] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The primop dataToTag# is unusual because it evaluates its argument. Only `SeqOp` shares that property. (Other primops do not do anything as fancy as argument evaluation.) The special handling for dataToTag# is: * GHC.Core.Utils.exprOkForSpeculation has a special case for DataToTagOp, (actually in app_ok). Most primops with lifted arguments do not evaluate those arguments, but DataToTagOp and SeqOp are two exceptions. 
We say that they are /never/ ok-for-speculation, regardless of the evaluated-ness of their argument. See GHC.Core.Utils Note [exprOkForSpeculation and SeqOp/DataToTagOp] * There is a special case for DataToTagOp in GHC.StgToCmm.Expr.cgExpr, that evaluates its argument and then extracts the tag from the returned value. * An application like (dataToTag# (Just x)) is optimised by dataToTagRule in GHC.Core.Opt.ConstantFold. * A case expression like case (dataToTag# e) of gets transformed t case e of by GHC.Core.Opt.ConstantFold.caseRules; see Note [caseRules for dataToTag] See #15696 for a long saga. -} {- ********************************************************************* * * unsafeEqualityProof * * ********************************************************************* -} -- unsafeEqualityProof k t t ==> UnsafeRefl (Refl t) -- That is, if the two types are equal, it's not unsafe! unsafeEqualityProofRule :: RuleM CoreExpr unsafeEqualityProofRule = do { [Type rep, Type t1, Type t2] <- getArgs ; guard (t1 `eqType` t2) ; fn <- getFunction ; let (_, ue) = splitForAllTyCoVars (idType fn) tc = tyConAppTyCon ue -- tycon: UnsafeEquality (dc:_) = tyConDataCons tc -- data con: UnsafeRefl -- UnsafeRefl :: forall (r :: RuntimeRep) (a :: TYPE r). -- UnsafeEquality r a a ; return (mkTyApps (Var (dataConWrapId dc)) [rep, t1]) } {- ********************************************************************* * * Rules for seq# and spark# * * ********************************************************************* -} {- Note [seq# magic] ~~~~~~~~~~~~~~~~~~~~ The primop seq# :: forall a s . a -> State# s -> (# State# s, a #) is /not/ the same as the Prelude function seq :: a -> b -> b as you can see from its type. In fact, seq# is the implementation mechanism for 'evaluate' evaluate :: a -> IO a evaluate a = IO $ \s -> seq# a s The semantics of seq# is * evaluate its first argument * and return it Things to note * Why do we need a primop at all? 
That is, instead of case seq# x s of (# x, s #) -> blah why not instead say this? case x of { DEFAULT -> blah) Reason (see #5129): if we saw catch# (\s -> case x of { DEFAULT -> raiseIO# exn s }) handler then we'd drop the 'case x' because the body of the case is bottom anyway. But we don't want to do that; the whole /point/ of seq#/evaluate is to evaluate 'x' first in the IO monad. In short, we /always/ evaluate the first argument and never just discard it. * Why return the value? So that we can control sharing of seq'd values: in let x = e in x `seq` ... x ... We don't want to inline x, so better to represent it as let x = e in case seq# x RW of (# _, x' #) -> ... x' ... also it matches the type of rseq in the Eval monad. Implementing seq#. The compiler has magic for SeqOp in - GHC.Core.Opt.ConstantFold.seqRule: eliminate (seq# s) - GHC.StgToCmm.Expr.cgExpr, and cgCase: special case for seq# - GHC.Core.Utils.exprOkForSpeculation; see Note [exprOkForSpeculation and SeqOp/DataToTagOp] in GHC.Core.Utils - Simplify.addEvals records evaluated-ness for the result; see Note [Adding evaluatedness info to pattern-bound variables] in GHC.Core.Opt.Simplify -} seqRule :: RuleM CoreExpr seqRule = do [Type ty_a, Type _ty_s, a, s] <- getArgs guard $ exprIsHNF a return $ mkCoreUbxTup [exprType s, ty_a] [s, a] -- spark# :: forall a s . a -> State# s -> (# State# s, a #) sparkRule :: RuleM CoreExpr sparkRule = seqRule -- reduce on HNF, just the same -- XXX perhaps we shouldn't do this, because a spark eliminated by -- this rule won't be counted as a dud at runtime? {- ************************************************************************ * * \subsection{Built in rules} * * ************************************************************************ Note [Scoping for Builtin rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When compiling a (base-package) module that defines one of the functions mentioned in the RHS of a built-in rule, there's a danger that we'll see f = ...(eq String x).... 
....and lower down... eqString = ... Then a rewrite would give f = ...(eqString x)... ....and lower down... eqString = ... and lo, eqString is not in scope. This only really matters when we get to code generation. But the occurrence analyser does a GlomBinds step when necessary, that does a new SCC analysis on the whole set of bindings (see occurAnalysePgm), which sorts out the dependency, so all is fine. -} builtinRules :: [CoreRule] -- Rules for non-primops that can't be expressed using a RULE pragma builtinRules = [BuiltinRule { ru_name = fsLit "CStringFoldrLit", ru_fn = unpackCStringFoldrName, ru_nargs = 4, ru_try = match_cstring_foldr_lit_C }, BuiltinRule { ru_name = fsLit "CStringFoldrLitUtf8", ru_fn = unpackCStringFoldrUtf8Name, ru_nargs = 4, ru_try = match_cstring_foldr_lit_utf8 }, BuiltinRule { ru_name = fsLit "CStringAppendLit", ru_fn = unpackCStringAppendName, ru_nargs = 2, ru_try = match_cstring_append_lit_C }, BuiltinRule { ru_name = fsLit "CStringAppendLitUtf8", ru_fn = unpackCStringAppendUtf8Name, ru_nargs = 2, ru_try = match_cstring_append_lit_utf8 }, BuiltinRule { ru_name = fsLit "EqString", ru_fn = eqStringName, ru_nargs = 2, ru_try = match_eq_string }, BuiltinRule { ru_name = fsLit "CStringLength", ru_fn = cstringLengthName, ru_nargs = 1, ru_try = match_cstring_length }, BuiltinRule { ru_name = fsLit "Inline", ru_fn = inlineIdName, ru_nargs = 2, ru_try = \_ _ _ -> match_inline }, mkBasicRule unsafeEqualityProofName 3 unsafeEqualityProofRule, mkBasicRule divIntName 2 $ msum [ nonZeroLit 1 >> binaryLit (intOp2 div) , leftZero , do [arg, Lit (LitNumber LitNumInt d)] <- getArgs Just n <- return $ exactLog2 d platform <- getPlatform return $ Var (primOpId IntSraOp) `App` arg `App` mkIntVal platform n ], mkBasicRule modIntName 2 $ msum [ nonZeroLit 1 >> binaryLit (intOp2 mod) , leftZero , do [arg, Lit (LitNumber LitNumInt d)] <- getArgs Just _ <- return $ exactLog2 d platform <- getPlatform return $ Var (primOpId IntAndOp) `App` arg `App` mkIntVal 
platform (d - 1) ] ] ++ builtinBignumRules {-# NOINLINE builtinRules #-} -- there is no benefit to inlining these yet, despite this, GHC produces -- unfoldings for this regardless since the floated list entries look small. builtinBignumRules :: [CoreRule] builtinBignumRules = [ -- conversions lit_to_integer "Word# -> Integer" integerFromWordName , lit_to_integer "Int64# -> Integer" integerFromInt64Name , lit_to_integer "Word64# -> Integer" integerFromWord64Name , lit_to_integer "Natural -> Integer" integerFromNaturalName , integer_to_lit "Integer -> Word# (wrap)" integerToWordName mkWordLitWrap , integer_to_lit "Integer -> Int# (wrap)" integerToIntName mkIntLitWrap , integer_to_lit "Integer -> Word64# (wrap)" integerToWord64Name (\_ -> mkWord64LitWord64 . fromInteger) , integer_to_lit "Integer -> Int64# (wrap)" integerToInt64Name (\_ -> mkInt64LitInt64 . fromInteger) , integer_to_lit "Integer -> Float#" integerToFloatName (\_ -> mkFloatLitFloat . fromInteger) , integer_to_lit "Integer -> Double#" integerToDoubleName (\_ -> mkDoubleLitDouble . fromInteger) , integer_to_natural "Integer -> Natural (clamp)" integerToNaturalClampName False True , integer_to_natural "Integer -> Natural (wrap)" integerToNaturalName False False , integer_to_natural "Integer -> Natural (throw)" integerToNaturalThrowName True False , natural_to_word "Natural -> Word# (wrap)" naturalToWordName -- comparisons (return an unlifted Int#) , bignum_bin_pred "bigNatEq#" bignatEqName (==) -- comparisons (return an Ordering) , bignum_compare "bignatCompare" bignatCompareName , bignum_compare "bignatCompareWord#" bignatCompareWordName -- binary operations , integer_binop "integerAdd" integerAddName (+) , integer_binop "integerSub" integerSubName (-) , integer_binop "integerMul" integerMulName (*) , integer_binop "integerGcd" integerGcdName gcd , integer_binop "integerLcm" integerLcmName lcm , integer_binop "integerAnd" integerAndName (.&.) , integer_binop "integerOr" integerOrName (.|.) 
, integer_binop "integerXor" integerXorName xor , natural_binop "naturalAdd" naturalAddName (+) , natural_binop "naturalMul" naturalMulName (*) , natural_binop "naturalGcd" naturalGcdName gcd , natural_binop "naturalLcm" naturalLcmName lcm , natural_binop "naturalAnd" naturalAndName (.&.) , natural_binop "naturalOr" naturalOrName (.|.) , natural_binop "naturalXor" naturalXorName xor -- Natural subtraction: it's a binop but it can fail because of underflow so -- we have several primitives to handle here. , natural_sub "naturalSubUnsafe" naturalSubUnsafeName , natural_sub "naturalSubThrow" naturalSubThrowName , mkRule "naturalSub" naturalSubName 2 $ do [a0,a1] <- getArgs x <- isNaturalLiteral a0 y <- isNaturalLiteral a1 -- return an unboxed sum: (# (# #) | Natural #) let ret n v = pure $ mkCoreUbxSum 2 n [unboxedUnitTy,naturalTy] v platform <- getPlatform if x < y then ret 1 $ Var voidPrimId else ret 2 $ mkNaturalExpr platform (x - y) -- unary operations , bignum_unop "integerNegate" integerNegateName mkIntegerExpr negate , bignum_unop "integerAbs" integerAbsName mkIntegerExpr abs , bignum_unop "integerComplement" integerComplementName mkIntegerExpr complement , bignum_popcount "integerPopCount" integerPopCountName mkLitIntWrap , bignum_popcount "naturalPopCount" naturalPopCountName mkLitWordWrap -- Bits.bit , bignum_bit "integerBit" integerBitName mkIntegerExpr , bignum_bit "naturalBit" naturalBitName mkNaturalExpr -- Bits.testBit , bignum_testbit "integerTestBit" integerTestBitName , bignum_testbit "naturalTestBit" naturalTestBitName -- Bits.shift , bignum_shift "integerShiftL" integerShiftLName shiftL mkIntegerExpr , bignum_shift "integerShiftR" integerShiftRName shiftR mkIntegerExpr , bignum_shift "naturalShiftL" naturalShiftLName shiftL mkNaturalExpr , bignum_shift "naturalShiftR" naturalShiftRName shiftR mkNaturalExpr -- division , divop_one "integerQuot" integerQuotName quot mkIntegerExpr , divop_one "integerRem" integerRemName rem mkIntegerExpr , divop_one 
"integerDiv" integerDivName div mkIntegerExpr , divop_one "integerMod" integerModName mod mkIntegerExpr , divop_both "integerDivMod" integerDivModName divMod mkIntegerExpr integerTy , divop_both "integerQuotRem" integerQuotRemName quotRem mkIntegerExpr integerTy , divop_one "naturalQuot" naturalQuotName quot mkNaturalExpr , divop_one "naturalRem" naturalRemName rem mkNaturalExpr , divop_both "naturalQuotRem" naturalQuotRemName quotRem mkNaturalExpr naturalTy -- conversions from Rational for Float/Double literals , rational_to "rationalToFloat" rationalToFloatName mkFloatExpr , rational_to "rationalToDouble" rationalToDoubleName mkDoubleExpr -- conversions from Integer for Float/Double literals , integer_encode_float "integerEncodeFloat" integerEncodeFloatName mkFloatLitFloat , integer_encode_float "integerEncodeDouble" integerEncodeDoubleName mkDoubleLitDouble ] where mkRule str name nargs f = BuiltinRule { ru_name = fsLit str , ru_fn = name , ru_nargs = nargs , ru_try = runRuleM $ do env <- getRuleOpts guard (roBignumRules env) f } integer_to_lit str name convert = mkRule str name 1 $ do [a0] <- getArgs platform <- getPlatform -- we only match on Big Integer literals. 
Small literals -- are matched by the "Int# -> Integer -> *" rules x <- isBigIntegerLiteral a0 pure (convert platform x) natural_to_word str name = mkRule str name 1 $ do [a0] <- getArgs n <- isNaturalLiteral a0 platform <- getPlatform pure (Lit (mkLitWordWrap platform n)) integer_to_natural str name thrw clamp = mkRule str name 1 $ do [a0] <- getArgs x <- isIntegerLiteral a0 platform <- getPlatform if | x >= 0 -> pure $ mkNaturalExpr platform x | thrw -> mzero | clamp -> pure $ mkNaturalExpr platform 0 -- clamp to 0 | otherwise -> pure $ mkNaturalExpr platform (abs x) -- negate/wrap lit_to_integer str name = mkRule str name 1 $ do [a0] <- getArgs platform <- getPlatform i <- isBignumLiteral a0 -- convert any numeric literal into an Integer literal pure (mkIntegerExpr platform i) integer_binop str name op = mkRule str name 2 $ do [a0,a1] <- getArgs x <- isIntegerLiteral a0 y <- isIntegerLiteral a1 platform <- getPlatform pure (mkIntegerExpr platform (x `op` y)) natural_binop str name op = mkRule str name 2 $ do [a0,a1] <- getArgs x <- isNaturalLiteral a0 y <- isNaturalLiteral a1 platform <- getPlatform pure (mkNaturalExpr platform (x `op` y)) natural_sub str name = mkRule str name 2 $ do [a0,a1] <- getArgs x <- isNaturalLiteral a0 y <- isNaturalLiteral a1 guard (x >= y) platform <- getPlatform pure (mkNaturalExpr platform (x - y)) bignum_bin_pred str name op = mkRule str name 2 $ do platform <- getPlatform [a0,a1] <- getArgs x <- isBignumLiteral a0 y <- isBignumLiteral a1 pure $ if x `op` y then trueValInt platform else falseValInt platform bignum_compare str name = mkRule str name 2 $ do [a0,a1] <- getArgs x <- isBignumLiteral a0 y <- isBignumLiteral a1 pure $ case x `compare` y of LT -> ltVal EQ -> eqVal GT -> gtVal bignum_unop str name mk_lit op = mkRule str name 1 $ do [a0] <- getArgs x <- isBignumLiteral a0 platform <- getPlatform pure $ mk_lit platform (op x) bignum_popcount str name mk_lit = mkRule str name 1 $ do platform <- getPlatform -- We use a host Int 
to compute the popCount. If we compile on a 32-bit -- host for a 64-bit target, the result may be different than if computed -- by the target. So we disable this rule if sizes don't match. guard (platformWordSizeInBits platform == finiteBitSize (0 :: Word)) [a0] <- getArgs x <- isBignumLiteral a0 pure $ Lit (mk_lit platform (fromIntegral (popCount x))) bignum_bit str name mk_lit = mkRule str name 1 $ do [a0] <- getArgs platform <- getPlatform n <- isNumberLiteral a0 -- Make sure n is positive and small enough to yield a decently -- small number. Attempting to construct the Integer for -- (integerBit 9223372036854775807#) -- would be a bad idea (#14959) guard (n >= 0 && n <= fromIntegral (platformWordSizeInBits platform)) -- it's safe to convert a target Int value into a host Int value -- to perform the "bit" operation because n is very small (<= 64). pure $ mk_lit platform (bit (fromIntegral n)) bignum_testbit str name = mkRule str name 2 $ do [a0,a1] <- getArgs platform <- getPlatform x <- isBignumLiteral a0 n <- isNumberLiteral a1 -- ensure that we can store 'n' in a host Int guard (n >= 0 && n <= fromIntegral (maxBound :: Int)) pure $ if testBit x (fromIntegral n) then trueValInt platform else falseValInt platform bignum_shift str name shift_op mk_lit = mkRule str name 2 $ do [a0,a1] <- getArgs x <- isBignumLiteral a0 n <- isNumberLiteral a1 -- See Note [Guarding against silly shifts] -- Restrict constant-folding of shifts on Integers, somewhat arbitrary. 
-- We can get huge shifts in inaccessible code (#15673) guard (n <= 4) platform <- getPlatform pure $ mk_lit platform (x `shift_op` fromIntegral n) divop_one str name divop mk_lit = mkRule str name 2 $ do [a0,a1] <- getArgs n <- isBignumLiteral a0 d <- isBignumLiteral a1 guard (d /= 0) platform <- getPlatform pure $ mk_lit platform (n `divop` d) divop_both str name divop mk_lit ty = mkRule str name 2 $ do [a0,a1] <- getArgs n <- isBignumLiteral a0 d <- isBignumLiteral a1 guard (d /= 0) let (r,s) = n `divop` d platform <- getPlatform pure $ mkCoreUbxTup [ty,ty] [mk_lit platform r, mk_lit platform s] integer_encode_float :: RealFloat a => String -> Name -> (a -> CoreExpr) -> CoreRule integer_encode_float str name mk_lit = mkRule str name 2 $ do [a0,a1] <- getArgs x <- isIntegerLiteral a0 y <- isNumberLiteral a1 -- check that y (a target Int) is in the host Int range guard (y <= fromIntegral (maxBound :: Int)) pure (mk_lit $ encodeFloat x (fromInteger y)) rational_to :: RealFloat a => String -> Name -> (a -> CoreExpr) -> CoreRule rational_to str name mk_lit = mkRule str name 2 $ do -- This turns `rationalToFloat n d` where `n` and `d` are literals into -- a literal Float (and similarly for Double). 
-- NOTE(review): the first few lines below are the tail of 'rational_to',
-- whose head lies before this chunk.
      [a0,a1] <- getArgs
      n <- isIntegerLiteral a0
      d <- isIntegerLiteral a1
      -- it's important to not match d == 0, because that may represent a
      -- literal "0/0" or similar, and we can't produce a literal value for
      -- NaN or +-Inf
      guard (d /= 0)
      pure $ mk_lit (fromRational (n % d))

---------------------------------------------------
-- The rules are:
--       unpackAppendCString*# "foo"# (unpackCString*# "baz"#)
--       =  unpackCString*# "foobaz"#
--
--       unpackAppendCString*# "foo"# (unpackAppendCString*# "baz"# e)
--       =  unpackAppendCString*# "foobaz"# e
--

-- CString version
match_cstring_append_lit_C :: RuleFun
match_cstring_append_lit_C = match_cstring_append_lit unpackCStringAppendIdKey unpackCStringIdKey

-- CStringUTF8 version
match_cstring_append_lit_utf8 :: RuleFun
match_cstring_append_lit_utf8 = match_cstring_append_lit unpackCStringAppendUtf8IdKey unpackCStringUtf8IdKey

{-# INLINE match_cstring_append_lit #-}
-- Concatenate adjacent string literals.  The first guarded equation handles
-- an 'unpackCString*#' second argument, the second an 'unpackAppendCString*#'
-- second argument.  Ticks stripped from the second argument are re-attached
-- to the rewritten expression via 'mkTicks'.
match_cstring_append_lit :: Unique -> Unique -> RuleFun
match_cstring_append_lit append_key unpack_key _ env _ [lit1, e2]
  -- unpackAppendCString*# "foo"# (unpackCString*# "baz"#)
  --   = unpackCString*# "foobaz"#
  | Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , (strTicks, Var unpk `App` lit2) <- stripStrTopTicks env e2
  , unpk `hasKey` unpack_key
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  = Just $ mkTicks strTicks
         $ Var unpk `App` Lit (LitString (s1 `BS.append` s2))

  -- unpackAppendCString*# "foo"# (unpackAppendCString*# "baz"# e)
  --   = unpackAppendCString*# "foobaz"# e
  | Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , (strTicks, Var appnd `App` lit2 `App` e) <- stripStrTopTicks env e2
  , appnd `hasKey` append_key
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  = Just $ mkTicks strTicks
         $ Var appnd `App` Lit (LitString (s1 `BS.append` s2)) `App` e

match_cstring_append_lit _ _ _ _ _ _ = Nothing

---------------------------------------------------
-- The rule is this:
--      unpackFoldrCString*# "foo"# c (unpackFoldrCString*# "baz"# c n)
--      =  unpackFoldrCString*# "foobaz"# c n
--
-- See also Note [String literals in GHC] in CString.hs

-- CString version
match_cstring_foldr_lit_C :: RuleFun
match_cstring_foldr_lit_C = match_cstring_foldr_lit unpackCStringFoldrIdKey

-- CStringUTF8 version
match_cstring_foldr_lit_utf8 :: RuleFun
match_cstring_foldr_lit_utf8 = match_cstring_foldr_lit unpackCStringFoldrUtf8IdKey

{-# INLINE match_cstring_foldr_lit #-}
-- Merge two nested unpackFoldrCString*# applications when both string
-- arguments are literals and the two 'cons' continuations are the same
-- expression (up to ticks, which are preserved on the result).
match_cstring_foldr_lit :: Unique -> RuleFun
match_cstring_foldr_lit foldVariant _ env _
        [ Type ty1
        , lit1
        , c1
        , e2
        ]
  | (strTicks, Var unpk `App` Type ty2
                        `App` lit2
                        `App` c2
                        `App` n) <- stripStrTopTicks env e2
  , unpk `hasKey` foldVariant
  , Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  -- the two 'cons' continuations must be the same expression
  , eqCoreExpr c1 c2
  , (c1Ticks, c1') <- stripStrTopTicks env c1
  , c2Ticks <- stripStrTopTicksT c2
  = assert (ty1 `eqType` ty2) $
    Just $ mkTicks strTicks
         $ Var unpk `App` Type ty1
                    `App` Lit (LitString (s1 `BS.append` s2))
                    `App` mkTicks (c1Ticks ++ c2Ticks) c1'
                    `App` n

match_cstring_foldr_lit _ _ _ _ _ = Nothing

-- N.B. Ensure that we strip off any ticks (e.g. source notes) from the
-- argument, lest this may fail to fire when building with -g3. See #16740.
--
-- Also, look into variable's unfolding just in case the expression we look for
-- is in a top-level thunk.
-- | Strip floatable top-level ticks from an expression, looking through a
-- variable's unfolding when the expression is just a 'Var' whose unfolding
-- can be expanded (the expression we look for may sit in a top-level thunk).
stripStrTopTicks :: InScopeEnv -> CoreExpr -> ([CoreTickish], CoreExpr)
stripStrTopTicks (_, id_unf) expr
  | Var v <- expr
  , Just rhs <- expandUnfolding_maybe (id_unf v)
  = stripTicksTop tickishFloatable rhs
  | otherwise
  = stripTicksTop tickishFloatable expr

-- | Like 'stripStrTopTicks', but only returns the stripped ticks.
stripStrTopTicksT :: CoreExpr -> [CoreTickish]
stripStrTopTicksT = stripTicksTopT tickishFloatable

---------------------------------------------------
-- The rule is this:
--      eqString (unpackCString# (Lit s1)) (unpackCString# (Lit s2)) = s1==s2
-- Also matches unpackCStringUtf8#

match_eq_string :: RuleFun
match_eq_string _ env _ [e1, e2]
  | (ticks1, Var unpk1 `App` lit1) <- stripStrTopTicks env e1
  , (ticks2, Var unpk2 `App` lit2) <- stripStrTopTicks env e2
  , let unpk_key1 = getUnique unpk1
  , getUnique unpk2 == unpk_key1
  -- For now we insist the literals have to agree in their encoding
  -- to keep the rule simple. But we could check if the decoded strings
  -- compare equal in here as well.
  , unpk_key1 `elem` [unpackCStringUtf8IdKey, unpackCStringIdKey]
  , Just (LitString s1) <- exprIsLiteral_maybe env lit1
  , Just (LitString s2) <- exprIsLiteral_maybe env lit2
  = let result | s1 == s2  = trueValBool
               | otherwise = falseValBool
    in Just (mkTicks (ticks1 ++ ticks2) result)
match_eq_string _ _ _ _ = Nothing

-----------------------------------------------------------------------
-- Illustration of this rule:
--
-- cstringLength# "foobar"# --> 6
-- cstringLength# "fizz\NULzz"# --> 4
--
-- Nota bene: Addr# literals are suffixed by a NUL byte when they are
-- compiled to read-only data sections. That's why cstringLength# is
-- well defined on Addr# literals that do not explicitly have an embedded
-- NUL byte.
--
-- See GHC issue #5218, MR 2165, and bytestring PR 191. This is particularly
-- helpful when using OverloadedStrings to create a ByteString since the
-- function computing the length of such ByteStrings can often be constant
-- folded.
-- Constant-fold cstringLength# applied to a string literal: the result is
-- the index of the first NUL byte, or the full literal length when no NUL
-- byte is embedded.
match_cstring_length :: RuleFun
match_cstring_length rule_env env _ [lit1]
  | Just (LitString str) <- exprIsLiteral_maybe env lit1
    -- If elemIndex returns Just, it has the index of the first embedded NUL
    -- in the string. If no NUL bytes are present (the common case) then use
    -- full length of the byte string.
  = let len = fromMaybe (BS.length str) (BS.elemIndex 0 str)
    in Just (Lit (mkLitInt (roPlatform rule_env) (fromIntegral len)))
match_cstring_length _ _ _ _ = Nothing

{- Note [inlineId magic]
~~~~~~~~~~~~~~~~~~~~~~~~
The call 'inline f' arranges that 'f' is inlined, regardless of
its size. More precisely, the call 'inline f' rewrites to the
right-hand side of 'f's definition. This allows the programmer to
control inlining from a particular call site rather than the
definition site of the function.

The moving parts are simple:

* A very simple definition in the library base:GHC.Magic
     {-# NOINLINE[0] inline #-}
     inline :: a -> a
     inline x = x

  So in phase 0, 'inline' will be inlined, so its use imposes
  no overhead.

* A rewrite rule, in GHC.Core.Opt.ConstantFold, which makes
  (inline f) inline, implemented by match_inline.
  The rule for the 'inline' function is this:
     inline f_ty (f a b c) = <f's unfolding> a b c
  (if f has an unfolding, EVEN if it's a loop breaker)

  It's important to allow the argument to 'inline' to have args itself
  (a) because its more forgiving to allow the programmer to write
      either  inline f a b c
      or      inline (f a b c)
  (b) because a polymorphic f wll get a type argument that the
      programmer can't avoid, so the call may look like
         inline (map @Int @Bool) g xs

  Also, don't forget about 'inline's type argument!
-}

match_inline :: [Expr CoreBndr] -> Maybe (Expr CoreBndr)
match_inline (Type _ : e : _)
  | (Var f, args1) <- collectArgs e,
    Just unf <- maybeUnfoldingTemplate (realIdUnfolding f)
            -- Ignore the IdUnfoldingFun here!
  = Just (mkApps unf args1)

match_inline _ = Nothing

--------------------------------------------------------
-- Note [Constant folding through nested expressions]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- We use rewrites rules to perform constant folding. It means that we don't
-- have a global view of the expression we are trying to optimise. As a
-- consequence we only perform local (small-step) transformations that either:
--    1) reduce the number of operations
--    2) rearrange the expression to increase the odds that other rules will
--       match
--
-- We don't try to handle more complex expression optimisation cases that would
-- require a global view. For example, rewriting expressions to increase
-- sharing (e.g., Horner's method); optimisations that require local
-- transformations increasing the number of operations; rearrangements to
-- cancel/factorize terms (e.g., (a+b-a-b) isn't rearranged to reduce to 0).
--
-- We already have rules to perform constant folding on expressions with the
-- following shape (where a and/or b are literals):
--
--          D)    op
--               /  \
--              /    \
--             /      \
--            a        b
--
-- To support nested expressions, we match three other shapes of expression
-- trees:
--
-- A)   op1          B)   op1       C)   op1
--     /   \             /   \         /   \
--    /     \           /     \       /     \
--   /       \         /       \     /       \
--  a        op2      op2       c   op2      op3
--          /   \    /   \         /   \    /   \
--         /     \  /     \       /     \  /     \
--        b       c a      b     a       b c      d
--
--
-- R1) +/- simplification:
--    ops = + or -, two literals (not siblings)
--
--    Examples:
--       A: 5 + (10-x)        ==> 15-x
--       B: (10+x) + 5        ==> 15+x
--       C: (5+a)-(5-b)       ==> 0+(a+b)
--
-- R2) *, `and`, `or` simplification
--    ops = *, `and`, `or` two literals (not siblings)
--
--    Examples:
--       A: 5 * (10*x)        ==> 50*x
--       B: (10*x) * 5        ==> 50*x
--       C: (5*a)*(5*b)       ==> 25*(a*b)
--
-- R3) * distribution over +/-
--    op1 = *, op2 = + or -, two literals (not siblings)
--
--    This transformation doesn't reduce the number of operations but switches
--    the outer and the inner operations so that the outer is (+) or (-) instead
--    of (*).
--    It increases the odds that other rules will match after this one.
--
--    Examples:
--       A: 5 * (10-x)        ==> 50 - (5*x)
--       B: (10+x) * 5        ==> 50 + (5*x)
--       C: Not supported as it would increase the number of operations:
--          (5+a)*(5-b)       ==> 25 - 5*b + 5*a - a*b
--
-- R4) Simple factorization
--
--    op1 = + or -, op2/op3 = *,
--    one literal for each innermost * operation (except in the D case),
--    the two other terms are equals
--
--    Examples:
--       A: x - (10*x)        ==> (-9)*x
--       B: (10*x) + x        ==> 11*x
--       C: (5*x)-(x*3)       ==> 2*x
--       D: x+x               ==> 2*x
--
-- R5) +/- propagation
--
--    ops = + or -, one literal
--
--    This transformation doesn't reduce the number of operations but propagates
--    the constant to the outer level. It increases the odds that other rules
--    will match after this one.
--
--    Examples:
--       A: x - (10-y)        ==> (x+y) - 10
--       B: (10+x) - y        ==> 10 + (x-y)
--       C: N/A (caught by the A and B cases)
--
--------------------------------------------------------

-- Rules to perform constant folding into nested expressions
--
--See Note [Constant folding through nested expressions]

-- Driver for (+): tries both argument orders because + is commutative.
addFoldingRules :: PrimOp -> NumOps -> RuleM CoreExpr
addFoldingRules op num_ops = do
   massert (op == numAdd num_ops)
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for + is handled here
      (addFoldingRules' platform arg1 arg2 num_ops
       <|> addFoldingRules' platform arg2 arg1 num_ops)

-- Driver for (-): subtraction is not commutative, single argument order.
subFoldingRules :: PrimOp -> NumOps -> RuleM CoreExpr
subFoldingRules op num_ops = do
   massert (op == numSub num_ops)
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe (subFoldingRules' platform arg1 arg2 num_ops)

-- Driver for (*): tries both argument orders because * is commutative.
mulFoldingRules :: PrimOp -> NumOps -> RuleM CoreExpr
mulFoldingRules op num_ops = do
   massert (op == numMul num_ops)
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for * is handled here
      (mulFoldingRules' platform arg1 arg2 num_ops
       <|> mulFoldingRules' platform arg2 arg1 num_ops)

-- Driver for bitwise `and`: commutative, both orders tried.
andFoldingRules :: NumOps -> RuleM CoreExpr
andFoldingRules num_ops = do
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for `and` is handled here
      (andFoldingRules' platform arg1 arg2 num_ops
       <|> andFoldingRules' platform arg2 arg1 num_ops)

-- Driver for bitwise `or`: commutative, both orders tried.
orFoldingRules :: NumOps -> RuleM CoreExpr
orFoldingRules num_ops = do
   env <- getRuleOpts
   guard (roNumConstantFolding env)
   [arg1,arg2] <- getArgs
   platform <- getPlatform
   liftMaybe
      -- commutativity for `or` is handled here
      (orFoldingRules' platform arg1 arg2 num_ops
       <|> orFoldingRules' platform arg2 arg1 num_ops)

-- One step of (+) folding; see Note [Constant folding through nested
-- expressions] for the R1/R4/R5 rule names used below.
addFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
addFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
   -- x + (-y) ==> x-y
   (x, is_neg num_ops -> Just y)
      -> Just (x `sub` y)

   -- R1) +/- simplification

   -- l1 + (l2 + x) ==> (l1+l2) + x
   (L l1, is_lit_add num_ops -> Just (l2,x))
      -> Just (mkL (l1+l2) `add` x)

   -- l1 + (l2 - x) ==> (l1+l2) - x
   (L l1, is_sub num_ops -> Just (L l2,x))
      -> Just (mkL (l1+l2) `sub` x)

   -- l1 + (x - l2) ==> (l1-l2) + x
   (L l1, is_sub num_ops -> Just (x,L l2))
      -> Just (mkL (l1-l2) `add` x)

   -- (l1 + x) + (l2 + y) ==> (l1+l2) + (x+y)
   (is_lit_add num_ops -> Just (l1,x), is_lit_add num_ops -> Just (l2,y))
      -> Just (mkL (l1+l2) `add` (x `add` y))

   -- (l1 + x) + (l2 - y) ==> (l1+l2) + (x-y)
   (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (L l2,y))
      -> Just (mkL (l1+l2) `add` (x `sub` y))

   -- (l1 + x) + (y - l2) ==> (l1-l2) + (x+y)
   (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (y,L l2))
      -> Just (mkL (l1-l2) `add` (x `add` y))

   -- (l1 - x) + (l2 - y) ==> (l1+l2) - (x+y)
   (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (L l2,y))
      -> Just (mkL (l1+l2) `sub` (x `add` y))

   -- (l1 - x) + (y - l2) ==> (l1-l2) + (y-x)
   (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (y,L l2))
      -> Just (mkL (l1-l2) `add` (y `sub` x))

   -- (x - l1) + (y - l2) ==> (0-l1-l2) + (x+y)
   (is_sub num_ops -> Just (x,L l1), is_sub num_ops -> Just (y,L l2))
      -> Just (mkL (0-l1-l2) `add` (x `add` y))

   -- R4) Simple factorization

   -- x + x ==> 2 * x
   _ | Just l1 <- is_expr_mul num_ops arg1 arg2
      -> Just (mkL (l1+1) `mul` arg1)

   -- (l1 * x) + x ==> (l1+1) * x
   _ | Just l1 <- is_expr_mul num_ops arg2 arg1
      -> Just (mkL (l1+1) `mul` arg2)

   -- (l1 * x) + (l2 * x) ==> (l1+l2) * x
   (is_lit_mul num_ops -> Just (l1,x), is_expr_mul num_ops x -> Just l2)
      -> Just (mkL (l1+l2) `mul` x)

   -- R5) +/- propagation: these transformations push literals outwards
   -- with the hope that other rules can then be applied.

   -- In the following rules, x can't be a literal otherwise another
   -- rule would have combined it with the other literal in arg2. So we
   -- don't have to check this to avoid loops here.

   -- x + (l1 + y) ==> l1 + (x + y)
   (_, is_lit_add num_ops -> Just (l1,y))
      -> Just (mkL l1 `add` (arg1 `add` y))

   -- x + (l1 - y) ==> l1 + (x - y)
   (_, is_sub num_ops -> Just (L l1,y))
      -> Just (mkL l1 `add` (arg1 `sub` y))

   -- x + (y - l1) ==> (x + y) - l1
   (_, is_sub num_ops -> Just (y,L l1))
      -> Just ((arg1 `add` y) `sub` mkL l1)

   _ -> Nothing

  where
   mkL = Lit .
-- NOTE(review): continuation of addFoldingRules' where-clause from above;
-- 'mkL' wraps an Integer into a wrapped numeric literal for this NumOps.
         mkNumLiteral platform num_ops
   add x y = BinOpApp x (numAdd num_ops) y
   sub x y = BinOpApp x (numSub num_ops) y
   mul x y = BinOpApp x (numMul num_ops) y

-- One step of (-) folding; see Note [Constant folding through nested
-- expressions] for the R1/R4/R5 rule names used below.
subFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
subFoldingRules' platform arg1 arg2 num_ops = case (arg1,arg2) of
   -- x - (-y) ==> x+y
   (x, is_neg num_ops -> Just y)
      -> Just (x `add` y)

   -- R1) +/- simplification

   -- l1 - (l2 + x) ==> (l1-l2) - x
   (L l1, is_lit_add num_ops -> Just (l2,x))
      -> Just (mkL (l1-l2) `sub` x)

   -- l1 - (l2 - x) ==> (l1-l2) + x
   (L l1, is_sub num_ops -> Just (L l2,x))
      -> Just (mkL (l1-l2) `add` x)

   -- l1 - (x - l2) ==> (l1+l2) - x
   (L l1, is_sub num_ops -> Just (x, L l2))
      -> Just (mkL (l1+l2) `sub` x)

   -- (l1 + x) - l2 ==> (l1-l2) + x
   (is_lit_add num_ops -> Just (l1,x), L l2)
      -> Just (mkL (l1-l2) `add` x)

   -- (l1 - x) - l2 ==> (l1-l2) - x
   (is_sub num_ops -> Just (L l1,x), L l2)
      -> Just (mkL (l1-l2) `sub` x)

   -- (x - l1) - l2 ==> x - (l1+l2)
   (is_sub num_ops -> Just (x,L l1), L l2)
      -> Just (x `sub` mkL (l1+l2))

   -- (l1 + x) - (l2 + y) ==> (l1-l2) + (x-y)
   (is_lit_add num_ops -> Just (l1,x), is_lit_add num_ops -> Just (l2,y))
      -> Just (mkL (l1-l2) `add` (x `sub` y))

   -- (l1 + x) - (l2 - y) ==> (l1-l2) + (x+y)
   (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (L l2,y))
      -> Just (mkL (l1-l2) `add` (x `add` y))

   -- (l1 + x) - (y - l2) ==> (l1+l2) + (x-y)
   (is_lit_add num_ops -> Just (l1,x), is_sub num_ops -> Just (y,L l2))
      -> Just (mkL (l1+l2) `add` (x `sub` y))

   -- (l1 - x) - (l2 + y) ==> (l1-l2) - (x+y)
   (is_sub num_ops -> Just (L l1,x), is_lit_add num_ops -> Just (l2,y))
      -> Just (mkL (l1-l2) `sub` (x `add` y))

   -- (x - l1) - (l2 + y) ==> (0-l1-l2) + (x-y)
   (is_sub num_ops -> Just (x,L l1), is_lit_add num_ops -> Just (l2,y))
      -> Just (mkL (0-l1-l2) `add` (x `sub` y))

   -- (l1 - x) - (l2 - y) ==> (l1-l2) + (y-x)
   (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (L l2,y))
      -> Just (mkL (l1-l2) `add` (y `sub` x))

   -- (l1 - x) - (y - l2) ==> (l1+l2) - (x+y)
   (is_sub num_ops -> Just (L l1,x), is_sub num_ops -> Just (y,L l2))
      -> Just (mkL (l1+l2) `sub` (x `add` y))

   -- (x - l1) - (l2 - y) ==> (0-l1-l2) + (x+y)
   (is_sub num_ops -> Just (x,L l1), is_sub num_ops -> Just (L l2,y))
      -> Just (mkL (0-l1-l2) `add` (x `add` y))

   -- (x - l1) - (y - l2) ==> (l2-l1) + (x-y)
   (is_sub num_ops -> Just (x,L l1), is_sub num_ops -> Just (y,L l2))
      -> Just (mkL (l2-l1) `add` (x `sub` y))

   -- R4) Simple factorization

   -- x - (l1 * x) ==> (1-l1) * x
   _ | Just l1 <- is_expr_mul num_ops arg1 arg2
      -> Just (mkL (1-l1) `mul` arg1)

   -- (l1 * x) - x ==> (l1-1) * x
   _ | Just l1 <- is_expr_mul num_ops arg2 arg1
      -> Just (mkL (l1-1) `mul` arg2)

   -- (l1 * x) - (l2 * x) ==> (l1-l2) * x
   (is_lit_mul num_ops -> Just (l1,x), is_expr_mul num_ops x -> Just l2)
      -> Just (mkL (l1-l2) `mul` x)

   -- R5) +/- propagation: these transformations push literals outwards
   -- with the hope that other rules can then be applied.

   -- In the following rules, x can't be a literal otherwise another
   -- rule would have combined it with the other literal in arg2. So we
   -- don't have to check this to avoid loops here.

   -- x - (l1 + y) ==> (x - y) - l1
   (_, is_lit_add num_ops -> Just (l1,y))
      -> Just ((arg1 `sub` y) `sub` mkL l1)

   -- (l1 + x) - y ==> l1 + (x - y)
   (is_lit_add num_ops -> Just (l1,x), _)
      -> Just (mkL l1 `add` (x `sub` arg2))

   -- x - (l1 - y) ==> (x + y) - l1
   (_, is_sub num_ops -> Just (L l1,y))
      -> Just ((arg1 `add` y) `sub` mkL l1)

   -- x - (y - l1) ==> l1 + (x - y)
   (_, is_sub num_ops -> Just (y,L l1))
      -> Just (mkL l1 `add` (arg1 `sub` y))

   -- (l1 - x) - y ==> l1 - (x + y)
   (is_sub num_ops -> Just (L l1,x), _)
      -> Just (mkL l1 `sub` (x `add` arg2))

   -- (x - l1) - y ==> (x - y) - l1
   (is_sub num_ops -> Just (x,L l1), _)
      -> Just ((x `sub` arg2) `sub` mkL l1)

   _ -> Nothing

  where
   mkL = Lit .
-- NOTE(review): continuation of subFoldingRules' where-clause from above.
         mkNumLiteral platform num_ops
   add x y = BinOpApp x (numAdd num_ops) y
   sub x y = BinOpApp x (numSub num_ops) y
   mul x y = BinOpApp x (numMul num_ops) y

-- One step of (*) folding; see Note [Constant folding through nested
-- expressions] (rules R2 and R3).
mulFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
mulFoldingRules' platform arg1 arg2 num_ops = case (arg1,arg2) of
   -- (-x) * (-y) ==> x*y
   (is_neg num_ops -> Just x, is_neg num_ops -> Just y)
      -> Just (x `mul` y)

   -- l1 * (-x) ==> (-l1) * x
   (L l1, is_neg num_ops -> Just x)
      -> Just (mkL (-l1) `mul` x)

   -- l1 * (l2 * x) ==> (l1*l2) * x
   (L l1, is_lit_mul num_ops -> Just (l2,x))
      -> Just (mkL (l1*l2) `mul` x)

   -- l1 * (l2 + x) ==> (l1*l2) + (l1 * x)
   (L l1, is_lit_add num_ops -> Just (l2,x))
      -> Just (mkL (l1*l2) `add` (arg1 `mul` x))

   -- l1 * (l2 - x) ==> (l1*l2) - (l1 * x)
   (L l1, is_sub num_ops -> Just (L l2,x))
      -> Just (mkL (l1*l2) `sub` (arg1 `mul` x))

   -- l1 * (x - l2) ==> (l1 * x) - (l1*l2)
   (L l1, is_sub num_ops -> Just (x, L l2))
      -> Just ((arg1 `mul` x) `sub` mkL (l1*l2))

   -- (l1 * x) * (l2 * y) ==> (l1*l2) * (x * y)
   (is_lit_mul num_ops -> Just (l1,x), is_lit_mul num_ops -> Just (l2,y))
      -> Just (mkL (l1*l2) `mul` (x `mul` y))

   _ -> Nothing

  where
   mkL = Lit . mkNumLiteral platform num_ops
   add x y = BinOpApp x (numAdd num_ops) y
   sub x y = BinOpApp x (numSub num_ops) y
   mul x y = BinOpApp x (numMul num_ops) y

-- One step of bitwise-and folding (rule R2).  Only fires for NumOps that
-- actually provide an `and` primop (see 'numAnd').
andFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
andFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
   -- R2) * `or` `and` simplications

   -- l1 and (l2 and x) ==> (l1 and l2) and x
   (L l1, is_lit_and num_ops -> Just (l2, x))
      -> Just (mkL (l1 .&. l2) `and` x)

   -- l1 and (l2 or x) ==> (l1 and l2) or (l1 and x)
   -- does not decrease operations

   -- (l1 and x) and (l2 and y) ==> (l1 and l2) and (x and y)
   (is_lit_and num_ops -> Just (l1, x), is_lit_and num_ops -> Just (l2, y))
      -> Just (mkL (l1 .&. l2) `and` (x `and` y))

   -- (l1 and x) and (l2 or y) ==> (l1 and l2 and x) or (l1 and x and y)
   -- (l1 or x) and (l2 or y) ==> (l1 and l2) or (x and l2) or (l1 and y) or (x and y)
   -- increase operation numbers

   _ -> Nothing

  where
   mkL = Lit . mkNumLiteral platform num_ops
   -- fromJust is safe here: this rule is only installed for NumOps with numAnd = Just _
   and x y = BinOpApp x (fromJust (numAnd num_ops)) y

-- One step of bitwise-or folding (rule R2).  Only fires for NumOps that
-- actually provide an `or` primop (see 'numOr').
orFoldingRules' :: Platform -> CoreExpr -> CoreExpr -> NumOps -> Maybe CoreExpr
orFoldingRules' platform arg1 arg2 num_ops = case (arg1, arg2) of
   -- R2) * `or` `and` simplications

   -- l1 or (l2 or x) ==> (l1 or l2) or x
   (L l1, is_lit_or num_ops -> Just (l2, x))
      -> Just (mkL (l1 .|. l2) `or` x)

   -- l1 or (l2 and x) ==> (l1 or l2) and (l1 and x)
   -- does not decrease operations

   -- (l1 or x) or (l2 or y) ==> (l1 or l2) or (x or y)
   (is_lit_or num_ops -> Just (l1, x), is_lit_or num_ops -> Just (l2, y))
      -> Just (mkL (l1 .|. l2) `or` (x `or` y))

   -- (l1 and x) or (l2 or y) ==> (l1 and l2 and x) or (l1 and x and y)
   -- (l1 and x) or (l2 and y) ==> (l1 and l2) or (x and l2) or (l1 and y) or (x and y)
   -- increase operation numbers

   _ -> Nothing

  where
   mkL = Lit . mkNumLiteral platform num_ops
   -- fromJust is safe here: this rule is only installed for NumOps with numOr = Just _
   or x y = BinOpApp x (fromJust (numOr num_ops)) y

-- Match an application of the given binary primop.
is_binop :: PrimOp -> CoreExpr -> Maybe (Arg CoreBndr, Arg CoreBndr)
is_binop op e = case e of
   BinOpApp x op' y | op == op' -> Just (x,y)
   _                            -> Nothing

-- Match an application of the given unary primop.
is_op :: PrimOp -> CoreExpr -> Maybe (Arg CoreBndr)
is_op op e = case e of
   App (OpVal op') x | op == op' -> Just x
   _                             -> Nothing

is_add, is_sub, is_mul, is_and, is_or :: NumOps -> CoreExpr -> Maybe (Arg CoreBndr, Arg CoreBndr)
is_add num_ops e = is_binop (numAdd num_ops) e
is_sub num_ops e = is_binop (numSub num_ops) e
is_mul num_ops e = is_binop (numMul num_ops) e
is_and num_ops e = numAnd num_ops >>= \op -> is_binop op e
is_or  num_ops e = numOr  num_ops >>= \op -> is_binop op e

is_neg :: NumOps -> CoreExpr -> Maybe (Arg CoreBndr)
is_neg num_ops e = numNeg num_ops >>= \op -> is_op op e

-- match operation with a literal (handles commutativity)
is_lit_add, is_lit_mul, is_lit_and, is_lit_or :: NumOps -> CoreExpr -> Maybe (Integer, Arg CoreBndr)
is_lit_add num_ops e = is_lit' is_add num_ops e
is_lit_mul num_ops e = is_lit' is_mul num_ops e
is_lit_and num_ops e = is_lit' is_and num_ops e
is_lit_or  num_ops e = is_lit' is_or  num_ops e

is_lit' :: (NumOps -> CoreExpr -> Maybe (Arg CoreBndr, Arg CoreBndr)) -> NumOps -> CoreExpr -> Maybe (Integer, Arg CoreBndr)
is_lit' f num_ops e = case f num_ops e of
   Just (L l, x  ) -> Just (l,x)
   Just (x  , L l) -> Just (l,x)
   _               -> Nothing

-- match given "x": return 1
-- match "lit * x": return lit value (handles commutativity)
is_expr_mul :: NumOps -> Expr CoreBndr -> Expr CoreBndr -> Maybe Integer
is_expr_mul num_ops x e = if
   | x `cheapEqExpr` e
      -> Just 1
   | Just (k,x') <- is_lit_mul num_ops e
   , x `cheapEqExpr` x'
      -> return k
   | otherwise
      -> Nothing

-- | Match the application of a binary primop
pattern BinOpApp :: Arg CoreBndr -> PrimOp -> Arg CoreBndr -> CoreExpr
pattern BinOpApp x op y = OpVal op `App` x `App` y

-- | Match a primop
pattern OpVal:: PrimOp -> Arg CoreBndr
pattern OpVal op <- Var (isPrimOpId_maybe ->
-- NOTE(review): completes the 'OpVal' pattern synonym started above.
                                           Just op) where
   OpVal op = Var (primOpId op)

-- | Match a literal
pattern L :: Integer -> Arg CoreBndr
pattern L i <- Lit (LitNumber _ i)

-- | Explicit "type-class"-like dictionary for numeric primops
data NumOps = NumOps
   { numAdd     :: !PrimOp         -- ^ Add two numbers
   , numSub     :: !PrimOp         -- ^ Sub two numbers
   , numMul     :: !PrimOp         -- ^ Multiply two numbers
   , numAnd     :: !(Maybe PrimOp) -- ^ And two numbers
   , numOr      :: !(Maybe PrimOp) -- ^ Or two numbers
   , numNeg     :: !(Maybe PrimOp) -- ^ Negate a number
   , numLitType :: !LitNumType     -- ^ Literal type
   }

-- | Create a numeric literal
-- (wrapping to the target range for 'numLitType', via 'mkLitNumberWrap')
mkNumLiteral :: Platform -> NumOps -> Integer -> Literal
mkNumLiteral platform ops i = mkLitNumberWrap platform (numLitType ops) i

int8Ops :: NumOps
int8Ops = NumOps
   { numAdd     = Int8AddOp
   , numSub     = Int8SubOp
   , numMul     = Int8MulOp
   , numLitType = LitNumInt8
   , numAnd     = Nothing
   , numOr      = Nothing
   , numNeg     = Just Int8NegOp
   }

word8Ops :: NumOps
word8Ops = NumOps
   { numAdd     = Word8AddOp
   , numSub     = Word8SubOp
   , numMul     = Word8MulOp
   , numAnd     = Just Word8AndOp
   , numOr      = Just Word8OrOp
   , numNeg     = Nothing
   , numLitType = LitNumWord8
   }

int16Ops :: NumOps
int16Ops = NumOps
   { numAdd     = Int16AddOp
   , numSub     = Int16SubOp
   , numMul     = Int16MulOp
   , numLitType = LitNumInt16
   , numAnd     = Nothing
   , numOr      = Nothing
   , numNeg     = Just Int16NegOp
   }

word16Ops :: NumOps
word16Ops = NumOps
   { numAdd     = Word16AddOp
   , numSub     = Word16SubOp
   , numMul     = Word16MulOp
   , numAnd     = Just Word16AndOp
   , numOr      = Just Word16OrOp
   , numNeg     = Nothing
   , numLitType = LitNumWord16
   }

int32Ops :: NumOps
int32Ops = NumOps
   { numAdd     = Int32AddOp
   , numSub     = Int32SubOp
   , numMul     = Int32MulOp
   , numLitType = LitNumInt32
   , numAnd     = Nothing
   , numOr      = Nothing
   , numNeg     = Just Int32NegOp
   }

word32Ops :: NumOps
word32Ops = NumOps
   { numAdd     = Word32AddOp
   , numSub     = Word32SubOp
   , numMul     = Word32MulOp
   , numAnd     = Just Word32AndOp
   , numOr      = Just Word32OrOp
   , numNeg     = Nothing
   , numLitType = LitNumWord32
   }

int64Ops :: NumOps
int64Ops = NumOps
   { numAdd     = Int64AddOp
   , numSub     = Int64SubOp
   , numMul     = Int64MulOp
   , numLitType = LitNumInt64
   , numAnd     = Nothing
   , numOr      = Nothing
   , numNeg     = Just Int64NegOp
   }

word64Ops :: NumOps
word64Ops = NumOps
   { numAdd     = Word64AddOp
   , numSub     = Word64SubOp
   , numMul     = Word64MulOp
   , numAnd     = Just Word64AndOp
   , numOr      = Just Word64OrOp
   , numNeg     = Nothing
   , numLitType = LitNumWord64
   }

intOps :: NumOps
intOps = NumOps
   { numAdd     = IntAddOp
   , numSub     = IntSubOp
   , numMul     = IntMulOp
   , numAnd     = Just IntAndOp
   , numOr      = Just IntOrOp
   , numNeg     = Just IntNegOp
   , numLitType = LitNumInt
   }

wordOps :: NumOps
wordOps = NumOps
   { numAdd     = WordAddOp
   , numSub     = WordSubOp
   , numMul     = WordMulOp
   , numAnd     = Just WordAndOp
   , numOr      = Just WordOrOp
   , numNeg     = Nothing
   , numLitType = LitNumWord
   }

--------------------------------------------------------
-- Constant folding through case-expressions
--
-- cf Scrutinee Constant Folding in simplCore/GHC.Core.Opt.Simplify.Utils
--------------------------------------------------------

-- | Match the scrutinee of a case and potentially return a new scrutinee and a
-- function to apply to each literal alternative.
caseRules :: Platform
          -> CoreExpr                       -- Scrutinee
          -> Maybe ( CoreExpr               -- New scrutinee
                   , AltCon -> Maybe AltCon -- How to fix up the alt pattern
                                            --   Nothing <=> Unreachable
                                            -- See Note [Unreachable caseRules alternatives]
                   , Id -> CoreExpr)        -- How to reconstruct the original scrutinee
                                            -- from the new case-binder
-- e.g  case e of b {
--         ...;
--         con bs -> rhs;
--         ... }
--  ==>
--      case e' of b' {
--         ...;
--         fixup_altcon[con] bs -> let b = mk_orig[b] in rhs;
--         ...
--         } (end of the example begun in the comment above)

caseRules platform (App (App (Var f) v) (Lit l))   -- v `op` x#
  | Just op <- isPrimOpId_maybe f
  , LitNumber _ x <- l
  , Just adjust_lit <- adjustDyadicRight op x
  = Just (v, tx_lit_con platform adjust_lit
           , \v -> (App (App (Var f) (Var v)) (Lit l)))

caseRules platform (App (App (Var f) (Lit l)) v)   -- x# `op` v
  | Just op <- isPrimOpId_maybe f
  , LitNumber _ x <- l
  , Just adjust_lit <- adjustDyadicLeft x op
  = Just (v, tx_lit_con platform adjust_lit
           , \v -> (App (App (Var f) (Lit l)) (Var v)))

caseRules platform (App (Var f) v)                 -- op v
  | Just op <- isPrimOpId_maybe f
  , Just adjust_lit <- adjustUnary op
  = Just (v, tx_lit_con platform adjust_lit
           , \v -> App (Var f) (Var v))

-- See Note [caseRules for tagToEnum]
caseRules platform (App (App (Var f) type_arg) v)
  | Just TagToEnumOp <- isPrimOpId_maybe f
  = Just (v, tx_con_tte platform
           , \v -> (App (App (Var f) type_arg) (Var v)))

-- See Note [caseRules for dataToTag]
caseRules _ (App (App (Var f) (Type ty)) v)       -- dataToTag x
  | Just DataToTagOp <- isPrimOpId_maybe f
  , Just (tc, _) <- tcSplitTyConApp_maybe ty
  , isAlgTyCon tc
  = Just (v, tx_con_dtt ty
           , \v -> App (App (Var f) (Type ty)) (Var v))

caseRules _ _ = Nothing

-- Fix up a literal alternative under an adjusted scrutinee.
tx_lit_con :: Platform -> (Integer -> Integer) -> AltCon -> Maybe AltCon
tx_lit_con _        _      DEFAULT    = Just DEFAULT
tx_lit_con platform adjust (LitAlt l) = Just $ LitAlt (mapLitValue platform adjust l)
tx_lit_con _        _      alt        = pprPanic "caseRules" (ppr alt)
  -- NB: mapLitValue uses mkLitIntWrap etc, to ensure that the
  -- literal alternatives remain in Word/Int target ranges
  -- (See Note [Word/Int underflow/overflow] in GHC.Types.Literal and #13172).

adjustDyadicRight :: PrimOp -> Integer -> Maybe (Integer -> Integer)
-- Given (x `op` lit) return a function 'f' s.t.  f (x `op` lit) = x
adjustDyadicRight op lit
  = case op of
         WordAddOp -> Just (\y -> y-lit      )
         IntAddOp  -> Just (\y -> y-lit      )
         WordSubOp -> Just (\y -> y+lit      )
         IntSubOp  -> Just (\y -> y+lit      )
         WordXorOp -> Just (\y -> y `xor` lit)
         IntXorOp  -> Just (\y -> y `xor` lit)
         _         -> Nothing

adjustDyadicLeft :: Integer -> PrimOp -> Maybe (Integer -> Integer)
-- Given (lit `op` x) return a function 'f' s.t.  f (lit `op` x) = x
adjustDyadicLeft lit op
  = case op of
         WordAddOp -> Just (\y -> y-lit      )
         IntAddOp  -> Just (\y -> y-lit      )
         WordSubOp -> Just (\y -> lit-y      )
         IntSubOp  -> Just (\y -> lit-y      )
         WordXorOp -> Just (\y -> y `xor` lit)
         IntXorOp  -> Just (\y -> y `xor` lit)
         _         -> Nothing

adjustUnary :: PrimOp -> Maybe (Integer -> Integer)
-- Given (op x) return a function 'f' s.t.  f (op x) = x
adjustUnary op
  = case op of
         WordNotOp -> Just (\y -> complement y)
         IntNotOp  -> Just (\y -> complement y)
         IntNegOp  -> Just (\y -> negate y    )
         _         -> Nothing

-- Fix up an alternative when the scrutinee was (tagToEnum v):
-- each DataAlt becomes the Int# literal of its (zero-based) tag.
tx_con_tte :: Platform -> AltCon -> Maybe AltCon
tx_con_tte _        DEFAULT         = Just DEFAULT
tx_con_tte _        alt@(LitAlt {}) = pprPanic "caseRules" (ppr alt)
tx_con_tte platform (DataAlt dc)  -- See Note [caseRules for tagToEnum]
  = Just $ LitAlt $ mkLitInt platform $ toInteger $ dataConTagZ dc

-- Fix up an alternative when the scrutinee was (dataToTag v):
-- an Int# literal alternative becomes the data constructor with that tag,
-- or Nothing when the tag is out of range (unreachable alternative).
tx_con_dtt :: Type -> AltCon -> Maybe AltCon
tx_con_dtt _  DEFAULT = Just DEFAULT
tx_con_dtt ty (LitAlt (LitNumber LitNumInt i))
   | tag >= 0
   , tag < n_data_cons
   = Just (DataAlt (data_cons !! tag))   -- tag is zero-indexed, as is (!!)
   | otherwise
   = Nothing
   where
     tag         = fromInteger i :: ConTagZ
     tc          = tyConAppTyCon ty
     n_data_cons = tyConFamilySize tc
     data_cons   = tyConDataCons tc

tx_con_dtt _ alt = pprPanic "caseRules" (ppr alt)


{- Note [caseRules for tagToEnum]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We want to transform
   case tagToEnum x of
     False -> e1
     True  -> e2
into
   case x of
     0# -> e1
     1# -> e2

This rule eliminates a lot of boilerplate. For
  if (x>y) then e2 else e1
we generate
  case tagToEnum (x ># y) of
    False -> e1
    True  -> e2
and it is nice to then get rid of the tagToEnum.

Beware (#14768): avoid the temptation to map constructor 0 to
DEFAULT, in the hope of getting this
  case (x ># y) of
    DEFAULT -> e1
    1#      -> e2
That fails utterly in the case of
   data Colour = Red | Green | Blue
   case tagToEnum x of
      DEFAULT -> e1
      Red     -> e2

We don't want to get this!
   case x of
      DEFAULT -> e1
      DEFAULT -> e2

Instead, we deal with turning one branch into DEFAULT in GHC.Core.Opt.Simplify.Utils
(add_default in mkCase3).

Note [caseRules for dataToTag]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See also Note [dataToTag# magic].

We want to transform
  case dataToTag x of
    DEFAULT -> e1
    1# -> e2
into
  case x of
    DEFAULT -> e1
    (:) _ _ -> e2

Note the need for some wildcard binders in
the 'cons' case.

For the time, we only apply this transformation when the type of `x` is a type
headed by a normal tycon. In particular, we do not apply this in the case of a
data family tycon, since that would require carefully applying coercion(s)
between the data family and the data family instance's representation type,
which caseRules isn't currently engineered to handle (#14680).

Note [Unreachable caseRules alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Take care if we see something like
   case dataToTag x of
      DEFAULT -> e1
      -1# -> e2
      100 -> e3
because there isn't a data constructor with tag -1 or 100. In this case the
out-of-range alternative is dead code -- we know the range of tags for x.

Hence caseRules returns (AltCon -> Maybe AltCon), with Nothing indicating
an alternative that is unreachable.

You may wonder how this can happen: check out #15436.
-} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/Monad.hs0000644000000000000000000007635714472400112020701 0ustar0000000000000000{- (c) The AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE DeriveFunctor #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Core.Opt.Monad ( -- * Configuration of the core-to-core passes CoreToDo(..), runWhen, runMaybe, SimplMode(..), FloatOutSwitches(..), pprPassDetails, -- * Plugins CorePluginPass, bindsOnlyPass, -- * Counting SimplCount, doSimplTick, doFreeSimplTick, simplCountN, pprSimplCount, plusSimplCount, zeroSimplCount, isZeroSimplCount, hasDetailedCounts, Tick(..), -- * The monad CoreM, runCoreM, -- ** Reading from the monad getHscEnv, getRuleBase, getModule, getDynFlags, getPackageFamInstEnv, getVisibleOrphanMods, getUniqMask, getPrintUnqualified, getSrcSpanM, -- ** Writing to the monad addSimplCount, -- ** Lifting into the monad liftIO, liftIOWithCount, -- ** Dealing with annotations getAnnotations, getFirstAnnotations, -- ** Screen output putMsg, putMsgS, errorMsg, errorMsgS, msg, fatalErrorMsg, fatalErrorMsgS, debugTraceMsg, debugTraceMsgS, ) where import GHC.Prelude hiding ( read ) import GHC.Driver.Session import GHC.Driver.Env import GHC.Core import GHC.Core.Unfold import GHC.Types.Basic ( CompilerPhase(..) 
) import GHC.Types.Annotations import GHC.Types.Var import GHC.Types.Unique.Supply import GHC.Types.Name.Env import GHC.Types.SrcLoc import GHC.Types.Error import GHC.Utils.Error ( errorDiagnostic ) import GHC.Utils.Outputable as Outputable import GHC.Utils.Logger import GHC.Utils.Monad import GHC.Data.FastString import GHC.Data.IOEnv hiding ( liftIO, failM, failWithM ) import qualified GHC.Data.IOEnv as IOEnv import GHC.Unit.Module import GHC.Unit.Module.ModGuts import GHC.Unit.External import Data.Bifunctor ( bimap ) import Data.List (intersperse, groupBy, sortBy) import Data.Ord import Data.Dynamic import Data.Map (Map) import qualified Data.Map as Map import qualified Data.Map.Strict as MapStrict import Data.Word import Control.Monad import Control.Applicative ( Alternative(..) ) import GHC.Utils.Panic (throwGhcException, GhcException(..), panic) {- ************************************************************************ * * The CoreToDo type and related types Abstraction of core-to-core passes to run. * * ************************************************************************ -} data CoreToDo -- These are diff core-to-core passes, -- which may be invoked in any order, -- as many times as you like. = CoreDoSimplify -- The core-to-core simplifier. Int -- Max iterations SimplMode | CoreDoPluginPass String CorePluginPass | CoreDoFloatInwards | CoreDoFloatOutwards FloatOutSwitches | CoreLiberateCase | CoreDoPrintCore | CoreDoStaticArgs | CoreDoCallArity | CoreDoExitify | CoreDoDemand | CoreDoCpr | CoreDoWorkerWrapper | CoreDoSpecialising | CoreDoSpecConstr | CoreCSE | CoreDoRuleCheck CompilerPhase String -- Check for non-application of rules -- matching this string | CoreDoNothing -- Useful when building up | CoreDoPasses [CoreToDo] -- lists of these things | CoreDesugar -- Right after desugaring, no simple optimisation yet! 
| CoreDesugarOpt -- CoreDesugarXXX: Not strictly a core-to-core pass, but produces -- Core output, and hence useful to pass to endPass | CoreTidy | CorePrep | CoreAddCallerCcs | CoreAddLateCcs | CoreOccurAnal instance Outputable CoreToDo where ppr (CoreDoSimplify _ _) = text "Simplifier" ppr (CoreDoPluginPass s _) = text "Core plugin: " <+> text s ppr CoreDoFloatInwards = text "Float inwards" ppr (CoreDoFloatOutwards f) = text "Float out" <> parens (ppr f) ppr CoreLiberateCase = text "Liberate case" ppr CoreDoStaticArgs = text "Static argument" ppr CoreDoCallArity = text "Called arity analysis" ppr CoreDoExitify = text "Exitification transformation" ppr CoreDoDemand = text "Demand analysis" ppr CoreDoCpr = text "Constructed Product Result analysis" ppr CoreDoWorkerWrapper = text "Worker Wrapper binds" ppr CoreDoSpecialising = text "Specialise" ppr CoreDoSpecConstr = text "SpecConstr" ppr CoreCSE = text "Common sub-expression" ppr CoreDesugar = text "Desugar (before optimization)" ppr CoreDesugarOpt = text "Desugar (after optimization)" ppr CoreTidy = text "Tidy Core" ppr CoreAddCallerCcs = text "Add caller cost-centres" ppr CoreAddLateCcs = text "Add late core cost-centres" ppr CorePrep = text "CorePrep" ppr CoreOccurAnal = text "Occurrence analysis" ppr CoreDoPrintCore = text "Print core" ppr (CoreDoRuleCheck {}) = text "Rule check" ppr CoreDoNothing = text "CoreDoNothing" ppr (CoreDoPasses passes) = text "CoreDoPasses" <+> ppr passes pprPassDetails :: CoreToDo -> SDoc pprPassDetails (CoreDoSimplify n md) = vcat [ text "Max iterations =" <+> int n , ppr md ] pprPassDetails _ = Outputable.empty data SimplMode -- See comments in GHC.Core.Opt.Simplify.Monad = SimplMode { sm_names :: [String] -- ^ Name(s) of the phase , sm_phase :: CompilerPhase , sm_uf_opts :: !UnfoldingOpts -- ^ Unfolding options , sm_rules :: !Bool -- ^ Whether RULES are enabled , sm_inline :: !Bool -- ^ Whether inlining is enabled , sm_case_case :: !Bool -- ^ Whether case-of-case is enabled , 
sm_eta_expand :: !Bool -- ^ Whether eta-expansion is enabled , sm_cast_swizzle :: !Bool -- ^ Do we swizzle casts past lambdas? , sm_pre_inline :: !Bool -- ^ Whether pre-inlining is enabled , sm_logger :: !Logger , sm_dflags :: DynFlags -- Just for convenient non-monadic access; we don't override these. -- -- Used for: -- - target platform (for `exprIsDupable` and `mkDupableAlt`) -- - Opt_DictsCheap and Opt_PedanticBottoms general flags -- - rules options (initRuleOpts) -- - inlineCheck } instance Outputable SimplMode where ppr (SimplMode { sm_phase = p, sm_names = ss , sm_rules = r, sm_inline = i , sm_cast_swizzle = cs , sm_eta_expand = eta, sm_case_case = cc }) = text "SimplMode" <+> braces ( sep [ text "Phase =" <+> ppr p <+> brackets (text (concat $ intersperse "," ss)) <> comma , pp_flag i (text "inline") <> comma , pp_flag r (text "rules") <> comma , pp_flag eta (text "eta-expand") <> comma , pp_flag cs (text "cast-swizzle") <> comma , pp_flag cc (text "case-of-case") ]) where pp_flag f s = ppUnless f (text "no") <+> s data FloatOutSwitches = FloatOutSwitches { floatOutLambdas :: Maybe Int, -- ^ Just n <=> float lambdas to top level, if -- doing so will abstract over n or fewer -- value variables -- Nothing <=> float all lambdas to top level, -- regardless of how many free variables -- Just 0 is the vanilla case: float a lambda -- iff it has no free vars floatOutConstants :: Bool, -- ^ True <=> float constants to top level, -- even if they do not escape a lambda floatOutOverSatApps :: Bool, -- ^ True <=> float out over-saturated applications -- based on arity information. -- See Note [Floating over-saturated applications] -- in GHC.Core.Opt.SetLevels floatToTopLevelOnly :: Bool -- ^ Allow floating to the top level only. 
} instance Outputable FloatOutSwitches where ppr = pprFloatOutSwitches pprFloatOutSwitches :: FloatOutSwitches -> SDoc pprFloatOutSwitches sw = text "FOS" <+> (braces $ sep $ punctuate comma $ [ text "Lam =" <+> ppr (floatOutLambdas sw) , text "Consts =" <+> ppr (floatOutConstants sw) , text "OverSatApps =" <+> ppr (floatOutOverSatApps sw) ]) -- The core-to-core pass ordering is derived from the DynFlags: runWhen :: Bool -> CoreToDo -> CoreToDo runWhen True do_this = do_this runWhen False _ = CoreDoNothing runMaybe :: Maybe a -> (a -> CoreToDo) -> CoreToDo runMaybe (Just x) f = f x runMaybe Nothing _ = CoreDoNothing {- ************************************************************************ * * Types for Plugins * * ************************************************************************ -} -- | A description of the plugin pass itself type CorePluginPass = ModGuts -> CoreM ModGuts bindsOnlyPass :: (CoreProgram -> CoreM CoreProgram) -> ModGuts -> CoreM ModGuts bindsOnlyPass pass guts = do { binds' <- pass (mg_binds guts) ; return (guts { mg_binds = binds' }) } {- ************************************************************************ * * Counting and logging * * ************************************************************************ -} getVerboseSimplStats :: (Bool -> SDoc) -> SDoc getVerboseSimplStats = getPprDebug -- For now, anyway zeroSimplCount :: DynFlags -> SimplCount isZeroSimplCount :: SimplCount -> Bool hasDetailedCounts :: SimplCount -> Bool pprSimplCount :: SimplCount -> SDoc doSimplTick :: DynFlags -> Tick -> SimplCount -> SimplCount doFreeSimplTick :: Tick -> SimplCount -> SimplCount plusSimplCount :: SimplCount -> SimplCount -> SimplCount data SimplCount = VerySimplCount !Int -- Used when don't want detailed stats | SimplCount { ticks :: !Int, -- Total ticks details :: !TickCounts, -- How many of each type n_log :: !Int, -- N log1 :: [Tick], -- Last N events; <= opt_HistorySize, -- most recent first log2 :: [Tick] -- Last opt_HistorySize events 
before that -- Having log1, log2 lets us accumulate the -- recent history reasonably efficiently } type TickCounts = Map Tick Int simplCountN :: SimplCount -> Int simplCountN (VerySimplCount n) = n simplCountN (SimplCount { ticks = n }) = n zeroSimplCount dflags -- This is where we decide whether to do -- the VerySimpl version or the full-stats version | dopt Opt_D_dump_simpl_stats dflags = SimplCount {ticks = 0, details = Map.empty, n_log = 0, log1 = [], log2 = []} | otherwise = VerySimplCount 0 isZeroSimplCount (VerySimplCount n) = n==0 isZeroSimplCount (SimplCount { ticks = n }) = n==0 hasDetailedCounts (VerySimplCount {}) = False hasDetailedCounts (SimplCount {}) = True doFreeSimplTick tick sc@SimplCount { details = dts } = sc { details = dts `addTick` tick } doFreeSimplTick _ sc = sc doSimplTick dflags tick sc@(SimplCount { ticks = tks, details = dts, n_log = nl, log1 = l1 }) | nl >= historySize dflags = sc1 { n_log = 1, log1 = [tick], log2 = l1 } | otherwise = sc1 { n_log = nl+1, log1 = tick : l1 } where sc1 = sc { ticks = tks+1, details = dts `addTick` tick } doSimplTick _ _ (VerySimplCount n) = VerySimplCount (n+1) addTick :: TickCounts -> Tick -> TickCounts addTick fm tick = MapStrict.insertWith (+) tick 1 fm plusSimplCount sc1@(SimplCount { ticks = tks1, details = dts1 }) sc2@(SimplCount { ticks = tks2, details = dts2 }) = log_base { ticks = tks1 + tks2 , details = MapStrict.unionWith (+) dts1 dts2 } where -- A hackish way of getting recent log info log_base | null (log1 sc2) = sc1 -- Nothing at all in sc2 | null (log2 sc2) = sc2 { log2 = log1 sc1 } | otherwise = sc2 plusSimplCount (VerySimplCount n) (VerySimplCount m) = VerySimplCount (n+m) plusSimplCount lhs rhs = throwGhcException . 
PprProgramError "plusSimplCount" $ vcat [ text "lhs" , pprSimplCount lhs , text "rhs" , pprSimplCount rhs ] -- We use one or the other consistently pprSimplCount (VerySimplCount n) = text "Total ticks:" <+> int n pprSimplCount (SimplCount { ticks = tks, details = dts, log1 = l1, log2 = l2 }) = vcat [text "Total ticks: " <+> int tks, blankLine, pprTickCounts dts, getVerboseSimplStats $ \dbg -> if dbg then vcat [blankLine, text "Log (most recent first)", nest 4 (vcat (map ppr l1) $$ vcat (map ppr l2))] else Outputable.empty ] {- Note [Which transformations are innocuous] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ At one point (Jun 18) I wondered if some transformations (ticks) might be "innocuous", in the sense that they do not unlock a later transformation that does not occur in the same pass. If so, we could refrain from bumping the overall tick-count for such innocuous transformations, and perhaps terminate the simplifier one pass earlier. But alas I found that virtually nothing was innocuous! This Note just records what I learned, in case anyone wants to try again. These transformations are not innocuous: *** NB: I think these ones could be made innocuous EtaExpansion LetFloatFromLet LetFloatFromLet x = K (let z = e2 in Just z) prepareRhs transforms to x2 = let z=e2 in Just z x = K xs And now more let-floating can happen in the next pass, on x2 PreInlineUnconditionally Example in spectral/cichelli/Auxil hinsert = ...let lo = e in let j = ...lo... in case x of False -> () True -> case lo of I# lo' -> ...j... When we PreInlineUnconditionally j, lo's occ-info changes to once, so it can be PreInlineUnconditionally in the next pass, and a cascade of further things can happen. PostInlineUnconditionally let x = e in let y = ...x.. in case .. of { A -> ...x...y... B -> ...x...y... } Current postinlineUnconditinaly will inline y, and then x; sigh. 
But PostInlineUnconditionally might also unlock subsequent transformations for the same reason as PreInlineUnconditionally, so it's probably not innocuous anyway. KnownBranch, BetaReduction: May drop chunks of code, and thereby enable PreInlineUnconditionally for some let-binding which now occurs once EtaExpansion: Example in imaginary/digits-of-e1 fail = \void. e where e :: IO () --> etaExpandRhs fail = \void. (\s. (e |> g) s) |> sym g where g :: IO () ~ S -> (S,()) --> Next iteration of simplify fail1 = \void. \s. (e |> g) s fail = fail1 |> Void# -> sym g And now inline 'fail' CaseMerge: case x of y { DEFAULT -> case y of z { pi -> ei } alts2 } ---> CaseMerge case x of { pi -> let z = y in ei ; alts2 } The "let z=y" case-binder-swap gets dealt with in the next pass -} pprTickCounts :: Map Tick Int -> SDoc pprTickCounts counts = vcat (map pprTickGroup groups) where groups :: [[(Tick,Int)]] -- Each group shares a common tag -- toList returns common tags adjacent groups = groupBy same_tag (Map.toList counts) same_tag (tick1,_) (tick2,_) = tickToTag tick1 == tickToTag tick2 pprTickGroup :: [(Tick, Int)] -> SDoc pprTickGroup group@((tick1,_):_) = hang (int (sum [n | (_,n) <- group]) <+> text (tickString tick1)) 2 (vcat [ int n <+> pprTickCts tick -- flip as we want largest first | (tick,n) <- sortBy (flip (comparing snd)) group]) pprTickGroup [] = panic "pprTickGroup" data Tick -- See Note [Which transformations are innocuous] = PreInlineUnconditionally Id | PostInlineUnconditionally Id | UnfoldingDone Id | RuleFired FastString -- Rule name | LetFloatFromLet | EtaExpansion Id -- LHS binder | EtaReduction Id -- Binder on outer lambda | BetaReduction Id -- Lambda binder | CaseOfCase Id -- Bndr on *inner* case | KnownBranch Id -- Case binder | CaseMerge Id -- Binder on outer case | AltMerge Id -- Case binder | CaseElim Id -- Case binder | CaseIdentity Id -- Case binder | FillInCaseDefault Id -- Case binder | SimplifierDone -- Ticked at each iteration of the simplifier 
instance Outputable Tick where ppr tick = text (tickString tick) <+> pprTickCts tick instance Eq Tick where a == b = case a `cmpTick` b of EQ -> True _ -> False instance Ord Tick where compare = cmpTick tickToTag :: Tick -> Int tickToTag (PreInlineUnconditionally _) = 0 tickToTag (PostInlineUnconditionally _) = 1 tickToTag (UnfoldingDone _) = 2 tickToTag (RuleFired _) = 3 tickToTag LetFloatFromLet = 4 tickToTag (EtaExpansion _) = 5 tickToTag (EtaReduction _) = 6 tickToTag (BetaReduction _) = 7 tickToTag (CaseOfCase _) = 8 tickToTag (KnownBranch _) = 9 tickToTag (CaseMerge _) = 10 tickToTag (CaseElim _) = 11 tickToTag (CaseIdentity _) = 12 tickToTag (FillInCaseDefault _) = 13 tickToTag SimplifierDone = 16 tickToTag (AltMerge _) = 17 tickString :: Tick -> String tickString (PreInlineUnconditionally _) = "PreInlineUnconditionally" tickString (PostInlineUnconditionally _)= "PostInlineUnconditionally" tickString (UnfoldingDone _) = "UnfoldingDone" tickString (RuleFired _) = "RuleFired" tickString LetFloatFromLet = "LetFloatFromLet" tickString (EtaExpansion _) = "EtaExpansion" tickString (EtaReduction _) = "EtaReduction" tickString (BetaReduction _) = "BetaReduction" tickString (CaseOfCase _) = "CaseOfCase" tickString (KnownBranch _) = "KnownBranch" tickString (CaseMerge _) = "CaseMerge" tickString (AltMerge _) = "AltMerge" tickString (CaseElim _) = "CaseElim" tickString (CaseIdentity _) = "CaseIdentity" tickString (FillInCaseDefault _) = "FillInCaseDefault" tickString SimplifierDone = "SimplifierDone" pprTickCts :: Tick -> SDoc pprTickCts (PreInlineUnconditionally v) = ppr v pprTickCts (PostInlineUnconditionally v)= ppr v pprTickCts (UnfoldingDone v) = ppr v pprTickCts (RuleFired v) = ppr v pprTickCts LetFloatFromLet = Outputable.empty pprTickCts (EtaExpansion v) = ppr v pprTickCts (EtaReduction v) = ppr v pprTickCts (BetaReduction v) = ppr v pprTickCts (CaseOfCase v) = ppr v pprTickCts (KnownBranch v) = ppr v pprTickCts (CaseMerge v) = ppr v pprTickCts (AltMerge v) = 
ppr v pprTickCts (CaseElim v) = ppr v pprTickCts (CaseIdentity v) = ppr v pprTickCts (FillInCaseDefault v) = ppr v pprTickCts _ = Outputable.empty cmpTick :: Tick -> Tick -> Ordering cmpTick a b = case (tickToTag a `compare` tickToTag b) of GT -> GT EQ -> cmpEqTick a b LT -> LT cmpEqTick :: Tick -> Tick -> Ordering cmpEqTick (PreInlineUnconditionally a) (PreInlineUnconditionally b) = a `compare` b cmpEqTick (PostInlineUnconditionally a) (PostInlineUnconditionally b) = a `compare` b cmpEqTick (UnfoldingDone a) (UnfoldingDone b) = a `compare` b cmpEqTick (RuleFired a) (RuleFired b) = a `uniqCompareFS` b cmpEqTick (EtaExpansion a) (EtaExpansion b) = a `compare` b cmpEqTick (EtaReduction a) (EtaReduction b) = a `compare` b cmpEqTick (BetaReduction a) (BetaReduction b) = a `compare` b cmpEqTick (CaseOfCase a) (CaseOfCase b) = a `compare` b cmpEqTick (KnownBranch a) (KnownBranch b) = a `compare` b cmpEqTick (CaseMerge a) (CaseMerge b) = a `compare` b cmpEqTick (AltMerge a) (AltMerge b) = a `compare` b cmpEqTick (CaseElim a) (CaseElim b) = a `compare` b cmpEqTick (CaseIdentity a) (CaseIdentity b) = a `compare` b cmpEqTick (FillInCaseDefault a) (FillInCaseDefault b) = a `compare` b cmpEqTick _ _ = EQ {- ************************************************************************ * * Monad and carried data structure definitions * * ************************************************************************ -} data CoreReader = CoreReader { cr_hsc_env :: HscEnv, cr_rule_base :: RuleBase, cr_module :: Module, cr_print_unqual :: PrintUnqualified, cr_loc :: SrcSpan, -- Use this for log/error messages so they -- are at least tagged with the right source file cr_visible_orphan_mods :: !ModuleSet, cr_uniq_mask :: !Char -- Mask for creating unique values } -- Note: CoreWriter used to be defined with data, rather than newtype. If it -- is defined that way again, the cw_simpl_count field, at least, must be -- strict to avoid a space leak (#7702). 
newtype CoreWriter = CoreWriter { cw_simpl_count :: SimplCount } emptyWriter :: DynFlags -> CoreWriter emptyWriter dflags = CoreWriter { cw_simpl_count = zeroSimplCount dflags } plusWriter :: CoreWriter -> CoreWriter -> CoreWriter plusWriter w1 w2 = CoreWriter { cw_simpl_count = (cw_simpl_count w1) `plusSimplCount` (cw_simpl_count w2) } type CoreIOEnv = IOEnv CoreReader -- | The monad used by Core-to-Core passes to register simplification statistics. -- Also used to have common state (in the form of UniqueSupply) for generating Uniques. newtype CoreM a = CoreM { unCoreM :: CoreIOEnv (a, CoreWriter) } deriving (Functor) instance Monad CoreM where mx >>= f = CoreM $ do (x, w1) <- unCoreM mx (y, w2) <- unCoreM (f x) let w = w1 `plusWriter` w2 return $ seq w (y, w) -- forcing w before building the tuple avoids a space leak -- (#7702) instance Applicative CoreM where pure x = CoreM $ nop x (<*>) = ap m *> k = m >>= \_ -> k instance Alternative CoreM where empty = CoreM Control.Applicative.empty m <|> n = CoreM (unCoreM m <|> unCoreM n) instance MonadPlus CoreM instance MonadUnique CoreM where getUniqueSupplyM = do mask <- read cr_uniq_mask liftIO $! mkSplitUniqSupply mask getUniqueM = do mask <- read cr_uniq_mask liftIO $! 
uniqFromMask mask runCoreM :: HscEnv -> RuleBase -> Char -- ^ Mask -> Module -> ModuleSet -> PrintUnqualified -> SrcSpan -> CoreM a -> IO (a, SimplCount) runCoreM hsc_env rule_base mask mod orph_imps print_unqual loc m = liftM extract $ runIOEnv reader $ unCoreM m where reader = CoreReader { cr_hsc_env = hsc_env, cr_rule_base = rule_base, cr_module = mod, cr_visible_orphan_mods = orph_imps, cr_print_unqual = print_unqual, cr_loc = loc, cr_uniq_mask = mask } extract :: (a, CoreWriter) -> (a, SimplCount) extract (value, writer) = (value, cw_simpl_count writer) {- ************************************************************************ * * Core combinators, not exported * * ************************************************************************ -} nop :: a -> CoreIOEnv (a, CoreWriter) nop x = do r <- getEnv return (x, emptyWriter $ (hsc_dflags . cr_hsc_env) r) read :: (CoreReader -> a) -> CoreM a read f = CoreM $ getEnv >>= (\r -> nop (f r)) write :: CoreWriter -> CoreM () write w = CoreM $ return ((), w) -- \subsection{Lifting IO into the monad} -- | Lift an 'IOEnv' operation into 'CoreM' liftIOEnv :: CoreIOEnv a -> CoreM a liftIOEnv mx = CoreM (mx >>= (\x -> nop x)) instance MonadIO CoreM where liftIO = liftIOEnv . 
IOEnv.liftIO -- | Lift an 'IO' operation into 'CoreM' while consuming its 'SimplCount' liftIOWithCount :: IO (SimplCount, a) -> CoreM a liftIOWithCount what = liftIO what >>= (\(count, x) -> addSimplCount count >> return x) {- ************************************************************************ * * Reader, writer and state accessors * * ************************************************************************ -} getHscEnv :: CoreM HscEnv getHscEnv = read cr_hsc_env getRuleBase :: CoreM RuleBase getRuleBase = read cr_rule_base getVisibleOrphanMods :: CoreM ModuleSet getVisibleOrphanMods = read cr_visible_orphan_mods getPrintUnqualified :: CoreM PrintUnqualified getPrintUnqualified = read cr_print_unqual getSrcSpanM :: CoreM SrcSpan getSrcSpanM = read cr_loc addSimplCount :: SimplCount -> CoreM () addSimplCount count = write (CoreWriter { cw_simpl_count = count }) getUniqMask :: CoreM Char getUniqMask = read cr_uniq_mask -- Convenience accessors for useful fields of HscEnv instance HasDynFlags CoreM where getDynFlags = fmap hsc_dflags getHscEnv instance HasLogger CoreM where getLogger = fmap hsc_logger getHscEnv instance HasModule CoreM where getModule = read cr_module getPackageFamInstEnv :: CoreM PackageFamInstEnv getPackageFamInstEnv = do hsc_env <- getHscEnv eps <- liftIO $ hscEPS hsc_env return $ eps_fam_inst_env eps {- ************************************************************************ * * Dealing with annotations * * ************************************************************************ -} -- | Get all annotations of a given type. This happens lazily, that is -- no deserialization will take place until the [a] is actually demanded and -- the [a] can also be empty (the UniqFM is not filtered). -- -- This should be done once at the start of a Core-to-Core pass that uses -- annotations. 
-- -- See Note [Annotations] getAnnotations :: Typeable a => ([Word8] -> a) -> ModGuts -> CoreM (ModuleEnv [a], NameEnv [a]) getAnnotations deserialize guts = do hsc_env <- getHscEnv ann_env <- liftIO $ prepareAnnotations hsc_env (Just guts) return (deserializeAnns deserialize ann_env) -- | Get at most one annotation of a given type per annotatable item. getFirstAnnotations :: Typeable a => ([Word8] -> a) -> ModGuts -> CoreM (ModuleEnv a, NameEnv a) getFirstAnnotations deserialize guts = bimap mod name <$> getAnnotations deserialize guts where mod = mapModuleEnv head . filterModuleEnv (const $ not . null) name = mapNameEnv head . filterNameEnv (not . null) {- Note [Annotations] ~~~~~~~~~~~~~~~~~~ A Core-to-Core pass that wants to make use of annotations calls getAnnotations or getFirstAnnotations at the beginning to obtain a UniqFM with annotations of a specific type. This produces all annotations from interface files read so far. However, annotations from interface files read during the pass will not be visible until getAnnotations is called again. This is similar to how rules work and probably isn't too bad. The current implementation could be optimised a bit: when looking up annotations for a thing from the HomePackageTable, we could search directly in the module where the thing is defined rather than building one UniqFM which contains all annotations we know of. This would work because annotations can only be given to things defined in the same module. However, since we would only want to deserialise every annotation once, we would have to build a cache for every module in the HTP. In the end, it's probably not worth it as long as we aren't using annotations heavily. 
************************************************************************ * * Direct screen output * * ************************************************************************ -} msg :: MessageClass -> SDoc -> CoreM () msg msg_class doc = do logger <- getLogger loc <- getSrcSpanM unqual <- getPrintUnqualified let sty = case msg_class of MCDiagnostic _ _ -> err_sty MCDump -> dump_sty _ -> user_sty err_sty = mkErrStyle unqual user_sty = mkUserStyle unqual AllTheWay dump_sty = mkDumpStyle unqual liftIO $ logMsg logger msg_class loc (withPprStyle sty doc) -- | Output a String message to the screen putMsgS :: String -> CoreM () putMsgS = putMsg . text -- | Output a message to the screen putMsg :: SDoc -> CoreM () putMsg = msg MCInfo -- | Output an error to the screen. Does not cause the compiler to die. errorMsgS :: String -> CoreM () errorMsgS = errorMsg . text -- | Output an error to the screen. Does not cause the compiler to die. errorMsg :: SDoc -> CoreM () errorMsg doc = msg errorDiagnostic doc -- | Output a fatal error to the screen. Does not cause the compiler to die. fatalErrorMsgS :: String -> CoreM () fatalErrorMsgS = fatalErrorMsg . text -- | Output a fatal error to the screen. Does not cause the compiler to die. fatalErrorMsg :: SDoc -> CoreM () fatalErrorMsg = msg MCFatal -- | Output a string debugging message at verbosity level of @-v@ or higher debugTraceMsgS :: String -> CoreM () debugTraceMsgS = debugTraceMsg . 
text -- | Outputs a debugging message at verbosity level of @-v@ or higher debugTraceMsg :: SDoc -> CoreM () debugTraceMsg = msg MCDump ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/OccurAnal.hs0000644000000000000000000041120714472400112021475 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} {- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 ************************************************************************ * * \section[OccurAnal]{Occurrence analysis pass} * * ************************************************************************ The occurrence analyser re-typechecks a core expression, returning a new core expression with (hopefully) improved usage information. -} module GHC.Core.Opt.OccurAnal ( occurAnalysePgm, occurAnalyseExpr, zapLambdaBndrs ) where import GHC.Prelude import GHC.Core import GHC.Core.FVs import GHC.Core.Utils ( exprIsTrivial, isDefaultAlt, isExpandableApp, stripTicksTopE, mkTicks ) import GHC.Core.Opt.Arity ( joinRhsArity ) import GHC.Core.Coercion import GHC.Core.Type import GHC.Core.TyCo.FVs( tyCoVarsOfMCo ) import GHC.Data.Maybe( isJust, orElse ) import GHC.Data.Graph.Directed ( SCC(..), Node(..) 
                               , stronglyConnCompFromEdgedVerticesUniq
                               , stronglyConnCompFromEdgedVerticesUniqR )
import GHC.Types.Unique
import GHC.Types.Unique.FM
import GHC.Types.Unique.Set
import GHC.Types.Id
import GHC.Types.Id.Info
import GHC.Types.Basic
import GHC.Types.Tickish
import GHC.Types.Var.Set
import GHC.Types.Var.Env
import GHC.Types.Var
import GHC.Types.Demand ( argOneShots, argsOneShots )
import GHC.Utils.Outputable
import GHC.Utils.Panic
import GHC.Utils.Panic.Plain
import GHC.Utils.Misc
import GHC.Utils.Trace
import GHC.Builtin.Names( runRWKey )
import GHC.Unit.Module( Module )

import Data.List (mapAccumL, mapAccumR)

{-
************************************************************************
*                                                                      *
    occurAnalysePgm, occurAnalyseExpr
*                                                                      *
************************************************************************

Here's the externally-callable interface:
-}

-- | Do occurrence analysis, and discard occurrence info returned
occurAnalyseExpr :: CoreExpr -> CoreExpr
occurAnalyseExpr expr
  = expr'
  where
    (WithUsageDetails _ expr') = occAnal initOccEnv expr

-- | Do occurrence analysis on an entire program, right-to-left so that
-- usage information flows from each binding's scope into the binding itself.
-- The local rules for imported Ids both keep things alive (initial_uds)
-- and induce extra dependency edges (imp_rule_edges).
occurAnalysePgm :: Module         -- Used only in debug output
                -> (Id -> Bool)         -- Active unfoldings
                -> (Activation -> Bool) -- Active rules
                -> [CoreRule]           -- Local rules for imported Ids
                -> CoreProgram -> CoreProgram
occurAnalysePgm this_mod active_unf active_rule imp_rules binds
  | isEmptyDetails final_usage
  = occ_anald_binds

  | otherwise   -- See Note [Glomming]
  = warnPprTrace True "Glomming in"
        (hang (ppr this_mod <> colon) 2 (ppr final_usage))
    occ_anald_glommed_binds
  where
    init_env = initOccEnv { occ_rule_act = active_rule
                          , occ_unf_act  = active_unf }

    (WithUsageDetails final_usage occ_anald_binds) = go init_env binds
    (WithUsageDetails _ occ_anald_glommed_binds) = occAnalRecBind init_env TopLevel
                                                    imp_rule_edges
                                                    (flattenBinds binds)
                                                    initial_uds
          -- It's crucial to re-analyse the glommed-together bindings
          -- so that we establish the right loop breakers. Otherwise
          -- we can easily create an infinite loop (#9583 is an example)
          --
          -- Also crucial to re-analyse the /original/ bindings
          -- in case the first pass accidentally discarded as dead code
          -- a binding that was actually needed (albeit before its
          -- definition site).  #17724 threw this up.

    initial_uds = addManyOccs emptyDetails (rulesFreeVars imp_rules)
    -- The RULES declarations keep things alive!

    -- imp_rule_edges maps a top-level local binder 'f' to the
    -- RHS free vars of any IMP-RULE, a local RULE for an imported function,
    -- where 'f' appears on the LHS
    --   e.g.  RULE foldr f = blah
    --         imp_rule_edges contains f :-> fvs(blah)
    -- We treat such RULES as extra rules for 'f'
    -- See Note [Preventing loops due to imported functions rules]
    imp_rule_edges :: ImpRuleEdges
    imp_rule_edges = foldr (plusVarEnv_C (++)) emptyVarEnv
                            [ mapVarEnv (const [(act,rhs_fvs)]) $ getUniqSet $
                              exprsFreeIds args `delVarSetList` bndrs
                            | Rule { ru_act = act, ru_bndrs = bndrs
                                   , ru_args = args, ru_rhs = rhs } <- imp_rules
                                   -- Not BuiltinRules; see Note [Plugin rules]
                            , let rhs_fvs = exprFreeIds rhs `delVarSetList` bndrs ]

    -- Analyse the top-level bindings right-to-left, threading the
    -- usage details of everything below through each binding.
    go :: OccEnv -> [CoreBind] -> WithUsageDetails [CoreBind]
    go !_ []
        = WithUsageDetails initial_uds []
    go env (bind:binds)
        = WithUsageDetails final_usage (bind' ++ binds')
        where
          (WithUsageDetails bs_usage binds')   = go env binds
          (WithUsageDetails final_usage bind') = occAnalBind env TopLevel
                                                    imp_rule_edges bind bs_usage

{- *********************************************************************
*                                                                      *
                IMP-RULES
         Local rules for imported functions
*                                                                      *
********************************************************************* -}

type ImpRuleEdges = IdEnv [(Activation, VarSet)]
     -- Mapping from a local Id 'f' to info about its IMP-RULES,
     -- i.e. /local/ rules for an imported Id that mention 'f' on the LHS
     -- We record (a) its Activation and (b) the RHS free vars
     -- See Note [IMP-RULES: local rules for imported functions]

-- | An empty 'ImpRuleEdges': no IMP-RULE info at all
noImpRuleEdges :: ImpRuleEdges
noImpRuleEdges = emptyVarEnv

-- | Look up the IMP-RULE info for a binder; empty list if none
lookupImpRules :: ImpRuleEdges -> Id -> [(Activation,VarSet)]
lookupImpRules imp_rule_edges bndr
  = case lookupVarEnv imp_rule_edges bndr of
      Nothing -> []
      Just vs -> vs

impRulesScopeUsage :: [(Activation,VarSet)] -> UsageDetails
-- Variables mentioned in RHS of an IMP-RULE for the bndr,
-- whether active or not
impRulesScopeUsage imp_rules_info
  = foldr add emptyDetails imp_rules_info
  where
    add (_,vs) usage = addManyOccs usage vs

-- | Union of the RHS free vars of the /active/ IMP-RULES,
-- restricted to the given binder set
impRulesActiveFvs :: (Activation -> Bool) -> VarSet
                  -> [(Activation,VarSet)] -> VarSet
impRulesActiveFvs is_active bndr_set vs
  = foldr add emptyVarSet vs `intersectVarSet` bndr_set
  where
    add (act,vs) acc | is_active act = vs `unionVarSet` acc
                     | otherwise     = acc

{- Note [IMP-RULES: local rules for imported functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We quite often have
  * A /local/ rule
  * for an /imported/ function
like this:
  foo x = blah
  {-# RULE "map/foo" forall xs. map foo xs = xs #-}
We call them IMP-RULES.  They are important in
practice, and occur a lot in the libraries.

IMP-RULES are held in mg_rules of ModGuts, and passed in to
occurAnalysePgm.

Main Invariant:

* Throughout, we treat an IMP-RULE that mentions 'f' on its LHS
  just like a RULE for f.

Note [IMP-RULES: unavoidable loops]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
   f = /\a. B.g a
   RULE B.g Int = 1 + f Int
Note that
  * The RULE is for an imported function.
  * f is non-recursive
Now we can get
   f Int --> B.g Int           Inlining f
         --> 1 + f Int         Firing RULE
and so the simplifier goes into an infinite loop. This
would not happen if the RULE was for a local function,
because we keep track of dependencies through rules.  But
that is pretty much impossible to do for imported Ids.  Suppose
f's definition had been
    f = /\a.
C.h a where (by some long and devious process), C.h eventually inlines to B.g. We could only spot such loops by exhaustively following unfoldings of C.h etc, in case we reach B.g, and hence (via the RULE) f. We regard this potential infinite loop as a *programmer* error. It's up the programmer not to write silly rules like RULE f x = f x and the example above is just a more complicated version. Note [Specialising imported functions] (referred to from Specialise) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For *automatically-generated* rules, the programmer can't be responsible for the "programmer error" in Note [IMP-RULES: unavoidable loops]. In particular, consider specialising a recursive function defined in another module. If we specialise a recursive function B.g, we get g_spec = .....(B.g Int)..... RULE B.g Int = g_spec Here, g_spec doesn't look recursive, but when the rule fires, it becomes so. And if B.g was mutually recursive, the loop might not be as obvious as it is here. To avoid this, * When specialising a function that is a loop breaker, give a NOINLINE pragma to the specialised function Note [Preventing loops due to imported functions rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider: import GHC.Base (foldr) {-# RULES "filterList" forall p. foldr (filterFB (:) p) [] = filter p #-} filter p xs = build (\c n -> foldr (filterFB c p) n xs) filterFB c p = ... f = filter p xs Note that filter is not a loop-breaker, so what happens is: f = filter p xs = {inline} build (\c n -> foldr (filterFB c p) n xs) = {inline} foldr (filterFB (:) p) [] xs = {RULE} filter p xs We are in an infinite loop. A more elaborate example (that I actually saw in practice when I went to mark GHC.List.filter as INLINABLE) is as follows. 
Say I have this module: {-# LANGUAGE RankNTypes #-} module GHCList where import Prelude hiding (filter) import GHC.Base (build) {-# INLINABLE filter #-} filter :: (a -> Bool) -> [a] -> [a] filter p [] = [] filter p (x:xs) = if p x then x : filter p xs else filter p xs {-# NOINLINE [0] filterFB #-} filterFB :: (a -> b -> b) -> (a -> Bool) -> a -> b -> b filterFB c p x r | p x = x `c` r | otherwise = r {-# RULES "filter" [~1] forall p xs. filter p xs = build (\c n -> foldr (filterFB c p) n xs) "filterList" [1] forall p. foldr (filterFB (:) p) [] = filter p #-} Then (because RULES are applied inside INLINABLE unfoldings, but inlinings are not), the unfolding given to "filter" in the interface file will be: filter p [] = [] filter p (x:xs) = if p x then x : build (\c n -> foldr (filterFB c p) n xs) else build (\c n -> foldr (filterFB c p) n xs Note that because this unfolding does not mention "filter", filter is not marked as a strong loop breaker. Therefore at a use site in another module: filter p xs = {inline} case xs of [] -> [] (x:xs) -> if p x then x : build (\c n -> foldr (filterFB c p) n xs) else build (\c n -> foldr (filterFB c p) n xs) build (\c n -> foldr (filterFB c p) n xs) = {inline} foldr (filterFB (:) p) [] xs = {RULE} filter p xs And we are in an infinite loop again, except that this time the loop is producing an infinitely large *term* (an unrolling of filter) and so the simplifier finally dies with "ticks exhausted" SOLUTION: we treat the rule "filterList" as an extra rule for 'filterFB' because it mentions 'filterFB' on the LHS. This is the Main Invariant in Note [IMP-RULES: local rules for imported functions]. So, during loop-breaker analysis: - for each active RULE for a local function 'f' we add an edge between 'f' and the local FVs of the rule RHS - for each active RULE for an *imported* function we add dependency edges between the *local* FVS of the rule LHS and the *local* FVS of the rule RHS. 
Even with this extra hack we aren't always going to get things right. For example, it might be that the rule LHS mentions an imported Id, and another module has a RULE that can rewrite that imported Id to one of our local Ids. Note [Plugin rules] ~~~~~~~~~~~~~~~~~~~ Conal Elliott (#11651) built a GHC plugin that added some BuiltinRules (for imported Ids) to the mg_rules field of ModGuts, to do some domain-specific transformations that could not be expressed with an ordinary pattern-matching CoreRule. But then we can't extract the dependencies (in imp_rule_edges) from ru_rhs etc, because a BuiltinRule doesn't have any of that stuff. So we simply assume that BuiltinRules have no dependencies, and filter them out from the imp_rule_edges comprehension. Note [Glomming] ~~~~~~~~~~~~~~~ RULES for imported Ids can make something at the top refer to something at the bottom: foo = ...(B.f @Int)... $sf = blah RULE: B.f @Int = $sf Applying this rule makes foo refer to $sf, although foo doesn't appear to depend on $sf. (And, as in Note [IMP-RULES: local rules for imported functions], the dependency might be more indirect. For example, foo might mention C.t rather than B.f, where C.t eventually inlines to B.f.) NOTICE that this cannot happen for rules whose head is a locally-defined function, because we accurately track dependencies through RULES. It only happens for rules whose head is an imported function (B.f in the example above). Solution: - When simplifying, bring all top level identifiers into scope at the start, ignoring the Rec/NonRec structure, so that when 'h' pops up in f's rhs, we find it in the in-scope set (as the simplifier generally expects). This happens in simplTopBinds. - In the occurrence analyser, if there are any out-of-scope occurrences that pop out of the top, which will happen after firing the rule: f = \x -> h x h = \y -> 3 then just glom all the bindings into a single Rec, so that the *next* iteration of the occurrence analyser will sort them all out. 
This part happens in occurAnalysePgm. -} {- ************************************************************************ * * Bindings * * ************************************************************************ Note [Recursive bindings: the grand plan] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Loop breaking is surprisingly subtle. First read the section 4 of "Secrets of the GHC inliner". This describes our basic plan. We avoid infinite inlinings by choosing loop breakers, and ensuring that a loop breaker cuts each loop. See also Note [Inlining and hs-boot files] in GHC.Core.ToIface, which deals with a closely related source of infinite loops. When we come across a binding group Rec { x1 = r1; ...; xn = rn } we treat it like this (occAnalRecBind): 1. Note [Forming Rec groups] Occurrence-analyse each right hand side, and build a "Details" for each binding to capture the results. Wrap the details in a LetrecNode, ready for SCC analysis. All this is done by makeNode. The edges of this graph are the "scope edges". 2. Do SCC-analysis on these Nodes: - Each CyclicSCC will become a new Rec - Each AcyclicSCC will become a new NonRec The key property is that every free variable of a binding is accounted for by the scope edges, so that when we are done everything is still in scope. 3. For each AcyclicSCC, just make a NonRec binding. 4. For each CyclicSCC of the scope-edge SCC-analysis in (2), we identify suitable loop-breakers to ensure that inlining terminates. This is done by occAnalRec. To do so, form the loop-breaker graph, do SCC analysis. For each CyclicSCC we choose a loop breaker, delete all edges to that node, re-analyse the SCC, and iterate. See Note [Choosing loop breakers] for the details Note [Dead code] ~~~~~~~~~~~~~~~~ Dropping dead code for a cyclic Strongly Connected Component is done in a very simple way: the entire SCC is dropped if none of its binders are mentioned in the body; otherwise the whole thing is kept. 
The key observation is that dead code elimination happens after dependency analysis: so 'occAnalBind' processes SCCs instead of the original term's binding groups. Thus 'occAnalBind' does indeed drop 'f' in an example like letrec f = ...g... g = ...(...g...)... in ...g... when 'g' no longer uses 'f' at all (eg 'f' does not occur in a RULE in 'g'). 'occAnalBind' first consumes 'CyclicSCC g' and then it consumes 'AcyclicSCC f', where 'body_usage' won't contain 'f'. Note [Forming Rec groups] ~~~~~~~~~~~~~~~~~~~~~~~~~ The key point about the "Forming Rec groups" step is that it /preserves scoping/. If 'x' is mentioned, it had better be bound somewhere. So if we start with Rec { f = ...h... ; g = ...f... ; h = ...f... } we can split into SCCs Rec { f = ...h... ; h = ..f... } NonRec { g = ...f... } We put bindings {f = ef; g = eg } in a Rec group if "f uses g" and "g uses f", no matter how indirectly. We do a SCC analysis with an edge f -> g if "f mentions g". That is, g is free in: a) the rhs 'ef' b) or the RHS of a rule for f, whether active or inactive Note [Rules are extra RHSs] c) or the LHS or a rule for f, whether active or inactive Note [Rule dependency info] d) the RHS of an /active/ local IMP-RULE Note [IMP-RULES: local rules for imported functions] (b) and (c) apply regardless of the activation of the RULE, because even if the rule is inactive its free variables must be bound. But (d) doesn't need to worry about this because IMP-RULES are always notionally at the bottom of the file. * Note [Rules are extra RHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A RULE for 'f' is like an extra RHS for 'f'. That way the "parent" keeps the specialised "children" alive. If the parent dies (because it isn't referenced any more), then the children will die too (unless they are already referenced directly). So in Example [eftInt], eftInt and eftIntFB will be put in the same Rec, even though their 'main' RHSs are both non-recursive. 
We must also include inactive rules, so that their free vars remain in scope. * Note [Rule dependency info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The VarSet in a RuleInfo is used for dependency analysis in the occurrence analyser. We must track free vars in *both* lhs and rhs. Hence use of idRuleVars, rather than idRuleRhsVars in occAnalBind. Why both? Consider x = y RULE f x = v+4 Then if we substitute y for x, we'd better do so in the rule's LHS too, so we'd better ensure the RULE appears to mention 'x' as well as 'v' * Note [Rules are visible in their own rec group] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want the rules for 'f' to be visible in f's right-hand side. And we'd like them to be visible in other functions in f's Rec group. E.g. in Note [Specialisation rules] we want f' rule to be visible in both f's RHS, and fs's RHS. This means that we must simplify the RULEs first, before looking at any of the definitions. This is done by Simplify.simplRecBind, when it calls addLetIdInfo. Note [Stable unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~ None of the above stuff about RULES applies to a stable unfolding stored in a CoreUnfolding. The unfolding, if any, is simplified at the same time as the regular RHS of the function (ie *not* like Note [Rules are visible in their own rec group]), so it should be treated *exactly* like an extra RHS. Or, rather, when computing loop-breaker edges, * If f has an INLINE pragma, and it is active, we treat the INLINE rhs as f's rhs * If it's inactive, we treat f as having no rhs * If it has no INLINE pragma, we look at f's actual rhs There is a danger that we'll be sub-optimal if we see this f = ...f... [INLINE f = ..no f...] where f is recursive, but the INLINE is not. This can just about happen with a sufficiently odd set of rules; eg foo :: Int -> Int {-# INLINE [1] foo #-} foo x = x+1 bar :: Int -> Int {-# INLINE [1] bar #-} bar x = foo x + 1 {-# RULES "foo" [~1] forall x. 
foo x = bar x #-} Here the RULE makes bar recursive; but it's INLINE pragma remains non-recursive. It's tempting to then say that 'bar' should not be a loop breaker, but an attempt to do so goes wrong in two ways: a) We may get $df = ...$cfoo... $cfoo = ...$df.... [INLINE $cfoo = ...no-$df...] But we want $cfoo to depend on $df explicitly so that we put the bindings in the right order to inline $df in $cfoo and perhaps break the loop altogether. (Maybe this b) Example [eftInt] ~~~~~~~~~~~~~~~ Example (from GHC.Enum): eftInt :: Int# -> Int# -> [Int] eftInt x y = ...(non-recursive)... {-# INLINE [0] eftIntFB #-} eftIntFB :: (Int -> r -> r) -> r -> Int# -> Int# -> r eftIntFB c n x y = ...(non-recursive)... {-# RULES "eftInt" [~1] forall x y. eftInt x y = build (\ c n -> eftIntFB c n x y) "eftIntList" [1] eftIntFB (:) [] = eftInt #-} Note [Specialisation rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this group, which is typical of what SpecConstr builds: fs a = ....f (C a).... f x = ....f (C a).... {-# RULE f (C a) = fs a #-} So 'f' and 'fs' are in the same Rec group (since f refers to fs via its RULE). But watch out! If 'fs' is not chosen as a loop breaker, we may get an infinite loop: - the RULE is applied in f's RHS (see Note [Rules for recursive functions] in GHC.Core.Opt.Simplify - fs is inlined (say it's small) - now there's another opportunity to apply the RULE This showed up when compiling Control.Concurrent.Chan.getChanContents. Hence the transitive rule_fv_env stuff described in Note [Rules and loop breakers]. ------------------------------------------------------------ Note [Finding join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~ It's the occurrence analyser's job to find bindings that we can turn into join points, but it doesn't perform that transformation right away. Rather, it marks the eligible bindings as part of their occurrence data, leaving it to the simplifier (or to simpleOptPgm) to actually change the binder's 'IdDetails'. 
The simplifier then eta-expands the RHS if needed and then updates the
occurrence sites. Dividing the work this way means that the occurrence
analyser still only takes one pass, yet one can always tell the difference
between a function call and a jump by looking at the occurrence (because
the same pass changes the 'IdDetails' and propagates the binders to their
occurrence sites).

To track potential join points, we use the 'occ_tail' field of OccInfo.
A value of `AlwaysTailCalled n` indicates that every occurrence of the
variable is a tail call with `n` arguments (counting both value and type
arguments). Otherwise 'occ_tail' will be 'NoTailCallInfo'. The tail call
info flows bottom-up with the rest of 'OccInfo' until it goes on the
binder.

Note [Join points and unfoldings/rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
   let j2 y = blah
   let j x = j2 (x+x)
       {-# INLINE [2] j #-}
   in case e of { A -> j 1; B -> ...; C -> j 2 }

Before j is inlined, we'll have occurrences of j2 in
both j's RHS and in its stable unfolding.  We want to discover
j2 as a join point.  So we must do the adjustRhsUsage thing
on j's RHS.  That's why we pass mb_join_arity to calcUnfolding.

Same with rules. Suppose we have:

  let j :: Int -> Int
      j y = 2 * y
  let k :: Int -> Int -> Int
      {-# RULES "SPEC k 0" k 0 y = j y #-}
      k x y = x + 2 * y
  in case e of { A -> k 1 2; B -> k 3 5; C -> blah }

We identify k as a join point, and we want j to be a join point too.
Without the RULE it would be, and we don't want the RULE to mess it
up.  So provided the join-point arity of k matches the args of the
rule we can allow the tail-call info from the RHS of the rule to
propagate.

* Wrinkle for Rec case. In the recursive case we don't know the
  join-point arity in advance, when calling occAnalUnfolding and
  occAnalRules.  (See makeNode.)  We don't want to pass Nothing,
  because then a recursive joinrec might lose its join-point-hood
  when SpecConstr adds a RULE.  So we just make do with the
  *current* join-point-hood, stored in the Id.

  In the non-recursive case things are simple: see occAnalNonRecBind

* Wrinkle for RULES.  Suppose the example was a bit different:
      let j :: Int -> Int
          j y = 2 * y
          k :: Int -> Int -> Int
          {-# RULES "SPEC k 0" k 0 = j #-}
          k x y = x + 2 * y
      in ...
  If we eta-expanded the rule all would be well, but as it stands the
  one arg of the rule doesn't match the join-point arity of 2.

  Conceivably we could notice that a potential join point would have
  an "undersaturated" rule and account for it. This would mean we
  could make something that's been specialised a join point, for
  instance. But local bindings are rarely specialised, and being
  overly cautious about rules only costs us anything when, for some
  `j`:

  * Before specialisation, `j` has non-tail calls, so it can't be a
    join point.
  * During specialisation, `j` gets specialised and thus acquires
    rules.
  * Sometime afterward, the non-tail calls to `j` disappear (as dead
    code, say), and so now `j` *could* become a join point.

  This appears to be very rare in practice. TODO Perhaps we should
  gather statistics to be sure.

Note [Unfoldings and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We assume that anything in an unfolding occurs multiple times, since
unfoldings are often copied (that's the whole point!). But we still
need to track tail calls for the purpose of finding join points.

------------------------------------------------------------
Note [Adjusting right-hand sides]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There's a bit of a dance we need to do after analysing a lambda expression or
a right-hand side. In particular, we need to

  a) call 'markAllInsideLam' *unless* the binding is for a thunk, a one-shot
     lambda, or a non-recursive join point; and

  b) call 'markAllNonTail' *unless* the binding is for a join point, and
     the RHS has the right arity; e.g.
        join j x y = case ...
of A -> j2 p B -> j2 q in j a b Here we want the tail calls to j2 to be tail calls of the whole expression Some examples, with how the free occurrences in e (assumed not to be a value lambda) get marked: inside lam non-tail-called ------------------------------------------------------------ let x = e No Yes let f = \x -> e Yes Yes let f = \x{OneShot} -> e No Yes \x -> e Yes Yes join j x = e No No joinrec j x = e Yes No There are a few other caveats; most importantly, if we're marking a binding as 'AlwaysTailCalled', it's *going* to be a join point, so we treat it as one so that the effect cascades properly. Consequently, at the time the RHS is analysed, we won't know what adjustments to make; thus 'occAnalLamOrRhs' must return the unadjusted 'UsageDetails', to be adjusted by 'adjustRhsUsage' once join-point-hood has been decided. Thus the overall sequence taking place in 'occAnalNonRecBind' and 'occAnalRecBind' is as follows: 1. Call 'occAnalLamOrRhs' to find usage information for the RHS. 2. Call 'tagNonRecBinder' or 'tagRecBinders', which decides whether to make the binding a join point. 3. Call 'adjustRhsUsage' accordingly. (Done as part of 'tagRecBinders' when recursive.) (In the recursive case, this logic is spread between 'makeNode' and 'occAnalRec'.) 
-}

-- | The result of occurrence analysis: the usage details computed for
-- the analysed thing, paired with the (occurrence-annotated) thing itself.
data WithUsageDetails a = WithUsageDetails !UsageDetails !a

------------------------------------------------------------------
--                 occAnalBind
------------------------------------------------------------------

occAnalBind :: OccEnv           -- The incoming OccEnv
            -> TopLevelFlag
            -> ImpRuleEdges
            -> CoreBind
            -> UsageDetails             -- Usage details of scope
            -> WithUsageDetails [CoreBind] -- Of the whole let(rec)

occAnalBind !env lvl top_env (NonRec binder rhs) body_usage
  = occAnalNonRecBind env lvl top_env binder rhs body_usage
occAnalBind env lvl top_env (Rec pairs) body_usage
  = occAnalRecBind env lvl top_env pairs body_usage

-----------------
-- Analyse a non-recursive binding: may drop it as dead code,
-- tag the binder with its occurrence info, and decide join-point-hood.
occAnalNonRecBind :: OccEnv -> TopLevelFlag -> ImpRuleEdges -> Var -> CoreExpr
                  -> UsageDetails -> WithUsageDetails [CoreBind]
occAnalNonRecBind !env lvl imp_rule_edges bndr rhs body_usage
  | isTyVar bndr      -- A type let; we don't gather usage info
  = WithUsageDetails body_usage [NonRec bndr rhs]

  | not (bndr `usedIn` body_usage)    -- It's not mentioned
  = WithUsageDetails body_usage []

  | otherwise                   -- It's mentioned in the body
  = WithUsageDetails (body_usage' `andUDs` rhs_usage) [NonRec final_bndr rhs']
  where
    (body_usage', tagged_bndr) = tagNonRecBinder lvl body_usage bndr
    final_bndr = tagged_bndr `setIdUnfolding` unf'
                             `setIdSpecialisation` mkRuleInfo rules'
    rhs_usage = rhs_uds `andUDs` unf_uds `andUDs` rule_uds

    -- Get the join info from the *new* decision
    -- See Note [Join points and unfoldings/rules]
    mb_join_arity = willBeJoinId_maybe tagged_bndr
    is_join_point = isJust mb_join_arity

    --------- Right hand side ---------
    env1 | is_join_point    = env  -- See Note [Join point RHSs]
         | certainly_inline = env  -- See Note [Cascading inlines]
         | otherwise        = rhsCtxt env

    -- See Note [Sources of one-shot information]
    rhs_env = env1 { occ_one_shots = argOneShots dmd }

    (WithUsageDetails rhs_uds rhs') = occAnalRhs rhs_env NonRecursive mb_join_arity rhs

    --------- Unfolding ---------
    -- See Note [Unfoldings and join points]
    unf | isId bndr = idUnfolding bndr
        | otherwise = NoUnfolding
    (WithUsageDetails unf_uds unf') = occAnalUnfolding rhs_env NonRecursive mb_join_arity unf

    --------- Rules ---------
    -- See Note [Rules are extra RHSs] and Note [Rule dependency info]
    rules_w_uds  = occAnalRules rhs_env mb_join_arity bndr
    rules'       = map fstOf3 rules_w_uds
    imp_rule_uds = impRulesScopeUsage (lookupImpRules imp_rule_edges bndr)
         -- imp_rule_uds: consider
         --     h = ...
         --     g = ...
         --     RULE map g = h
         -- Then we want to ensure that h is in scope everywhere
         -- that g is (since the RULE might turn g into h), so
         -- we make g mention h.

    rule_uds = foldr add_rule_uds imp_rule_uds rules_w_uds
    add_rule_uds (_, l, r) uds = l `andUDs` r `andUDs` uds

    ----------
    occ = idOccInfo tagged_bndr
    certainly_inline -- See Note [Cascading inlines]
      = case occ of
          OneOcc { occ_in_lam = NotInsideLam, occ_n_br = 1 }
            -> active && not_stable
          _ -> False

    dmd        = idDemandInfo bndr
    active     = isAlwaysActive (idInlineActivation bndr)
    not_stable = not (isStableUnfolding (idUnfolding bndr))

-----------------
occAnalRecBind :: OccEnv -> TopLevelFlag -> ImpRuleEdges -> [(Var,CoreExpr)]
               -> UsageDetails -> WithUsageDetails [CoreBind]
-- For a recursive group, we
--   * occ-analyse all the RHSs
--   * compute strongly-connected components
--   * feed those components to occAnalRec
-- See Note [Recursive bindings: the grand plan]
occAnalRecBind !env lvl imp_rule_edges pairs body_usage
  = foldr (occAnalRec rhs_env lvl) (WithUsageDetails body_usage []) sccs
  where
    sccs :: [SCC Details]
    sccs = {-# SCC "occAnalBind.scc" #-}
           stronglyConnCompFromEdgedVerticesUniq nodes

    nodes :: [LetrecNode]
    nodes = {-# SCC "occAnalBind.assoc" #-}
            map (makeNode rhs_env imp_rule_edges bndr_set) pairs

    bndrs    = map fst pairs
    bndr_set = mkVarSet bndrs
    rhs_env  = env `addInScope` bndrs

-----------------------------
occAnalRec :: OccEnv -> TopLevelFlag
           -> SCC Details
           -> WithUsageDetails [CoreBind]
           -> WithUsageDetails [CoreBind]

-- The NonRec case is just like a Let (NonRec ...) above
occAnalRec !_ lvl
           (AcyclicSCC (ND { nd_bndr = bndr, nd_rhs = rhs
                           , nd_uds = rhs_uds }))
           (WithUsageDetails body_uds binds)
  | not (bndr `usedIn` body_uds)
  = WithUsageDetails body_uds binds -- See Note [Dead code]

  | otherwise                   -- It's mentioned in the body
  = WithUsageDetails (body_uds' `andUDs` rhs_uds')
                     (NonRec tagged_bndr rhs : binds)
  where
    (body_uds', tagged_bndr) = tagNonRecBinder lvl body_uds bndr
    rhs_uds'      = adjustRhsUsage mb_join_arity rhs rhs_uds
    mb_join_arity = willBeJoinId_maybe tagged_bndr

-- The Rec case is the interesting one
-- See Note [Recursive bindings: the grand plan]
-- See Note [Loop breaking]
occAnalRec env lvl (CyclicSCC details_s) (WithUsageDetails body_uds binds)
  | not (any (`usedIn` body_uds) bndrs) -- NB: look at body_uds, not total_uds
  = WithUsageDetails body_uds binds     -- See Note [Dead code]

  | otherwise   -- At this point we always build a single Rec
  = -- pprTrace "occAnalRec" (ppr loop_breaker_nodes)
    WithUsageDetails final_uds (Rec pairs : binds)

  where
    bndrs      = map nd_bndr details_s
    all_simple = all nd_simple details_s

    ------------------------------
    -- Make the nodes for the loop-breaker analysis
    -- See Note [Choosing loop breakers] for loop_breaker_nodes
    final_uds :: UsageDetails
    loop_breaker_nodes :: [LetrecNode]
    (WithUsageDetails final_uds loop_breaker_nodes) = mkLoopBreakerNodes env lvl body_uds details_s

    ------------------------------
    weak_fvs :: VarSet
    weak_fvs = mapUnionVarSet nd_weak_fvs details_s

    ---------------------------
    -- Now reconstruct the cycle
    pairs :: [(Id,CoreExpr)]
    pairs | all_simple = reOrderNodes   0 weak_fvs loop_breaker_nodes []
          | otherwise  = loopBreakNodes 0 weak_fvs loop_breaker_nodes []
          -- In the common case when all are "simple" (no rules at all)
          -- the loop_breaker_nodes will include all the scope edges
          -- so a SCC computation would yield a single CyclicSCC result;
          -- and reOrderNodes deals with exactly that case.
-- Saves a SCC analysis in a common case {- ********************************************************************* * * Loop breaking * * ********************************************************************* -} {- Note [Choosing loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Step 4 in Note [Recursive bindings: the grand plan]), occAnalRec does loop-breaking on each CyclicSCC of the original program: * mkLoopBreakerNodes: Form the loop-breaker graph for that CyclicSCC * loopBreakNodes: Do SCC analysis on it * reOrderNodes: For each CyclicSCC, pick a loop breaker * Delete edges to that loop breaker * Do another SCC analysis on that reduced SCC * Repeat To form the loop-breaker graph, we construct a new set of Nodes, the "loop-breaker nodes", with the same details but different edges, the "loop-breaker edges". The loop-breaker nodes have both more and fewer dependencies than the scope edges: More edges: If f calls g, and g has an active rule that mentions h then we add an edge from f -> h. See Note [Rules and loop breakers]. Fewer edges: we only include dependencies * only on /active/ rules, * on rule /RHSs/ (not LHSs) The scope edges, by contrast, must be much more inclusive. The nd_simple flag tracks the common case when a binding has no RULES at all, in which case the loop-breaker edges will be identical to the scope edges. Note that in Example [eftInt], *neither* eftInt *nor* eftIntFB is chosen as a loop breaker, because their RHSs don't mention each other. And indeed both can be inlined safely. Note [inl_fvs] ~~~~~~~~~~~~~~ Note that the loop-breaker graph includes edges for occurrences in /both/ the RHS /and/ the stable unfolding. Consider this, which actually occurred when compiling BooleanFormula.hs in GHC: Rec { lvl1 = go ; lvl2[StableUnf = go] = lvl1 ; go = ...go...lvl2... 
}

From the point of view of infinite inlining, we need only these edges:
    lvl1 :-> go
    lvl2 :-> go       -- The RHS lvl1 will never be used for inlining
    go   :-> go, lvl2

But the danger is that, lacking any edge to lvl1, we'll put it at the
end thus
  Rec { lvl2[ StableUnf = go] = lvl1
      ; go[LoopBreaker] = ...go...lvl2... }
      ; lvl1[Occ=Once] = go }

And now the Simplifier will try to use PreInlineUnconditionally on lvl1
(which occurs just once), but because it is last we won't actually
substitute in lvl2.  Sigh.

To avoid this possibility, we include edges from lvl2 to /both/ its
stable unfolding /and/ its RHS.  Hence the defn of inl_fvs in
makeNode.  Maybe we could be more clever, but it's very much a corner
case.

Note [Weak loop breakers]
~~~~~~~~~~~~~~~~~~~~~~~~~
There is a last nasty wrinkle.  Suppose we have

    Rec { f = f_rhs
          RULE f [] = g

          h = h_rhs
          g = h
          ...more...
    }

Remember that we simplify the RULES before any RHS (see Note
[Rules are visible in their own rec group] above).

So we must *not* postInlineUnconditionally 'g', even though
its RHS turns out to be trivial.  (I'm assuming that 'g' is
not chosen as a loop breaker.)  Why not?  Because then we drop
the binding for 'g', which leaves it out of scope in the RULE!

Here's a somewhat different example of the same thing

    Rec { q = r
        ; r = ...p...
        ; p = p_rhs
          RULE p [] = q }

Here the RULE is "below" q, but we *still* can't postInlineUnconditionally
q, because the RULE for p is active throughout.  So the RHS of r
might rewrite to      r = ...q...
So q must remain in scope in the output program!

We "solve" this by:

    Make q a "weak" loop breaker (OccInfo = IAmLoopBreaker True)
    iff q is mentioned in the RHS of any RULE (active or not)
    in the Rec group

Note the "active or not" comment; even if a RULE is inactive, we
want its RHS free vars to stay alive (#20820)!

A normal "strong" loop breaker has IAmLoopBreaker False.
So: Inline postInlineUnconditionally strong IAmLoopBreaker False no no weak IAmLoopBreaker True yes no other yes yes The **sole** reason for this kind of loop breaker is so that postInlineUnconditionally does not fire. Ugh. Annoyingly, since we simplify the rules *first* we'll never inline q into p's RULE. That trivial binding for q will hang around until we discard the rule. Yuk. But it's rare. Note [Rules and loop breakers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we form the loop-breaker graph (Step 4 in Note [Recursive bindings: the grand plan]), we must be careful about RULEs. For a start, we want a loop breaker to cut every cycle, so inactive rules play no part; we need only consider /active/ rules. See Note [Finding rule RHS free vars] The second point is more subtle. A RULE is like an equation for 'f' that is *always* inlined if it is applicable. We do *not* disable rules for loop-breakers. It's up to whoever makes the rules to make sure that the rules themselves always terminate. See Note [Rules for recursive functions] in GHC.Core.Opt.Simplify Hence, if f's RHS (or its stable unfolding if it has one) mentions g, and g has a RULE that mentions h, and h has a RULE that mentions f then we *must* choose f to be a loop breaker. Example: see Note [Specialisation rules]. So our plan is this: Take the free variables of f's RHS, and augment it with all the variables reachable by a transitive sequence RULES from those starting points. That is the whole reason for computing rule_fv_env in mkLoopBreakerNodes. Wrinkles: * We only consider /active/ rules. See Note [Finding rule RHS free vars] * We need only consider free vars that are also binders in this Rec group. See also Note [Finding rule RHS free vars] * We only consider variables free in the *RHS* of the rule, in contrast to the way we build the Rec group in the first place (Note [Rule dependency info]) * Why "transitive sequence of rules"? 
Because active rules apply unconditionally, without checking
loop-breaker-ness.  See Note [Loop breaker dependencies].

Note [Finding rule RHS free vars]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this real example from Data Parallel Haskell
     tagZero :: Array Int -> Array Tag
     {-# INLINE [1] tagZero #-}
     tagZero xs = pmap (\x -> fromBool (x==0)) xs

     {-# RULES "tagZero" [~1] forall xs n.
         pmap fromBool = tagZero xs #-}

So tagZero's RHS mentions pmap, and pmap's RULE mentions tagZero.
However, tagZero can only be inlined in phase 1 and later, while
the RULE is only active *before* phase 1.  So there's no problem.

To make this work, we look for the RHS free vars only for
*active* rules.  That's the reason for the occ_rule_act field
of the OccEnv.

Note [loopBreakNodes]
~~~~~~~~~~~~~~~~~~~~~
loopBreakNodes is applied to the list of nodes for a cyclic strongly
connected component (there's guaranteed to be a cycle).  It returns
the same nodes, but
        a) in a better order,
        b) with some of the Ids having a IAmALoopBreaker pragma

The "loop-breaker" Ids are sufficient to break all cycles in the SCC.  This means
that the simplifier can guarantee not to loop provided it never records an inlining
for these no-inline guys.

Furthermore, the order of the binds is such that if we neglect dependencies
on the no-inline Ids then the binds are topologically sorted.  This means
that the simplifier will generally do a good job if it works from top to bottom,
recording inlinings for any Ids which aren't marked as "no-inline" as it goes.
-}

type Binding = (Id,CoreExpr)

-- See Note [loopBreakNodes]
loopBreakNodes :: Int
               -> VarSet        -- Binders whose dependencies may be "missing"
                                -- See Note [Weak loop breakers]
               -> [LetrecNode]
               -> [Binding]     -- Append these to the end
               -> [Binding]
-- Return the bindings sorted into a plausible order, and marked with loop breakers.
-- See Note [loopBreakNodes]
loopBreakNodes depth weak_fvs nodes binds
  = -- pprTrace "loopBreakNodes" (ppr nodes) $
    go (stronglyConnCompFromEdgedVerticesUniqR nodes)
  where
    go []         = binds
    go (scc:sccs) = loop_break_scc scc (go sccs)

    -- An acyclic node needs no loop breaker (it may still become a
    -- *weak* one, see mk_non_loop_breaker); a cyclic SCC must have
    -- at least one loop breaker chosen, by reOrderNodes.
    loop_break_scc scc binds
      = case scc of
          AcyclicSCC node  -> nodeBinding (mk_non_loop_breaker weak_fvs) node : binds
          CyclicSCC nodes  -> reOrderNodes depth weak_fvs nodes binds

----------------------------------
reOrderNodes :: Int -> VarSet -> [LetrecNode] -> [Binding] -> [Binding]
    -- Choose a loop breaker, mark it no-inline,
    -- and call loopBreakNodes on the rest
reOrderNodes _ _ []     _     = panic "reOrderNodes"
reOrderNodes _ _ [node] binds = nodeBinding mk_loop_breaker node : binds
reOrderNodes depth weak_fvs (node : nodes) binds
  = -- pprTrace "reOrderNodes" (vcat [ text "unchosen" <+> ppr unchosen
    --                               , text "chosen" <+> ppr chosen_nodes ]) $
    loopBreakNodes new_depth weak_fvs unchosen $
    (map (nodeBinding mk_loop_breaker) chosen_nodes ++ binds)
  where
    (chosen_nodes, unchosen) = chooseLoopBreaker approximate_lb
                                                 (nd_score (node_payload node))
                                                 [node] [] nodes

    approximate_lb = depth >= 2
    new_depth | approximate_lb = 0
              | otherwise      = depth+1
        -- After two iterations (d=0, d=1) give up
        -- and approximate, returning to d=0
        -- See Note [Complexity of loop breaking]

nodeBinding :: (Id -> Id) -> LetrecNode -> Binding
-- Extract the (binder, rhs) pair from a node, applying the given
-- occ-info-setting function to the binder
nodeBinding set_id_occ (node_payload -> ND { nd_bndr = bndr, nd_rhs = rhs})
  = (set_id_occ bndr, rhs)

mk_loop_breaker :: Id -> Id
-- Mark the Id as a strong loop breaker, preserving its tail-call info
mk_loop_breaker bndr
  = bndr `setIdOccInfo` occ'
  where
    occ'      = strongLoopBreaker { occ_tail = tail_info }
    tail_info = tailCallInfo (idOccInfo bndr)

mk_non_loop_breaker :: VarSet -> Id -> Id
-- See Note [Weak loop breakers]
mk_non_loop_breaker weak_fvs bndr
  | bndr `elemVarSet` weak_fvs = setIdOccInfo bndr occ'
  | otherwise                  = bndr
  where
    occ'      = weakLoopBreaker { occ_tail = tail_info }
    tail_info = tailCallInfo (idOccInfo bndr)

----------------------------------
chooseLoopBreaker :: Bool            -- True <=> Too many iterations,
                                     --          so approximate
                  -> NodeScore       -- Best score so far
                  -> [LetrecNode]    -- Nodes with this score
                  -> [LetrecNode]    -- Nodes with higher scores
                  -> [LetrecNode]    -- Unprocessed nodes
                  -> ([LetrecNode], [LetrecNode])
    -- This loop looks for the bind with the lowest score
    -- to pick as the loop breaker.  The rest accumulate in
chooseLoopBreaker _ _ loop_nodes acc []
  = (loop_nodes, acc)  -- Done

    -- If approximate_loop_breaker is True, we pick *all*
    -- nodes with lowest score, else just one
    -- See Note [Complexity of loop breaking]
chooseLoopBreaker approx_lb loop_sc loop_nodes acc (node : nodes)
  | approx_lb
  , rank sc == rank loop_sc
  = chooseLoopBreaker approx_lb loop_sc (node : loop_nodes) acc nodes

  | sc `betterLB` loop_sc  -- Better score so pick this new one
  = chooseLoopBreaker approx_lb sc [node] (loop_nodes ++ acc) nodes

  | otherwise              -- Worse score so don't pick it
  = chooseLoopBreaker approx_lb loop_sc loop_nodes (node : acc) nodes
  where
    sc = nd_score (node_payload node)

{- Note [Complexity of loop breaking]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The loop-breaking algorithm knocks out one binder at a time, and
performs a new SCC analysis on the remaining binders.  That can
behave very badly in tightly-coupled groups of bindings; in the
worst case it can be (N**2)*log N, because it does a full SCC
on N, then N-1, then N-2 and so on.

To avoid this, we switch plans after 2 (or whatever) attempts:
  Plan A: pick one binder with the lowest score, make it
          a loop breaker, and try again
  Plan B: pick *all* binders with the lowest score, make them
          all loop breakers, and try again
Since there are only a small finite number of scores, this will
terminate in a constant number of iterations, rather than O(N)
iterations.

You might think that it's very unlikely, but RULES make it much
more likely.  Here's a real example from #1969:
  Rec { $dm = \d.\x. op d
        {-# RULES forall d. $dm Int d  = $s$dm1
                  forall d. $dm Bool d = $s$dm2 #-}

        dInt = MkD .... opInt ...
        dBool = MkD .... opBool ...
        opInt  = $dm dInt
        opBool = $dm dBool

        $s$dm1 = \x. op dInt
        $s$dm2 = \x.
op dBool }

The RULES stuff means that we can't choose $dm as a loop breaker
(Note [Choosing loop breakers]), so we must choose at least (say)
opInt *and* opBool, and so on.  The number of loop breakers is
linear in the number of instance declarations.

Note [Loop breakers and INLINE/INLINABLE pragmas]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Avoid choosing a function with an INLINE pragma as the loop breaker!
If such a function is mutually-recursive with a non-INLINE thing,
then the latter should be the loop-breaker.

It's vital to distinguish between INLINE and INLINABLE (the
Bool returned by hasStableCoreUnfolding_maybe).  If we start with
   Rec { {-# INLINABLE f #-}
         f x = ...f... }
and then worker/wrapper it through strictness analysis, we'll get
   Rec { {-# INLINABLE $wf #-}
         $wf p q = let x = (p,q) in ...f...

         {-# INLINE f #-}
         f x = case x of (p,q) -> $wf p q }

Now it is vital that we choose $wf as the loop breaker, so we can
inline 'f' in '$wf'.

Note [DFuns should not be loop breakers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's particularly bad to make a DFun into a loop breaker.  See
Note [How instance declarations are translated] in GHC.Tc.TyCl.Instance

We give DFuns a higher score than ordinary CONLIKE things because
if there's a choice we want the DFun to be the non-loop breaker. Eg

rec { sc = /\ a \$dC. $fBWrap (T a) ($fCT @ a $dC)

      $fCT :: forall a_afE. (Roman.C a_afE) => Roman.C (Roman.T a_afE)
      {-# DFUN #-}
      $fCT = /\a \$dC. MkD (T a) ((sc @ a $dC) |> blah) ($ctoF @ a $dC)
    }

Here 'sc' (the superclass) looks CONLIKE, but we'll never get to it
if we can't unravel the DFun first.

Note [Constructor applications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's really really important to inline dictionaries.  Real example
(the Enum Ordering instance from GHC.Base):

     rec     f = \ x -> case d of (p,q,r) -> p x
             g = \ x -> case d of (p,q,r) -> q x
             d = (v, f, g)

Here, f and g occur just once; but we can't inline them into d.
On the other hand we *could* simplify those case expressions if we didn't stupidly choose d as the loop breaker. But we won't because constructor args are marked "Many". Inlining dictionaries is really essential to unravelling the loops in static numeric dictionaries, see GHC.Float. Note [Closure conversion] ~~~~~~~~~~~~~~~~~~~~~~~~~ We treat (\x. C p q) as a high-score candidate in the letrec scoring algorithm. The immediate motivation came from the result of a closure-conversion transformation which generated code like this: data Clo a b = forall c. Clo (c -> a -> b) c ($:) :: Clo a b -> a -> b Clo f env $: x = f env x rec { plus = Clo plus1 () ; plus1 _ n = Clo plus2 n ; plus2 Zero n = n ; plus2 (Succ m) n = Succ (plus $: m $: n) } If we inline 'plus' and 'plus1', everything unravels nicely. But if we choose 'plus1' as the loop breaker (which is entirely possible otherwise), the loop does not unravel nicely. @occAnalUnfolding@ deals with the question of bindings where the Id is marked by an INLINE pragma. For these we record that anything which occurs in its RHS occurs many times. This pessimistically assumes that this inlined binder also occurs many times in its scope, but if it doesn't we'll catch it next time round. At worst this costs an extra simplifier pass. ToDo: try using the occurrence info for the inline'd binder. [March 97] We do the same for atomic RHSs. Reason: see notes with loopBreakSCC. [June 98, SLPJ] I've undone this change; I don't understand it. See notes with loopBreakSCC. 
************************************************************************
*                                                                      *
                   Making nodes
*                                                                      *
************************************************************************
-}

type LetrecNode = Node Unique Details  -- Node comes from Digraph
                                       -- The Unique key is gotten from the Id

-- | Per-binder information gathered for one binding of a Rec group,
-- used to drive the loop-breaker choice.
data Details
  = ND { nd_bndr :: Id          -- Binder

       , nd_rhs :: CoreExpr     -- RHS, already occ-analysed

       , nd_uds :: UsageDetails -- Usage from RHS, and RULES, and stable unfoldings
                                -- ignoring phase (ie assuming all are active)
                                -- See Note [Forming Rec groups]

       , nd_inl :: IdSet        -- Free variables of the stable unfolding and the RHS
                                -- but excluding any RULES
                                -- This is the IdSet that may be used if the Id is inlined

       , nd_simple :: Bool      -- True iff this binding has no local RULES
                                -- If all nodes are simple we don't need a loop-breaker
                                -- dep-anal before reconstructing.

       , nd_weak_fvs :: IdSet   -- Variables bound in this Rec group that are free
                                -- in the RHS of any rule (active or not) for this bndr
                                -- See Note [Weak loop breakers]

       , nd_active_rule_fvs :: IdSet  -- Variables bound in this Rec group that are free
                                      -- in the RHS of an active rule for this bndr
                                      -- See Note [Rules and loop breakers]

       , nd_score :: NodeScore
  }

instance Outputable Details where
   ppr nd = text "ND" <> braces
             (sep [ text "bndr =" <+> ppr (nd_bndr nd)
                  , text "uds =" <+> ppr (nd_uds nd)
                  , text "inl =" <+> ppr (nd_inl nd)
                  , text "simple =" <+> ppr (nd_simple nd)
                  , text "active_rule_fvs =" <+> ppr (nd_active_rule_fvs nd)
                  , text "score =" <+> ppr (nd_score nd)
             ])

-- The NodeScore is compared lexicographically;
--      e.g. lower rank wins regardless of size
type NodeScore = ( Int     -- Rank: lower => more likely to be picked as loop breaker
                 , Int     -- Size of rhs: higher => more likely to be picked as LB
                           -- Maxes out at maxExprSize; we just use it to prioritise
                           -- small functions
                 , Bool )  -- Was it a loop breaker before?
                           -- True => more likely to be picked
                           -- Note [Loop breakers, node scoring, and stability]

rank :: NodeScore -> Int
rank (r, _, _) = r

makeNode :: OccEnv -> ImpRuleEdges -> VarSet
         -> (Var, CoreExpr) -> LetrecNode
-- See Note [Recursive bindings: the grand plan]
makeNode !env imp_rule_edges bndr_set (bndr, rhs)
  = DigraphNode { node_payload      = details
                , node_key          = varUnique bndr
                , node_dependencies = nonDetKeysUniqSet scope_fvs }
    -- It's OK to use nonDetKeysUniqSet here as stronglyConnCompFromEdgedVerticesR
    -- is still deterministic with edges in nondeterministic order as
    -- explained in Note [Deterministic SCC] in GHC.Data.Graph.Directed.
  where
    details = ND { nd_bndr            = bndr'
                 , nd_rhs             = rhs'
                 , nd_uds             = scope_uds
                 , nd_inl             = inl_fvs
                 , nd_simple          = null rules_w_uds && null imp_rule_info
                 , nd_weak_fvs        = weak_fvs
                 , nd_active_rule_fvs = active_rule_fvs
                 , nd_score           = pprPanic "makeNodeDetails" (ppr bndr) }
                   -- nd_score is filled in later, by mkLoopBreakerNodes;
                   -- reading it before then is a bug, hence the panic

    bndr' = bndr `setIdUnfolding`      unf'
                 `setIdSpecialisation` mkRuleInfo rules'

    inl_uds   = rhs_uds `andUDs` unf_uds
    scope_uds = inl_uds `andUDs` rule_uds
                   -- Note [Rules are extra RHSs]
                   -- Note [Rule dependency info]

    scope_fvs = udFreeVars bndr_set scope_uds
    -- scope_fvs: all occurrences from this binder: RHS, unfolding,
    --            and RULES, both LHS and RHS thereof, active or inactive

    inl_fvs = udFreeVars bndr_set inl_uds
    -- inl_fvs: vars that would become free if the function was inlined.
    -- We conservatively approximate that by the free vars from the RHS
    -- and the unfolding together.
    -- See Note [inl_fvs]

    mb_join_arity = isJoinId_maybe bndr
    -- Get join point info from the *current* decision
    -- We don't know what the new decision will be!
    -- Using the old decision at least allows us to
    -- preserve existing join points, even if RULEs are added
    -- See Note [Join points and unfoldings/rules]

    --------- Right hand side ---------
    -- Constructing the edges for the main Rec computation
    -- See Note [Forming Rec groups]
    -- Do not use occAnalRhs because we don't yet know the final
    -- answer for mb_join_arity; instead, do the occAnalLam call from
    -- occAnalRhs, and postpone adjustRhsUsage until occAnalRec
    rhs_env = rhsCtxt env
    (WithUsageDetails rhs_uds rhs') = occAnalLam rhs_env rhs

    --------- Unfolding ---------
    -- See Note [Unfoldings and join points]
    unf = realIdUnfolding bndr
    -- realIdUnfolding: Ignore loop-breaker-ness
    -- here because that is what we are setting!
    (WithUsageDetails unf_uds unf') = occAnalUnfolding rhs_env Recursive mb_join_arity unf

    --------- IMP-RULES --------
    is_active     = occ_rule_act env :: Activation -> Bool
    imp_rule_info = lookupImpRules imp_rule_edges bndr
    imp_rule_uds  = impRulesScopeUsage imp_rule_info
    imp_rule_fvs  = impRulesActiveFvs is_active bndr_set imp_rule_info

    --------- All rules --------
    rules_w_uds :: [(CoreRule, UsageDetails, UsageDetails)]
    rules_w_uds = occAnalRules rhs_env mb_join_arity bndr
    rules'      = map fstOf3 rules_w_uds

    rule_uds = foldr add_rule_uds imp_rule_uds rules_w_uds
    add_rule_uds (_, l, r) uds = l `andUDs` r `andUDs` uds

    -------- active_rule_fvs ------------
    active_rule_fvs = foldr add_active_rule imp_rule_fvs rules_w_uds
    add_active_rule (rule, _, rhs_uds) fvs
      | is_active (ruleActivation rule)
      = udFreeVars bndr_set rhs_uds `unionVarSet` fvs
      | otherwise
      = fvs

    -------- weak_fvs ------------
    -- See Note [Weak loop breakers]
    -- Unlike active_rule_fvs, this includes /inactive/ rules too
    weak_fvs = foldr add_rule emptyVarSet rules_w_uds
    add_rule (_, _, rhs_uds) fvs = udFreeVars bndr_set rhs_uds `unionVarSet` fvs

mkLoopBreakerNodes :: OccEnv -> TopLevelFlag
                   -> UsageDetails        -- for BODY of let
                   -> [Details]
                   -> WithUsageDetails [LetrecNode] -- adjusted
-- See Note [Choosing loop breakers]
-- This function primarily creates the Nodes for the
-- loop-breaker SCC analysis.  More specifically:
--   a) tag each binder with its occurrence info
--   b) add a NodeScore to each node
--   c) make a Node with the right dependency edges for
--      the loop-breaker SCC analysis
--   d) adjust each RHS's usage details according to
--      the binder's (new) shotness and join-point-hood
mkLoopBreakerNodes !env lvl body_uds details_s
  = WithUsageDetails final_uds (zipWithEqual "mkLoopBreakerNodes" mk_lb_node details_s bndrs')
  where
    (final_uds, bndrs') = tagRecBinders lvl body_uds details_s

    mk_lb_node nd@(ND { nd_bndr = old_bndr, nd_inl = inl_fvs }) new_bndr
      = DigraphNode { node_payload      = new_nd
                    , node_key          = varUnique old_bndr
                    , node_dependencies = nonDetKeysUniqSet lb_deps }
              -- It's OK to use nonDetKeysUniqSet here as
              -- stronglyConnCompFromEdgedVerticesR is still deterministic with edges
              -- in nondeterministic order as explained in
              -- Note [Deterministic SCC] in GHC.Data.Graph.Directed.
      where
        new_nd  = nd { nd_bndr = new_bndr, nd_score = score }
        score   = nodeScore env new_bndr lb_deps nd
        lb_deps = extendFvs_ rule_fv_env inl_fvs
        -- See Note [Loop breaker dependencies]

    rule_fv_env :: IdEnv IdSet
    -- Maps a variable f to the variables from this group
    --      reachable by a sequence of RULES starting with f
    -- Domain is *subset* of bound vars (others have no rule fvs)
    -- See Note [Finding rule RHS free vars]
    -- Why transClosureFV?  See Note [Loop breaker dependencies]
    rule_fv_env = transClosureFV $ mkVarEnv $
                  [ (b, rule_fvs)
                  | ND { nd_bndr = b, nd_active_rule_fvs = rule_fvs } <- details_s
                  , not (isEmptyVarSet rule_fvs) ]

{- Note [Loop breaker dependencies]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The loop breaker dependencies of x in a recursive
group { f1 = e1; ...; fn = en } are:

- The "inline free variables" of f: the fi free in
  f's stable unfolding and RHS; see Note [inl_fvs]

- Any fi reachable from those inline free variables by a sequence
  of RULE rewrites.
Remember, rule rewriting is not affected by fi being a loop breaker,
so we have to take the transitive closure in case f is the only
possible loop breaker in the loop.  Hence rule_fv_env.

We need only account for /active/ rules.
-}

------------------------------------------
nodeScore :: OccEnv
          -> Id        -- Binder with new occ-info
          -> VarSet    -- Loop-breaker dependencies
          -> Details
          -> NodeScore
-- Score a node for how suitable it is as a loop breaker: the
-- resulting NodeScore is compared with betterLB, lower rank winning.
nodeScore !env new_bndr lb_deps
          (ND { nd_bndr = old_bndr, nd_rhs = bind_rhs })

  | not (isId old_bndr)     -- A type or coercion variable is never a loop breaker
  = (100, 0, False)

  | old_bndr `elemVarSet` lb_deps  -- Self-recursive things are great loop breakers
  = (0, 0, True)                   -- See Note [Self-recursion and loop breakers]

  | not (occ_unf_act env old_bndr) -- A binder whose inlining is inactive (e.g. has
  = (0, 0, True)                   -- a NOINLINE pragma) makes a great loop breaker

  | exprIsTrivial rhs
  = mk_score 10  -- Practically certain to be inlined
    -- Used to have also: && not (isExportedId bndr)
    -- But I found this sometimes cost an extra iteration when we have
    --      rec { d = (a,b); a = ...df...; b = ...df...; df = d }
    -- where df is the exported dictionary. Then df makes a really
    -- bad choice for loop breaker

  | DFunUnfolding { df_args = args } <- old_unf
    -- Never choose a DFun as a loop breaker
    -- Note [DFuns should not be loop breakers]
  = (9, length args, is_lb)

    -- Data structures are more important than INLINE pragmas
    -- so that dictionary/method recursion unravels

  | CoreUnfolding { uf_guidance = UnfWhen {} } <- old_unf
  = mk_score 6

  | is_con_app rhs   -- Data types help with cases:
  = mk_score 5       -- Note [Constructor applications]

  | isStableUnfolding old_unf
  , can_unfold
  = mk_score 3

  | isOneOcc (idOccInfo new_bndr)
  = mk_score 2  -- Likely to be inlined

  | can_unfold  -- The Id has some kind of unfolding
  = mk_score 1

  | otherwise
  = (0, 0, is_lb)

  where
    mk_score :: Int -> NodeScore
    mk_score rank = (rank, rhs_size, is_lb)

    -- is_lb: see Note [Loop breakers, node scoring, and stability]
    is_lb = isStrongLoopBreaker (idOccInfo old_bndr)

    old_unf    = realIdUnfolding old_bndr
    can_unfold = canUnfold old_unf

    rhs = case old_unf of
            CoreUnfolding { uf_src = src, uf_tmpl = unf_rhs }
              | isStableSource src
              -> unf_rhs
            _ -> bind_rhs
       -- 'bind_rhs' is irrelevant for inlining things with a stable unfolding

    rhs_size = case old_unf of
                 CoreUnfolding { uf_guidance = guidance }
                    | UnfIfGoodArgs { ug_size = size } <- guidance
                    -> size
                 _  -> cheapExprSize rhs

    -- Checking for a constructor application
    -- Cheap and cheerful; the simplifier moves casts out of the way
    -- The lambda case is important to spot x = /\a. C (f a)
    -- which comes up when C is a dictionary constructor and
    -- f is a default method.
    -- Example: the instance for Show (ST s a) in GHC.ST
    --
    -- However we *also* treat (\x. C p q) as a con-app-like thing,
    --    Note [Closure conversion]
    is_con_app (Var v)    = isConLikeId v
    is_con_app (App f _)  = is_con_app f
    is_con_app (Lam _ e)  = is_con_app e
    is_con_app (Tick _ e) = is_con_app e
    is_con_app (Let _ e)  = is_con_app e  -- let x = let y = blah in (a,b)
    is_con_app _          = False         -- We will float the y out, so treat
                                          -- the x-binding as a con-app (#20941)

maxExprSize :: Int
maxExprSize = 20  -- Rather arbitrary

cheapExprSize :: CoreExpr -> Int
-- Maxes out at maxExprSize; stops counting as soon as the cap is
-- reached, so it is cheap even for very large expressions
cheapExprSize e
  = go 0 e
  where
    go n e | n >= maxExprSize = n
           | otherwise        = go1 n e

    go1 n (Var {})        = n+1
    go1 n (Lit {})        = n+1
    go1 n (Type {})       = n
    go1 n (Coercion {})   = n
    go1 n (Tick _ e)      = go1 n e
    go1 n (Cast e _)      = go1 n e
    go1 n (App f a)       = go (go1 n f) a
    go1 n (Lam b e)
      | isTyVar b         = go1 n e
      | otherwise         = go (n+1) e
    go1 n (Let b e)       = gos (go1 n e) (rhssOfBind b)
    go1 n (Case e _ _ as) = gos (go1 n e) (rhssOfAlts as)

    gos n [] = n
    gos n (e:es) | n >= maxExprSize = n
                 | otherwise        = gos (go1 n e) es

betterLB :: NodeScore -> NodeScore -> Bool
-- If  n1 `betterLB` n2  then choose n1 as the loop breaker
betterLB (rank1, size1, lb1) (rank2, size2, _)
  | rank1 < rank2 = True
  | rank1 > rank2 = False
  | size1 < size2 = False   -- Make the bigger n2 into the loop breaker
  | size1 > size2 = True
  | lb1           = True    -- Tie-break: if n1 was a loop breaker before, choose it
  | otherwise     = False   -- See Note [Loop breakers, node scoring, and stability]

{- Note [Self-recursion and loop breakers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we have
   rec { f = ...f...g...
       ; g = .....f... }
then 'f' has to be a loop breaker anyway, so we may as well choose it
right away, so that g can inline freely.

This is really just a cheap hack. Consider
   rec { f = ...g...
       ; g = ..f..h...
       ; h = ...f....}
Here f or g are better loop breakers than h; but we might accidentally
choose h.  Finding the minimal set of loop breakers is hard.
Note [Loop breakers, node scoring, and stability]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To choose a loop breaker, we give a NodeScore to each node in the SCC,
and pick the one with the best score (according to 'betterLB').

We need to be jolly careful (#12425, #12234) about the stability
of this choice. Suppose we have

    let rec { f = ...g...g...
            ; g = ...f...f... }
    in
    case x of
      True  -> ...f..
      False -> ..f...

In each iteration of the simplifier the occurrence analyser OccAnal
chooses a loop breaker. Suppose in iteration 1 it chooses g as the loop
breaker. That means it is free to inline f.

Suppose that GHC decides to inline f in the branches of the case, but
(for some reason; eg it is not saturated) not in the rhs of g. So we get

    let rec { f = ...g...g...
            ; g = ...f...f... }
    in
    case x of
      True  -> ...g...g.....
      False -> ..g..g....

Now suppose that, for some reason, in the next iteration the occurrence
analyser chooses f as the loop breaker, so it can freely inline g. And
again for some reason the simplifier inlines g at its calls in the case
branches, but not in the RHS of f. Then we get

    let rec { f = ...g...g...
            ; g = ...f...f... }
    in
    case x of
      True  -> ...(...f...f...)...(...f..f..).....
      False -> ..(...f...f...)...(..f..f...)....

You can see where this is going! Each iteration of the simplifier
doubles the number of calls to f or g. No wonder GHC is slow!

(In the particular example in comment:3 of #12425, f and g are the two
mutually recursive fmap instances for CondT and Result. They are both
marked INLINE which, oddly, is why they don't inline in each other's
RHS, because the call there is not saturated.)

The root cause is that we flip-flop on our choice of loop breaker. I
always thought it didn't matter, and indeed for any single iteration to
terminate, it doesn't matter. But when we iterate, it matters a lot!!
So The Plan is this: If there is a tie, choose the node that was a loop breaker last time round Hence the is_lb field of NodeScore -} {- ********************************************************************* * * Lambda groups * * ********************************************************************* -} {- Note [Occurrence analysis for lambda binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For value lambdas we do a special hack. Consider (\x. \y. ...x...) If we did nothing, x is used inside the \y, so would be marked as dangerous to dup. But in the common case where the abstraction is applied to two arguments this is over-pessimistic, which delays inlining x, which forces more simplifier iterations. So the occurrence analyser collaborates with the simplifier to treat a /lambda-group/ specially. A lambda-group is a contiguous run of lambda and casts, e.g. Lam x (Lam y (Cast (Lam z body) co)) * Occurrence analyser: we just mark each binder in the lambda-group (here: x,y,z) with its occurrence info in the *body* of the lambda-group. See occAnalLam. * Simplifier. The simplifier is careful when partially applying lambda-groups. See the call to zapLambdaBndrs in GHC.Core.Opt.Simplify.simplExprF1 GHC.Core.SimpleOpt.simple_app * Why do we take care to account for intervening casts? Answer: currently we don't do eta-expansion and cast-swizzling in a stable unfolding (see Note [Eta-expansion in stable unfoldings]). So we can get f = \x. ((\y. ...x...y...) |> co) Now, since the lambdas aren't together, the occurrence analyser will say that x is OnceInLam. Now if we have a call (f e1 |> co) e2 we'll end up with let x = e1 in ...x..e2... and it'll take an extra iteration of the Simplifier to substitute for x. A thought: a lambda-group is pretty much what GHC.Core.Opt.Arity.manifestArity recognises except that the latter looks through (some) ticks. Maybe a lambda group should also look through (some) ticks? 
-}

isOneShotFun :: CoreExpr -> Bool
-- The top level lambdas, ignoring casts, of the expression
-- are all one-shot.  If there aren't any lambdas at all, this is True
isOneShotFun (Lam b e)  = isOneShotBndr b && isOneShotFun e
isOneShotFun (Cast e _) = isOneShotFun e
isOneShotFun _          = True

zapLambdaBndrs :: CoreExpr -> FullArgCount -> CoreExpr
-- If (\xyz. t) appears under-applied to only two arguments,
-- we must zap the occ-info on x,y, because they appear under the \z
-- See Note [Occurrence analysis for lambda binders] in GHC.Core.Opt.OccurAnal
--
-- NB: `arg_count` includes both type and value args
zapLambdaBndrs fun arg_count
  = -- If the lambda is fully applied, leave it alone; if not
    -- zap the OccInfo on the lambdas that do have arguments,
    -- so they beta-reduce to use-many Lets rather than used-once ones.
    zap arg_count fun `orElse` fun
  where
    zap :: FullArgCount -> CoreExpr -> Maybe CoreExpr
    -- Nothing => No need to change the occ-info
    -- Just e  => Had to change
    zap 0 e | isOneShotFun e = Nothing  -- All remaining lambdas are one-shot
            | otherwise      = Just e   -- in which case no need to zap
    zap n (Cast e co) = do { e' <- zap n e; return (Cast e' co) }
    zap n (Lam b e)   = do { e' <- zap (n-1) e
                           ; return (Lam (zap_bndr b) e') }
    zap _ _           = Nothing  -- More arguments than lambdas

    -- Type binders carry no demand/occ info worth zapping
    zap_bndr b | isTyVar b = b
               | otherwise = zapLamIdInfo b

occAnalLam :: OccEnv -> CoreExpr -> (WithUsageDetails CoreExpr)
-- See Note [Occurrence analysis for lambda binders]
-- It does the following:
--   * Sets one-shot info on the lambda binder from the OccEnv, and
--     removes that one-shot info from the OccEnv
--   * Sets the OccEnv to OccVanilla when going under a value lambda
--   * Tags each lambda with its occurrence information
--   * Walks through casts
-- This function does /not/ do
--   markAllInsideLam or
--   markAllNonTail
-- The caller does that, either in occAnal (Lam {}), or in adjustRhsUsage
-- See Note [Adjusting right-hand sides]
occAnalLam env (Lam bndr expr)
  | isTyVar bndr
  = let env1 = addOneInScope env bndr
        WithUsageDetails usage expr' = occAnalLam env1 expr
    in WithUsageDetails usage (Lam bndr expr')
    -- Important: Keep the 'env' unchanged so that with a RHS like
    --     \(@ x) -> K @x (f @x)
    -- we'll see that (K @x (f @x)) is in a OccRhs, and hence refrain
    -- from inlining f. See the beginning of Note [Cascading inlines].

  | otherwise  -- So 'bndr' is an Id
  = let (env_one_shots', bndr1)
          = case occ_one_shots env of
              []         -> ([], bndr)
              (os : oss) -> (oss, updOneShotInfo bndr os)
              -- Use updOneShotInfo, not setOneShotInfo, as pre-existing
              -- one-shot info might be better than what we can infer, e.g.
              -- due to explicit use of the magic 'oneShot' function.
              -- See Note [The oneShot function]

        env1 = env { occ_encl = OccVanilla, occ_one_shots = env_one_shots' }
        env2 = addOneInScope env1 bndr
        (WithUsageDetails usage expr') = occAnalLam env2 expr
        (usage', bndr2) = tagLamBinder usage bndr1
    in WithUsageDetails usage' (Lam bndr2 expr')

-- For casts, keep going in the same lambda-group
-- See Note [Occurrence analysis for lambda binders]
occAnalLam env (Cast expr co)
  = let (WithUsageDetails usage expr') = occAnalLam env expr

        -- usage1: see Note [Gather occurrences of coercion variables]
        usage1 = addManyOccs usage (coVarsOfCo co)

        -- usage2: see Note [Occ-anal and cast worker/wrapper]
        usage2 = case expr of
                   Var {} | isRhsEnv env -> markAllMany usage1
                   _ -> usage1

        -- usage3: you might think this was not necessary, because of
        -- the markAllNonTail in adjustRhsUsage; but not so!  For a
        -- join point, adjustRhsUsage doesn't do this; yet if there is
        -- a cast, we must!
        usage3 = markAllNonTail usage2

    in WithUsageDetails usage3 (Cast expr' co)

occAnalLam env expr = occAnal env expr

{- Note [Occ-anal and cast worker/wrapper]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
   y = e; x = y |> co
If we mark y as used-once, we'll inline y into x, and the Cast
worker/wrapper transform will float it straight back out again.  See
Note [Cast worker/wrapper] in GHC.Core.Opt.Simplify.
So in this particular case we want to mark 'y' as Many.  It's very
ad-hoc, but it's also simple.  It's also what would happen if we gave
the binding for x a stable unfolding (as we usually do for wrappers, thus
      y = e
      {-# INLINE x #-}
      x = y |> co
Now y appears twice -- once in x's stable unfolding, and once in x's
RHS.  So it'll get a Many occ-info.  (Maybe Cast w/w should create a
stable unfolding, which would obviate this Note; but that seems a bit
of a heavyweight solution.)

We only need to do this in occAnalLam, not occAnal, because the top
level of a right hand side is handled by occAnalLam.
-}

{- *********************************************************************
*                                                                      *
                 Right hand sides
*                                                                      *
********************************************************************* -}

occAnalRhs :: OccEnv -> RecFlag -> Maybe JoinArity
           -> CoreExpr   -- RHS
           -> WithUsageDetails CoreExpr
occAnalRhs !env is_rec mb_join_arity rhs
  = let (WithUsageDetails usage rhs1) = occAnalLam env rhs
        -- We call occAnalLam here, not occAnalExpr, so that it doesn't
        -- do the markAllInsideLam and markNonTailCall stuff before
        -- we've had a chance to help with join points; that comes next
        rhs2      = markJoinOneShots is_rec mb_join_arity rhs1
        rhs_usage = adjustRhsUsage mb_join_arity rhs2 usage
    in WithUsageDetails rhs_usage rhs2

markJoinOneShots :: RecFlag -> Maybe JoinArity -> CoreExpr -> CoreExpr
-- For a /non-recursive/ join point we can mark all
-- its join-lambdas as one-shot; and it's a good idea to do so
markJoinOneShots NonRecursive (Just join_arity) rhs
  = go join_arity rhs
  where
    go 0 rhs         = rhs
    go n (Lam b rhs) = Lam (if isId b then setOneShotLambda b else b)
                           (go (n-1) rhs)
    go _ rhs         = rhs  -- Not enough lambdas.  This can legitimately happen.
                            -- e.g.    let j = case ... in j True
                            -- This will become an arity-1 join point after the
                            -- simplifier has eta-expanded it; but it may not have
                            -- enough lambdas /yet/. (Lint checks that JoinIds do
                            -- have enough lambdas.)
markJoinOneShots _ _ rhs
  = rhs

occAnalUnfolding :: OccEnv
                 -> RecFlag
                 -> Maybe JoinArity   -- See Note [Join points and unfoldings/rules]
                 -> Unfolding
                 -> WithUsageDetails Unfolding
-- Occurrence-analyse a stable unfolding;
-- discard a non-stable one altogether.
occAnalUnfolding !env is_rec mb_join_arity unf
  = case unf of
      unf@(CoreUnfolding { uf_tmpl = rhs, uf_src = src })
        | isStableSource src ->
            let (WithUsageDetails usage rhs') = occAnalRhs env is_rec mb_join_arity rhs

                unf' | noBinderSwaps env = unf  -- Note [Unfoldings and rules]
                     | otherwise         = unf { uf_tmpl = rhs' }
            in WithUsageDetails (markAllMany usage) unf'
              -- markAllMany: see Note [Occurrences in stable unfoldings]

        | otherwise -> WithUsageDetails emptyDetails unf
              -- For non-Stable unfoldings we leave them undisturbed, but
              -- don't count their usage because the simplifier will discard them.
              -- We leave them undisturbed because nodeScore uses their size info
              -- to guide its decisions.  It's ok to leave un-substituted
              -- expressions in the tree because all the variables that were in
              -- scope remain in scope; there is no cloning etc.

      unf@(DFunUnfolding { df_bndrs = bndrs, df_args = args })
        -> WithUsageDetails final_usage (unf { df_args = args' })
        where
          env' = env `addInScope` bndrs
          (WithUsageDetails usage args') = occAnalList env' args
          final_usage = markAllManyNonTail (delDetailsList usage bndrs)
                        `addLamCoVarOccs` bndrs
                        `delDetailsList` bndrs
            -- delDetailsList; no need to use tagLamBinders because we
            -- never inline DFuns so the occ-info on binders doesn't matter

      unf -> WithUsageDetails emptyDetails unf

occAnalRules :: OccEnv
             -> Maybe JoinArity  -- See Note [Join points and unfoldings/rules]
             -> Id               -- Get rules from here
             -> [(CoreRule,      -- Each (non-built-in) rule
                  UsageDetails,  -- Usage details for LHS
                  UsageDetails)] -- Usage details for RHS
occAnalRules !env mb_join_arity bndr
  = map occ_anal_rule (idCoreRules bndr)
  where
    occ_anal_rule rule@(Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs })
      = (rule', lhs_uds', rhs_uds')
      where
        env'  = env `addInScope` bndrs
        rule' | noBinderSwaps env = rule  -- Note [Unfoldings and rules]
              | otherwise         = rule { ru_args = args', ru_rhs = rhs' }

        (WithUsageDetails lhs_uds args') = occAnalList env' args
        lhs_uds' = markAllManyNonTail (lhs_uds `delDetailsList` bndrs)
                   `addLamCoVarOccs` bndrs

        (WithUsageDetails rhs_uds rhs') = occAnal env' rhs
                            -- Note [Rules are extra RHSs]
                            -- Note [Rule dependency info]
        rhs_uds' = markAllNonTailIf (not exact_join) $
                   markAllMany                       $
                   rhs_uds `delDetailsList` bndrs

        exact_join = exactJoin mb_join_arity args
                     -- See Note [Join points and unfoldings/rules]

    occ_anal_rule other_rule = (other_rule, emptyDetails, emptyDetails)

{- Note [Join point RHSs]
~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
   x = e
   join j = Just x

We want to inline x into j right away, so we don't want to give
the join point a RhsCtxt (#14137).  It's not a huge deal, because
the FloatIn pass knows to float into join point RHSs; and the
simplifier does not float things out of join point RHSs.  But it's
a simple, cheap thing to do.  See #14137.
Note [Occurrences in stable unfoldings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
    f p = BIG
    {-# INLINE g #-}
    g y = not (f y)
where this is the /only/ occurrence of 'f'.  So 'g' will get a stable
unfolding.  Now suppose that g's RHS gets optimised (perhaps by a rule
or inlining f) so that it doesn't mention 'f' any more.  Now the last
remaining call to f is in g's Stable unfolding. But, even though there is
only one syntactic occurrence of f, we do /not/ want to do
preinlineUnconditionally here!

The INLINE pragma says "inline exactly this RHS"; perhaps the
programmer wants to expose that 'not', say.  If we inline f that will make
the Stable unfolding big, and that wasn't what the programmer wanted.

Another way to think about it: if we inlined g as-is into multiple
call sites, now there'd be multiple calls to f.

Bottom line: treat all occurrences in a stable unfolding as "Many".

Note [Unfoldings and rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generally unfoldings and rules are already occurrence-analysed, so we
don't want to reconstruct their trees; we just want to analyse them to
find how they use their free variables.

EXCEPT if there is a binder-swap going on, in which case we do want to
produce a new tree.

So we have a fast-path that keeps the old tree if the occ_bs_env is
empty.  This just saves a bit of allocation and reconstruction; not
a big deal.

Note [Cascading inlines]
~~~~~~~~~~~~~~~~~~~~~~~~
By default we use an rhsCtxt for the RHS of a binding.  This tells the
occ anal that it's looking at an RHS, which has an effect in
occAnalApp.  In particular, for constructor applications, it makes
the arguments appear to have NoOccInfo, so that we don't inline into
them.  Thus   x = f y
              k = Just x
we do not want to inline x.

But there's a problem.  Consider
     x1 = a0 : []
     x2 = a1 : x1
     x3 = a2 : x2
     g  = f x3
First time round, it looks as if x1 and x2 occur as an arg of a
let-bound constructor ==> give them a many-occurrence.
But then x3 is inlined (unconditionally as it happens) and next time round, x2 will be, and the next time round x1 will be Result: multiple simplifier iterations. Sigh. So, when analysing the RHS of x3 we notice that x3 will itself definitely inline the next time round, and so we analyse x3's rhs in an ordinary context, not rhsCtxt. Hence the "certainly_inline" stuff. Annoyingly, we have to approximate GHC.Core.Opt.Simplify.Utils.preInlineUnconditionally. If (a) the RHS is expandable (see isExpandableApp in occAnalApp), and (b) certainly_inline says "yes" when preInlineUnconditionally says "no" then the simplifier iterates indefinitely: x = f y k = Just x -- We decide that k is 'certainly_inline' v = ...k... -- but preInlineUnconditionally doesn't inline it inline ==> k = Just (f y) v = ...k... float ==> x1 = f y k = Just x1 v = ...k... This is worse than the slow cascade, so we only want to say "certainly_inline" if it really is certain. Look at the note with preInlineUnconditionally for the various clauses. ************************************************************************ * * Expressions * * ************************************************************************ -} occAnalList :: OccEnv -> [CoreExpr] -> WithUsageDetails [CoreExpr] occAnalList !_ [] = WithUsageDetails emptyDetails [] occAnalList env (e:es) = let (WithUsageDetails uds1 e') = occAnal env e (WithUsageDetails uds2 es') = occAnalList env es in WithUsageDetails (uds1 `andUDs` uds2) (e' : es') occAnal :: OccEnv -> CoreExpr -> WithUsageDetails CoreExpr -- Gives info only about the "interesting" Ids occAnal !_ expr@(Lit _) = WithUsageDetails emptyDetails expr occAnal env expr@(Var _) = occAnalApp env (expr, [], []) -- At one stage, I gathered the idRuleVars for the variable here too, -- which in a way is the right thing to do. 
-- But that went wrong right after specialisation, when -- the *occurrences* of the overloaded function didn't have any -- rules in them, so the *specialised* versions looked as if they -- weren't used at all. occAnal _ expr@(Type ty) = WithUsageDetails (addManyOccs emptyDetails (coVarsOfType ty)) expr occAnal _ expr@(Coercion co) = WithUsageDetails (addManyOccs emptyDetails (coVarsOfCo co)) expr -- See Note [Gather occurrences of coercion variables] {- Note [Gather occurrences of coercion variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We need to gather info about what coercion variables appear, for two reasons: 1. So that we can sort them into the right place when doing dependency analysis. 2. So that we know when they are surely dead. It is useful to know when they a coercion variable is surely dead, when we want to discard a case-expression, in GHC.Core.Opt.Simplify.rebuildCase. For example (#20143): case unsafeEqualityProof @blah of UnsafeRefl cv -> ...no use of cv... Here we can discard the case, since unsafeEqualityProof always terminates. But only if the coercion variable 'cv' is unused. Another example from #15696: we had something like case eq_sel d of co -> ...(typeError @(...co...) "urk")... Then 'd' was substituted by a dictionary, so the expression simpified to case (Coercion ) of cv -> ...(typeError @(...cv...) "urk")... We can only drop the case altogether if 'cv' is unused, which is not the case here. Conclusion: we need accurate dead-ness info for CoVars. We gather CoVar occurrences from: * The (Type ty) and (Coercion co) cases of occAnal * The type 'ty' of a lambda-binder (\(x:ty). blah) See addLamCoVarOccs But it is not necessary to gather CoVars from the types of other binders. 
* For let-binders, if the type mentions a CoVar, so will the RHS
  (since it has the same type)
* For case-alt binders, if the type mentions a CoVar, so will the
  scrutinee (since it has the same type)
-}

-- Tick case: analyse the body, then adjust usage according to how the
-- tick scopes (soft-scoped ticks are transparent; others make the body
-- behave like the inside of a lambda).
occAnal env (Tick tickish body)
  | SourceNote{} <- tickish
  = WithUsageDetails usage (Tick tickish body')
          -- SourceNotes are best-effort; so we just proceed as usual.
          -- If we drop a tick due to the issues described below it's
          -- not the end of the world.

  | tickish `tickishScopesLike` SoftScope
  = WithUsageDetails (markAllNonTail usage) (Tick tickish body')

  | Breakpoint _ _ ids <- tickish
  = WithUsageDetails (usage_lam `andUDs` foldr addManyOcc emptyDetails ids) (Tick tickish body')
          -- never substitute for any of the Ids in a Breakpoint

  | otherwise
  = WithUsageDetails usage_lam (Tick tickish body')
  where
    (WithUsageDetails usage body') = occAnal env body
    -- for a non-soft tick scope, we can inline lambdas only
    usage_lam = markAllNonTail (markAllInsideLam usage)
                  -- TODO There may be ways to make ticks and join points play
                  -- nicer together, but right now there are problems:
                  --   let j x = ... in tick<t> (j 1)
                  -- Making j a join point may cause the simplifier to drop t
                  -- (if the tick is put into the continuation). So we don't
                  -- count j 1 as a tail call.
                  -- See #14242.
occAnal env (Cast expr co) = let (WithUsageDetails usage expr') = occAnal env expr usage1 = addManyOccs usage (coVarsOfCo co) -- usage2: see Note [Gather occurrences of coercion variables] usage2 = markAllNonTail usage1 -- usage3: calls inside expr aren't tail calls any more in WithUsageDetails usage2 (Cast expr' co) occAnal env app@(App _ _) = occAnalApp env (collectArgsTicks tickishFloatable app) occAnal env expr@(Lam {}) = let (WithUsageDetails usage expr') = occAnalLam env expr final_usage = markAllInsideLamIf (not (isOneShotFun expr')) $ markAllNonTail usage in WithUsageDetails final_usage expr' occAnal env (Case scrut bndr ty alts) = let (WithUsageDetails scrut_usage scrut') = occAnal (scrutCtxt env alts) scrut alt_env = addBndrSwap scrut' bndr $ env { occ_encl = OccVanilla } `addOneInScope` bndr (alts_usage_s, alts') = mapAndUnzip (do_alt alt_env) alts alts_usage = foldr orUDs emptyDetails alts_usage_s (alts_usage1, tagged_bndr) = tagLamBinder alts_usage bndr total_usage = markAllNonTail scrut_usage `andUDs` alts_usage1 -- Alts can have tail calls, but the scrutinee can't in WithUsageDetails total_usage (Case scrut' tagged_bndr ty alts') where do_alt !env (Alt con bndrs rhs) = let (WithUsageDetails rhs_usage1 rhs1) = occAnal (env `addInScope` bndrs) rhs (alt_usg, tagged_bndrs) = tagLamBinders rhs_usage1 bndrs in -- See Note [Binders in case alternatives] (alt_usg, Alt con tagged_bndrs rhs1) occAnal env (Let bind body) = let body_env = env { occ_encl = OccVanilla } `addInScope` bindersOf bind (WithUsageDetails body_usage body') = occAnal body_env body (WithUsageDetails final_usage binds') = occAnalBind env NotTopLevel noImpRuleEdges bind body_usage in WithUsageDetails final_usage (mkLets binds' body') occAnalArgs :: OccEnv -> CoreExpr -> [CoreExpr] -> [OneShots] -> WithUsageDetails CoreExpr -- The `fun` argument is just an accumulating parameter, -- the base for building the application we return occAnalArgs !env fun args !one_shots = go emptyDetails fun args 
one_shots where go uds fun [] _ = WithUsageDetails uds fun go uds fun (arg:args) one_shots = go (uds `andUDs` arg_uds) (fun `App` arg') args one_shots' where !(WithUsageDetails arg_uds arg') = occAnal arg_env arg !(arg_env, one_shots') | isTypeArg arg = (env, one_shots) | otherwise = valArgCtxt env one_shots {- Applications are dealt with specially because we want the "build hack" to work. Note [Arguments of let-bound constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f x = let y = expensive x in let z = (True,y) in (case z of {(p,q)->q}, case z of {(p,q)->q}) We feel free to duplicate the WHNF (True,y), but that means that y may be duplicated thereby. If we aren't careful we duplicate the (expensive x) call! Constructors are rather like lambdas in this way. -} occAnalApp :: OccEnv -> (Expr CoreBndr, [Arg CoreBndr], [CoreTickish]) -> WithUsageDetails (Expr CoreBndr) -- Naked variables (not applied) end up here too occAnalApp !env (Var fun, args, ticks) -- Account for join arity of runRW# continuation -- See Note [Simplification of runRW#] -- -- NB: Do not be tempted to make the next (Var fun, args, tick) -- equation into an 'otherwise' clause for this equation -- The former has a bang-pattern to occ-anal the args, and -- we don't want to occ-anal them twice in the runRW# case! -- This caused #18296 | fun `hasKey` runRWKey , [t1, t2, arg] <- args , let (WithUsageDetails usage arg') = occAnalRhs env NonRecursive (Just 1) arg = WithUsageDetails usage (mkTicks ticks $ mkApps (Var fun) [t1, t2, arg']) occAnalApp env (Var fun_id, args, ticks) = WithUsageDetails all_uds (mkTicks ticks app') where -- Lots of banged bindings: this is a very heavily bit of code, -- so it pays not to make lots of thunks here, all of which -- will ultimately be forced. 
!(fun', fun_id') = lookupBndrSwap env fun_id !(WithUsageDetails args_uds app') = occAnalArgs env fun' args one_shots fun_uds = mkOneOcc fun_id' int_cxt n_args -- NB: fun_uds is computed for fun_id', not fun_id -- See (BS1) in Note [The binder-swap substitution] all_uds = fun_uds `andUDs` final_args_uds !final_args_uds = markAllNonTail $ markAllInsideLamIf (isRhsEnv env && is_exp) $ args_uds -- We mark the free vars of the argument of a constructor or PAP -- as "inside-lambda", if it is the RHS of a let(rec). -- This means that nothing gets inlined into a constructor or PAP -- argument position, which is what we want. Typically those -- constructor arguments are just variables, or trivial expressions. -- We use inside-lam because it's like eta-expanding the PAP. -- -- This is the *whole point* of the isRhsEnv predicate -- See Note [Arguments of let-bound constructors] !n_val_args = valArgCount args !n_args = length args !int_cxt = case occ_encl env of OccScrut -> IsInteresting _other | n_val_args > 0 -> IsInteresting | otherwise -> NotInteresting !is_exp = isExpandableApp fun_id n_val_args -- See Note [CONLIKE pragma] in GHC.Types.Basic -- The definition of is_exp should match that in GHC.Core.Opt.Simplify.prepareRhs one_shots = argsOneShots (idDmdSig fun_id) guaranteed_val_args guaranteed_val_args = n_val_args + length (takeWhile isOneShotInfo (occ_one_shots env)) -- See Note [Sources of one-shot information], bullet point A'] occAnalApp env (fun, args, ticks) = WithUsageDetails (markAllNonTail (fun_uds `andUDs` args_uds)) (mkTicks ticks app') where !(WithUsageDetails args_uds app') = occAnalArgs env fun' args [] !(WithUsageDetails fun_uds fun') = occAnal (addAppCtxt env args) fun -- The addAppCtxt is a bit cunning. One iteration of the simplifier -- often leaves behind beta redexs like -- (\x y -> e) a1 a2 -- Here we would like to mark x,y as one-shot, and treat the whole -- thing much like a let. 
We do this by pushing some OneShotLam items -- onto the context stack. addAppCtxt :: OccEnv -> [Arg CoreBndr] -> OccEnv addAppCtxt env@(OccEnv { occ_one_shots = ctxt }) args | n_val_args > 0 = env { occ_one_shots = replicate n_val_args OneShotLam ++ ctxt , occ_encl = OccVanilla } -- OccVanilla: the function part of the application -- is no longer on OccRhs or OccScrut | otherwise = env where n_val_args = valArgCount args {- Note [Sources of one-shot information] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The occurrence analyser obtains one-shot-lambda information from two sources: A: Saturated applications: eg f e1 .. en In general, given a call (f e1 .. en) we can propagate one-shot info from f's strictness signature into e1 .. en, but /only/ if n is enough to saturate the strictness signature. A strictness signature like f :: C1(C1(L))LS means that *if f is applied to three arguments* then it will guarantee to call its first argument at most once, and to call the result of that at most once. But if f has fewer than three arguments, all bets are off; e.g. map (f (\x y. expensive) e2) xs Here the \x y abstraction may be called many times (once for each element of xs) so we should not mark x and y as one-shot. But if it was map (f (\x y. expensive) 3 2) xs then the first argument of f will be called at most once. The one-shot info, derived from f's strictness signature, is computed by 'argsOneShots', called in occAnalApp. A': Non-obviously saturated applications: eg build (f (\x y -> expensive)) where f is as above. In this case, f is only manifestly applied to one argument, so it does not look saturated. So by the previous point, we should not use its strictness signature to learn about the one-shotness of \x y. But in this case we can: build is fully applied, so we may use its strictness signature; and from that we learn that build calls its argument with two arguments *at most once*. So there is really only one call to f, and it will have three arguments. 
In that sense, f is saturated, and we may proceed as described above. Hence the computation of 'guaranteed_val_args' in occAnalApp, using '(occ_one_shots env)'. See also #13227, comment:9 B: Let-bindings: eg let f = \c. let ... in \n -> blah in (build f, build f) Propagate one-shot info from the demanand-info on 'f' to the lambdas in its RHS (which may not be syntactically at the top) This information must have come from a previous run of the demanand analyser. Previously, the demand analyser would *also* set the one-shot information, but that code was buggy (see #11770), so doing it only in on place, namely here, is saner. Note [OneShots] ~~~~~~~~~~~~~~~ When analysing an expression, the occ_one_shots argument contains information about how the function is being used. The length of the list indicates how many arguments will eventually be passed to the analysed expression, and the OneShotInfo indicates whether this application is once or multiple times. Example: Context of f occ_one_shots when analysing f f 1 2 [OneShot, OneShot] map (f 1) [OneShot, NoOneShotInfo] build f [OneShot, OneShot] f 1 2 `seq` f 2 1 [NoOneShotInfo, OneShot] Note [Binders in case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider case x of y { (a,b) -> f y } We treat 'a', 'b' as dead, because they don't physically occur in the case alternative. (Indeed, a variable is dead iff it doesn't occur in its scope in the output of OccAnal.) It really helps to know when binders are unused. See esp the call to isDeadBinder in Simplify.mkDupableAlt In this example, though, the Simplifier will bring 'a' and 'b' back to life, because it binds 'y' to (a,b) (imagine got inlined and scrutinised y). 
-}

{- ************************************************************************
*                                                                      *
                    OccEnv
*                                                                      *
************************************************************************ -}

-- | The static environment carried down by the occurrence analyser.
data OccEnv
  = OccEnv { occ_encl      :: !OccEncl      -- Enclosing context information
           , occ_one_shots :: !OneShots     -- See Note [OneShots]
           , occ_unf_act   :: Id -> Bool          -- Which Id unfoldings are active
           , occ_rule_act  :: Activation -> Bool  -- Which rules are active
             -- See Note [Finding rule RHS free vars]

           -- See Note [The binder-swap substitution]
           -- If  x :-> (y, mco)  is in the env,
           -- then please replace x by (y |> sym mco)
           -- Invariant of course: idType x = exprType (y |> sym mco)
           , occ_bs_env  :: !(VarEnv (OutId, MCoercion))
                            -- Domain is Global and Local Ids
                            -- Range is just Local Ids
           , occ_bs_rng  :: !VarSet
                            -- Vars (TyVars and Ids) free in the range of occ_bs_env
    }

-----------------------------
-- OccEncl is used to control whether to inline into constructor arguments
-- For example:
--      x = (p,q)               -- Don't inline p or q
--      y = /\a -> (p a, q a)   -- Still don't inline p or q
--      z = f (p,q)             -- Do inline p,q; it may make a rule fire
-- So OccEncl tells enough about the context to know what to do when
-- we encounter a constructor application or PAP.
--
-- OccScrut is used to set the "interesting context" field of OneOcc

data OccEncl
  = OccRhs         -- RHS of let(rec), albeit perhaps inside a type lambda
                   -- Don't inline into constructor args here

  | OccScrut       -- Scrutinee of a case
                   -- Can inline into constructor args

  | OccVanilla     -- Argument of function, body of lambda, etc
                   -- Do inline into constructor args here

instance Outputable OccEncl where
  ppr OccRhs     = text "occRhs"
  ppr OccScrut   = text "occScrut"
  ppr OccVanilla = text "occVanilla"

-- See Note [OneShots]
type OneShots = [OneShotInfo]

-- | The environment used at the start of occurrence analysis:
-- vanilla context, no one-shot info, empty binder-swap substitution.
initOccEnv :: OccEnv
initOccEnv
  = OccEnv { occ_encl      = OccVanilla
           , occ_one_shots = []

                 -- To be conservative, we say that all
                 -- inlines and rules are active
           , occ_unf_act   = \_ -> True
           , occ_rule_act  = \_ -> True

           , occ_bs_env = emptyVarEnv
           , occ_bs_rng = emptyVarSet }

-- | True <=> the binder-swap substitution is empty, so unfoldings and
-- rules may be kept unchanged; see Note [Unfoldings and rules]
noBinderSwaps :: OccEnv -> Bool
noBinderSwaps (OccEnv { occ_bs_env = bs_env }) = isEmptyVarEnv bs_env

-- | Environment for analysing the scrutinee of a case expression
scrutCtxt :: OccEnv -> [CoreAlt] -> OccEnv
scrutCtxt !env alts
  | interesting_alts = env { occ_encl = OccScrut,   occ_one_shots = [] }
  | otherwise        = env { occ_encl = OccVanilla, occ_one_shots = [] }
  where
    interesting_alts = case alts of
                         []    -> False
                         [alt] -> not (isDefaultAlt alt)
                         _     -> True
     -- 'interesting_alts' is True if the case has at least one
     -- non-default alternative.  That in turn influences
     -- pre/postInlineUnconditionally.  Grep for "occ_int_cxt"!
rhsCtxt :: OccEnv -> OccEnv rhsCtxt !env = env { occ_encl = OccRhs, occ_one_shots = [] } valArgCtxt :: OccEnv -> [OneShots] -> (OccEnv, [OneShots]) valArgCtxt !env [] = (env { occ_encl = OccVanilla, occ_one_shots = [] }, []) valArgCtxt env (one_shots:one_shots_s) = (env { occ_encl = OccVanilla, occ_one_shots = one_shots }, one_shots_s) isRhsEnv :: OccEnv -> Bool isRhsEnv (OccEnv { occ_encl = cxt }) = case cxt of OccRhs -> True _ -> False addOneInScope :: OccEnv -> CoreBndr -> OccEnv -- Needed for all Vars not just Ids -- See Note [The binder-swap substitution] (BS3) addOneInScope env@(OccEnv { occ_bs_env = swap_env, occ_bs_rng = rng_vars }) bndr | bndr `elemVarSet` rng_vars = env { occ_bs_env = emptyVarEnv, occ_bs_rng = emptyVarSet } | otherwise = env { occ_bs_env = swap_env `delVarEnv` bndr } addInScope :: OccEnv -> [Var] -> OccEnv -- Needed for all Vars not just Ids -- See Note [The binder-swap substitution] (BS3) addInScope env@(OccEnv { occ_bs_env = swap_env, occ_bs_rng = rng_vars }) bndrs | any (`elemVarSet` rng_vars) bndrs = env { occ_bs_env = emptyVarEnv, occ_bs_rng = emptyVarSet } | otherwise = env { occ_bs_env = swap_env `delVarEnvList` bndrs } -------------------- transClosureFV :: VarEnv VarSet -> VarEnv VarSet -- If (f,g), (g,h) are in the input, then (f,h) is in the output -- as well as (f,g), (g,h) transClosureFV env | no_change = env | otherwise = transClosureFV (listToUFM_Directly new_fv_list) where (no_change, new_fv_list) = mapAccumL bump True (nonDetUFMToList env) -- It's OK to use nonDetUFMToList here because we'll forget the -- ordering by creating a new set with listToUFM bump no_change (b,fvs) | no_change_here = (no_change, (b,fvs)) | otherwise = (False, (b,new_fvs)) where (new_fvs, no_change_here) = extendFvs env fvs ------------- extendFvs_ :: VarEnv VarSet -> VarSet -> VarSet extendFvs_ env s = fst (extendFvs env s) -- Discard the Bool flag extendFvs :: VarEnv VarSet -> VarSet -> (VarSet, Bool) -- (extendFVs env s) returns -- (s `union` 
env(s), env(s) `subset` s) extendFvs env s | isNullUFM env = (s, True) | otherwise = (s `unionVarSet` extras, extras `subVarSet` s) where extras :: VarSet -- env(s) extras = nonDetStrictFoldUFM unionVarSet emptyVarSet $ -- It's OK to use nonDetStrictFoldUFM here because unionVarSet commutes intersectUFM_C (\x _ -> x) env (getUniqSet s) {- ************************************************************************ * * Binder swap * * ************************************************************************ Note [Binder swap] ~~~~~~~~~~~~~~~~~~ The "binder swap" transformation swaps occurrence of the scrutinee of a case for occurrences of the case-binder: (1) case x of b { pi -> ri } ==> case x of b { pi -> ri[b/x] } (2) case (x |> co) of b { pi -> ri } ==> case (x |> co) of b { pi -> ri[b |> sym co/x] } The substitution ri[b/x] etc is done by the occurrence analyser. See Note [The binder-swap substitution]. There are two reasons for making this swap: (A) It reduces the number of occurrences of the scrutinee, x. That in turn might reduce its occurrences to one, so we can inline it and save an allocation. E.g. let x = factorial y in case x of b { I# v -> ...x... } If we replace 'x' by 'b' in the alternative we get let x = factorial y in case x of b { I# v -> ...b... } and now we can inline 'x', thus case (factorial y) of b { I# v -> ...b... } (B) The case-binder b has unfolding information; in the example above we know that b = I# v. That in turn allows nested cases to simplify. Consider case x of b { I# v -> ...(case x of b2 { I# v2 -> rhs })... If we replace 'x' by 'b' in the alternative we get case x of b { I# v -> ...(case b of b2 { I# v2 -> rhs })... and now it is trivial to simplify the inner case: case x of b { I# v -> ...(let b2 = b in rhs)... 
The same can happen even if the scrutinee is a variable with a cast: see Note [Case of cast] The reason for doing these transformations /here in the occurrence analyser/ is because it allows us to adjust the OccInfo for 'x' and 'b' as we go. * Suppose the only occurrences of 'x' are the scrutinee and in the ri; then this transformation makes it occur just once, and hence get inlined right away. * If instead the Simplifier replaces occurrences of x with occurrences of b, that will mess up b's occurrence info. That in turn might have consequences. There is a danger though. Consider let v = x +# y in case (f v) of w -> ...v...v... And suppose that (f v) expands to just v. Then we'd like to use 'w' instead of 'v' in the alternative. But it may be too late; we may have substituted the (cheap) x+#y for v in the same simplifier pass that reduced (f v) to v. I think this is just too bad. CSE will recover some of it. Note [The binder-swap substitution] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The binder-swap is implemented by the occ_bs_env field of OccEnv. There are two main pieces: * Given case x |> co of b { alts } we add [x :-> (b, co)] to the occ_bs_env environment; this is done by addBndrSwap. * Then, at an occurrence of a variable, we look up in the occ_bs_env to perform the swap. This is done by lookupBndrSwap. Some tricky corners: (BS1) We do the substitution before gathering occurrence info. So in the above example, an occurrence of x turns into an occurrence of b, and that's what we gather in the UsageDetails. It's as if the binder-swap occurred before occurrence analysis. See the computation of fun_uds in occAnalApp. (BS2) When doing a lookup in occ_bs_env, we may need to iterate, as you can see implemented in lookupBndrSwap. Why? Consider case x of a { 1# -> e1; DEFAULT -> case x of b { 2# -> e2; DEFAULT -> case x of c { 3# -> e3; DEFAULT -> ..x..a..b.. 
}}} At the first case addBndrSwap will extend occ_bs_env with [x :-> a] At the second case we occ-anal the scrutinee 'x', which looks up 'x in occ_bs_env, returning 'a', as it should. Then addBndrSwap will add [a :-> b] to occ_bs_env, yielding occ_bs_env = [x :-> a, a :-> b] At the third case we'll again look up 'x' which returns 'a'. But we don't want to stop the lookup there, else we'll end up with case x of a { 1# -> e1; DEFAULT -> case a of b { 2# -> e2; DEFAULT -> case a of c { 3# -> e3; DEFAULT -> ..a..b..c.. }}} Instead, we want iterate the lookup in addBndrSwap, to give case x of a { 1# -> e1; DEFAULT -> case a of b { 2# -> e2; DEFAULT -> case b of c { 3# -> e3; DEFAULT -> ..c..c..c.. }}} This makes a particular difference for case-merge, which works only if the scrutinee is the case-binder of the immediately enclosing case (Note [Merge Nested Cases] in GHC.Core.Opt.Simplify.Utils See #19581 for the bug report that showed this up. (BS3) We need care when shadowing. Suppose [x :-> b] is in occ_bs_env, and we encounter: (i) \x. blah Here we want to delete the x-binding from occ_bs_env (ii) \b. blah This is harder: we really want to delete all bindings that have 'b' free in the range. That is a bit tiresome to implement, so we compromise. We keep occ_bs_rng, which is the set of free vars of rng(occc_bs_env). If a binder shadows any of these variables, we discard all of occ_bs_env. Safe, if a bit brutal. NB, however: the simplifer de-shadows the code, so the next time around this won't happen. These checks are implemented in addInScope. (i) is needed only for Ids, but (ii) is needed for tyvars too (#22623) because if occ_bs_env has [x :-> ...a...] where `a` is a tyvar, we must not replace `x` by `...a...` under /\a. ...x..., or similarly under a case pattern match that binds `a`. An alternative would be for the occurrence analyser to do cloning as it goes. In principle it could do so, but it'd make it a bit more complicated and there is no great benefit. 
The simplifer uses cloning to get a no-shadowing situation, the care-when-shadowing behaviour above isn't needed for long. (BS4) The domain of occ_bs_env can include GlobaIds. Eg case M.foo of b { alts } We extend occ_bs_env with [M.foo :-> b]. That's fine. (BS5) We have to apply the occ_bs_env substitution uniformly, including to (local) rules and unfoldings. Historical note --------------- We used to do the binder-swap transformation by introducing a proxy let-binding, thus; case x of b { pi -> ri } ==> case x of b { pi -> let x = b in ri } But that had two problems: 1. If 'x' is an imported GlobalId, we'd end up with a GlobalId on the LHS of a let-binding which isn't allowed. We worked around this for a while by "localising" x, but it turned out to be very painful #16296, 2. In CorePrep we use the occurrence analyser to do dead-code elimination (see Note [Dead code in CorePrep]). But that occasionally led to an unlifted let-binding case x of b { DEFAULT -> let x::Int# = b in ... } which disobeys one of CorePrep's output invariants (no unlifted let-bindings) -- see #5433. Doing a substitution (via occ_bs_env) is much better. Note [Case of cast] ~~~~~~~~~~~~~~~~~~~ Consider case (x `cast` co) of b { I# -> ... (case (x `cast` co) of {...}) ... We'd like to eliminate the inner case. That is the motivation for equation (2) in Note [Binder swap]. When we get to the inner case, we inline x, cancel the casts, and away we go. Note [Zap case binders in proxy bindings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From the original case x of cb(dead) { p -> ...x... } we will get case x of cb(live) { p -> ...cb... } Core Lint never expects to find an *occurrence* of an Id marked as Dead, so we must zap the OccInfo on cb before making the binding x = cb. See #5028. NB: the OccInfo on /occurrences/ really doesn't matter much; the simplifier doesn't use it. So this is only to satisfy the perhaps-over-picky Lint. 
Historical Note [no-case-of-case] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We *used* to suppress the binder-swap in case expressions when -fno-case-of-case is on. Old remarks: "This happens in the first simplifier pass, and enhances full laziness. Here's the bad case: f = \ y -> ...(case x of I# v -> ...(case x of ...) ... ) If we eliminate the inner case, we trap it inside the I# v -> arm, which might prevent some full laziness happening. I've seen this in action in spectral/cichelli/Prog.hs: [(m,n) | m <- [1..max], n <- [1..max]] Hence the check for NoCaseOfCase." However, now the full-laziness pass itself reverses the binder-swap, so this check is no longer necessary. Historical Note [Suppressing the case binder-swap] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This old note describes a problem that is also fixed by doing the binder-swap in OccAnal: There is another situation when it might make sense to suppress the case-expression binde-swap. If we have case x of w1 { DEFAULT -> case x of w2 { A -> e1; B -> e2 } ...other cases .... } We'll perform the binder-swap for the outer case, giving case x of w1 { DEFAULT -> case w1 of w2 { A -> e1; B -> e2 } ...other cases .... } But there is no point in doing it for the inner case, because w1 can't be inlined anyway. Furthermore, doing the case-swapping involves zapping w2's occurrence info (see paragraphs that follow), and that forces us to bind w2 when doing case merging. So we get case x of w1 { A -> let w2 = w1 in e1 B -> let w2 = w1 in e2 ...other cases .... } This is plain silly in the common case where w2 is dead. Even so, I can't see a good way to implement this idea. I tried not doing the binder-swap if the scrutinee was already evaluated but that failed big-time: data T = MkT !Int case v of w { MkT x -> case x of x1 { I# y1 -> case x of x2 { I# y2 -> ... Notice that because MkT is strict, x is marked "evaluated". 
But to eliminate the last case, we must either make sure that x (as well as x1) has unfolding MkT y1. The straightforward thing to do is to do the binder-swap. So this whole note is a no-op. It's fixed by doing the binder-swap in OccAnal because we can do the binder-swap unconditionally and still get occurrence analysis information right. -} addBndrSwap :: OutExpr -> Id -> OccEnv -> OccEnv -- See Note [The binder-swap substitution] addBndrSwap scrut case_bndr env@(OccEnv { occ_bs_env = swap_env, occ_bs_rng = rng_vars }) | Just (scrut_var, mco) <- get_scrut_var (stripTicksTopE (const True) scrut) , scrut_var /= case_bndr -- Consider: case x of x { ... } -- Do not add [x :-> x] to occ_bs_env, else lookupBndrSwap will loop = env { occ_bs_env = extendVarEnv swap_env scrut_var (case_bndr', mco) , occ_bs_rng = rng_vars `extendVarSet` case_bndr' `unionVarSet` tyCoVarsOfMCo mco } | otherwise = env where get_scrut_var :: OutExpr -> Maybe (OutVar, MCoercion) get_scrut_var (Var v) = Just (v, MRefl) get_scrut_var (Cast (Var v) co) = Just (v, MCo co) -- See Note [Case of cast] get_scrut_var _ = Nothing case_bndr' = zapIdOccInfo case_bndr -- See Note [Zap case binders in proxy bindings] lookupBndrSwap :: OccEnv -> Id -> (CoreExpr, Id) -- See Note [The binder-swap substitution] -- Returns an expression of the same type as Id lookupBndrSwap env@(OccEnv { occ_bs_env = bs_env }) bndr = case lookupVarEnv bs_env bndr of { Nothing -> (Var bndr, bndr) ; Just (bndr1, mco) -> -- Why do we iterate here? 
-- See (BS2) in Note [The binder-swap substitution] case lookupBndrSwap env bndr1 of (fun, fun_id) -> (add_cast fun mco, fun_id) } where add_cast fun MRefl = fun add_cast fun (MCo co) = Cast fun (mkSymCo co) -- We must switch that 'co' to 'sym co'; -- see the comment with occ_bs_env -- No need to test for isReflCo, because 'co' came from -- a (Cast e co) and hence is unlikely to be Refl {- ************************************************************************ * * \subsection[OccurAnal-types]{OccEnv} * * ************************************************************************ Note [UsageDetails and zapping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ On many occasions, we must modify all gathered occurrence data at once. For instance, all occurrences underneath a (non-one-shot) lambda set the 'occ_in_lam' flag to become 'True'. We could use 'mapVarEnv' to do this, but that takes O(n) time and we will do this often---in particular, there are many places where tail calls are not allowed, and each of these causes all variables to get marked with 'NoTailCallInfo'. Instead of relying on `mapVarEnv`, then, we carry three 'IdEnv's around along with the 'OccInfoEnv'. Each of these extra environments is a "zapped set" recording which variables have been zapped in some way. Zapping all occurrence info then simply means setting the corresponding zapped set to the whole 'OccInfoEnv', a fast O(1) operation. 
-} type OccInfoEnv = IdEnv OccInfo -- A finite map from ids to their usage -- INVARIANT: never IAmDead -- (Deadness is signalled by not being in the map at all) type ZappedSet = OccInfoEnv -- Values are ignored data UsageDetails = UD { ud_env :: !OccInfoEnv , ud_z_many :: !ZappedSet -- apply 'markMany' to these , ud_z_in_lam :: !ZappedSet -- apply 'markInsideLam' to these , ud_z_no_tail :: !ZappedSet } -- apply 'markNonTail' to these -- INVARIANT: All three zapped sets are subsets of the OccInfoEnv instance Outputable UsageDetails where ppr ud = ppr (ud_env (flattenUsageDetails ud)) ------------------- -- UsageDetails API andUDs, orUDs :: UsageDetails -> UsageDetails -> UsageDetails andUDs = combineUsageDetailsWith addOccInfo orUDs = combineUsageDetailsWith orOccInfo mkOneOcc :: Id -> InterestingCxt -> JoinArity -> UsageDetails mkOneOcc id int_cxt arity | isLocalId id = emptyDetails { ud_env = unitVarEnv id occ_info } | otherwise = emptyDetails where occ_info = OneOcc { occ_in_lam = NotInsideLam , occ_n_br = oneBranch , occ_int_cxt = int_cxt , occ_tail = AlwaysTailCalled arity } addManyOccId :: UsageDetails -> Id -> UsageDetails -- Add the non-committal (id :-> noOccInfo) to the usage details addManyOccId ud id = ud { ud_env = extendVarEnv (ud_env ud) id noOccInfo } -- Add several occurrences, assumed not to be tail calls addManyOcc :: Var -> UsageDetails -> UsageDetails addManyOcc v u | isId v = addManyOccId u v | otherwise = u -- Give a non-committal binder info (i.e noOccInfo) because -- a) Many copies of the specialised thing can appear -- b) We don't want to substitute a BIG expression inside a RULE -- even if that's the only occurrence of the thing -- (Same goes for INLINE.) 
addManyOccs :: UsageDetails -> VarSet -> UsageDetails addManyOccs usage id_set = nonDetStrictFoldUniqSet addManyOcc usage id_set -- It's OK to use nonDetStrictFoldUniqSet here because addManyOcc commutes addLamCoVarOccs :: UsageDetails -> [Var] -> UsageDetails -- Add any CoVars free in the type of a lambda-binder -- See Note [Gather occurrences of coercion variables] addLamCoVarOccs uds bndrs = uds `addManyOccs` coVarsOfTypes (map varType bndrs) delDetails :: UsageDetails -> Id -> UsageDetails delDetails ud bndr = ud `alterUsageDetails` (`delVarEnv` bndr) delDetailsList :: UsageDetails -> [Id] -> UsageDetails delDetailsList ud bndrs = ud `alterUsageDetails` (`delVarEnvList` bndrs) emptyDetails :: UsageDetails emptyDetails = UD { ud_env = emptyVarEnv , ud_z_many = emptyVarEnv , ud_z_in_lam = emptyVarEnv , ud_z_no_tail = emptyVarEnv } isEmptyDetails :: UsageDetails -> Bool isEmptyDetails = isEmptyVarEnv . ud_env markAllMany, markAllInsideLam, markAllNonTail, markAllManyNonTail :: UsageDetails -> UsageDetails markAllMany ud = ud { ud_z_many = ud_env ud } markAllInsideLam ud = ud { ud_z_in_lam = ud_env ud } markAllNonTail ud = ud { ud_z_no_tail = ud_env ud } markAllInsideLamIf, markAllNonTailIf :: Bool -> UsageDetails -> UsageDetails markAllInsideLamIf True ud = markAllInsideLam ud markAllInsideLamIf False ud = ud markAllNonTailIf True ud = markAllNonTail ud markAllNonTailIf False ud = ud markAllManyNonTail = markAllMany . 
markAllNonTail -- effectively sets to noOccInfo lookupDetails :: UsageDetails -> Id -> OccInfo lookupDetails ud id = case lookupVarEnv (ud_env ud) id of Just occ -> doZapping ud id occ Nothing -> IAmDead usedIn :: Id -> UsageDetails -> Bool v `usedIn` ud = isExportedId v || v `elemVarEnv` ud_env ud udFreeVars :: VarSet -> UsageDetails -> VarSet -- Find the subset of bndrs that are mentioned in uds udFreeVars bndrs ud = restrictFreeVars bndrs (ud_env ud) restrictFreeVars :: VarSet -> OccInfoEnv -> VarSet restrictFreeVars bndrs fvs = restrictUniqSetToUFM bndrs fvs ------------------- -- Auxiliary functions for UsageDetails implementation combineUsageDetailsWith :: (OccInfo -> OccInfo -> OccInfo) -> UsageDetails -> UsageDetails -> UsageDetails combineUsageDetailsWith plus_occ_info ud1 ud2 | isEmptyDetails ud1 = ud2 | isEmptyDetails ud2 = ud1 | otherwise = UD { ud_env = plusVarEnv_C plus_occ_info (ud_env ud1) (ud_env ud2) , ud_z_many = plusVarEnv (ud_z_many ud1) (ud_z_many ud2) , ud_z_in_lam = plusVarEnv (ud_z_in_lam ud1) (ud_z_in_lam ud2) , ud_z_no_tail = plusVarEnv (ud_z_no_tail ud1) (ud_z_no_tail ud2) } doZapping :: UsageDetails -> Var -> OccInfo -> OccInfo doZapping ud var occ = doZappingByUnique ud (varUnique var) occ doZappingByUnique :: UsageDetails -> Unique -> OccInfo -> OccInfo doZappingByUnique (UD { ud_z_many = many , ud_z_in_lam = in_lam , ud_z_no_tail = no_tail }) uniq occ = occ2 where occ1 | uniq `elemVarEnvByKey` many = markMany occ | uniq `elemVarEnvByKey` in_lam = markInsideLam occ | otherwise = occ occ2 | uniq `elemVarEnvByKey` no_tail = markNonTail occ1 | otherwise = occ1 alterUsageDetails :: UsageDetails -> (OccInfoEnv -> OccInfoEnv) -> UsageDetails alterUsageDetails !ud f = UD { ud_env = f (ud_env ud) , ud_z_many = f (ud_z_many ud) , ud_z_in_lam = f (ud_z_in_lam ud) , ud_z_no_tail = f (ud_z_no_tail ud) } flattenUsageDetails :: UsageDetails -> UsageDetails flattenUsageDetails ud@(UD { ud_env = env }) = UD { ud_env = mapUFM_Directly 
(doZappingByUnique ud) env , ud_z_many = emptyVarEnv , ud_z_in_lam = emptyVarEnv , ud_z_no_tail = emptyVarEnv } ------------------- -- See Note [Adjusting right-hand sides] adjustRhsUsage :: Maybe JoinArity -> CoreExpr -- Rhs, AFTER occ anal -> UsageDetails -- From body of lambda -> UsageDetails adjustRhsUsage mb_join_arity rhs usage = -- c.f. occAnal (Lam {}) markAllInsideLamIf (not one_shot) $ markAllNonTailIf (not exact_join) $ usage where one_shot = isOneShotFun rhs exact_join = exactJoin mb_join_arity bndrs (bndrs,_) = collectBinders rhs exactJoin :: Maybe JoinArity -> [a] -> Bool exactJoin Nothing _ = False exactJoin (Just join_arity) args = args `lengthIs` join_arity -- Remember join_arity includes type binders type IdWithOccInfo = Id tagLamBinders :: UsageDetails -- Of scope -> [Id] -- Binders -> (UsageDetails, -- Details with binders removed [IdWithOccInfo]) -- Tagged binders tagLamBinders usage binders = usage' `seq` (usage', bndrs') where (usage', bndrs') = mapAccumR tagLamBinder usage binders tagLamBinder :: UsageDetails -- Of scope -> Id -- Binder -> (UsageDetails, -- Details with binder removed IdWithOccInfo) -- Tagged binders -- Used for lambda and case binders -- It copes with the fact that lambda bindings can have a -- stable unfolding, used for join points tagLamBinder usage bndr = (usage2, bndr') where occ = lookupDetails usage bndr bndr' = setBinderOcc (markNonTail occ) bndr -- Don't try to make an argument into a join point usage1 = usage `delDetails` bndr usage2 | isId bndr = addManyOccs usage1 (idUnfoldingVars bndr) -- This is effectively the RHS of a -- non-join-point binding, so it's okay to use -- addManyOccsSet, which assumes no tail calls | otherwise = usage1 tagNonRecBinder :: TopLevelFlag -- At top level? 
-> UsageDetails -- Of scope -> CoreBndr -- Binder -> (UsageDetails, -- Details with binder removed IdWithOccInfo) -- Tagged binder tagNonRecBinder lvl usage binder = let occ = lookupDetails usage binder will_be_join = decideJoinPointHood lvl usage [binder] occ' | will_be_join = -- must already be marked AlwaysTailCalled assert (isAlwaysTailCalled occ) occ | otherwise = markNonTail occ binder' = setBinderOcc occ' binder usage' = usage `delDetails` binder in usage' `seq` (usage', binder') tagRecBinders :: TopLevelFlag -- At top level? -> UsageDetails -- Of body of let ONLY -> [Details] -> (UsageDetails, -- Adjusted details for whole scope, -- with binders removed [IdWithOccInfo]) -- Tagged binders -- Substantially more complicated than non-recursive case. Need to adjust RHS -- details *before* tagging binders (because the tags depend on the RHSes). tagRecBinders lvl body_uds details_s = let bndrs = map nd_bndr details_s rhs_udss = map nd_uds details_s -- 1. Determine join-point-hood of whole group, as determined by -- the *unadjusted* usage details unadj_uds = foldr andUDs body_uds rhs_udss will_be_joins = decideJoinPointHood lvl unadj_uds bndrs -- 2. Adjust usage details of each RHS, taking into account the -- join-point-hood decision rhs_udss' = [ adjustRhsUsage (mb_join_arity bndr) rhs rhs_uds | ND { nd_bndr = bndr, nd_uds = rhs_uds , nd_rhs = rhs } <- details_s ] mb_join_arity :: Id -> Maybe JoinArity mb_join_arity bndr -- Can't use willBeJoinId_maybe here because we haven't tagged -- the binder yet (the tag depends on these adjustments!) | will_be_joins , let occ = lookupDetails unadj_uds bndr , AlwaysTailCalled arity <- tailCallInfo occ = Just arity | otherwise = assert (not will_be_joins) -- Should be AlwaysTailCalled if Nothing -- we are making join points! -- 3. Compute final usage details from adjusted RHS details adj_uds = foldr andUDs body_uds rhs_udss' -- 4. 
Tag each binder with its adjusted details bndrs' = [ setBinderOcc (lookupDetails adj_uds bndr) bndr | bndr <- bndrs ] -- 5. Drop the binders from the adjusted details and return usage' = adj_uds `delDetailsList` bndrs in (usage', bndrs') setBinderOcc :: OccInfo -> CoreBndr -> CoreBndr setBinderOcc occ_info bndr | isTyVar bndr = bndr | isExportedId bndr = if isManyOccs (idOccInfo bndr) then bndr else setIdOccInfo bndr noOccInfo -- Don't use local usage info for visible-elsewhere things -- BUT *do* erase any IAmALoopBreaker annotation, because we're -- about to re-generate it and it shouldn't be "sticky" | otherwise = setIdOccInfo bndr occ_info -- | Decide whether some bindings should be made into join points or not. -- Returns `False` if they can't be join points. Note that it's an -- all-or-nothing decision, as if multiple binders are given, they're -- assumed to be mutually recursive. -- -- It must, however, be a final decision. If we say "True" for 'f', -- and then subsequently decide /not/ make 'f' into a join point, then -- the decision about another binding 'g' might be invalidated if (say) -- 'f' tail-calls 'g'. -- -- See Note [Invariants on join points] in "GHC.Core". decideJoinPointHood :: TopLevelFlag -> UsageDetails -> [CoreBndr] -> Bool decideJoinPointHood TopLevel _ _ = False decideJoinPointHood NotTopLevel usage bndrs | isJoinId (head bndrs) = warnPprTrace (not all_ok) "OccurAnal failed to rediscover join point(s)" (ppr bndrs) all_ok | otherwise = all_ok where -- See Note [Invariants on join points]; invariants cited by number below. -- Invariant 2 is always satisfiable by the simplifier by eta expansion. 
all_ok = -- Invariant 3: Either all are join points or none are all ok bndrs ok bndr | -- Invariant 1: Only tail calls, all same join arity AlwaysTailCalled arity <- tailCallInfo (lookupDetails usage bndr) , -- Invariant 1 as applied to LHSes of rules all (ok_rule arity) (idCoreRules bndr) -- Invariant 2a: stable unfoldings -- See Note [Join points and INLINE pragmas] , ok_unfolding arity (realIdUnfolding bndr) -- Invariant 4: Satisfies polymorphism rule , isValidJoinPointType arity (idType bndr) = True | otherwise = False ok_rule _ BuiltinRule{} = False -- only possible with plugin shenanigans ok_rule join_arity (Rule { ru_args = args }) = args `lengthIs` join_arity -- Invariant 1 as applied to LHSes of rules -- ok_unfolding returns False if we should /not/ convert a non-join-id -- into a join-id, even though it is AlwaysTailCalled ok_unfolding join_arity (CoreUnfolding { uf_src = src, uf_tmpl = rhs }) = not (isStableSource src && join_arity > joinRhsArity rhs) ok_unfolding _ (DFunUnfolding {}) = False ok_unfolding _ _ = True willBeJoinId_maybe :: CoreBndr -> Maybe JoinArity willBeJoinId_maybe bndr | isId bndr , AlwaysTailCalled arity <- tailCallInfo (idOccInfo bndr) = Just arity | otherwise = isJoinId_maybe bndr {- Note [Join points and INLINE pragmas] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f x = let g = \x. not -- Arity 1 {-# INLINE g #-} in case x of A -> g True True B -> g True False C -> blah2 Here 'g' is always tail-called applied to 2 args, but the stable unfolding captured by the INLINE pragma has arity 1. If we try to convert g to be a join point, its unfolding will still have arity 1 (since it is stable, and we don't meddle with stable unfoldings), and Lint will complain (see Note [Invariants on join points], (2a), in GHC.Core. #13413. Moreover, since g is going to be inlined anyway, there is no benefit from making it a join point. If it is recursive, and uselessly marked INLINE, this will stop us making it a join point, which is annoying. 
But occasionally (notably in class methods; see Note [Instances and loop breakers] in GHC.Tc.TyCl.Instance) we mark recursive things as INLINE but the recursion unravels; so ignoring INLINE pragmas on recursive things isn't good either. See Invariant 2a of Note [Invariants on join points] in GHC.Core ************************************************************************ * * \subsection{Operations over OccInfo} * * ************************************************************************ -} markMany, markInsideLam, markNonTail :: OccInfo -> OccInfo markMany IAmDead = IAmDead markMany occ = ManyOccs { occ_tail = occ_tail occ } markInsideLam occ@(OneOcc {}) = occ { occ_in_lam = IsInsideLam } markInsideLam occ = occ markNonTail IAmDead = IAmDead markNonTail occ = occ { occ_tail = NoTailCallInfo } addOccInfo, orOccInfo :: OccInfo -> OccInfo -> OccInfo addOccInfo a1 a2 = assert (not (isDeadOcc a1 || isDeadOcc a2)) $ ManyOccs { occ_tail = tailCallInfo a1 `andTailCallInfo` tailCallInfo a2 } -- Both branches are at least One -- (Argument is never IAmDead) -- (orOccInfo orig new) is used -- when combining occurrence info from branches of a case orOccInfo (OneOcc { occ_in_lam = in_lam1 , occ_n_br = nbr1 , occ_int_cxt = int_cxt1 , occ_tail = tail1 }) (OneOcc { occ_in_lam = in_lam2 , occ_n_br = nbr2 , occ_int_cxt = int_cxt2 , occ_tail = tail2 }) = OneOcc { occ_n_br = nbr1 + nbr2 , occ_in_lam = in_lam1 `mappend` in_lam2 , occ_int_cxt = int_cxt1 `mappend` int_cxt2 , occ_tail = tail1 `andTailCallInfo` tail2 } orOccInfo a1 a2 = assert (not (isDeadOcc a1 || isDeadOcc a2)) $ ManyOccs { occ_tail = tailCallInfo a1 `andTailCallInfo` tailCallInfo a2 } andTailCallInfo :: TailCallInfo -> TailCallInfo -> TailCallInfo andTailCallInfo info@(AlwaysTailCalled arity1) (AlwaysTailCalled arity2) | arity1 == arity2 = info andTailCallInfo _ _ = NoTailCallInfo ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/PatSyn.hs0000644000000000000000000004353714472400112020311 0ustar0000000000000000{- (c) The 
University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[PatSyn]{@PatSyn@: Pattern synonyms} -} module GHC.Core.PatSyn ( -- * Main data types PatSyn, PatSynMatcher, PatSynBuilder, mkPatSyn, -- ** Type deconstruction patSynName, patSynArity, patSynIsInfix, patSynResultType, isVanillaPatSyn, patSynArgs, patSynMatcher, patSynBuilder, patSynUnivTyVarBinders, patSynExTyVars, patSynExTyVarBinders, patSynSig, patSynSigBndr, patSynInstArgTys, patSynInstResTy, patSynFieldLabels, patSynFieldType, pprPatSynType ) where import GHC.Prelude import GHC.Core.Type import GHC.Core.TyCo.Ppr import GHC.Types.Name import GHC.Types.Unique import GHC.Types.Basic import GHC.Types.Var import GHC.Types.FieldLabel import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import qualified Data.Data as Data import Data.Function import Data.List (find) {- ************************************************************************ * * \subsection{Pattern synonyms} * * ************************************************************************ -} -- | Pattern Synonym -- -- See Note [Pattern synonym representation] -- See Note [Pattern synonym signature contexts] data PatSyn = MkPatSyn { psName :: Name, psUnique :: Unique, -- Cached from Name psArgs :: [FRRType], -- ^ Argument types psArity :: Arity, -- == length psArgs psInfix :: Bool, -- True <=> declared infix psFieldLabels :: [FieldLabel], -- List of fields for a -- record pattern synonym -- INVARIANT: either empty if no -- record pat syn or same length as -- psArgs -- Universally-quantified type variables psUnivTyVars :: [InvisTVBinder], -- Required dictionaries (may mention psUnivTyVars) psReqTheta :: ThetaType, -- Existentially-quantified type vars psExTyVars :: [InvisTVBinder], -- Provided dictionaries (may mention psUnivTyVars or psExTyVars) psProvTheta :: ThetaType, -- Result type psResultTy :: Type, -- Mentions only psUnivTyVars -- See Note [Pattern synonym result type] -- See Note [Matchers and 
builders for pattern synonyms] -- See Note [Keep Ids out of PatSyn] psMatcher :: PatSynMatcher, psBuilder :: PatSynBuilder } type PatSynMatcher = (Name, Type, Bool) -- Matcher function. -- If Bool is True then prov_theta and arg_tys are empty -- and type is -- forall (p :: RuntimeRep) (r :: TYPE p) univ_tvs. -- req_theta -- => res_ty -- -> (forall ex_tvs. Void# -> r) -- -> (Void# -> r) -- -> r -- -- Otherwise type is -- forall (p :: RuntimeRep) (r :: TYPE r) univ_tvs. -- req_theta -- => res_ty -- -> (forall ex_tvs. prov_theta => arg_tys -> r) -- -> (Void# -> r) -- -> r type PatSynBuilder = Maybe (Name, Type, Bool) -- Nothing => uni-directional pattern synonym -- Just (builder, is_unlifted) => bi-directional -- Builder function, of type -- forall univ_tvs, ex_tvs. (req_theta, prov_theta) -- => arg_tys -> res_ty -- See Note [Builder for pattern synonyms with unboxed type] {- Note [Pattern synonym signature contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a pattern synonym signature we write pattern P :: req => prov => t1 -> ... tn -> res_ty Note that the "required" context comes first, then the "provided" context. Moreover, the "required" context must not mention existentially-bound type variables; that is, ones not mentioned in res_ty. See lots of discussion in #10928. If there is no "provided" context, you can omit it; but you can't omit the "required" part (unless you omit both). Example 1: pattern P1 :: (Num a, Eq a) => b -> Maybe (a,b) pattern P1 x = Just (3,x) We require (Num a, Eq a) to match the 3; there is no provided context. Example 2: data T2 where MkT2 :: (Num a, Eq a) => a -> a -> T2 pattern P2 :: () => (Num a, Eq a) => a -> T2 pattern P2 x = MkT2 3 x When we match against P2 we get a Num dictionary provided. We can use that to check the match against 3. Example 3: pattern P3 :: Eq a => a -> b -> T3 b This signature is illegal because the (Eq a) is a required constraint, but it mentions the existentially-bound variable 'a'. 
You can see it's existential because it doesn't appear in the result type (T3 b). Note [Pattern synonym result type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T a b = MkT b a pattern P :: a -> T [a] Bool pattern P x = MkT True [x] P's psResultTy is (T a Bool), and it really only matches values of type (T [a] Bool). For example, this is ill-typed f :: T p q -> String f (P x) = "urk" This is different to the situation with GADTs: data S a where MkS :: Int -> S Bool Now MkS (and pattern synonyms coming from MkS) can match a value of type (S a), not just (S Bool); we get type refinement. That in turn means that if you have a pattern P x :: T [ty] Bool it's not entirely straightforward to work out the instantiation of P's universal tyvars. You have to /match/ the type of the pattern, (T [ty] Bool) against the psResultTy for the pattern synonym, T [a] Bool to get the instantiation a := ty. This is very unlike DataCons, where univ tyvars match 1-1 the arguments of the TyCon. Side note: I (SG) get the impression that instantiated return types should generate a *required* constraint for pattern synonyms, rather than a *provided* constraint like it's the case for GADTs. For example, I'd expect these declarations to have identical semantics: pattern Just42 :: Maybe Int pattern Just42 = Just 42 pattern Just'42 :: (a ~ Int) => Maybe a pattern Just'42 = Just 42 The latter generates the proper required constraint, the former does not. Also rather different to GADTs is the fact that Just42 doesn't have any universally quantified type variables, whereas Just'42 or MkS above has. Note [Keep Ids out of PatSyn] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We carefully arrange that PatSyn does not contain the Ids for the matcher and builder. We want PatSyn, like TyCon and DataCon, to be completely immutable. 
But, the matcher and builder are relatively sophisticated functions, and we want to get their final IdInfo in the same way as any other Id, so we'd have to update the Ids in the PatSyn too. Rather than try to tidy PatSyns (which is easy to forget and is a bit tricky, see #19074), it seems cleaner to make them entirely immutable, like TyCons and Classes. To that end PatSynBuilder and PatSynMatcher contain Names not Ids. Which, it turns out, is absolutely fine. c.f. DefMethInfo in Class, which contains the Name, but not the Id, of the default method. Note [Pattern synonym representation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following pattern synonym declaration pattern P x = MkT [x] (Just 42) where data T a where MkT :: (Show a, Ord b) => [b] -> a -> T a so pattern P has type b -> T (Maybe t) with the following typeclass constraints: requires: (Eq t, Num t) provides: (Show (Maybe t), Ord b) In this case, the fields of MkPatSyn will be set as follows: psArgs = [b] psArity = 1 psInfix = False psUnivTyVars = [t] psExTyVars = [b] psProvTheta = (Show (Maybe t), Ord b) psReqTheta = (Eq t, Num t) psResultTy = T (Maybe t) Note [Matchers and builders for pattern synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For each pattern synonym P, we generate * a "matcher" function, used to desugar uses of P in patterns, which implements pattern matching * A "builder" function (for bidirectional pattern synonyms only), used to desugar uses of P in expressions, which constructs P-values. For the above example, the matcher function has type: $mP :: forall (r :: ?) t. (Eq t, Num t) => T (Maybe t) -> (forall b. 
(Show (Maybe t), Ord b) => b -> r) -> (Void# -> r) -> r with the following implementation: $mP @r @t $dEq $dNum scrut cont fail = case scrut of MkT @b $dShow $dOrd [x] (Just 42) -> cont @b $dShow $dOrd x _ -> fail Void# Notice that the return type 'r' has an open kind, so that it can be instantiated by an unboxed type; for example where we see f (P x) = 3# The extra Void# argument for the failure continuation is needed so that it is lazy even when the result type is unboxed. For the same reason, if the pattern has no arguments, an extra Void# argument is added to the success continuation as well. For *bidirectional* pattern synonyms, we also generate a "builder" function which implements the pattern synonym in an expression context. For our running example, it will be: $bP :: forall t b. (Eq t, Num t, Show (Maybe t), Ord b) => b -> T (Maybe t) $bP x = MkT [x] (Just 42) NB: the existential/universal and required/provided split does not apply to the builder since you are only putting stuff in, not getting stuff out. Injectivity of bidirectional pattern synonyms is checked in tcPatToExpr which walks the pattern and returns its corresponding expression when available. Note [Builder for pattern synonyms with unboxed type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For bidirectional pattern synonyms that have no arguments and have an unboxed type, we add an extra Void# argument to the builder, else it would be a top-level declaration with an unboxed type. pattern P = 0# $bP :: Void# -> Int# $bP _ = 0# This means that when typechecking an occurrence of P in an expression, we must remember that the builder has this void argument. This is done by GHC.Tc.TyCl.PatSyn.patSynBuilderOcc. Note [Pattern synonyms and the data type Type] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The type of a pattern synonym is of the form (See Note [Pattern synonym signatures] in GHC.Tc.Gen.Sig): forall univ_tvs. req => forall ex_tvs. prov => ... 
We cannot in general represent this by a value of type Type: - if ex_tvs is empty, then req and prov cannot be distinguished from each other - if req is empty, then univ_tvs and ex_tvs cannot be distinguished from each other, and moreover, prov is seen as the "required" context (as it is the only context) ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Eq PatSyn where (==) = (==) `on` getUnique (/=) = (/=) `on` getUnique instance Uniquable PatSyn where getUnique = psUnique instance NamedThing PatSyn where getName = patSynName instance Outputable PatSyn where ppr = ppr . getName instance OutputableBndr PatSyn where pprInfixOcc = pprInfixName . getName pprPrefixOcc = pprPrefixName . getName instance Data.Data PatSyn where -- don't traverse? toConstr _ = abstractConstr "PatSyn" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "PatSyn" {- ************************************************************************ * * \subsection{Construction} * * ************************************************************************ -} -- | Build a new pattern synonym mkPatSyn :: Name -> Bool -- ^ Is the pattern synonym declared infix? -> ([InvisTVBinder], ThetaType) -- ^ Universially-quantified type -- variables and required dicts -> ([InvisTVBinder], ThetaType) -- ^ Existentially-quantified type -- variables and provided dicts -> [FRRType] -- ^ Original arguments -> Type -- ^ Original result type -> PatSynMatcher -- ^ Matcher -> PatSynBuilder -- ^ Builder -> [FieldLabel] -- ^ Names of fields for -- a record pattern synonym -> PatSyn -- NB: The univ and ex vars are both in TyBinder form and TyVar form for -- convenience. All the TyBinders should be Named! 
mkPatSyn name declared_infix (univ_tvs, req_theta) (ex_tvs, prov_theta) orig_args orig_res_ty matcher builder field_labels = MkPatSyn {psName = name, psUnique = getUnique name, psUnivTyVars = univ_tvs, psExTyVars = ex_tvs, psProvTheta = prov_theta, psReqTheta = req_theta, psInfix = declared_infix, psArgs = orig_args, psArity = length orig_args, psResultTy = orig_res_ty, psMatcher = matcher, psBuilder = builder, psFieldLabels = field_labels } -- | The 'Name' of the 'PatSyn', giving it a unique, rooted identification patSynName :: PatSyn -> Name patSynName = psName -- | Should the 'PatSyn' be presented infix? patSynIsInfix :: PatSyn -> Bool patSynIsInfix = psInfix -- | Arity of the pattern synonym patSynArity :: PatSyn -> Arity patSynArity = psArity -- | Is this a \'vanilla\' pattern synonym (no existentials, no provided constraints)? isVanillaPatSyn :: PatSyn -> Bool isVanillaPatSyn ps = null (psExTyVars ps) && null (psProvTheta ps) patSynArgs :: PatSyn -> [Type] patSynArgs = psArgs patSynFieldLabels :: PatSyn -> [FieldLabel] patSynFieldLabels = psFieldLabels -- | Extract the type for any given labelled field of the 'DataCon' patSynFieldType :: PatSyn -> FieldLabelString -> Type patSynFieldType ps label = case find ((== label) . flLabel . 
fst) (psFieldLabels ps `zip` psArgs ps) of Just (_, ty) -> ty Nothing -> pprPanic "dataConFieldType" (ppr ps <+> ppr label) patSynUnivTyVarBinders :: PatSyn -> [InvisTVBinder] patSynUnivTyVarBinders = psUnivTyVars patSynExTyVars :: PatSyn -> [TyVar] patSynExTyVars ps = binderVars (psExTyVars ps) patSynExTyVarBinders :: PatSyn -> [InvisTVBinder] patSynExTyVarBinders = psExTyVars patSynSigBndr :: PatSyn -> ([InvisTVBinder], ThetaType, [InvisTVBinder], ThetaType, [Scaled Type], Type) patSynSigBndr (MkPatSyn { psUnivTyVars = univ_tvs, psExTyVars = ex_tvs , psProvTheta = prov, psReqTheta = req , psArgs = arg_tys, psResultTy = res_ty }) = (univ_tvs, req, ex_tvs, prov, map unrestricted arg_tys, res_ty) patSynSig :: PatSyn -> ([TyVar], ThetaType, [TyVar], ThetaType, [Scaled Type], Type) patSynSig ps = let (u_tvs, req, e_tvs, prov, arg_tys, res_ty) = patSynSigBndr ps in (binderVars u_tvs, req, binderVars e_tvs, prov, arg_tys, res_ty) patSynMatcher :: PatSyn -> PatSynMatcher patSynMatcher = psMatcher patSynBuilder :: PatSyn -> PatSynBuilder patSynBuilder = psBuilder patSynResultType :: PatSyn -> Type patSynResultType = psResultTy patSynInstArgTys :: PatSyn -> [Type] -> [Type] -- Return the types of the argument patterns -- e.g. data D a = forall b. MkD a b (b->a) -- pattern P f x y = MkD (x,True) y f -- D :: forall a. forall b. a -> b -> (b->a) -> D a -- P :: forall c. forall b. (b->(c,Bool)) -> c -> b -> P c -- patSynInstArgTys P [Int,bb] = [bb->(Int,Bool), Int, bb] -- NB: the inst_tys should be both universal and existential patSynInstArgTys (MkPatSyn { psName = name, psUnivTyVars = univ_tvs , psExTyVars = ex_tvs, psArgs = arg_tys }) inst_tys = assertPpr (tyvars `equalLength` inst_tys) (text "patSynInstArgTys" <+> ppr name $$ ppr tyvars $$ ppr inst_tys) $ map (substTyWith tyvars inst_tys) arg_tys where tyvars = binderVars (univ_tvs ++ ex_tvs) patSynInstResTy :: PatSyn -> [Type] -> Type -- Return the type of whole pattern -- E.g. 
pattern P x y = Just (x,x,y) -- P :: a -> b -> Just (a,a,b) -- (patSynInstResTy P [Int,Bool] = Maybe (Int,Int,Bool) -- NB: unlike patSynInstArgTys, the inst_tys should be just the *universal* tyvars patSynInstResTy (MkPatSyn { psName = name, psUnivTyVars = univ_tvs , psResultTy = res_ty }) inst_tys = assertPpr (univ_tvs `equalLength` inst_tys) (text "patSynInstResTy" <+> ppr name $$ ppr univ_tvs $$ ppr inst_tys) $ substTyWith (binderVars univ_tvs) inst_tys res_ty -- | Print the type of a pattern synonym. The foralls are printed explicitly pprPatSynType :: PatSyn -> SDoc pprPatSynType (MkPatSyn { psUnivTyVars = univ_tvs, psReqTheta = req_theta , psExTyVars = ex_tvs, psProvTheta = prov_theta , psArgs = orig_args, psResultTy = orig_res_ty }) = sep [ pprForAll $ tyVarSpecToBinders univ_tvs , pprThetaArrowTy req_theta , ppWhen insert_empty_ctxt $ parens empty <+> darrow , pprType sigma_ty ] where sigma_ty = mkInvisForAllTys ex_tvs $ mkInvisFunTysMany prov_theta $ mkVisFunTysMany orig_args orig_res_ty insert_empty_ctxt = null req_theta && not (null prov_theta && null ex_tvs) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Ppr.hs0000644000000000000000000005732714472400112017636 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {- these are needed for the Outputable instance for GenTickish, since we need XTickishId to be Outputable. This should immediately resolve to something like Id. 
-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -fno-warn-orphans #-} {- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1996-1998 Printing of Core syntax -} module GHC.Core.Ppr ( pprCoreExpr, pprParendExpr, pprCoreBinding, pprCoreBindings, pprCoreAlt, pprCoreBindingWithSize, pprCoreBindingsWithSize, pprCoreBinder, pprCoreBinders, pprId, pprIds, pprRule, pprRules, pprOptCo, pprOcc, pprOccWithTick ) where import GHC.Prelude import GHC.Core import GHC.Core.Stats (exprStats) import GHC.Types.Fixity (LexicalFixity(..)) import GHC.Types.Literal( pprLiteral ) import GHC.Types.Name( pprInfixName, pprPrefixName ) import GHC.Types.Var import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand import GHC.Types.Cpr import GHC.Core.DataCon import GHC.Core.TyCon import GHC.Core.TyCo.Ppr import GHC.Core.Coercion import GHC.Types.Basic import GHC.Data.Maybe import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Types.SrcLoc ( pprUserRealSpan ) import GHC.Types.Tickish {- ************************************************************************ * * \subsection{Public interfaces for Core printing (excluding instances)} * * ************************************************************************ @pprParendCoreExpr@ puts parens around non-atomic Core expressions. 
-} pprCoreBindings :: OutputableBndr b => [Bind b] -> SDoc pprCoreBinding :: OutputableBndr b => Bind b -> SDoc pprCoreExpr :: OutputableBndr b => Expr b -> SDoc pprParendExpr :: OutputableBndr b => Expr b -> SDoc pprCoreBindings = pprTopBinds noAnn pprCoreBinding = pprTopBind noAnn pprCoreBindingsWithSize :: [CoreBind] -> SDoc pprCoreBindingWithSize :: CoreBind -> SDoc pprCoreBindingsWithSize = pprTopBinds sizeAnn pprCoreBindingWithSize = pprTopBind sizeAnn instance OutputableBndr b => Outputable (Bind b) where ppr bind = ppr_bind noAnn bind instance OutputableBndr b => Outputable (Expr b) where ppr expr = pprCoreExpr expr instance OutputableBndr b => Outputable (Alt b) where ppr expr = pprCoreAlt expr {- ************************************************************************ * * \subsection{The guts} * * ************************************************************************ -} -- | A function to produce an annotation for a given right-hand-side type Annotation b = Expr b -> SDoc -- | Annotate with the size of the right-hand-side sizeAnn :: CoreExpr -> SDoc sizeAnn e = text "-- RHS size:" <+> ppr (exprStats e) -- | No annotation noAnn :: Expr b -> SDoc noAnn _ = empty pprTopBinds :: OutputableBndr a => Annotation a -- ^ generate an annotation to place before the -- binding -> [Bind a] -- ^ bindings to show -> SDoc -- ^ the pretty result pprTopBinds ann binds = vcat (map (pprTopBind ann) binds) pprTopBind :: OutputableBndr a => Annotation a -> Bind a -> SDoc pprTopBind ann (NonRec binder expr) = ppr_binding ann (binder,expr) $$ blankLine pprTopBind _ (Rec []) = text "Rec { }" pprTopBind ann (Rec (b:bs)) = vcat [text "Rec {", ppr_binding ann b, vcat [blankLine $$ ppr_binding ann b | b <- bs], text "end Rec }", blankLine] ppr_bind :: OutputableBndr b => Annotation b -> Bind b -> SDoc ppr_bind ann (NonRec val_bdr expr) = ppr_binding ann (val_bdr, expr) ppr_bind ann (Rec binds) = vcat (map pp binds) where pp bind = ppr_binding ann bind <> semi ppr_binding :: 
OutputableBndr b => Annotation b -> (b, Expr b) -> SDoc ppr_binding ann (val_bdr, expr) = vcat [ ann expr , ppUnlessOption sdocSuppressTypeSignatures (pprBndr LetBind val_bdr) , pp_bind ] where pp_val_bdr = pprPrefixOcc val_bdr pp_bind = case bndrIsJoin_maybe val_bdr of Nothing -> pp_normal_bind Just ar -> pp_join_bind ar pp_normal_bind = hang pp_val_bdr 2 (equals <+> pprCoreExpr expr) -- For a join point of join arity n, we want to print j = \x1 ... xn -> e -- as "j x1 ... xn = e" to differentiate when a join point returns a -- lambda (the first rendering looks like a nullary join point returning -- an n-argument function). pp_join_bind join_arity | bndrs `lengthAtLeast` join_arity = hang (pp_val_bdr <+> sep (map (pprBndr LambdaBind) lhs_bndrs)) 2 (equals <+> pprCoreExpr rhs) | otherwise -- Yikes! A join-binding with too few lambda -- Lint will complain, but we don't want to crash -- the pretty-printer else we can't see what's wrong -- So refer to printing j = e = pp_normal_bind where (bndrs, body) = collectBinders expr lhs_bndrs = take join_arity bndrs rhs = mkLams (drop join_arity bndrs) body pprParendExpr expr = ppr_expr parens expr pprCoreExpr expr = ppr_expr noParens expr noParens :: SDoc -> SDoc noParens pp = pp pprOptCo :: Coercion -> SDoc -- Print a coercion optionally; i.e. honouring -dsuppress-coercions pprOptCo co = sdocOption sdocSuppressCoercions $ \case True -> angleBrackets (text "Co:" <> int (coercionSize co)) <+> dcolon <+> co_type False -> parens $ sep [ppr co, dcolon <+> co_type] where co_type = sdocOption sdocSuppressCoercionTypes $ \case True -> text "..." 
False -> ppr (coercionType co) ppr_id_occ :: (SDoc -> SDoc) -> Id -> SDoc ppr_id_occ add_par id | isJoinId id = add_par ((text "jump") <+> pp_id) | otherwise = pp_id where pp_id = ppr id -- We could use pprPrefixOcc to print (+) etc, but this is -- Core where we don't print things infix anyway, so doing -- so just adds extra redundant parens ppr_expr :: OutputableBndr b => (SDoc -> SDoc) -> Expr b -> SDoc -- The function adds parens in context that need -- an atomic value (e.g. function args) ppr_expr add_par (Var id) = ppr_id_occ add_par id ppr_expr add_par (Type ty) = add_par (text "TYPE:" <+> ppr ty) -- Weird ppr_expr add_par (Coercion co) = add_par (text "CO:" <+> ppr co) ppr_expr add_par (Lit lit) = pprLiteral add_par lit ppr_expr add_par (Cast expr co) = add_par $ sep [pprParendExpr expr, text "`cast`" <+> pprOptCo co] ppr_expr add_par expr@(Lam _ _) = let (bndrs, body) = collectBinders expr in add_par $ hang (text "\\" <+> sep (map (pprBndr LambdaBind) bndrs) <+> arrow) 2 (pprCoreExpr body) ppr_expr add_par expr@(App {}) = sdocOption sdocSuppressTypeApplications $ \supp_ty_app -> case collectArgs expr of { (fun, args) -> let pp_args = sep (map pprArg args) val_args = dropWhile isTypeArg args -- Drop the type arguments for tuples pp_tup_args = pprWithCommas pprCoreExpr val_args args' | supp_ty_app = val_args | otherwise = args parens | null args' = id | otherwise = add_par in case fun of Var f -> case isDataConWorkId_maybe f of -- Notice that we print the *worker* -- for tuples in paren'd format. Just dc | saturated , Just sort <- tyConTuple_maybe tc -> tupleParens sort pp_tup_args where tc = dataConTyCon dc saturated = val_args `lengthIs` idArity f _ -> parens (hang fun_doc 2 pp_args) where fun_doc = ppr_id_occ noParens f _ -> parens (hang (pprParendExpr fun) 2 pp_args) } ppr_expr add_par (Case expr var ty [Alt con args rhs]) = sdocOption sdocPrintCaseAsLet $ \case True -> add_par $ -- See Note [Print case as let] sep [ sep [ text "let! 
{" <+> ppr_case_pat con args <+> text "~" <+> ppr_bndr var , text "<-" <+> ppr_expr id expr <+> text "} in" ] , pprCoreExpr rhs ] False -> add_par $ sep [sep [sep [ text "case" <+> pprCoreExpr expr , whenPprDebug (text "return" <+> ppr ty) , text "of" <+> ppr_bndr var ] , char '{' <+> ppr_case_pat con args <+> arrow ] , pprCoreExpr rhs , char '}' ] where ppr_bndr = pprBndr CaseBind ppr_expr add_par (Case expr var ty alts) = add_par $ sep [sep [text "case" <+> pprCoreExpr expr <+> whenPprDebug (text "return" <+> ppr ty), text "of" <+> ppr_bndr var <+> char '{'], nest 2 (vcat (punctuate semi (map pprCoreAlt alts))), char '}' ] where ppr_bndr = pprBndr CaseBind -- special cases: let ... in let ... -- ("disgusting" SLPJ) {- ppr_expr add_par (Let bind@(NonRec val_bdr rhs@(Let _ _)) body) = add_par $ vcat [ hsep [text "let {", (pprBndr LetBind val_bdr $$ ppr val_bndr), equals], nest 2 (pprCoreExpr rhs), text "} in", pprCoreExpr body ] ppr_expr add_par (Let bind@(NonRec val_bdr rhs) expr@(Let _ _)) = add_par (hang (text "let {") 2 (hsep [ppr_binding (val_bdr,rhs), text "} in"]) $$ pprCoreExpr expr) -} -- General case (recursive case, too) ppr_expr add_par (Let bind expr) = add_par $ sep [hang (keyword bind <+> char '{') 2 (ppr_bind noAnn bind <+> text "} in"), pprCoreExpr expr] where keyword (NonRec b _) | isJust (bndrIsJoin_maybe b) = text "join" | otherwise = text "let" keyword (Rec pairs) | ((b,_):_) <- pairs , isJust (bndrIsJoin_maybe b) = text "joinrec" | otherwise = text "letrec" ppr_expr add_par (Tick tickish expr) = sdocOption sdocSuppressTicks $ \case True -> ppr_expr add_par expr False -> add_par (sep [ppr tickish, pprCoreExpr expr]) pprCoreAlt :: OutputableBndr a => Alt a -> SDoc pprCoreAlt (Alt con args rhs) = hang (ppr_case_pat con args <+> arrow) 2 (pprCoreExpr rhs) ppr_case_pat :: OutputableBndr a => AltCon -> [a] -> SDoc ppr_case_pat (DataAlt dc) args | Just sort <- tyConTuple_maybe tc = tupleParens sort (pprWithCommas ppr_bndr args) where ppr_bndr = 
pprBndr CasePatBind tc = dataConTyCon dc ppr_case_pat con args = ppr con <+> (fsep (map ppr_bndr args)) where ppr_bndr = pprBndr CasePatBind -- | Pretty print the argument in a function application. pprArg :: OutputableBndr a => Expr a -> SDoc pprArg (Type ty) = ppUnlessOption sdocSuppressTypeApplications (text "@" <> pprParendType ty) pprArg (Coercion co) = text "@~" <> pprOptCo co pprArg expr = pprParendExpr expr {- Note [Print case as let] ~~~~~~~~~~~~~~~~~~~~~~~~ Single-branch case expressions are very common: case x of y { I# x' -> case p of q { I# p' -> ... } } These are, in effect, just strict let's, with pattern matching. With -dppr-case-as-let we print them as such: let! { I# x' ~ y <- x } in let! { I# p' ~ q <- p } in ... Other printing bits-and-bobs used with the general @pprCoreBinding@ and @pprCoreExpr@ functions. Note [Binding-site specific printing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ pprCoreBinder and pprTypedLamBinder receive a BindingSite argument to adjust the information printed. Let-bound binders are printed with their full type and idInfo. Case-bound variables (both the case binder and pattern variables) are printed without a type and without their unfolding. Furthermore, a dead case-binder is completely ignored, while otherwise, dead binders are printed as "_". -} -- These instances are sadly orphans instance OutputableBndr Var where pprBndr = pprCoreBinder pprInfixOcc = pprInfixName . varName pprPrefixOcc = pprPrefixName . 
varName bndrIsJoin_maybe = isJoinId_maybe instance Outputable b => OutputableBndr (TaggedBndr b) where pprBndr _ b = ppr b -- Simple pprInfixOcc b = ppr b pprPrefixOcc b = ppr b bndrIsJoin_maybe (TB b _) = isJoinId_maybe b pprOcc :: OutputableBndr a => LexicalFixity -> a -> SDoc pprOcc Infix = pprInfixOcc pprOcc Prefix = pprPrefixOcc pprOccWithTick :: OutputableBndr a => LexicalFixity -> PromotionFlag -> a -> SDoc pprOccWithTick fixity prom op | isPromoted prom = quote (pprOcc fixity op) | otherwise = pprOcc fixity op pprCoreBinder :: BindingSite -> Var -> SDoc pprCoreBinder LetBind binder | isTyVar binder = pprKindedTyVarBndr binder | otherwise = pprTypedLetBinder binder $$ ppIdInfo binder (idInfo binder) -- Lambda bound type variables are preceded by "@" pprCoreBinder bind_site bndr = getPprDebug $ \debug -> pprTypedLamBinder bind_site debug bndr pprCoreBinders :: [Var] -> SDoc -- Print as lambda-binders, i.e. with their type pprCoreBinders vs = sep (map (pprCoreBinder LambdaBind) vs) pprUntypedBinder :: Var -> SDoc pprUntypedBinder binder | isTyVar binder = text "@" <> ppr binder -- NB: don't print kind | otherwise = pprIdBndr binder pprTypedLamBinder :: BindingSite -> Bool -> Var -> SDoc -- For lambda and case binders, show the unfolding info (usually none) pprTypedLamBinder bind_site debug_on var = sdocOption sdocSuppressTypeSignatures $ \suppress_sigs -> case () of _ | not debug_on -- Show case-bound wild binders only if debug is on , CaseBind <- bind_site , isDeadBinder var -> empty | not debug_on -- Even dead binders can be one-shot , isDeadBinder var -> char '_' <+> ppWhen (isId var) (pprIdBndrInfo (idInfo var)) | not debug_on -- No parens, no kind info , CaseBind <- bind_site -> pprUntypedBinder var | not debug_on , CasePatBind <- bind_site -> pprUntypedBinder var | suppress_sigs -> pprUntypedBinder var | isTyVar var -> parens (pprKindedTyVarBndr var) | otherwise -> parens (hang (pprIdBndr var) 2 (vcat [ dcolon <+> pprType (idType var) , pp_unf])) where 
unf_info = realUnfoldingInfo (idInfo var) pp_unf | hasSomeUnfolding unf_info = text "Unf=" <> ppr unf_info | otherwise = empty pprTypedLetBinder :: Var -> SDoc -- Print binder with a type or kind signature (not paren'd) pprTypedLetBinder binder = sdocOption sdocSuppressTypeSignatures $ \suppress_sigs -> case () of _ | isTyVar binder -> pprKindedTyVarBndr binder | suppress_sigs -> pprIdBndr binder | otherwise -> hang (pprIdBndr binder) 2 (dcolon <+> pprType (idType binder)) pprKindedTyVarBndr :: TyVar -> SDoc -- Print a type variable binder with its kind (but not if *) pprKindedTyVarBndr tyvar = text "@" <> pprTyVar tyvar -- pprId x prints x :: ty pprId :: Id -> SDoc pprId x = ppr x <+> dcolon <+> ppr (idType x) pprIds :: [Id] -> SDoc pprIds xs = sep (map pprId xs) -- pprIdBndr does *not* print the type -- When printing any Id binder in debug mode, we print its inline pragma and one-shot-ness pprIdBndr :: Id -> SDoc pprIdBndr id = pprPrefixOcc id <+> pprIdBndrInfo (idInfo id) pprIdBndrInfo :: IdInfo -> SDoc pprIdBndrInfo info = ppUnlessOption sdocSuppressIdInfo (info `seq` doc) -- The seq is useful for poking on black holes where prag_info = inlinePragInfo info occ_info = occInfo info dmd_info = demandInfo info lbv_info = oneShotInfo info has_prag = not (isDefaultInlinePragma prag_info) has_occ = not (isNoOccInfo occ_info) has_dmd = not $ isTopDmd dmd_info has_lbv = not (hasNoOneShotInfo lbv_info) doc = showAttributes [ (has_prag, text "InlPrag=" <> pprInlineDebug prag_info) , (has_occ, text "Occ=" <> ppr occ_info) , (has_dmd, text "Dmd=" <> ppr dmd_info) , (has_lbv , text "OS=" <> ppr lbv_info) ] instance Outputable IdInfo where ppr info = showAttributes [ (has_prag, text "InlPrag=" <> pprInlineDebug prag_info) , (has_occ, text "Occ=" <> ppr occ_info) , (has_dmd, text "Dmd=" <> ppr dmd_info) , (has_lbv , text "OS=" <> ppr lbv_info) , (has_arity, text "Arity=" <> int arity) , (has_called_arity, text "CallArity=" <> int called_arity) , (has_caf_info, text "Caf=" <> 
ppr caf_info) , (has_str_info, text "Str=" <> pprStrictness str_info) , (has_unf, text "Unf=" <> ppr unf_info) , (has_rules, text "RULES:" <+> vcat (map pprRule rules)) ] where prag_info = inlinePragInfo info has_prag = not (isDefaultInlinePragma prag_info) occ_info = occInfo info has_occ = not (isManyOccs occ_info) dmd_info = demandInfo info has_dmd = not $ isTopDmd dmd_info lbv_info = oneShotInfo info has_lbv = not (hasNoOneShotInfo lbv_info) arity = arityInfo info has_arity = arity /= 0 called_arity = callArityInfo info has_called_arity = called_arity /= 0 caf_info = cafInfo info has_caf_info = not (mayHaveCafRefs caf_info) str_info = dmdSigInfo info has_str_info = not (isNopSig str_info) unf_info = realUnfoldingInfo info has_unf = hasSomeUnfolding unf_info rules = ruleInfoRules (ruleInfo info) has_rules = not (null rules) {- ----------------------------------------------------- -- IdDetails and IdInfo ----------------------------------------------------- -} ppIdInfo :: Id -> IdInfo -> SDoc ppIdInfo id info = ppUnlessOption sdocSuppressIdInfo $ showAttributes [ (True, pp_scope <> ppr (idDetails id)) , (has_arity, text "Arity=" <> int arity) , (has_called_arity, text "CallArity=" <> int called_arity) , (has_caf_info, text "Caf=" <> ppr caf_info) , (has_str_info, text "Str=" <> pprStrictness str_info) , (has_cpr_info, text "Cpr=" <> ppr cpr_info) , (has_unf, text "Unf=" <> ppr unf_info) , (not (null rules), text "RULES:" <+> vcat (map pprRule rules)) ] -- Inline pragma, occ, demand, one-shot info -- printed out with all binders (when debug is on); -- see GHC.Core.Ppr.pprIdBndr where pp_scope | isGlobalId id = text "GblId" | isExportedId id = text "LclIdX" | otherwise = text "LclId" arity = arityInfo info has_arity = arity /= 0 called_arity = callArityInfo info has_called_arity = called_arity /= 0 caf_info = cafInfo info has_caf_info = not (mayHaveCafRefs caf_info) str_info = dmdSigInfo info has_str_info = not (isNopSig str_info) cpr_info = cprSigInfo info 
has_cpr_info = cpr_info /= topCprSig unf_info = realUnfoldingInfo info has_unf = hasSomeUnfolding unf_info rules = ruleInfoRules (ruleInfo info) showAttributes :: [(Bool,SDoc)] -> SDoc showAttributes stuff | null docs = empty | otherwise = brackets (sep (punctuate comma docs)) where docs = [d | (True,d) <- stuff] {- ----------------------------------------------------- -- Unfolding and UnfoldingGuidance ----------------------------------------------------- -} instance Outputable UnfoldingGuidance where ppr UnfNever = text "NEVER" ppr (UnfWhen { ug_arity = arity, ug_unsat_ok = unsat_ok, ug_boring_ok = boring_ok }) = text "ALWAYS_IF" <> parens (text "arity=" <> int arity <> comma <> text "unsat_ok=" <> ppr unsat_ok <> comma <> text "boring_ok=" <> ppr boring_ok) ppr (UnfIfGoodArgs { ug_args = cs, ug_size = size, ug_res = discount }) = hsep [ text "IF_ARGS", brackets (hsep (map int cs)), int size, int discount ] instance Outputable UnfoldingSource where ppr InlineCompulsory = text "Compulsory" ppr InlineStable = text "InlineStable" ppr InlineRhs = text "" instance Outputable Unfolding where ppr NoUnfolding = text "No unfolding" ppr BootUnfolding = text "No unfolding (from boot)" ppr (OtherCon cs) = text "OtherCon" <+> ppr cs ppr (DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args }) = hang (text "DFun:" <+> char '\\' <+> sep (map (pprBndr LambdaBind) bndrs) <+> arrow) 2 (ppr con <+> sep (map ppr args)) ppr (CoreUnfolding { uf_src = src , uf_tmpl=rhs, uf_is_top=top , uf_cache=cache, uf_guidance=g }) = text "Unf" <> braces (pp_info $$ pp_rhs) where pp_info = fsep $ punctuate comma [ text "Src=" <> ppr src , text "TopLvl=" <> ppr top , ppr cache , text "Guidance=" <> ppr g ] pp_tmpl = ppUnlessOption sdocSuppressUnfoldings (text "Tmpl=" <+> ppr rhs) pp_rhs | isStableSource src = pp_tmpl | otherwise = empty -- Don't print the RHS or we get a quadratic -- blowup in the size of the printout! 
instance Outputable UnfoldingCache where ppr (UnfoldingCache { uf_is_value = hnf, uf_is_conlike = conlike , uf_is_work_free = wf, uf_expandable = exp }) = fsep $ punctuate comma [ text "Value=" <> ppr hnf , text "ConLike=" <> ppr conlike , text "WorkFree=" <> ppr wf , text "Expandable=" <> ppr exp ] {- ----------------------------------------------------- -- Rules ----------------------------------------------------- -} instance Outputable CoreRule where ppr = pprRule pprRules :: [CoreRule] -> SDoc pprRules rules = vcat (map pprRule rules) pprRule :: CoreRule -> SDoc pprRule (BuiltinRule { ru_fn = fn, ru_name = name}) = text "Built in rule for" <+> ppr fn <> colon <+> doubleQuotes (ftext name) pprRule (Rule { ru_name = name, ru_act = act, ru_fn = fn, ru_bndrs = tpl_vars, ru_args = tpl_args, ru_rhs = rhs }) = hang (doubleQuotes (ftext name) <+> ppr act) 4 (sep [text "forall" <+> pprCoreBinders tpl_vars <> dot, nest 2 (ppr fn <+> sep (map pprArg tpl_args)), nest 2 (text "=" <+> pprCoreExpr rhs) ]) {- ----------------------------------------------------- -- Tickish ----------------------------------------------------- -} instance Outputable (XTickishId pass) => Outputable (GenTickish pass) where ppr (HpcTick modl ix) = hcat [text "hpc<", ppr modl, comma, ppr ix, text ">"] ppr (Breakpoint _ext ix vars) = hcat [text "break<", ppr ix, text ">", parens (hcat (punctuate comma (map ppr vars)))] ppr (ProfNote { profNoteCC = cc, profNoteCount = tick, profNoteScope = scope }) = case (tick,scope) of (True,True) -> hcat [text "scctick<", ppr cc, char '>'] (True,False) -> hcat [text "tick<", ppr cc, char '>'] _ -> hcat [text "scc<", ppr cc, char '>'] ppr (SourceNote span _) = hcat [ text "src<", pprUserRealSpan True span, char '>'] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Predicate.hs0000644000000000000000000003060214472400112020760 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {- Describes predicates as they are considered by the solver. 
-} module GHC.Core.Predicate ( Pred(..), classifyPredType, isPredTy, isEvVarType, -- Equality predicates EqRel(..), eqRelRole, isEqPrimPred, isEqPred, getEqPredTys, getEqPredTys_maybe, getEqPredRole, predTypeEqRel, mkPrimEqPred, mkReprPrimEqPred, mkPrimEqPredRole, mkHeteroPrimEqPred, mkHeteroReprPrimEqPred, -- Class predicates mkClassPred, isDictTy, isClassPred, isEqPredClass, isCTupleClass, getClassPredTys, getClassPredTys_maybe, classMethodTy, classMethodInstTy, -- Implicit parameters isIPLikePred, hasIPSuperClasses, isIPTyCon, isIPClass, isCallStackTy, isCallStackPred, isCallStackPredTy, isIPPred_maybe, -- Evidence variables DictId, isEvVar, isDictId ) where import GHC.Prelude import GHC.Core.Type import GHC.Core.Class import GHC.Core.TyCon import GHC.Core.TyCon.RecWalk import GHC.Types.Var import GHC.Core.Coercion import GHC.Core.Multiplicity ( scaledThing ) import GHC.Builtin.Names import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Data.FastString import Control.Monad ( guard ) -- | A predicate in the solver. The solver tries to prove Wanted predicates -- from Given ones. data Pred -- | A typeclass predicate. = ClassPred Class [Type] -- | A type equality predicate. | EqPred EqRel Type Type -- | An irreducible predicate. | IrredPred PredType -- | A quantified predicate. 
-- -- See Note [Quantified constraints] in GHC.Tc.Solver.Canonical | ForAllPred [TyVar] [PredType] PredType -- NB: There is no TuplePred case -- Tuple predicates like (Eq a, Ord b) are just treated -- as ClassPred, as if we had a tuple class with two superclasses -- class (c1, c2) => (%,%) c1 c2 classifyPredType :: PredType -> Pred classifyPredType ev_ty = case splitTyConApp_maybe ev_ty of Just (tc, [_, _, ty1, ty2]) | tc `hasKey` eqReprPrimTyConKey -> EqPred ReprEq ty1 ty2 | tc `hasKey` eqPrimTyConKey -> EqPred NomEq ty1 ty2 Just (tc, tys) | Just clas <- tyConClass_maybe tc -> ClassPred clas tys _ | (tvs, rho) <- splitForAllTyCoVars ev_ty , (theta, pred) <- splitFunTys rho , not (null tvs && null theta) -> ForAllPred tvs (map scaledThing theta) pred | otherwise -> IrredPred ev_ty -- --------------------- Dictionary types --------------------------------- mkClassPred :: Class -> [Type] -> PredType mkClassPred clas tys = mkTyConApp (classTyCon clas) tys isDictTy :: Type -> Bool isDictTy = isClassPred getClassPredTys :: HasDebugCallStack => PredType -> (Class, [Type]) getClassPredTys ty = case getClassPredTys_maybe ty of Just (clas, tys) -> (clas, tys) Nothing -> pprPanic "getClassPredTys" (ppr ty) getClassPredTys_maybe :: PredType -> Maybe (Class, [Type]) getClassPredTys_maybe ty = case splitTyConApp_maybe ty of Just (tc, tys) | Just clas <- tyConClass_maybe tc -> Just (clas, tys) _ -> Nothing classMethodTy :: Id -> Type -- Takes a class selector op :: forall a. C a => meth_ty -- and returns the type of its method, meth_ty -- The selector can be a superclass selector, in which case -- you get back a superclass classMethodTy sel_id = funResultTy $ -- meth_ty dropForAlls $ -- C a => meth_ty varType sel_id -- forall a. C n => meth_ty classMethodInstTy :: Id -> [Type] -> Type -- Takes a class selector op :: forall a b. 
C a b => meth_ty -- and the types [ty1, ty2] at which it is instantiated, -- returns the instantiated type of its method, meth_ty[t1/a,t2/b] -- The selector can be a superclass selector, in which case -- you get back a superclass classMethodInstTy sel_id arg_tys = funResultTy $ piResultTys (varType sel_id) arg_tys -- --------------------- Equality predicates --------------------------------- -- | A choice of equality relation. This is separate from the type 'Role' -- because 'Phantom' does not define a (non-trivial) equality relation. data EqRel = NomEq | ReprEq deriving (Eq, Ord) instance Outputable EqRel where ppr NomEq = text "nominal equality" ppr ReprEq = text "representational equality" eqRelRole :: EqRel -> Role eqRelRole NomEq = Nominal eqRelRole ReprEq = Representational getEqPredTys :: PredType -> (Type, Type) getEqPredTys ty = case splitTyConApp_maybe ty of Just (tc, [_, _, ty1, ty2]) | tc `hasKey` eqPrimTyConKey || tc `hasKey` eqReprPrimTyConKey -> (ty1, ty2) _ -> pprPanic "getEqPredTys" (ppr ty) getEqPredTys_maybe :: PredType -> Maybe (Role, Type, Type) getEqPredTys_maybe ty = case splitTyConApp_maybe ty of Just (tc, [_, _, ty1, ty2]) | tc `hasKey` eqPrimTyConKey -> Just (Nominal, ty1, ty2) | tc `hasKey` eqReprPrimTyConKey -> Just (Representational, ty1, ty2) _ -> Nothing getEqPredRole :: PredType -> Role getEqPredRole ty = eqRelRole (predTypeEqRel ty) -- | Get the equality relation relevant for a pred type. predTypeEqRel :: PredType -> EqRel predTypeEqRel ty | Just (tc, _) <- splitTyConApp_maybe ty , tc `hasKey` eqReprPrimTyConKey = ReprEq | otherwise = NomEq {------------------------------------------- Predicates on PredType --------------------------------------------} {- Note [Evidence for quantified constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The superclass mechanism in GHC.Tc.Solver.Canonical.makeSuperClasses risks taking a quantified constraint like (forall a. C a => a ~ b) and generate superclass evidence (forall a. 
C a => a ~# b) This is a funny thing: neither isPredTy nor isCoVarType are true of it. So we are careful not to generate it in the first place: see Note [Equality superclasses in quantified constraints] in GHC.Tc.Solver.Canonical. -} isEvVarType :: Type -> Bool -- True of (a) predicates, of kind Constraint, such as (Eq a), and (a ~ b) -- (b) coercion types, such as (t1 ~# t2) or (t1 ~R# t2) -- See Note [Types for coercions, predicates, and evidence] in GHC.Core.TyCo.Rep -- See Note [Evidence for quantified constraints] isEvVarType ty = isCoVarType ty || isPredTy ty isEqPredClass :: Class -> Bool -- True of (~) and (~~) isEqPredClass cls = cls `hasKey` eqTyConKey || cls `hasKey` heqTyConKey isClassPred, isEqPred, isEqPrimPred :: PredType -> Bool isClassPred ty = case tyConAppTyCon_maybe ty of Just tyCon | isClassTyCon tyCon -> True _ -> False isEqPred ty -- True of (a ~ b) and (a ~~ b) -- ToDo: should we check saturation? | Just tc <- tyConAppTyCon_maybe ty , Just cls <- tyConClass_maybe tc = isEqPredClass cls | otherwise = False isEqPrimPred ty = isCoVarType ty -- True of (a ~# b) (a ~R# b) isCTupleClass :: Class -> Bool isCTupleClass cls = isTupleTyCon (classTyCon cls) {- ********************************************************************* * * Implicit parameters * * ********************************************************************* -} isIPTyCon :: TyCon -> Bool isIPTyCon tc = tc `hasKey` ipClassKey -- Class and its corresponding TyCon have the same Unique isIPClass :: Class -> Bool isIPClass cls = cls `hasKey` ipClassKey isIPLikePred :: Type -> Bool -- See Note [Local implicit parameters] isIPLikePred = is_ip_like_pred initIPRecTc is_ip_like_pred :: RecTcChecker -> Type -> Bool is_ip_like_pred rec_clss ty | Just (tc, tys) <- splitTyConApp_maybe ty , Just rec_clss' <- if isTupleTyCon tc -- Tuples never cause recursion then Just rec_clss else checkRecTc rec_clss tc , Just cls <- tyConClass_maybe tc = isIPClass cls || has_ip_super_classes rec_clss' cls tys | 
otherwise = False -- Includes things like (D []) where D is -- a Constraint-ranged family; #7785 hasIPSuperClasses :: Class -> [Type] -> Bool -- See Note [Local implicit parameters] hasIPSuperClasses = has_ip_super_classes initIPRecTc has_ip_super_classes :: RecTcChecker -> Class -> [Type] -> Bool has_ip_super_classes rec_clss cls tys = any ip_ish (classSCSelIds cls) where -- Check that the type of a superclass determines its value -- sc_sel_id :: forall a b. C a b -> ip_ish sc_sel_id = is_ip_like_pred rec_clss $ classMethodInstTy sc_sel_id tys initIPRecTc :: RecTcChecker initIPRecTc = setRecTcMaxBound 1 initRecTc -- --------------------- CallStack predicates --------------------------------- isCallStackPredTy :: Type -> Bool -- True of HasCallStack, or IP "blah" CallStack isCallStackPredTy ty | Just (tc, tys) <- splitTyConApp_maybe ty , Just cls <- tyConClass_maybe tc , Just {} <- isCallStackPred cls tys = True | otherwise = False -- | Is a 'PredType' a 'CallStack' implicit parameter? -- -- If so, return the name of the parameter. isCallStackPred :: Class -> [Type] -> Maybe FastString isCallStackPred cls tys | [ty1, ty2] <- tys , isIPClass cls , isCallStackTy ty2 = isStrLitTy ty1 | otherwise = Nothing -- | Is a type a 'CallStack'? isCallStackTy :: Type -> Bool isCallStackTy ty | Just tc <- tyConAppTyCon_maybe ty = tc `hasKey` callStackTyConKey | otherwise = False -- | Decomposes a predicate if it is an implicit parameter. Does not look in -- superclasses. See also [Local implicit parameters]. isIPPred_maybe :: Type -> Maybe (FastString, Type) isIPPred_maybe ty = do (tc,[t1,t2]) <- splitTyConApp_maybe ty guard (isIPTyCon tc) x <- isStrLitTy t1 return (x,t2) {- Note [Local implicit parameters] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The function isIPLikePred tells if this predicate, or any of its superclasses, is an implicit parameter. Why are implicit parameters special? 
Unlike normal classes, we can have local instances for implicit parameters, in the form of let ?x = True in ... So in various places we must be careful not to assume that any value of the right type will do; we must carefully look for the innermost binding. So isIPLikePred checks whether this is an implicit parameter, or has a superclass that is an implicit parameter. Several wrinkles * We must be careful with superclasses, as #18649 showed. Haskell doesn't allow an implicit parameter as a superclass class (?x::a) => C a where ... but with a constraint tuple we might have (% Eq a, ?x::Int %) and /its/ superclasses, namely (Eq a) and (?x::Int), /do/ include an implicit parameter. With ConstraintKinds this can apply to /any/ class, e.g. class sc => C sc where ... Then (C (?x::Int)) has (?x::Int) as a superclass. So we must instantiate and check each superclass, one by one, in hasIPSuperClasses. * With -XUndecidableSuperClasses, the superclass hunt can go on forever, so we need a RecTcChecker to cut it off. * Another apparent additional complexity involves type families. For example, consider type family D (v::*->*) :: Constraint type instance D [] = () f :: D v => v Char -> Int If we see a call (f "foo"), we'll pass a "dictionary" () |> (g :: () ~ D []) and it's good to specialise f at this dictionary. So the question is: can an implicit parameter "hide inside" a type-family constraint like (D a). Well, no. We don't allow type instance D Maybe = ?x:Int Hence the umbrella 'otherwise' case in is_ip_like_pred. See #7785. Small worries (Sept 20): * I don't see what stops us having that 'type instance'. Indeed I think nothing does. * I'm a little concerned about type variables; such a variable might be instantiated to an implicit parameter. I don't think this matters in the cases for which isIPLikePred is used, and it's pretty obscure anyway. 
* The superclass hunt stops when it encounters the same class again, but in principle we could have the same class, differently instantiated, and the second time it could have an implicit parameter I'm going to treat these as problems for another day. They are all exotic. -} {- ********************************************************************* * * Evidence variables * * ********************************************************************* -} isEvVar :: Var -> Bool isEvVar var = isEvVarType (varType var) isDictId :: Id -> Bool isDictId id = isDictTy (varType id) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Reduction.hs0000644000000000000000000010346514472400112021024 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE FlexibleContexts #-} module GHC.Core.Reduction ( -- * Reductions Reduction(..), ReductionN, ReductionR, HetReduction(..), Reductions(..), mkReduction, mkReductions, mkHetReduction, coercionRedn, reductionOriginalType, downgradeRedn, mkSubRedn, mkTransRedn, mkCoherenceRightRedn, mkCoherenceRightMRedn, mkCastRedn1, mkCastRedn2, mkReflRedn, mkGReflRightRedn, mkGReflRightMRedn, mkGReflLeftRedn, mkGReflLeftMRedn, mkAppRedn, mkAppRedns, mkFunRedn, mkForAllRedn, mkHomoForAllRedn, mkTyConAppRedn, mkClassPredRedn, mkProofIrrelRedn, mkReflCoRedn, homogeniseHetRedn, unzipRedns, -- * Rewriting type arguments ArgsReductions(..), simplifyArgsWorker ) where import GHC.Prelude import GHC.Core.Class ( Class(classTyCon) ) import GHC.Core.Coercion import GHC.Core.Predicate ( mkClassPred ) import GHC.Core.TyCon ( TyCon ) import GHC.Core.Type import GHC.Data.Pair ( Pair(Pair) ) import GHC.Types.Var ( setTyVarKind ) import GHC.Types.Var.Env ( mkInScopeSet ) import GHC.Types.Var.Set ( TyCoVarSet ) import GHC.Utils.Misc ( HasDebugCallStack, equalLength ) import GHC.Utils.Outputable import GHC.Utils.Panic ( assertPpr, panic ) {- %************************************************************************ %* * Reductions %* * 
%************************************************************************ Note [The Reduction type] ~~~~~~~~~~~~~~~~~~~~~~~~~ Many functions in the type-checker rewrite a type, using Given type equalitie or type-family reductions, and return a Reduction, which is just a pair of the coercion and the RHS type of the coercion: data Reduction = Reduction Coercion !Type The order of the arguments to the constructor serves as a reminder of what the Type is. In Reduction co ty `ty` appears to the right of `co`, reminding us that we must have: co :: unrewritten_ty ~ ty Example functions that use this datatype: GHC.Core.FamInstEnv.topNormaliseType_maybe :: FamInstEnvs -> Type -> Maybe Reduction GHC.Tc.Solver.Rewrite.rewrite :: CtEvidence -> TcType -> TcS Reduction Having Reduction as a data type, with a strict Type field, rather than using a pair (Coercion,Type) gives several advantages (see #20161) * The strictness in Type improved performance in rewriting of type families (around 2.5% improvement in T9872), * Compared to the situation before, it gives improved consistency around orientation of rewritings, as a Reduction is always left-to-right (the coercion's RHS type is always the type stored in the 'Reduction'). No more 'mkSymCo's needed to convert between left-to-right and right-to-left. One could imagine storing the LHS type of the coercion in the Reduction as well, but in fact `reductionOriginalType` is very seldom used, so it's not worth it. -} -- | A 'Reduction' is the result of an operation that rewrites a type @ty_in@. -- The 'Reduction' includes the rewritten type @ty_out@ and a 'Coercion' @co@ -- such that @co :: ty_in ~ ty_out@, where the role of the coercion is determined -- by the context. That is, the LHS type of the coercion is the original type -- @ty_in@, while its RHS type is the rewritten type @ty_out@. -- -- A Reduction is always homogeneous, unless it is wrapped inside a 'HetReduction', -- which separately stores the kind coercion. 
-- -- See Note [The Reduction type]. data Reduction = Reduction { reductionCoercion :: Coercion , reductionReducedType :: !Type } -- N.B. the 'Coercion' field must be lazy: see for instance GHC.Tc.Solver.Rewrite.rewrite_tyvar2 -- which returns an error in the 'Coercion' field when dealing with a Derived constraint -- (which is OK as this Coercion gets ignored later). -- We might want to revisit the strictness once Deriveds are removed. -- | Stores a heterogeneous reduction. -- -- The stored kind coercion must relate the kinds of the -- stored reduction. That is, in @HetReduction (Reduction co xi) kco@, -- we must have: -- -- > co :: ty ~ xi -- > kco :: typeKind ty ~ typeKind xi data HetReduction = HetReduction Reduction MCoercionN -- N.B. strictness annotations don't seem to make a difference here -- | Create a heterogeneous reduction. -- -- Pre-condition: the provided kind coercion (second argument) -- relates the kinds of the stored reduction. -- That is, if the coercion stored in the 'Reduction' is of the form -- -- > co :: ty ~ xi -- -- Then the kind coercion supplied must be of the form: -- -- > kco :: typeKind ty ~ typeKind xi mkHetReduction :: Reduction -- ^ heterogeneous reduction -> MCoercionN -- ^ kind coercion -> HetReduction mkHetReduction redn mco = HetReduction redn mco {-# INLINE mkHetReduction #-} -- | Homogenise a heterogeneous reduction. -- -- Given @HetReduction (Reduction co xi) kco@, with -- -- > co :: ty ~ xi -- > kco :: typeKind(ty) ~ typeKind(xi) -- -- this returns the homogeneous reduction: -- -- > hco :: ty ~ ( xi |> sym kco ) homogeniseHetRedn :: Role -> HetReduction -> Reduction homogeniseHetRedn role (HetReduction redn kco) = mkCoherenceRightMRedn role redn (mkSymMCo kco) {-# INLINE homogeniseHetRedn #-} -- | Create a 'Reduction' from a pair of a 'Coercion' and a 'Type. -- -- Pre-condition: the RHS type of the coercion matches the provided type -- (perhaps up to zonking). -- -- Use 'coercionRedn' when you only have the coercion. 
mkReduction :: Coercion -> Type -> Reduction mkReduction co ty = Reduction co ty {-# INLINE mkReduction #-} instance Outputable Reduction where ppr redn = braces $ vcat [ text "reductionOriginalType:" <+> ppr (reductionOriginalType redn) , text " reductionReducedType:" <+> ppr (reductionReducedType redn) , text " reductionCoercion:" <+> ppr (reductionCoercion redn) ] -- | A 'Reduction' in which the 'Coercion' has 'Nominal' role. type ReductionN = Reduction -- | A 'Reduction' in which the 'Coercion' has 'Representational' role. type ReductionR = Reduction -- | Get the original, unreduced type corresponding to a 'Reduction'. -- -- This is obtained by computing the LHS kind of the stored coercion, -- which may be slow. reductionOriginalType :: Reduction -> Type reductionOriginalType = coercionLKind . reductionCoercion {-# INLINE reductionOriginalType #-} -- | Turn a 'Coercion' into a 'Reduction' -- by inspecting the RHS type of the coercion. -- -- Prefer using 'mkReduction' when you already know -- the RHS type of the coercion, to avoid computing it anew. coercionRedn :: Coercion -> Reduction coercionRedn co = Reduction co (coercionRKind co) {-# INLINE coercionRedn #-} -- | Downgrade the role of the coercion stored in the 'Reduction'. downgradeRedn :: Role -- ^ desired role -> Role -- ^ current role -> Reduction -> Reduction downgradeRedn new_role old_role redn@(Reduction co _) = redn { reductionCoercion = downgradeRole new_role old_role co } {-# INLINE downgradeRedn #-} -- | Downgrade the role of the coercion stored in the 'Reduction', -- from 'Nominal' to 'Representational'. mkSubRedn :: Reduction -> Reduction mkSubRedn redn@(Reduction co _) = redn { reductionCoercion = mkSubCo co } {-# INLINE mkSubRedn #-} -- | Compose a reduction with a coercion on the left. -- -- Pre-condition: the provided coercion's RHS type must match the LHS type -- of the coercion that is stored in the reduction. 
mkTransRedn :: Coercion -> Reduction -> Reduction mkTransRedn co1 redn@(Reduction co2 _) = redn { reductionCoercion = co1 `mkTransCo` co2 } {-# INLINE mkTransRedn #-} -- | The reflexive reduction. mkReflRedn :: Role -> Type -> Reduction mkReflRedn r ty = mkReduction (mkReflCo r ty) ty -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the rewritten type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @ty ~res_co~> (ty |> mco)@ -- at the given 'Role'. mkGReflRightRedn :: Role -> Type -> CoercionN -> Reduction mkGReflRightRedn role ty co = mkReduction (mkGReflRightCo role ty co) (mkCastTy ty co) {-# INLINE mkGReflRightRedn #-} -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the rewritten type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @ty ~res_co~> (ty |> mco)@ -- at the given 'Role'. mkGReflRightMRedn :: Role -> Type -> MCoercionN -> Reduction mkGReflRightMRedn role ty mco = mkReduction (mkGReflRightMCo role ty mco) (mkCastTyMCo ty mco) {-# INLINE mkGReflRightMRedn #-} -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the original (non-rewritten) type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @(ty |> mco) ~res_co~> ty@ -- at the given 'Role'. mkGReflLeftRedn :: Role -> Type -> CoercionN -> Reduction mkGReflLeftRedn role ty co = mkReduction (mkGReflLeftCo role ty co) ty {-# INLINE mkGReflLeftRedn #-} -- | Create a 'Reduction' from a kind cast, in which -- the casted type is the original (non-rewritten) type. -- -- Given @ty :: k1@, @mco :: k1 ~ k2@, -- produces the 'Reduction' @(ty |> mco) ~res_co~> ty@ -- at the given 'Role'. mkGReflLeftMRedn :: Role -> Type -> MCoercionN -> Reduction mkGReflLeftMRedn role ty mco = mkReduction (mkGReflLeftMCo role ty mco) ty {-# INLINE mkGReflLeftMRedn #-} -- | Apply a cast to the result of a 'Reduction'. 
-- -- Given a 'Reduction' @ty1 ~co1~> (ty2 :: k2)@ and a kind coercion @kco@ -- with LHS kind @k2@, produce a new 'Reduction' @ty1 ~co2~> ( ty2 |> kco )@ -- of the given 'Role' (which must match the role of the coercion stored -- in the 'Reduction' argument). mkCoherenceRightRedn :: Role -> Reduction -> CoercionN -> Reduction mkCoherenceRightRedn r (Reduction co1 ty2) kco = mkReduction (mkCoherenceRightCo r ty2 kco co1) (mkCastTy ty2 kco) {-# INLINE mkCoherenceRightRedn #-} -- | Apply a cast to the result of a 'Reduction', using an 'MCoercionN'. -- -- Given a 'Reduction' @ty1 ~co1~> (ty2 :: k2)@ and a kind coercion @mco@ -- with LHS kind @k2@, produce a new 'Reduction' @ty1 ~co2~> ( ty2 |> mco )@ -- of the given 'Role' (which must match the role of the coercion stored -- in the 'Reduction' argument). mkCoherenceRightMRedn :: Role -> Reduction -> MCoercionN -> Reduction mkCoherenceRightMRedn r (Reduction co1 ty2) kco = mkReduction (mkCoherenceRightMCo r ty2 kco co1) (mkCastTyMCo ty2 kco) {-# INLINE mkCoherenceRightMRedn #-} -- | Apply a cast to a 'Reduction', casting both the original and the reduced type. -- -- Given @cast_co@ and 'Reduction' @ty ~co~> xi@, this function returns -- the 'Reduction' @(ty |> cast_co) ~return_co~> (xi |> cast_co)@ -- of the given 'Role' (which must match the role of the coercion stored -- in the 'Reduction' argument). -- -- Pre-condition: the 'Type' passed in is the same as the LHS type -- of the coercion stored in the 'Reduction'. mkCastRedn1 :: Role -> Type -- ^ original type -> CoercionN -- ^ coercion to cast with -> Reduction -- ^ rewritten type, with rewriting coercion -> Reduction mkCastRedn1 r ty cast_co (Reduction co xi) -- co :: ty ~r ty' -- return_co :: (ty |> cast_co) ~r (ty' |> cast_co) = mkReduction (castCoercionKind1 co r ty xi cast_co) (mkCastTy xi cast_co) {-# INLINE mkCastRedn1 #-} -- | Apply casts on both sides of a 'Reduction' (of the given 'Role'). 
-- -- Use 'mkCastRedn1' when you want to cast both the original and reduced types -- in a 'Reduction' using the same coercion. -- -- Pre-condition: the 'Type' passed in is the same as the LHS type -- of the coercion stored in the 'Reduction'. mkCastRedn2 :: Role -> Type -- ^ original type -> CoercionN -- ^ coercion to cast with on the left -> Reduction -- ^ rewritten type, with rewriting coercion -> CoercionN -- ^ coercion to cast with on the right -> Reduction mkCastRedn2 r ty cast_co (Reduction nco nty) cast_co' = mkReduction (castCoercionKind2 nco r ty nty cast_co cast_co') (mkCastTy nty cast_co') {-# INLINE mkCastRedn2 #-} -- | Apply one 'Reduction' to another. -- -- Combines 'mkAppCo' and 'mkAppTy`. mkAppRedn :: Reduction -> Reduction -> Reduction mkAppRedn (Reduction co1 ty1) (Reduction co2 ty2) = mkReduction (mkAppCo co1 co2) (mkAppTy ty1 ty2) {-# INLINE mkAppRedn #-} -- | Create a function 'Reduction'. -- -- Combines 'mkFunCo' and 'mkFunTy'. mkFunRedn :: Role -> AnonArgFlag -> ReductionN -- ^ multiplicity reduction -> Reduction -- ^ argument reduction -> Reduction -- ^ result reduction -> Reduction mkFunRedn r vis (Reduction w_co w_ty) (Reduction arg_co arg_ty) (Reduction res_co res_ty) = mkReduction (mkFunCo r w_co arg_co res_co) (mkFunTy vis w_ty arg_ty res_ty) {-# INLINE mkFunRedn #-} -- | Create a 'Reduction' associated to a Π type, -- from a kind 'Reduction' and a body 'Reduction'. -- -- Combines 'mkForAllCo' and 'mkForAllTy'. mkForAllRedn :: ArgFlag -> TyVar -> ReductionN -- ^ kind reduction -> Reduction -- ^ body reduction -> Reduction mkForAllRedn vis tv1 (Reduction h ki') (Reduction co ty) = mkReduction (mkForAllCo tv1 h co) (mkForAllTy tv2 vis ty) where tv2 = setTyVarKind tv1 ki' {-# INLINE mkForAllRedn #-} -- | Create a 'Reduction' of a quantified type from a -- 'Reduction' of the body. -- -- Combines 'mkHomoForAllCos' and 'mkForAllTys'. 
mkHomoForAllRedn :: [TyVarBinder] -> Reduction -> Reduction mkHomoForAllRedn bndrs (Reduction co ty) = mkReduction (mkHomoForAllCos (binderVars bndrs) co) (mkForAllTys bndrs ty) {-# INLINE mkHomoForAllRedn #-} -- | Create a 'Reduction' from a coercion between coercions. -- -- Combines 'mkProofIrrelCo' and 'mkCoercionTy'. mkProofIrrelRedn :: Role -- ^ role of the created coercion, "r" -> CoercionN -- ^ co :: phi1 ~N phi2 -> Coercion -- ^ g1 :: phi1 -> Coercion -- ^ g2 :: phi2 -> Reduction -- ^ res_co :: g1 ~r g2 mkProofIrrelRedn role co g1 g2 = mkReduction (mkProofIrrelCo role co g1 g2) (mkCoercionTy g2) {-# INLINE mkProofIrrelRedn #-} -- | Create a reflexive 'Reduction' whose RHS is the given 'Coercion', -- with the specified 'Role'. mkReflCoRedn :: Role -> Coercion -> Reduction mkReflCoRedn role co = mkReduction (mkReflCo role co_ty) co_ty where co_ty = mkCoercionTy co {-# INLINE mkReflCoRedn #-} -- | A collection of 'Reduction's where the coercions and the types are stored separately. -- -- Use 'unzipRedns' to obtain 'Reductions' from a list of 'Reduction's. -- -- This datatype is used in 'mkAppRedns', 'mkClassPredRedns' and 'mkTyConAppRedn', -- which expect separate types and coercions. -- -- Invariant: the two stored lists are of the same length, -- and the RHS type of each coercion is the corresponding type. data Reductions = Reductions [Coercion] [Type] -- | Create 'Reductions' from individual lists of coercions and types. -- -- The lists should be of the same length, and the RHS type of each coercion -- should match the specified type in the other list. mkReductions :: [Coercion] -> [Type] -> Reductions mkReductions cos tys = Reductions cos tys {-# INLINE mkReductions #-} -- | Combines 'mkAppCos' and 'mkAppTys'. 
mkAppRedns :: Reduction -> Reductions -> Reduction mkAppRedns (Reduction co ty) (Reductions cos tys) = mkReduction (mkAppCos co cos) (mkAppTys ty tys) {-# INLINE mkAppRedns #-} -- | 'TyConAppCo' for 'Reduction's: combines 'mkTyConAppCo' and `mkTyConApp`. mkTyConAppRedn :: Role -> TyCon -> Reductions -> Reduction mkTyConAppRedn role tc (Reductions cos tys) = mkReduction (mkTyConAppCo role tc cos) (mkTyConApp tc tys) {-# INLINE mkTyConAppRedn #-} -- | Reduce the arguments of a 'Class' 'TyCon'. mkClassPredRedn :: Class -> Reductions -> Reduction mkClassPredRedn cls (Reductions cos tys) = mkReduction (mkTyConAppCo Nominal (classTyCon cls) cos) (mkClassPred cls tys) {-# INLINE mkClassPredRedn #-} -- | Obtain 'Reductions' from a list of 'Reduction's by unzipping. unzipRedns :: [Reduction] -> Reductions unzipRedns = foldr accRedn (Reductions [] []) where accRedn :: Reduction -> Reductions -> Reductions accRedn (Reduction co xi) (Reductions cos xis) = Reductions (co:cos) (xi:xis) {-# INLINE unzipRedns #-} -- NB: this function is currently used in two locations: -- -- - GHC.Tc.Gen.Foreign.normaliseFfiType', with one call of the form: -- -- unzipRedns <$> zipWithM f tys roles -- -- - GHC.Tc.Solver.Monad.breakTyEqCycle_maybe, with two calls of the form: -- -- unzipRedns <$> mapM f tys -- -- It is possible to write 'mapAndUnzipM' functions to handle these cases, -- but the above locations aren't performance critical, so it was deemed -- to not be worth it. {- %************************************************************************ %* * Simplifying types %* * %************************************************************************ The function below morally belongs in GHC.Tc.Solver.Rewrite, but it is used also in FamInstEnv, and so lives here. Note [simplifyArgsWorker] ~~~~~~~~~~~~~~~~~~~~~~~~~ Invariant (F2) of Note [Rewriting] in GHC.Tc.Solver.Rewrite says that rewriting is homogeneous. 
This causes some trouble when rewriting a function applied to a telescope of arguments, perhaps with dependency. For example, suppose type family F :: forall (j :: Type) (k :: Type). Maybe j -> Either j k -> Bool -> [k] and we wish to rewrite the args of (with kind applications explicit) F @a @b (Just @a c) (Right @a @b d) False where all variables are skolems and a :: Type b :: Type c :: a d :: b [G] aco :: a ~ fa [G] bco :: b ~ fb [G] cco :: c ~ fc [G] dco :: d ~ fd The first step is to rewrite all the arguments. This is done before calling simplifyArgsWorker. We start from a b Just @a c Right @a @b d False and get left-to-right reductions whose coercions are as follows: co1 :: a ~ fa co2 :: b ~ fb co3 :: (Just @a c) ~ (Just @fa (fc |> aco) |> co6) co4 :: (Right @a @b d) ~ (Right @fa @fb (fd |> bco) |> co7) co5 :: False ~ False where co6 = Maybe (sym aco) :: Maybe fa ~ Maybe a co7 = Either (sym aco) (sym bco) :: Either fa fb ~ Either a b We now process the rewritten args in left-to-right order. The first two args need no further processing. But now consider the third argument. Let f3 = the rewritten result, Just fa (fc |> aco) |> co6. This f3 rewritten argument has kind (Maybe a), due to homogeneity of rewriting (F2). And yet, when we build the application (F @fa @fb ...), we need this argument to have kind (Maybe fa), not (Maybe a). We must cast this argument. The coercion to use is determined by the kind of F: we see in F's kind that the third argument has kind Maybe j. Critically, we also know that the argument corresponding to j (in our example, a) rewrote with a coercion co1. We can thus know the coercion needed for the 3rd argument is (Maybe co1), thus building (f3 |> Maybe co1) More generally, we must use the Lifting Lemma, as implemented in Coercion.liftCoSubst. 
As we work left-to-right, any variable that is a dependent parameter (j and k, in our example) gets mapped in a lifting context to the coercion that is output from rewriting the corresponding argument (co1 and co2, in our example). Then, after rewriting later arguments, we lift the kind of these arguments in the lifting context that we've be building up. This coercion is then used to keep the result of rewriting well-kinded. Working through our example, this is what happens: 1. Extend the (empty) LC with [j |-> co1]. No new casting must be done, because the binder associated with the first argument has a closed type (no variables). 2. Extend the LC with [k |-> co2]. No casting to do. 3. Lifting the kind (Maybe j) with our LC yields co8 :: Maybe a ~ Maybe fa. Use (f3 |> co8) as the argument to F. 4. Lifting the kind (Either j k) with our LC yields co9 :: Either a b ~ Either fa fb. Use (f4 |> co9) as the 4th argument to F, where f4 is the rewritten form of argument 4, written above. 5. We lift Bool with our LC, getting ; casting has no effect. We're now almost done, but the new application F @fa @fb (f3 |> co8) (f4 |> co9) False has the wrong kind. Its kind is [fb], instead of the original [b]. So we must use our LC one last time to lift the result kind [k], getting res_co :: [fb] ~ [b], and we cast our result. Accordingly, the final result is F @fa @fb (Just @fa (fc |> aco) |> Maybe (sym aco) |> sym (Maybe (sym aco))) (Right @fa @fb (fd |> bco) |> Either (sym aco) (sym bco) |> sym (Either (sym aco) (sym bco))) False |> [sym bco] The res_co (in this case, [sym bco]) is the third component of the tuple returned by simplifyArgsWorker. Note [Last case in simplifyArgsWorker] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In writing simplifyArgsWorker's `go`, we know here that args cannot be empty, because that case is first. We've run out of binders. But perhaps inner_ki is a tyvar that has been instantiated with a Π-type. Here is an example. a :: forall (k :: Type). 
k -> k Proxy :: forall j. j -> Type type family Star axStar :: Star ~ Type type family NoWay :: Bool axNoWay :: NoWay ~ False bo :: Type [G] bc :: bo ~ Bool (in inert set) co :: (forall j. j -> Type) ~ (forall (j :: Star). (j |> axStar) -> Star) co = forall (j :: sym axStar). ( -> sym axStar) We are rewriting: a (forall (j :: Star). (j |> axStar) -> Star) -- 1 (Proxy |> co) -- 2 (bo |> sym axStar) -- 3 (NoWay |> sym bc) -- 4 :: Star First, we rewrite all the arguments (before simplifyArgsWorker), like so: co1 :: (forall (j :: Star). (j |> axStar) -> Star) ~ (forall j. j -> Type) -- 1 co2 :: (Proxy |> co) ~ (Proxy |> co) -- 2 co3 :: (bo |> sym axStar) ~ (Bool |> sym axStar) -- 3 co4 :: (NoWay |> sym bc) ~ (False |> sym bc) -- 4 Then we do the process described in Note [simplifyArgsWorker]. 1. Lifting Type (the kind of the first arg) gives us a reflexive coercion, so we don't use it. But we do build a lifting context [k -> co1] (where co1 is a result of rewriting an argument, written above). 2. Lifting k gives us co1, so the second argument becomes (Proxy |> co |> co1). This is not a dependent argument, so we don't extend the lifting context. Now we need to deal with argument (3). The way we normally proceed is to lift the kind of the binder, to see whether it's dependent. But here, the remainder of the kind of `a` that we're left with after processing two arguments is just `k`. The way forward is look up k in the lifting context, getting co1. If we're at all well-typed, co1 will be a coercion between Π-types, with at least one binder. So, let's decompose co1 with decomposePiCos. This decomposition needs arguments to use to instantiate any kind parameters. Look at the type of co1. If we just decomposed it, we would end up with coercions whose types include j, which is out of scope here. Accordingly, decomposePiCos takes a list of types whose kinds are the *unrewritten* types in the decomposed coercion. (See comments on decomposePiCos.) 
Because the rewritten types have unrewritten kinds (because rewriting is homogeneous), passing the list of rewritten types to decomposePiCos just won't do: later arguments' kinds won't be as expected. So we need to get the *unrewritten* types to pass to decomposePiCos. We can do this easily enough by taking the kind of the argument coercions, passed in originally. (Alternative 1: We could re-engineer decomposePiCos to deal with this situation. But that function is already gnarly, and other call sites of decomposePiCos would suffer from the change, even though they are much more common than this one.) (Alternative 2: We could avoid calling decomposePiCos entirely, integrating its behavior into simplifyArgsWorker. This would work, I think, but then all of the complication of decomposePiCos would end up layered on top of all the complication here. Please, no.) (Alternative 3: We could pass the unrewritten arguments into simplifyArgsWorker so that we don't have to recreate them. But that would complicate the interface of this function to handle a very dark, dark corner case. Better to keep our demons to ourselves here instead of exposing them to callers. This decision is easily reversed if there is ever any performance trouble due to the call of coercionKind.) So we now call decomposePiCos co1 (Pair (forall (j :: Star). (j |> axStar) -> Star) (forall j. j -> Type)) [bo |> sym axStar, NoWay |> sym bc] to get co5 :: Star ~ Type co6 :: (j |> axStar) ~ (j |> co5), substituted to (bo |> sym axStar |> axStar) ~ (bo |> sym axStar |> co5) == bo ~ bo res_co :: Type ~ Star We then use these casts on (the rewritten) (3) and (4) to get (Bool |> sym axStar |> co5 :: Type) -- (C3) (False |> sym bc |> co6 :: bo) -- (C4) We can simplify to Bool -- (C3) (False |> sym bc :: bo) -- (C4) Of course, we still must do the processing in Note [simplifyArgsWorker] to finish the job. We thus want to recur. 
Our new function kind is the left-hand type of co1 (gotten, recall, by lifting the variable k that was the return kind of the original function). Why the left-hand type (as opposed to the right-hand type)? Because we have casted all the arguments according to decomposePiCos, which gets us from the right-hand type to the left-hand one. We thus recur with that new function kind, zapping our lifting context, because we have essentially applied it. This recursive call returns ([Bool, False], [...], Refl). The Bool and False are the correct arguments we wish to return. But we must be careful about the result coercion: our new, rewritten application will have kind Type, but we want to make sure that the result coercion casts this back to Star. (Why? Because we started with an application of kind Star, and rewriting is homogeneous.) So, we have to twiddle the result coercion appropriately. Let's check whether this is well-typed. We know a :: forall (k :: Type). k -> k a (forall j. j -> Type) :: (forall j. j -> Type) -> forall j. j -> Type a (forall j. j -> Type) Proxy :: forall j. j -> Type a (forall j. j -> Type) Proxy Bool :: Bool -> Type a (forall j. j -> Type) Proxy Bool False :: Type a (forall j. j -> Type) Proxy Bool False |> res_co :: Star as desired. Whew. Historical note: I (Richard E) once thought that the final part of the kind had to be a variable k (as in the example above). But it might not be: it could be an application of a variable. Here is the example: let f :: forall (a :: Type) (b :: a -> Type). b (Any @a) k :: Type x :: k rewrite (f @Type @((->) k) x) After instantiating [a |-> Type, b |-> ((->) k)], we see that `b (Any @a)` is `k -> Any @a`, and thus the third argument of `x :: k` is well-kinded. -} -- | Stores 'Reductions' as well as a kind coercion. -- -- Used when rewriting arguments to a type function @f@. 
-- -- Invariant: -- when the stored reductions are of the form -- co_i :: ty_i ~ xi_i, -- the kind coercion is of the form -- kco :: typeKind (f ty_1 ... ty_n) ~ typeKind (f xi_1 ... xi_n) -- -- The type function @f@ depends on context. data ArgsReductions = ArgsReductions {-# UNPACK #-} !Reductions !MCoercionN -- The strictness annotations and UNPACK pragma here are crucial -- to getting good performance in simplifyArgsWorker's tight loop. -- This is shared between the rewriter and the normaliser in GHC.Core.FamInstEnv. -- See Note [simplifyArgsWorker] {-# INLINE simplifyArgsWorker #-} -- NB. INLINE yields a ~1% decrease in allocations in T9872d compared to INLINEABLE -- This function is only called in two locations, so the amount of code duplication -- should be rather reasonable despite the size of the function. simplifyArgsWorker :: HasDebugCallStack => [TyCoBinder] -> Kind -- the binders & result kind (not a Π-type) of the function applied to the args -- list of binders can be shorter or longer than the list of args -> TyCoVarSet -- free vars of the args -> [Role] -- list of roles, r -> [Reduction] -- rewritten type arguments, arg_i -- each comes with the coercion used to rewrite it, -- arg_co_i :: ty_i ~ arg_i -> ArgsReductions -- Returns ArgsReductions (Reductions cos xis) res_co, where co_i :: ty_i ~ xi_i, -- and res_co :: kind (f ty_1 ... ty_n) ~ kind (f xi_1 ... xi_n), where f is the function -- that we are applying. -- Precondition: if f :: forall bndrs. inner_ki (where bndrs and inner_ki are passed in), -- then (f ty_1 ... ty_n) is well kinded. Note that (f arg_1 ... arg_n) might *not* be well-kinded. -- Massaging the arg_i in order to make the function application well-kinded is what this -- function is all about. That is, (f xi_1 ... xi_n), where xi_i are the returned arguments, -- *is* well kinded. 
simplifyArgsWorker orig_ki_binders orig_inner_ki orig_fvs orig_roles orig_simplified_args = go orig_lc orig_ki_binders orig_inner_ki orig_roles orig_simplified_args where orig_lc = emptyLiftingContext $ mkInScopeSet orig_fvs go :: LiftingContext -- mapping from tyvars to rewriting coercions -> [TyCoBinder] -- Unsubsted binders of function's kind -> Kind -- Unsubsted result kind of function (not a Pi-type) -> [Role] -- Roles at which to rewrite these ... -> [Reduction] -- rewritten arguments, with their rewriting coercions -> ArgsReductions go !lc binders inner_ki _ [] -- The !lc makes the function strict in the lifting context -- which means GHC can unbox that pair. A modest win. = ArgsReductions (mkReductions [] []) kind_co where final_kind = mkPiTys binders inner_ki kind_co | noFreeVarsOfType final_kind = MRefl | otherwise = MCo $ liftCoSubst Nominal lc final_kind go lc (binder:binders) inner_ki (role:roles) (arg_redn:arg_redns) = -- We rewrite an argument ty with arg_redn = Reduction arg_co arg -- By Note [Rewriting] in GHC.Tc.Solver.Rewrite invariant (F2), -- tcTypeKind(ty) = tcTypeKind(arg). -- However, it is possible that arg will be used as an argument to a function -- whose kind is different, if earlier arguments have been rewritten. -- We thus need to compose the reduction with a kind coercion to ensure -- well-kindedness (see the call to mkCoherenceRightRedn below). 
-- -- The bangs here have been observed to improve performance -- significantly in optimized builds; see #18502 let !kind_co = liftCoSubst Nominal lc (tyCoBinderType binder) !(Reduction casted_co casted_xi) = mkCoherenceRightRedn role arg_redn kind_co -- now, extend the lifting context with the new binding !new_lc | Just tv <- tyCoBinderVar_maybe binder = extendLiftingContextAndInScope lc tv casted_co | otherwise = lc !(ArgsReductions (Reductions cos xis) final_kind_co) = go new_lc binders inner_ki roles arg_redns in ArgsReductions (Reductions (casted_co:cos) (casted_xi:xis)) final_kind_co -- See Note [Last case in simplifyArgsWorker] go lc [] inner_ki roles arg_redns = let co1 = liftCoSubst Nominal lc inner_ki co1_kind = coercionKind co1 unrewritten_tys = map reductionOriginalType arg_redns (arg_cos, res_co) = decomposePiCos co1 co1_kind unrewritten_tys casted_args = assertPpr (equalLength arg_redns arg_cos) (ppr arg_redns $$ ppr arg_cos) $ zipWith3 mkCoherenceRightRedn roles arg_redns arg_cos -- In general decomposePiCos can return fewer cos than tys, -- but not here; because we're well typed, there will be enough -- binders. Note that decomposePiCos does substitutions, so even -- if the original substitution results in something ending with -- ... -> k, that k will be substituted to perhaps reveal more -- binders. zapped_lc = zapLiftingContext lc Pair rewritten_kind _ = co1_kind (bndrs, new_inner) = splitPiTys rewritten_kind ArgsReductions redns_out res_co_out = go zapped_lc bndrs new_inner roles casted_args in ArgsReductions redns_out (res_co `mkTransMCoR` res_co_out) go _ _ _ _ _ = panic "simplifyArgsWorker wandered into deeper water than usual" -- This debug information is commented out because leaving it in -- causes a ~2% increase in allocations in T9872d. 
-- That's independent of the analogous case in rewrite_args_fast -- in GHC.Tc.Solver.Rewrite: -- each of these causes a 2% increase on its own, so commenting them -- both out gives a 4% decrease in T9872d. {- (vcat [ppr orig_binders, ppr orig_inner_ki, ppr (take 10 orig_roles), -- often infinite! ppr orig_tys]) -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/RoughMap.hs0000644000000000000000000004315714472400112020613 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE BangPatterns #-} -- | 'RoughMap' is an approximate finite map data structure keyed on -- @['RoughMatchTc']@. This is useful when keying maps on lists of 'Type's -- (e.g. an instance head). module GHC.Core.RoughMap ( -- * RoughMatchTc RoughMatchTc(..) , isRoughWildcard , typeToRoughMatchTc , RoughMatchLookupTc(..) , typeToRoughMatchLookupTc , roughMatchTcToLookup -- * RoughMap , RoughMap , emptyRM , lookupRM , lookupRM' , insertRM , filterRM , filterMatchingRM , elemsRM , sizeRM , foldRM , unionRM ) where import GHC.Prelude import GHC.Data.Bag import GHC.Core.TyCon import GHC.Core.TyCo.Rep import GHC.Core.Type import GHC.Utils.Outputable import GHC.Types.Name import GHC.Types.Name.Env import Control.Monad (join) import Data.Data (Data) import GHC.Utils.Misc import Data.Bifunctor import GHC.Utils.Panic {- Note [RoughMap] ~~~~~~~~~~~~~~~ We often want to compute whether one type matches another. That is, given `ty1` and `ty2`, we want to know whether `ty1` is a substitution instance of `ty2`. We can bail out early by taking advantage of the following observation: If `ty2` is headed by a generative type constructor, say `tc`, but `ty1` is not headed by that same type constructor, then `ty1` does not match `ty2`. The idea is that we can use a `RoughMap` as a pre-filter, to produce a short-list of candidates to examine more closely. 
This means we can avoid computing a full substitution if we represent types as applications of known generative type constructors. So, after type synonym expansion, we classify application heads into two categories ('RoughMatchTc') - `RM_KnownTc tc`: the head is the generative type constructor `tc`, - `RM_Wildcard`: anything else. A (RoughMap val) is semantically a list of (key,[val]) pairs, where key :: [RoughMatchTc] So, writing # for `OtherTc`, and Int for `KnownTc "Int"`, we might have [ ([#, Int, Maybe, #, Int], v1) , ([Int, #, List], v2 ] This map is stored as a trie, so looking up a key is very fast. See Note [Matching a RoughMap] and Note [Simple Matching Semantics] for details on lookup. We lookup a key of type [RoughMatchLookupTc], and return the list of all values whose keys "match": Given the above map, here are the results of some lookups: Lookup key Result ------------------------- [Int, Int] [v1,v2] -- Matches because the prefix of both entries matches [Int,Int,List] [v2] [Bool] [] Notice that a single key can map to /multiple/ values. E.g. if we started with (Maybe Int, val1) and (Maybe Bool, val2), we'd generate a RoughMap that is semantically the list [( Maybe, [val1,val2] )] Note [RoughMap and beta reduction] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is one tricky case we have to account for when matching a rough map due to Note [Eta reduction for data families] in `GHC.Core.Coercion.Axiom`: Consider that the user has written a program containing a data family: > data family Fam a b > data instance Fam Int a = SomeType -- known henceforth as FamIntInst The LHS of this instance will be eta reduced, as described in Note [Eta reduction for data families]. Consequently, we will end up with a `FamInst` with `fi_tcs = [KnownTc Int]`. Naturally, we need RoughMap to return this instance when queried for an instance with template, e.g., `[KnownTc Fam, KnownTc Int, KnownTc Char]`. 
This explains the third clause of the mightMatch specification in Note [Simple
Matching Semantics]. As soon as the lookup key runs out, the remaining
instances might match.

Note [Matching a RoughMap]
~~~~~~~~~~~~~~~~~~~~~~~~~~
The /lookup key/ into a rough map (RoughMatchLookupTc) is slightly different
to the /insertion key/ (RoughMatchTc). Like the insertion key each lookup
argument is classified to a simpler key which describes what could match that
position. There are three possibilities:

* RML_KnownTc Name: The argument is headed by a known type constructor. Example:
  'Bool' is classified as 'RML_KnownTc Bool' and '[Int]' is classified as
  `RML_KnownTc []`
* RML_NoKnownTc: The argument is definitely not headed by any known type
  constructor. Example: For instance matching 'a[sk], a[tau]' and 'F a[sk], F
  a[tau]' are classified as 'RML_NoKnownTc', for family instance matching no
  examples.
* RML_WildCard: The argument could match anything, we don't know enough about
  it. For instance matching no examples, for type family matching, things to do
  with variables.

The interesting case for instance matching is the second case, because it does
not appear in an insertion key. The second case arises in two situations:

1. The head of the application is a type variable. The type variable definitely
   doesn't match with any of the KnownTC instances so we can discard them all.
   For example: Show a[sk] or Show (a[sk] b[sk]). One place constraints like
   this arise is when typechecking derived instances.
2. The head of the application is a known type family.  For example: F a[sk].
   The application of F is stuck, and because F is a type family it won't match
   any KnownTC instance so it's safe to discard all these instances.

Of course, these two cases can still match instances of the form `forall a . Show a =>`,
and those instances are retained as they are classified as RM_WildCard
instances.
Note [Matches vs Unifiers]
~~~~~~~~~~~~~~~~~~~~~~~~~~
The lookupRM' function returns a pair of potential /matches/ and potential /unifiers/.
The potential matches is likely to be much smaller than the bag of potential unifiers due
to the reasoning about rigid type variables described in Note [Matching a RoughMap].
On the other hand, the instances captured by the RML_NoKnownTc case can still potentially unify
with any instance (depending on the substitution of said rigid variable) so they can't be discounted
from the list of potential unifiers. This is achieved by the RML_NoKnownTc case continuing
the lookup for unifiers by replacing RML_NoKnownTc with RML_WildCard.

This distinction between matches and unifiers is also important for type families.
During normal type family lookup, we care about matches and when checking for consistency
we care about the unifiers. This is evident in the code as `lookup_fam_inst_env` is
parameterised over a lookup function which either performs matching checking or unification
checking.

In addition to this, we only care whether there are zero or non-zero potential
unifiers, even if we have many candidates, the search can stop before consulting
each candidate. We only need the full list of unifiers when displaying error messages.
Therefore the list is computed lazily so much work can be avoided constructing the
list in the first place.

Note [Simple Matching Semantics]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose `rm` is a RoughMap representing a set of (key,vals) pairs,
where key::[RoughMapTc] and val::a.
Suppose I look up a key lk :: [RoughMapLookupTc] in `rm`
Then I get back (matches, unifiers) where
   matches  = [ vals | (key,vals) <- rm, key `mightMatch` lk ]
   unifiers = [ vals | (key,vals) <- rm, key `mightUnify` lk ]

Where mightMatch is defined like this:

   mightMatch :: [RoughMapTc] -> [RoughMapLookupTc] -> Bool
   mightMatch []     []      = True  -- A perfectly sized match might match
   mightMatch key    []      = True  -- A shorter lookup key matches everything
   mightMatch []     (_:_)   = True  -- If the lookup key is longer, then still might match
                                     -- Note [RoughMap and beta reduction]
   mightMatch (k:ks) (lk:lks)
     = case (k,lk) of
         -- Standard case, matching on a specific known TyCon.
         (RM_KnownTc n1, RML_KnownTc n2) -> n1==n2 && mightMatch ks lks
         -- For example, if the key for 'Show Bool' is [RM_KnownTc Show, RM_KnownTc Bool]
         -- and we match against (Show a[sk]) [RM_KnownTc Show, RML_NoKnownTc]
         -- then Show Bool can never match Show a[sk] so return False.
         (RM_KnownTc _, RML_NoKnownTc)   -> False
         -- Wildcard cases don't inform us anything about the match.
         (RM_WildCard, _)                -> mightMatch ks lks
         (_, RML_WildCard)               -> mightMatch ks lks

-- Might unify is very similar to mightMatch apart from RML_NoKnownTc may
-- unify with any instance.
mightUnify :: [RoughMapTc] -> [RoughMapLookupTc] -> Bool mightUnify [] [] = True -- A perfectly sized match might unify mightUnify key [] = True -- A shorter lookup key matches everything mightUnify [] (_:_) = True mightUnify (k:ks) (lk:lks) = = case (k,lk) of (RM_KnownTc n1, RML_KnownTc n2) -> n1==n2 && mightUnify ks lks (RM_KnownTc _, RML_NoKnownTc) -> mightUnify (k:ks) (RML_WildCard:lks) (RM_WildCard, _ ) -> mightUnify ks lks (_, RML_WildCard) -> mightUnify ks lks The guarantee that RoughMap provides is that if insert_ty `tcMatchTy` lookup_ty then definitely typeToRoughMatchTc insert_ty `mightMatch` typeToRoughMatchLookupTc lookup_ty but not vice versa this statement encodes the intuition that the RoughMap is used as a quick pre-filter to remove instances from the matching pool. The contrapositive states that if the RoughMap reports that the instance doesn't match then `tcMatchTy` will report that the types don't match as well. -} -- Key for insertion into a RoughMap data RoughMatchTc = RM_KnownTc Name -- INVARIANT: Name refers to a TyCon tc that responds -- true to `isGenerativeTyCon tc Nominal`. See -- Note [Rough matching in class and family instances] | RM_WildCard -- e.g. 
type variable at the head deriving( Data ) -- Key for lookup into a RoughMap -- See Note [Matching a RoughMap] data RoughMatchLookupTc = RML_KnownTc Name -- ^ The position only matches the specified KnownTc | RML_NoKnownTc -- ^ The position definitely doesn't match any KnownTc | RML_WildCard -- ^ The position can match anything deriving ( Data ) instance Outputable RoughMatchLookupTc where ppr (RML_KnownTc nm) = text "RML_KnownTc" <+> ppr nm ppr RML_NoKnownTc = text "RML_NoKnownTC" ppr RML_WildCard = text "_" roughMatchTcToLookup :: RoughMatchTc -> RoughMatchLookupTc roughMatchTcToLookup (RM_KnownTc n) = RML_KnownTc n roughMatchTcToLookup RM_WildCard = RML_WildCard instance Outputable RoughMatchTc where ppr (RM_KnownTc nm) = text "KnownTc" <+> ppr nm ppr RM_WildCard = text "OtherTc" isRoughWildcard :: RoughMatchTc -> Bool isRoughWildcard RM_WildCard = True isRoughWildcard (RM_KnownTc {}) = False typeToRoughMatchLookupTc :: Type -> RoughMatchLookupTc typeToRoughMatchLookupTc ty | Just (ty', _) <- splitCastTy_maybe ty = typeToRoughMatchLookupTc ty' | otherwise = case splitAppTys ty of -- Case 1: Head of application is a type variable, does not match any KnownTc. (TyVarTy {}, _) -> RML_NoKnownTc (TyConApp tc _, _) -- Case 2: Head of application is a known type constructor, hence KnownTc. | not (isTypeFamilyTyCon tc) -> RML_KnownTc $! tyConName tc -- Case 3: Head is a type family so it's stuck and therefore doesn't match -- any KnownTc | isTypeFamilyTyCon tc -> RML_NoKnownTc -- Fallthrough: Otherwise, anything might match this position _ -> RML_WildCard typeToRoughMatchTc :: Type -> RoughMatchTc typeToRoughMatchTc ty | Just (ty', _) <- splitCastTy_maybe ty = typeToRoughMatchTc ty' | Just (tc,_) <- splitTyConApp_maybe ty , not (isTypeFamilyTyCon tc) = assertPpr (isGenerativeTyCon tc Nominal) (ppr tc) RM_KnownTc $! 
tyConName tc -- See Note [Rough matching in class and family instances] | otherwise = RM_WildCard -- | Trie of @[RoughMatchTc]@ -- -- *Examples* -- @ -- insert [OtherTc] 1 -- insert [OtherTc] 2 -- lookup [OtherTc] == [1,2] -- @ data RoughMap a = RM { rm_empty :: Bag a , rm_known :: DNameEnv (RoughMap a) -- See Note [InstEnv determinism] in GHC.Core.InstEnv , rm_unknown :: RoughMap a } | RMEmpty -- an optimised (finite) form of emptyRM -- invariant: Empty RoughMaps are always represented with RMEmpty deriving (Functor) instance Outputable a => Outputable (RoughMap a) where ppr (RM empty known unknown) = vcat [text "RM" , nest 2 (vcat [ text "Empty:" <+> ppr empty , text "Known:" <+> ppr known , text "Unknown:" <+> ppr unknown])] ppr RMEmpty = text "{}" emptyRM :: RoughMap a emptyRM = RMEmpty -- | Order of result is deterministic. lookupRM :: [RoughMatchLookupTc] -> RoughMap a -> [a] lookupRM tcs rm = bagToList (fst $ lookupRM' tcs rm) -- | N.B. Returns a 'Bag' for matches, which allows us to avoid rebuilding all of the lists -- we find in 'rm_empty', which would otherwise be necessary due to '++' if we -- returned a list. We use a list for unifiers becuase the tail is computed lazily and -- we often only care about the first couple of potential unifiers. Constructing a -- bag forces the tail which performs much too much work. -- -- See Note [Matching a RoughMap] -- See Note [Matches vs Unifiers] lookupRM' :: [RoughMatchLookupTc] -> RoughMap a -> (Bag a -- Potential matches , [a]) -- Potential unifiers lookupRM' _ RMEmpty = (emptyBag, []) -- See Note [Simple Matching Semantics] about why we return everything when the lookup -- key runs out. 
lookupRM' [] rm = let m = elemsRM rm in (listToBag m, m) lookupRM' (RML_KnownTc tc : tcs) rm = let (common_m, common_u) = lookupRM' tcs (rm_unknown rm) (m, u) = maybe (emptyBag, []) (lookupRM' tcs) (lookupDNameEnv (rm_known rm) tc) in (rm_empty rm `unionBags` common_m `unionBags` m , bagToList (rm_empty rm) ++ common_u ++ u) -- A RML_NoKnownTC does **not** match any KnownTC but can unify lookupRM' (RML_NoKnownTc : tcs) rm = let (u_m, _u_u) = lookupRM' tcs (rm_unknown rm) in (rm_empty rm `unionBags` u_m -- Definitely don't match , snd $ lookupRM' (RML_WildCard : tcs) rm) -- But could unify.. lookupRM' (RML_WildCard : tcs) rm = let (m, u) = bimap unionManyBags concat (mapAndUnzip (lookupRM' tcs) (eltsDNameEnv $ rm_known rm)) (u_m, u_u) = lookupRM' tcs (rm_unknown rm) in (rm_empty rm `unionBags` u_m `unionBags` m , bagToList (rm_empty rm) ++ u_u ++ u) unionRM :: RoughMap a -> RoughMap a -> RoughMap a unionRM RMEmpty a = a unionRM a RMEmpty = a unionRM a b = RM { rm_empty = rm_empty a `unionBags` rm_empty b , rm_known = plusDNameEnv_C unionRM (rm_known a) (rm_known b) , rm_unknown = rm_unknown a `unionRM` rm_unknown b } insertRM :: [RoughMatchTc] -> a -> RoughMap a -> RoughMap a insertRM k v RMEmpty = insertRM k v $ RM { rm_empty = emptyBag , rm_known = emptyDNameEnv , rm_unknown = emptyRM } insertRM [] v rm@(RM {}) = -- See Note [Simple Matching Semantics] rm { rm_empty = v `consBag` rm_empty rm } insertRM (RM_KnownTc k : ks) v rm@(RM {}) = rm { rm_known = alterDNameEnv f (rm_known rm) k } where f Nothing = Just $ (insertRM ks v emptyRM) f (Just m) = Just $ (insertRM ks v m) insertRM (RM_WildCard : ks) v rm@(RM {}) = rm { rm_unknown = insertRM ks v (rm_unknown rm) } filterRM :: (a -> Bool) -> RoughMap a -> RoughMap a filterRM _ RMEmpty = RMEmpty filterRM pred rm = normalise $ RM { rm_empty = filterBag pred (rm_empty rm), rm_known = mapDNameEnv (filterRM pred) (rm_known rm), rm_unknown = filterRM pred (rm_unknown rm) } -- | Place a 'RoughMap' in normal form, turning 
all empty 'RM's into -- 'RMEmpty's. Necessary after removing items. normalise :: RoughMap a -> RoughMap a normalise RMEmpty = RMEmpty normalise (RM empty known RMEmpty) | isEmptyBag empty , isEmptyDNameEnv known = RMEmpty normalise rm = rm -- | Filter all elements that might match a particular key with the given -- predicate. filterMatchingRM :: (a -> Bool) -> [RoughMatchTc] -> RoughMap a -> RoughMap a filterMatchingRM _ _ RMEmpty = RMEmpty filterMatchingRM pred [] rm = filterRM pred rm filterMatchingRM pred (RM_KnownTc tc : tcs) rm = normalise $ RM { rm_empty = filterBag pred (rm_empty rm), rm_known = alterDNameEnv (join . fmap (dropEmpty . filterMatchingRM pred tcs)) (rm_known rm) tc, rm_unknown = filterMatchingRM pred tcs (rm_unknown rm) } filterMatchingRM pred (RM_WildCard : tcs) rm = normalise $ RM { rm_empty = filterBag pred (rm_empty rm), rm_known = mapDNameEnv (filterMatchingRM pred tcs) (rm_known rm), rm_unknown = filterMatchingRM pred tcs (rm_unknown rm) } dropEmpty :: RoughMap a -> Maybe (RoughMap a) dropEmpty RMEmpty = Nothing dropEmpty rm = Just rm elemsRM :: RoughMap a -> [a] elemsRM = foldRM (:) [] foldRM :: (a -> b -> b) -> b -> RoughMap a -> b foldRM f = go where -- N.B. local worker ensures that the loop can be specialised to the fold -- function. go z RMEmpty = z go z (RM{ rm_unknown = unk, rm_known = known, rm_empty = empty}) = foldr f (foldDNameEnv (flip go) (go z unk) known ) empty nonDetStrictFoldRM :: (b -> a -> b) -> b -> RoughMap a -> b nonDetStrictFoldRM f = go where -- N.B. local worker ensures that the loop can be specialised to the fold -- function. 
go !z RMEmpty = z go z rm@(RM{}) = foldl' f (nonDetStrictFoldDNameEnv (flip go) (go z (rm_unknown rm)) (rm_known rm) ) (rm_empty rm) sizeRM :: RoughMap a -> Int sizeRM = nonDetStrictFoldRM (\acc _ -> acc + 1) 0 ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Rules.hs0000644000000000000000000017427414472400112020170 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[CoreRules]{Rewrite rules} -} -- | Functions for collecting together and applying rewrite rules to a module. -- The 'CoreRule' datatype itself is declared elsewhere. module GHC.Core.Rules ( -- ** Constructing emptyRuleBase, mkRuleBase, extendRuleBaseList, pprRuleBase, extendRuleEnv, -- ** Checking rule applications ruleCheckProgram, -- ** Manipulating 'RuleInfo' rules extendRuleInfo, addRuleInfo, addIdSpecialisations, -- * Misc. CoreRule helpers rulesOfBinds, getRules, pprRulesForUser, lookupRule, mkRule, roughTopNames, initRuleOpts ) where import GHC.Prelude import GHC.Driver.Session ( DynFlags, gopt, targetPlatform, homeUnitId_ ) import GHC.Driver.Flags import GHC.Unit.Types ( primUnitId, bignumUnitId ) import GHC.Unit.Module ( Module ) import GHC.Unit.Module.Env import GHC.Core -- All of it import GHC.Core.Subst import GHC.Core.SimpleOpt ( exprIsLambda_maybe ) import GHC.Core.FVs ( exprFreeVars, exprsFreeVars, bindFreeVars , rulesFreeVarsDSet, exprsOrphNames, exprFreeVarsList ) import GHC.Core.Utils ( exprType, mkTick, mkTicks , stripTicksTopT, stripTicksTopE , isJoinBind, mkCastMCo ) import GHC.Core.Ppr ( pprRules ) import GHC.Core.Unify as Unify ( ruleMatchTyKiX ) import GHC.Core.Type as Type ( Type, TCvSubst, extendTvSubst, extendCvSubst , mkEmptyTCvSubst, substTy, getTyVar_maybe ) import GHC.Core.Coercion as Coercion import GHC.Core.Tidy ( tidyRules ) import GHC.Core.Map.Expr ( eqCoreExpr ) import GHC.Core.Opt.OccurAnal ( occurAnalyseExpr ) import GHC.Tc.Utils.TcType ( tcSplitTyConApp_maybe ) import GHC.Builtin.Types ( anyTypeOfKind ) import GHC.Types.Id 
import GHC.Types.Id.Info ( RuleInfo( RuleInfo ) ) import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name ( Name, NamedThing(..), nameIsLocalOrFrom ) import GHC.Types.Name.Set import GHC.Types.Name.Env import GHC.Types.Unique.FM import GHC.Types.Tickish import GHC.Types.Basic import GHC.Data.FastString import GHC.Data.Maybe import GHC.Data.Bag import GHC.Utils.Misc as Utils import GHC.Utils.Trace import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import Data.List (sortBy, mapAccumL, isPrefixOf) import Data.Function ( on ) import Control.Monad ( guard ) {- Note [Overall plumbing for rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * After the desugarer: - The ModGuts initially contains mg_rules :: [CoreRule] of locally-declared rules for imported Ids. - Locally-declared rules for locally-declared Ids are attached to the IdInfo for that Id. See Note [Attach rules to local ids] in GHC.HsToCore.Binds * GHC.Iface.Tidy strips off all the rules from local Ids and adds them to mg_rules, so that the ModGuts has *all* the locally-declared rules. * The HomePackageTable contains a ModDetails for each home package module. Each contains md_rules :: [CoreRule] of rules declared in that module. The HomePackageTable grows as ghc --make does its up-sweep. In batch mode (ghc -c), the HPT is empty; all imported modules are treated by the "external" route, discussed next, regardless of which package they come from. * The ExternalPackageState has a single eps_rule_base :: RuleBase for Ids in other packages. This RuleBase simply grow monotonically, as ghc --make compiles one module after another. During simplification, interface files may get demand-loaded, as the simplifier explores the unfoldings for Ids it has in its hand. (Via an unsafePerformIO; the EPS is really a cache.) That in turn may make the EPS rule-base grow. In contrast, the HPT never grows in this way. 
* The result of all this is that during Core-to-Core optimisation there are
  four sources of rules:

    (a) Rules in the IdInfo of the Id they are a rule for.  These are
        easy: fast to look up, and if you apply a substitution then
        it'll be applied to the IdInfo as a matter of course.

    (b) Rules declared in this module for imported Ids, kept in the ModGuts.
        If you do a substitution, you'd better apply the substitution to
        these.  There are seldom many of these.

    (c) Rules declared in the HomePackageTable.  These never change.

    (d) Rules in the ExternalPackageTable.  These can grow in response
        to lazy demand-loading of interfaces.

* At the moment (c) is carried in a reader-monad way by the GHC.Core.Opt.Monad.
  The HomePackageTable doesn't have a single RuleBase because technically
  we should only be able to "see" rules "below" this module; so we
  generate a RuleBase for (c) by combining rules from all the modules
  "below" us.  That's why we can't just select the home-package RuleBase
  from HscEnv.

  [NB: we are inconsistent here.  We should do the same for external
  packages, but we don't.  Same for type-class instances.]

* So in the outer simplifier loop (simplifyPgmIO), we combine (b & c) into
  a single RuleBase, reading
     (b) from the ModGuts,
     (c) from the GHC.Core.Opt.Monad, and
  just before doing rule matching we read
     (d) from its mutable variable
  and combine it with the results from (b & c).

  In a single simplifier run new rules can be added into the EPS so it matters
  to keep an up-to-date view of which rules have been loaded. For examples of
  where this went wrong and caused cryptic performance regressions
  see T19790 and !6735.

************************************************************************
*                                                                      *
\subsection[specialisation-IdInfo]{Specialisation info about an @Id@}
*                                                                      *
************************************************************************

A @CoreRule@ holds details of one rule for an @Id@, which
includes its specialisations.
For example, if a rule for @f@ contains the mapping: \begin{verbatim} forall a b d. [Type (List a), Type b, Var d] ===> f' a b \end{verbatim} then when we find an application of f to matching types, we simply replace it by the matching RHS: \begin{verbatim} f (List Int) Bool dict ===> f' Int Bool \end{verbatim} All the stuff about how many dictionaries to discard, and what types to apply the specialised function to, are handled by the fact that the Rule contains a template for the result of the specialisation. There is one more exciting case, which is dealt with in exactly the same way. If the specialised value is unboxed then it is lifted at its definition site and unlifted at its uses. For example: pi :: forall a. Num a => a might have a specialisation [Int#] ===> (case pi' of Lift pi# -> pi#) where pi' :: Lift Int# is the specialised version of pi. -} mkRule :: Module -> Bool -> Bool -> RuleName -> Activation -> Name -> [CoreBndr] -> [CoreExpr] -> CoreExpr -> CoreRule -- ^ Used to make 'CoreRule' for an 'Id' defined in the module being -- compiled. See also 'GHC.Core.CoreRule' mkRule this_mod is_auto is_local name act fn bndrs args rhs = Rule { ru_name = name , ru_act = act , ru_fn = fn , ru_bndrs = bndrs , ru_args = args , ru_rhs = occurAnalyseExpr rhs -- See Note [OccInfo in unfoldings and rules] , ru_rough = roughTopNames args , ru_origin = this_mod , ru_orphan = orph , ru_auto = is_auto , ru_local = is_local } where -- Compute orphanhood. See Note [Orphans] in GHC.Core.InstEnv -- A rule is an orphan only if none of the variables -- mentioned on its left-hand side are locally defined lhs_names = extendNameSet (exprsOrphNames args) fn -- Since rules get eventually attached to one of the free names -- from the definition when compiling the ABI hash, we should make -- it deterministic. This chooses the one with minimal OccName -- as opposed to uniq value. 
local_lhs_names = filterNameSet (nameIsLocalOrFrom this_mod) lhs_names orph = chooseOrphanAnchor local_lhs_names -------------- roughTopNames :: [CoreExpr] -> [Maybe Name] -- ^ Find the \"top\" free names of several expressions. -- Such names are either: -- -- 1. The function finally being applied to in an application chain -- (if that name is a GlobalId: see "GHC.Types.Var#globalvslocal"), or -- -- 2. The 'TyCon' if the expression is a 'Type' -- -- This is used for the fast-match-check for rules; -- if the top names don't match, the rest can't roughTopNames args = map roughTopName args roughTopName :: CoreExpr -> Maybe Name roughTopName (Type ty) = case tcSplitTyConApp_maybe ty of Just (tc,_) -> Just (getName tc) Nothing -> Nothing roughTopName (Coercion _) = Nothing roughTopName (App f _) = roughTopName f roughTopName (Var f) | isGlobalId f -- Note [Care with roughTopName] , isDataConWorkId f || idArity f > 0 = Just (idName f) roughTopName (Tick t e) | tickishFloatable t = roughTopName e roughTopName _ = Nothing ruleCantMatch :: [Maybe Name] -> [Maybe Name] -> Bool -- ^ @ruleCantMatch tpl actual@ returns True only if @actual@ -- definitely can't match @tpl@ by instantiating @tpl@. -- It's only a one-way match; unlike instance matching we -- don't consider unification. -- -- Notice that [_$_] -- @ruleCantMatch [Nothing] [Just n2] = False@ -- Reason: a template variable can be instantiated by a constant -- Also: -- @ruleCantMatch [Just n1] [Nothing] = False@ -- Reason: a local variable @v@ in the actuals might [_$_] ruleCantMatch (Just n1 : ts) (Just n2 : as) = n1 /= n2 || ruleCantMatch ts as ruleCantMatch (_ : ts) (_ : as) = ruleCantMatch ts as ruleCantMatch _ _ = False {- Note [Care with roughTopName] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this module M where { x = a:b } module N where { ...f x... RULE f (p:q) = ... } You'd expect the rule to match, because the matcher can look through the unfolding of 'x'. 
So we must avoid roughTopName returning 'M.x' for the call (f x), or else it'll say "can't match" and we won't even try!! However, suppose we have RULE g (M.h x) = ... foo = ...(g (M.k v)).... where k is a *function* exported by M. We never really match functions (lambdas) except by name, so in this case it seems like a good idea to treat 'M.k' as a roughTopName of the call. -} pprRulesForUser :: [CoreRule] -> SDoc -- (a) tidy the rules -- (b) sort them into order based on the rule name -- (c) suppress uniques (unless -dppr-debug is on) -- This combination makes the output stable so we can use in testing -- It's here rather than in GHC.Core.Ppr because it calls tidyRules pprRulesForUser rules = withPprStyle defaultUserStyle $ pprRules $ sortBy (lexicalCompareFS `on` ruleName) $ tidyRules emptyTidyEnv rules {- ************************************************************************ * * RuleInfo: the rules in an IdInfo * * ************************************************************************ -} extendRuleInfo :: RuleInfo -> [CoreRule] -> RuleInfo extendRuleInfo (RuleInfo rs1 fvs1) rs2 = RuleInfo (rs2 ++ rs1) (rulesFreeVarsDSet rs2 `unionDVarSet` fvs1) addRuleInfo :: RuleInfo -> RuleInfo -> RuleInfo addRuleInfo (RuleInfo rs1 fvs1) (RuleInfo rs2 fvs2) = RuleInfo (rs1 ++ rs2) (fvs1 `unionDVarSet` fvs2) addIdSpecialisations :: Id -> [CoreRule] -> Id addIdSpecialisations id rules | null rules = id | otherwise = setIdSpecialisation id $ extendRuleInfo (idSpecialisation id) rules -- | Gather all the rules for locally bound identifiers from the supplied bindings rulesOfBinds :: [CoreBind] -> [CoreRule] rulesOfBinds binds = concatMap (concatMap idCoreRules . 
bindersOf) binds getRules :: RuleEnv -> Id -> [CoreRule] -- See Note [Where rules are found] getRules (RuleEnv { re_base = rule_base, re_visible_orphs = orphs }) fn = idCoreRules fn ++ concatMap imp_rules rule_base where imp_rules rb = filter (ruleIsVisible orphs) (lookupNameEnv rb (idName fn) `orElse` []) ruleIsVisible :: ModuleSet -> CoreRule -> Bool ruleIsVisible _ BuiltinRule{} = True ruleIsVisible vis_orphs Rule { ru_orphan = orph, ru_origin = origin } = notOrphan orph || origin `elemModuleSet` vis_orphs {- Note [Where rules are found] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The rules for an Id come from two places: (a) the ones it is born with, stored inside the Id itself (idCoreRules fn), (b) rules added in other modules, stored in the global RuleBase (imp_rules) It's tempting to think that - LocalIds have only (a) - non-LocalIds have only (b) but that isn't quite right: - PrimOps and ClassOps are born with a bunch of rules inside the Id, even when they are imported - The rules in GHC.Core.Opt.ConstantFold.builtinRules should be active even in the module defining the Id (when it's a LocalId), but the rules are kept in the global RuleBase ************************************************************************ * * RuleBase * * ************************************************************************ -} -- RuleBase itself is defined in GHC.Core, along with CoreRule emptyRuleBase :: RuleBase emptyRuleBase = emptyNameEnv mkRuleBase :: [CoreRule] -> RuleBase mkRuleBase rules = extendRuleBaseList emptyRuleBase rules extendRuleBaseList :: RuleBase -> [CoreRule] -> RuleBase extendRuleBaseList rule_base new_guys = foldl' extendRuleBase rule_base new_guys extendRuleBase :: RuleBase -> CoreRule -> RuleBase extendRuleBase rule_base rule = extendNameEnv_Acc (:) Utils.singleton rule_base (ruleIdName rule) rule extendRuleEnv :: RuleEnv -> RuleBase -> RuleEnv extendRuleEnv (RuleEnv rules orphs) rb = (RuleEnv (rb:rules) orphs) pprRuleBase :: RuleBase -> SDoc pprRuleBase rules = 
pprUFM rules $ \rss -> vcat [ pprRules (tidyRules emptyTidyEnv rs) | rs <- rss ] {- ************************************************************************ * * Matching * * ************************************************************************ -} -- | The main rule matching function. Attempts to apply all (active) -- supplied rules to this instance of an application in a given -- context, returning the rule applied and the resulting expression if -- successful. lookupRule :: RuleOpts -> InScopeEnv -> (Activation -> Bool) -- When rule is active -> Id -- Function head -> [CoreExpr] -- Args -> [CoreRule] -- Rules -> Maybe (CoreRule, CoreExpr) -- See Note [Extra args in the target] -- See comments on matchRule lookupRule opts rule_env@(in_scope,_) is_active fn args rules = -- pprTrace "lookupRule" (ppr fn <+> ppr args $$ ppr rules $$ ppr in_scope) $ case go [] rules of [] -> Nothing (m:ms) -> Just (findBest in_scope (fn,args') m ms) where rough_args = map roughTopName args -- Strip ticks from arguments, see Note [Tick annotations in RULE -- matching]. We only collect ticks if a rule actually matches - -- this matters for performance tests. 
args' = map (stripTicksTopE tickishFloatable) args ticks = concatMap (stripTicksTopT tickishFloatable) args go :: [(CoreRule,CoreExpr)] -> [CoreRule] -> [(CoreRule,CoreExpr)] go ms [] = ms go ms (r:rs) | Just e <- matchRule opts rule_env is_active fn args' rough_args r = go ((r,mkTicks ticks e):ms) rs | otherwise = -- pprTrace "match failed" (ppr r $$ ppr args $$ -- ppr [ (arg_id, unfoldingTemplate unf) -- | Var arg_id <- args -- , let unf = idUnfolding arg_id -- , isCheapUnfolding unf] ) go ms rs findBest :: InScopeSet -> (Id, [CoreExpr]) -> (CoreRule,CoreExpr) -> [(CoreRule,CoreExpr)] -> (CoreRule,CoreExpr) -- All these pairs matched the expression -- Return the pair the most specific rule -- The (fn,args) is just for overlap reporting findBest _ _ (rule,ans) [] = (rule,ans) findBest in_scope target (rule1,ans1) ((rule2,ans2):prs) | isMoreSpecific in_scope rule1 rule2 = findBest in_scope target (rule1,ans1) prs | isMoreSpecific in_scope rule2 rule1 = findBest in_scope target (rule2,ans2) prs | debugIsOn = let pp_rule rule = ifPprDebug (ppr rule) (doubleQuotes (ftext (ruleName rule))) in pprTrace "Rules.findBest: rule overlap (Rule 1 wins)" (vcat [ whenPprDebug $ text "Expression to match:" <+> ppr fn <+> sep (map ppr args) , text "Rule 1:" <+> pp_rule rule1 , text "Rule 2:" <+> pp_rule rule2]) $ findBest in_scope target (rule1,ans1) prs | otherwise = findBest in_scope target (rule1,ans1) prs where (fn,args) = target isMoreSpecific :: InScopeSet -> CoreRule -> CoreRule -> Bool -- This tests if one rule is more specific than another -- We take the view that a BuiltinRule is less specific than -- anything else, because we want user-define rules to "win" -- In particular, class ops have a built-in rule, but we -- any user-specific rules to win -- eg (#4397) -- truncate :: (RealFrac a, Integral b) => a -> b -- {-# RULES "truncate/Double->Int" truncate = double2Int #-} -- double2Int :: Double -> Int -- We want the specific RULE to beat the built-in class-op rule 
isMoreSpecific _ (BuiltinRule {}) _ = False isMoreSpecific _ (Rule {}) (BuiltinRule {}) = True isMoreSpecific in_scope (Rule { ru_bndrs = bndrs1, ru_args = args1 }) (Rule { ru_bndrs = bndrs2, ru_args = args2 , ru_name = rule_name2, ru_rhs = rhs2 }) = isJust (matchN (full_in_scope, id_unfolding_fun) rule_name2 bndrs2 args2 args1 rhs2) where id_unfolding_fun _ = NoUnfolding -- Don't expand in templates full_in_scope = in_scope `extendInScopeSetList` bndrs1 noBlackList :: Activation -> Bool noBlackList _ = False -- Nothing is black listed {- Note [Extra args in the target] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we find a matching rule, we return (Just (rule, rhs)), /but/ the rule firing has only consumed as many of the input args as the ruleArity says. The unused arguments are handled by the code in GHC.Core.Opt.Simplify.tryRules, using the arity of the returned rule. E.g. Rule "foo": forall a b. f p1 p2 = rhs Target: f e1 e2 e3 Then lookupRule returns Just (Rule "foo", rhs), where Rule "foo" has ruleArity 2. The real rewrite is f e1 e2 e3 ==> rhs e3 You might think it'd be cleaner for lookupRule to deal with the leftover arguments, by applying 'rhs' to them, but the main call in the Simplifier works better as it is. Reason: the 'args' passed to lookupRule are the result of a lazy substitution Historical note: At one stage I tried to match even if there are more args in the /template/ than the target. I now think this is probably a bad idea. Should the template (map f xs) match (map g)? I think not. For a start, in general eta expansion wastes work. SLPJ July 99 -} ------------------------------------ matchRule :: RuleOpts -> InScopeEnv -> (Activation -> Bool) -> Id -> [CoreExpr] -> [Maybe Name] -> CoreRule -> Maybe CoreExpr -- If (matchRule rule args) returns Just (name,rhs) -- then (f args) matches the rule, and the corresponding -- rewritten RHS is rhs -- -- The returned expression is occurrence-analysed -- -- Example -- -- The rule -- forall f g x. 
map f (map g x) ==> map (f . g) x -- is stored -- CoreRule "map/map" -- [f,g,x] -- tpl_vars -- [f,map g x] -- tpl_args -- map (f.g) x) -- rhs -- -- Then the call: matchRule the_rule [e1,map e2 e3] -- = Just ("map/map", (\f,g,x -> rhs) e1 e2 e3) -- -- Any 'surplus' arguments in the input are simply put on the end -- of the output. matchRule opts rule_env _is_active fn args _rough_args (BuiltinRule { ru_try = match_fn }) -- Built-in rules can't be switched off, it seems = case match_fn opts rule_env fn args of Nothing -> Nothing Just expr -> Just expr matchRule _ rule_env is_active _ args rough_args (Rule { ru_name = rule_name, ru_act = act, ru_rough = tpl_tops , ru_bndrs = tpl_vars, ru_args = tpl_args, ru_rhs = rhs }) | not (is_active act) = Nothing | ruleCantMatch tpl_tops rough_args = Nothing | otherwise = matchN rule_env rule_name tpl_vars tpl_args args rhs -- | Initialize RuleOpts from DynFlags initRuleOpts :: DynFlags -> RuleOpts initRuleOpts dflags = RuleOpts { roPlatform = targetPlatform dflags , roNumConstantFolding = gopt Opt_NumConstantFolding dflags , roExcessRationalPrecision = gopt Opt_ExcessPrecision dflags -- disable bignum rules in ghc-prim and ghc-bignum itself , roBignumRules = homeUnitId_ dflags /= primUnitId && homeUnitId_ dflags /= bignumUnitId } --------------------------------------- matchN :: InScopeEnv -> RuleName -> [Var] -> [CoreExpr] -> [CoreExpr] -> CoreExpr -- ^ Target; can have more elements than the template -> Maybe CoreExpr -- For a given match template and context, find bindings to wrap around -- the entire result and what should be substituted for each template variable. -- -- Fail if there are too few actual arguments from the target to match the template -- -- See Note [Extra args in the target] -- If there are too /many/ actual arguments, we simply ignore the -- trailing ones, returning the result of applying the rule to a prefix -- of the actual arguments. 
matchN (in_scope, id_unf) rule_name tmpl_vars tmpl_es target_es rhs = do { rule_subst <- match_exprs init_menv emptyRuleSubst tmpl_es target_es ; let (_, matched_es) = mapAccumL (lookup_tmpl rule_subst) (mkEmptyTCvSubst in_scope) $ tmpl_vars `zip` tmpl_vars1 bind_wrapper = rs_binds rule_subst -- Floated bindings; see Note [Matching lets] ; return (bind_wrapper $ mkLams tmpl_vars rhs `mkApps` matched_es) } where (init_rn_env, tmpl_vars1) = mapAccumL rnBndrL (mkRnEnv2 in_scope) tmpl_vars -- See Note [Cloning the template binders] init_menv = RV { rv_tmpls = mkVarSet tmpl_vars1 , rv_lcl = init_rn_env , rv_fltR = mkEmptySubst (rnInScopeSet init_rn_env) , rv_unf = id_unf } lookup_tmpl :: RuleSubst -> TCvSubst -> (InVar,OutVar) -> (TCvSubst, CoreExpr) -- Need to return a RuleSubst solely for the benefit of mk_fake_ty lookup_tmpl (RS { rs_tv_subst = tv_subst, rs_id_subst = id_subst }) tcv_subst (tmpl_var, tmpl_var1) | isId tmpl_var1 = case lookupVarEnv id_subst tmpl_var1 of Just e | Coercion co <- e -> (Type.extendCvSubst tcv_subst tmpl_var1 co, Coercion co) | otherwise -> (tcv_subst, e) Nothing | Just refl_co <- isReflCoVar_maybe tmpl_var1 , let co = Coercion.substCo tcv_subst refl_co -> -- See Note [Unbound RULE binders] (Type.extendCvSubst tcv_subst tmpl_var1 co, Coercion co) | otherwise -> unbound tmpl_var | otherwise = (Type.extendTvSubst tcv_subst tmpl_var1 ty', Type ty') where ty' = case lookupVarEnv tv_subst tmpl_var1 of Just ty -> ty Nothing -> fake_ty -- See Note [Unbound RULE binders] fake_ty = anyTypeOfKind (Type.substTy tcv_subst (tyVarKind tmpl_var1)) -- This substitution is the sole reason we accumulate -- TCvSubst in lookup_tmpl unbound tmpl_var = pprPanic "Template variable unbound in rewrite rule" $ vcat [ text "Variable:" <+> ppr tmpl_var <+> dcolon <+> ppr (varType tmpl_var) , text "Rule" <+> pprRuleName rule_name , text "Rule bndrs:" <+> ppr tmpl_vars , text "LHS args:" <+> ppr tmpl_es , text "Actual args:" <+> ppr target_es ] {- Note [Unbound RULE 
binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It can be the case that the binder in a rule is not actually bound on the LHS: * Type variables. Type synonyms with phantom args can give rise to unbound template type variables. Consider this (#10689, simplCore/should_compile/T10689): type Foo a b = b f :: Eq a => a -> Bool f x = x==x {-# RULES "foo" forall (x :: Foo a Char). f x = True #-} finkle = f 'c' The rule looks like forall (a::*) (d::Eq Char) (x :: Foo a Char). f (Foo a Char) d x = True Matching the rule won't bind 'a', and legitimately so. We fudge by pretending that 'a' is bound to (Any :: *). * Coercion variables. On the LHS of a RULE for a local binder we might have RULE forall (c :: a~b). f (x |> c) = e Now, if that binding is inlined, so that a=b=Int, we'd get RULE forall (c :: Int~Int). f (x |> c) = e and now when we simplify the LHS (Simplify.simplRule) we optCoercion (look at the CoVarCo case) will turn that 'c' into Refl: RULE forall (c :: Int~Int). f (x |> ) = e and then perhaps drop it altogether. Now 'c' is unbound. It's tricky to be sure this never happens, so instead I say it's OK to have an unbound coercion binder in a RULE provided its type is (c :: t~t). Then, when the RULE fires we can substitute for c. This actually happened (in a RULE for a local function) in #13410, and also in test T10602. Note [Cloning the template binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following match (example 1): Template: forall x. f x Target: f (x+1) This should succeed, because the template variable 'x' has nothing to do with the 'x' in the target. Likewise this one (example 2): Template: forall x. f (\x.x) Target: f (\y.y) We achieve this simply by using rnBndrL to clone the template binders if they are already in scope. ------ Historical note ------- At one point I tried simply adding the template binders to the in-scope set /without/ cloning them, but that failed in a horribly obscure way in #14777. 
Problem was that during matching we look up target-term variables in the in-scope set (see Note [Lookup in-scope]). If a target-term variable happens to name-clash with a template variable, that lookup will find the template variable, which is /utterly/ bogus. In #14777, this transformed a term variable into a type variable, and then crashed when we wanted its idInfo. ------ End of historical note ------- ************************************************************************ * * The main matcher * * ********************************************************************* -} data RuleMatchEnv = RV { rv_lcl :: RnEnv2 -- Renamings for *local bindings* -- (lambda/case) , rv_tmpls :: VarSet -- Template variables -- (after applying envL of rv_lcl) , rv_fltR :: Subst -- Renamings for floated let-bindings -- (domain disjoint from envR of rv_lcl) -- See Note [Matching lets] -- N.B. The InScopeSet of rv_fltR is always ignored; -- see (4) in Note [Matching lets]. , rv_unf :: IdUnfoldingFun } {- Note [rv_lcl in RuleMatchEnv] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider matching Template: \x->f Target: \f->f where 'f' is free in the template. When we meet the lambdas we must remember to rename f :-> f' in the target, as well as x :-> f in the template. The rv_lcl::RnEnv2 does that. Similarly, consider matching Template: {a} \b->b Target: \a->3 We must rename the \a. Otherwise when we meet the lambdas we might substitute [b :-> a] in the template, and then erroneously succeed in matching what looks like the template variable 'a' against 3. So we must add the template vars to the in-scope set before starting; see `init_menv` in `matchN`. -} rvInScopeEnv :: RuleMatchEnv -> InScopeEnv rvInScopeEnv renv = (rnInScopeSet (rv_lcl renv), rv_unf renv) -- * The domain of the TvSubstEnv and IdSubstEnv are the template -- variables passed into the match. 
-- -- * The BindWrapper in a RuleSubst are the bindings floated out -- from nested matches; see the Let case of match, below -- data RuleSubst = RS { rs_tv_subst :: TvSubstEnv -- Range is the , rs_id_subst :: IdSubstEnv -- template variables , rs_binds :: BindWrapper -- Floated bindings , rs_bndrs :: [Var] -- Variables bound by floated lets } type BindWrapper = CoreExpr -> CoreExpr -- See Notes [Matching lets] and [Matching cases] -- we represent the floated bindings as a core-to-core function emptyRuleSubst :: RuleSubst emptyRuleSubst = RS { rs_tv_subst = emptyVarEnv, rs_id_subst = emptyVarEnv , rs_binds = \e -> e, rs_bndrs = [] } ---------------------- match_exprs :: RuleMatchEnv -> RuleSubst -> [CoreExpr] -- Templates -> [CoreExpr] -- Targets -> Maybe RuleSubst -- If the targets are longer than templates, succeed, simply ignoring -- the leftover targets. This matters in the call in matchN. -- -- Precondition: corresponding elements of es1 and es2 have the same -- type, assumuing earlier elements match -- Example: f :: forall v. v -> blah -- match_exprs [Type a, y::a] [Type Int, 3] -- Then, after matching Type a against Type Int, -- the type of (y::a) matches that of (3::Int) match_exprs _ subst [] _ = Just subst match_exprs renv subst (e1:es1) (e2:es2) = do { subst' <- match renv subst e1 e2 MRefl ; match_exprs renv subst' es1 es2 } match_exprs _ _ _ _ = Nothing {- Note [Casts in the target] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As far as possible we don't want casts in the target to get in the way of matching. E.g. * (let bind in e) |> co * (case e of alts) |> co * (\ a b. f a b) |> co In the first two cases we want to float the cast inwards so we can match on the let/case. This is not important in practice because the Simplifier does this anyway. But the third case /is/ important: we don't want the cast to get in the way of eta-reduction. See Note [Cancel reflexive casts] for a real life example. 
The most convenient thing is to make 'match' take an MCoercion argument, thus: * The main matching function match env subst template target mco matches template ~ (target |> mco) * Invariant: typeof( subst(template) ) = typeof( target |> mco ) Note that for applications (e1 e2) ~ (d1 d2) |> co where 'co' is non-reflexive, we simply fail. You might wonder about (e1 e2) ~ ((d1 |> co1) d2) |> co2 but the Simplifer pushes the casts in an application to to the right, if it can, so this doesn't really arise. Note [Coercion arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~ What if we have (f co) in the template, where the 'co' is a coercion argument to f? Right now we have nothing in place to ensure that a coercion /argument/ in the template is a variable. We really should, perhaps by abstracting over that variable. C.f. the treatment of dictionaries in GHC.HsToCore.Binds.decompseRuleLhs. For now, though, we simply behave badly, by failing in match_co. We really should never rely on matching the structure of a coercion (which is just a proof). Note [Casts in the template] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the definition f x = e, and SpecConstr on call pattern f ((e1,e2) |> co) We'll make a RULE RULE forall a,b,g. f ((a,b)|> g) = $sf a b g $sf a b g = e[ ((a,b)|> g) / x ] So here is the invariant: In the template, in a cast (e |> co), the cast `co` is always a /variable/. Matching should bind that variable to an actual coercion, so that we can use it in $sf. So a Cast on the LHS (the template) calls match_co, which succeeds when the template cast is a variable -- which it always is. That is why match_co has so few cases. See also * Note [Coercion arguments] * Note [Matching coercion variables] in GHC.Core.Unify. 
* Note [Cast swizzling on rule LHSs] in GHC.Core.Opt.Simplify.Utils: sm_cast_swizzle is switched off in the template of a RULE -} ---------------------- match :: RuleMatchEnv -> RuleSubst -- Substitution applies to template only -> CoreExpr -- Template -> CoreExpr -- Target -> MCoercion -> Maybe RuleSubst -- Postcondition (TypeInv): if matching succeeds, then -- typeof( subst(template) ) = typeof( target |> mco ) -- But this is /not/ a pre-condition! The types of template and target -- may differ, see the (App e1 e2) case -- -- Invariant (CoInv): if mco :: ty ~ ty, then it is MRefl, not MCo co -- See Note [Cancel reflexive casts] -- -- See the notes with Unify.match, which matches types -- Everything is very similar for terms ------------------------ Ticks --------------------- -- We look through certain ticks. See Note [Tick annotations in RULE matching] match renv subst e1 (Tick t e2) mco | tickishFloatable t = match renv subst' e1 e2 mco | otherwise = Nothing where subst' = subst { rs_binds = rs_binds subst . mkTick t } match renv subst e@(Tick t e1) e2 mco | tickishFloatable t -- Ignore floatable ticks in rule template. = match renv subst e1 e2 mco | otherwise = pprPanic "Tick in rule" (ppr e) ------------------------ Types --------------------- match renv subst (Type ty1) (Type ty2) _mco = match_ty renv subst ty1 ty2 ------------------------ Coercions --------------------- -- See Note [Coercion arguments] for why this isn't really right match renv subst (Coercion co1) (Coercion co2) MRefl = match_co renv subst co1 co2 -- The MCo case corresponds to matching co ~ (co2 |> co3) -- and I have no idea what to do there -- or even if it can occur -- Failing seems the simplest thing to do; it's certainly safe. 
------------------------ Casts --------------------- -- See Note [Casts in the template] -- Note [Casts in the target] -- Note [Cancel reflexive casts] match renv subst e1 (Cast e2 co2) mco = match renv subst e1 e2 (checkReflexiveMCo (mkTransMCoR co2 mco)) -- checkReflexiveMCo: cancel casts if possible -- This is important: see Note [Cancel reflexive casts] match renv subst (Cast e1 co1) e2 mco = -- See Note [Casts in the template] do { let co2 = case mco of MRefl -> mkRepReflCo (exprType e2) MCo co2 -> co2 ; subst1 <- match_co renv subst co1 co2 -- If match_co succeeds, then (exprType e1) = (exprType e2) -- Hence the MRefl in the next line ; match renv subst1 e1 e2 MRefl } ------------------------ Literals --------------------- match _ subst (Lit lit1) (Lit lit2) mco | lit1 == lit2 = assertPpr (isReflMCo mco) (ppr mco) $ Just subst ------------------------ Variables --------------------- -- The Var case follows closely what happens in GHC.Core.Unify.match match renv subst (Var v1) e2 mco = match_var renv subst v1 (mkCastMCo e2 mco) match renv subst e1 (Var v2) mco -- Note [Expanding variables] | not (inRnEnvR rn_env v2) -- Note [Do not expand locally-bound variables] , Just e2' <- expandUnfolding_maybe (rv_unf renv v2') = match (renv { rv_lcl = nukeRnEnvR rn_env }) subst e1 e2' mco where v2' = lookupRnInScope rn_env v2 rn_env = rv_lcl renv -- Notice that we look up v2 in the in-scope set -- See Note [Lookup in-scope] -- No need to apply any renaming first (hence no rnOccR) -- because of the not-inRnEnvR ------------------------ Applications --------------------- -- Note the match on MRefl! We fail if there is a cast in the target -- (e1 e2) ~ (d1 d2) |> co -- See Note [Cancel reflexive casts]: in the Cast equations for 'match' -- we agressively ensure that if MCo is reflective, it really is MRefl. 
match renv subst (App f1 a1) (App f2 a2) MRefl = do { subst' <- match renv subst f1 f2 MRefl ; match renv subst' a1 a2 MRefl } ------------------------ Float lets --------------------- match renv subst e1 (Let bind e2) mco | -- pprTrace "match:Let" (vcat [ppr bind, ppr $ okToFloat (rv_lcl renv) (bindFreeVars bind)]) $ not (isJoinBind bind) -- can't float join point out of argument position , okToFloat (rv_lcl renv) (bindFreeVars bind) -- See Note [Matching lets] = match (renv { rv_fltR = flt_subst' , rv_lcl = rv_lcl renv `extendRnInScopeSetList` new_bndrs }) -- We are floating the let-binding out, as if it had enclosed -- the entire target from Day 1. So we must add its binders to -- the in-scope set (#20200) (subst { rs_binds = rs_binds subst . Let bind' , rs_bndrs = new_bndrs ++ rs_bndrs subst }) e1 e2 mco | otherwise = Nothing where in_scope = rnInScopeSet (rv_lcl renv) `extendInScopeSetList` rs_bndrs subst -- in_scope: see (4) in Note [Matching lets] flt_subst = rv_fltR renv `setInScope` in_scope (flt_subst', bind') = substBind flt_subst bind new_bndrs = bindersOf bind' ------------------------ Lambdas --------------------- match renv subst (Lam x1 e1) e2 mco | Just (x2, e2', ts) <- exprIsLambda_maybe (rvInScopeEnv renv) (mkCastMCo e2 mco) -- See Note [Lambdas in the template] = let renv' = rnMatchBndr2 renv x1 x2 subst' = subst { rs_binds = rs_binds subst . flip (foldr mkTick) ts } in match renv' subst' e1 e2' MRefl match renv subst e1 e2@(Lam {}) mco | Just (renv', e2') <- eta_reduce renv e2 -- See Note [Eta reduction in the target] = match renv' subst e1 e2' mco {- Note [Lambdas in the template] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we match Template: (\x. blah_template) Target: (\y. blah_target) then we want to match inside the lambdas, using rv_lcl to match up x and y. But what about this? Template (\x. (blah1 |> cv)) Target (\y. 
blah2) |> co This happens quite readily, because the Simplifier generally moves casts outside lambdas: see Note [Casts and lambdas] in GHC.Core.Opt.Simplify.Utils. So, tiresomely, we want to push `co` back inside, which is what `exprIsLambda_maybe` does. But we've stripped off that cast, so now we need to put it back, hence mkCastMCo. Unlike the target, where we attempt eta-reduction, we do not attempt to eta-reduce the template, and may therefore fail on Template: \x. f True x Target f True It's not especially easy to deal with eta reducing the template, and never happens, because no one write eta-expanded left-hand-sides. -} ------------------------ Case expression --------------------- {- Disabled: see Note [Matching cases] below match renv (tv_subst, id_subst, binds) e1 (Case scrut case_bndr ty [(con, alt_bndrs, rhs)]) | exprOkForSpeculation scrut -- See Note [Matching cases] , okToFloat rn_env bndrs (exprFreeVars scrut) = match (renv { me_env = rn_env' }) (tv_subst, id_subst, binds . case_wrap) e1 rhs where rn_env = me_env renv rn_env' = extendRnInScopeList rn_env bndrs bndrs = case_bndr : alt_bndrs case_wrap rhs' = Case scrut case_bndr ty [(con, alt_bndrs, rhs')] -} match renv subst (Case e1 x1 ty1 alts1) (Case e2 x2 ty2 alts2) mco = do { subst1 <- match_ty renv subst ty1 ty2 ; subst2 <- match renv subst1 e1 e2 MRefl ; let renv' = rnMatchBndr2 renv x1 x2 ; match_alts renv' subst2 alts1 alts2 mco -- Alts are both sorted } -- Everything else fails match _ _ _e1 _e2 _mco = -- pprTrace "Failing at" ((text "e1:" <+> ppr _e1) $$ (text "e2:" <+> ppr _e2)) $ Nothing ------------- eta_reduce :: RuleMatchEnv -> CoreExpr -> Maybe (RuleMatchEnv, CoreExpr) -- See Note [Eta reduction in the target] eta_reduce renv e@(Lam {}) = go renv id [] e where go :: RuleMatchEnv -> BindWrapper -> [Var] -> CoreExpr -> Maybe (RuleMatchEnv, CoreExpr) go renv bw vs (Let b e) = go renv (bw . 
Let b) vs e go renv bw vs (Lam v e) = go renv' bw (v':vs) e where (rn_env', v') = rnBndrR (rv_lcl renv) v renv' = renv { rv_lcl = rn_env' } go renv bw (v:vs) (App f arg) | Var a <- arg, v == rnOccR (rv_lcl renv) a = go renv bw vs f | Type ty <- arg, Just tv <- getTyVar_maybe ty , v == rnOccR (rv_lcl renv) tv = go renv bw vs f go renv bw [] e = Just (renv, bw e) go _ _ (_:_) _ = Nothing eta_reduce _ _ = Nothing {- Note [Eta reduction in the target] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we are faced with this (#19790) Template {x} f x Target (\a b c. let blah in f x a b c) You might wonder why we have an eta-expanded target (see first subtle point below), but regardless of how it came about, we'd like eta-expansion not to impede matching. So eta_reduce does on-the-fly eta-reduction of the target expression. Given (\a b c. let blah in e a b c), it returns (let blah in e). Subtle points: * Consider a target: \x. f x In the main eta-reducer we do not eta-reduce this, because doing so might reduce the arity of the expression (from 1 to zero, because of ). But for rule-matching we /do/ want to match template (f a) against target (\x. f x), with a := This is a compelling reason for not relying on the Simplifier's eta-reducer. * The Lam case of eta_reduce renames as it goes. Consider (\x. \x. f x x). We should not eta-reduce this. As we go we rename the first x to x1, and the second to x2; then both argument x's are x2. * eta_reduce does /not/ need to check that the bindings 'blah' and expression 'e' don't mention a b c; but it /does/ extend the rv_lcl RnEnv2 (see rn_bndr in eta_reduce). * If 'blah' mentions the binders, the let-float rule won't fire; and * if 'e' mentions the binders we we'll also fail to match e.g. because of the exprFreeVars test in match_tmpl_var. Example: Template: {x} f a -- Some top-level 'a' Target: (\a b. 
f a a b) -- The \a shadows top level 'a' Then eta_reduce will /succeed/, with (rnEnvR = [a :-> a'], f a) The returned RnEnv will map [a :-> a'], where a' is fresh. (There is no need to rename 'b' because (in this example) it is not in scope. So it's as if we'd returned (f a') from eta_reduce; the renaming applied to the target is simply deferred. Note [Cancel reflexive casts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is an example (from #19790) which we want to catch (f x) ~ (\a b. (f x |> co) a b) |> sym co where f :: Int -> Stream co :: Stream ~ T1 -> T2 -> T3 when we eta-reduce (\a b. blah a b) to 'blah', we'll get (f x) ~ (f x) |> co |> sym co and we really want to spot that the co/sym-co cancels out. Hence * We keep an invariant that the MCoercion is always MRefl if the MCoercion is reflextve * We maintain this invariant via the call to checkReflexiveMCo in the Cast case of 'match'. -} ------------- match_co :: RuleMatchEnv -> RuleSubst -> Coercion -> Coercion -> Maybe RuleSubst -- We only match if the template is a coercion variable or Refl: -- see Note [Casts in the template] -- Like 'match' it is /not/ guaranteed that -- coercionKind template = coercionKind target -- But if match_co succeeds, it /is/ guaranteed that -- coercionKind (subst template) = coercionKind target match_co renv subst co1 co2 | Just cv <- getCoVar_maybe co1 = match_var renv subst cv (Coercion co2) | Just (ty1, r1) <- isReflCo_maybe co1 = do { (ty2, r2) <- isReflCo_maybe co2 ; guard (r1 == r2) ; match_ty renv subst ty1 ty2 } | debugIsOn = pprTrace "match_co: needs more cases" (ppr co1 $$ ppr co2) Nothing -- Currently just deals with CoVarCo and Refl | otherwise = Nothing ------------- rnMatchBndr2 :: RuleMatchEnv -> Var -> Var -> RuleMatchEnv rnMatchBndr2 renv x1 x2 = renv { rv_lcl = rnBndr2 (rv_lcl renv) x1 x2 , rv_fltR = delBndr (rv_fltR renv) x2 } ------------------------------------------ match_alts :: RuleMatchEnv -> RuleSubst -> [CoreAlt] -- Template -> [CoreAlt] -> MCoercion -- Target 
-> Maybe RuleSubst match_alts _ subst [] [] _ = return subst match_alts renv subst (Alt c1 vs1 r1:alts1) (Alt c2 vs2 r2:alts2) mco | c1 == c2 = do { subst1 <- match renv' subst r1 r2 mco ; match_alts renv subst1 alts1 alts2 mco } where renv' = foldl' mb renv (vs1 `zip` vs2) mb renv (v1,v2) = rnMatchBndr2 renv v1 v2 match_alts _ _ _ _ _ = Nothing ------------------------------------------ okToFloat :: RnEnv2 -> VarSet -> Bool okToFloat rn_env bind_fvs = allVarSet not_captured bind_fvs where not_captured fv = not (inRnEnvR rn_env fv) ------------------------------------------ match_var :: RuleMatchEnv -> RuleSubst -> Var -- Template -> CoreExpr -- Target -> Maybe RuleSubst match_var renv@(RV { rv_tmpls = tmpls, rv_lcl = rn_env, rv_fltR = flt_env }) subst v1 e2 | v1' `elemVarSet` tmpls = match_tmpl_var renv subst v1' e2 | otherwise -- v1' is not a template variable; check for an exact match with e2 = case e2 of -- Remember, envR of rn_env is disjoint from rv_fltR Var v2 | Just v2' <- rnOccR_maybe rn_env v2 -> -- v2 was bound by a nested lambda or case if v1' == v2' then Just subst else Nothing -- v2 is not bound nestedly; it is free -- in the whole expression being matched -- So it will be in the InScopeSet for flt_env (#20200) | Var v2' <- lookupIdSubst flt_env v2 , v1' == v2' -> Just subst | otherwise -> Nothing _ -> Nothing where v1' = rnOccL rn_env v1 -- If the template is -- forall x. f x (\x -> x) = ... -- Then the x inside the lambda isn't the -- template x, so we must rename first! ------------------------------------------ match_tmpl_var :: RuleMatchEnv -> RuleSubst -> Var -- Template -> CoreExpr -- Target -> Maybe RuleSubst match_tmpl_var renv@(RV { rv_lcl = rn_env, rv_fltR = flt_env }) subst@(RS { rs_id_subst = id_subst, rs_bndrs = let_bndrs }) v1' e2 | any (inRnEnvR rn_env) (exprFreeVarsList e2) = Nothing -- Skolem-escape failure -- e.g. match forall a. (\x-> a x) against (\y. 
y y) | Just e1' <- lookupVarEnv id_subst v1' = if eqCoreExpr e1' e2' then Just subst else Nothing | otherwise -- See Note [Matching variable types] = do { subst' <- match_ty renv subst (idType v1') (exprType e2) ; return (subst' { rs_id_subst = id_subst' }) } where -- e2' is the result of applying flt_env to e2 e2' | null let_bndrs = e2 | otherwise = substExpr flt_env e2 id_subst' = extendVarEnv (rs_id_subst subst) v1' e2' -- No further renaming to do on e2', -- because no free var of e2' is in the rnEnvR of the envt ------------------------------------------ match_ty :: RuleMatchEnv -> RuleSubst -> Type -- Template -> Type -- Target -> Maybe RuleSubst -- Matching Core types: use the matcher in GHC.Tc.Utils.TcType. -- Notice that we treat newtypes as opaque. For example, suppose -- we have a specialised version of a function at a newtype, say -- newtype T = MkT Int -- We only want to replace (f T) with f', not (f Int). match_ty renv subst ty1 ty2 = do { tv_subst' <- Unify.ruleMatchTyKiX (rv_tmpls renv) (rv_lcl renv) tv_subst ty1 ty2 ; return (subst { rs_tv_subst = tv_subst' }) } where tv_subst = rs_tv_subst subst {- Note [Matching variable types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When matching x ~ e, where 'x' is a template variable, we must check that x's type matches e's type, to establish (TypeInv). For example forall (c::Char->Int) (x::Char). f (c x) = "RULE FIRED" We must not match on, say (f (pred (3::Int))). It's actually quite difficult to come up with an example that shows you need type matching, esp since matching is left-to-right, so type args get matched first. But it's possible (e.g. simplrun008) and this is the Right Thing to do. An alternative would be to make (TypeInf) into a /pre-condition/. It is threatened only by the App rule. So when matching an application (e1 e2) ~ (d1 d2) would be to collect args of the application chain, match the types of the head, then match arg-by-arg. However that alternative seems a bit more complicated. 
And by matching types at variables we do one match_ty for each template variable, rather than one for each application chain. Usually there are fewer template variables, although for simple rules it could be the other way around. Note [Expanding variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is another Very Important rule: if the term being matched is a variable, we expand it so long as its unfolding is "expandable". (Its occurrence information is not necessarily up to date, so we don't use it.) By "expandable" we mean a WHNF or a "constructor-like" application. This is the key reason for "constructor-like" Ids. If we have {-# NOINLINE [1] CONLIKE g #-} {-# RULE f (g x) = h x #-} then in the term let v = g 3 in ....(f v).... we want to make the rule fire, to replace (f v) with (h 3). Note [Do not expand locally-bound variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do *not* expand locally-bound variables, else there's a worry that the unfolding might mention variables that are themselves renamed. Example case x of y { (p,q) -> ...y... } Don't expand 'y' to (p,q) because p,q might themselves have been renamed. Essentially we only expand unfoldings that are "outside" the entire match. Hence, (a) the guard (not (isLocallyBoundR v2)) (b) when we expand we nuke the renaming envt (nukeRnEnvR). Note [Tick annotations in RULE matching] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to unconditionally look through ticks in both template and expression being matched. This is actually illegal for counting or cost-centre-scoped ticks, because we have no place to put them without changing entry counts and/or costs. So now we just fail the match in these cases. On the other hand, where we are allowed to insert new cost into the tick scope, we can float them upwards to the rule application site. Moreover, we may encounter ticks in the template of a rule. There are a few ways in which these may be introduced (e.g. #18162, #17619). Such ticks are ignored by the matcher. 
See Note [Simplifying rules] in GHC.Core.Opt.Simplify.Utils for details. cf Note [Tick annotations in call patterns] in GHC.Core.Opt.SpecConstr Note [Matching lets] ~~~~~~~~~~~~~~~~~~~~ Matching a let-expression. Consider RULE forall x. f (g x) = and target expression f (let { w=R } in g E)) Then we'd like the rule to match, to generate let { w=R } in (\x. ) E In effect, we want to float the let-binding outward, to enable the match to happen. This is the WHOLE REASON for accumulating bindings in the RuleSubst We can only do this if the free variables of R are not bound by the part of the target expression outside the let binding; e.g. f (\v. let w = v+1 in g E) Here we obviously cannot float the let-binding for w. Hence the use of okToFloat. There are a couple of tricky points: (a) What if floating the binding captures a variable that is free in the entire expression? f (let v = x+1 in v) v --> NOT! let v = x+1 in f (x+1) v (b) What if the let shadows a local binding? f (\v -> (v, let v = x+1 in (v,v)) --> NOT! let v = x+1 in f (\v -> (v, (v,v))) (c) What if two non-nested let bindings bind the same variable? f (let v = e1 in b1) (let v = e2 in b2) --> NOT! let v = e1 in let v = e2 in (f b2 b2) See testsuite test `T4814`. Our cunning plan is this: (1) Along with the growing substitution for template variables we maintain a growing set of floated let-bindings (rs_binds) plus the set of variables thus bound (rs_bndrs). (2) The RnEnv2 in the MatchEnv binds only the local binders in the term (lambdas, case), not the floated let-bndrs. (3) When we encounter a `let` in the term to be matched, in the Let case of `match`, we use `okToFloat` to check that it does not mention any locally bound (lambda, case) variables. If so we fail. (4) In the Let case of `match`, we use GHC.Core.Subst.substBind to freshen the binding (which, remember (3), mentions no locally bound variables), in a lexically-scoped way (via rv_fltR in MatchEnv). 
The subtle point is that we want an in-scope set for this substitution that includes /two/ sets: * The in-scope variables at this point, so that we avoid using those local names for the floated binding; points (a) and (b) above. * All "earlier" floated bindings, so that we avoid using the same name for two different floated bindings; point (c) above. Because we have to compute the in-scope set here, the in-scope set stored in `rv_fltR` is always ignored; we leave it only because it's convenient to have `rv_fltR :: Subst` (with an always-ignored `InScopeSet`) rather than storing three separate substitutions. (5) We apply that freshening substitution, in a lexically-scoped way to the term, although lazily; this is the rv_fltR field. See #4814, which is an issue resulting from getting this wrong. Note [Matching cases] ~~~~~~~~~~~~~~~~~~~~~ {- NOTE: This idea is currently disabled. It really only works if the primops involved are OkForSpeculation, and, since they have side effects readIntOfAddr and touch are not. Maybe we'll get back to this later . -} Consider f (case readIntOffAddr# p# i# realWorld# of { (# s#, n# #) -> case touch# fp s# of { _ -> I# n# } } ) This happened in a tight loop generated by stream fusion that Roman encountered. We'd like to treat this just like the let case, because the primops concerned are ok-for-speculation. 
That is, we'd like to behave as if it had been case readIntOffAddr# p# i# realWorld# of { (# s#, n# #) -> case touch# fp s# of { _ -> f (I# n# } } ) Note [Lookup in-scope] ~~~~~~~~~~~~~~~~~~~~~~ Consider this example foo :: Int -> Maybe Int -> Int foo 0 (Just n) = n foo m (Just n) = foo (m-n) (Just n) SpecConstr sees this fragment: case w_smT of wild_Xf [Just A] { Data.Maybe.Nothing -> lvl_smf; Data.Maybe.Just n_acT [Just S(L)] -> case n_acT of wild1_ams [Just A] { GHC.Base.I# y_amr [Just L] -> $wfoo_smW (GHC.Prim.-# ds_Xmb y_amr) wild_Xf }}; and correctly generates the rule RULES: "SC:$wfoo1" [0] __forall {y_amr [Just L] :: GHC.Prim.Int# sc_snn :: GHC.Prim.Int#} $wfoo_smW sc_snn (Data.Maybe.Just @ GHC.Base.Int (GHC.Base.I# y_amr)) = $s$wfoo_sno y_amr sc_snn ;] BUT we must ensure that this rule matches in the original function! Note that the call to $wfoo is $wfoo_smW (GHC.Prim.-# ds_Xmb y_amr) wild_Xf During matching we expand wild_Xf to (Just n_acT). But then we must also expand n_acT to (I# y_amr). And we can only do that if we look up n_acT in the in-scope set, because in wild_Xf's unfolding it won't have an unfolding at all. That is why the 'lookupRnInScope' call in the (Var v2) case of 'match' is so important. ************************************************************************ * * Rule-check the program * * ************************************************************************ We want to know what sites have rules that could have fired but didn't. This pass runs over the tree (without changing it) and reports such. 
-} -- | Report partial matches for rules beginning with the specified -- string for the purposes of error reporting ruleCheckProgram :: RuleOpts -- ^ Rule options -> CompilerPhase -- ^ Rule activation test -> String -- ^ Rule pattern -> (Id -> [CoreRule]) -- ^ Rules for an Id -> CoreProgram -- ^ Bindings to check in -> SDoc -- ^ Resulting check message ruleCheckProgram ropts phase rule_pat rules binds | isEmptyBag results = text "Rule check results: no rule application sites" | otherwise = vcat [text "Rule check results:", line, vcat [ p $$ line | p <- bagToList results ] ] where env = RuleCheckEnv { rc_is_active = isActive phase , rc_id_unf = idUnfolding -- Not quite right -- Should use activeUnfolding , rc_pattern = rule_pat , rc_rules = rules , rc_ropts = ropts } results = unionManyBags (map (ruleCheckBind env) binds) line = text (replicate 20 '-') data RuleCheckEnv = RuleCheckEnv { rc_is_active :: Activation -> Bool, rc_id_unf :: IdUnfoldingFun, rc_pattern :: String, rc_rules :: Id -> [CoreRule], rc_ropts :: RuleOpts } ruleCheckBind :: RuleCheckEnv -> CoreBind -> Bag SDoc -- The Bag returned has one SDoc for each call site found ruleCheckBind env (NonRec _ r) = ruleCheck env r ruleCheckBind env (Rec prs) = unionManyBags [ruleCheck env r | (_,r) <- prs] ruleCheck :: RuleCheckEnv -> CoreExpr -> Bag SDoc ruleCheck _ (Var _) = emptyBag ruleCheck _ (Lit _) = emptyBag ruleCheck _ (Type _) = emptyBag ruleCheck _ (Coercion _) = emptyBag ruleCheck env (App f a) = ruleCheckApp env (App f a) [] ruleCheck env (Tick _ e) = ruleCheck env e ruleCheck env (Cast e _) = ruleCheck env e ruleCheck env (Let bd e) = ruleCheckBind env bd `unionBags` ruleCheck env e ruleCheck env (Lam _ e) = ruleCheck env e ruleCheck env (Case e _ _ as) = ruleCheck env e `unionBags` unionManyBags [ruleCheck env r | Alt _ _ r <- as] ruleCheckApp :: RuleCheckEnv -> Expr CoreBndr -> [Arg CoreBndr] -> Bag SDoc ruleCheckApp env (App f a) as = ruleCheck env a `unionBags` ruleCheckApp env f (a:as) 
ruleCheckApp env (Var f) as = ruleCheckFun env f as ruleCheckApp env other _ = ruleCheck env other ruleCheckFun :: RuleCheckEnv -> Id -> [CoreExpr] -> Bag SDoc -- Produce a report for all rules matching the predicate -- saying why it doesn't match the specified application ruleCheckFun env fn args | null name_match_rules = emptyBag | otherwise = unitBag (ruleAppCheck_help env fn args name_match_rules) where name_match_rules = filter match (rc_rules env fn) match rule = (rc_pattern env) `isPrefixOf` unpackFS (ruleName rule) ruleAppCheck_help :: RuleCheckEnv -> Id -> [CoreExpr] -> [CoreRule] -> SDoc ruleAppCheck_help env fn args rules = -- The rules match the pattern, so we want to print something vcat [text "Expression:" <+> ppr (mkApps (Var fn) args), vcat (map check_rule rules)] where n_args = length args i_args = args `zip` [1::Int ..] rough_args = map roughTopName args check_rule rule = rule_herald rule <> colon <+> rule_info (rc_ropts env) rule rule_herald (BuiltinRule { ru_name = name }) = text "Builtin rule" <+> doubleQuotes (ftext name) rule_herald (Rule { ru_name = name }) = text "Rule" <+> doubleQuotes (ftext name) rule_info opts rule | Just _ <- matchRule opts (emptyInScopeSet, rc_id_unf env) noBlackList fn args rough_args rule = text "matches (which is very peculiar!)" rule_info _ (BuiltinRule {}) = text "does not match" rule_info _ (Rule { ru_act = act, ru_bndrs = rule_bndrs, ru_args = rule_args}) | not (rc_is_active env act) = text "active only in later phase" | n_args < n_rule_args = text "too few arguments" | n_mismatches == n_rule_args = text "no arguments match" | n_mismatches == 0 = text "all arguments match (considered individually), but rule as a whole does not" | otherwise = text "arguments" <+> ppr mismatches <+> text "do not match (1-indexing)" where n_rule_args = length rule_args n_mismatches = length mismatches mismatches = [i | (rule_arg, (arg,i)) <- rule_args `zip` i_args, not (isJust (match_fn rule_arg arg))] lhs_fvs = exprsFreeVars 
rule_args -- Includes template tyvars match_fn rule_arg arg = match renv emptyRuleSubst rule_arg arg MRefl where in_scope = mkInScopeSet (lhs_fvs `unionVarSet` exprFreeVars arg) renv = RV { rv_lcl = mkRnEnv2 in_scope , rv_tmpls = mkVarSet rule_bndrs , rv_fltR = mkEmptySubst in_scope , rv_unf = rc_id_unf env } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Seq.hs0000644000000000000000000000757414472400112017624 0ustar0000000000000000-- | -- Various utilities for forcing Core structures -- -- It can often be useful to force various parts of the AST. This module -- provides a number of @seq@-like functions to accomplish this. module GHC.Core.Seq ( -- * Utilities for forcing Core structures seqExpr, seqExprs, seqUnfolding, seqRules, megaSeqIdInfo, seqRuleInfo, seqBinds, ) where import GHC.Prelude import GHC.Core import GHC.Types.Id.Info import GHC.Types.Demand( seqDemand, seqDmdSig ) import GHC.Types.Cpr( seqCprSig ) import GHC.Types.Basic( seqOccInfo ) import GHC.Types.Tickish import GHC.Types.Var.Set( seqDVarSet ) import GHC.Types.Var( varType, tyVarKind ) import GHC.Core.Type( seqType, isTyVar ) import GHC.Core.Coercion( seqCo ) import GHC.Types.Id( idInfo ) -- | Evaluate all the fields of the 'IdInfo' that are generally demanded by the -- compiler megaSeqIdInfo :: IdInfo -> () megaSeqIdInfo info = seqRuleInfo (ruleInfo info) `seq` -- Omitting this improves runtimes a little, presumably because -- some unfoldings are not calculated at all -- seqUnfolding (realUnfoldingInfo info) `seq` seqDemand (demandInfo info) `seq` seqDmdSig (dmdSigInfo info) `seq` seqCprSig (cprSigInfo info) `seq` seqCaf (cafInfo info) `seq` seqOneShot (oneShotInfo info) `seq` seqOccInfo (occInfo info) seqOneShot :: OneShotInfo -> () seqOneShot l = l `seq` () seqRuleInfo :: RuleInfo -> () seqRuleInfo (RuleInfo rules fvs) = seqRules rules `seq` seqDVarSet fvs seqCaf :: CafInfo -> () seqCaf c = c `seq` () seqRules :: [CoreRule] -> () seqRules [] = () seqRules (Rule { ru_bndrs = bndrs, ru_args = 
args, ru_rhs = rhs } : rules) = seqBndrs bndrs `seq` seqExprs (rhs:args) `seq` seqRules rules seqRules (BuiltinRule {} : rules) = seqRules rules seqExpr :: CoreExpr -> () seqExpr (Var v) = v `seq` () seqExpr (Lit lit) = lit `seq` () seqExpr (App f a) = seqExpr f `seq` seqExpr a seqExpr (Lam b e) = seqBndr b `seq` seqExpr e seqExpr (Let b e) = seqBind b `seq` seqExpr e seqExpr (Case e b t as) = seqExpr e `seq` seqBndr b `seq` seqType t `seq` seqAlts as seqExpr (Cast e co) = seqExpr e `seq` seqCo co seqExpr (Tick n e) = seqTickish n `seq` seqExpr e seqExpr (Type t) = seqType t seqExpr (Coercion co) = seqCo co seqExprs :: [CoreExpr] -> () seqExprs [] = () seqExprs (e:es) = seqExpr e `seq` seqExprs es seqTickish :: CoreTickish -> () seqTickish ProfNote{ profNoteCC = cc } = cc `seq` () seqTickish HpcTick{} = () seqTickish Breakpoint{ breakpointFVs = ids } = seqBndrs ids seqTickish SourceNote{} = () seqBndr :: CoreBndr -> () seqBndr b | isTyVar b = seqType (tyVarKind b) | otherwise = seqType (varType b) `seq` megaSeqIdInfo (idInfo b) seqBndrs :: [CoreBndr] -> () seqBndrs [] = () seqBndrs (b:bs) = seqBndr b `seq` seqBndrs bs seqBinds :: [Bind CoreBndr] -> () seqBinds bs = foldr (seq . 
seqBind) () bs seqBind :: Bind CoreBndr -> () seqBind (NonRec b e) = seqBndr b `seq` seqExpr e seqBind (Rec prs) = seqPairs prs seqPairs :: [(CoreBndr, CoreExpr)] -> () seqPairs [] = () seqPairs ((b,e):prs) = seqBndr b `seq` seqExpr e `seq` seqPairs prs seqAlts :: [CoreAlt] -> () seqAlts [] = () seqAlts (Alt c bs e:alts) = c `seq` seqBndrs bs `seq` seqExpr e `seq` seqAlts alts seqUnfolding :: Unfolding -> () seqUnfolding (CoreUnfolding { uf_tmpl = e, uf_is_top = top, uf_cache = cache, uf_guidance = g}) = seqExpr e `seq` top `seq` cache `seq` seqGuidance g -- The unf_cache :: UnfoldingCache field is a strict data type, -- so it is sufficient to use plain `seq` for this field -- See Note [UnfoldingCache] in GHC.Core seqUnfolding _ = () seqGuidance :: UnfoldingGuidance -> () seqGuidance (UnfIfGoodArgs ns n b) = n `seq` sum ns `seq` b `seq` () seqGuidance _ = () ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/SimpleOpt.hs0000644000000000000000000014777514472400112021020 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Core.SimpleOpt ( SimpleOpts (..), defaultSimpleOpts, -- ** Simple expression optimiser simpleOptPgm, simpleOptExpr, simpleOptExprWith, -- ** Join points joinPointBinding_maybe, joinPointBindings_maybe, -- ** Predicates on expressions exprIsConApp_maybe, exprIsLiteral_maybe, exprIsLambda_maybe, ) where import GHC.Prelude import GHC.Core import GHC.Core.Opt.Arity import GHC.Core.Subst import GHC.Core.Utils import GHC.Core.FVs import GHC.Core.Unfold import GHC.Core.Unfold.Make import GHC.Core.Make ( FloatBind(..) ) import GHC.Core.Opt.OccurAnal( occurAnalyseExpr, occurAnalysePgm, zapLambdaBndrs ) import GHC.Types.Literal import GHC.Types.Id import GHC.Types.Id.Info ( realUnfoldingInfo, setUnfoldingInfo, setRuleInfo, IdInfo (..) 
) import GHC.Types.Var ( isNonCoVarId ) import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Core.DataCon import GHC.Types.Demand( etaConvertDmdSig ) import GHC.Types.Tickish import GHC.Core.Coercion.Opt ( optCoercion, OptCoercionOpts (..) ) import GHC.Core.Type hiding ( substTy, extendTvSubst, extendCvSubst, extendTvSubstList , isInScope, substTyVarBndr, cloneTyVarBndr ) import GHC.Core.Coercion hiding ( substCo, substCoVarBndr ) import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Types.Basic import GHC.Unit.Module ( Module ) import GHC.Utils.Encoding import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Misc import GHC.Data.Maybe ( orElse ) import GHC.Data.Graph.UnVar import Data.List (mapAccumL) import qualified Data.ByteString as BS {- ************************************************************************ * * The Simple Optimiser * * ************************************************************************ Note [The simple optimiser] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The simple optimiser is a lightweight, pure (non-monadic) function that rapidly does a lot of simple optimisations, including - inlining things that occur just once, or whose RHS turns out to be trivial - beta reduction - case of known constructor - dead code elimination It does NOT do any call-site inlining; it only inlines a function if it can do so unconditionally, dropping the binding. It thereby guarantees to leave no un-reduced beta-redexes. It is careful to follow the guidance of "Secrets of the GHC inliner", and in particular the pre-inline-unconditionally and post-inline-unconditionally story, to do effective beta reduction on functions called precisely once, without repeatedly optimising the same expression. In fact, the simple optimiser is a good example of this little dance in action; the full Simplifier is a lot more complicated. 
-} -- | Simple optimiser options data SimpleOpts = SimpleOpts { so_uf_opts :: !UnfoldingOpts -- ^ Unfolding options , so_co_opts :: !OptCoercionOpts -- ^ Coercion optimiser options } -- | Default options for the Simple optimiser. defaultSimpleOpts :: SimpleOpts defaultSimpleOpts = SimpleOpts { so_uf_opts = defaultUnfoldingOpts , so_co_opts = OptCoercionOpts { optCoercionEnabled = False } } simpleOptExpr :: HasDebugCallStack => SimpleOpts -> CoreExpr -> CoreExpr -- See Note [The simple optimiser] -- Do simple optimisation on an expression -- The optimisation is very straightforward: just -- inline non-recursive bindings that are used only once, -- or where the RHS is trivial -- -- We also inline bindings that bind a Eq# box: see -- See Note [Getting the map/coerce RULE to work]. -- -- Also we convert functions to join points where possible (as -- the occurrence analyser does most of the work anyway). -- -- The result is NOT guaranteed occurrence-analysed, because -- in (let x = y in ....) we substitute for x; so y's occ-info -- may change radically -- -- Note that simpleOptExpr is a pure function that we want to be able to call -- from lots of places, including ones that don't have DynFlags (e.g to optimise -- unfoldings of statically defined Ids via mkCompulsoryUnfolding). It used to -- fetch its options directly from the DynFlags, however, so some callers had to -- resort to using unsafeGlobalDynFlags (a global mutable variable containing -- the DynFlags). It has been modified to take its own SimpleOpts that may be -- created from DynFlags, but not necessarily. simpleOptExpr opts expr = -- pprTrace "simpleOptExpr" (ppr init_subst $$ ppr expr) simpleOptExprWith opts init_subst expr where init_subst = mkEmptySubst (mkInScopeSet (exprFreeVars expr)) -- It's potentially important to make a proper in-scope set -- Consider let x = ..y.. in \y. ...x... -- Then we should remember to clone y before substituting -- for x. 
It's very unlikely to occur, because we probably -- won't *be* substituting for x if it occurs inside a -- lambda. -- -- It's a bit painful to call exprFreeVars, because it makes -- three passes instead of two (occ-anal, and go) simpleOptExprWith :: HasDebugCallStack => SimpleOpts -> Subst -> InExpr -> OutExpr -- See Note [The simple optimiser] simpleOptExprWith opts subst expr = simple_opt_expr init_env (occurAnalyseExpr expr) where init_env = (emptyEnv opts) { soe_subst = subst } ---------------------- simpleOptPgm :: SimpleOpts -> Module -> CoreProgram -> [CoreRule] -> (CoreProgram, [CoreRule], CoreProgram) -- See Note [The simple optimiser] simpleOptPgm opts this_mod binds rules = (reverse binds', rules', occ_anald_binds) where occ_anald_binds = occurAnalysePgm this_mod (\_ -> True) {- All unfoldings active -} (\_ -> False) {- No rules active -} rules binds (final_env, binds') = foldl' do_one (emptyEnv opts, []) occ_anald_binds final_subst = soe_subst final_env rules' = substRulesForImportedIds final_subst rules -- We never unconditionally inline into rules, -- hence paying just a substitution do_one (env, binds') bind = case simple_opt_bind env bind TopLevel of (env', Nothing) -> (env', binds') (env', Just bind') -> (env', bind':binds') -- In these functions the substitution maps InVar -> OutExpr ---------------------- type SimpleClo = (SimpleOptEnv, InExpr) data SimpleOptEnv = SOE { soe_co_opt_opts :: !OptCoercionOpts -- ^ Options for the coercion optimiser , soe_uf_opts :: !UnfoldingOpts -- ^ Unfolding options , soe_inl :: IdEnv SimpleClo -- ^ Deals with preInlineUnconditionally; things -- that occur exactly once and are inlined -- without having first been simplified , soe_subst :: Subst -- ^ Deals with cloning; includes the InScopeSet , soe_rec_ids :: !UnVarSet -- ^ Fast OutVarSet tracking which recursive RHSs we are analysing. 
-- See Note [Eta reduction in recursive RHSs] } instance Outputable SimpleOptEnv where ppr (SOE { soe_inl = inl, soe_subst = subst }) = text "SOE {" <+> vcat [ text "soe_inl =" <+> ppr inl , text "soe_subst =" <+> ppr subst ] <+> text "}" emptyEnv :: SimpleOpts -> SimpleOptEnv emptyEnv opts = SOE { soe_inl = emptyVarEnv , soe_subst = emptySubst , soe_rec_ids = emptyUnVarSet , soe_co_opt_opts = so_co_opts opts , soe_uf_opts = so_uf_opts opts } soeZapSubst :: SimpleOptEnv -> SimpleOptEnv soeZapSubst env@(SOE { soe_subst = subst }) = env { soe_inl = emptyVarEnv, soe_subst = zapSubstEnv subst } soeSetInScope :: SimpleOptEnv -> SimpleOptEnv -> SimpleOptEnv -- Take in-scope set from env1, and the rest from env2 soeSetInScope (SOE { soe_subst = subst1 }) env2@(SOE { soe_subst = subst2 }) = env2 { soe_subst = setInScope subst2 (substInScope subst1) } enterRecGroupRHSs :: SimpleOptEnv -> [OutBndr] -> (SimpleOptEnv -> (SimpleOptEnv, r)) -> (SimpleOptEnv, r) enterRecGroupRHSs env bndrs k = (env'{soe_rec_ids = soe_rec_ids env}, r) where (env', r) = k env{soe_rec_ids = extendUnVarSetList bndrs (soe_rec_ids env)} --------------- simple_opt_clo :: SimpleOptEnv -> SimpleClo -> OutExpr simple_opt_clo env (e_env, e) = simple_opt_expr (soeSetInScope env e_env) e simple_opt_expr :: HasCallStack => SimpleOptEnv -> InExpr -> OutExpr simple_opt_expr env expr = go expr where rec_ids = soe_rec_ids env subst = soe_subst env in_scope = substInScope subst in_scope_env = (in_scope, simpleUnfoldingFun) --------------- go (Var v) | Just clo <- lookupVarEnv (soe_inl env) v = simple_opt_clo env clo | otherwise = lookupIdSubst (soe_subst env) v go (App e1 e2) = simple_app env e1 [(env,e2)] go (Type ty) = Type (substTy subst ty) go (Coercion co) = Coercion (go_co co) go (Lit lit) = Lit lit go (Tick tickish e) = mkTick (substTickish subst tickish) (go e) go (Cast e co) = mk_cast (go e) (go_co co) go (Let bind body) = case simple_opt_bind env bind NotTopLevel of (env', Nothing) -> simple_opt_expr env' 
body (env', Just bind) -> Let bind (simple_opt_expr env' body) go lam@(Lam {}) = go_lam env [] lam go (Case e b ty as) -- See Note [Getting the map/coerce RULE to work] | isDeadBinder b , Just (_, [], con, _tys, es) <- exprIsConApp_maybe in_scope_env e' -- We don't need to be concerned about floats when looking for coerce. , Just (Alt altcon bs rhs) <- findAlt (DataAlt con) as = case altcon of DEFAULT -> go rhs _ -> foldr wrapLet (simple_opt_expr env' rhs) mb_prs where (env', mb_prs) = mapAccumL (simple_out_bind NotTopLevel) env $ zipEqual "simpleOptExpr" bs es -- Note [Getting the map/coerce RULE to work] | isDeadBinder b , [Alt DEFAULT _ rhs] <- as , isCoVarType (varType b) , (Var fun, _args) <- collectArgs e , fun `hasKey` coercibleSCSelIdKey -- without this last check, we get #11230 = go rhs | otherwise = Case e' b' (substTy subst ty) (map (go_alt env') as) where e' = go e (env', b') = subst_opt_bndr env b ---------------------- go_co co = optCoercion (soe_co_opt_opts env) (getTCvSubst subst) co ---------------------- go_alt env (Alt con bndrs rhs) = Alt con bndrs' (simple_opt_expr env' rhs) where (env', bndrs') = subst_opt_bndrs env bndrs ---------------------- -- go_lam tries eta reduction -- It is quite important that it does so. I tried removing this code and -- got a lot of regressions, e.g., +11% ghc/alloc in T18223 and many -- run/alloc increases. Presumably RULEs are affected. go_lam env bs' (Lam b e) = go_lam env' (b':bs') e where (env', b') = subst_opt_bndr env b go_lam env bs' e | Just etad_e <- tryEtaReduce rec_ids bs e' = etad_e | otherwise = mkLams bs e' where bs = reverse bs' e' = simple_opt_expr env e mk_cast :: CoreExpr -> CoercionR -> CoreExpr -- Like GHC.Core.Utils.mkCast, but does a full reflexivity check. 
-- mkCast doesn't do that because the Simplifier does (in simplCast) -- But in SimpleOpt it's nice to kill those nested casts (#18112) mk_cast (Cast e co1) co2 = mk_cast e (co1 `mkTransCo` co2) mk_cast (Tick t e) co = Tick t (mk_cast e co) mk_cast e co | isReflexiveCo co = e | otherwise = Cast e co ---------------------- -- simple_app collects arguments for beta reduction simple_app :: HasDebugCallStack => SimpleOptEnv -> InExpr -> [SimpleClo] -> CoreExpr simple_app env (Var v) as | Just (env', e) <- lookupVarEnv (soe_inl env) v = simple_app (soeSetInScope env env') e as | let unf = idUnfolding v , isCompulsoryUnfolding (idUnfolding v) , isAlwaysActive (idInlineActivation v) -- See Note [Unfold compulsory unfoldings in LHSs] = simple_app (soeZapSubst env) (unfoldingTemplate unf) as | otherwise , let out_fn = lookupIdSubst (soe_subst env) v = finish_app env out_fn as simple_app env (App e1 e2) as = simple_app env e1 ((env, e2) : as) simple_app env e@(Lam {}) as@(_:_) = do_beta env (zapLambdaBndrs e n_args) as -- Be careful to zap the lambda binders if necessary -- c.f. the Lam case of simplExprF1 in GHC.Core.Opt.Simplify -- Lacking this zap caused #19347, when we had a redex -- (\ a b. K a b) e1 e2 -- where (as it happens) the eta-expanded K is produced by -- Note [Typechecking data constructors] in GHC.Tc.Gen.Head where n_args = length as do_beta env (Lam b body) (a:as) | (env', mb_pr) <- simple_bind_pair env b Nothing a NotTopLevel = wrapLet mb_pr $ do_beta env' body as do_beta env body as = simple_app env body as simple_app env (Tick t e) as -- Okay to do "(Tick t e) x ==> Tick t (e x)"? | t `tickishScopesLike` SoftScope = mkTick t $ simple_app env e as -- (let x = e in b) a1 .. an => let x = e in (b a1 .. an) -- The let might appear there as a result of inlining -- e.g. 
let f = let x = e in b -- in f a1 a2 -- (#13208) -- However, do /not/ do this transformation for join points -- See Note [simple_app and join points] simple_app env (Let bind body) args = case simple_opt_bind env bind NotTopLevel of (env', Nothing) -> simple_app env' body args (env', Just bind') | isJoinBind bind' -> finish_app env expr' args | otherwise -> Let bind' (simple_app env' body args) where expr' = Let bind' (simple_opt_expr env' body) simple_app env e as = finish_app env (simple_opt_expr env e) as finish_app :: SimpleOptEnv -> OutExpr -> [SimpleClo] -> OutExpr finish_app _ fun [] = fun finish_app env fun (arg:args) = finish_app env (App fun (simple_opt_clo env arg)) args ---------------------- simple_opt_bind :: SimpleOptEnv -> InBind -> TopLevelFlag -> (SimpleOptEnv, Maybe OutBind) simple_opt_bind env (NonRec b r) top_level = (env', case mb_pr of Nothing -> Nothing Just (b,r) -> Just (NonRec b r)) where (b', r') = joinPointBinding_maybe b r `orElse` (b, r) (env', mb_pr) = simple_bind_pair env b' Nothing (env,r') top_level simple_opt_bind env (Rec prs) top_level = (env2, res_bind) where res_bind = Just (Rec (reverse rev_prs')) prs' = joinPointBindings_maybe prs `orElse` prs (env1, bndrs') = subst_opt_bndrs env (map fst prs') (env2, rev_prs') = enterRecGroupRHSs env1 bndrs' $ \env -> foldl' do_pr (env, []) (prs' `zip` bndrs') do_pr (env, prs) ((b,r), b') = (env', case mb_pr of Just pr -> pr : prs Nothing -> prs) where (env', mb_pr) = simple_bind_pair env b (Just b') (env,r) top_level ---------------------- simple_bind_pair :: SimpleOptEnv -> InVar -> Maybe OutVar -> SimpleClo -> TopLevelFlag -> (SimpleOptEnv, Maybe (OutVar, OutExpr)) -- (simple_bind_pair subst in_var out_rhs) -- either extends subst with (in_var -> out_rhs) -- or returns Nothing simple_bind_pair env@(SOE { soe_inl = inl_env, soe_subst = subst }) in_bndr mb_out_bndr clo@(rhs_env, in_rhs) top_level | Type ty <- in_rhs -- let a::* = TYPE ty in , let out_ty = substTy (soe_subst rhs_env) ty = 
assertPpr (isTyVar in_bndr) (ppr in_bndr $$ ppr in_rhs) $ (env { soe_subst = extendTvSubst subst in_bndr out_ty }, Nothing) | Coercion co <- in_rhs , let out_co = optCoercion (soe_co_opt_opts env) (getTCvSubst (soe_subst rhs_env)) co = assert (isCoVar in_bndr) (env { soe_subst = extendCvSubst subst in_bndr out_co }, Nothing) | assertPpr (isNonCoVarId in_bndr) (ppr in_bndr) -- The previous two guards got rid of tyvars and coercions -- See Note [Core type and coercion invariant] in GHC.Core pre_inline_unconditionally = (env { soe_inl = extendVarEnv inl_env in_bndr clo }, Nothing) | otherwise = simple_out_bind_pair env in_bndr mb_out_bndr out_rhs occ active stable_unf top_level where stable_unf = isStableUnfolding (idUnfolding in_bndr) active = isAlwaysActive (idInlineActivation in_bndr) occ = idOccInfo in_bndr out_rhs | Just join_arity <- isJoinId_maybe in_bndr = simple_join_rhs join_arity | otherwise = simple_opt_clo env clo simple_join_rhs join_arity -- See Note [Preserve join-binding arity] = mkLams join_bndrs' (simple_opt_expr env_body join_body) where env0 = soeSetInScope env rhs_env (join_bndrs, join_body) = collectNBinders join_arity in_rhs (env_body, join_bndrs') = subst_opt_bndrs env0 join_bndrs pre_inline_unconditionally :: Bool pre_inline_unconditionally | isExportedId in_bndr = False | stable_unf = False | not active = False -- Note [Inline prag in simplOpt] | not (safe_to_inline occ) = False | otherwise = True -- Unconditionally safe to inline safe_to_inline :: OccInfo -> Bool safe_to_inline IAmALoopBreaker{} = False safe_to_inline IAmDead = True safe_to_inline OneOcc{ occ_in_lam = NotInsideLam , occ_n_br = 1 } = True safe_to_inline OneOcc{} = False safe_to_inline ManyOccs{} = False ------------------- simple_out_bind :: TopLevelFlag -> SimpleOptEnv -> (InVar, OutExpr) -> (SimpleOptEnv, Maybe (OutVar, OutExpr)) simple_out_bind top_level env@(SOE { soe_subst = subst }) (in_bndr, out_rhs) | Type out_ty <- out_rhs = assertPpr (isTyVar in_bndr) (ppr in_bndr 
$$ ppr out_ty $$ ppr out_rhs) (env { soe_subst = extendTvSubst subst in_bndr out_ty }, Nothing) | Coercion out_co <- out_rhs = assert (isCoVar in_bndr) (env { soe_subst = extendCvSubst subst in_bndr out_co }, Nothing) | otherwise = simple_out_bind_pair env in_bndr Nothing out_rhs (idOccInfo in_bndr) True False top_level ------------------- simple_out_bind_pair :: SimpleOptEnv -> InId -> Maybe OutId -> OutExpr -> OccInfo -> Bool -> Bool -> TopLevelFlag -> (SimpleOptEnv, Maybe (OutVar, OutExpr)) simple_out_bind_pair env in_bndr mb_out_bndr out_rhs occ_info active stable_unf top_level | assertPpr (isNonCoVarId in_bndr) (ppr in_bndr) -- Type and coercion bindings are caught earlier -- See Note [Core type and coercion invariant] post_inline_unconditionally = ( env' { soe_subst = extendIdSubst (soe_subst env) in_bndr out_rhs } , Nothing) | otherwise = ( env', Just (out_bndr, out_rhs) ) where (env', bndr1) = case mb_out_bndr of Just out_bndr -> (env, out_bndr) Nothing -> subst_opt_bndr env in_bndr out_bndr = add_info env' in_bndr top_level out_rhs bndr1 post_inline_unconditionally :: Bool post_inline_unconditionally | isExportedId in_bndr = False -- Note [Exported Ids and trivial RHSs] | stable_unf = False -- Note [Stable unfoldings and postInlineUnconditionally] | not active = False -- in GHC.Core.Opt.Simplify.Utils | is_loop_breaker = False -- If it's a loop-breaker of any kind, don't inline -- because it might be referred to "earlier" | exprIsTrivial out_rhs = True | coercible_hack = True | otherwise = False is_loop_breaker = isWeakLoopBreaker occ_info -- See Note [Getting the map/coerce RULE to work] coercible_hack | (Var fun, args) <- collectArgs out_rhs , Just dc <- isDataConWorkId_maybe fun , dc `hasKey` heqDataConKey || dc `hasKey` coercibleDataConKey = all exprIsTrivial args | otherwise = False {- Note [Exported Ids and trivial RHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We obviously do not want to unconditionally inline an Id that is exported. 
In GHC.Core.Opt.Simplify.Utils, Note [Top level and postInlineUnconditionally], we explain why we don't inline /any/ top-level things unconditionally, even trivial ones. But we do here! Why? In the simple optimiser * We do no rule rewrites * We do no call-site inlining Those differences obviate the reasons for not inlining a trivial rhs, and increase the benefit for doing so. So we unconditionally inline trivial rhss here. Note [Preserve join-binding arity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Be careful /not/ to eta-reduce the RHS of a join point, lest we lose the join-point arity invariant. #15108 was caused by simplifying the RHS with simple_opt_expr, which does eta-reduction. Solution: simplify the RHS of a join point by simplifying under the lambdas (which of course should be there). Note [simple_app and join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general for let-bindings we can do this: (let { x = e } in b) a ==> let { x = e } in b a But not for join points! For two reasons: - We would need to push the continuation into the RHS: (join { j = e } in b) a ==> let { j' = e a } in b[j'/j] a NB ----^^ and also change the type of j, hence j'. That's a bit sophisticated for the very simple optimiser. - We might end up with something like join { j' = e a } in (case blah of ) ( True -> j' void# ) a ( False -> blah ) and now the call to j' doesn't look like a tail call, and Lint may reject. I say "may" because this is /explicitly/ allowed in the "Compiling without Continuations" paper (Section 3, "Managing \Delta"). But GHC currently does not allow this slightly-more-flexible form. See GHC.Core Note [Join points are less general than the paper]. 
The simple thing to do is to disable this transformation for join points in the simple optimiser Note [The Let-Unfoldings Invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A program has the Let-Unfoldings property iff: - For every let-bound variable f, whether top-level or nested, whether recursive or not: - Both the binding Id of f, and every occurrence Id of f, has an idUnfolding. - For non-INLINE things, that unfolding will be f's right hand sids - For INLINE things (which have a "stable" unfolding) that unfolding is semantically equivalent to f's RHS, but derived from the original RHS of f rather that its current RHS. Informally, we can say that in a program that has the Let-Unfoldings property, all let-bound Id's have an explicit unfolding attached to them. Currently, the simplifier guarantees the Let-Unfoldings invariant for anything it outputs. -} ---------------------- subst_opt_bndrs :: SimpleOptEnv -> [InVar] -> (SimpleOptEnv, [OutVar]) subst_opt_bndrs env bndrs = mapAccumL subst_opt_bndr env bndrs subst_opt_bndr :: SimpleOptEnv -> InVar -> (SimpleOptEnv, OutVar) subst_opt_bndr env bndr | isTyVar bndr = (env { soe_subst = subst_tv }, tv') | isCoVar bndr = (env { soe_subst = subst_cv }, cv') | otherwise = subst_opt_id_bndr env bndr where subst = soe_subst env (subst_tv, tv') = substTyVarBndr subst bndr (subst_cv, cv') = substCoVarBndr subst bndr subst_opt_id_bndr :: SimpleOptEnv -> InId -> (SimpleOptEnv, OutId) -- Nuke all fragile IdInfo, unfolding, and RULES; it gets added back later by -- add_info. 
-- -- Rather like SimplEnv.substIdBndr -- -- It's important to zap fragile OccInfo (which GHC.Core.Subst.substIdBndr -- carefully does not do) because simplOptExpr invalidates it subst_opt_id_bndr env@(SOE { soe_subst = subst, soe_inl = inl }) old_id = (env { soe_subst = new_subst, soe_inl = new_inl }, new_id) where Subst in_scope id_subst tv_subst cv_subst = subst id1 = uniqAway in_scope old_id id2 = updateIdTypeAndMult (substTy subst) id1 new_id = zapFragileIdInfo id2 -- Zaps rules, unfolding, and fragile OccInfo -- The unfolding and rules will get added back later, by add_info new_in_scope = in_scope `extendInScopeSet` new_id no_change = new_id == old_id -- Extend the substitution if the unique has changed, -- See the notes with substTyVarBndr for the delSubstEnv new_id_subst | no_change = delVarEnv id_subst old_id | otherwise = extendVarEnv id_subst old_id (Var new_id) new_subst = Subst new_in_scope new_id_subst tv_subst cv_subst new_inl = delVarEnv inl old_id ---------------------- add_info :: SimpleOptEnv -> InVar -> TopLevelFlag -> OutExpr -> OutVar -> OutVar add_info env old_bndr top_level new_rhs new_bndr | isTyVar old_bndr = new_bndr | otherwise = lazySetIdInfo new_bndr new_info where subst = soe_subst env uf_opts = soe_uf_opts env old_info = idInfo old_bndr -- Add back in the rules and unfolding which were -- removed by zapFragileIdInfo in subst_opt_id_bndr. 
-- -- See Note [The Let-Unfoldings Invariant] new_info = idInfo new_bndr `setRuleInfo` new_rules `setUnfoldingInfo` new_unfolding old_rules = ruleInfo old_info new_rules = substRuleInfo subst new_bndr old_rules old_unfolding = realUnfoldingInfo old_info new_unfolding | isStableUnfolding old_unfolding = substUnfolding subst old_unfolding | otherwise = unfolding_from_rhs unfolding_from_rhs = mkUnfolding uf_opts InlineRhs (isTopLevel top_level) False -- may be bottom or not new_rhs Nothing simpleUnfoldingFun :: IdUnfoldingFun simpleUnfoldingFun id | isAlwaysActive (idInlineActivation id) = idUnfolding id | otherwise = noUnfolding wrapLet :: Maybe (Id,CoreExpr) -> CoreExpr -> CoreExpr wrapLet Nothing body = body wrapLet (Just (b,r)) body = Let (NonRec b r) body {- Note [Inline prag in simplOpt] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If there's an INLINE/NOINLINE pragma that restricts the phase in which the binder can be inlined, we don't inline here; after all, we don't know what phase we're in. Here's an example foo :: Int -> Int -> Int {-# INLINE foo #-} foo m n = inner m where {-# INLINE [1] inner #-} inner m = m+n bar :: Int -> Int bar n = foo n 1 When inlining 'foo' in 'bar' we want the let-binding for 'inner' to remain visible until Phase 1 Note [Unfold compulsory unfoldings in LHSs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When the user writes `RULES map coerce = coerce` as a rule, the rule will only ever match if simpleOptExpr replaces coerce by its unfolding on the LHS, because that is the core that the rule matching engine will find. So do that for everything that has a compulsory unfolding. Also see Note [Desugaring coerce as cast] in GHC.HsToCore. However, we don't want to inline 'seq', which happens to also have a compulsory unfolding, so we only do this unfolding only for things that are always-active. See Note [User-defined RULES for seq] in GHC.Types.Id.Make. 
Note [Getting the map/coerce RULE to work] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We wish to allow the "map/coerce" RULE to fire: {-# RULES "map/coerce" map coerce = coerce #-} The naive core produced for this is forall a b (dict :: Coercible * a b). map @a @b (coerce @a @b @dict) = coerce @[a] @[b] @dict' where dict' :: Coercible [a] [b] dict' = ... This matches literal uses of `map coerce` in code, but that's not what we want. We want it to match, say, `map MkAge` (where newtype Age = MkAge Int) too. Some of this is addressed by compulsorily unfolding coerce on the LHS, yielding forall a b (dict :: Coercible * a b). map @a @b (\(x :: a) -> case dict of MkCoercible (co :: a ~R# b) -> x |> co) = ... Getting better. But this isn't exactly what gets produced. This is because Coercible essentially has ~R# as a superclass, and superclasses get eagerly extracted during solving. So we get this: forall a b (dict :: Coercible * a b). case Coercible_SCSel @* @a @b dict of _ [Dead] -> map @a @b (\(x :: a) -> case dict of MkCoercible (co :: a ~R# b) -> x |> co) = ... Unfortunately, this still abstracts over a Coercible dictionary. We really want it to abstract over the ~R# evidence. So, we have Desugar.unfold_coerce, which transforms the above to (see also Note [Desugaring coerce as cast] in Desugar) forall a b (co :: a ~R# b). let dict = MkCoercible @* @a @b co in case Coercible_SCSel @* @a @b dict of _ [Dead] -> map @a @b (\(x :: a) -> case dict of MkCoercible (co :: a ~R# b) -> x |> co) = let dict = ... in ... Now, we need simpleOptExpr to fix this up. It does so by taking three separate actions: 1. Inline certain non-recursive bindings. The choice whether to inline is made in simple_bind_pair. Note the rather specific check for MkCoercible in there. 2. Stripping case expressions like the Coercible_SCSel one. See the `Case` case of simple_opt_expr's `go` function. 3. Look for case expressions that unpack something that was just packed and inline them. 
This is also done in simple_opt_expr's `go` function. This is all a fair amount of special-purpose hackery, but it's for a good cause. And it won't hurt other RULES and such that it comes across. ************************************************************************ * * Join points * * ************************************************************************ -} {- Note [Strictness and join points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have let f = \x. if x>200 then e1 else e1 and we know that f is strict in x. Then if we subsequently discover that f is an arity-2 join point, we'll eta-expand it to let f = \x y. if x>200 then e1 else e1 and now it's only strict if applied to two arguments. So we should adjust the strictness info. A more common case is when f = \x. error ".." and again its arity increases (#15517) -} -- | Returns Just (bndr,rhs) if the binding is a join point: -- If it's a JoinId, just return it -- If it's not yet a JoinId but is always tail-called, -- make it into a JoinId and return it. 
-- In the latter case, eta-expand the RHS if necessary, to make the
-- lambdas explicit, as is required for join points
--
-- Precondition: the InBndr has been occurrence-analysed,
-- so its OccInfo is valid
joinPointBinding_maybe :: InBndr -> InExpr -> Maybe (InBndr, InExpr)
joinPointBinding_maybe bndr rhs
  | not (isId bndr)      -- Type/coercion binders can never be join points
  = Nothing

  | isJoinId bndr        -- Already a join point: nothing to do
  = Just (bndr, rhs)

  | AlwaysTailCalled join_arity <- tailCallInfo (idOccInfo bndr)
    -- Always tail-called with 'join_arity' args: convert it.
    -- Eta-expand so the join-point lambdas are syntactically visible.
  , (bndrs, body) <- etaExpandToJoinPoint join_arity rhs
  , let str_sig   = idDmdSig bndr
        str_arity = count isId bndrs  -- Strictness demands are for Ids only
        -- Eta-expansion changes the arity the strictness signature is
        -- relative to; adjust it.  See Note [Strictness and join points].
        join_bndr = bndr `asJoinId` join_arity
                         `setIdDmdSig` etaConvertDmdSig str_arity str_sig
  = Just (join_bndr, mkLams bndrs body)

  | otherwise
  = Nothing

-- Succeeds only if /every/ binding in the group can be made a join point
-- (mapM over Maybe fails the whole group on the first Nothing).
joinPointBindings_maybe :: [(InBndr, InExpr)] -> Maybe [(InBndr, InExpr)]
joinPointBindings_maybe bndrs
  = mapM (uncurry joinPointBinding_maybe) bndrs


{- *********************************************************************
*                                                                      *
          exprIsConApp_maybe
*                                                                      *
************************************************************************

Note [exprIsConApp_maybe]
~~~~~~~~~~~~~~~~~~~~~~~~~
exprIsConApp_maybe is a very important function.  There are two principal
uses:
  * case e of { .... }
  * cls_op e, where cls_op is a class operation

In both cases you want to know if e is of form (C e1..en) where C is
a data constructor.

However e might not *look* as if

Note [exprIsConApp_maybe on literal strings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See #9400 and #13317.

Conceptually, a string literal "abc" is just ('a':'b':'c':[]), but in Core
they are represented as unpackCString# "abc"# by GHC.Core.Make.mkStringExprFS,
or unpackCStringUtf8# when the literal contains multi-byte UTF8 characters.

For optimizations we want to be able to treat it as a list, so they can be
decomposed when used in a case-statement. exprIsConApp_maybe detects those
calls to unpackCString# and returns:

Just (':', [Char], ['a', unpackCString# "bc"]).
We need to be careful about UTF8 strings here. ""# contains an encoded ByteString, so we call utf8UnconsByteString to correctly deal with the encoding and splitting. We must also be careful about lvl = "foo"# ...(unpackCString# lvl)... to ensure that we see through the let-binding for 'lvl'. Hence the (exprIsLiteral_maybe .. arg) in the guard before the call to dealWithStringLiteral. The tests for this function are in T9400. Note [Push coercions in exprIsConApp_maybe] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In #13025 I found a case where we had op (df @t1 @t2) -- op is a ClassOp where df = (/\a b. K e1 e2) |> g To get this to come out we need to simplify on the fly ((/\a b. K e1 e2) |> g) @t1 @t2 Hence the use of pushCoArgs. Note [exprIsConApp_maybe on data constructors with wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem: - some data constructors have wrappers - these wrappers inline late (see MkId Note [Activation for data constructor wrappers]) - but we still want case-of-known-constructor to fire early. Example: data T = MkT !Int $WMkT n = case n of n' -> MkT n' -- Wrapper for MkT foo x = case $WMkT e of MkT y -> blah Here we want the case-of-known-constructor transformation to fire, giving foo x = case e of x' -> let y = x' in blah Here's how exprIsConApp_maybe achieves this: 0. Start with scrutinee = $WMkT e 1. Inline $WMkT on-the-fly. That's why data-constructor wrappers are marked as expandable. (See GHC.Core.Utils.isExpandableApp.) Now we have scrutinee = (\n. case n of n' -> MkT n') e 2. Beta-reduce the application, generating a floated 'let'. See Note [beta-reduction in exprIsConApp_maybe] below. Now we have scrutinee = case n of n' -> MkT n' with floats {Let n = e} 3. Float the "case x of x' ->" binding out. Now we have scrutinee = MkT n' with floats {Let n = e; case n of n' ->} And now we have a known-constructor MkT that we can return. 
Notice that both (2) and (3) require exprIsConApp_maybe to gather and return a bunch of floats, both let and case bindings. Note that this strategy introduces some subtle scenarios where a data-con wrapper can be replaced by a data-con worker earlier than we’d like, see Note [exprIsConApp_maybe for data-con wrappers: tricky corner]. Note [beta-reduction in exprIsConApp_maybe] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The unfolding a definition (_e.g._ a let-bound variable or a datacon wrapper) is typically a function. For instance, take the wrapper for MkT in Note [exprIsConApp_maybe on data constructors with wrappers]: $WMkT n = case n of { n' -> T n' } If `exprIsConApp_maybe` is trying to analyse `$MkT arg`, upon unfolding of $MkT, it will see (\n -> case n of { n' -> T n' }) arg In order to go progress, `exprIsConApp_maybe` must perform a beta-reduction. We don't want to blindly substitute `arg` in the body of the function, because it duplicates work. We can (and, in fact, used to) substitute `arg` in the body, but only when `arg` is a variable (or something equally work-free). But, because of Note [exprIsConApp_maybe on data constructors with wrappers], 'exprIsConApp_maybe' now returns floats. So, instead, we can beta-reduce _always_: (\x -> body) arg Is transformed into let x = arg in body Which, effectively, means emitting a float `let x = arg` and recursively analysing the body. For newtypes, this strategy requires that their wrappers have compulsory unfoldings. Suppose we have newtype T a b where MkT :: a -> T b a -- Note args swapped This defines a worker function MkT, a wrapper function $WMkT, and an axT: $WMkT :: forall a b. a -> T b a $WMkT = /\b a. \(x:a). MkT a b x -- A real binding MkT :: forall a b. a -> T a b MkT = /\a b. \(x:a). x |> (ax a b) -- A compulsory unfolding axiom axT :: a ~R# T a b Now we are optimising case $WMkT (I# 3) |> sym axT of I# y -> ... we clearly want to simplify this. 
If $WMkT did not have a compulsory unfolding, we would end up with

    let a = I# 3 in case a of I# y -> ...

because in general, we do this on-the-fly beta-reduction

    (\x. e) blah --> let x = blah in e

and then float the let. (Substitution would risk duplicating 'blah'.)

But if the case-of-known-constructor doesn't actually fire (i.e.
exprIsConApp_maybe does not return Just) then nothing happens, and nothing
will happen the next time either.

See test T16254, which checks the behavior of newtypes.

Note [Don't float join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
exprIsConApp_maybe should succeed on
   let v = e in Just v
returning [x=e] as one of the [FloatBind].  But it must
NOT succeed on
   join j x = rhs in Just v
because join-points can't be gaily floated.  Consider
   case (join j x = rhs in Just) of
     K p q -> blah
We absolutely must not "simplify" this to
   join j x = rhs
   in blah
because j's return type is (Maybe t), quite different to blah's.

You might think this could never happen, because j can't be tail-called
in the body if the body returns a constructor.  But in !3113 we had a
/dead/ join point (which is not illegal), and its return type was wonky.

The simple thing is not to float a join point.  The next iteration of the
simplifier will sort everything out.  And if there is a join point, the
chances are that the body is not a constructor application, so failing
faster is good.

Note [exprIsConApp_maybe for data-con wrappers: tricky corner]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generally speaking

  * exprIsConApp_maybe honours the inline phase; that is, it does not look
    inside the unfolding for an Id unless its unfolding is active in this
    phase.  That phase-sensitivity is expressed in the InScopeEnv
    (specifically, the IdUnfoldingFun component of the InScopeEnv) passed
    to exprIsConApp_maybe.

  * Data-constructor wrappers are active only in phase 0 (the last phase);
    see Note [Activation for data constructor wrappers] in GHC.Types.Id.Make.
On the face of it that means that exprIsConApp_maybe won't look inside data constructor wrappers until phase 0. But that seems pretty Bad. So we cheat. For data con wrappers we unconditionally look inside its unfolding, regardless of phase, so that we get case-of-known-constructor to fire in every phase. Perhaps unsurprisingly, this cheating can backfire. An example: data T = C !A B foo p q = let x = C e1 e2 in seq x $ f x {-# RULE "wurble" f (C a b) = b #-} In Core, the RHS of foo is let x = $WC e1 e2 in case x of y { C _ _ -> f x } and after doing a binder swap and inlining x, we have: case $WC e1 e2 of y { C _ _ -> f y } Case-of-known-constructor fires, but now we have to reconstruct a binding for `y` (which was dead before the binder swap) on the RHS of the case alternative. Naturally, we’ll use the worker: case e1 of a { DEFAULT -> let y = C a e2 in f y } and after inlining `y`, we have: case e1 of a { DEFAULT -> f (C a e2) } Now we might hope the "wurble" rule would fire, but alas, it will not: we have replaced $WC with C, but the (desugared) rule matches on $WC! We weren’t supposed to inline $WC yet for precisely that reason (see Note [Activation for data constructor wrappers]), but our cheating in exprIsConApp_maybe came back to bite us. This is rather unfortunate, especially since this can happen inside stable unfoldings as well as ordinary code (which really happened, see !3041). But there is no obvious solution except to delay case-of-known-constructor on data-con wrappers, and that cure would be worse than the disease. This Note exists solely to document the problem. -} data ConCont = CC [CoreExpr] Coercion -- Substitution already applied -- | Returns @Just ([b1..bp], dc, [t1..tk], [x1..xn])@ if the argument -- expression is a *saturated* constructor application of the form @let b1 in -- .. let bp in dc t1..tk x1 .. xn@, where t1..tk are the -- *universally-quantified* type args of 'dc'. 
Floats can also be (and most -- likely are) single-alternative case expressions. Why does -- 'exprIsConApp_maybe' return floats? We may have to look through lets and -- cases to detect that we are in the presence of a data constructor wrapper. In -- this case, we need to return the lets and cases that we traversed. See Note -- [exprIsConApp_maybe on data constructors with wrappers]. Data constructor wrappers -- are unfolded late, but we really want to trigger case-of-known-constructor as -- early as possible. See also Note [Activation for data constructor wrappers] -- in "GHC.Types.Id.Make". -- -- We also return the incoming InScopeSet, augmented with -- the binders from any [FloatBind] that we return exprIsConApp_maybe :: HasDebugCallStack => InScopeEnv -> CoreExpr -> Maybe (InScopeSet, [FloatBind], DataCon, [Type], [CoreExpr]) exprIsConApp_maybe (in_scope, id_unf) expr = go (Left in_scope) [] expr (CC [] (mkRepReflCo (exprType expr))) where go :: Either InScopeSet Subst -- Left in-scope means "empty substitution" -- Right subst means "apply this substitution to the CoreExpr" -- NB: in the call (go subst floats expr cont) -- the substitution applies to 'expr', but /not/ to 'floats' or 'cont' -> [FloatBind] -> CoreExpr -> ConCont -- Notice that the floats here are in reverse order -> Maybe (InScopeSet, [FloatBind], DataCon, [Type], [CoreExpr]) go subst floats (Tick t expr) cont | not (tickishIsCode t) = go subst floats expr cont go subst floats (Cast expr co1) (CC args co2) | Just (args', m_co1') <- pushCoArgs (subst_co subst co1) args -- See Note [Push coercions in exprIsConApp_maybe] = case m_co1' of MCo co1' -> go subst floats expr (CC args' (co1' `mkTransCo` co2)) MRefl -> go subst floats expr (CC args' co2) go subst floats (App fun arg) (CC args co) = go subst floats fun (CC (subst_expr subst arg : args) co) go subst floats (Lam bndr body) (CC (arg:args) co) | exprIsTrivial arg -- Don't duplicate stuff! 
= go (extend subst bndr arg) floats body (CC args co) | otherwise = let (subst', bndr') = subst_bndr subst bndr float = FloatLet (NonRec bndr' arg) in go subst' (float:floats) body (CC args co) go subst floats (Let (NonRec bndr rhs) expr) cont | not (isJoinId bndr) -- Crucial guard! See Note [Don't float join points] = let rhs' = subst_expr subst rhs (subst', bndr') = subst_bndr subst bndr float = FloatLet (NonRec bndr' rhs') in go subst' (float:floats) expr cont go subst floats (Case scrut b _ [Alt con vars expr]) cont = let scrut' = subst_expr subst scrut (subst', b') = subst_bndr subst b (subst'', vars') = subst_bndrs subst' vars float = FloatCase scrut' b' con vars' in go subst'' (float:floats) expr cont go (Right sub) floats (Var v) cont = go (Left (substInScope sub)) floats (lookupIdSubst sub v) cont go (Left in_scope) floats (Var fun) cont@(CC args co) | Just con <- isDataConWorkId_maybe fun , count isValArg args == idArity fun = succeedWith in_scope floats $ pushCoDataCon con args co -- Look through data constructor wrappers: they inline late (See Note -- [Activation for data constructor wrappers]) but we want to do -- case-of-known-constructor optimisation eagerly (see Note -- [exprIsConApp_maybe on data constructors with wrappers]). | isDataConWrapId fun , let rhs = uf_tmpl (realIdUnfolding fun) = go (Left in_scope) floats rhs cont -- Look through dictionary functions; see Note [Unfolding DFuns] | DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = dfun_args } <- unfolding , bndrs `equalLength` args -- See Note [DFun arity check] , let in_scope' = extend_in_scope (exprsFreeVars dfun_args) subst = mkOpenSubst in_scope' (bndrs `zip` args) -- We extend the in-scope set here to silence warnings from -- substExpr when it finds not-in-scope Ids in dfun_args. 
-- simplOptExpr initialises the in-scope set with exprFreeVars, -- but that doesn't account for DFun unfoldings = succeedWith in_scope floats $ pushCoDataCon con (map (substExpr subst) dfun_args) co -- Look through unfoldings, but only arity-zero one; -- if arity > 0 we are effectively inlining a function call, -- and that is the business of callSiteInline. -- In practice, without this test, most of the "hits" were -- CPR'd workers getting inlined back into their wrappers, | idArity fun == 0 , Just rhs <- expandUnfolding_maybe unfolding , let in_scope' = extend_in_scope (exprFreeVars rhs) = go (Left in_scope') floats rhs cont -- See Note [exprIsConApp_maybe on literal strings] | (fun `hasKey` unpackCStringIdKey) || (fun `hasKey` unpackCStringUtf8IdKey) , [arg] <- args , Just (LitString str) <- exprIsLiteral_maybe (in_scope, id_unf) arg = succeedWith in_scope floats $ dealWithStringLiteral fun str co where unfolding = id_unf fun extend_in_scope unf_fvs | isLocalId fun = in_scope `extendInScopeSetSet` unf_fvs | otherwise = in_scope -- A GlobalId has no (LocalId) free variables; and the -- in-scope set tracks only LocalIds go _ _ _ _ = Nothing succeedWith :: InScopeSet -> [FloatBind] -> Maybe (DataCon, [Type], [CoreExpr]) -> Maybe (InScopeSet, [FloatBind], DataCon, [Type], [CoreExpr]) succeedWith in_scope rev_floats x = do { (con, tys, args) <- x ; let floats = reverse rev_floats ; return (in_scope, floats, con, tys, args) } ---------------------------- -- Operations on the (Either InScopeSet GHC.Core.Subst) -- The Left case is wildly dominant subst_co (Left {}) co = co subst_co (Right s) co = GHC.Core.Subst.substCo s co subst_expr (Left {}) e = e subst_expr (Right s) e = substExpr s e subst_bndr msubst bndr = (Right subst', bndr') where (subst', bndr') = substBndr subst bndr subst = case msubst of Left in_scope -> mkEmptySubst in_scope Right subst -> subst subst_bndrs subst bs = mapAccumL subst_bndr subst bs extend (Left in_scope) v e = Right (extendSubst 
(mkEmptySubst in_scope) v e) extend (Right s) v e = Right (extendSubst s v e) -- See Note [exprIsConApp_maybe on literal strings] dealWithStringLiteral :: Var -> BS.ByteString -> Coercion -> Maybe (DataCon, [Type], [CoreExpr]) -- This is not possible with user-supplied empty literals, GHC.Core.Make.mkStringExprFS -- turns those into [] automatically, but just in case something else in GHC -- generates a string literal directly. dealWithStringLiteral fun str co = case utf8UnconsByteString str of Nothing -> pushCoDataCon nilDataCon [Type charTy] co Just (char, charTail) -> let char_expr = mkConApp charDataCon [mkCharLit char] -- In singleton strings, just add [] instead of unpackCstring# ""#. rest = if BS.null charTail then mkConApp nilDataCon [Type charTy] else App (Var fun) (Lit (LitString charTail)) in pushCoDataCon consDataCon [Type charTy, char_expr, rest] co {- Note [Unfolding DFuns] ~~~~~~~~~~~~~~~~~~~~~~ DFuns look like df :: forall a b. (Eq a, Eq b) -> Eq (a,b) df a b d_a d_b = MkEqD (a,b) ($c1 a b d_a d_b) ($c2 a b d_a d_b) So to split it up we just need to apply the ops $c1, $c2 etc to the very same args as the dfun. It takes a little more work to compute the type arguments to the dictionary constructor. Note [DFun arity check] ~~~~~~~~~~~~~~~~~~~~~~~ Here we check that the total number of supplied arguments (including type args) matches what the dfun is expecting. This may be *less* than the ordinary arity of the dfun: see Note [DFun unfoldings] in GHC.Core -} exprIsLiteral_maybe :: InScopeEnv -> CoreExpr -> Maybe Literal -- Same deal as exprIsConApp_maybe, but much simpler -- Nevertheless we do need to look through unfoldings for -- string literals, which are vigorously hoisted to top level -- and not subsequently inlined exprIsLiteral_maybe env@(_, id_unf) e = case e of Lit l -> Just l Tick _ e' -> exprIsLiteral_maybe env e' -- dubious? 
Var v -> expandUnfolding_maybe (id_unf v) >>= exprIsLiteral_maybe env _ -> Nothing {- Note [exprIsLambda_maybe] ~~~~~~~~~~~~~~~~~~~~~~~~~~ exprIsLambda_maybe will, given an expression `e`, try to turn it into the form `Lam v e'` (returned as `Just (v,e')`). Besides using lambdas, it looks through casts (using the Push rule), and it unfolds function calls if the unfolding has a greater arity than arguments are present. Currently, it is used in GHC.Core.Rules.match, and is required to make "map coerce = coerce" match. -} exprIsLambda_maybe :: HasDebugCallStack => InScopeEnv -> CoreExpr -> Maybe (Var, CoreExpr,[CoreTickish]) -- See Note [exprIsLambda_maybe] -- The simple case: It is a lambda already exprIsLambda_maybe _ (Lam x e) = Just (x, e, []) -- Still straightforward: Ticks that we can float out of the way exprIsLambda_maybe (in_scope_set, id_unf) (Tick t e) | tickishFloatable t , Just (x, e, ts) <- exprIsLambda_maybe (in_scope_set, id_unf) e = Just (x, e, t:ts) -- Also possible: A casted lambda. Push the coercion inside exprIsLambda_maybe (in_scope_set, id_unf) (Cast casted_e co) | Just (x, e,ts) <- exprIsLambda_maybe (in_scope_set, id_unf) casted_e -- Only do value lambdas. 
-- this implies that x is not in scope in gamma (makes this code simpler) , not (isTyVar x) && not (isCoVar x) , assert (not $ x `elemVarSet` tyCoVarsOfCo co) True , Just (x',e') <- pushCoercionIntoLambda in_scope_set x e co , let res = Just (x',e',ts) = --pprTrace "exprIsLambda_maybe:Cast" (vcat [ppr casted_e,ppr co,ppr res)]) res -- Another attempt: See if we find a partial unfolding exprIsLambda_maybe (in_scope_set, id_unf) e | (Var f, as, ts) <- collectArgsTicks tickishFloatable e , idArity f > count isValArg as -- Make sure there is hope to get a lambda , Just rhs <- expandUnfolding_maybe (id_unf f) -- Optimize, for beta-reduction , let e' = simpleOptExprWith defaultSimpleOpts (mkEmptySubst in_scope_set) (rhs `mkApps` as) -- Recurse, because of possible casts , Just (x', e'', ts') <- exprIsLambda_maybe (in_scope_set, id_unf) e' , let res = Just (x', e'', ts++ts') = -- pprTrace "exprIsLambda_maybe:Unfold" (vcat [ppr e, ppr (x',e'')]) res exprIsLambda_maybe _ _e = -- pprTrace "exprIsLambda_maybe:Fail" (vcat [ppr _e]) Nothing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Stats.hs0000644000000000000000000001137114472400112020160 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-2015 -} -- | Functions to computing the statistics reflective of the "size" -- of a Core expression module GHC.Core.Stats ( -- * Expression and bindings size coreBindsSize, exprSize, CoreStats(..), coreBindsStats, exprStats, ) where import GHC.Prelude import GHC.Types.Basic import GHC.Core import GHC.Utils.Outputable import GHC.Core.Coercion import GHC.Types.Tickish import GHC.Types.Var import GHC.Core.Type(Type, typeSize) import GHC.Types.Id (isJoinId) data CoreStats = CS { cs_tm :: !Int -- Terms , cs_ty :: !Int -- Types , cs_co :: !Int -- Coercions , cs_vb :: !Int -- Local value bindings , cs_jb :: !Int } -- Local join bindings instance Outputable CoreStats where ppr (CS { cs_tm = i1, cs_ty = i2, cs_co = i3, cs_vb = i4, 
cs_jb = i5 }) = braces (sep [text "terms:" <+> intWithCommas i1 <> comma, text "types:" <+> intWithCommas i2 <> comma, text "coercions:" <+> intWithCommas i3 <> comma, text "joins:" <+> intWithCommas i5 <> char '/' <> intWithCommas (i4 + i5) ]) plusCS :: CoreStats -> CoreStats -> CoreStats plusCS (CS { cs_tm = p1, cs_ty = q1, cs_co = r1, cs_vb = v1, cs_jb = j1 }) (CS { cs_tm = p2, cs_ty = q2, cs_co = r2, cs_vb = v2, cs_jb = j2 }) = CS { cs_tm = p1+p2, cs_ty = q1+q2, cs_co = r1+r2, cs_vb = v1+v2 , cs_jb = j1+j2 } zeroCS, oneTM :: CoreStats zeroCS = CS { cs_tm = 0, cs_ty = 0, cs_co = 0, cs_vb = 0, cs_jb = 0 } oneTM = zeroCS { cs_tm = 1 } sumCS :: (a -> CoreStats) -> [a] -> CoreStats sumCS f = foldl' (\s a -> plusCS s (f a)) zeroCS coreBindsStats :: [CoreBind] -> CoreStats coreBindsStats = sumCS (bindStats TopLevel) bindStats :: TopLevelFlag -> CoreBind -> CoreStats bindStats top_lvl (NonRec v r) = bindingStats top_lvl v r bindStats top_lvl (Rec prs) = sumCS (\(v,r) -> bindingStats top_lvl v r) prs bindingStats :: TopLevelFlag -> Var -> CoreExpr -> CoreStats bindingStats top_lvl v r = letBndrStats top_lvl v `plusCS` exprStats r bndrStats :: Var -> CoreStats bndrStats v = oneTM `plusCS` tyStats (varType v) letBndrStats :: TopLevelFlag -> Var -> CoreStats letBndrStats top_lvl v | isTyVar v || isTopLevel top_lvl = bndrStats v | isJoinId v = oneTM { cs_jb = 1 } `plusCS` ty_stats | otherwise = oneTM { cs_vb = 1 } `plusCS` ty_stats where ty_stats = tyStats (varType v) exprStats :: CoreExpr -> CoreStats exprStats (Var {}) = oneTM exprStats (Lit {}) = oneTM exprStats (Type t) = tyStats t exprStats (Coercion c) = coStats c exprStats (App f a) = exprStats f `plusCS` exprStats a exprStats (Lam b e) = bndrStats b `plusCS` exprStats e exprStats (Let b e) = bindStats NotTopLevel b `plusCS` exprStats e exprStats (Case e b _ as) = exprStats e `plusCS` bndrStats b `plusCS` sumCS altStats as exprStats (Cast e co) = coStats co `plusCS` exprStats e exprStats (Tick _ e) = exprStats e 
altStats :: CoreAlt -> CoreStats
altStats (Alt _ bs r) = altBndrStats bs `plusCS` exprStats r

altBndrStats :: [Var] -> CoreStats
-- Charge one for the alternative, not for each binder
-- (binders contribute only their type sizes)
altBndrStats vs = oneTM `plusCS` sumCS (tyStats . varType) vs

-- | Stats for a type: only the cs_ty field is non-zero.
tyStats :: Type -> CoreStats
tyStats ty = zeroCS { cs_ty = typeSize ty }

-- | Stats for a coercion: only the cs_co field is non-zero.
coStats :: Coercion -> CoreStats
coStats co = zeroCS { cs_co = coercionSize co }

coreBindsSize :: [CoreBind] -> Int
-- We use coreBindStats for user printout
-- but this one is a quick and dirty basis for
-- the simplifier's tick limit
coreBindsSize bs = sum (map bindSize bs)

exprSize :: CoreExpr -> Int
-- ^ A measure of the size of the expressions, strictly greater than 0
-- Counts *leaves*, not internal nodes. Types and coercions are not counted.
exprSize (Var _)         = 1
exprSize (Lit _)         = 1
exprSize (App f a)       = exprSize f + exprSize a
exprSize (Lam b e)       = bndrSize b + exprSize e
exprSize (Let b e)       = bindSize b + exprSize e
exprSize (Case e b _ as) = exprSize e + bndrSize b + 1 + sum (map altSize as)
exprSize (Cast e _)      = 1 + exprSize e
exprSize (Tick n e)      = tickSize n + exprSize e
exprSize (Type _)        = 1
exprSize (Coercion _)    = 1

-- Every tick costs 1, regardless of its kind
tickSize :: CoreTickish -> Int
tickSize (ProfNote _ _ _) = 1
tickSize _ = 1

-- Every binder costs 1; its type is not counted (see exprSize)
bndrSize :: Var -> Int
bndrSize _ = 1

bndrsSize :: [Var] -> Int
bndrsSize = sum .
map bndrSize bindSize :: CoreBind -> Int bindSize (NonRec b e) = bndrSize b + exprSize e bindSize (Rec prs) = sum (map pairSize prs) pairSize :: (Var, CoreExpr) -> Int pairSize (b,e) = bndrSize b + exprSize e altSize :: CoreAlt -> Int altSize (Alt _ bs e) = bndrsSize bs + exprSize e ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Subst.hs0000644000000000000000000007631114472400112020167 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Utility functions on @Core@ syntax -} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Core.Subst ( -- * Main data types Subst(..), -- Implementation exported for supercompiler's Renaming.hs only TvSubstEnv, IdSubstEnv, InScopeSet, -- ** Substituting into expressions and related types deShadowBinds, substRuleInfo, substRulesForImportedIds, substTy, substCo, substExpr, substExprSC, substBind, substBindSC, substUnfolding, substUnfoldingSC, lookupIdSubst, substIdType, substIdOcc, substTickish, substDVarSet, substIdInfo, -- ** Operations on substitutions emptySubst, mkEmptySubst, mkSubst, mkOpenSubst, substInScope, isEmptySubst, extendIdSubst, extendIdSubstList, extendTCvSubst, extendTvSubstList, extendSubst, extendSubstList, extendSubstWithVar, zapSubstEnv, extendSubstInScope, extendSubstInScopeList, extendSubstInScopeSet, isInScope, setInScope, getTCvSubst, extendTvSubst, extendCvSubst, delBndr, delBndrs, -- ** Substituting and cloning binders substBndr, substBndrs, substRecBndrs, substTyVarBndr, substCoVarBndr, cloneBndr, cloneBndrs, cloneIdBndr, cloneIdBndrs, cloneRecIdBndrs, ) where import GHC.Prelude import GHC.Core import GHC.Core.FVs import GHC.Core.Seq import GHC.Core.Utils import qualified GHC.Core.Type as Type import qualified GHC.Core.Coercion as Coercion -- We are defining local versions import GHC.Core.Type hiding ( substTy, extendTvSubst, extendCvSubst, extendTvSubstList , isInScope, substTyVarBndr, cloneTyVarBndr ) import GHC.Core.Coercion 
hiding ( substCo, substCoVarBndr ) import GHC.Types.Var.Set import GHC.Types.Var.Env as InScopeSet import GHC.Types.Id import GHC.Types.Name ( Name ) import GHC.Types.Var import GHC.Types.Tickish import GHC.Types.Id.Info import GHC.Types.Unique.Supply import GHC.Builtin.Names import GHC.Data.Maybe import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.List (mapAccumL) {- ************************************************************************ * * \subsection{Substitutions} * * ************************************************************************ -} -- | A substitution environment, containing 'Id', 'TyVar', and 'CoVar' -- substitutions. -- -- Some invariants apply to how you use the substitution: -- -- 1. Note [The substitution invariant] in "GHC.Core.TyCo.Subst" -- -- 2. Note [Substitutions apply only once] in "GHC.Core.TyCo.Subst" data Subst = Subst InScopeSet -- Variables in scope (both Ids and TyVars) /after/ -- applying the substitution IdSubstEnv -- Substitution from NcIds to CoreExprs TvSubstEnv -- Substitution from TyVars to Types CvSubstEnv -- Substitution from CoVars to Coercions -- INVARIANT 1: See TyCoSubst Note [The substitution invariant] -- This is what lets us deal with name capture properly -- It's a hard invariant to check... -- -- INVARIANT 2: The substitution is apply-once; see Note [Apply once] with -- Types.TvSubstEnv -- -- INVARIANT 3: See Note [Extending the Subst] {- Note [Extending the Subst] ~~~~~~~~~~~~~~~~~~~~~~~~~~ For a core Subst, which binds Ids as well, we make a different choice for Ids than we do for TyVars. For TyVars, see Note [Extending the TCvSubst] in GHC.Core.TyCo.Subst. For Ids, we have a different invariant The IdSubstEnv is extended *only* when the Unique on an Id changes Otherwise, we just extend the InScopeSet In consequence: * If all subst envs are empty, substExpr would be a no-op, so substExprSC ("short cut") does nothing. 
However, substExpr still goes ahead and substitutes. Reason: we may want to replace existing Ids with new ones from the in-scope set, to avoid space leaks. * In substIdBndr, we extend the IdSubstEnv only when the unique changes * If the CvSubstEnv, TvSubstEnv and IdSubstEnv are all empty, substExpr does nothing (Note that the above rule for substIdBndr maintains this property. If the incoming envts are both empty, then substituting the type and IdInfo can't change anything.) * In lookupIdSubst, we *must* look up the Id in the in-scope set, because it may contain non-trivial changes. Example: (/\a. \x:a. ...x...) Int We extend the TvSubstEnv with [a |-> Int]; but x's unique does not change so we only extend the in-scope set. Then we must look up in the in-scope set when we find the occurrence of x. * The requirement to look up the Id in the in-scope set means that we must NOT take no-op short cut when the IdSubst is empty. We must still look up every Id in the in-scope set. * (However, we don't need to do so for expressions found in the IdSubst itself, whose range is assumed to be correct wrt the in-scope set.) Why do we make a different choice for the IdSubstEnv than the TvSubstEnv and CvSubstEnv? * For Ids, we change the IdInfo all the time (e.g. deleting the unfolding), and adding it back later, so using the TyVar convention would entail extending the substitution almost all the time * The simplifier wants to look up in the in-scope set anyway, in case it can see a better unfolding from an enclosing case expression * For TyVars, only coercion variables can possibly change, and they are easy to spot -} -- | An environment for substituting for 'Id's type IdSubstEnv = IdEnv CoreExpr -- Domain is NcIds, i.e. 
--                 not coercions

----------------------------
-- | True if all three substitution environments (Id, TyVar, CoVar) are
-- empty.  NB: the in-scope set is ignored; see Note [Extending the Subst].
isEmptySubst :: Subst -> Bool
isEmptySubst (Subst _ id_env tv_env cv_env)
  = isEmptyVarEnv id_env && isEmptyVarEnv tv_env && isEmptyVarEnv cv_env

emptySubst :: Subst
emptySubst = Subst emptyInScopeSet emptyVarEnv emptyVarEnv emptyVarEnv

-- | An empty substitution carrying the given in-scope set.
mkEmptySubst :: InScopeSet -> Subst
mkEmptySubst in_scope = Subst in_scope emptyVarEnv emptyVarEnv emptyVarEnv

-- NB: argument order (tvs, cvs, ids) differs from the constructor's
-- field order (ids, tvs, cvs)
mkSubst :: InScopeSet -> TvSubstEnv -> CvSubstEnv -> IdSubstEnv -> Subst
mkSubst in_scope tvs cvs ids = Subst in_scope ids tvs cvs

-- | Find the in-scope set: see "GHC.Core.TyCo.Subst" Note [The substitution invariant]
substInScope :: Subst -> InScopeSet
substInScope (Subst in_scope _ _ _) = in_scope

-- | Remove all substitutions for 'Id's and 'Var's that might have been built up
-- while preserving the in-scope set
zapSubstEnv :: Subst -> Subst
zapSubstEnv (Subst in_scope _ _ _)
  = Subst in_scope emptyVarEnv emptyVarEnv emptyVarEnv

-- | Add a substitution for an 'Id' to the 'Subst': you must ensure that the
-- in-scope set is such that TyCoSubst Note [The substitution invariant]
-- holds after extending the substitution like this
extendIdSubst :: Subst -> Id -> CoreExpr -> Subst
-- ToDo: add an ASSERT that fvs(subst-result) is already in the in-scope set
extendIdSubst (Subst in_scope ids tvs cvs) v r
  = assertPpr (isNonCoVarId v) (ppr v $$ ppr r) $
    Subst in_scope (extendVarEnv ids v r) tvs cvs

-- | Adds multiple 'Id' substitutions to the 'Subst': see also 'extendIdSubst'
extendIdSubstList :: Subst -> [(Id, CoreExpr)] -> Subst
extendIdSubstList (Subst in_scope ids tvs cvs) prs
  = assert (all (isNonCoVarId . fst) prs) $
    Subst in_scope (extendVarEnvList ids prs) tvs cvs

-- | Add a substitution for a 'TyVar' to the 'Subst'
-- The 'TyVar' *must* be a real TyVar, and not a CoVar
-- You must ensure that the in-scope set is such that
-- "GHC.Core.TyCo.Subst" Note [The substitution invariant] holds
-- after extending the substitution like this.
extendTvSubst :: Subst -> TyVar -> Type -> Subst
extendTvSubst (Subst in_scope ids tvs cvs) tv ty
  = assert (isTyVar tv) $
    Subst in_scope ids (extendVarEnv tvs tv ty) cvs

-- | Adds multiple 'TyVar' substitutions to the 'Subst': see also 'extendTvSubst'
extendTvSubstList :: Subst -> [(TyVar,Type)] -> Subst
extendTvSubstList subst prs
  = foldl' (\acc (tv, ty) -> extendTvSubst acc tv ty) subst prs

-- | Add a substitution from a 'CoVar' to a 'Coercion' to the 'Subst':
-- you must ensure that the in-scope set satisfies
-- "GHC.Core.TyCo.Subst" Note [The substitution invariant]
-- after extending the substitution like this
extendCvSubst :: Subst -> CoVar -> Coercion -> Subst
extendCvSubst (Subst in_scope ids tvs cvs) cv co
  = assert (isCoVar cv) $
    Subst in_scope ids tvs (extendVarEnv cvs cv co)

-- | Add a substitution appropriate to the thing being substituted
--   (whether an expression, type, or coercion). See also
--   'extendIdSubst', 'extendTvSubst', 'extendCvSubst'
extendSubst :: Subst -> Var -> CoreArg -> Subst
-- Dispatch on the form of the argument: a 'Type' goes into the TvSubstEnv,
-- a 'Coercion' into the CvSubstEnv, anything else into the IdSubstEnv
extendSubst subst var (Type ty)     = assert (isTyVar var) $ extendTvSubst subst var ty
extendSubst subst var (Coercion co) = assert (isCoVar var) $ extendCvSubst subst var co
extendSubst subst var expr          = assert (isId var)    $ extendIdSubst subst var expr

extendSubstWithVar :: Subst -> Var -> Var -> Subst
-- Substitute one variable for another, extending whichever
-- environment matches the flavour of the old variable
extendSubstWithVar subst old_var new_var
  | isTyVar old_var = assert (isTyVar new_var) $ extendTvSubst subst old_var (mkTyVarTy new_var)
  | isCoVar old_var = assert (isCoVar new_var) $ extendCvSubst subst old_var (mkCoVarCo new_var)
  | otherwise       = assert (isId new_var)    $ extendIdSubst subst old_var (Var new_var)

-- | Add a substitution as appropriate to each of the terms being
--   substituted (whether expressions, types, or coercions). See also
--   'extendSubst'.
extendSubstList :: Subst -> [(Var,CoreArg)] -> Subst
extendSubstList subst []              = subst
extendSubstList subst ((var,rhs):prs) = extendSubstList (extendSubst subst var rhs) prs

-- | Find the substitution for an 'Id' in the 'Subst'
lookupIdSubst :: HasDebugCallStack => Subst -> Id -> CoreExpr
lookupIdSubst (Subst in_scope ids _ _) v
  | not (isLocalId v) = Var v
  | Just e  <- lookupVarEnv ids v       = e
  | Just v' <- lookupInScope in_scope v = Var v'
        -- Vital! See Note [Extending the Subst]
  -- If v isn't in the InScopeSet, we panic, because
  -- it's a bad bug and we really want to know
  | otherwise = pprPanic "lookupIdSubst" (ppr v $$ ppr in_scope)

delBndr :: Subst -> Var -> Subst
delBndr (Subst in_scope ids tvs cvs) v
  | isCoVar v = Subst in_scope ids tvs (delVarEnv cvs v)
  | isTyVar v = Subst in_scope ids (delVarEnv tvs v) cvs
  | otherwise = Subst in_scope (delVarEnv ids v) tvs cvs

delBndrs :: Subst -> [Var] -> Subst
delBndrs (Subst in_scope ids tvs cvs) vs
  = Subst in_scope (delVarEnvList ids vs) (delVarEnvList tvs vs) (delVarEnvList cvs vs)
      -- Easiest thing is just delete all from all!

-- | Simultaneously substitute for a bunch of variables
--   No left-right shadowing
--   ie the substitution for   (\x \y. e) a1 a2
--      so neither x nor y scope over a1 a2
mkOpenSubst :: InScopeSet -> [(Var,CoreArg)] -> Subst
mkOpenSubst in_scope pairs = Subst in_scope
                                   (mkVarEnv [(id,e)  | (id, e) <- pairs, isId id])
                                   (mkVarEnv [(tv,ty) | (tv, Type ty) <- pairs])
                                   (mkVarEnv [(v,co)  | (v, Coercion co) <- pairs])

------------------------------
isInScope :: Var -> Subst -> Bool
isInScope v (Subst in_scope _ _ _) = v `elemInScopeSet` in_scope

-- | Add the 'Var' to the in-scope set
extendSubstInScope :: Subst -> Var -> Subst
extendSubstInScope (Subst in_scope ids tvs cvs) v
  = Subst (in_scope `InScopeSet.extendInScopeSet` v)
          ids tvs cvs

-- | Add the 'Var's to the in-scope set: see also 'extendInScope'
extendSubstInScopeList :: Subst -> [Var] -> Subst
extendSubstInScopeList (Subst in_scope ids tvs cvs) vs
  = Subst (in_scope `extendInScopeSetList` vs)
          ids tvs cvs

-- | Add the 'Var's to the in-scope set: see also 'extendInScope'
extendSubstInScopeSet :: Subst -> VarSet -> Subst
extendSubstInScopeSet (Subst in_scope ids tvs cvs) vs
  = Subst (in_scope `extendInScopeSetSet` vs)
          ids tvs cvs

setInScope :: Subst -> InScopeSet -> Subst
setInScope (Subst _ ids tvs cvs) in_scope
  = Subst in_scope ids tvs cvs

-- Pretty printing, for debugging only
instance Outputable Subst where
  ppr (Subst in_scope ids tvs cvs)
        -- NOTE(review): the "<InScope =" literal had been lost to
        -- angle-bracket stripping in the archived copy; restored here
        =  text "<InScope =" <+> in_scope_doc
        $$ text " IdSubst =" <+> ppr ids
        $$ text " TvSubst =" <+> ppr tvs
        $$ text " CvSubst =" <+> ppr cvs
         <> char '>'
    where
    in_scope_doc = pprVarSet (getInScopeVars in_scope) (braces . fsep . map ppr)

{-
************************************************************************
*                                                                      *
        Substituting expressions
*                                                                      *
************************************************************************
-}

substExprSC :: HasDebugCallStack => Subst -> CoreExpr -> CoreExpr
-- Just like substExpr, but a no-op if the substitution is empty
-- Note that this does /not/ replace occurrences of free vars with
-- their canonical representatives in the in-scope set
substExprSC subst orig_expr
  | isEmptySubst subst = orig_expr
  | otherwise          = -- pprTrace "enter subst-expr" (doc $$ ppr orig_expr) $
                         substExpr subst orig_expr

-- | substExpr applies a substitution to an entire 'CoreExpr'. Remember,
-- you may only apply the substitution /once/:
-- See Note [Substitutions apply only once] in "GHC.Core.TyCo.Subst"
--
-- Do *not* attempt to short-cut in the case of an empty substitution!
-- See Note [Extending the Subst]
substExpr :: HasDebugCallStack => Subst -> CoreExpr -> CoreExpr
   -- HasDebugCallStack so we can track failures in lookupIdSubst
substExpr subst expr
  = go expr
  where
    go (Var v)          = lookupIdSubst subst v
    go (Type ty)        = Type (substTy subst ty)
    go (Coercion co)    = Coercion (substCo subst co)
    go (Lit lit)        = Lit lit
    go (App fun arg)    = App (go fun) (go arg)
    go (Tick tickish e) = mkTick (substTickish subst tickish) (go e)
    go (Cast e co)      = Cast (go e) (substCo subst co)
       -- Do not optimise even identity coercions
       -- Reason: substitution applies to the LHS of RULES, and
       --         if you "optimise" an identity coercion, you may
       --         lose a binder. We optimise the LHS of rules at
       --         construction time

    go (Lam bndr body) = Lam bndr' (substExpr subst' body)
                       where
                         (subst', bndr') = substBndr subst bndr

    go (Let bind body) = Let bind' (substExpr subst' body)
                       where
                         (subst', bind') = substBind subst bind

    go (Case scrut bndr ty alts)
      = Case (go scrut) bndr' (substTy subst ty) (map (go_alt subst') alts)
      where
        (subst', bndr') = substBndr subst bndr

    go_alt subst (Alt con bndrs rhs) = Alt con bndrs' (substExpr subst' rhs)
                                     where
                                       (subst', bndrs') = substBndrs subst bndrs

-- | Apply a substitution to an entire 'CoreBind', additionally returning an updated 'Subst'
-- that should be used by subsequent substitutions.
substBind, substBindSC :: HasDebugCallStack => Subst -> CoreBind -> (Subst, CoreBind)

substBindSC subst bind    -- Short-cut if the substitution is empty
  | not (isEmptySubst subst)
  = substBind subst bind
  | otherwise
  = case bind of
       NonRec bndr rhs -> (subst', NonRec bndr' rhs)
          where
            (subst', bndr') = substBndr subst bndr
       Rec pairs -> (subst', Rec (bndrs' `zip` rhss'))
          where
            (bndrs, rhss)    = unzip pairs
            (subst', bndrs') = substRecBndrs subst bndrs
            rhss' | isEmptySubst subst'
                  = rhss
                  | otherwise
                  = map (substExpr subst') rhss

substBind subst (NonRec bndr rhs)
  = (subst', NonRec bndr' (substExpr subst rhs))
  where
    (subst', bndr') = substBndr subst bndr

substBind subst (Rec pairs)
   = (subst', Rec (bndrs' `zip` rhss'))
   where
    (bndrs, rhss)    = unzip pairs
    (subst', bndrs') = substRecBndrs subst bndrs
    rhss' = map (substExpr subst') rhss

-- | De-shadowing the program is sometimes a useful pre-pass. It can be done simply
-- by running over the bindings with an empty substitution, because substitution
-- returns a result that has no-shadowing guaranteed.
--
-- (Actually, within a single /type/ there might still be shadowing, because
-- 'substTy' is a no-op for the empty substitution, but that's probably OK.)
--
-- [Aug 09] This function is not used in GHC at the moment, but seems so
--          short and simple that I'm going to leave it here
deShadowBinds :: CoreProgram -> CoreProgram
deShadowBinds binds = snd (mapAccumL substBind emptySubst binds)

{-
************************************************************************
*                                                                      *
        Substituting binders
*                                                                      *
************************************************************************

Remember that substBndr and friends are used when doing expression
substitution only.  Their only business is substitution, so they
preserve all IdInfo (suitably substituted).  For example, we *want* to
preserve occ info in rules.
-}

-- | Substitutes a 'Var' for another one according to the 'Subst' given, returning
-- the result and an updated 'Subst' that should be used by subsequent substitutions.
-- 'IdInfo' is preserved by this process, although it is substituted into appropriately.
substBndr :: Subst -> Var -> (Subst, Var)
substBndr subst bndr
  | isTyVar bndr  = substTyVarBndr subst bndr
  | isCoVar bndr  = substCoVarBndr subst bndr
  | otherwise     = substIdBndr (text "var-bndr") subst subst bndr

-- | Applies 'substBndr' to a number of 'Var's, accumulating a new 'Subst' left-to-right
substBndrs :: Subst -> [Var] -> (Subst, [Var])
substBndrs subst bndrs = mapAccumL substBndr subst bndrs

-- | Substitute in a mutually recursive group of 'Id's
substRecBndrs :: Subst -> [Id] -> (Subst, [Id])
substRecBndrs subst bndrs
  = (new_subst, new_bndrs)
  where         -- Here's the reason we need to pass rec_subst to subst_id
    (new_subst, new_bndrs) = mapAccumL (substIdBndr (text "rec-bndr") new_subst) subst bndrs

substIdBndr :: SDoc
            -> Subst            -- ^ Substitution to use for the IdInfo
            -> Subst -> Id      -- ^ Substitution and Id to transform
            -> (Subst, Id)      -- ^ Transformed pair
                                -- NB: unfolding may be zapped
substIdBndr _doc rec_subst subst@(Subst in_scope env tvs cvs) old_id
  = -- pprTrace "substIdBndr" (doc $$ ppr old_id $$ ppr in_scope) $
    (Subst (in_scope `InScopeSet.extendInScopeSet` new_id)
           new_env tvs cvs, new_id)
  where
    id1 = uniqAway in_scope old_id      -- id1 is cloned if necessary
    id2 | no_type_change = id1
        | otherwise      = updateIdTypeAndMult (substTy subst) id1

    old_ty = idType old_id
    old_w  = idMult old_id
    no_type_change = (isEmptyVarEnv tvs && isEmptyVarEnv cvs) ||
                     (noFreeVarsOfType old_ty && noFreeVarsOfType old_w)

        -- new_id has the right IdInfo
        -- The lazy-set is because we're in a loop here, with
        -- rec_subst, when dealing with a mutually-recursive group
    new_id = maybeModifyIdInfo mb_new_info id2
    mb_new_info = substIdInfo rec_subst id2 (idInfo id2)
        -- NB: unfolding info may be zapped

        -- Extend the substitution if the unique has changed
        -- See the notes with substTyVarBndr for the delVarEnv
    new_env | no_change = delVarEnv env old_id
            | otherwise = extendVarEnv env old_id (Var new_id)

    no_change = id1 == old_id
        -- See Note [Extending the Subst]
        -- it's /not/ necessary to check mb_new_info and no_type_change

{-
Now a variant that unconditionally allocates a new unique.
It also unconditionally zaps the OccInfo.
-}

-- | Very similar to 'substBndr', but it always allocates a new 'Unique' for
-- each variable in its output. It substitutes the IdInfo though.
cloneIdBndr :: Subst -> UniqSupply -> Id -> (Subst, Id) cloneIdBndr subst us old_id = clone_id subst subst (old_id, uniqFromSupply us) -- | Applies 'cloneIdBndr' to a number of 'Id's, accumulating a final -- substitution from left to right cloneIdBndrs :: Subst -> UniqSupply -> [Id] -> (Subst, [Id]) cloneIdBndrs subst us ids = mapAccumL (clone_id subst) subst (ids `zip` uniqsFromSupply us) cloneBndrs :: Subst -> UniqSupply -> [Var] -> (Subst, [Var]) -- Works for all kinds of variables (typically case binders) -- not just Ids cloneBndrs subst us vs = mapAccumL (\subst (v, u) -> cloneBndr subst u v) subst (vs `zip` uniqsFromSupply us) cloneBndr :: Subst -> Unique -> Var -> (Subst, Var) cloneBndr subst uniq v | isTyVar v = cloneTyVarBndr subst v uniq | otherwise = clone_id subst subst (v,uniq) -- Works for coercion variables too -- | Clone a mutually recursive group of 'Id's cloneRecIdBndrs :: Subst -> UniqSupply -> [Id] -> (Subst, [Id]) cloneRecIdBndrs subst us ids = (subst', ids') where (subst', ids') = mapAccumL (clone_id subst') subst (ids `zip` uniqsFromSupply us) -- Just like substIdBndr, except that it always makes a new unique -- It is given the unique to use clone_id :: Subst -- Substitution for the IdInfo -> Subst -> (Id, Unique) -- Substitution and Id to transform -> (Subst, Id) -- Transformed pair clone_id rec_subst subst@(Subst in_scope idvs tvs cvs) (old_id, uniq) = (Subst (in_scope `InScopeSet.extendInScopeSet` new_id) new_idvs tvs new_cvs, new_id) where id1 = setVarUnique old_id uniq id2 = substIdType subst id1 new_id = maybeModifyIdInfo (substIdInfo rec_subst id2 (idInfo old_id)) id2 (new_idvs, new_cvs) | isCoVar old_id = (idvs, extendVarEnv cvs old_id (mkCoVarCo new_id)) | otherwise = (extendVarEnv idvs old_id (Var new_id), cvs) {- ************************************************************************ * * Types and Coercions * * ************************************************************************ For types and coercions we just call the 
corresponding functions in Type and Coercion, but we have to repackage the substitution, from a Subst to a TCvSubst. -} substTyVarBndr :: Subst -> TyVar -> (Subst, TyVar) substTyVarBndr (Subst in_scope id_env tv_env cv_env) tv = case Type.substTyVarBndr (TCvSubst in_scope tv_env cv_env) tv of (TCvSubst in_scope' tv_env' cv_env', tv') -> (Subst in_scope' id_env tv_env' cv_env', tv') cloneTyVarBndr :: Subst -> TyVar -> Unique -> (Subst, TyVar) cloneTyVarBndr (Subst in_scope id_env tv_env cv_env) tv uniq = case Type.cloneTyVarBndr (TCvSubst in_scope tv_env cv_env) tv uniq of (TCvSubst in_scope' tv_env' cv_env', tv') -> (Subst in_scope' id_env tv_env' cv_env', tv') substCoVarBndr :: Subst -> CoVar -> (Subst, CoVar) substCoVarBndr (Subst in_scope id_env tv_env cv_env) cv = case Coercion.substCoVarBndr (TCvSubst in_scope tv_env cv_env) cv of (TCvSubst in_scope' tv_env' cv_env', cv') -> (Subst in_scope' id_env tv_env' cv_env', cv') -- | See 'GHC.Core.Type.substTy'. substTy :: Subst -> Type -> Type substTy subst ty = Type.substTyUnchecked (getTCvSubst subst) ty getTCvSubst :: Subst -> TCvSubst getTCvSubst (Subst in_scope _ tenv cenv) = TCvSubst in_scope tenv cenv -- | See 'Coercion.substCo' substCo :: HasCallStack => Subst -> Coercion -> Coercion substCo subst co = Coercion.substCo (getTCvSubst subst) co {- ************************************************************************ * * \section{IdInfo substitution} * * ************************************************************************ -} substIdType :: Subst -> Id -> Id substIdType subst@(Subst _ _ tv_env cv_env) id | (isEmptyVarEnv tv_env && isEmptyVarEnv cv_env) || (noFreeVarsOfType old_ty && noFreeVarsOfType old_w) = id | otherwise = updateIdTypeAndMult (substTy subst) id -- The tyCoVarsOfType is cheaper than it looks -- because we cache the free tyvars of the type -- in a Note in the id's type itself where old_ty = idType id old_w = varMult id ------------------ -- | Substitute into some 'IdInfo' with regard to the 
supplied new 'Id'. substIdInfo :: Subst -> Id -> IdInfo -> Maybe IdInfo substIdInfo subst new_id info | nothing_to_do = Nothing | otherwise = Just (info `setRuleInfo` substRuleInfo subst new_id old_rules `setUnfoldingInfo` substUnfolding subst old_unf) where old_rules = ruleInfo info old_unf = realUnfoldingInfo info nothing_to_do = isEmptyRuleInfo old_rules && not (hasCoreUnfolding old_unf) ------------------ -- | Substitutes for the 'Id's within an unfolding -- NB: substUnfolding /discards/ any unfolding without -- without a Stable source. This is usually what we want, -- but it may be a bit unexpected substUnfolding, substUnfoldingSC :: Subst -> Unfolding -> Unfolding -- Seq'ing on the returned Unfolding is enough to cause -- all the substitutions to happen completely substUnfoldingSC subst unf -- Short-cut version | isEmptySubst subst = unf | otherwise = substUnfolding subst unf substUnfolding subst df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) = df { df_bndrs = bndrs', df_args = args' } where (subst',bndrs') = substBndrs subst bndrs args' = map (substExpr subst') args substUnfolding subst unf@(CoreUnfolding { uf_tmpl = tmpl, uf_src = src }) -- Retain an InlineRule! | not (isStableSource src) -- Zap an unstable unfolding, to save substitution work = NoUnfolding | otherwise -- But keep a stable one! 
= seqExpr new_tmpl `seq` unf { uf_tmpl = new_tmpl } where new_tmpl = substExpr subst tmpl substUnfolding _ unf = unf -- NoUnfolding, OtherCon ------------------ substIdOcc :: Subst -> Id -> Id -- These Ids should not be substituted to non-Ids substIdOcc subst v = case lookupIdSubst subst v of Var v' -> v' other -> pprPanic "substIdOcc" (vcat [ppr v <+> ppr other, ppr subst]) ------------------ -- | Substitutes for the 'Id's within the 'RuleInfo' given the new function 'Id' substRuleInfo :: Subst -> Id -> RuleInfo -> RuleInfo substRuleInfo subst new_id (RuleInfo rules rhs_fvs) = RuleInfo (map (substRule subst subst_ru_fn) rules) (substDVarSet subst rhs_fvs) where subst_ru_fn = const (idName new_id) ------------------ substRulesForImportedIds :: Subst -> [CoreRule] -> [CoreRule] substRulesForImportedIds subst rules = map (substRule subst not_needed) rules where not_needed name = pprPanic "substRulesForImportedIds" (ppr name) ------------------ substRule :: Subst -> (Name -> Name) -> CoreRule -> CoreRule -- The subst_ru_fn argument is applied to substitute the ru_fn field -- of the rule: -- - Rules for *imported* Ids never change ru_fn -- - Rules for *local* Ids are in the IdInfo for that Id, -- and the ru_fn field is simply replaced by the new name -- of the Id substRule _ _ rule@(BuiltinRule {}) = rule substRule subst subst_ru_fn rule@(Rule { ru_bndrs = bndrs, ru_args = args , ru_fn = fn_name, ru_rhs = rhs , ru_local = is_local }) = rule { ru_bndrs = bndrs' , ru_fn = if is_local then subst_ru_fn fn_name else fn_name , ru_args = map (substExpr subst') args , ru_rhs = substExpr subst' rhs } -- Do NOT optimise the RHS (previously we did simplOptExpr here) -- See Note [Substitute lazily] where (subst', bndrs') = substBndrs subst bndrs ------------------ substDVarSet :: HasDebugCallStack => Subst -> DVarSet -> DVarSet substDVarSet subst@(Subst _ _ tv_env cv_env) fvs = mkDVarSet $ fst $ foldr subst_fv ([], emptyVarSet) $ dVarSetElems fvs where subst_fv :: Var -> ([Var], 
VarSet) -> ([Var], VarSet) subst_fv fv acc | isTyVar fv , let fv_ty = lookupVarEnv tv_env fv `orElse` mkTyVarTy fv = tyCoFVsOfType fv_ty (const True) emptyVarSet $! acc | isCoVar fv , let fv_co = lookupVarEnv cv_env fv `orElse` mkCoVarCo fv = tyCoFVsOfCo fv_co (const True) emptyVarSet $! acc | otherwise , let fv_expr = lookupIdSubst subst fv = expr_fvs fv_expr isLocalVar emptyVarSet $! acc ------------------ substTickish :: Subst -> CoreTickish -> CoreTickish substTickish subst (Breakpoint ext n ids) = Breakpoint ext n (map do_one ids) where do_one = getIdFromTrivialExpr . lookupIdSubst subst substTickish _subst other = other {- Note [Substitute lazily] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The functions that substitute over IdInfo must be pretty lazy, because they are knot-tied by substRecBndrs. One case in point was #10627 in which a rule for a function 'f' referred to 'f' (at a different type) on the RHS. But instead of just substituting in the rhs of the rule, we were calling simpleOptExpr, which looked at the idInfo for 'f'; result <>. In any case we don't need to optimise the RHS of rules, or unfoldings, because the simplifier will do that. Another place this went wrong was in `substRuleInfo`, which would immediately force the lazy call to substExpr, which led to an infinite loop (as reported by #20112). This time the call stack looked something like: * `substRecBndrs` * `substIdBndr` * `substIdInfo` * `substRuleInfo` * `substRule` * `substExpr` * `mkTick` * `isSaturatedConApp` * Look at `IdInfo` for thing we are currently substituting because the rule is attached to `transpose` and mentions it in the `RHS` of the rule. and the rule was {-# RULES "transpose/overlays1" forall xs. transpose (overlays1 xs) = overlays1 (fmap transpose xs) #-} This rule was attached to `transpose`, but also mentions itself in the RHS so we have to be careful to not force the `IdInfo` for transpose when dealing with the RHS of the rule. 
Note [substTickish] ~~~~~~~~~~~~~~~~~~~~~~ A Breakpoint contains a list of Ids. What happens if we ever want to substitute an expression for one of these Ids? First, we ensure that we only ever substitute trivial expressions for these Ids, by marking them as NoOccInfo in the occurrence analyser. Then, when substituting for the Id, we unwrap any type applications and abstractions to get back to an Id, with getIdFromTrivialExpr. Second, we have to ensure that we never try to substitute a literal for an Id in a breakpoint. We ensure this by never storing an Id with an unlifted type in a Breakpoint - see GHC.HsToCore.Coverage.mkTickish. Breakpoints can't handle free variables with unlifted types anyway. -} {- Note [Worker inlining] ~~~~~~~~~~~~~~~~~~~~~~ A worker can get substituted away entirely. - it might be trivial - it might simply be very small We do not treat an InlWrapper as an 'occurrence' in the occurrence analyser, so it's possible that the worker is not even in scope any more. In all these cases we simply drop the special case, returning to InlVanilla. The WARN is just so I can see if it happens a lot. -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Tidy.hs0000644000000000000000000004045514472400112020000 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1996-1998 This module contains "tidying" code for *nested* expressions, bindings, rules. The code for *top-level* bindings is in GHC.Iface.Tidy. 
-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Core.Tidy ( tidyExpr, tidyRules, tidyCbvInfoTop, tidyBndrs ) where import GHC.Prelude import GHC.Core import GHC.Core.Type import GHC.Core.Seq ( seqUnfolding ) import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand ( zapDmdEnvSig, isStrUsedDmd ) import GHC.Core.Coercion ( tidyCo ) import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Unique (getUnique) import GHC.Types.Unique.FM import GHC.Types.Name hiding (tidyNameOcc) import GHC.Types.Name.Set import GHC.Types.SrcLoc import GHC.Types.Tickish import GHC.Data.Maybe import GHC.Utils.Misc import Data.List (mapAccumL) -- import GHC.Utils.Trace import GHC.Utils.Outputable import GHC.Types.RepType (typePrimRep) import GHC.Utils.Panic import GHC.Types.Basic (isMarkedCbv, CbvMark (..)) import GHC.Core.Utils (shouldUseCbvForId) {- ************************************************************************ * * \subsection{Tidying expressions, rules} * * ************************************************************************ -} tidyBind :: TidyEnv -> CoreBind -> (TidyEnv, CoreBind) tidyBind env (NonRec bndr rhs) = -- pprTrace "tidyBindNonRec" (ppr bndr) $ let cbv_bndr = (tidyCbvInfoLocal bndr rhs) (env', bndr') = tidyLetBndr env env cbv_bndr tidy_rhs = (tidyExpr env' rhs) in (env', NonRec bndr' tidy_rhs) tidyBind env (Rec prs) = -- pprTrace "tidyBindRec" (ppr $ map fst prs) $ let cbv_bndrs = map ((\(bnd,rhs) -> tidyCbvInfoLocal bnd rhs)) prs (_bndrs, rhss) = unzip prs (env', bndrs') = mapAccumL (tidyLetBndr env') env cbv_bndrs in map (tidyExpr env') rhss =: \ rhss' -> (env', Rec (zip bndrs' rhss')) -- Note [Attaching CBV Marks to ids] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- See Note [CBV Function Ids] for the *why*. -- Before tidy, we turn all worker functions into worker like ids. -- This way we can later tell if we can assume the existence of a wrapper. 
This also applies to -- specialized versions of functions generated by SpecConstr for which we, in a sense, -- consider the unspecialized version to be the wrapper. -- During tidy we take the demands on the arguments for these ids and compute -- CBV (call-by-value) semantics for each individual argument. -- The marks themselves then are put onto the function id itself. -- This means the code generator can get the full calling convention by only looking at the function -- itself without having to inspect the RHS. -- -- The actual logic is in computeCbvInfo and takes: -- * The function id -- * The functions rhs -- And gives us back the function annotated with the marks. -- We call it in: -- * tidyTopPair for top level bindings -- * tidyBind for local bindings. -- -- Not that we *have* to look at the untidied rhs. -- During tidying some knot-tying occurs which can blow up -- if we look at the post-tidy types of the arguments here. -- However we only care if the types are unlifted and that doesn't change during tidy. -- so we can just look at the untidied types. -- -- If the id is boot-exported we don't use a cbv calling convention via marks, -- as the boot file won't contain them. Which means code calling boot-exported -- ids might expect these ids to have a vanilla calling convention even if we -- determine a different one here. -- To be able to avoid this we pass a set of boot exported ids for this module around. -- For non top level ids we can skip this. Local ids are never boot-exported -- as boot files don't have unfoldings. So there this isn't a concern. 
-- See also Note [CBV Function Ids] -- See Note [CBV Function Ids] tidyCbvInfoTop :: HasDebugCallStack => NameSet -> Id -> CoreExpr -> Id tidyCbvInfoTop boot_exports id rhs -- Can't change calling convention for boot exported things | elemNameSet (idName id) boot_exports = id | otherwise = computeCbvInfo id rhs -- See Note [CBV Function Ids] tidyCbvInfoLocal :: HasDebugCallStack => Id -> CoreExpr -> Id tidyCbvInfoLocal id rhs | otherwise = computeCbvInfo id rhs -- | For a binding we: -- * Look at the args -- * Mark any argument as call-by-value if: -- - It's argument to a worker and demanded strictly -- - Unless it's an unlifted type already -- * Update the id -- See Note [CBV Function Ids] -- See Note [Attaching CBV Marks to ids] computeCbvInfo :: HasCallStack => Id -- The function -> CoreExpr -- It's RHS -> Id -- computeCbvInfo fun_id rhs = fun_id computeCbvInfo fun_id rhs | (isWorkerLike || isJoinId fun_id) && (valid_unlifted_worker val_args) = -- pprTrace "computeCbvInfo" -- (text "fun" <+> ppr fun_id $$ -- text "arg_tys" <+> ppr (map idType val_args) $$ -- text "prim_rep" <+> ppr (map typePrimRep_maybe $ map idType val_args) $$ -- text "rrarg" <+> ppr (map isRuntimeVar val_args) $$ -- text "cbv_marks" <+> ppr cbv_marks $$ -- text "out_id" <+> ppr cbv_bndr $$ -- ppr rhs) cbv_bndr | otherwise = fun_id where val_args = filter isId . fst $ collectBinders rhs cbv_marks = -- CBV marks are only set during tidy so none should be present already. assertPpr (maybe True null $ idCbvMarks_maybe fun_id) (ppr fun_id <+> (ppr $ idCbvMarks_maybe fun_id) $$ ppr rhs) $ map mkMark val_args cbv_bndr | valid_unlifted_worker val_args , any isMarkedCbv cbv_marks -- seqList to avoid retaining the original rhs = cbv_marks `seqList` setIdCbvMarks fun_id cbv_marks | otherwise = -- pprTraceDebug "tidyCbvInfo: Worker seems to take unboxed tuple/sum types!" 
(ppr fun_id <+> ppr rhs) asNonWorkerLikeId fun_id -- We don't set CBV marks on functions which take unboxed tuples or sums as arguments. -- Doing so would require us to compute the result of unarise here in order to properly determine -- argument positions at runtime. -- In practice this doesn't matter much. Most "interesting" functions will get a W/W split which will eliminate -- unboxed tuple arguments, and unboxed sums are rarely used. But we could change this in the future and support -- unboxed sums/tuples as well. valid_unlifted_worker args = -- pprTrace "valid_unlifted" (ppr fun_id $$ ppr args) $ all isSingleUnarisedArg args isSingleUnarisedArg v | isUnboxedSumType ty = False | isUnboxedTupleType ty = isSimplePrimRep (typePrimRep ty) | otherwise = isSimplePrimRep (typePrimRep ty) where ty = idType v isSimplePrimRep [] = True isSimplePrimRep [_] = True isSimplePrimRep _ = False mkMark arg | not $ shouldUseCbvForId arg = NotMarkedCbv -- We can only safely use cbv for strict arguments | (isStrUsedDmd (idDemandInfo arg)) , not (isDeadEndId fun_id) = MarkedCbv | otherwise = NotMarkedCbv isWorkerLike = isWorkerLikeId fun_id ------------ Expressions -------------- tidyExpr :: TidyEnv -> CoreExpr -> CoreExpr tidyExpr env (Var v) = Var (tidyVarOcc env v) tidyExpr env (Type ty) = Type (tidyType env ty) tidyExpr env (Coercion co) = Coercion (tidyCo env co) tidyExpr _ (Lit lit) = Lit lit tidyExpr env (App f a) = App (tidyExpr env f) (tidyExpr env a) tidyExpr env (Tick t e) = Tick (tidyTickish env t) (tidyExpr env e) tidyExpr env (Cast e co) = Cast (tidyExpr env e) (tidyCo env co) tidyExpr env (Let b e) = tidyBind env b =: \ (env', b') -> Let b' (tidyExpr env' e) tidyExpr env (Case e b ty alts) = tidyBndr env b =: \ (env', b) -> Case (tidyExpr env e) b (tidyType env ty) (map (tidyAlt env') alts) tidyExpr env (Lam b e) = tidyBndr env b =: \ (env', b) -> Lam b (tidyExpr env' e) ------------ Case alternatives -------------- tidyAlt :: TidyEnv -> CoreAlt -> CoreAlt tidyAlt 
env (Alt con vs rhs) = tidyBndrs env vs =: \ (env', vs) -> (Alt con vs (tidyExpr env' rhs)) ------------ Tickish -------------- tidyTickish :: TidyEnv -> CoreTickish -> CoreTickish tidyTickish env (Breakpoint ext ix ids) = Breakpoint ext ix (map (tidyVarOcc env) ids) tidyTickish _ other_tickish = other_tickish ------------ Rules -------------- tidyRules :: TidyEnv -> [CoreRule] -> [CoreRule] tidyRules _ [] = [] tidyRules env (rule : rules) = tidyRule env rule =: \ rule -> tidyRules env rules =: \ rules -> (rule : rules) tidyRule :: TidyEnv -> CoreRule -> CoreRule tidyRule _ rule@(BuiltinRule {}) = rule tidyRule env rule@(Rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = rhs, ru_fn = fn, ru_rough = mb_ns }) = tidyBndrs env bndrs =: \ (env', bndrs) -> map (tidyExpr env') args =: \ args -> rule { ru_bndrs = bndrs, ru_args = args, ru_rhs = tidyExpr env' rhs, ru_fn = tidyNameOcc env fn, ru_rough = map (fmap (tidyNameOcc env')) mb_ns } {- ************************************************************************ * * \subsection{Tidying non-top-level binders} * * ************************************************************************ -} tidyNameOcc :: TidyEnv -> Name -> Name -- In rules and instances, we have Names, and we must tidy them too -- Fortunately, we can lookup in the VarEnv with a name tidyNameOcc (_, var_env) n = case lookupUFM_Directly var_env (getUnique n) of Nothing -> n Just v -> idName v tidyVarOcc :: TidyEnv -> Var -> Var tidyVarOcc (_, var_env) v = lookupVarEnv var_env v `orElse` v -- tidyBndr is used for lambda and case binders tidyBndr :: TidyEnv -> Var -> (TidyEnv, Var) tidyBndr env var | isTyCoVar var = tidyVarBndr env var | otherwise = tidyIdBndr env var tidyBndrs :: TidyEnv -> [Var] -> (TidyEnv, [Var]) tidyBndrs env vars = mapAccumL tidyBndr env vars -- Non-top-level variables, not covars tidyIdBndr :: TidyEnv -> Id -> (TidyEnv, Id) tidyIdBndr env@(tidy_env, var_env) id = -- Do this pattern match strictly, otherwise we end up holding on to -- stuff 
in the OccName. case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') -> let -- Give the Id a fresh print-name, *and* rename its type -- The SrcLoc isn't important now, -- though we could extract it from the Id -- ty' = tidyType env (idType id) mult' = tidyType env (idMult id) name' = mkInternalName (idUnique id) occ' noSrcSpan id' = mkLocalIdWithInfo name' mult' ty' new_info var_env' = extendVarEnv var_env id id' -- Note [Tidy IdInfo] new_info = vanillaIdInfo `setOccInfo` occInfo old_info `setUnfoldingInfo` new_unf -- see Note [Preserve OneShotInfo] `setOneShotInfo` oneShotInfo old_info old_info = idInfo id old_unf = realUnfoldingInfo old_info new_unf = trimUnfolding old_unf -- See Note [Preserve evaluatedness] in ((tidy_env', var_env'), id') } tidyLetBndr :: TidyEnv -- Knot-tied version for unfoldings -> TidyEnv -- The one to extend -> Id -> (TidyEnv, Id) -- Used for local (non-top-level) let(rec)s -- Just like tidyIdBndr above, but with more IdInfo tidyLetBndr rec_tidy_env env@(tidy_env, var_env) id = case tidyOccName tidy_env (getOccName id) of { (tidy_env', occ') -> let ty' = tidyType env (idType id) mult' = tidyType env (idMult id) name' = mkInternalName (idUnique id) occ' noSrcSpan details = idDetails id id' = mkLocalVar details name' mult' ty' new_info var_env' = extendVarEnv var_env id id' -- Note [Tidy IdInfo] -- We need to keep around any interesting strictness and -- demand info because later on we may need to use it when -- converting to A-normal form. -- eg. -- f (g x), where f is strict in its argument, will be converted -- into case (g x) of z -> f z by CorePrep, but only if f still -- has its strictness info. -- -- Similarly for the demand info - on a let binder, this tells -- CorePrep to turn the let into a case. 
-- But: Remove the usage demand here -- (See Note [Zapping DmdEnv after Demand Analyzer] in GHC.Core.Opt.WorkWrap) -- -- Similarly arity info for eta expansion in CorePrep -- Don't attempt to recompute arity here; this is just tidying! -- Trying to do so led to #17294 -- -- Set inline-prag info so that we preserve it across -- separate compilation boundaries old_info = idInfo id new_info = vanillaIdInfo `setOccInfo` occInfo old_info `setArityInfo` arityInfo old_info `setDmdSigInfo` zapDmdEnvSig (dmdSigInfo old_info) `setDemandInfo` demandInfo old_info `setInlinePragInfo` inlinePragInfo old_info `setUnfoldingInfo` new_unf old_unf = realUnfoldingInfo old_info new_unf = tidyNestedUnfolding rec_tidy_env old_unf in ((tidy_env', var_env'), id') } ------------ Unfolding -------------- tidyNestedUnfolding :: TidyEnv -> Unfolding -> Unfolding tidyNestedUnfolding _ NoUnfolding = NoUnfolding tidyNestedUnfolding _ BootUnfolding = BootUnfolding tidyNestedUnfolding _ (OtherCon {}) = evaldUnfolding tidyNestedUnfolding tidy_env df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) = df { df_bndrs = bndrs', df_args = map (tidyExpr tidy_env') args } where (tidy_env', bndrs') = tidyBndrs tidy_env bndrs tidyNestedUnfolding tidy_env unf@(CoreUnfolding { uf_tmpl = unf_rhs, uf_src = src, uf_cache = cache }) | isStableSource src = seqIt $ unf { uf_tmpl = tidyExpr tidy_env unf_rhs } -- Preserves OccInfo -- This seqIt avoids a space leak: otherwise the uf_cache -- field may retain a reference to the pre-tidied -- expression forever (GHC.CoreToIface doesn't look at -- them) -- Discard unstable unfoldings, but see Note [Preserve evaluatedness] | uf_is_value cache = evaldUnfolding | otherwise = noUnfolding where seqIt unf = seqUnfolding unf `seq` unf {- Note [Tidy IdInfo] ~~~~~~~~~~~~~~~~~~ All nested Ids now have the same IdInfo, namely vanillaIdInfo, which should save some space; except that we preserve occurrence info for two reasons: (a) To make printing tidy core nicer (b) Because we 
tidy RULES and InlineRules, which may then propagate via --make
into the compilation of the next module, and we want the benefit of
that occurrence analysis when we use the rule or inline the function.
In particular, it's vital not to lose loop-breaker info, else we get
an infinite inlining loop.

Note that tidyLetBndr puts more IdInfo back.

Note [Preserve evaluatedness]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
  data T = MkT !Bool
  ....(case v of MkT y ->
       let z# = case y of
                  True  -> 1#
                  False -> 2#
       in ...)
The z# binding is ok because the RHS is ok-for-speculation,
but Lint will complain unless it can *see* that.  So we
preserve the evaluated-ness on 'y' in tidyBndr.

(Another alternative would be to tidy unboxed lets into cases,
but that seems more indirect and surprising.)

Note [Preserve OneShotInfo]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We keep the OneShotInfo because we want it to propagate into the interface.
Not all OneShotInfo is determined by a compiler analysis; some is added by a
call of GHC.Exts.oneShot, which is then discarded before the end of the
optimisation pipeline, leaving only the OneShotInfo on the lambda. Hence we
must preserve this info in inlinings. See Note [The oneShot function] in
GHC.Types.Id.Make.

This applies to lambda binders only, hence it is stored in IfaceLamBndr.
-} (=:) :: a -> (a -> b) -> b m =: k = m `seq` k m ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/FVs.hs0000644000000000000000000012105514472400112020437 0ustar0000000000000000 module GHC.Core.TyCo.FVs ( shallowTyCoVarsOfType, shallowTyCoVarsOfTypes, tyCoVarsOfType, tyCoVarsOfTypes, tyCoVarsOfTypeDSet, tyCoVarsOfTypesDSet, tyCoFVsBndr, tyCoFVsVarBndr, tyCoFVsVarBndrs, tyCoFVsOfType, tyCoVarsOfTypeList, tyCoFVsOfTypes, tyCoVarsOfTypesList, deepTcvFolder, shallowTyCoVarsOfTyVarEnv, shallowTyCoVarsOfCoVarEnv, shallowTyCoVarsOfCo, shallowTyCoVarsOfCos, tyCoVarsOfCo, tyCoVarsOfCos, tyCoVarsOfMCo, coVarsOfType, coVarsOfTypes, coVarsOfCo, coVarsOfCos, tyCoVarsOfCoDSet, tyCoFVsOfCo, tyCoFVsOfCos, tyCoVarsOfCoList, almostDevoidCoVarOfCo, -- Injective free vars injectiveVarsOfType, injectiveVarsOfTypes, invisibleVarsOfType, invisibleVarsOfTypes, -- Any and No Free vars anyFreeVarsOfType, anyFreeVarsOfTypes, anyFreeVarsOfCo, noFreeVarsOfType, noFreeVarsOfTypes, noFreeVarsOfCo, -- * Well-scoped free variables scopedSort, tyCoVarsOfTypeWellScoped, tyCoVarsOfTypesWellScoped, -- * Closing over kinds closeOverKindsDSet, closeOverKindsList, closeOverKinds, -- * Raw materials Endo(..), runTyCoVars ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.Type (coreView, partitionInvisibleTypes) import Data.Monoid as DM ( Endo(..), Any(..) ) import GHC.Core.TyCo.Rep import GHC.Core.TyCon import GHC.Types.Var import GHC.Utils.FV import GHC.Types.Unique.FM import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Utils.Misc import GHC.Utils.Panic {- %************************************************************************ %* * Free variables of types and coercions %* * %************************************************************************ -} {- Note [Shallow and deep free variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Definitions * Shallow free variables of a type: the variables affected by substitution. 
Specifically, the (TyVarTy tv) and (CoVar cv) that appear - In the type and coercions appearing in the type - In shallow free variables of the kind of a Forall binder but NOT in the kind of the /occurrences/ of a type variable. * Deep free variables of a type: shallow free variables, plus the deep free variables of the kinds of those variables. That is, deepFVs( t ) = closeOverKinds( shallowFVs( t ) ) Examples: Type Shallow Deep --------------------------------- (a : (k:Type)) {a} {a,k} forall (a:(k:Type)). a {k} {k} (a:k->Type) (b:k) {a,b} {a,b,k} -} {- Note [Free variables of types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The family of functions tyCoVarsOfType, tyCoVarsOfTypes etc, returns a VarSet that is closed over the types of its variables. More precisely, if S = tyCoVarsOfType( t ) and (a:k) is in S then tyCoVarsOftype( k ) is a subset of S Example: The tyCoVars of this ((a:* -> k) Int) is {a, k}. We could /not/ close over the kinds of the variable occurrences, and instead do so at call sites, but it seems that we always want to do so, so it's easiest to do it here. It turns out that getting the free variables of types is performance critical, so we profiled several versions, exploring different implementation strategies. 1. Baseline version: uses FV naively. Essentially: tyCoVarsOfType ty = fvVarSet $ tyCoFVsOfType ty This is not nice, because FV introduces some overhead to implement determinism, and through its "interesting var" function, neither of which we need here, so they are a complete waste. 2. UnionVarSet version: instead of reusing the FV-based code, we simply used VarSets directly, trying to avoid the overhead of FV. 
E.g.: -- FV version: tyCoFVsOfType (AppTy fun arg) a b c = (tyCoFVsOfType fun `unionFV` tyCoFVsOfType arg) a b c -- UnionVarSet version: tyCoVarsOfType (AppTy fun arg) = (tyCoVarsOfType fun `unionVarSet` tyCoVarsOfType arg) This looks deceptively similar, but while FV internally builds a list- and set-generating function, the VarSet functions manipulate sets directly, and the latter performs a lot worse than the naive FV version. 3. Accumulator-style VarSet version: this is what we use now. We do use VarSet as our data structure, but delegate the actual work to a new ty_co_vars_of_... family of functions, which use accumulator style and the "in-scope set" filter found in the internals of FV, but without the determinism overhead. See #14880. Note [Closing over free variable kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ tyCoVarsOfType and tyCoFVsOfType, while traversing a type, will also close over free variable kinds. In previous GHC versions, this happened naively: whenever we would encounter an occurrence of a free type variable, we would close over its kind. This, however is wrong for two reasons (see #14880): 1. Efficiency. If we have Proxy (a::k) -> Proxy (a::k) -> Proxy (a::k), then we don't want to have to traverse k more than once. 2. Correctness. Imagine we have forall k. b -> k, where b has kind k, for some k bound in an outer scope. If we look at b's kind inside the forall, we'll collect that k is free and then remove k from the set of free variables. This is plain wrong. We must instead compute that b is free and then conclude that b's kind is free. An obvious first approach is to move the closing-over-kinds from the occurrences of a type variable to after finding the free vars - however, this turns out to introduce performance regressions, and isn't even entirely correct. In fact, it isn't even important *when* we close over kinds; what matters is that we handle each type var exactly once, and that we do it in the right context. 
So the next approach we tried was to use the "in-scope set" part of FV or the
equivalent argument in the accumulator-style `ty_co_vars_of_type` function, to
say "don't bother with variables we have already closed over". This should
work fine in theory, but the code is complicated and doesn't perform well.

But there is a simpler way, which is implemented here. Consider the two points
above:

1. Efficiency: we now have an accumulator, so the second time we encounter
   'a', we'll ignore it, certainly not looking at its kind - this is why
   pre-checking set membership before inserting ends up not only being faster,
   but also being correct.

2. Correctness: we have an "in-scope set" (I think we should call it a
   "bound-var set"), specifying variables that are bound by a forall in the
   type we are traversing; we simply ignore these variables, certainly not
   looking at their kind. So now consider:

    forall k. b -> k

   where b :: k->Type is free; but of course, it's a different k! When
   looking at b -> k we'll have k in the bound-var set. So we'll ignore the k.
   But suppose this is our first encounter with b; we want the free vars of
   its kind. But we want to behave as if we took the free vars of its kind at
   the end; that is, with no bound vars in scope.

So the solution is easy. The old code was this:

  ty_co_vars_of_type (TyVarTy v) is acc
    | v `elemVarSet` is  = acc
    | v `elemVarSet` acc = acc
    | otherwise          = ty_co_vars_of_type (tyVarKind v) is (extendVarSet acc v)

Now all we need to do is take the free vars of tyVarKind v *with an empty
bound-var set*, thus:

  ty_co_vars_of_type (TyVarTy v) is acc
    | v `elemVarSet` is  = acc
    | v `elemVarSet` acc = acc
    | otherwise          = ty_co_vars_of_type (tyVarKind v) emptyVarSet (extendVarSet acc v)
                                                            ^^^^^^^^^^^

And that's it. This works because a variable is either bound or free. If it
is bound, then we won't look at it at all. If it is free, then all the
variables free in its kind are free -- regardless of whether some local
variable has the same Unique.
So if we're looking at a variable occurrence at all, then all variables in
its kind are free.
-}

{- *********************************************************************
*                                                                      *
                 Endo for free variables
*                                                                      *
********************************************************************* -}

{- Note [Acumulating parameter free variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We can use foldType to build an accumulating-parameter version of a
free-var finder, thus:

    fvs :: Type -> TyCoVarSet
    fvs ty = appEndo (foldType folder ty) emptyVarSet

Recall that
    foldType :: TyCoFolder env a -> env -> Type -> a

    newtype Endo a = Endo (a -> a)   -- In Data.Monoid
    instance Monoid a => Monoid (Endo a) where
       (Endo f) `mappend` (Endo g) = Endo (f.g)

    appEndo :: Endo a -> a -> a
    appEndo (Endo f) x = f x

So `mappend` for Endos is just function composition.

It's very important that, after optimisation, we end up with
* an arity-three function
* that is strict in the accumulator

   fvs env (TyVarTy v) acc
      | v `elemVarSet` env = acc
      | v `elemVarSet` acc = acc
      | otherwise          = acc `extendVarSet` v
   fvs env (AppTy t1 t2)   = fvs env t1 (fvs env t2 acc)
   ...

The "strict in the accumulator" part is to ensure that in the
AppTy equation we don't build a thunk for (fvs env t2 acc).
The optimiser does do all this, but not very robustly.

It depends critically on the basic arity-2 function not being exported, so
that all its calls are visibly to three arguments. This analysis is done
by the Call Arity pass.

TL;DR: check this regularly!
-}

-- | Run an accumulated free-variable computation, starting from the
-- empty set.  See Note [Acumulating parameter free variables] above.
runTyCoVars :: Endo TyCoVarSet -> TyCoVarSet
{-# INLINE runTyCoVars #-}
runTyCoVars f = appEndo f emptyVarSet

{- *********************************************************************
*                                                                      *
          Deep free variables
          See Note [Shallow and deep free variables]
*                                                                      *
********************************************************************* -}

tyCoVarsOfType :: Type -> TyCoVarSet
tyCoVarsOfType ty = runTyCoVars (deep_ty ty)
-- Alternative:
--   tyCoVarsOfType ty = closeOverKinds (shallowTyCoVarsOfType ty)

tyCoVarsOfTypes :: [Type] -> TyCoVarSet
tyCoVarsOfTypes tys = runTyCoVars (deep_tys tys)
-- Alternative:
--   tyCoVarsOfTypes tys = closeOverKinds (shallowTyCoVarsOfTypes tys)

tyCoVarsOfCo :: Coercion -> TyCoVarSet
-- See Note [Free variables of Coercions]
tyCoVarsOfCo co = runTyCoVars (deep_co co)

tyCoVarsOfMCo :: MCoercion -> TyCoVarSet
tyCoVarsOfMCo MRefl    = emptyVarSet
tyCoVarsOfMCo (MCo co) = tyCoVarsOfCo co

tyCoVarsOfCos :: [Coercion] -> TyCoVarSet
tyCoVarsOfCos cos = runTyCoVars (deep_cos cos)

-- The four deep workers are produced together by a single call of
-- foldTyCo, so the folder is built only once.
deep_ty  :: Type       -> Endo TyCoVarSet
deep_tys :: [Type]     -> Endo TyCoVarSet
deep_co  :: Coercion   -> Endo TyCoVarSet
deep_cos :: [Coercion] -> Endo TyCoVarSet
(deep_ty, deep_tys, deep_co, deep_cos) = foldTyCo deepTcvFolder emptyVarSet

deepTcvFolder :: TyCoFolder TyCoVarSet (Endo TyCoVarSet)
deepTcvFolder = TyCoFolder { tcf_view = noView
                           , tcf_tyvar = do_tcv, tcf_covar = do_tcv
                           , tcf_hole  = do_hole, tcf_tycobinder = do_bndr }
  where
    do_tcv is v = Endo do_it
      where
        do_it acc | v `elemVarSet` is  = acc
                  | v `elemVarSet` acc = acc
                  | otherwise          = appEndo (deep_ty (varType v)) $
                                         acc `extendVarSet` v

    do_bndr is tcv _ = extendVarSet is tcv
    do_hole is hole  = do_tcv is (coHoleCoVar hole)
                       -- See Note [CoercionHoles and coercion free variables]
                       -- in GHC.Core.TyCo.Rep

{- *********************************************************************
*                                                                      *
          Shallow free variables
          See Note [Shallow and deep free variables]
*                                                                      *
********************************************************************* -}

shallowTyCoVarsOfType :: Type -> TyCoVarSet
-- See Note [Free variables of types]
shallowTyCoVarsOfType ty = runTyCoVars (shallow_ty ty)

shallowTyCoVarsOfTypes :: [Type] -> TyCoVarSet
shallowTyCoVarsOfTypes tys = runTyCoVars (shallow_tys tys)

shallowTyCoVarsOfCo :: Coercion -> TyCoVarSet
shallowTyCoVarsOfCo co = runTyCoVars (shallow_co co)

shallowTyCoVarsOfCos :: [Coercion] -> TyCoVarSet
shallowTyCoVarsOfCos cos = runTyCoVars (shallow_cos cos)

-- | Returns free variables of types, including kind variables as
-- a non-deterministic set. For type synonyms it does /not/ expand the
-- synonym.
shallowTyCoVarsOfTyVarEnv :: TyVarEnv Type -> TyCoVarSet
-- See Note [Free variables of types]
shallowTyCoVarsOfTyVarEnv tys
  = shallowTyCoVarsOfTypes (nonDetEltsUFM tys)
    -- It's OK to use nonDetEltsUFM here because we immediately
    -- forget the ordering by returning a set

shallowTyCoVarsOfCoVarEnv :: CoVarEnv Coercion -> TyCoVarSet
shallowTyCoVarsOfCoVarEnv cos
  = shallowTyCoVarsOfCos (nonDetEltsUFM cos)
    -- It's OK to use nonDetEltsUFM here because we immediately
    -- forget the ordering by returning a set

shallow_ty  :: Type       -> Endo TyCoVarSet
shallow_tys :: [Type]     -> Endo TyCoVarSet
shallow_co  :: Coercion   -> Endo TyCoVarSet
shallow_cos :: [Coercion] -> Endo TyCoVarSet
(shallow_ty, shallow_tys, shallow_co, shallow_cos) = foldTyCo shallowTcvFolder emptyVarSet

shallowTcvFolder :: TyCoFolder TyCoVarSet (Endo TyCoVarSet)
shallowTcvFolder = TyCoFolder { tcf_view = noView
                              , tcf_tyvar = do_tcv, tcf_covar = do_tcv
                              , tcf_hole  = do_hole, tcf_tycobinder = do_bndr }
  where
    -- Unlike deepTcvFolder, a free variable's kind is NOT traversed here
    do_tcv is v = Endo do_it
      where
        do_it acc | v `elemVarSet` is  = acc
                  | v `elemVarSet` acc = acc
                  | otherwise          = acc `extendVarSet` v

    do_bndr is tcv _ = extendVarSet is tcv
    do_hole _ _  = mempty   -- Ignore coercion holes

{- *********************************************************************
*                                                                      *
          Free coercion variables
*                                                                      *
********************************************************************* -}

{- Note [Finding free coercion variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here we are only interested in the free /coercion/ variables.
We can achieve this through a slightly different TyCo folder.

Notice that we look deeply, into kinds.

See #14880.
-}

-- See Note [Finding free coercion variables]
coVarsOfType  :: Type       -> CoVarSet
coVarsOfTypes :: [Type]     -> CoVarSet
coVarsOfCo    :: Coercion   -> CoVarSet
coVarsOfCos   :: [Coercion] -> CoVarSet

coVarsOfType  ty  = runTyCoVars (deep_cv_ty ty)
coVarsOfTypes tys = runTyCoVars (deep_cv_tys tys)
coVarsOfCo    co  = runTyCoVars (deep_cv_co co)
coVarsOfCos   cos = runTyCoVars (deep_cv_cos cos)

deep_cv_ty  :: Type       -> Endo CoVarSet
deep_cv_tys :: [Type]     -> Endo CoVarSet
deep_cv_co  :: Coercion   -> Endo CoVarSet
deep_cv_cos :: [Coercion] -> Endo CoVarSet
(deep_cv_ty, deep_cv_tys, deep_cv_co, deep_cv_cos)
  = foldTyCo deepCoVarFolder emptyVarSet

deepCoVarFolder :: TyCoFolder TyCoVarSet (Endo CoVarSet)
deepCoVarFolder = TyCoFolder { tcf_view = noView
                             , tcf_tyvar = do_tyvar, tcf_covar = do_covar
                             , tcf_hole  = do_hole, tcf_tycobinder = do_bndr }
  where
    do_tyvar _ _  = mempty
      -- This do_tyvar means we won't see any CoVars in this
      -- TyVar's kind.   This may be wrong; but it's the way it's
      -- always been.  And its awkward to change, because
      -- the tyvar won't end up in the accumulator, so
      -- we'd look repeatedly.  Blargh.

    do_covar is v = Endo do_it
      where
        do_it acc | v `elemVarSet` is  = acc
                  | v `elemVarSet` acc = acc
                  | otherwise          = appEndo (deep_cv_ty (varType v)) $
                                         acc `extendVarSet` v

    do_bndr is tcv _ = extendVarSet is tcv
    do_hole is hole  = do_covar is (coHoleCoVar hole)
                       -- See Note [CoercionHoles and coercion free variables]
                       -- in GHC.Core.TyCo.Rep

{- *********************************************************************
*                                                                      *
          Closing over kinds
*                                                                      *
********************************************************************* -}

------------- Closing over kinds -----------------

closeOverKinds :: TyCoVarSet -> TyCoVarSet
-- For each element of the input set,
-- add the deep free variables of its kind
closeOverKinds vs = nonDetStrictFoldVarSet do_one vs vs
  where
    do_one v acc = appEndo (deep_ty (varType v)) acc

{- --------------- Alternative version 1 (using FV) ------------
closeOverKinds = fvVarSet . closeOverKindsFV . nonDetEltsUniqSet
-}

{- ---------------- Alternative version 2 -------------

-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a non-deterministic set.
closeOverKinds :: TyCoVarSet -> TyCoVarSet
closeOverKinds vs
  = go vs vs
  where
    go :: VarSet  -- Work list
       -> VarSet  -- Accumulator, always a superset of wl
       -> VarSet
    go wl acc
      | isEmptyVarSet wl = acc
      | otherwise        = go wl_kvs (acc `unionVarSet` wl_kvs)
      where
        k v inner_acc = ty_co_vars_of_type (varType v) acc inner_acc
        wl_kvs = nonDetFoldVarSet k emptyVarSet wl
        -- wl_kvs = union of shallow free vars of the kinds of wl
        --          but don't bother to collect vars in acc

-}

{- ---------------- Alternative version 3 -------------
-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a non-deterministic set.
closeOverKinds :: TyVarSet -> TyVarSet
closeOverKinds vs = close_over_kinds vs emptyVarSet


close_over_kinds :: TyVarSet  -- Work list
                 -> TyVarSet  -- Accumulator
                 -> TyVarSet
-- Precondition: in any call (close_over_kinds wl acc)
--  for every tv in acc, the shallow kind-vars of tv
--  are either in the work list wl, or in acc
-- Postcondition: result is the deep free vars of (wl `union` acc)
close_over_kinds wl acc
  = nonDetFoldVarSet do_one acc wl
  where
    do_one :: Var -> TyVarSet -> TyVarSet
    -- (do_one v acc) adds v and its deep free-vars to acc
    do_one v acc | v `elemVarSet` acc = acc
                 | otherwise          = close_over_kinds (shallowTyCoVarsOfType (varType v)) $
                                        acc `extendVarSet` v
-}

{- *********************************************************************
*                                                                      *
          The FV versions return deterministic results
*                                                                      *
********************************************************************* -}

-- | Given a list of tyvars returns a deterministic FV computation that
-- returns the given tyvars with the kind variables free in the kinds of the
-- given tyvars.
closeOverKindsFV :: [TyVar] -> FV
closeOverKindsFV tvs =
  mapUnionFV (tyCoFVsOfType . tyVarKind) tvs `unionFV` mkFVs tvs

-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a deterministically ordered list.
closeOverKindsList :: [TyVar] -> [TyVar]
closeOverKindsList tvs = fvVarList $ closeOverKindsFV tvs

-- | Add the kind variables free in the kinds of the tyvars in the given set.
-- Returns a deterministic set.
closeOverKindsDSet :: DTyVarSet -> DTyVarSet
closeOverKindsDSet = fvDVarSet . closeOverKindsFV . dVarSetElems

-- | `tyCoFVsOfType` that returns free variables of a type in a deterministic
-- set. For explanation of why using `VarSet` is not deterministic see
-- Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfTypeDSet :: Type -> DTyCoVarSet
-- See Note [Free variables of types]
tyCoVarsOfTypeDSet = fvDVarSet . tyCoFVsOfType

-- | `tyCoFVsOfType` that returns free variables of a type in deterministic
-- order. For explanation of why using `VarSet` is not deterministic see
-- Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfTypeList :: Type -> [TyCoVar]
-- See Note [Free variables of types]
tyCoVarsOfTypeList = fvVarList . tyCoFVsOfType

-- | Returns free variables of types, including kind variables, as
-- a deterministic set. For type synonyms it does /not/ expand the
-- synonym.
tyCoVarsOfTypesDSet :: [Type] -> DTyCoVarSet
-- See Note [Free variables of types]
tyCoVarsOfTypesDSet = fvDVarSet . tyCoFVsOfTypes

-- | Returns free variables of types, including kind variables, as
-- a deterministically ordered list. For type synonyms it does /not/
-- expand the synonym.
tyCoVarsOfTypesList :: [Type] -> [TyCoVar]
-- See Note [Free variables of types]
tyCoVarsOfTypesList = fvVarList . tyCoFVsOfTypes

-- | The worker for `tyCoFVsOfType` and `tyCoFVsOfTypeList`.
-- The previous implementation used `unionVarSet` which is O(n+m) and can
-- make the function quadratic.
-- It's exported, so that it can be composed with
-- other functions that compute free variables.
-- See Note [FV naming conventions] in "GHC.Utils.FV".
--
-- Eta-expanded because that makes it run faster (apparently)
-- See Note [FV eta expansion] in "GHC.Utils.FV" for explanation.
tyCoFVsOfType :: Type -> FV
-- See Note [Free variables of types]
tyCoFVsOfType (TyVarTy v)        f bound_vars (acc_list, acc_set)
  | not (f v) = (acc_list, acc_set)
  | v `elemVarSet` bound_vars = (acc_list, acc_set)
  | v `elemVarSet` acc_set = (acc_list, acc_set)
  | otherwise = tyCoFVsOfType (tyVarKind v) f
                               emptyVarSet   -- See Note [Closing over free variable kinds]
                               (v:acc_list, extendVarSet acc_set v)
tyCoFVsOfType (TyConApp _ tys)   f bound_vars acc = tyCoFVsOfTypes tys f bound_vars acc
tyCoFVsOfType (LitTy {})         f bound_vars acc = emptyFV f bound_vars acc
tyCoFVsOfType (AppTy fun arg)    f bound_vars acc = (tyCoFVsOfType fun `unionFV` tyCoFVsOfType arg) f bound_vars acc
tyCoFVsOfType (FunTy _ w arg res)  f bound_vars acc = (tyCoFVsOfType w `unionFV` tyCoFVsOfType arg `unionFV` tyCoFVsOfType res) f bound_vars acc
tyCoFVsOfType (ForAllTy bndr ty) f bound_vars acc = tyCoFVsBndr bndr (tyCoFVsOfType ty) f bound_vars acc
tyCoFVsOfType (CastTy ty co)     f bound_vars acc = (tyCoFVsOfType ty `unionFV` tyCoFVsOfCo co) f bound_vars acc
tyCoFVsOfType (CoercionTy co)    f bound_vars acc = tyCoFVsOfCo co f bound_vars acc

tyCoFVsBndr :: TyCoVarBinder -> FV -> FV
-- Free vars of (forall b. <thing with fvs>)
tyCoFVsBndr (Bndr tv _) fvs = tyCoFVsVarBndr tv fvs

tyCoFVsVarBndrs :: [Var] -> FV -> FV
-- Fold tyCoFVsVarBndr over the binders, innermost-last
tyCoFVsVarBndrs vars fvs = foldr tyCoFVsVarBndr fvs vars

tyCoFVsVarBndr :: Var -> FV -> FV
tyCoFVsVarBndr var fvs
  = tyCoFVsOfType (varType var)   -- Free vars of its type/kind
    `unionFV` delFV var fvs       -- Delete it from the thing-inside

tyCoFVsOfTypes :: [Type] -> FV
-- See Note [Free variables of types]
tyCoFVsOfTypes (ty:tys) fv_cand in_scope acc = (tyCoFVsOfType ty `unionFV` tyCoFVsOfTypes tys) fv_cand in_scope acc
tyCoFVsOfTypes []       fv_cand in_scope acc = emptyFV fv_cand in_scope acc

-- | Get a deterministic set of the vars free in a coercion
tyCoVarsOfCoDSet :: Coercion -> DTyCoVarSet
-- See Note [Free variables of types]
tyCoVarsOfCoDSet co = fvDVarSet $ tyCoFVsOfCo co

tyCoVarsOfCoList :: Coercion -> [TyCoVar]
-- See Note [Free variables of types]
tyCoVarsOfCoList co = fvVarList $ tyCoFVsOfCo co

-- | Free variables of an optional coercion; MRefl carries none
tyCoFVsOfMCo :: MCoercion -> FV
tyCoFVsOfMCo MRefl    = emptyFV
tyCoFVsOfMCo (MCo co) = tyCoFVsOfCo co

tyCoFVsOfCo :: Coercion -> FV
-- Extracts type and coercion variables from a coercion
-- See Note [Free variables of types]
tyCoFVsOfCo (Refl ty) fv_cand in_scope acc
  = tyCoFVsOfType ty fv_cand in_scope acc
tyCoFVsOfCo (GRefl _ ty mco) fv_cand in_scope acc
  = (tyCoFVsOfType ty `unionFV` tyCoFVsOfMCo mco) fv_cand in_scope acc
tyCoFVsOfCo (TyConAppCo _ _ cos) fv_cand in_scope acc = tyCoFVsOfCos cos fv_cand in_scope acc
tyCoFVsOfCo (AppCo co arg) fv_cand in_scope acc
  = (tyCoFVsOfCo co `unionFV` tyCoFVsOfCo arg) fv_cand in_scope acc
tyCoFVsOfCo (ForAllCo tv kind_co co) fv_cand in_scope acc
  = (tyCoFVsVarBndr tv (tyCoFVsOfCo co) `unionFV` tyCoFVsOfCo kind_co) fv_cand in_scope acc
tyCoFVsOfCo (FunCo _ w co1 co2) fv_cand in_scope acc
  = (tyCoFVsOfCo co1 `unionFV` tyCoFVsOfCo co2 `unionFV` tyCoFVsOfCo w) fv_cand in_scope acc
tyCoFVsOfCo (CoVarCo v) fv_cand in_scope acc
  = tyCoFVsOfCoVar v fv_cand in_scope acc
tyCoFVsOfCo (HoleCo h) fv_cand in_scope acc
  = tyCoFVsOfCoVar (coHoleCoVar h) fv_cand in_scope acc
    -- See Note [CoercionHoles and coercion free variables]
tyCoFVsOfCo (AxiomInstCo _ _ cos) fv_cand in_scope acc = tyCoFVsOfCos cos fv_cand in_scope acc
tyCoFVsOfCo (UnivCo p _ t1 t2) fv_cand in_scope acc
  = (tyCoFVsOfProv p `unionFV` tyCoFVsOfType t1
                     `unionFV` tyCoFVsOfType t2) fv_cand in_scope acc
tyCoFVsOfCo (SymCo co)        fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (TransCo co1 co2) fv_cand in_scope acc = (tyCoFVsOfCo co1 `unionFV` tyCoFVsOfCo co2) fv_cand in_scope acc
tyCoFVsOfCo (NthCo _ _ co)    fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (LRCo _ co)       fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (InstCo co arg)   fv_cand in_scope acc = (tyCoFVsOfCo co `unionFV` tyCoFVsOfCo arg) fv_cand in_scope acc
tyCoFVsOfCo (KindCo co)       fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (SubCo co)        fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfCo (AxiomRuleCo _ cs) fv_cand in_scope acc = tyCoFVsOfCos cs fv_cand in_scope acc

tyCoFVsOfCoVar :: CoVar -> FV
-- The covar itself, plus the free vars of its type
tyCoFVsOfCoVar v fv_cand in_scope acc
  = (unitFV v `unionFV` tyCoFVsOfType (varType v)) fv_cand in_scope acc

tyCoFVsOfProv :: UnivCoProvenance -> FV
tyCoFVsOfProv (PhantomProv co)    fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfProv (ProofIrrelProv co) fv_cand in_scope acc = tyCoFVsOfCo co fv_cand in_scope acc
tyCoFVsOfProv (PluginProv _)      fv_cand in_scope acc = emptyFV fv_cand in_scope acc
tyCoFVsOfProv (CorePrepProv _)    fv_cand in_scope acc = emptyFV fv_cand in_scope acc

tyCoFVsOfCos :: [Coercion] -> FV
tyCoFVsOfCos []       fv_cand in_scope acc = emptyFV fv_cand in_scope acc
tyCoFVsOfCos (co:cos) fv_cand in_scope acc = (tyCoFVsOfCo co `unionFV` tyCoFVsOfCos cos) fv_cand in_scope acc

----- Whether a covar is /Almost Devoid/ in a type or coercion ----

-- | Given a covar and a coercion, returns True if covar is almost devoid in
-- the coercion. That is, covar can only appear in Refl and GRefl.
-- See last wrinkle in Note [Unused coercion variable in ForAllCo] in "GHC.Core.Coercion"
almostDevoidCoVarOfCo :: CoVar -> Coercion -> Bool
almostDevoidCoVarOfCo cv co =
  almost_devoid_co_var_of_co co cv

almost_devoid_co_var_of_co :: Coercion -> CoVar -> Bool
almost_devoid_co_var_of_co (Refl {}) _ = True   -- covar is allowed in Refl and
almost_devoid_co_var_of_co (GRefl {}) _ = True  -- GRefl, so we don't look into
                                                -- the coercions
almost_devoid_co_var_of_co (TyConAppCo _ _ cos) cv
  = almost_devoid_co_var_of_cos cos cv
almost_devoid_co_var_of_co (AppCo co arg) cv
  = almost_devoid_co_var_of_co co cv
  && almost_devoid_co_var_of_co arg cv
almost_devoid_co_var_of_co (ForAllCo v kind_co co) cv
  -- A binder shadowing cv makes the body irrelevant (v == cv case)
  = almost_devoid_co_var_of_co kind_co cv
  && (v == cv || almost_devoid_co_var_of_co co cv)
almost_devoid_co_var_of_co (FunCo _ w co1 co2) cv
  = almost_devoid_co_var_of_co w cv
  && almost_devoid_co_var_of_co co1 cv
  && almost_devoid_co_var_of_co co2 cv
almost_devoid_co_var_of_co (CoVarCo v) cv = v /= cv
almost_devoid_co_var_of_co (HoleCo h)  cv = (coHoleCoVar h) /= cv
almost_devoid_co_var_of_co (AxiomInstCo _ _ cos) cv
  = almost_devoid_co_var_of_cos cos cv
almost_devoid_co_var_of_co (UnivCo p _ t1 t2) cv
  = almost_devoid_co_var_of_prov p cv
  && almost_devoid_co_var_of_type t1 cv
  && almost_devoid_co_var_of_type t2 cv
almost_devoid_co_var_of_co (SymCo co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (TransCo co1 co2) cv
  = almost_devoid_co_var_of_co co1 cv
  && almost_devoid_co_var_of_co co2 cv
almost_devoid_co_var_of_co (NthCo _ _ co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (LRCo _ co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (InstCo co arg) cv
  = almost_devoid_co_var_of_co co cv
  && almost_devoid_co_var_of_co arg cv
almost_devoid_co_var_of_co (KindCo co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (SubCo co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_co (AxiomRuleCo _ cs) cv
  = almost_devoid_co_var_of_cos cs cv

almost_devoid_co_var_of_cos :: [Coercion] -> CoVar -> Bool
almost_devoid_co_var_of_cos [] _ = True
almost_devoid_co_var_of_cos (co:cos) cv
  = almost_devoid_co_var_of_co co cv
  && almost_devoid_co_var_of_cos cos cv

almost_devoid_co_var_of_prov :: UnivCoProvenance -> CoVar -> Bool
almost_devoid_co_var_of_prov (PhantomProv co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_prov (ProofIrrelProv co) cv
  = almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_prov (PluginProv _) _ = True
almost_devoid_co_var_of_prov (CorePrepProv _) _ = True

almost_devoid_co_var_of_type :: Type -> CoVar -> Bool
almost_devoid_co_var_of_type (TyVarTy _) _ = True
almost_devoid_co_var_of_type (TyConApp _ tys) cv
  = almost_devoid_co_var_of_types tys cv
almost_devoid_co_var_of_type (LitTy {}) _ = True
almost_devoid_co_var_of_type (AppTy fun arg) cv
  = almost_devoid_co_var_of_type fun cv
  && almost_devoid_co_var_of_type arg cv
almost_devoid_co_var_of_type (FunTy _ w arg res) cv
  = almost_devoid_co_var_of_type w cv
  && almost_devoid_co_var_of_type arg cv
  && almost_devoid_co_var_of_type res cv
almost_devoid_co_var_of_type (ForAllTy (Bndr v _) ty) cv
  = almost_devoid_co_var_of_type (varType v) cv
  && (v == cv || almost_devoid_co_var_of_type ty cv)
almost_devoid_co_var_of_type (CastTy ty co) cv
  = almost_devoid_co_var_of_type ty cv
  && almost_devoid_co_var_of_co co cv
almost_devoid_co_var_of_type (CoercionTy co) cv
  = almost_devoid_co_var_of_co co cv

almost_devoid_co_var_of_types :: [Type] -> CoVar -> Bool
almost_devoid_co_var_of_types [] _ = True
almost_devoid_co_var_of_types (ty:tys) cv
  = almost_devoid_co_var_of_type ty cv
  && almost_devoid_co_var_of_types tys cv

{- *********************************************************************
*                                                                      *
                 Injective free vars
*                                                                      *
********************************************************************* -}

-- | Returns the free variables of a 'Type' that are in injective positions.
-- Specifically, it finds the free variables while:
--
-- * Expanding type synonyms
--
-- * Ignoring the coercion in @(ty |> co)@
--
-- * Ignoring the non-injective fields of a 'TyConApp'
--
--
-- For example, if @F@ is a non-injective type family, then:
--
-- @
-- injectiveTyVarsOf( Either c (Maybe (a, F b c)) ) = {a,c}
-- @
--
-- If @'injectiveVarsOfType' ty = itvs@, then knowing @ty@ fixes @itvs@.
-- More formally, if
-- @a@ is in @'injectiveVarsOfType' ty@
-- and  @S1(ty) ~ S2(ty)@,
-- then @S1(a)  ~ S2(a)@,
-- where @S1@ and @S2@ are arbitrary substitutions.
--
-- See @Note [When does a tycon application need an explicit kind signature?]@.
injectiveVarsOfType :: Bool   -- ^ Should we look under injective type families?
                              -- See Note [Coverage condition for injective type families]
                              -- in "GHC.Tc.Instance.Family".
                    -> Type -> FV
injectiveVarsOfType look_under_tfs = go
  where
    -- Expand synonyms first, so we see through them
    go ty | Just ty' <- coreView ty
          = go ty'
    go (TyVarTy v)         = unitFV v `unionFV` go (tyVarKind v)
    go (AppTy f a)         = go f `unionFV` go a
    go (FunTy _ w ty1 ty2) = go w `unionFV` go ty1 `unionFV` go ty2
    go (TyConApp tc tys)   =
      case tyConInjectivityInfo tc of
        Injective inj
          |  look_under_tfs || not (isTypeFamilyTyCon tc)
          -> mapUnionFV go $
             filterByList (inj ++ repeat True) tys
                         -- Oversaturated arguments to a tycon are
                         -- always injective, hence the repeat True
        _ -> emptyFV
    go (ForAllTy (Bndr tv _) ty) = go (tyVarKind tv) `unionFV` delFV tv (go ty)
    go LitTy{}                   = emptyFV
    go (CastTy ty _)             = go ty  -- Ignore the coercion
    go CoercionTy{}              = emptyFV

-- | Returns the free variables of a 'Type' that are in injective positions.
-- Specifically, it finds the free variables while:
--
-- * Expanding type synonyms
--
-- * Ignoring the coercion in @(ty |> co)@
--
-- * Ignoring the non-injective fields of a 'TyConApp'
--
-- See @Note [When does a tycon application need an explicit kind signature?]@.
injectiveVarsOfTypes :: Bool -- ^ look under injective type families?
                             -- See Note [Coverage condition for injective type families]
                             -- in "GHC.Tc.Instance.Family".
                     -> [Type] -> FV
injectiveVarsOfTypes look_under_tfs = mapUnionFV (injectiveVarsOfType look_under_tfs)

{- *********************************************************************
*                                                                      *
                 Invisible vars
*                                                                      *
********************************************************************* -}

-- | Returns the set of variables that are used invisibly anywhere within
-- the given type. A variable will be included even if it is used both visibly
-- and invisibly. An invisible use site includes:
--   * In the kind of a variable
--   * In the kind of a bound variable in a forall
--   * In a coercion
--   * In a Specified or Inferred argument to a function
-- See Note [VarBndrs, TyCoVarBinders, TyConBinders, and visibility] in "GHC.Core.TyCo.Rep"
invisibleVarsOfType :: Type -> FV
invisibleVarsOfType = go
  where
    go ty                  | Just ty' <- coreView ty
                           = go ty'
    go (TyVarTy v)         = go (tyVarKind v)
    go (AppTy f a)         = go f `unionFV` go a
    go (FunTy _ w ty1 ty2) = go w `unionFV` go ty1 `unionFV` go ty2
    go (TyConApp tc tys)   = tyCoFVsOfTypes invisibles `unionFV`
                             invisibleVarsOfTypes visibles
      where (invisibles, visibles) = partitionInvisibleTypes tc tys
    go (ForAllTy tvb ty)   = tyCoFVsBndr tvb $ go ty
    go LitTy{}             = emptyFV
    go (CastTy ty co)      = tyCoFVsOfCo co `unionFV` go ty
    go (CoercionTy co)     = tyCoFVsOfCo co

-- | Like 'invisibleVarsOfType', but for many types.
invisibleVarsOfTypes :: [Type] -> FV invisibleVarsOfTypes = mapUnionFV invisibleVarsOfType {- ********************************************************************* * * Any free vars * * ********************************************************************* -} {-# INLINE afvFolder #-} -- so that specialization to (const True) works afvFolder :: (TyCoVar -> Bool) -> TyCoFolder TyCoVarSet DM.Any afvFolder check_fv = TyCoFolder { tcf_view = noView , tcf_tyvar = do_tcv, tcf_covar = do_tcv , tcf_hole = do_hole, tcf_tycobinder = do_bndr } where do_tcv is tv = Any (not (tv `elemVarSet` is) && check_fv tv) do_hole _ _ = Any False -- I'm unsure; probably never happens do_bndr is tv _ = is `extendVarSet` tv anyFreeVarsOfType :: (TyCoVar -> Bool) -> Type -> Bool anyFreeVarsOfType check_fv ty = DM.getAny (f ty) where (f, _, _, _) = foldTyCo (afvFolder check_fv) emptyVarSet anyFreeVarsOfTypes :: (TyCoVar -> Bool) -> [Type] -> Bool anyFreeVarsOfTypes check_fv tys = DM.getAny (f tys) where (_, f, _, _) = foldTyCo (afvFolder check_fv) emptyVarSet anyFreeVarsOfCo :: (TyCoVar -> Bool) -> Coercion -> Bool anyFreeVarsOfCo check_fv co = DM.getAny (f co) where (_, _, f, _) = foldTyCo (afvFolder check_fv) emptyVarSet noFreeVarsOfType :: Type -> Bool noFreeVarsOfType ty = not $ DM.getAny (f ty) where (f, _, _, _) = foldTyCo (afvFolder (const True)) emptyVarSet noFreeVarsOfTypes :: [Type] -> Bool noFreeVarsOfTypes tys = not $ DM.getAny (f tys) where (_, f, _, _) = foldTyCo (afvFolder (const True)) emptyVarSet noFreeVarsOfCo :: Coercion -> Bool noFreeVarsOfCo co = not $ DM.getAny (f co) where (_, _, f, _) = foldTyCo (afvFolder (const True)) emptyVarSet {- ********************************************************************* * * scopedSort * * ********************************************************************* -} {- Note [ScopedSort] ~~~~~~~~~~~~~~~~~~~~ Consider foo :: Proxy a -> Proxy (b :: k) -> Proxy (a :: k2) -> () This function type is implicitly generalised over [a, b, k, k2]. 
These variables will be Specified; that is, they will be available for visible type application. This is because they are written in the type signature by the user. However, we must ask: what order will they appear in? In cases without dependency, this is easy: we just use the lexical left-to-right ordering of first occurrence. With dependency, we cannot get off the hook so easily. We thus state: * These variables appear in the order as given by ScopedSort, where the input to ScopedSort is the left-to-right order of first occurrence. Note that this applies only to *implicit* quantification, without a `forall`. If the user writes a `forall`, then we just use the order given. ScopedSort is defined thusly (as proposed in #15743): * Work left-to-right through the input list, with a cursor. * If variable v at the cursor is depended on by any earlier variable w, move v immediately before the leftmost such w. INVARIANT: The prefix of variables before the cursor form a valid telescope. Note that ScopedSort makes sense only after type inference is done and all types/kinds are fully settled and zonked. -} -- | Do a topological sort on a list of tyvars, -- so that binders occur before occurrences -- E.g. given [ a::k, k::*, b::k ] -- it'll return a well-scoped list [ k::*, a::k, b::k ] -- -- This is a deterministic sorting operation -- (that is, doesn't depend on Uniques). -- -- It is also meant to be stable: that is, variables should not -- be reordered unnecessarily. 
This is specified in Note [ScopedSort] -- See also Note [Ordering of implicit variables] in "GHC.Rename.HsType" scopedSort :: [TyCoVar] -> [TyCoVar] scopedSort = go [] [] where go :: [TyCoVar] -- already sorted, in reverse order -> [TyCoVarSet] -- each set contains all the variables which must be placed -- before the tv corresponding to the set; they are accumulations -- of the fvs in the sorted tvs' kinds -- This list is in 1-to-1 correspondence with the sorted tyvars -- INVARIANT: -- all (\tl -> all (`subVarSet` head tl) (tail tl)) (tails fv_list) -- That is, each set in the list is a superset of all later sets. -> [TyCoVar] -- yet to be sorted -> [TyCoVar] go acc _fv_list [] = reverse acc go acc fv_list (tv:tvs) = go acc' fv_list' tvs where (acc', fv_list') = insert tv acc fv_list insert :: TyCoVar -- var to insert -> [TyCoVar] -- sorted list, in reverse order -> [TyCoVarSet] -- list of fvs, as above -> ([TyCoVar], [TyCoVarSet]) -- augmented lists insert tv [] [] = ([tv], [tyCoVarsOfType (tyVarKind tv)]) insert tv (a:as) (fvs:fvss) | tv `elemVarSet` fvs , (as', fvss') <- insert tv as fvss = (a:as', fvs `unionVarSet` fv_tv : fvss') | otherwise = (tv:a:as, fvs `unionVarSet` fv_tv : fvs : fvss) where fv_tv = tyCoVarsOfType (tyVarKind tv) -- lists not in correspondence insert _ _ _ = panic "scopedSort" -- | Get the free vars of a type in scoped order tyCoVarsOfTypeWellScoped :: Type -> [TyVar] tyCoVarsOfTypeWellScoped = scopedSort . tyCoVarsOfTypeList -- | Get the free vars of types in scoped order tyCoVarsOfTypesWellScoped :: [Type] -> [TyVar] tyCoVarsOfTypesWellScoped = scopedSort . tyCoVarsOfTypesList ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/Ppr.hs0000644000000000000000000003055714472400112020510 0ustar0000000000000000{-# LANGUAGE PatternSynonyms #-} -- | Pretty-printing types and coercions. 
module GHC.Core.TyCo.Ppr ( -- * Precedence PprPrec(..), topPrec, sigPrec, opPrec, funPrec, appPrec, maybeParen, -- * Pretty-printing types pprType, pprParendType, pprTidiedType, pprPrecType, pprPrecTypeX, pprTypeApp, pprTCvBndr, pprTCvBndrs, pprSigmaType, pprTheta, pprParendTheta, pprForAll, pprUserForAll, pprTyVar, pprTyVars, pprThetaArrowTy, pprClassPred, pprKind, pprParendKind, pprTyLit, pprDataCons, pprWithExplicitKindsWhen, pprWithTYPE, pprSourceTyCon, -- * Pretty-printing coercions pprCo, pprParendCo, debugPprType, ) where import GHC.Prelude import {-# SOURCE #-} GHC.CoreToIface ( toIfaceTypeX, toIfaceTyLit, toIfaceForAllBndr , toIfaceTyCon, toIfaceTcArgs, toIfaceCoercionX ) import {-# SOURCE #-} GHC.Core.DataCon ( dataConFullSig , dataConUserTyVarBinders, DataCon ) import GHC.Core.Type ( pickyIsLiftedTypeKind, pattern One, pattern Many, splitForAllReqTVBinders, splitForAllInvisTVBinders ) import GHC.Core.TyCon import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Tidy import GHC.Core.TyCo.FVs import GHC.Core.Class import GHC.Types.Var import GHC.Iface.Type import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Basic ( PprPrec(..), topPrec, sigPrec, opPrec , funPrec, appPrec, maybeParen ) {- %************************************************************************ %* * Pretty-printing types Defined very early because of debug printing in assertions %* * %************************************************************************ @pprType@ is the standard @Type@ printer; the overloaded @ppr@ function is defined to use this. @pprParendType@ is the same, except it puts parens around the type, except for the atomic cases. @pprParendType@ works just by setting the initial context precedence very high. Note that any function which pretty-prints a @Type@ first converts the @Type@ to an @IfaceType@. See Note [IfaceType and pretty-printing] in GHC.Iface.Type. See Note [Precedence in types] in GHC.Types.Basic. 
-} -------------------------------------------------------- -- When pretty-printing types, we convert to IfaceType, -- and pretty-print that. -- See Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr -------------------------------------------------------- pprType, pprParendType, pprTidiedType :: Type -> SDoc pprType = pprPrecType topPrec pprParendType = pprPrecType appPrec -- already pre-tidied pprTidiedType = pprIfaceType . toIfaceTypeX emptyVarSet pprPrecType :: PprPrec -> Type -> SDoc pprPrecType = pprPrecTypeX emptyTidyEnv pprPrecTypeX :: TidyEnv -> PprPrec -> Type -> SDoc pprPrecTypeX env prec ty = getPprStyle $ \sty -> getPprDebug $ \debug -> if debug -- Use debugPprType when in then debug_ppr_ty prec ty -- when in debug-style else pprPrecIfaceType prec (tidyToIfaceTypeStyX env ty sty) -- NB: debug-style is used for -dppr-debug -- dump-style is used for -ddump-tc-trace etc pprTyLit :: TyLit -> SDoc pprTyLit = pprIfaceTyLit . toIfaceTyLit pprKind, pprParendKind :: Kind -> SDoc pprKind = pprType pprParendKind = pprParendType tidyToIfaceTypeStyX :: TidyEnv -> Type -> PprStyle -> IfaceType tidyToIfaceTypeStyX env ty sty | userStyle sty = tidyToIfaceTypeX env ty | otherwise = toIfaceTypeX (tyCoVarsOfType ty) ty -- in latter case, don't tidy, as we'll be printing uniques. tidyToIfaceType :: Type -> IfaceType tidyToIfaceType = tidyToIfaceTypeX emptyTidyEnv tidyToIfaceTypeX :: TidyEnv -> Type -> IfaceType -- It's vital to tidy before converting to an IfaceType -- or nested binders will become indistinguishable! -- -- Also for the free type variables, tell toIfaceTypeX to -- leave them as IfaceFreeTyVar. This is super-important -- for debug printing. 
tidyToIfaceTypeX env ty = toIfaceTypeX (mkVarSet free_tcvs) (tidyType env' ty) where env' = tidyFreeTyCoVars env free_tcvs free_tcvs = tyCoVarsOfTypeWellScoped ty ------------ pprCo, pprParendCo :: Coercion -> SDoc pprCo co = getPprStyle $ \ sty -> pprIfaceCoercion (tidyToIfaceCoSty co sty) pprParendCo co = getPprStyle $ \ sty -> pprParendIfaceCoercion (tidyToIfaceCoSty co sty) tidyToIfaceCoSty :: Coercion -> PprStyle -> IfaceCoercion tidyToIfaceCoSty co sty | userStyle sty = tidyToIfaceCo co | otherwise = toIfaceCoercionX (tyCoVarsOfCo co) co -- in latter case, don't tidy, as we'll be printing uniques. tidyToIfaceCo :: Coercion -> IfaceCoercion -- It's vital to tidy before converting to an IfaceType -- or nested binders will become indistinguishable! -- -- Also for the free type variables, tell toIfaceCoercionX to -- leave them as IfaceFreeCoVar. This is super-important -- for debug printing. tidyToIfaceCo co = toIfaceCoercionX (mkVarSet free_tcvs) (tidyCo env co) where env = tidyFreeTyCoVars emptyTidyEnv free_tcvs free_tcvs = scopedSort $ tyCoVarsOfCoList co ------------ pprClassPred :: Class -> [Type] -> SDoc pprClassPred clas tys = pprTypeApp (classTyCon clas) tys ------------ pprTheta :: ThetaType -> SDoc pprTheta = pprIfaceContext topPrec . map tidyToIfaceType pprParendTheta :: ThetaType -> SDoc pprParendTheta = pprIfaceContext appPrec . map tidyToIfaceType pprThetaArrowTy :: ThetaType -> SDoc pprThetaArrowTy = pprIfaceContextArr . map tidyToIfaceType ------------------ pprSigmaType :: Type -> SDoc pprSigmaType = pprIfaceSigmaType ShowForAllWhen . tidyToIfaceType pprForAll :: [TyCoVarBinder] -> SDoc pprForAll tvs = pprIfaceForAll (map toIfaceForAllBndr tvs) -- | Print a user-level forall; see @Note [When to print foralls]@ in -- "GHC.Iface.Type". pprUserForAll :: [TyCoVarBinder] -> SDoc pprUserForAll = pprUserIfaceForAll . 
map toIfaceForAllBndr pprTCvBndrs :: [TyCoVarBinder] -> SDoc pprTCvBndrs tvs = sep (map pprTCvBndr tvs) pprTCvBndr :: TyCoVarBinder -> SDoc pprTCvBndr = pprTyVar . binderVar pprTyVars :: [TyVar] -> SDoc pprTyVars tvs = sep (map pprTyVar tvs) pprTyVar :: TyVar -> SDoc -- Print a type variable binder with its kind (but not if *) -- Here we do not go via IfaceType, because the duplication with -- pprIfaceTvBndr is minimal, and the loss of uniques etc in -- debug printing is disastrous pprTyVar tv | pickyIsLiftedTypeKind kind = ppr tv -- See Note [Suppressing * kinds] | otherwise = parens (ppr tv <+> dcolon <+> ppr kind) where kind = tyVarKind tv {- Note [Suppressing * kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally we want to print forall a. a->a not forall (a::*). a->a or forall (a::Type). a->a That is, for brevity we suppress a kind ascription of '*' (or Type). But what if the kind is (Const Type x)? type Const p q = p Then (Const Type x) is just a long way of saying Type. But it may be jolly confusing to suppress the 'x'. Suppose we have (polykinds/T18451a) foo :: forall a b (c :: Const Type b). Proxy '[a, c] Then this error message • These kind and type variables: a b (c :: Const Type b) are out of dependency order. Perhaps try this ordering: (b :: k) (a :: Const (*) b) (c :: Const (*) b) would be much less helpful if we suppressed the kind ascription on 'a'. Hence the use of pickyIsLiftedTypeKind. -} ----------------- debugPprType :: Type -> SDoc -- ^ debugPprType is a simple pretty printer that prints a type -- without going through IfaceType. It does not format as prettily -- as the normal route, but it's much more direct, and that can -- be useful for debugging. E.g. with -dppr-debug it prints the -- kind on type-variable /occurrences/ which the normal route -- fundamentally cannot do. 
debugPprType ty = debug_ppr_ty topPrec ty debug_ppr_ty :: PprPrec -> Type -> SDoc debug_ppr_ty _ (LitTy l) = ppr l debug_ppr_ty _ (TyVarTy tv) = ppr tv -- With -dppr-debug we get (tv :: kind) debug_ppr_ty prec ty@(FunTy { ft_af = af, ft_mult = mult, ft_arg = arg, ft_res = res }) = maybeParen prec funPrec $ sep [debug_ppr_ty funPrec arg, arr <+> debug_ppr_ty prec res] where arr = case af of VisArg -> case mult of One -> lollipop Many -> arrow w -> mulArrow (ppr w) InvisArg -> case mult of Many -> darrow _ -> pprPanic "unexpected multiplicity" (ppr ty) debug_ppr_ty prec (TyConApp tc tys) | null tys = ppr tc | otherwise = maybeParen prec appPrec $ hang (ppr tc) 2 (sep (map (debug_ppr_ty appPrec) tys)) debug_ppr_ty _ (AppTy t1 t2) = hang (debug_ppr_ty appPrec t1) -- Print parens so we see ((a b) c) 2 (debug_ppr_ty appPrec t2) -- so that we can distinguish -- TyConApp from AppTy debug_ppr_ty prec (CastTy ty co) = maybeParen prec topPrec $ hang (debug_ppr_ty topPrec ty) 2 (text "|>" <+> ppr co) debug_ppr_ty _ (CoercionTy co) = parens (text "CO" <+> ppr co) -- Invisible forall: forall {k} (a :: k). t debug_ppr_ty prec t | (bndrs, body) <- splitForAllInvisTVBinders t , not (null bndrs) = maybeParen prec funPrec $ sep [ text "forall" <+> fsep (map ppr_bndr bndrs) <> dot, ppr body ] where -- (ppr tv) will print the binder kind-annotated -- when in debug-style ppr_bndr (Bndr tv InferredSpec) = braces (ppr tv) ppr_bndr (Bndr tv SpecifiedSpec) = ppr tv -- Visible forall: forall x y -> t debug_ppr_ty prec t | (bndrs, body) <- splitForAllReqTVBinders t , not (null bndrs) = maybeParen prec funPrec $ sep [ text "forall" <+> fsep (map ppr_bndr bndrs) <+> arrow, ppr body ] where -- (ppr tv) will print the binder kind-annotated -- when in debug-style ppr_bndr (Bndr tv ()) = ppr tv -- Impossible case: neither visible nor invisible forall. 
debug_ppr_ty _ ForAllTy{} = panic "debug_ppr_ty: neither splitForAllInvisTVBinders nor splitForAllReqTVBinders returned any binders" {- Note [Infix type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ With TypeOperators you can say f :: (a ~> b) -> b and the (~>) is considered a type variable. However, the type pretty-printer in this module will just see (a ~> b) as App (App (TyVarTy "~>") (TyVarTy "a")) (TyVarTy "b") So it'll print the type in prefix form. To avoid confusion we must remember to parenthesise the operator, thus (~>) a b -> b See #2766. -} pprDataCons :: TyCon -> SDoc pprDataCons = sepWithVBars . fmap pprDataConWithArgs . tyConDataCons where sepWithVBars [] = empty sepWithVBars docs = sep (punctuate (space <> vbar) docs) pprDataConWithArgs :: DataCon -> SDoc pprDataConWithArgs dc = sep [forAllDoc, thetaDoc, ppr dc <+> argsDoc] where (_univ_tvs, _ex_tvs, _eq_spec, theta, arg_tys, _res_ty) = dataConFullSig dc user_bndrs = tyVarSpecToBinders $ dataConUserTyVarBinders dc forAllDoc = pprUserForAll user_bndrs thetaDoc = pprThetaArrowTy theta argsDoc = hsep (fmap pprParendType (map scaledThing arg_tys)) pprTypeApp :: TyCon -> [Type] -> SDoc pprTypeApp tc tys = pprIfaceTypeApp topPrec (toIfaceTyCon tc) (toIfaceTcArgs tc tys) -- TODO: toIfaceTcArgs seems rather wasteful here ------------------ -- | Display all kind information (with @-fprint-explicit-kinds@) when the -- provided 'Bool' argument is 'True'. -- See @Note [Kind arguments in error messages]@ in "GHC.Tc.Errors". pprWithExplicitKindsWhen :: Bool -> SDoc -> SDoc pprWithExplicitKindsWhen b = updSDocContext $ \ctx -> if b then ctx { sdocPrintExplicitKinds = True } else ctx -- | This variant preserves any use of TYPE in a type, effectively -- locally setting -fprint-explicit-runtime-reps. pprWithTYPE :: Type -> SDoc pprWithTYPE ty = updSDocContext (\ctx -> ctx { sdocPrintExplicitRuntimeReps = True }) $ ppr ty -- | Pretty prints a 'TyCon', using the family instance in case of a -- representation tycon. 
For example: -- -- > data T [a] = ... -- -- In that case we want to print @T [a]@, where @T@ is the family 'TyCon' pprSourceTyCon :: TyCon -> SDoc pprSourceTyCon tycon | Just (fam_tc, tys) <- tyConFamInst_maybe tycon = ppr $ fam_tc `TyConApp` tys -- can't be FunTyCon | otherwise = ppr tycon ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/Rep.hs0000644000000000000000000023532314472400112020473 0ustar0000000000000000 {-# LANGUAGE DeriveDataTypeable #-} {-# OPTIONS_HADDOCK not-home #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 \section[GHC.Core.TyCo.Rep]{Type and Coercion - friends' interface} Note [The Type-related module hierarchy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC.Core.Class GHC.Core.Coercion.Axiom GHC.Core.TyCon imports GHC.Core.{Class, Coercion.Axiom} GHC.Core.TyCo.Rep imports GHC.Core.{Class, Coercion.Axiom, TyCon} GHC.Core.TyCo.Ppr imports GHC.Core.TyCo.Rep GHC.Core.TyCo.FVs imports GHC.Core.TyCo.Rep GHC.Core.TyCo.Subst imports GHC.Core.TyCo.{Rep, FVs, Ppr} GHC.Core.TyCo.Tidy imports GHC.Core.TyCo.{Rep, FVs} GHC.Builtin.Types.Prim imports GHC.Core.TyCo.Rep ( including mkTyConTy ) GHC.Core.Coercion imports GHC.Core.Type -} -- We expose the relevant stuff from this module via the Type module module GHC.Core.TyCo.Rep ( -- * Types Type(..), TyLit(..), KindOrType, Kind, RuntimeRepType, KnotTied, PredType, ThetaType, FRRType, -- Synonyms ArgFlag(..), AnonArgFlag(..), -- * Coercions Coercion(..), UnivCoProvenance(..), CoercionHole(..), coHoleCoVar, setCoHoleCoVar, CoercionN, CoercionR, CoercionP, KindCoercion, MCoercion(..), MCoercionR, MCoercionN, -- * Functions over types mkNakedTyConTy, mkTyVarTy, mkTyVarTys, mkTyCoVarTy, mkTyCoVarTys, mkFunTy, mkVisFunTy, mkInvisFunTy, mkVisFunTys, mkForAllTy, mkForAllTys, mkInvisForAllTys, mkPiTy, mkPiTys, mkFunTyMany, mkScaledFunTy, mkVisFunTyMany, mkVisFunTysMany, mkInvisFunTyMany, mkInvisFunTysMany, nonDetCmpTyLit, cmpTyLit, -- * Functions over binders 
TyCoBinder(..), TyCoVarBinder, TyBinder, binderVar, binderVars, binderType, binderArgFlag, delBinderVar, isInvisibleArgFlag, isVisibleArgFlag, isInvisibleBinder, isVisibleBinder, isTyBinder, isNamedBinder, -- * Functions over coercions pickLR, -- ** Analyzing types TyCoFolder(..), foldTyCo, noView, -- * Sizes typeSize, coercionSize, provSize, -- * Multiplicities Scaled(..), scaledMult, scaledThing, mapScaledType, Mult ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType, pprCo, pprTyLit ) -- Transitively pulls in a LOT of stuff, better to break the loop -- friends: import GHC.Types.Var import GHC.Types.Var.Set import GHC.Core.TyCon import GHC.Core.Coercion.Axiom -- others import {-# SOURCE #-} GHC.Builtin.Types ( manyDataConTy ) import GHC.Types.Basic ( LeftOrRight(..), pickLR ) import GHC.Types.Unique ( Uniquable(..) ) import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Misc import GHC.Utils.Panic -- libraries import qualified Data.Data as Data hiding ( TyCon ) import Data.IORef ( IORef ) -- for CoercionHole {- ********************************************************************** * * Type * * ********************************************************************** -} -- | The key representation of types within the compiler type KindOrType = Type -- See Note [Arguments to type constructors] -- | The key type representing kinds in the compiler. type Kind = Type -- | Type synonym used for types of kind RuntimeRep. type RuntimeRepType = Type -- A type with a syntactically fixed RuntimeRep, in the sense -- of Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. type FRRType = Type -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Type -- See Note [Non-trivial definitional equality] = TyVarTy Var -- ^ Vanilla type or kind variable (*never* a coercion variable) | AppTy Type Type -- ^ Type application to something other than a 'TyCon'. 
Parameters: -- -- 1) Function: must /not/ be a 'TyConApp' or 'CastTy', -- must be another 'AppTy', or 'TyVarTy' -- See Note [Respecting definitional equality] \(EQ1) about the -- no 'CastTy' requirement -- -- 2) Argument type | TyConApp TyCon [KindOrType] -- ^ Application of a 'TyCon', including newtypes /and/ synonyms. -- Invariant: saturated applications of 'FunTyCon' must -- use 'FunTy' and saturated synonyms must use their own -- constructors. However, /unsaturated/ 'FunTyCon's -- do appear as 'TyConApp's. -- Parameters: -- -- 1) Type constructor being applied to. -- -- 2) Type arguments. Might not have enough type arguments -- here to saturate the constructor. -- Even type synonyms are not necessarily saturated; -- for example unsaturated type synonyms -- can appear as the right hand side of a type synonym. | ForAllTy {-# UNPACK #-} !TyCoVarBinder Type -- ^ A Π type. -- Note [When we quantify over a coercion variable] -- INVARIANT: If the binder is a coercion variable, it must -- be mentioned in the Type. See -- Note [Unused coercion variable in ForAllTy] | FunTy -- ^ FUN m t1 t2 Very common, so an important special case -- See Note [Function types] { ft_af :: AnonArgFlag -- Is this (->) or (=>)? , ft_mult :: Mult -- Multiplicity , ft_arg :: Type -- Argument type , ft_res :: Type } -- Result type | LitTy TyLit -- ^ Type literals are similar to type constructors. | CastTy Type KindCoercion -- ^ A kind cast. The coercion is always nominal. 
-- INVARIANT: The cast is never reflexive \(EQ2) -- INVARIANT: The Type is not a CastTy (use TransCo instead) \(EQ3) -- INVARIANT: The Type is not a ForAllTy over a tyvar \(EQ4) -- See Note [Respecting definitional equality] | CoercionTy Coercion -- ^ Injection of a Coercion into a type -- This should only ever be used in the RHS of an AppTy, -- in the list of a TyConApp, when applying a promoted -- GADT data constructor deriving Data.Data instance Outputable Type where ppr = pprType -- NOTE: Other parts of the code assume that type literals do not contain -- types or type variables. data TyLit = NumTyLit Integer | StrTyLit FastString | CharTyLit Char deriving (Eq, Data.Data) -- Non-determinism arises due to uniqCompareFS nonDetCmpTyLit :: TyLit -> TyLit -> Ordering nonDetCmpTyLit = cmpTyLitWith NonDetFastString -- Slower than nonDetCmpTyLit but deterministic cmpTyLit :: TyLit -> TyLit -> Ordering cmpTyLit = cmpTyLitWith LexicalFastString {-# INLINE cmpTyLitWith #-} cmpTyLitWith :: Ord r => (FastString -> r) -> TyLit -> TyLit -> Ordering cmpTyLitWith _ (NumTyLit x) (NumTyLit y) = compare x y cmpTyLitWith w (StrTyLit x) (StrTyLit y) = compare (w x) (w y) cmpTyLitWith _ (CharTyLit x) (CharTyLit y) = compare x y cmpTyLitWith _ a b = compare (tag a) (tag b) where tag :: TyLit -> Int tag NumTyLit{} = 0 tag StrTyLit{} = 1 tag CharTyLit{} = 2 instance Outputable TyLit where ppr = pprTyLit {- Note [Function types] ~~~~~~~~~~~~~~~~~~~~~~~~ FunTy is the constructor for a function type. Here are the details: * The primitive function type constructor FUN has kind FUN :: forall (m :: Multiplicity) -> forall {r1 :: RuntimeRep} {r2 :: RuntimeRep}. TYPE r1 -> TYPE r2 -> Type mkTyConApp ensures that we convert a saturated application TyConApp FUN [m,r1,r2,t1,t2] into FunTy VisArg m t1 t2 dropping the 'r1' and 'r2' arguments; they are easily recovered from 't1' and 't2'. The visibility is always VisArg, because we build constraint arrows (=>) with e.g. 
mkPhiTy and friends, never `mkTyConApp funTyCon args`. * For the time being its RuntimeRep quantifiers are left inferred. This is to allow for it to evolve. * Because the RuntimeRep args came first historically (that is, the arrow type constructor gained these arguments before gaining the Multiplicity argument), we wanted to be able to say type (->) = FUN Many which we do in library module GHC.Types. This means that the Multiplicity argument must precede the RuntimeRep arguments -- and it means changing the name of the primitive constructor from (->) to FUN. * The multiplicity argument is dependent, because Typeable does not support a type such as `Multiplicity -> forall {r1 r2 :: RuntimeRep}. ...`. There is a plan to change the argument order and make the multiplicity argument nondependent in #20164. * The ft_af field says whether or not this is an invisible argument VisArg: t1 -> t2 Ordinary function type InvisArg: t1 => t2 t1 is guaranteed to be a predicate type, i.e. t1 :: Constraint See Note [Types for coercions, predicates, and evidence] This visibility info makes no difference in Core; it matters only when we regard the type as a Haskell source type. Note [Types for coercions, predicates, and evidence] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We treat differently: (a) Predicate types Test: isPredTy Binders: DictIds Kind: Constraint Examples: (Eq a), and (a ~ b) (b) Coercion types are primitive, unboxed equalities Test: isCoVarTy Binders: CoVars (can appear in coercions) Kind: TYPE (TupleRep []) Examples: (t1 ~# t2) or (t1 ~R# t2) (c) Evidence types is the type of evidence manipulated by the type constraint solver. Test: isEvVarType Binders: EvVars Kind: Constraint or TYPE (TupleRep []) Examples: all coercion types and predicate types Coercion types and predicate types are mutually exclusive, but evidence types are a superset of both. 
When treated as a user type, - Predicates (of kind Constraint) are invisible and are implicitly instantiated - Coercion types, and non-pred evidence types (i.e. not of kind Constrain), are just regular old types, are visible, and are not implicitly instantiated. In a FunTy { ft_af = InvisArg }, the argument type is always a Predicate type. Note [Weird typing rule for ForAllTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here are the typing rules for ForAllTy: tyvar : Type inner : TYPE r tyvar does not occur in r ------------------------------------ ForAllTy (Bndr tyvar vis) inner : TYPE r inner : TYPE r ------------------------------------ ForAllTy (Bndr covar vis) inner : Type Note that the kind of the result depends on whether the binder is a tyvar or a covar. The kind of a forall-over-tyvar is the same as the kind of the inner type. This is because quantification over types is erased before runtime. By contrast, the kind of a forall-over-covar is always Type, because a forall-over-covar is compiled into a function taking a 0-bit-wide erased coercion argument. Because the tyvar form above includes r in its result, we must be careful not to let any variables escape -- thus the last premise of the rule above. Note [Constraints in kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do we allow a type constructor to have a kind like S :: Eq a => a -> Type No, we do not. Doing so would mean would need a TyConApp like S @k @(d :: Eq k) (ty :: k) and we have no way to build, or decompose, evidence like (d :: Eq k) at the type level. But we admit one exception: equality. We /do/ allow, say, MkT :: (a ~ b) => a -> b -> Type a b Why? Because we can, without much difficulty. Moreover we can promote a GADT data constructor (see TyCon Note [Promoted data constructors]), like data GT a b where MkGT : a -> a -> GT a a so programmers might reasonably expect to be able to promote MkT as well. How does this work? 
* In GHC.Tc.Validity.checkConstraintsOK we reject kinds that have constraints other than (a~b) and (a~~b). * In Inst.tcInstInvisibleTyBinder we instantiate a call of MkT by emitting [W] co :: alpha ~# beta and producing the elaborated term MkT @alpha @beta (Eq# alpha beta co) We don't generate a boxed "Wanted"; we generate only a regular old /unboxed/ primitive-equality Wanted, and build the box on the spot. * How can we get such a MkT? By promoting a GADT-style data constructor data T a b where MkT :: (a~b) => a -> b -> T a b See DataCon.mkPromotedDataCon and Note [Promoted data constructors] in GHC.Core.TyCon * We support both homogeneous (~) and heterogeneous (~~) equality. (See Note [The equality types story] in GHC.Builtin.Types.Prim for a primer on these equality types.) * How do we prevent a MkT having an illegal constraint like Eq a? We check for this at use-sites; see GHC.Tc.Gen.HsType.tcTyVar, specifically dc_theta_illegal_constraint. * Notice that nothing special happens if K :: (a ~# b) => blah because (a ~# b) is not a predicate type, and is never implicitly instantiated. (Mind you, it's not clear how you could creates a type constructor with such a kind.) See Note [Types for coercions, predicates, and evidence] * The existence of promoted MkT with an equality-constraint argument is the (only) reason that the AnonTCB constructor of TyConBndrVis carries an AnonArgFlag (VisArg/InvisArg). For example, when we promote the data constructor MkT :: forall a b. (a~b) => a -> b -> T a b we get a PromotedDataCon with tyConBinders Bndr (a :: Type) (NamedTCB Inferred) Bndr (b :: Type) (NamedTCB Inferred) Bndr (_ :: a ~ b) (AnonTCB InvisArg) Bndr (_ :: a) (AnonTCB VisArg)) Bndr (_ :: b) (AnonTCB VisArg)) * One might reasonably wonder who *unpacks* these boxes once they are made. After all, there is no type-level `case` construct. The surprising answer is that no one ever does. 
Instead, if a GADT constructor is used on the left-hand side of a type family equation, that occurrence forces GHC to unify the types in question. For example: data G a where MkG :: G Bool type family F (x :: G a) :: a where F MkG = False When checking the LHS `F MkG`, GHC sees the MkG constructor and then must unify F's implicit parameter `a` with Bool. This succeeds, making the equation F Bool (MkG @Bool ) = False Note that we never need unpack the coercion. This is because type family equations are *not* parametric in their kind variables. That is, we could have just said type family H (x :: G a) :: a where H _ = False The presence of False on the RHS also forces `a` to become Bool, giving us H Bool _ = False The fact that any of this works stems from the lack of phase separation between types and kinds (unlike the very present phase separation between terms and types). Once we have the ability to pattern-match on types below top-level, this will no longer cut it, but it seems fine for now. Note [Arguments to type constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Because of kind polymorphism, in addition to type application we now have kind instantiation. We reuse the same notations to do so. For example: Just (* -> *) Maybe Right * Nat Zero are represented by: TyConApp (PromotedDataCon Just) [* -> *, Maybe] TyConApp (PromotedDataCon Right) [*, Nat, (PromotedDataCon Zero)] Important note: Nat is used as a *kind* and not as a type. This can be confusing, since type-level Nat and kind-level Nat are identical. We use the kind of (PromotedDataCon Right) to know if its arguments are kinds or types. This kind instantiation only happens in TyConApp currently. Note [Non-trivial definitional equality] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Is Int |> <*> the same as Int? YES! In order to reduce headaches, we decide that any reflexive casts in types are just ignored. (Indeed they must be. See Note [Respecting definitional equality].) 
More generally, the `eqType` function, which defines Core's type equality relation, ignores casts and coercion arguments, as long as the two types have the same kind. This allows us to be a little sloppier in keeping track of coercions, which is a good thing. It also means that eqType does not depend on eqCoercion, which is also a good thing. Why is this sensible? That is, why is something different than α-equivalence appropriate for the implementation of eqType? Anything smaller than ~ and homogeneous is an appropriate definition for equality. The type safety of FC depends only on ~. Let's say η : τ ~ σ. Any expression of type τ can be transmuted to one of type σ at any point by casting. The same is true of expressions of type σ. So in some sense, τ and σ are interchangeable. But let's be more precise. If we examine the typing rules of FC (say, those in https://richarde.dev/papers/2015/equalities/equalities.pdf) there are several places where the same metavariable is used in two different premises to a rule. (For example, see Ty_App.) There is an implicit equality check here. What definition of equality should we use? By convention, we use α-equivalence. Take any rule with one (or more) of these implicit equality checks. Then there is an admissible rule that uses ~ instead of the implicit check, adding in casts as appropriate. The only problem here is that ~ is heterogeneous. To make the kinds work out in the admissible rule that uses ~, it is necessary to homogenize the coercions. That is, if we have η : (τ : κ1) ~ (σ : κ2), then we don't use η; we use η |> kind η, which is homogeneous. The effect of this all is that eqType, the implementation of the implicit equality check, can use any homogeneous relation that is smaller than ~, as those rules must also be admissible. A more drawn out argument around all of this is presented in Section 7.2 of Richard E's thesis (http://richarde.dev/papers/2016/thesis/eisenberg-thesis.pdf). 
What would go wrong if we insisted on the casts matching? See the beginning of Section 8 in the unpublished paper above. Theoretically, nothing at all goes wrong. But in practical terms, getting the coercions right proved to be nightmarish. And types would explode: during kind-checking, we often produce reflexive kind coercions. When we try to cast by these, mkCastTy just discards them. But if we used an eqType that distinguished between Int and Int |> <*>, then we couldn't discard -- the output of kind-checking would be enormous, and we would need enormous casts with lots of CoherenceCo's to straighten them out. Would anything go wrong if eqType looked through type families? No, not at all. But that makes eqType rather hard to implement. Thus, the guideline for eqType is that it should be the largest easy-to-implement relation that is still smaller than ~ and homogeneous. The precise choice of relation is somewhat incidental, as long as the smart constructors and destructors in Type respect whatever relation is chosen. Another helpful principle with eqType is this: (EQ) If (t1 `eqType` t2) then I can replace t1 by t2 anywhere. This principle also tells us that eqType must relate only types with the same kinds. Interestingly, it must be the case that the free variables of t1 and t2 might be different, even if t1 `eqType` t2. A simple example of this is if we have both cv1 :: k1 ~ k2 and cv2 :: k1 ~ k2 in the environment. Then t1 = t |> cv1 and t2 = t |> cv2 are eqType; yet cv1 is in the free vars of t1 and cv2 is in the free vars of t2. Unless we choose to implement eqType to be just α-equivalence, this wrinkle around free variables remains. Yet not all is lost: we can say that any two equal types share the same *relevant* free variables. Here, a relevant variable is a shallow free variable (see Note [Shallow and deep free variables] in GHC.Core.TyCo.FVs) that does not appear within a coercion. 
Unfortunately, that's not the end of the story.
(T a b |> (co -> )) (c |> co) These two types have the same kind (Type), but the left type is a TyConApp while the right type is not. To handle this case, we say that the right-hand type is ill-formed, requiring an AppTy never to have a casted TyConApp on its left. It is easy enough to pull around the coercions to maintain this invariant, as done in Type.mkAppTy. In the example above, trying to form the right-hand type will instead yield (T a b (c |> co |> sym co) |> ). Both the casts there are reflexive and will be dropped. Huzzah. This idea of pulling coercions to the right works for splitAppTy as well. However, there is one hiccup: it's possible that a coercion doesn't relate two Pi-types. For example, if we have @type family Fun a b where Fun a b = a -> b@, then we might have (T :: Fun Type Type) and (T |> axFun) Int. That axFun can't be pulled to the right. But we don't need to pull it: (T |> axFun) Int is not `eqType` to any proper TyConApp -- thus, leaving it where it is doesn't violate our (EQ) property. In order to detect reflexive casts reliably, we must make sure not to have nested casts: we update (t |> co1 |> co2) to (t |> (co1 `TransCo` co2)). This is (EQ2) below. One other troublesome case is ForAllTy. See Note [Weird typing rule for ForAllTy]. The kind of the body is the same as the kind of the ForAllTy. Accordingly, ForAllTy tv (ty |> co) and (ForAllTy tv ty) |> co are `eqType`. But only the first can be split by splitForAllTy. So we forbid the second form, instead pushing the coercion inside to get the first form. This is done in mkCastTy. In sum, in order to uphold (EQ), we need the following invariants: (EQ1) No decomposable CastTy to the left of an AppTy, where a decomposable cast is one that relates either a FunTy to a FunTy or a ForAllTy to a ForAllTy. (EQ2) No reflexive casts in CastTy. (EQ3) No nested CastTys. (EQ4) No CastTy over (ForAllTy (Bndr tyvar vis) body). 
See Note [Weird typing rule for ForAllTy] These invariants are all documented above, in the declaration for Type. Note [Equality on FunTys] ~~~~~~~~~~~~~~~~~~~~~~~~~ A (FunTy vis mult arg res) is just an abbreviation for a TyConApp funTyCon [mult, arg_rep, res_rep, arg, res] where arg :: TYPE arg_rep res :: TYPE res_rep Note that the vis field of a FunTy appears nowhere in the equivalent TyConApp. In Core, this is OK, because we no longer care about the visibility of the argument in a FunTy (the vis distinguishes between arg -> res and arg => res). In the type-checker, we are careful not to decompose FunTys with an invisible argument. See also Note [Decomposing fat arrow c=>t] in GHC.Core.Type. In order to compare FunTys while respecting how they could expand into TyConApps, we must check the kinds of the arg and the res. Note [When we quantify over a coercion variable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The TyCoVarBinder in a ForAllTy can be (most often) a TyVar or (rarely) a CoVar. We support quantifying over a CoVar here in order to support a homogeneous (~#) relation (someday -- not yet implemented). Here is the example: type (:~~:) :: forall k1 k2. k1 -> k2 -> Type data a :~~: b where HRefl :: a :~~: a Assuming homogeneous equality (that is, with (~#) :: forall k. k -> k -> TYPE (TupleRep '[]) ) after rejigging to make equalities explicit, we get a constructor that looks like HRefl :: forall k1 k2 (a :: k1) (b :: k2). forall (cv :: k1 ~# k2). (a |> cv) ~# b => (:~~:) k1 k2 a b Note that we must cast `a` by a cv bound in the same type in order to make this work out. See also https://gitlab.haskell.org/ghc/ghc/-/wikis/dependent-haskell/phase2 which gives a general road map that covers this space. Having this feature in Core does *not* mean we have it in source Haskell. See #15710 about that. Note [Unused coercion variable in ForAllTy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have \(co:t1 ~ t2). 
If co is not used in t, we would like to have (1) and (2) equivalent.
The "covar is free in inner" premise is solely to maintain the representation
invariant documented in Note [Unused coercion variable in ForAllTy]. Though
there is surface similarity between this free-var check and the one in the
tyvar rule, these two restrictions are truly unrelated.
-}

-- | A type labeled 'KnotTied' might have knot-tied tycons in it. See
-- Note [Type checking recursive type and class declarations] in
-- "GHC.Tc.TyCl"
type KnotTied ty = ty

{- **********************************************************************
*                                                                       *
                  TyCoBinder and ArgFlag
*                                                                       *
********************************************************************** -}

-- | A 'TyCoBinder' represents an argument to a function. TyCoBinders can be
-- dependent ('Named') or nondependent ('Anon'). They may also be visible or
-- not. See Note [TyCoBinders]
data TyCoBinder
  = Named TyCoVarBinder              -- A type-lambda binder
  | Anon AnonArgFlag (Scaled Type)   -- A term-lambda binder. Type here can be CoercionTy.
                                     -- Visibility is determined by the AnonArgFlag
  deriving Data.Data

instance Outputable TyCoBinder where
  ppr (Anon af ty) = ppr af <+> ppr ty
  ppr (Named (Bndr v Required))  = ppr v
  ppr (Named (Bndr v Specified)) = char '@' <> ppr v
  ppr (Named (Bndr v Inferred))  = braces (ppr v)

-- | 'TyBinder' is like 'TyCoBinder', but there can only be 'TyVarBinder'
-- in the 'Named' field.
type TyBinder = TyCoBinder

-- | Remove the binder's variable from the set, if the binder has
-- a variable.
delBinderVar :: VarSet -> TyCoVarBinder -> VarSet
delBinderVar vars (Bndr tv _) = vars `delVarSet` tv

-- | Does this binder bind an invisible argument?
isInvisibleBinder :: TyCoBinder -> Bool
isInvisibleBinder (Named (Bndr _ vis)) = isInvisibleArgFlag vis
isInvisibleBinder (Anon InvisArg _)    = True
isInvisibleBinder (Anon VisArg _)      = False

-- | Does this binder bind a visible argument?
isVisibleBinder :: TyCoBinder -> Bool
isVisibleBinder = not . isInvisibleBinder

-- | Is this a dependent ('Named') binder, as opposed to an 'Anon' one?
isNamedBinder :: TyCoBinder -> Bool
isNamedBinder (Named {}) = True
isNamedBinder (Anon {})  = False

-- | If it's a named binder, is the binder a tyvar?
-- Returns True for nondependent binders.
-- This checks that we're really returning a *Ty*Binder (as opposed to a
-- coercion binder). That way, if/when we allow coercion quantification
-- in more places, we'll know we missed updating some function.
isTyBinder :: TyCoBinder -> Bool
isTyBinder (Named bnd) = isTyVarBinder bnd
isTyBinder _ = True

{- Note [TyCoBinders]
~~~~~~~~~~~~~~~~~~~
A ForAllTy contains a TyCoVarBinder.  But a type can be decomposed
to a telescope consisting of a [TyCoBinder]

A TyCoBinder represents the type of binders -- that is, the type of an
argument to a Pi-type. GHC Core currently supports two different
Pi-types:

 * A non-dependent function type,
   written with ->, e.g. ty1 -> ty2
   represented as FunTy ty1 ty2. These are
   lifted to Coercions with the corresponding FunCo.

 * A dependent compile-time-only polytype,
   written with forall, e.g.  forall (a:*). ty
   represented as ForAllTy (Bndr a v) ty

Both Pi-types classify terms/types that take an argument. In other
words, if `x` is either a function or a polytype, `x arg` makes sense
(for an appropriate `arg`).


Note [VarBndrs, TyCoVarBinders, TyConBinders, and visibility]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* A ForAllTy (used for both types and kinds) contains a TyCoVarBinder.
  Each TyCoVarBinder
      Bndr a tvis
  is equipped with tvis::ArgFlag, which says whether or not arguments
  for this binder should be visible (explicit) in source Haskell.

* A TyCon contains a list of TyConBinders.  Each TyConBinder
      Bndr a cvis
  is equipped with cvis::TyConBndrVis, which says whether or not type
  and kind arguments for this TyCon should be visible (explicit) in
  source Haskell.
This table summarises the visibility rules: --------------------------------------------------------------------------------------- | Occurrences look like this | GHC displays type as in Haskell source code |-------------------------------------------------------------------------------------- | Bndr a tvis :: TyCoVarBinder, in the binder of ForAllTy for a term | tvis :: ArgFlag | tvis = Inferred: f :: forall {a}. type Arg not allowed: f f :: forall {co}. type Arg not allowed: f | tvis = Specified: f :: forall a. type Arg optional: f or f @Int | tvis = Required: T :: forall k -> type Arg required: T * | This last form is illegal in terms: See Note [No Required TyCoBinder in terms] | | Bndr k cvis :: TyConBinder, in the TyConBinders of a TyCon | cvis :: TyConBndrVis | cvis = AnonTCB: T :: kind -> kind Required: T * | cvis = NamedTCB Inferred: T :: forall {k}. kind Arg not allowed: T | T :: forall {co}. kind Arg not allowed: T | cvis = NamedTCB Specified: T :: forall k. kind Arg not allowed[1]: T | cvis = NamedTCB Required: T :: forall k -> kind Required: T * --------------------------------------------------------------------------------------- [1] In types, in the Specified case, it would make sense to allow optional kind applications, thus (T @*), but we have not yet implemented that ---- In term declarations ---- * Inferred. Function defn, with no signature: f1 x = x We infer f1 :: forall {a}. a -> a, with 'a' Inferred It's Inferred because it doesn't appear in any user-written signature for f1 * Specified. Function defn, with signature (implicit forall): f2 :: a -> a; f2 x = x So f2 gets the type f2 :: forall a. a -> a, with 'a' Specified even though 'a' is not bound in the source code by an explicit forall * Specified. Function defn, with signature (explicit forall): f3 :: forall a. a -> a; f3 x = x So f3 gets the type f3 :: forall a. a -> a, with 'a' Specified * Inferred. Function defn, with signature (explicit forall), marked as inferred: f4 :: forall {a}. 
a -> a; f4 x = x So f4 gets the type f4 :: forall {a}. a -> a, with 'a' Inferred It's Inferred because the user marked it as such, even though it does appear in the user-written signature for f4 * Inferred/Specified. Function signature with inferred kind polymorphism. f5 :: a b -> Int So 'f5' gets the type f5 :: forall {k} (a:k->*) (b:k). a b -> Int Here 'k' is Inferred (it's not mentioned in the type), but 'a' and 'b' are Specified. * Specified. Function signature with explicit kind polymorphism f6 :: a (b :: k) -> Int This time 'k' is Specified, because it is mentioned explicitly, so we get f6 :: forall (k:*) (a:k->*) (b:k). a b -> Int * Similarly pattern synonyms: Inferred - from inferred types (e.g. no pattern type signature) - or from inferred kind polymorphism ---- In type declarations ---- * Inferred (k) data T1 a b = MkT1 (a b) Here T1's kind is T1 :: forall {k:*}. (k->*) -> k -> * The kind variable 'k' is Inferred, since it is not mentioned Note that 'a' and 'b' correspond to /Anon/ TyCoBinders in T1's kind, and Anon binders don't have a visibility flag. (Or you could think of Anon having an implicit Required flag.) * Specified (k) data T2 (a::k->*) b = MkT (a b) Here T's kind is T :: forall (k:*). (k->*) -> k -> * The kind variable 'k' is Specified, since it is mentioned in the signature. * Required (k) data T k (a::k->*) b = MkT (a b) Here T's kind is T :: forall k:* -> (k->*) -> k -> * The kind is Required, since it bound in a positional way in T's declaration Every use of T must be explicitly applied to a kind * Inferred (k1), Specified (k) data T a b (c :: k) = MkT (a b) (Proxy c) Here T's kind is T :: forall {k1:*} (k:*). (k1->*) -> k1 -> k -> * So 'k' is Specified, because it appears explicitly, but 'k1' is Inferred, because it does not Generally, in the list of TyConBinders for a TyCon, * Inferred arguments always come first * Specified, Anon and Required can be mixed e.g. data Foo (a :: Type) :: forall b. (a -> b -> Type) -> Type where ... 
Here Foo's TyConBinders are [Required 'a', Specified 'b', Anon] and its kind prints as Foo :: forall a -> forall b. (a -> b -> Type) -> Type See also Note [Required, Specified, and Inferred for types] in GHC.Tc.TyCl ---- Printing ----- We print forall types with enough syntax to tell you their visibility flag. But this is not source Haskell, and these types may not all be parsable. Specified: a list of Specified binders is written between `forall` and `.`: const :: forall a b. a -> b -> a Inferred: like Specified, but every binder is written in braces: f :: forall {k} (a:k). S k a -> Int Required: binders are put between `forall` and `->`: T :: forall k -> * ---- Other points ----- * In classic Haskell, all named binders (that is, the type variables in a polymorphic function type f :: forall a. a -> a) have been Inferred. * Inferred variables correspond to "generalized" variables from the Visible Type Applications paper (ESOP'16). Note [No Required TyCoBinder in terms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We don't allow Required foralls for term variables, including pattern synonyms and data constructors. Why? Because then an application would need a /compulsory/ type argument (possibly without an "@"?), thus (f Int); and we don't have concrete syntax for that. We could change this decision, but Required, Named TyCoBinders are rare anyway. (Most are Anons.) However the type of a term can (just about) have a required quantifier; see Note [Required quantifiers in the type of a term] in GHC.Tc.Gen.Expr. -} {- ********************************************************************** * * PredType * * ********************************************************************** -} -- | A type of the form @p@ of constraint kind represents a value whose type is -- the Haskell predicate @p@, where a predicate is what occurs before -- the @=>@ in a Haskell type. -- -- We use 'PredType' as documentation to mark those types that we guarantee to -- have this kind. 
-- -- It can be expanded into its representation, but: -- -- * The type checker must treat it as opaque -- -- * The rest of the compiler treats it as transparent -- -- Consider these examples: -- -- > f :: (Eq a) => a -> Int -- > g :: (?x :: Int -> Int) => a -> Int -- > h :: (r\l) => {r} => {l::Int | r} -- -- Here the @Eq a@ and @?x :: Int -> Int@ and @r\l@ are all called \"predicates\" type PredType = Type -- | A collection of 'PredType's type ThetaType = [PredType] {- (We don't support TREX records yet, but the setup is designed to expand to allow them.) A Haskell qualified type, such as that for f,g,h above, is represented using * a FunTy for the double arrow * with a type of kind Constraint as the function argument The predicate really does turn into a real extra argument to the function. If the argument has type (p :: Constraint) then the predicate p is represented by evidence of type p. %************************************************************************ %* * Simple constructors %* * %************************************************************************ These functions are here so that they can be used by GHC.Builtin.Types.Prim, which in turn is imported by Type -} mkTyVarTy :: TyVar -> Type mkTyVarTy v = assertPpr (isTyVar v) (ppr v <+> dcolon <+> ppr (tyVarKind v)) $ TyVarTy v mkTyVarTys :: [TyVar] -> [Type] mkTyVarTys = map mkTyVarTy -- a common use of mkTyVarTy mkTyCoVarTy :: TyCoVar -> Type mkTyCoVarTy v | isTyVar v = TyVarTy v | otherwise = CoercionTy (CoVarCo v) mkTyCoVarTys :: [TyCoVar] -> [Type] mkTyCoVarTys = map mkTyCoVarTy infixr 3 `mkFunTy`, `mkVisFunTy`, `mkInvisFunTy`, `mkVisFunTyMany`, `mkInvisFunTyMany` -- Associates to the right mkFunTy :: AnonArgFlag -> Mult -> Type -> Type -> Type mkFunTy af mult arg res = FunTy { ft_af = af , ft_mult = mult , ft_arg = arg , ft_res = res } mkScaledFunTy :: AnonArgFlag -> Scaled Type -> Type -> Type mkScaledFunTy af (Scaled mult arg) res = mkFunTy af mult arg res mkVisFunTy, mkInvisFunTy :: Mult -> 
Type -> Type -> Type mkVisFunTy = mkFunTy VisArg mkInvisFunTy = mkFunTy InvisArg mkFunTyMany :: AnonArgFlag -> Type -> Type -> Type mkFunTyMany af = mkFunTy af manyDataConTy -- | Special, common, case: Arrow type with mult Many mkVisFunTyMany :: Type -> Type -> Type mkVisFunTyMany = mkVisFunTy manyDataConTy mkInvisFunTyMany :: Type -> Type -> Type mkInvisFunTyMany = mkInvisFunTy manyDataConTy -- | Make nested arrow types mkVisFunTys :: [Scaled Type] -> Type -> Type mkVisFunTys tys ty = foldr (mkScaledFunTy VisArg) ty tys mkVisFunTysMany :: [Type] -> Type -> Type mkVisFunTysMany tys ty = foldr mkVisFunTyMany ty tys mkInvisFunTysMany :: [Type] -> Type -> Type mkInvisFunTysMany tys ty = foldr mkInvisFunTyMany ty tys -- | Like 'mkTyCoForAllTy', but does not check the occurrence of the binder -- See Note [Unused coercion variable in ForAllTy] mkForAllTy :: TyCoVar -> ArgFlag -> Type -> Type mkForAllTy tv vis ty = ForAllTy (Bndr tv vis) ty -- | Wraps foralls over the type using the provided 'TyCoVar's from left to right mkForAllTys :: [TyCoVarBinder] -> Type -> Type mkForAllTys tyvars ty = foldr ForAllTy ty tyvars -- | Wraps foralls over the type using the provided 'InvisTVBinder's from left to right mkInvisForAllTys :: [InvisTVBinder] -> Type -> Type mkInvisForAllTys tyvars = mkForAllTys (tyVarSpecToBinders tyvars) mkPiTy :: TyCoBinder -> Type -> Type mkPiTy (Anon af ty1) ty2 = mkScaledFunTy af ty1 ty2 mkPiTy (Named (Bndr tv vis)) ty = mkForAllTy tv vis ty mkPiTys :: [TyCoBinder] -> Type -> Type mkPiTys tbs ty = foldr mkPiTy ty tbs -- | 'mkNakedTyConTy' creates a nullary 'TyConApp'. In general you -- should rather use 'GHC.Core.Type.mkTyConTy', which picks the shared -- nullary TyConApp from inside the TyCon (via tyConNullaryTy. But -- we have to build the TyConApp tc [] in that TyCon field; that's -- what 'mkNakedTyConTy' is for. 
mkNakedTyConTy :: TyCon -> Type mkNakedTyConTy tycon = TyConApp tycon [] {- %************************************************************************ %* * Coercions %* * %************************************************************************ -} -- | A 'Coercion' is concrete evidence of the equality/convertibility -- of two types. -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data Coercion -- Each constructor has a "role signature", indicating the way roles are -- propagated through coercions. -- - P, N, and R stand for coercions of the given role -- - e stands for a coercion of a specific unknown role -- (think "role polymorphism") -- - "e" stands for an explicit role parameter indicating role e. -- - _ stands for a parameter that is not a Role or Coercion. -- These ones mirror the shape of types = -- Refl :: _ -> N -- A special case reflexivity for a very common case: Nominal reflexivity -- If you need Representational, use (GRefl Representational ty MRefl) -- not (SubCo (Refl ty)) Refl Type -- See Note [Refl invariant] -- GRefl :: "e" -> _ -> Maybe N -> e -- See Note [Generalized reflexive coercion] | GRefl Role Type MCoercionN -- See Note [Refl invariant] -- Use (Refl ty), not (GRefl Nominal ty MRefl) -- Use (GRefl Representational _ _), not (SubCo (GRefl Nominal _ _)) -- These ones simply lift the correspondingly-named -- Type constructors into Coercions -- TyConAppCo :: "e" -> _ -> ?? 
-> e -- See Note [TyConAppCo roles] | TyConAppCo Role TyCon [Coercion] -- lift TyConApp -- The TyCon is never a synonym; -- we expand synonyms eagerly -- But it can be a type function -- TyCon is never a saturated (->); use FunCo instead | AppCo Coercion CoercionN -- lift AppTy -- AppCo :: e -> N -> e -- See Note [Forall coercions] | ForAllCo TyCoVar KindCoercion Coercion -- ForAllCo :: _ -> N -> e -> e | FunCo Role CoercionN Coercion Coercion -- lift FunTy -- FunCo :: "e" -> N -> e -> e -> e -- Note: why doesn't FunCo have a AnonArgFlag, like FunTy? -- Because the AnonArgFlag has no impact on Core; it is only -- there to guide implicit instantiation of Haskell source -- types, and that is irrelevant for coercions, which are -- Core-only. -- These are special | CoVarCo CoVar -- :: _ -> (N or R) -- result role depends on the tycon of the variable's type -- AxiomInstCo :: e -> _ -> ?? -> e | AxiomInstCo (CoAxiom Branched) BranchIndex [Coercion] -- See also [CoAxiom index] -- The coercion arguments always *precisely* saturate -- arity of (that branch of) the CoAxiom. If there are -- any left over, we use AppCo. -- See [Coercion axioms applied to coercions] -- The roles of the argument coercions are determined -- by the cab_roles field of the relevant branch of the CoAxiom | AxiomRuleCo CoAxiomRule [Coercion] -- AxiomRuleCo is very like AxiomInstCo, but for a CoAxiomRule -- The number coercions should match exactly the expectations -- of the CoAxiomRule (i.e., the rule is fully saturated). | UnivCo UnivCoProvenance Role Type Type -- :: _ -> "e" -> _ -> _ -> e | SymCo Coercion -- :: e -> e | TransCo Coercion Coercion -- :: e -> e -> e | NthCo Role Int Coercion -- Zero-indexed; decomposes (T t0 ... 
For example (Refl T) (Refl a) (Refl b) is normalised (by mkAppCo) to (Refl (T a b)). You might think that a consequence is:
and we want to construct a coercion co which has type
and hence end up with C g : t[b] ~ F c which indeed has the same kind as t[g] ; C c. Now we have sym (C b) ; C g which can be optimized to F g. Note [CoAxiom index] ~~~~~~~~~~~~~~~~~~~~ A CoAxiom has 1 or more branches. Each branch has contains a list of the free type variables in that branch, the LHS type patterns, and the RHS type for that branch. When we apply an axiom to a list of coercions, we must choose which branch of the axiom we wish to use, as the different branches may have different numbers of free type variables. (The number of type patterns is always the same among branches, but that doesn't quite concern us here.) The Int in the AxiomInstCo constructor is the 0-indexed number of the chosen branch. Note [Forall coercions] ~~~~~~~~~~~~~~~~~~~~~~~ Constructing coercions between forall-types can be a bit tricky, because the kinds of the bound tyvars can be different. The typing rule is: kind_co : k1 ~ k2 tv1:k1 |- co : t1 ~ t2 ------------------------------------------------------------------- ForAllCo tv1 kind_co co : all tv1:k1. t1 ~ all tv1:k2. (t2[tv1 |-> tv1 |> sym kind_co]) First, the TyCoVar stored in a ForAllCo is really an optimisation: this field should be a Name, as its kind is redundant. Thinking of the field as a Name is helpful in understanding what a ForAllCo means. The kind of TyCoVar always matches the left-hand kind of the coercion. The idea is that kind_co gives the two kinds of the tyvar. See how, in the conclusion, tv1 is assigned kind k1 on the left but kind k2 on the right. Of course, a type variable can't have different kinds at the same time. So, we arbitrarily prefer the first kind when using tv1 in the inner coercion co, which shows that t1 equals t2. The last wrinkle is that we need to fix the kinds in the conclusion. In t2, tv1 is assumed to have kind k1, but it has kind k2 in the conclusion of the rule. So we do a kind-fixing substitution, replacing (tv1:k1) with (tv1:k2) |> sym kind_co. 
This substitution is slightly bizarre, because it mentions the same name with different kinds, but it *is* well-kinded, noting that `(tv1:k2) |> sym kind_co` has kind k1. This all really would work storing just a Name in the ForAllCo. But we can't add Names to, e.g., VarSets, and there generally is just an impedance mismatch in a bunch of places. So we use tv1. When we need tv2, we can use setTyVarKind. Note [Predicate coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have g :: a~b How can we coerce between types ([c]~a) => [a] -> c and ([c]~b) => [b] -> c where the equality predicate *itself* differs? Answer: we simply treat (~) as an ordinary type constructor, so these types really look like ((~) [c] a) -> [a] -> c ((~) [c] b) -> [b] -> c So the coercion between the two is obviously ((~) [c] g) -> [g] -> c Another way to see this to say that we simply collapse predicates to their representation type (see Type.coreView and Type.predTypeRep). This collapse is done by mkPredCo; there is no PredCo constructor in Coercion. This is important because we need Nth to work on predicates too: Nth 1 ((~) [c] g) = g See Simplify.simplCoercionF, which generates such selections. Note [Roles] ~~~~~~~~~~~~ Roles are a solution to the GeneralizedNewtypeDeriving problem, articulated in #1496. The full story is in docs/core-spec/core-spec.pdf. Also, see https://gitlab.haskell.org/ghc/ghc/wikis/roles-implementation Here is one way to phrase the problem: Given: newtype Age = MkAge Int type family F x type instance F Age = Bool type instance F Int = Char This compiles down to: axAge :: Age ~ Int axF1 :: F Age ~ Bool axF2 :: F Int ~ Char Then, we can make: (sym (axF1) ; F axAge ; axF2) :: Bool ~ Char Yikes! The solution is _roles_, as articulated in "Generative Type Abstraction and Type-level Computation" (POPL 2010), available at http://www.seas.upenn.edu/~sweirich/papers/popl163af-weirich.pdf The specification for roles has evolved somewhat since that paper. 
For the current full details, see the documentation in docs/core-spec.
Here are some highlights.

We label every equality with a notion of type equivalence, of which
there are three options: Nominal, Representational, and Phantom.  A
ground type is nominally equivalent only with itself.  A newtype
(which is considered a ground type in Haskell) is representationally
equivalent to its representation.  Anything is "phantomly" equivalent
to anything else. We use "N", "R", and "P" to denote the equivalences.

The axioms above would be:
  axAge :: Age ~R Int
  axF1  :: F Age ~N Bool
  axF2  :: F Int ~N Char

Then, because transitivity applies only to coercions proving the same
notion of equivalence, the above construction is impossible.

However, there is still an escape hatch: we know that any two types
that are nominally equivalent are representationally equivalent as
well.  This is what the form SubCo proves -- it "demotes" a nominal
equivalence into a representational equivalence.  So, it would seem
the following is possible:

  sub (sym axF1) ; F axAge ; sub axF2 :: Bool ~R Char   -- WRONG

What saves us here is that the arguments to a type function F, lifted
into a coercion, *must* prove nominal equivalence.  So, (F axAge) is
ill-formed, and we are safe.

Roles are attached to parameters to TyCons.  When lifting a TyCon into
a coercion (through TyConAppCo), we need to ensure that the arguments
to the TyCon respect their roles.  For example:

  data T a b = MkT a (F b)

If we know that a1 ~R a2, then we know (T a1 b) ~R (T a2 b).  But, if
we know that b1 ~R b2, we know nothing about (T a b1) and (T a b2)!
This is because the type function F branches on b's *name*, not
representation.

So, we say that 'a' has role Representational and 'b' has role Nominal.
The third role, Phantom, is for parameters not used in the type's
definition.
Given the following definition data Q a = MkQ Int the Phantom role allows us to say that (Q Bool) ~R (Q Char), because we can construct the coercion Bool ~P Char (using UnivCo). See the paper cited above for more examples and information. Note [TyConAppCo roles] ~~~~~~~~~~~~~~~~~~~~~~~ The TyConAppCo constructor has a role parameter, indicating the role at which the coercion proves equality. The choice of this parameter affects the required roles of the arguments of the TyConAppCo. To help explain it, assume the following definition: type instance F Int = Bool -- Axiom axF : F Int ~N Bool newtype Age = MkAge Int -- Axiom axAge : Age ~R Int data Foo a = MkFoo a -- Role on Foo's parameter is Representational TyConAppCo Nominal Foo axF : Foo (F Int) ~N Foo Bool For (TyConAppCo Nominal) all arguments must have role Nominal. Why? So that Foo Age ~N Foo Int does *not* hold. TyConAppCo Representational Foo (SubCo axF) : Foo (F Int) ~R Foo Bool TyConAppCo Representational Foo axAge : Foo Age ~R Foo Int For (TyConAppCo Representational), all arguments must have the roles corresponding to the result of tyConRoles on the TyCon. This is the whole point of having roles on the TyCon to begin with. So, we can have Foo Age ~R Foo Int, if Foo's parameter has role R. If a Representational TyConAppCo is over-saturated (which is otherwise fine), the spill-over arguments must all be at Nominal. This corresponds to the behavior for AppCo. TyConAppCo Phantom Foo (UnivCo Phantom Int Bool) : Foo Int ~P Foo Bool All arguments must have role Phantom. This one isn't strictly necessary for soundness, but this choice removes ambiguity. The rules here dictate the roles of the parameters to mkTyConAppCo (should be checked by Lint). Note [NthCo and newtypes] ~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have newtype N a = MkN Int type role N representational This yields axiom NTCo:N :: forall a. N a ~R Int We can then build co :: forall a b. N a ~R N b co = NTCo:N a ; sym (NTCo:N b) for any `a` and `b`. 
Because of the role annotation on N, if we use NthCo, we'll get out a representational coercion. That is: NthCo r 0 co :: forall a b. a ~R b Yikes! Clearly, this is terrible. The solution is simple: forbid NthCo to be used on newtypes if the internal coercion is representational. This is not just some corner case discovered by a segfault somewhere; it was discovered in the proof of soundness of roles and described in the "Safe Coercions" paper (ICFP '14). Note [NthCo Cached Roles] ~~~~~~~~~~~~~~~~~~~~~~~~~ Why do we cache the role of NthCo in the NthCo constructor? Because computing role(Nth i co) involves figuring out that co :: T tys1 ~ T tys2 using coercionKind, and finding (coercionRole co), and then looking at the tyConRoles of T. Avoiding bad asymptotic behaviour here means we have to compute the kind and role of a coercion simultaneously, which makes the code complicated and inefficient. This only happens for NthCo. Caching the role solves the problem, and allows coercionKind and coercionRole to be simple. See #11735 Note [InstCo roles] ~~~~~~~~~~~~~~~~~~~ Here is (essentially) the typing rule for InstCo: g :: (forall a. t1) ~r (forall a. t2) w :: s1 ~N s2 ------------------------------- InstCo InstCo g w :: (t1 [a |-> s1]) ~r (t2 [a |-> s2]) Note that the Coercion w *must* be nominal. This is necessary because the variable a might be used in a "nominal position" (that is, a place where role inference would require a nominal role) in t1 or t2. If we allowed w to be representational, we could get bogus equalities. A more nuanced treatment might be able to relax this condition somewhat, by checking if t1 and/or t2 use their bound variables in nominal ways. If not, having w be representational is OK. 
%************************************************************************
%*                                                                      *
                 UnivCoProvenance
%*                                                                      *
%************************************************************************

A UnivCo is a coercion whose proof does not directly express its role and
kind (indeed for some UnivCos, like PluginProv, there /is/ no proof).

The different kinds of UnivCo are described by UnivCoProvenance.  Really
each is entirely separate, but they all share the need to represent their
role and kind, which is done in the UnivCo constructor.
-}

-- | For simplicity, we have just one UnivCo that represents a coercion from
-- some type to some other type, with (in general) no restrictions on the
-- type. The UnivCoProvenance specifies more exactly what the coercion really
-- is and why a program should (or shouldn't!) trust the coercion.
-- It is reasonable to consider each constructor of 'UnivCoProvenance'
-- as a totally independent coercion form; their only commonality is
-- that they don't tell you what types they coerce between. (That info
-- is in the 'UnivCo' constructor of 'Coercion'.)
data UnivCoProvenance
  = PhantomProv KindCoercion      -- ^ See Note [Phantom coercions]. Only in Phantom
                                  -- roled coercions

  | ProofIrrelProv KindCoercion   -- ^ From the fact that any two coercions are
                                  -- considered equivalent. See Note [ProofIrrelProv].
                                  -- Can be used in Nominal or Representational coercions

  | PluginProv String             -- ^ From a plugin, which asserts that this coercion
                                  -- is sound. The string is for the use of the plugin.

  | CorePrepProv   -- See Note [Unsafe coercions] in GHC.Core.CoreToStg.Prep
      Bool         -- True  <=> the UnivCo must be homogeneously kinded
                   -- False <=> allow hetero-kinded, e.g. Int ~ Int#

  deriving Data.Data

instance Outputable UnivCoProvenance where
  ppr (PhantomProv _)    = text "(phantom)"
  ppr (ProofIrrelProv _) = text "(proof irrel.)"
  ppr (PluginProv str)   = parens (text "plugin" <+> brackets (text str))
  ppr (CorePrepProv _)   = text "(CorePrep)"

-- | A coercion to be filled in by the type-checker. See Note [Coercion holes]
data CoercionHole
  = CoercionHole { ch_co_var :: CoVar
                       -- See Note [CoercionHoles and coercion free variables]

                 , ch_ref   :: IORef (Maybe Coercion)
                       -- Filled in by side effect when the hole is solved
                 }

-- | The 'CoVar' standing in for this hole; it represents the hole as a
-- free variable of a coercion.
-- See Note [CoercionHoles and coercion free variables]
coHoleCoVar :: CoercionHole -> CoVar
coHoleCoVar = ch_co_var

-- | Replace the 'CoVar' of a 'CoercionHole', keeping the same mutable ref.
setCoHoleCoVar :: CoercionHole -> CoVar -> CoercionHole
setCoHoleCoVar h cv = h { ch_co_var = cv }

instance Data.Data CoercionHole where
  -- don't traverse?
  toConstr _   = abstractConstr "CoercionHole"
  gunfold _ _  = error "gunfold"
  dataTypeOf _ = mkNoRepType "CoercionHole"

instance Outputable CoercionHole where
  ppr (CoercionHole { ch_co_var = cv }) = braces (ppr cv)

instance Uniquable CoercionHole where
  getUnique (CoercionHole { ch_co_var = cv }) = getUnique cv

{- Note [Phantom coercions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
  data T a = T1 | T2
Then we have
   T s ~R T t
for any old s,t. The witness for this is (TyConAppCo T Rep co),
where (co :: s ~P t) is a phantom coercion built with PhantomProv.
The role of the UnivCo is always Phantom.  The Coercion stored is the
(nominal) kind coercion between the types
   kind(s) ~N kind (t)

Note [Coercion holes]
~~~~~~~~~~~~~~~~~~~~~~~~
During typechecking, constraint solving for type classes works by
- Generate an evidence Id,  d7 :: Num a
- Wrap it in a Wanted constraint, [W] d7 :: Num a
- Use the evidence Id where the evidence is needed
- Solve the constraint later
- When solved, add an enclosing let-binding  let d7 = .... in ....
  which actually binds d7 to the (Num a) evidence

For equality constraints we use a different strategy.  See Note [The
equality types story] in GHC.Builtin.Types.Prim for background on
equality constraints.
- For /boxed/ equality constraints, (t1 ~N t2) and (t1 ~R t2), it's just
  like type classes above. (Indeed, boxed equality constraints *are* classes.)

- But for /unboxed/ equality constraints (t1 ~R# t2) and (t1 ~N# t2)
  we use a different plan

For unboxed equalities:
- Generate a CoercionHole, a mutable variable just like a unification
  variable
- Wrap the CoercionHole in a Wanted constraint; see GHC.Tc.Utils.TcEvDest
- Use the CoercionHole in a Coercion, via HoleCo
- Solve the constraint later
- When solved, fill in the CoercionHole by side effect, instead of
  doing the let-binding thing

The main reason for all this is that there may be no good place to
let-bind the evidence for unboxed equalities:

- We emit constraints for kind coercions, to be used to cast a
  type's kind. These coercions then must be used in types. Because
  they might appear in a top-level type, there is no place to bind
  these (unlifted) coercions in the usual way.

- A coercion for (forall a. t1) ~ (forall a. t2) will look like
     forall a. (coercion for t1~t2)
  But the coercion for (t1~t2) may mention 'a', and we don't have
  let-bindings within coercions.  We could add them, but coercion
  holes are easier.

- Moreover, nothing is lost from the lack of let-bindings. For
  dictionaries we want to achieve sharing to avoid recomputing the
  dictionary.  But coercions are entirely erased, so there's little
  benefit to sharing. Indeed, even if we had a let-binding, we
  always inline types and coercions at every use site and drop the
  binding.

Other notes about HoleCo:

 * INVARIANT: CoercionHole and HoleCo are used only during type checking,
   and should never appear in Core. Just like unification variables; a
   Type can contain a TcTyVar, but only during type checking. If, one
   day, we use type-level information to separate out forms that can
   appear during type-checking vs forms that can appear in core proper,
   holes in Core will be ruled out.
 * See Note [CoercionHoles and coercion free variables]

 * Coercion holes can be compared for equality like other coercions:
   by looking at the types coerced.

Note [CoercionHoles and coercion free variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Why does a CoercionHole contain a CoVar, as well as a reference to
fill in?  Because we want to treat that CoVar as a free variable of
the coercion.  See #14584, and Note [What prevents a constraint from
floating] in GHC.Tc.Solver, item (4):

        forall k. [W] co1 :: t1 ~# t2 |> co2
                  [W] co2 :: k ~# *

Here co2 is a CoercionHole. But we /must/ know that it is free in
co1, because that's all that stops it floating outside the
implication.

Note [ProofIrrelProv]
~~~~~~~~~~~~~~~~~~~~~
A ProofIrrelProv is a coercion between coercions. For example:

  data G a where
    MkG :: G Bool

In core, we get

  G :: * -> *
  MkG :: forall (a :: *). (a ~ Bool) -> G a

Now, consider 'MkG -- that is, MkG used in a type -- and suppose we want
a proof that ('MkG a1 co1) ~ ('MkG a2 co2). This will have to be

  TyConAppCo Nominal MkG [co3, co4]
  where
    co3 :: co1 ~ co2
    co4 :: a1 ~ a2

Note that
  co1 :: a1 ~ Bool
  co2 :: a2 ~ Bool

Here,
  co3 = UnivCo (ProofIrrelProv co5) Nominal (CoercionTy co1) (CoercionTy co2)
  where
    co5 :: (a1 ~ Bool) ~ (a2 ~ Bool)
    co5 = TyConAppCo Nominal (~#) [<*>, <*>, co4, <Bool>]
-}

{- *********************************************************************
*                                                                      *
                   foldType and foldCoercion
*                                                                      *
********************************************************************* -}

{- Note [foldType]
~~~~~~~~~~~~~~~~~~
foldType is a bit more powerful than perhaps it looks:

* You can fold with an accumulating parameter, via
     TyCoFolder env (Endo a)
  Recall newtype Endo a = Endo (a->a)

* You can fold monadically with a monad M, via
     TyCoFolder env (M a)
  provided you have
     instance ..  => Monoid (M a)

Note [mapType vs foldType]
~~~~~~~~~~~~~~~~~~~~~~~~~~
We define foldType here, but mapType in module Type. Why?

* foldType is used in GHC.Core.TyCo.FVs for finding free variables.
It's a very simple function that analyses a type, but does not construct one. * mapType constructs new types, and so it needs to call the "smart constructors", mkAppTy, mkCastTy, and so on. These are sophisticated functions, and can't be defined here in GHC.Core.TyCo.Rep. Note [Specialising foldType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We inline foldType at every call site (there are not many), so that it becomes specialised for the particular monoid *and* TyCoFolder at that site. This is just for efficiency, but walking over types is done a *lot* in GHC, so worth optimising. We were worried that TyCoFolder env (Endo a) might not eta-expand. Recall newtype Endo a = Endo (a->a). In particular, given fvs :: Type -> TyCoVarSet fvs ty = appEndo (foldType tcf emptyVarSet ty) emptyVarSet tcf :: TyCoFolder enf (Endo a) tcf = TyCoFolder { tcf_tyvar = do_tv, ... } where do_tvs is tv = Endo do_it where do_it acc | tv `elemVarSet` is = acc | tv `elemVarSet` acc = acc | otherwise = acc `extendVarSet` tv we want to end up with fvs ty = go emptyVarSet ty emptyVarSet where go env (TyVarTy tv) acc = acc `extendVarSet` tv ..etc.. And indeed this happens. - Selections from 'tcf' are done at compile time - 'go' is nicely eta-expanded. We were also worried about deep_fvs :: Type -> TyCoVarSet deep_fvs ty = appEndo (foldType deep_tcf emptyVarSet ty) emptyVarSet deep_tcf :: TyCoFolder enf (Endo a) deep_tcf = TyCoFolder { tcf_tyvar = do_tv, ... } where do_tvs is tv = Endo do_it where do_it acc | tv `elemVarSet` is = acc | tv `elemVarSet` acc = acc | otherwise = deep_fvs (varType tv) `unionVarSet` acc `extendVarSet` tv Here deep_fvs and deep_tcf are mutually recursive, unlike fvs and tcf. But, amazingly, we get good code here too. GHC is careful not to mark TyCoFolder data constructor for deep_tcf as a loop breaker, so the record selections still cancel. And eta expansion still happens too. -} data TyCoFolder env a = TyCoFolder { tcf_view :: Type -> Maybe Type -- Optional "view" function -- E.g. 
                                          -- expand synonyms

      , tcf_tyvar :: env -> TyVar -> a    -- Does not automatically recur
      , tcf_covar :: env -> CoVar -> a    -- into kinds of variables

      , tcf_hole  :: env -> CoercionHole -> a
          -- ^ What to do with coercion holes.
          -- See Note [Coercion holes] in "GHC.Core.TyCo.Rep".

      , tcf_tycobinder :: env -> TyCoVar -> ArgFlag -> env
          -- ^ The returned env is used in the extended scope
      }

{-# INLINE foldTyCo #-}   -- See Note [Specialising foldType]
-- | Fold over a 'Type' or 'Coercion' with the callbacks in a 'TyCoFolder',
-- combining results with the 'Monoid' instance.  Returns the four workers
-- (for a type, a list of types, a coercion, and a list of coercions) so a
-- caller can pick the one it needs.
foldTyCo :: Monoid a => TyCoFolder env a -> env
         -> (Type -> a, [Type] -> a, Coercion -> a, [Coercion] -> a)
foldTyCo (TyCoFolder { tcf_view       = view
                     , tcf_tyvar      = tyvar
                     , tcf_tycobinder = tycobinder
                     , tcf_covar      = covar
                     , tcf_hole       = cohole }) env
  = (go_ty env, go_tys env, go_co env, go_cos env)
  where
    -- Apply the optional view function first (e.g. to expand synonyms)
    go_ty env ty | Just ty' <- view ty = go_ty env ty'
    go_ty env (TyVarTy tv)      = tyvar env tv
    go_ty env (AppTy t1 t2)     = go_ty env t1 `mappend` go_ty env t2
    go_ty _   (LitTy {})        = mempty
    go_ty env (CastTy ty co)    = go_ty env ty `mappend` go_co env co
    go_ty env (CoercionTy co)   = go_co env co
    go_ty env (FunTy _ w arg res) = go_ty env w `mappend` go_ty env arg
                                                `mappend` go_ty env res
    go_ty env (TyConApp _ tys)  = go_tys env tys
    go_ty env (ForAllTy (Bndr tv vis) inner)
      = let !env' = tycobinder env tv vis   -- Avoid building a thunk here
        in go_ty env (varType tv) `mappend` go_ty env' inner
          -- NB: the binder's own kind is folded in the *outer* env;
          -- only the body uses the extended env'

    -- Explicit recursion because using foldr builds a local
    -- loop (with env free) and I'm not confident it'll be
    -- lambda lifted in the end
    go_tys _   []     = mempty
    go_tys env (t:ts) = go_ty env t `mappend` go_tys env ts

    go_cos _   []     = mempty
    go_cos env (c:cs) = go_co env c `mappend` go_cos env cs

    go_co env (Refl ty)               = go_ty env ty
    go_co env (GRefl _ ty MRefl)      = go_ty env ty
    go_co env (GRefl _ ty (MCo co))   = go_ty env ty `mappend` go_co env co
    go_co env (TyConAppCo _ _ args)   = go_cos env args
    go_co env (AppCo c1 c2)           = go_co env c1 `mappend` go_co env c2
    go_co env (FunCo _ cw c1 c2)      = go_co env cw `mappend` go_co env c1
                                                     `mappend` go_co env c2
    go_co env (CoVarCo cv)            = covar env cv
    go_co env (AxiomInstCo _ _ args)  = go_cos env args
    go_co env (HoleCo hole)           = cohole env hole
    go_co env (UnivCo p _ t1 t2)      = go_prov env p `mappend` go_ty env t1
                                                      `mappend` go_ty env t2
    go_co env (SymCo co)              = go_co env co
    go_co env (TransCo c1 c2)         = go_co env c1 `mappend` go_co env c2
    go_co env (AxiomRuleCo _ cos)     = go_cos env cos
    go_co env (NthCo _ _ co)          = go_co env co
    go_co env (LRCo _ co)             = go_co env co
    go_co env (InstCo co arg)         = go_co env co `mappend` go_co env arg
    go_co env (KindCo co)             = go_co env co
    go_co env (SubCo co)              = go_co env co
    go_co env (ForAllCo tv kind_co co)
      = go_co env kind_co `mappend` go_ty env (varType tv)
                          `mappend` go_co env' co
      where
        env' = tycobinder env tv Inferred

    -- Fold the kind coercions stored inside a UnivCo's provenance;
    -- PluginProv/CorePrepProv carry no coercions, so contribute mempty
    go_prov env (PhantomProv co)    = go_co env co
    go_prov env (ProofIrrelProv co) = go_co env co
    go_prov _   (PluginProv _)      = mempty
    go_prov _   (CorePrepProv _)    = mempty

-- | A view function that looks through nothing.
noView :: Type -> Maybe Type
noView _ = Nothing

{- *********************************************************************
*                                                                      *
                   typeSize, coercionSize
*                                                                      *
********************************************************************* -}

-- NB: We put typeSize/coercionSize here because they are mutually
-- recursive, and have the CPR property.  If we have mutual
-- recursion across a hi-boot file, we don't get the CPR property
-- and these functions allocate a tremendous amount of rubbish.
-- It's not critical (because typeSize is really only used in
-- debug mode, but I tripped over an example (T5642) in which
-- typeSize was one of the biggest single allocators in all of GHC.
-- And it's easy to fix, so I did.

-- NB: typeSize does not respect `eqType`, in that two types that
-- are `eqType` may return different sizes. This is OK, because this
-- function is used only in reporting, not decision-making.
-- | Size of a type: one unit per leaf/constructor-ish node, summed over
-- the whole tree (kinds of forall-bound variables and coercions in casts
-- included). Used only for reporting, not decision-making; see the NB
-- comments above.
typeSize :: Type -> Int
typeSize ty = case ty of
  LitTy {}                  -> 1
  TyVarTy {}                -> 1
  AppTy fun arg             -> typeSize fun + typeSize arg
  FunTy _ _ dom cod         -> typeSize dom + typeSize cod
  ForAllTy (Bndr tv _) body -> typeSize (varType tv) + typeSize body
  TyConApp _ args           -> 1 + sum (map typeSize args)
  CastTy inner co           -> typeSize inner + coercionSize co
  CoercionTy co             -> coercionSize co

-- | Size of a coercion, mutually recursive with 'typeSize'.
-- Like 'typeSize', for reporting only.
coercionSize :: Coercion -> Int
coercionSize co0 = case co0 of
  Refl ty                -> typeSize ty
  GRefl _ ty MRefl       -> typeSize ty
  GRefl _ ty (MCo co)    -> 1 + typeSize ty + coercionSize co
  TyConAppCo _ _ args    -> 1 + sum (map coercionSize args)
  AppCo fun arg          -> coercionSize fun + coercionSize arg
  ForAllCo _ kco body    -> 1 + coercionSize body + coercionSize kco
  FunCo _ w lhs rhs      -> 1 + coercionSize lhs + coercionSize rhs + coercionSize w
  CoVarCo _              -> 1
  HoleCo _               -> 1
  AxiomInstCo _ _ args   -> 1 + sum (map coercionSize args)
  UnivCo prov _ t1 t2    -> 1 + provSize prov + typeSize t1 + typeSize t2
  SymCo co               -> 1 + coercionSize co
  TransCo lhs rhs        -> 1 + coercionSize lhs + coercionSize rhs
  NthCo _ _ co           -> 1 + coercionSize co
  LRCo _ co              -> 1 + coercionSize co
  InstCo co arg          -> 1 + coercionSize co + coercionSize arg
  KindCo co              -> 1 + coercionSize co
  SubCo co               -> 1 + coercionSize co
  AxiomRuleCo _ cos      -> 1 + sum (map coercionSize cos)

-- | Size of a 'UnivCoProvenance': counts any kind coercion it stores.
provSize :: UnivCoProvenance -> Int
provSize prov = case prov of
  PhantomProv co    -> 1 + coercionSize co
  ProofIrrelProv co -> 1 + coercionSize co
  PluginProv _      -> 1
  CorePrepProv _    -> 1

{-
************************************************************************
*                                                                      *
                    Multiplicities
*                                                                      *
************************************************************************

These definitions are here to avoid module loops, and to keep
GHC.Core.Multiplicity above this module.
-} -- | A shorthand for data with an attached 'Mult' element (the multiplicity). data Scaled a = Scaled !Mult a deriving (Data.Data) -- You might think that this would be a natural candidate for -- Functor, Traversable but Krzysztof says (!3674) "it was too easy -- to accidentally lift functions (substitutions, zonking etc.) from -- Type -> Type to Scaled Type -> Scaled Type, ignoring -- multiplicities and causing bugs". So we don't. -- -- Being strict in a is worse for performance, so we are only strict on the -- Mult part of scaled. instance (Outputable a) => Outputable (Scaled a) where ppr (Scaled _cnt t) = ppr t -- Do not print the multiplicity here because it tends to be too verbose scaledMult :: Scaled a -> Mult scaledMult (Scaled m _) = m scaledThing :: Scaled a -> a scaledThing (Scaled _ t) = t -- | Apply a function to both the Mult and the Type in a 'Scaled Type' mapScaledType :: (Type -> Type) -> Scaled Type -> Scaled Type mapScaledType f (Scaled m t) = Scaled (f m) (f t) {- | Mult is a type alias for Type. Mult must contain Type because multiplicity variables are mere type variables (of kind Multiplicity) in Haskell. So the simplest implementation is to make Mult be Type. Multiplicities can be formed with: - One: GHC.Types.One (= oneDataCon) - Many: GHC.Types.Many (= manyDataCon) - Multiplication: GHC.Types.MultMul (= multMulTyCon) So that Mult feels a bit more structured, we provide pattern synonyms and smart constructors for these. -} type Mult = Type ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/Subst.hs0000644000000000000000000012637214472400112021050 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 Type and Coercion - friends' interface -} {-# LANGUAGE BangPatterns #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Substitution into types and coercions. 
module GHC.Core.TyCo.Subst ( -- * Substitutions TCvSubst(..), TvSubstEnv, CvSubstEnv, emptyTvSubstEnv, emptyCvSubstEnv, composeTCvSubstEnv, composeTCvSubst, emptyTCvSubst, mkEmptyTCvSubst, isEmptyTCvSubst, mkTCvSubst, mkTvSubst, mkCvSubst, getTvSubstEnv, getCvSubstEnv, getTCvInScope, getTCvSubstRangeFVs, isInScope, elemTCvSubst, notElemTCvSubst, setTvSubstEnv, setCvSubstEnv, zapTCvSubst, extendTCvInScope, extendTCvInScopeList, extendTCvInScopeSet, extendTCvSubst, extendTCvSubstWithClone, extendCvSubst, extendCvSubstWithClone, extendTvSubst, extendTvSubstBinderAndInScope, extendTvSubstWithClone, extendTvSubstList, extendTvSubstAndInScope, extendTCvSubstList, unionTCvSubst, zipTyEnv, zipCoEnv, zipTvSubst, zipCvSubst, zipTCvSubst, mkTvSubstPrs, substTyWith, substTyWithCoVars, substTysWith, substTysWithCoVars, substCoWith, substTy, substTyAddInScope, substScaledTy, substTyUnchecked, substTysUnchecked, substScaledTysUnchecked, substThetaUnchecked, substTyWithUnchecked, substScaledTyUnchecked, substCoUnchecked, substCoWithUnchecked, substTyWithInScope, substTys, substScaledTys, substTheta, lookupTyVar, substCo, substCos, substCoVar, substCoVars, lookupCoVar, cloneTyVarBndr, cloneTyVarBndrs, substVarBndr, substVarBndrs, substTyVarBndr, substTyVarBndrs, substCoVarBndr, substTyVar, substTyVars, substTyCoVars, substTyCoBndr, substForAllCoBndr, substVarBndrUsing, substForAllCoBndrUsing, checkValidSubst, isValidTCvSubst, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Core.Type ( mkCastTy, mkAppTy, isCoercionTy, mkTyConApp ) import {-# SOURCE #-} GHC.Core.Coercion ( mkCoVarCo, mkKindCo, mkNthCo, mkTransCo , mkNomReflCo, mkSubCo, mkSymCo , mkFunCo, mkForAllCo, mkUnivCo , mkAxiomInstCo, mkAppCo, mkGReflCo , mkInstCo, mkLRCo, mkTyConAppCo , mkCoercionType , coercionKind, coercionLKind, coVarKindsTypesRole ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprTyVar ) import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs import GHC.Types.Var import GHC.Types.Var.Set import 
GHC.Types.Var.Env import GHC.Data.Pair import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Misc import GHC.Types.Unique.Supply import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.List (mapAccumL) {- %************************************************************************ %* * Substitutions Data type defined here to avoid unnecessary mutual recursion %* * %************************************************************************ -} -- | Type & coercion substitution -- -- #tcvsubst_invariant# -- The following invariants must hold of a 'TCvSubst': -- -- 1. The in-scope set is needed /only/ to -- guide the generation of fresh uniques -- -- 2. In particular, the /kind/ of the type variables in -- the in-scope set is not relevant -- -- 3. The substitution is only applied ONCE! This is because -- in general such application will not reach a fixed point. data TCvSubst = TCvSubst InScopeSet -- The in-scope type and kind variables TvSubstEnv -- Substitutes both type and kind variables CvSubstEnv -- Substitutes coercion variables -- See Note [Substitutions apply only once] -- and Note [Extending the TCvSubstEnv] -- and Note [Substituting types and coercions] -- and Note [The substitution invariant] -- | A substitution of 'Type's for 'TyVar's -- and 'Kind's for 'KindVar's type TvSubstEnv = TyVarEnv Type -- NB: A TvSubstEnv is used -- both inside a TCvSubst (with the apply-once invariant -- discussed in Note [Substitutions apply only once], -- and also independently in the middle of matching, -- and unification (see Types.Unify). 
-- So you have to look at the context to know if it's idempotent or
-- apply-once or whatever

-- | A substitution of 'Coercion's for 'CoVar's
type CvSubstEnv = CoVarEnv Coercion

{- Note [The substitution invariant]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When calling (substTy subst ty) it should be the case that the in-scope set in
the substitution is a superset of both:

  (SIa) The free vars of the range of the substitution
  (SIb) The free vars of ty minus the domain of the substitution

The same rules apply to other substitutions (notably GHC.Core.Subst.Subst)

* Reason for (SIa). Consider
      substTy [a :-> Maybe b] (forall b. b->a)
  we must rename the forall b, to get
      forall b2. b2 -> Maybe b
  Making 'b' part of the in-scope set forces this renaming to
  take place.

* Reason for (SIb). Consider
     substTy [a :-> Maybe b] (forall b. (a,b,x))
  Then if we use the in-scope set {b}, satisfying (SIa), there is
  a danger we will rename the forall'd variable to 'x' by mistake,
  getting this:
     forall x. (Maybe b, x, x)

  Breaking (SIb) caused the bug from #11371.

Note: if the free vars of the range of the substitution are freshly created,
then the problems of (SIa) can't happen, and so it would be sound to
ignore (SIa).

Note [Substitutions apply only once]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We use TCvSubsts to instantiate things, and we might instantiate
        forall a b. ty
with the types
        [a, b], or [b, a]
So the substitution might go [a->b, b->a].  A similar situation arises in Core
when we find a beta redex like
        (/\ a /\ b -> e) b a
Then we also end up with a substitution that permutes type variables. Other
variations happen too; for example [a -> (a, b)].

        ********************************************************
        *** So a substitution must be applied precisely once ***
        ********************************************************

A TCvSubst is not idempotent, but, unlike the non-idempotent substitution
we use during unifications, it must not be repeatedly applied.
Note [Extending the TCvSubstEnv]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See #tcvsubst_invariant# for the invariants that must hold.

This invariant allows a short-cut when the subst envs are empty:
if the TvSubstEnv and CvSubstEnv are empty --- i.e. (isEmptyTCvSubst subst)
holds --- then (substTy subst ty) does nothing.

For example, consider:
        (/\a. /\b:(a~Int). ...b..) Int
We substitute Int for 'a'.  The Unique of 'b' does not change, but
nevertheless we add 'b' to the TvSubstEnv, because b's kind does change

This invariant has several crucial consequences:

* In substVarBndr, we need to extend the TvSubstEnv
        - if the unique has changed
        - or if the kind has changed

* In substTyVar, we do not need to consult the in-scope set;
  the TvSubstEnv is enough

* In substTy, substTheta, we can short-circuit when the TvSubstEnv is empty

Note [Substituting types and coercions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Types and coercions are mutually recursive, and either may have variables
"belonging" to the other. Thus, every time we wish to substitute in a
type, we may also need to substitute in a coercion, and vice versa.
However, the constructor used to create type variables is distinct from
that of coercion variables, so we carry two VarEnvs in a TCvSubst. Note
that it would be possible to use the CoercionTy constructor to combine
these environments, but that seems like a false economy.

Note that the TvSubstEnv should *never* map a CoVar (built with the Id
constructor) and the CvSubstEnv should *never* map a TyVar. Furthermore,
the range of the TvSubstEnv should *never* include a type headed with
CoercionTy.
-}

-- | The empty type-variable substitution environment.
emptyTvSubstEnv :: TvSubstEnv
emptyTvSubstEnv = emptyVarEnv

-- | The empty coercion-variable substitution environment.
emptyCvSubstEnv :: CvSubstEnv
emptyCvSubstEnv = emptyVarEnv

composeTCvSubstEnv :: InScopeSet
                   -> (TvSubstEnv, CvSubstEnv)
                   -> (TvSubstEnv, CvSubstEnv)
                   -> (TvSubstEnv, CvSubstEnv)
-- ^ @(compose env1 env2)(x)@ is @env1(env2(x))@; i.e. apply @env2@ then @env1@.
-- It assumes that both are idempotent.
-- Typically, @env1@ is the refinement to a base substitution @env2@ composeTCvSubstEnv in_scope (tenv1, cenv1) (tenv2, cenv2) = ( tenv1 `plusVarEnv` mapVarEnv (substTy subst1) tenv2 , cenv1 `plusVarEnv` mapVarEnv (substCo subst1) cenv2 ) -- First apply env1 to the range of env2 -- Then combine the two, making sure that env1 loses if -- both bind the same variable; that's why env1 is the -- *left* argument to plusVarEnv, because the right arg wins where subst1 = TCvSubst in_scope tenv1 cenv1 -- | Composes two substitutions, applying the second one provided first, -- like in function composition. composeTCvSubst :: TCvSubst -> TCvSubst -> TCvSubst composeTCvSubst (TCvSubst is1 tenv1 cenv1) (TCvSubst is2 tenv2 cenv2) = TCvSubst is3 tenv3 cenv3 where is3 = is1 `unionInScope` is2 (tenv3, cenv3) = composeTCvSubstEnv is3 (tenv1, cenv1) (tenv2, cenv2) emptyTCvSubst :: TCvSubst emptyTCvSubst = TCvSubst emptyInScopeSet emptyTvSubstEnv emptyCvSubstEnv mkEmptyTCvSubst :: InScopeSet -> TCvSubst mkEmptyTCvSubst is = TCvSubst is emptyTvSubstEnv emptyCvSubstEnv isEmptyTCvSubst :: TCvSubst -> Bool -- See Note [Extending the TCvSubstEnv] isEmptyTCvSubst (TCvSubst _ tenv cenv) = isEmptyVarEnv tenv && isEmptyVarEnv cenv mkTCvSubst :: InScopeSet -> (TvSubstEnv, CvSubstEnv) -> TCvSubst mkTCvSubst in_scope (tenv, cenv) = TCvSubst in_scope tenv cenv mkTvSubst :: InScopeSet -> TvSubstEnv -> TCvSubst -- ^ Make a TCvSubst with specified tyvar subst and empty covar subst mkTvSubst in_scope tenv = TCvSubst in_scope tenv emptyCvSubstEnv mkCvSubst :: InScopeSet -> CvSubstEnv -> TCvSubst -- ^ Make a TCvSubst with specified covar subst and empty tyvar subst mkCvSubst in_scope cenv = TCvSubst in_scope emptyTvSubstEnv cenv getTvSubstEnv :: TCvSubst -> TvSubstEnv getTvSubstEnv (TCvSubst _ env _) = env getCvSubstEnv :: TCvSubst -> CvSubstEnv getCvSubstEnv (TCvSubst _ _ env) = env getTCvInScope :: TCvSubst -> InScopeSet getTCvInScope (TCvSubst in_scope _ _) = in_scope -- | Returns the free variables 
of the types in the range of a substitution as -- a non-deterministic set. getTCvSubstRangeFVs :: TCvSubst -> VarSet getTCvSubstRangeFVs (TCvSubst _ tenv cenv) = unionVarSet tenvFVs cenvFVs where tenvFVs = shallowTyCoVarsOfTyVarEnv tenv cenvFVs = shallowTyCoVarsOfCoVarEnv cenv isInScope :: Var -> TCvSubst -> Bool isInScope v (TCvSubst in_scope _ _) = v `elemInScopeSet` in_scope elemTCvSubst :: Var -> TCvSubst -> Bool elemTCvSubst v (TCvSubst _ tenv cenv) | isTyVar v = v `elemVarEnv` tenv | otherwise = v `elemVarEnv` cenv notElemTCvSubst :: Var -> TCvSubst -> Bool notElemTCvSubst v = not . elemTCvSubst v setTvSubstEnv :: TCvSubst -> TvSubstEnv -> TCvSubst setTvSubstEnv (TCvSubst in_scope _ cenv) tenv = TCvSubst in_scope tenv cenv setCvSubstEnv :: TCvSubst -> CvSubstEnv -> TCvSubst setCvSubstEnv (TCvSubst in_scope tenv _) cenv = TCvSubst in_scope tenv cenv zapTCvSubst :: TCvSubst -> TCvSubst zapTCvSubst (TCvSubst in_scope _ _) = TCvSubst in_scope emptyVarEnv emptyVarEnv extendTCvInScope :: TCvSubst -> Var -> TCvSubst extendTCvInScope (TCvSubst in_scope tenv cenv) var = TCvSubst (extendInScopeSet in_scope var) tenv cenv extendTCvInScopeList :: TCvSubst -> [Var] -> TCvSubst extendTCvInScopeList (TCvSubst in_scope tenv cenv) vars = TCvSubst (extendInScopeSetList in_scope vars) tenv cenv extendTCvInScopeSet :: TCvSubst -> VarSet -> TCvSubst extendTCvInScopeSet (TCvSubst in_scope tenv cenv) vars = TCvSubst (extendInScopeSetSet in_scope vars) tenv cenv extendTCvSubst :: TCvSubst -> TyCoVar -> Type -> TCvSubst extendTCvSubst subst v ty | isTyVar v = extendTvSubst subst v ty | CoercionTy co <- ty = extendCvSubst subst v co | otherwise = pprPanic "extendTCvSubst" (ppr v <+> text "|->" <+> ppr ty) extendTCvSubstWithClone :: TCvSubst -> TyCoVar -> TyCoVar -> TCvSubst extendTCvSubstWithClone subst tcv | isTyVar tcv = extendTvSubstWithClone subst tcv | otherwise = extendCvSubstWithClone subst tcv extendTvSubst :: TCvSubst -> TyVar -> Type -> TCvSubst extendTvSubst (TCvSubst 
in_scope tenv cenv) tv ty = TCvSubst in_scope (extendVarEnv tenv tv ty) cenv extendTvSubstBinderAndInScope :: TCvSubst -> TyCoBinder -> Type -> TCvSubst extendTvSubstBinderAndInScope subst (Named (Bndr v _)) ty = assert (isTyVar v ) extendTvSubstAndInScope subst v ty extendTvSubstBinderAndInScope subst (Anon {}) _ = subst extendTvSubstWithClone :: TCvSubst -> TyVar -> TyVar -> TCvSubst -- Adds a new tv -> tv mapping, /and/ extends the in-scope set extendTvSubstWithClone (TCvSubst in_scope tenv cenv) tv tv' = TCvSubst (extendInScopeSetSet in_scope new_in_scope) (extendVarEnv tenv tv (mkTyVarTy tv')) cenv where new_in_scope = tyCoVarsOfType (tyVarKind tv') `extendVarSet` tv' extendCvSubst :: TCvSubst -> CoVar -> Coercion -> TCvSubst extendCvSubst (TCvSubst in_scope tenv cenv) v co = TCvSubst in_scope tenv (extendVarEnv cenv v co) extendCvSubstWithClone :: TCvSubst -> CoVar -> CoVar -> TCvSubst extendCvSubstWithClone (TCvSubst in_scope tenv cenv) cv cv' = TCvSubst (extendInScopeSetSet in_scope new_in_scope) tenv (extendVarEnv cenv cv (mkCoVarCo cv')) where new_in_scope = tyCoVarsOfType (varType cv') `extendVarSet` cv' extendTvSubstAndInScope :: TCvSubst -> TyVar -> Type -> TCvSubst -- Also extends the in-scope set extendTvSubstAndInScope (TCvSubst in_scope tenv cenv) tv ty = TCvSubst (in_scope `extendInScopeSetSet` tyCoVarsOfType ty) (extendVarEnv tenv tv ty) cenv extendTvSubstList :: TCvSubst -> [Var] -> [Type] -> TCvSubst extendTvSubstList subst tvs tys = foldl2 extendTvSubst subst tvs tys extendTCvSubstList :: TCvSubst -> [Var] -> [Type] -> TCvSubst extendTCvSubstList subst tvs tys = foldl2 extendTCvSubst subst tvs tys unionTCvSubst :: TCvSubst -> TCvSubst -> TCvSubst -- Works when the ranges are disjoint unionTCvSubst (TCvSubst in_scope1 tenv1 cenv1) (TCvSubst in_scope2 tenv2 cenv2) = assert (tenv1 `disjointVarEnv` tenv2 && cenv1 `disjointVarEnv` cenv2 ) TCvSubst (in_scope1 `unionInScope` in_scope2) (tenv1 `plusVarEnv` tenv2) (cenv1 `plusVarEnv` cenv2) -- 
mkTvSubstPrs and zipTvSubst generate the in-scope set from -- the types given; but it's just a thunk so with a bit of luck -- it'll never be evaluated -- | Generates the in-scope set for the 'TCvSubst' from the types in the incoming -- environment. No CoVars, please! zipTvSubst :: HasDebugCallStack => [TyVar] -> [Type] -> TCvSubst zipTvSubst tvs tys = mkTvSubst (mkInScopeSet (shallowTyCoVarsOfTypes tys)) tenv where tenv = zipTyEnv tvs tys -- | Generates the in-scope set for the 'TCvSubst' from the types in the incoming -- environment. No TyVars, please! zipCvSubst :: HasDebugCallStack => [CoVar] -> [Coercion] -> TCvSubst zipCvSubst cvs cos = TCvSubst (mkInScopeSet (shallowTyCoVarsOfCos cos)) emptyTvSubstEnv cenv where cenv = zipCoEnv cvs cos zipTCvSubst :: HasDebugCallStack => [TyCoVar] -> [Type] -> TCvSubst zipTCvSubst tcvs tys = zip_tcvsubst tcvs tys $ mkEmptyTCvSubst $ mkInScopeSet $ shallowTyCoVarsOfTypes tys where zip_tcvsubst :: [TyCoVar] -> [Type] -> TCvSubst -> TCvSubst zip_tcvsubst (tv:tvs) (ty:tys) subst = zip_tcvsubst tvs tys (extendTCvSubst subst tv ty) zip_tcvsubst [] [] subst = subst -- empty case zip_tcvsubst _ _ _ = pprPanic "zipTCvSubst: length mismatch" (ppr tcvs <+> ppr tys) -- | Generates the in-scope set for the 'TCvSubst' from the types in the -- incoming environment. No CoVars, please! mkTvSubstPrs :: [(TyVar, Type)] -> TCvSubst mkTvSubstPrs [] = emptyTCvSubst mkTvSubstPrs prs = assertPpr onlyTyVarsAndNoCoercionTy (text "prs" <+> ppr prs) $ mkTvSubst in_scope tenv where tenv = mkVarEnv prs in_scope = mkInScopeSet $ shallowTyCoVarsOfTypes $ map snd prs onlyTyVarsAndNoCoercionTy = and [ isTyVar tv && not (isCoercionTy ty) | (tv, ty) <- prs ] zipTyEnv :: HasDebugCallStack => [TyVar] -> [Type] -> TvSubstEnv zipTyEnv tyvars tys | debugIsOn , not (all isTyVar tyvars && (tyvars `equalLength` tys)) = pprPanic "zipTyEnv" (ppr tyvars $$ ppr tys) | otherwise = assert (all (not . 
isCoercionTy) tys ) zipToUFM tyvars tys -- There used to be a special case for when -- ty == TyVarTy tv -- (a not-uncommon case) in which case the substitution was dropped. -- But the type-tidier changes the print-name of a type variable without -- changing the unique, and that led to a bug. Why? Pre-tidying, we had -- a type {Foo t}, where Foo is a one-method class. So Foo is really a newtype. -- And it happened that t was the type variable of the class. Post-tiding, -- it got turned into {Foo t2}. The ext-core printer expanded this using -- sourceTypeRep, but that said "Oh, t == t2" because they have the same unique, -- and so generated a rep type mentioning t not t2. -- -- Simplest fix is to nuke the "optimisation" zipCoEnv :: HasDebugCallStack => [CoVar] -> [Coercion] -> CvSubstEnv zipCoEnv cvs cos | debugIsOn , not (all isCoVar cvs) = pprPanic "zipCoEnv" (ppr cvs <+> ppr cos) | otherwise = mkVarEnv (zipEqual "zipCoEnv" cvs cos) instance Outputable TCvSubst where ppr (TCvSubst ins tenv cenv) = brackets $ sep[ text "TCvSubst", nest 2 (text "In scope:" <+> ppr ins), nest 2 (text "Type env:" <+> ppr tenv), nest 2 (text "Co env:" <+> ppr cenv) ] {- %************************************************************************ %* * Performing type or kind substitutions %* * %************************************************************************ Note [Sym and ForAllCo] ~~~~~~~~~~~~~~~~~~~~~~~ In OptCoercion, we try to push "sym" out to the leaves of a coercion. But, how do we push sym into a ForAllCo? It's a little ugly. Here is the typing rule: h : k1 ~# k2 (tv : k1) |- g : ty1 ~# ty2 ---------------------------- ForAllCo tv h g : (ForAllTy (tv : k1) ty1) ~# (ForAllTy (tv : k2) (ty2[tv |-> tv |> sym h])) Here is what we want: ForAllCo tv h' g' : (ForAllTy (tv : k2) (ty2[tv |-> tv |> sym h])) ~# (ForAllTy (tv : k1) ty1) Because the kinds of the type variables to the right of the colon are the kinds coerced by h', we know (h' : k2 ~# k1). Thus, (h' = sym h). 
Now, we can rewrite ty1 to be (ty1[tv |-> tv |> sym h' |> h']). We thus want ForAllCo tv h' g' : (ForAllTy (tv : k2) (ty2[tv |-> tv |> h'])) ~# (ForAllTy (tv : k1) (ty1[tv |-> tv |> h'][tv |-> tv |> sym h'])) We thus see that we want g' : ty2[tv |-> tv |> h'] ~# ty1[tv |-> tv |> h'] and thus g' = sym (g[tv |-> tv |> h']). Putting it all together, we get this: sym (ForAllCo tv h g) ==> ForAllCo tv (sym h) (sym g[tv |-> tv |> sym h]) Note [Substituting in a coercion hole] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It seems highly suspicious to be substituting in a coercion that still has coercion holes. Yet, this can happen in a situation like this: f :: forall k. k :~: Type -> () f Refl = let x :: forall (a :: k). [a] -> ... x = ... When we check x's type signature, we require that k ~ Type. We indeed know this due to the Refl pattern match, but the eager unifier can't make use of givens. So, when we're done looking at x's type, a coercion hole will remain. Then, when we're checking x's definition, we skolemise x's type (in order to, e.g., bring the scoped type variable `a` into scope). This requires performing a substitution for the fresh skolem variables. This substitution needs to affect the kind of the coercion hole, too -- otherwise, the kind will have an out-of-scope variable in it. More problematically in practice (we won't actually notice the out-of-scope variable ever), skolems in the kind might have too high a level, triggering a failure to uphold the invariant that no free variables in a type have a higher level than the ambient level in the type checker. In the event of having free variables in the hole's kind, I'm pretty sure we'll always have an erroneous program, so we don't need to worry what will happen when the hole gets filled in. After all, a hole relating a locally-bound type variable will be unable to be solved. This is why it's OK not to look through the IORef of a coercion hole during substitution. 
-} -- | Type substitution, see 'zipTvSubst' substTyWith :: HasDebugCallStack => [TyVar] -> [Type] -> Type -> Type -- Works only if the domain of the substitution is a -- superset of the type being substituted into substTyWith tvs tys = {-#SCC "substTyWith" #-} assert (tvs `equalLength` tys ) substTy (zipTvSubst tvs tys) -- | Type substitution, see 'zipTvSubst'. Disables sanity checks. -- The problems that the sanity checks in substTy catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substTyUnchecked to -- substTy and remove this function. Please don't use in new code. substTyWithUnchecked :: [TyVar] -> [Type] -> Type -> Type substTyWithUnchecked tvs tys = assert (tvs `equalLength` tys ) substTyUnchecked (zipTvSubst tvs tys) -- | Substitute tyvars within a type using a known 'InScopeSet'. -- Pre-condition: the 'in_scope' set should satisfy Note [The substitution -- invariant]; specifically it should include the free vars of 'tys', -- and of 'ty' minus the domain of the subst. substTyWithInScope :: InScopeSet -> [TyVar] -> [Type] -> Type -> Type substTyWithInScope in_scope tvs tys ty = assert (tvs `equalLength` tys ) substTy (mkTvSubst in_scope tenv) ty where tenv = zipTyEnv tvs tys -- | Coercion substitution, see 'zipTvSubst' substCoWith :: HasDebugCallStack => [TyVar] -> [Type] -> Coercion -> Coercion substCoWith tvs tys = assert (tvs `equalLength` tys ) substCo (zipTvSubst tvs tys) -- | Coercion substitution, see 'zipTvSubst'. Disables sanity checks. -- The problems that the sanity checks in substCo catch are described in -- Note [The substitution invariant]. -- The goal of #11371 is to migrate all the calls of substCoUnchecked to -- substCo and remove this function. Please don't use in new code. 
substCoWithUnchecked :: [TyVar] -> [Type] -> Coercion -> Coercion
substCoWithUnchecked tvs tys
  = assert (tvs `equalLength` tys )
    substCoUnchecked (zipTvSubst tvs tys)

-- | Substitute covars within a type
substTyWithCoVars :: [CoVar] -> [Coercion] -> Type -> Type
substTyWithCoVars cvs cos = substTy (zipCvSubst cvs cos)

-- | Type substitution, see 'zipTvSubst'
substTysWith :: [TyVar] -> [Type] -> [Type] -> [Type]
substTysWith tvs tys = assert (tvs `equalLength` tys )
                       substTys (zipTvSubst tvs tys)

-- | Type substitution, see 'zipTvSubst'
substTysWithCoVars :: [CoVar] -> [Coercion] -> [Type] -> [Type]
substTysWithCoVars cvs cos = assert (cvs `equalLength` cos )
                             substTys (zipCvSubst cvs cos)

-- | Substitute within a 'Type' after adding the free variables of the type
-- to the in-scope set. This is useful for the case when the free variables
-- aren't already in the in-scope set or easily available.
-- See also Note [The substitution invariant].
substTyAddInScope :: TCvSubst -> Type -> Type
substTyAddInScope subst ty =
  substTy (extendTCvInScopeSet subst $ tyCoVarsOfType ty) ty

-- | When calling `substTy` it should be the case that the in-scope set in
-- the substitution is a superset of the free vars of the range of the
-- substitution.
-- See also Note [The substitution invariant].
-- Checks both the TvSubstEnv and the CvSubstEnv ranges against the
-- in-scope set.
isValidTCvSubst :: TCvSubst -> Bool
isValidTCvSubst (TCvSubst in_scope tenv cenv) =
  (tenvFVs `varSetInScope` in_scope) &&
  (cenvFVs `varSetInScope` in_scope)
  where
  tenvFVs = shallowTyCoVarsOfTyVarEnv tenv
  cenvFVs = shallowTyCoVarsOfCoVarEnv cenv

-- | This checks if the substitution satisfies the invariant from
-- Note [The substitution invariant].
-- Debug-only guard: first asserts 'isValidTCvSubst' (invariant SIa: range
-- FVs in scope), then asserts that the free vars of the things being
-- substituted into (minus the subst domain) are also in scope (SIb);
-- returns its last argument unchanged.
checkValidSubst :: HasDebugCallStack => TCvSubst -> [Type] -> [Coercion] -> a -> a
checkValidSubst subst@(TCvSubst in_scope tenv cenv) tys cos a
  = assertPpr (isValidTCvSubst subst)
              (text "in_scope" <+> ppr in_scope $$
               text "tenv" <+> ppr tenv $$
               text "tenvFVs" <+> ppr (shallowTyCoVarsOfTyVarEnv tenv) $$
               text "cenv" <+> ppr cenv $$
               text "cenvFVs" <+> ppr (shallowTyCoVarsOfCoVarEnv cenv) $$
               text "tys" <+> ppr tys $$
               text "cos" <+> ppr cos) $
    assertPpr tysCosFVsInScope
              (text "in_scope" <+> ppr in_scope $$
               text "tenv" <+> ppr tenv $$
               text "cenv" <+> ppr cenv $$
               text "tys" <+> ppr tys $$
               text "cos" <+> ppr cos $$
               text "needInScope" <+> ppr needInScope)
    a
  where
  substDomain = nonDetKeysUFM tenv ++ nonDetKeysUFM cenv
    -- It's OK to use nonDetKeysUFM here, because we only use this list to
    -- remove some elements from a set
  needInScope = (shallowTyCoVarsOfTypes tys `unionVarSet` shallowTyCoVarsOfCos cos)
                `delListFromUniqSet_Directly` substDomain
  tysCosFVsInScope = needInScope `varSetInScope` in_scope

-- | Substitute within a 'Type'
-- The substitution has to satisfy the invariants described in
-- Note [The substitution invariant].
substTy :: HasDebugCallStack => TCvSubst -> Type -> Type
substTy subst ty
  | isEmptyTCvSubst subst = ty
    -- short-circuit; see Note [Extending the TCvSubstEnv]
  | otherwise             = checkValidSubst subst [ty] [] $
                            subst_ty subst ty

-- | Substitute within a 'Type' disabling the sanity checks.
-- The problems that the sanity checks in substTy catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substTyUnchecked to
-- substTy and remove this function. Please don't use in new code.
substTyUnchecked :: TCvSubst -> Type -> Type
substTyUnchecked subst ty
  | isEmptyTCvSubst subst = ty
  | otherwise             = subst_ty subst ty

-- | Substitute within a 'Scaled Type': both the multiplicity and the type.
substScaledTy :: HasDebugCallStack => TCvSubst -> Scaled Type -> Scaled Type
substScaledTy subst scaled_ty = mapScaledType (substTy subst) scaled_ty

-- | Like 'substScaledTy', but without the sanity checks of 'substTy'.
substScaledTyUnchecked :: HasDebugCallStack => TCvSubst -> Scaled Type
                       -> Scaled Type
substScaledTyUnchecked subst scaled_ty = mapScaledType (substTyUnchecked subst) scaled_ty

-- | Substitute within several 'Type's
-- The substitution has to satisfy the invariants described in
-- Note [The substitution invariant].
substTys :: HasDebugCallStack => TCvSubst -> [Type] -> [Type]
substTys subst tys
  | isEmptyTCvSubst subst = tys
  | otherwise = checkValidSubst subst tys [] $ map (subst_ty subst) tys

-- | Substitute within several 'Scaled Type's; validity is checked over both
-- the multiplicities and the payload types.
substScaledTys :: HasDebugCallStack => TCvSubst -> [Scaled Type] -> [Scaled Type]
substScaledTys subst scaled_tys
  | isEmptyTCvSubst subst = scaled_tys
  | otherwise = checkValidSubst subst (map scaledMult scaled_tys ++ map scaledThing scaled_tys) [] $
                map (mapScaledType (subst_ty subst)) scaled_tys

-- | Substitute within several 'Type's disabling the sanity checks.
-- The problems that the sanity checks in substTys catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substTysUnchecked to
-- substTys and remove this function. Please don't use in new code.
substTysUnchecked :: TCvSubst -> [Type] -> [Type]
substTysUnchecked subst tys
 | isEmptyTCvSubst subst = tys
 | otherwise = map (subst_ty subst) tys

-- | Like 'substScaledTys', but without the sanity checks.
substScaledTysUnchecked :: TCvSubst -> [Scaled Type] -> [Scaled Type]
substScaledTysUnchecked subst tys
 | isEmptyTCvSubst subst = tys
 | otherwise = map (mapScaledType (subst_ty subst)) tys

-- | Substitute within a 'ThetaType'
-- The substitution has to satisfy the invariants described in
-- Note [The substitution invariant].
substTheta :: HasDebugCallStack => TCvSubst -> ThetaType -> ThetaType
substTheta = substTys

-- | Substitute within a 'ThetaType' disabling the sanity checks.
-- The problems that the sanity checks in substTys catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substThetaUnchecked to
-- substTheta and remove this function. Please don't use in new code.
substThetaUnchecked :: TCvSubst -> ThetaType -> ThetaType
substThetaUnchecked = substTysUnchecked

subst_ty :: TCvSubst -> Type -> Type
-- subst_ty is the main workhorse for type substitution
--
-- Note that the in_scope set is poked only if we hit a forall
-- so it may often never be fully computed
--
-- NB: the pervasive ($!) applications force each substituted sub-part
-- eagerly; keep them if you modify this function.
subst_ty subst ty
   = go ty
  where
    go (TyVarTy tv)      = substTyVar subst tv
    go (AppTy fun arg)   = (mkAppTy $! (go fun)) $! (go arg)
                -- The mkAppTy smart constructor is important
                -- we might be replacing (a Int), represented with App
                -- by [Int], represented with TyConApp
    go ty@(TyConApp tc []) = tc `seq` ty  -- avoid allocation in this common case
    go (TyConApp tc tys) = (mkTyConApp $! tc) $! strictMap go tys
               -- NB: mkTyConApp, not TyConApp.
               -- mkTyConApp has optimizations.
               -- See Note [Using synonyms to compress types]
               -- in GHC.Core.Type
    go ty@(FunTy { ft_mult = mult, ft_arg = arg, ft_res = res })
      = let !mult' = go mult
            !arg'  = go arg
            !res'  = go res
        in ty { ft_mult = mult', ft_arg = arg', ft_res = res' }
    go (ForAllTy (Bndr tv vis) ty)
                         = case substVarBndrUnchecked subst tv of
                             (subst', tv') ->
                               (ForAllTy $! ((Bndr $! tv') vis)) $!
                                            (subst_ty subst' ty)
    go (LitTy n)         = LitTy $! n
    go (CastTy ty co)    = (mkCastTy $! (go ty)) $! (subst_co subst co)
    go (CoercionTy co)   = CoercionTy $! (subst_co subst co)

-- | Look up a 'TyVar' in the substitution; an unmapped variable is
-- returned unchanged.  See Note [Extending the TCvSubstEnv] for why the
-- in-scope set need not be consulted.
substTyVar :: TCvSubst -> TyVar -> Type
substTyVar (TCvSubst _ tenv _) tv
  = assert (isTyVar tv) $
    case lookupVarEnv tenv tv of
      Just ty -> ty
      Nothing -> TyVarTy tv

-- | 'substTyVar' over a list.
substTyVars :: TCvSubst -> [TyVar] -> [Type]
substTyVars subst = map $ substTyVar subst

-- | 'substTyCoVar' over a list.
substTyCoVars :: TCvSubst -> [TyCoVar] -> [Type]
substTyCoVars subst = map $ substTyCoVar subst

-- | Substitute a variable that may be either a 'TyVar' or a 'CoVar';
-- a substituted covar is wrapped in 'CoercionTy' to make a 'Type'.
substTyCoVar :: TCvSubst -> TyCoVar -> Type
substTyCoVar subst tv
  | isTyVar tv = substTyVar subst tv
  | otherwise  = CoercionTy $ substCoVar subst tv

lookupTyVar :: TCvSubst -> TyVar -> Maybe Type
        -- See Note [Extending the TCvSubst]
lookupTyVar (TCvSubst _ tenv _) tv
  = assert (isTyVar tv )
    lookupVarEnv tenv tv

-- | Substitute within a 'Coercion'
-- The substitution has to satisfy the invariants described in
-- Note [The substitution invariant].
substCo :: HasDebugCallStack => TCvSubst -> Coercion -> Coercion
substCo subst co
  | isEmptyTCvSubst subst = co
    -- short-circuit; see Note [Extending the TCvSubstEnv]
  | otherwise = checkValidSubst subst [] [co] $ subst_co subst co

-- | Substitute within a 'Coercion' disabling sanity checks.
-- The problems that the sanity checks in substCo catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substCoUnchecked to
-- substCo and remove this function. Please don't use in new code.
substCoUnchecked :: TCvSubst -> Coercion -> Coercion
substCoUnchecked subst co
  | isEmptyTCvSubst subst = co
  | otherwise = subst_co subst co

-- | Substitute within several 'Coercion's
-- The substitution has to satisfy the invariants described in
-- Note [The substitution invariant].
-- | Substitute within several 'Coercion's, validating the result against
-- Note [The substitution invariant] via 'checkValidSubst'.
substCos :: HasDebugCallStack => TCvSubst -> [Coercion] -> [Coercion]
substCos subst cos
  | isEmptyTCvSubst subst = cos   -- Fast path: nothing to do
  | otherwise = checkValidSubst subst [] cos $ map (subst_co subst) cos

-- | The workhorse for coercion substitution.  No validity checking here;
-- callers ('substCo', 'substCos') do that.  Note the pervasive strict
-- applications ($!): they force each substituted sub-tree as it is built.
subst_co :: TCvSubst -> Coercion -> Coercion
subst_co subst co
  = go co
  where
    go_ty :: Type -> Type
    go_ty = subst_ty subst

    go_mco :: MCoercion -> MCoercion
    go_mco MRefl    = MRefl
    go_mco (MCo co) = MCo (go co)

    go :: Coercion -> Coercion
    go (Refl ty)             = mkNomReflCo $! (go_ty ty)
    go (GRefl r ty mco)      = (mkGReflCo r $! (go_ty ty)) $! (go_mco mco)
    go (TyConAppCo r tc args)= let args' = map go args
                               in  args' `seqList` mkTyConAppCo r tc args'
    go (AppCo co arg)        = (mkAppCo $! go co) $! go arg
    go (ForAllCo tv kind_co co)
      = case substForAllCoBndrUnchecked subst tv kind_co of
          (subst', tv', kind_co')
            -> ((mkForAllCo $! tv') $! kind_co') $! subst_co subst' co
    go (FunCo r w co1 co2)   = ((mkFunCo r $! go w) $! go co1) $! go co2
    go (CoVarCo cv)          = substCoVar subst cv
    go (AxiomInstCo con ind cos) = mkAxiomInstCo con ind $! map go cos
    go (UnivCo p r t1 t2)    = (((mkUnivCo $! go_prov p) $! r) $!
                                (go_ty t1)) $! (go_ty t2)
    go (SymCo co)            = mkSymCo $! (go co)
    go (TransCo co1 co2)     = (mkTransCo $! (go co1)) $! (go co2)
    go (NthCo r d co)        = mkNthCo r d $! (go co)
    go (LRCo lr co)          = mkLRCo lr $! (go co)
    go (InstCo co arg)       = (mkInstCo $! (go co)) $! go arg
    go (KindCo co)           = mkKindCo $! (go co)
    go (SubCo co)            = mkSubCo $! (go co)
    go (AxiomRuleCo c cs)    = let cs1 = map go cs
                               in cs1 `seqList` AxiomRuleCo c cs1
    go (HoleCo h)            = HoleCo $! go_hole h

    go_prov (PhantomProv kco)    = PhantomProv (go kco)
    go_prov (ProofIrrelProv kco) = ProofIrrelProv (go kco)
    go_prov p@(PluginProv _)     = p
    go_prov p@(CorePrepProv _)   = p

    -- See Note [Substituting in a coercion hole]
    go_hole h@(CoercionHole { ch_co_var = cv })
      = h { ch_co_var = updateVarType go_ty cv }

substForAllCoBndr :: TCvSubst -> TyCoVar -> KindCoercion
                  -> (TCvSubst, TyCoVar, Coercion)
substForAllCoBndr subst
  = substForAllCoBndrUsing False (substCo subst) subst

-- | Like 'substForAllCoBndr', but disables sanity checks.
-- The problems that the sanity checks in substCo catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substCoUnchecked to
-- substCo and remove this function. Please don't use in new code.
substForAllCoBndrUnchecked :: TCvSubst -> TyCoVar -> KindCoercion
                           -> (TCvSubst, TyCoVar, Coercion)
substForAllCoBndrUnchecked subst
  = substForAllCoBndrUsing False (substCoUnchecked subst) subst

-- See Note [Sym and ForAllCo]
substForAllCoBndrUsing :: Bool  -- apply sym to binder?
                       -> (Coercion -> Coercion)  -- transformation to kind co
                       -> TCvSubst -> TyCoVar -> KindCoercion
                       -> (TCvSubst, TyCoVar, KindCoercion)
substForAllCoBndrUsing sym sco subst old_var
  | isTyVar old_var = substForAllCoTyVarBndrUsing sym sco subst old_var
  | otherwise       = substForAllCoCoVarBndrUsing sym sco subst old_var

substForAllCoTyVarBndrUsing :: Bool  -- apply sym to binder?
                            -> (Coercion -> Coercion)  -- transformation to kind co
                            -> TCvSubst -> TyVar -> KindCoercion
                            -> (TCvSubst, TyVar, KindCoercion)
substForAllCoTyVarBndrUsing sym sco (TCvSubst in_scope tenv cenv)
                            old_var old_kind_co
  = assert (isTyVar old_var )
    ( TCvSubst (in_scope `extendInScopeSet` new_var) new_env cenv
    , new_var, new_kind_co )
  where
    -- When sym is on, the binder's occurrences are rewritten to a cast,
    -- otherwise to a plain tyvar; identical binders are simply zapped.
    new_env | no_change && not sym = delVarEnv tenv old_var
            | sym       = extendVarEnv tenv old_var $
                          TyVarTy new_var `CastTy` new_kind_co
            | otherwise = extendVarEnv tenv old_var (TyVarTy new_var)

    no_kind_change = noFreeVarsOfCo old_kind_co
    no_change = no_kind_change && (new_var == old_var)

    new_kind_co | no_kind_change = old_kind_co
                | otherwise      = sco old_kind_co

    new_ki1 = coercionLKind new_kind_co
    -- We could do substitution to (tyVarKind old_var). We don't do so because
    -- we already substituted new_kind_co, which contains the kind information
    -- we want. We don't want to do substitution once more. Also, in most cases,
    -- new_kind_co is a Refl, in which case coercionKind is really fast.

    new_var = uniqAway in_scope (setTyVarKind old_var new_ki1)

substForAllCoCoVarBndrUsing :: Bool  -- apply sym to binder?
                            -> (Coercion -> Coercion)  -- transformation to kind co
                            -> TCvSubst -> CoVar -> KindCoercion
                            -> (TCvSubst, CoVar, KindCoercion)
substForAllCoCoVarBndrUsing sym sco (TCvSubst in_scope tenv cenv)
                            old_var old_kind_co
  = assert (isCoVar old_var )
    ( TCvSubst (in_scope `extendInScopeSet` new_var) tenv new_cenv
    , new_var, new_kind_co )
  where
    new_cenv | no_change && not sym = delVarEnv cenv old_var
             | otherwise = extendVarEnv cenv old_var (mkCoVarCo new_var)

    no_kind_change = noFreeVarsOfCo old_kind_co
    no_change = no_kind_change && (new_var == old_var)

    new_kind_co | no_kind_change = old_kind_co
                | otherwise      = sco old_kind_co

    -- The two sides of the (heterogeneous) kind coercion; sym picks
    -- which side the new binder's type comes from.
    Pair h1 h2 = coercionKind new_kind_co

    new_var       = uniqAway in_scope $ mkCoVar (varName old_var) new_var_type
    new_var_type  | sym       = h2
                  | otherwise = h1

-- | Look a covar up in the substitution; unmapped covars are returned
-- unchanged (wrapped in 'CoVarCo').
substCoVar :: TCvSubst -> CoVar -> Coercion
substCoVar (TCvSubst _ _ cenv) cv
  = case lookupVarEnv cenv cv of
      Just co -> co
      Nothing -> CoVarCo cv

substCoVars :: TCvSubst -> [CoVar] -> [Coercion]
substCoVars subst cvs = map (substCoVar subst) cvs

lookupCoVar :: TCvSubst -> Var -> Maybe Coercion
lookupCoVar (TCvSubst _ _ cenv) v = lookupVarEnv cenv v

substTyVarBndr :: HasDebugCallStack => TCvSubst -> TyVar -> (TCvSubst, TyVar)
substTyVarBndr = substTyVarBndrUsing substTy

substTyVarBndrs :: HasDebugCallStack => TCvSubst -> [TyVar] -> (TCvSubst, [TyVar])
substTyVarBndrs = mapAccumL substTyVarBndr

substVarBndr :: HasDebugCallStack => TCvSubst -> TyCoVar -> (TCvSubst, TyCoVar)
substVarBndr = substVarBndrUsing substTy

substVarBndrs :: HasDebugCallStack => TCvSubst -> [TyCoVar] -> (TCvSubst, [TyCoVar])
substVarBndrs = mapAccumL substVarBndr

substCoVarBndr :: HasDebugCallStack => TCvSubst -> CoVar -> (TCvSubst, CoVar)
substCoVarBndr = substCoVarBndrUsing substTy

-- | Like 'substVarBndr', but disables sanity checks.
-- The problems that the sanity checks in substTy catch are described in
-- Note [The substitution invariant].
-- The goal of #11371 is to migrate all the calls of substTyUnchecked to
-- substTy and remove this function. Please don't use in new code.
substVarBndrUnchecked :: TCvSubst -> TyCoVar -> (TCvSubst, TyCoVar)
substVarBndrUnchecked = substVarBndrUsing substTyUnchecked

-- | Dispatch on whether the binder is a tyvar or a covar.
substVarBndrUsing :: (TCvSubst -> Type -> Type)
                  -> TCvSubst -> TyCoVar -> (TCvSubst, TyCoVar)
substVarBndrUsing subst_fn subst v
  | isTyVar v = substTyVarBndrUsing subst_fn subst v
  | otherwise = substCoVarBndrUsing subst_fn subst v

-- | Substitute a tyvar in a binding position, returning an
-- extended subst and a new tyvar.
-- Use the supplied function to substitute in the kind
substTyVarBndrUsing
  :: (TCvSubst -> Type -> Type)  -- ^ Use this to substitute in the kind
  -> TCvSubst -> TyVar -> (TCvSubst, TyVar)
substTyVarBndrUsing subst_fn subst@(TCvSubst in_scope tenv cenv) old_var
  = assertPpr _no_capture (pprTyVar old_var $$ pprTyVar new_var $$ ppr subst) $
    assert (isTyVar old_var )
    (TCvSubst (in_scope `extendInScopeSet` new_var) new_env cenv, new_var)
  where
    new_env | no_change = delVarEnv tenv old_var
            | otherwise = extendVarEnv tenv old_var (TyVarTy new_var)

    _no_capture = not (new_var `elemVarSet` shallowTyCoVarsOfTyVarEnv tenv)
    -- Assertion check that we are not capturing something in the substitution

    old_ki = tyVarKind old_var
    no_kind_change = noFreeVarsOfType old_ki -- verify that kind is closed
    no_change = no_kind_change && (new_var == old_var)
        -- no_change means that the new_var is identical in
        -- all respects to the old_var (same unique, same kind)
        -- See Note [Extending the TCvSubst]
        --
        -- In that case we don't need to extend the substitution
        -- to map old to new. But instead we must zap any
        -- current substitution for the variable. For example:
        --     (\x.e) with id_subst = [x |-> e']
        -- Here we must simply zap the substitution for x

    new_var | no_kind_change = uniqAway in_scope old_var
            | otherwise = uniqAway in_scope $
                          setTyVarKind old_var (subst_fn subst old_ki)
        -- The uniqAway part makes sure the new variable is
        -- not already in scope

-- | Substitute a covar in a binding position, returning an
-- extended subst and a new covar.
-- Use the supplied function to substitute in the kind
substCoVarBndrUsing
  :: (TCvSubst -> Type -> Type)
  -> TCvSubst -> CoVar -> (TCvSubst, CoVar)
substCoVarBndrUsing subst_fn subst@(TCvSubst in_scope tenv cenv) old_var
  = assert (isCoVar old_var)
    (TCvSubst (in_scope `extendInScopeSet` new_var) tenv new_cenv, new_var)
  where
    new_co         = mkCoVarCo new_var
    no_kind_change = noFreeVarsOfTypes [t1, t2]
    no_change      = new_var == old_var && no_kind_change

    new_cenv | no_change = delVarEnv cenv old_var
             | otherwise = extendVarEnv cenv old_var new_co

    new_var       = uniqAway in_scope subst_old_var
    subst_old_var = mkCoVar (varName old_var) new_var_type

    (_, _, t1, t2, role) = coVarKindsTypesRole old_var
    t1' = subst_fn subst t1
    t2' = subst_fn subst t2
    new_var_type = mkCoercionType role t1' t2'
                  -- It's important to do the substitution for coercions,
                  -- because they can have free type variables

-- | Clone a tyvar binder with the given 'Unique', substituting in its
-- kind and extending both the in-scope set and the type env.
cloneTyVarBndr :: TCvSubst -> TyVar -> Unique -> (TCvSubst, TyVar)
cloneTyVarBndr subst@(TCvSubst in_scope tv_env cv_env) tv uniq
  = assertPpr (isTyVar tv) (ppr tv) -- I think it's only called on TyVars
    (TCvSubst (extendInScopeSet in_scope tv')
              (extendVarEnv tv_env tv (mkTyVarTy tv')) cv_env, tv')
  where
    old_ki = tyVarKind tv
    no_kind_change = noFreeVarsOfType old_ki -- verify that kind is closed

    tv1 | no_kind_change = tv
        | otherwise      = setTyVarKind tv (substTy subst old_ki)

    tv' = setVarUnique tv1 uniq

cloneTyVarBndrs :: TCvSubst -> [TyVar] -> UniqSupply -> (TCvSubst, [TyVar])
cloneTyVarBndrs subst []     _usupply = (subst, [])
cloneTyVarBndrs subst (t:ts)  usupply = (subst'', tv:tvs)
  where
    (uniq, usupply') = takeUniqFromSupply usupply
    (subst' , tv )   = cloneTyVarBndr subst t uniq
    (subst'', tvs)   = cloneTyVarBndrs subst' ts usupply'

substTyCoBndr :: TCvSubst -> TyCoBinder -> (TCvSubst, TyCoBinder)
substTyCoBndr subst (Anon af ty)          = (subst, Anon af (substScaledTy subst ty))
substTyCoBndr subst (Named (Bndr tv vis)) = (subst', Named (Bndr tv' vis))
  where
    (subst', tv') = substVarBndr subst tv
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/Tidy.hs0000644000000000000000000002364514472400112020660 0ustar0000000000000000{-# LANGUAGE BangPatterns #-}

{-# OPTIONS_GHC -Wno-incomplete-record-updates #-}
{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-}

-- | Tidying types and coercions for printing in error messages.
module GHC.Core.TyCo.Tidy
  (
        -- * Tidying type related things up for printing
        tidyType,      tidyTypes,
        tidyOpenType,  tidyOpenTypes,
        tidyVarBndr, tidyVarBndrs, tidyFreeTyCoVars, avoidNameClashes,
        tidyOpenTyCoVar, tidyOpenTyCoVars,
        tidyTyCoVarOcc,
        tidyTopType,
        tidyCo, tidyCos,
        tidyTyCoVarBinder, tidyTyCoVarBinders
  ) where

import GHC.Prelude

import GHC.Core.TyCo.Rep
import GHC.Core.TyCo.FVs (tyCoVarsOfTypesWellScoped, tyCoVarsOfTypeList)

import GHC.Types.Name hiding (varName)
import GHC.Types.Var
import GHC.Types.Var.Env
import GHC.Utils.Misc (strictMap)

import Data.List (mapAccumL)

{-
%************************************************************************
%*                                                                      *
\subsection{TidyType}
%*                                                                      *
%************************************************************************
-}

-- | This tidies up a type for printing in an error message, or in
-- an interface file.
--
-- It doesn't change the uniques at all, just the print names.
tidyVarBndrs :: TidyEnv -> [TyCoVar] -> (TidyEnv, [TyCoVar])
tidyVarBndrs tidy_env tvs
  = mapAccumL tidyVarBndr (avoidNameClashes tvs tidy_env) tvs

tidyVarBndr :: TidyEnv -> TyCoVar -> (TidyEnv, TyCoVar)
tidyVarBndr tidy_env@(occ_env, subst) var
  = case tidyOccName occ_env (getHelpfulOccName var) of
      (occ_env', occ') -> ((occ_env', subst'), var')
        where
          subst' = extendVarEnv subst var var'
          -- The kind of the binder may itself mention tidied variables,
          -- so tidy it too
          var'   = updateVarType (tidyType tidy_env) (setVarName var name')
          name'  = tidyNameOcc name occ'
          name   = varName var

avoidNameClashes :: [TyCoVar] -> TidyEnv -> TidyEnv
-- Seed the occ_env with clashes among the names, see
-- Note [Tidying multiple names at once] in GHC.Types.Name.Occurrence
avoidNameClashes tvs (occ_env, subst)
  = (avoidClashesOccEnv occ_env occs, subst)
  where
    occs = map getHelpfulOccName tvs

getHelpfulOccName :: TyCoVar -> OccName
-- A TcTyVar with a System Name is probably a
-- unification variable; when we tidy them we give them a trailing
-- "0" (or 1 etc) so that they don't take precedence for the
-- un-modified name. Plus, indicating a unification variable in
-- this way is a helpful clue for users
getHelpfulOccName tv
  | isSystemName name, isTcTyVar tv
  = mkTyVarOcc (occNameString occ ++ "0")
  | otherwise
  = occ
  where
    name = varName tv
    occ  = getOccName name

tidyTyCoVarBinder :: TidyEnv -> VarBndr TyCoVar vis
                  -> (TidyEnv, VarBndr TyCoVar vis)
tidyTyCoVarBinder tidy_env (Bndr tv vis)
  = (tidy_env', Bndr tv' vis)
  where
    (tidy_env', tv') = tidyVarBndr tidy_env tv

tidyTyCoVarBinders :: TidyEnv -> [VarBndr TyCoVar vis]
                   -> (TidyEnv, [VarBndr TyCoVar vis])
tidyTyCoVarBinders tidy_env tvbs
  = mapAccumL tidyTyCoVarBinder
              (avoidNameClashes (binderVars tvbs) tidy_env) tvbs

---------------
tidyFreeTyCoVars :: TidyEnv -> [TyCoVar] -> TidyEnv
-- ^ Add the free 'TyVar's to the env in tidy form,
-- so that we can tidy the type they are free in
tidyFreeTyCoVars tidy_env tyvars
  = fst (tidyOpenTyCoVars tidy_env tyvars)

---------------
tidyOpenTyCoVars :: TidyEnv -> [TyCoVar] -> (TidyEnv, [TyCoVar])
tidyOpenTyCoVars env tyvars = mapAccumL tidyOpenTyCoVar env tyvars

---------------
tidyOpenTyCoVar :: TidyEnv -> TyCoVar -> (TidyEnv, TyCoVar)
-- ^ Treat a new 'TyCoVar' as a binder, and give it a fresh tidy name
-- using the environment if one has not already been allocated. See
-- also 'tidyVarBndr'
tidyOpenTyCoVar env@(_, subst) tyvar
  = case lookupVarEnv subst tyvar of
      Just tyvar' -> (env, tyvar')              -- Already substituted
      Nothing     ->
        let env' = tidyFreeTyCoVars env (tyCoVarsOfTypeList (tyVarKind tyvar))
        in tidyVarBndr env' tyvar               -- Treat it as a binder

---------------
-- | Tidy an occurrence of a variable: rename it if it is in the
-- substitution, otherwise just tidy its type in place.
tidyTyCoVarOcc :: TidyEnv -> TyCoVar -> TyCoVar
tidyTyCoVarOcc env@(_, subst) tv
  = case lookupVarEnv subst tv of
      Nothing  -> updateVarType (tidyType env) tv
      Just tv' -> tv'

---------------

{- Note [Strictness in tidyType and friends]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since the result of tidying will be inserted into the HPT, a potentially
long-lived structure, we generally want to avoid pieces of the old AST
being retained by the thunks produced by tidying.

For this reason we take great care to ensure that all pieces of the tidied AST
are evaluated strictly.  So you will see lots of strict applications ($!) and
uses of `strictMap` in `tidyType`, `tidyTypes` and `tidyCo`.

In the case of tidying of lists (e.g. lists of arguments) we prefer to use
`strictMap f xs` rather than `seqList (map f xs)` as the latter will
unnecessarily allocate a thunk, which will then be almost-immediately
evaluated, for each list element.

Making `tidyType` strict has a rather large effect on performance: see #14738.
Sometimes as much as a 5% reduction in allocation.
-}

-- | Tidy a list of Types
--
-- See Note [Strictness in tidyType and friends]
tidyTypes :: TidyEnv -> [Type] -> [Type]
tidyTypes env tys = strictMap (tidyType env) tys

---------------

-- | Tidy a Type
--
-- See Note [Strictness in tidyType and friends]
tidyType :: TidyEnv -> Type -> Type
tidyType _   t@(LitTy {})          = t -- Preserve sharing
tidyType env (TyVarTy tv)          = TyVarTy   $! tidyTyCoVarOcc env tv
tidyType _   t@(TyConApp _ [])     = t -- Preserve sharing if possible
tidyType env (TyConApp tycon tys)  = TyConApp tycon $! tidyTypes env tys
tidyType env (AppTy fun arg)       = (AppTy $! (tidyType env fun)) $!
                                     (tidyType env arg)
tidyType env ty@(FunTy _ w arg res) = let { !w'   = tidyType env w
                                          ; !arg' = tidyType env arg
                                          ; !res' = tidyType env res }
                                      in ty { ft_mult = w', ft_arg = arg', ft_res = res' }
tidyType env (ty@(ForAllTy{}))     = (mkForAllTys' $! (zip tvs' vis)) $!
                                     tidyType env' body_ty
  where
    (tvs, vis, body_ty) = splitForAllTyCoVars' ty
    (env', tvs') = tidyVarBndrs env tvs
tidyType env (CastTy ty co)        = (CastTy $! tidyType env ty) $! (tidyCo env co)
tidyType env (CoercionTy co)       = CoercionTy $! (tidyCo env co)

-- The following two functions differ from mkForAllTys and splitForAllTyCoVars in that
-- they expect/preserve the ArgFlag argument. These belong to "GHC.Core.Type", but
-- how should they be named?
mkForAllTys' :: [(TyCoVar, ArgFlag)] -> Type -> Type
mkForAllTys' tvvs ty = foldr strictMkForAllTy ty tvvs
  where
    strictMkForAllTy (tv,vis) ty = (ForAllTy $! ((Bndr $! tv) $! vis)) $! ty

splitForAllTyCoVars' :: Type -> ([TyCoVar], [ArgFlag], Type)
splitForAllTyCoVars' ty = go ty [] []
  where
    go (ForAllTy (Bndr tv vis) ty) tvs viss = go ty (tv:tvs) (vis:viss)
    go ty                          tvs viss = (reverse tvs, reverse viss, ty)

---------------
-- | Grabs the free type variables, tidies them
-- and then uses 'tidyType' to work over the type itself
tidyOpenTypes :: TidyEnv -> [Type] -> (TidyEnv, [Type])
tidyOpenTypes env tys
  = (env', tidyTypes (trimmed_occ_env, var_env) tys)
  where
    (env'@(_, var_env), tvs') = tidyOpenTyCoVars env $
                                tyCoVarsOfTypesWellScoped tys
    trimmed_occ_env = initTidyOccEnv (map getOccName tvs')
      -- The idea here was that we restrict the new TidyEnv to the
      -- _free_ vars of the types, so that we don't gratuitously rename
      -- the _bound_ variables of the types.

---------------
tidyOpenType :: TidyEnv -> Type -> (TidyEnv, Type)
tidyOpenType env ty = let (env', [ty']) = tidyOpenTypes env [ty] in
                      (env', ty')

---------------
-- | Calls 'tidyType' on a top-level type (i.e. with an empty tidying environment)
tidyTopType :: Type -> Type
tidyTopType ty = tidyType emptyTidyEnv ty

---------------
-- | Tidy a Coercion
--
-- See Note [Strictness in tidyType and friends]
tidyCo :: TidyEnv -> Coercion -> Coercion
tidyCo env@(_, subst) co
  = go co
  where
    go_mco MRefl    = MRefl
    go_mco (MCo co) = MCo $! go co

    go (Refl ty)             = Refl $! tidyType env ty
    go (GRefl r ty mco)      = (GRefl r $! tidyType env ty) $! go_mco mco
    go (TyConAppCo r tc cos) = TyConAppCo r tc $! strictMap go cos
    go (AppCo co1 co2)       = (AppCo $! go co1) $! go co2
    go (ForAllCo tv h co)    = ((ForAllCo $! tvp) $! (go h)) $! (tidyCo envp co)
                               where (envp, tvp) = tidyVarBndr env tv
            -- the case above duplicates a bit of work in tidying h and the kind
            -- of tv. But the alternative is to use coercionKind, which seems worse.
    go (FunCo r w co1 co2)   = ((FunCo r $! go w) $! go co1) $! go co2
    go (CoVarCo cv)          = case lookupVarEnv subst cv of
                                 Nothing  -> CoVarCo cv
                                 Just cv' -> CoVarCo cv'
    go (HoleCo h)            = HoleCo h
    go (AxiomInstCo con ind cos) = AxiomInstCo con ind $! strictMap go cos
    go (UnivCo p r t1 t2)    = (((UnivCo $! (go_prov p)) $! r) $!
                                tidyType env t1) $! tidyType env t2
    go (SymCo co)            = SymCo $! go co
    go (TransCo co1 co2)     = (TransCo $! go co1) $! go co2
    go (NthCo r d co)        = NthCo r d $! go co
    go (LRCo lr co)          = LRCo lr $! go co
    go (InstCo co ty)        = (InstCo $! go co) $! go ty
    go (KindCo co)           = KindCo $! go co
    go (SubCo co)            = SubCo $! go co
    go (AxiomRuleCo ax cos)  = AxiomRuleCo ax $ strictMap go cos

    go_prov (PhantomProv co)    = PhantomProv $! go co
    go_prov (ProofIrrelProv co) = ProofIrrelProv $! go co
    go_prov p@(PluginProv _)    = p
    go_prov p@(CorePrepProv _)  = p

tidyCos :: TidyEnv -> [Coercion] -> [Coercion]
tidyCos env = strictMap (tidyCo env)
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCon.hs0000644000000000000000000035610414472400112020124 0ustar0000000000000000
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE DeriveDataTypeable #-}

{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998

The @TyCon@ datatype
-}

module GHC.Core.TyCon(
        -- * Main TyCon data types
        TyCon,
        AlgTyConRhs(..), visibleDataCons,
        AlgTyConFlav(..), isNoParent,
        FamTyConFlav(..), Role(..), Injectivity(..),
        RuntimeRepInfo(..), TyConFlavour(..),

        -- * TyConBinder
        TyConBinder, TyConBndrVis(..), TyConTyCoBinder,
        mkNamedTyConBinder, mkNamedTyConBinders,
        mkRequiredTyConBinder,
        mkAnonTyConBinder, mkAnonTyConBinders,
        tyConBinderArgFlag, tyConBndrVisArgFlag, isNamedTyConBinder,
        isVisibleTyConBinder, isInvisibleTyConBinder,
        isVisibleTcbVis,

        -- ** Field labels
        tyConFieldLabels, lookupTyConFieldLabel,

        -- ** Constructing TyCons
        mkAlgTyCon,
        mkClassTyCon,
        mkFunTyCon,
        mkPrimTyCon,
        mkTupleTyCon,
        mkSumTyCon,
        mkDataTyConRhs, mkLevPolyDataTyConRhs,
        mkSynonymTyCon,
        mkFamilyTyCon,
        mkPromotedDataCon,
        mkTcTyCon,
        noTcTyConScopedTyVars,

        -- ** Predicates on TyCons
        isAlgTyCon, isVanillaAlgTyCon, isConstraintKindCon,
        isClassTyCon, isFamInstTyCon,
        isFunTyCon,
        isPrimTyCon,
        isTupleTyCon, isUnboxedTupleTyCon, isBoxedTupleTyCon,
        isUnboxedSumTyCon, isPromotedTupleTyCon,
        isLiftedAlgTyCon,
        isTypeSynonymTyCon,
        mustBeSaturated,
        isPromotedDataCon, isPromotedDataCon_maybe,
        isKindTyCon, isLiftedTypeKindTyConName,
        isTauTyCon, isFamFreeTyCon, isForgetfulSynTyCon,

        isDataTyCon,
        isEnumerationTyCon,
        isNewTyCon, isAbstractTyCon,
        isFamilyTyCon, isOpenFamilyTyCon,
        isTypeFamilyTyCon, isDataFamilyTyCon,
        isOpenTypeFamilyTyCon, isClosedSynFamilyTyConWithAxiom_maybe,
        tyConInjectivityInfo,
        isBuiltInSynFamTyCon_maybe,
        isGadtSyntaxTyCon, isInjectiveTyCon, isGenerativeTyCon,
isGenInjAlgRhs, isTyConAssoc, tyConAssoc_maybe, tyConFlavourAssoc_maybe, isImplicitTyCon, isTyConWithSrcDataCons, isTcTyCon, setTcTyConKind, tcHasFixedRuntimeRep, isConcreteTyCon, -- ** Extracting information out of TyCons tyConName, tyConSkolem, tyConKind, tyConUnique, tyConTyVars, tyConVisibleTyVars, tyConCType, tyConCType_maybe, tyConDataCons, tyConDataCons_maybe, tyConSingleDataCon_maybe, tyConSingleDataCon, tyConAlgDataCons_maybe, tyConSingleAlgDataCon_maybe, tyConFamilySize, tyConStupidTheta, tyConArity, tyConNullaryTy, tyConRoles, tyConFlavour, tyConTuple_maybe, tyConClass_maybe, tyConATs, tyConFamInst_maybe, tyConFamInstSig_maybe, tyConFamilyCoercion_maybe, tyConFamilyResVar_maybe, synTyConDefn_maybe, synTyConRhs_maybe, famTyConFlav_maybe, famTcResVar, algTyConRhs, newTyConRhs, newTyConEtadArity, newTyConEtadRhs, unwrapNewTyCon_maybe, unwrapNewTyConEtad_maybe, newTyConDataCon_maybe, algTcFields, tyConRuntimeRepInfo, tyConBinders, tyConResKind, tyConInvisTVBinders, tcTyConScopedTyVars, tcTyConIsPoly, mkTyConTagMap, -- ** Manipulating TyCons expandSynTyCon_maybe, newTyConCo, newTyConCo_maybe, pprPromotionQuote, mkTyConKind, -- ** Predicated on TyConFlavours tcFlavourIsOpen, -- * Runtime type representation TyConRepName, tyConRepName_maybe, mkPrelTyConRepName, tyConRepModOcc, -- * Primitive representations of Types PrimRep(..), PrimElemRep(..), primElemRepToPrimRep, isVoidRep, isGcPtrRep, primRepSizeB, primElemRepSizeB, primRepIsFloat, primRepsCompatible, primRepCompatible, primRepIsWord, primRepIsInt, ) where import GHC.Prelude import GHC.Platform import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Kind, Type, PredType, mkForAllTy, mkFunTyMany, mkNakedTyConTy ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr ( pprType ) import {-# SOURCE #-} GHC.Builtin.Types ( runtimeRepTyCon, constraintKind, levityTyCon , multiplicityTyCon , vecCountTyCon, vecElemTyCon, liftedTypeKind ) import {-# SOURCE #-} GHC.Core.DataCon ( DataCon, dataConFieldLabels , dataConTyCon, dataConFullSig , 
isUnboxedSumDataCon ) import {-# SOURCE #-} GHC.Core.Type ( isLiftedTypeKind ) import GHC.Builtin.Uniques ( tyConRepNameUnique , dataConTyRepNameUnique ) import GHC.Utils.Binary import GHC.Types.Var import GHC.Types.Var.Set import GHC.Core.Class import GHC.Types.Basic import GHC.Types.ForeignCall import GHC.Types.Name import GHC.Types.Name.Env import GHC.Core.Coercion.Axiom import GHC.Builtin.Names import GHC.Data.Maybe import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.FastString.Env import GHC.Types.FieldLabel import GHC.Settings.Constants import GHC.Utils.Misc import GHC.Types.Unique.Set import GHC.Unit.Module import qualified Data.Data as Data {- ----------------------------------------------- Notes about type families ----------------------------------------------- Note [Type synonym families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Type synonym families, also known as "type functions", map directly onto the type functions in FC: type family F a :: * type instance F Int = Bool ..etc... * Reply "yes" to isTypeFamilyTyCon, and isFamilyTyCon * From the user's point of view (F Int) and Bool are simply equivalent types. * A Haskell 98 type synonym is a degenerate form of a type synonym family. * Type functions can't appear in the LHS of a type function: type instance F (F Int) = ... -- BAD! * Translation of type family decl: type family F a :: * translates to a FamilyTyCon 'F', whose FamTyConFlav is OpenSynFamilyTyCon type family G a :: * where G Int = Bool G Bool = Char G a = () translates to a FamilyTyCon 'G', whose FamTyConFlav is ClosedSynFamilyTyCon, with the appropriate CoAxiom representing the equations We also support injective type families -- see Note [Injective type families] Note [Data type families] ~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [Wrappers for data instance tycons] in GHC.Types.Id.Make * Data type families are declared thus data family T a :: * data instance T Int = T1 | T2 Bool Here T is the "family TyCon". 
* Reply "yes" to isDataFamilyTyCon, and isFamilyTyCon * The user does not see any "equivalent types" as they did with type synonym families. They just see constructors with types T1 :: T Int T2 :: Bool -> T Int * Here's the FC version of the above declarations: data T a data R:TInt = T1 | T2 Bool axiom ax_ti : T Int ~R R:TInt Note that this is a *representational* coercion The R:TInt is the "representation TyCons". It has an AlgTyConFlav of DataFamInstTyCon T [Int] ax_ti * The axiom ax_ti may be eta-reduced; see Note [Eta reduction for data families] in GHC.Core.Coercion.Axiom * Data family instances may have a different arity than the data family. See Note [Arity of data families] in GHC.Core.FamInstEnv * The data constructor T2 has a wrapper (which is what the source-level "T2" invokes): $WT2 :: Bool -> T Int $WT2 b = T2 b `cast` sym ax_ti * A data instance can declare a fully-fledged GADT: data instance T (a,b) where X1 :: T (Int,Bool) X2 :: a -> b -> T (a,b) Here's the FC version of the above declaration: data R:TPair a b where X1 :: R:TPair Int Bool X2 :: a -> b -> R:TPair a b axiom ax_pr :: T (a,b) ~R R:TPair a b $WX1 :: forall a b. a -> b -> T (a,b) $WX1 a b (x::a) (y::b) = X2 a b x y `cast` sym (ax_pr a b) The R:TPair are the "representation TyCons". We have a bit of work to do, to unpick the result types of the data instance declaration for T (a,b), to get the result type in the representation; e.g. T (a,b) --> R:TPair a b The representation TyCon R:TList, has an AlgTyConFlav of DataFamInstTyCon T [(a,b)] ax_pr * Notice that T is NOT translated to a FC type function; it just becomes a "data type" with no constructors, which can be coerced into R:TInt, R:TPair by the axioms. 
These axioms axioms come into play when (and *only* when) you - use a data constructor - do pattern matching Rather like newtype, in fact As a result - T behaves just like a data type so far as decomposition is concerned - (T Int) is not implicitly converted to R:TInt during type inference. Indeed the latter type is unknown to the programmer. - There *is* an instance for (T Int) in the type-family instance environment, but it is looked up (via tcLookupDataFamilyInst) in can_eq_nc (via tcTopNormaliseNewTypeTF_maybe) when trying to solve representational equalities like T Int ~R# Bool Here we look up (T Int), convert it to R:TInt, and then unwrap the newtype R:TInt. It is also looked up in reduceTyFamApp_maybe. - It's fine to have T in the LHS of a type function: type instance F (T a) = [a] It was this last point that confused me! The big thing is that you should not think of a data family T as a *type function* at all, not even an injective one! We can't allow even injective type functions on the LHS of a type function: type family injective G a :: * type instance F (G Int) = Bool is no good, even if G is injective, because consider type instance G Int = Bool type instance F Bool = Char So a data type family is not an injective type function. It's just a data type with some axioms that connect it to other data types. * The tyConTyVars of the representation tycon are the tyvars that the user wrote in the patterns. This is important in GHC.Tc.Deriv, where we bring these tyvars into scope before type-checking the deriving clause. This fact is arranged for in TcInstDecls.tcDataFamInstDecl. Note [Associated families and their parent class] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ *Associated* families are just like *non-associated* families, except that they have a famTcParent field of (Just cls_tc), which identifies the parent class. 
However there is an important sharing relationship between * the tyConTyVars of the parent Class * the tyConTyVars of the associated TyCon class C a b where data T p a type F a q b Here the 'a' and 'b' are shared with the 'Class'; that is, they have the same Unique. This is important. In an instance declaration we expect * all the shared variables to be instantiated the same way * the non-shared variables of the associated type should not be instantiated at all instance C [x] (Tree y) where data T p [x] = T1 x | T2 p type F [x] q (Tree y) = (x,y,q) Note [TyCon Role signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Every tycon has a role signature, assigning a role to each of the tyConTyVars (or of equal length to the tyConArity, if there are no tyConTyVars). An example demonstrates these best: say we have a tycon T, with parameters a at nominal, b at representational, and c at phantom. Then, to prove representational equality between T a1 b1 c1 and T a2 b2 c2, we need to have nominal equality between a1 and a2, representational equality between b1 and b2, and nothing in particular (i.e., phantom equality) between c1 and c2. This might happen, say, with the following declaration: data T a b c where MkT :: b -> T Int b c Data and class tycons have their roles inferred (see inferRoles in GHC.Tc.TyCl.Utils), as do vanilla synonym tycons. Family tycons have all parameters at role N, though it is conceivable that we could relax this restriction. (->)'s and tuples' parameters are at role R. Each primitive tycon declares its roles; it's worth noting that (~#)'s parameters are at role N. Promoted data constructors' type arguments are at role R. All kind arguments are at role N. Note [Unboxed tuple RuntimeRep vars] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The contents of an unboxed tuple may have any representation. Accordingly, the kind of the unboxed tuple constructor is runtime-representation polymorphic. 
Type constructor (2 kind arguments) (#,#) :: forall (q :: RuntimeRep) (r :: RuntimeRep). TYPE q -> TYPE r -> TYPE (TupleRep [q, r]) Data constructor (4 type arguments) (#,#) :: forall (q :: RuntimeRep) (r :: RuntimeRep) (a :: TYPE q) (b :: TYPE r). a -> b -> (# a, b #) These extra tyvars (q and r) cause some delicate processing around tuples, where we need to manually insert RuntimeRep arguments. The same situation happens with unboxed sums: each alternative has its own RuntimeRep. For boxed tuples, there is no representation polymorphism, and therefore we add RuntimeReps only for the unboxed version. Type constructor (no kind arguments) (,) :: Type -> Type -> Type Data constructor (2 type arguments) (,) :: forall a b. a -> b -> (a, b) Note [Injective type families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We allow injectivity annotations for type families (both open and closed): type family F (a :: k) (b :: k) = r | r -> a type family G a b = res | res -> a b where ... Injectivity information is stored in the `famTcInj` field of `FamilyTyCon`. `famTcInj` maybe stores a list of Bools, where each entry corresponds to a single element of `tyConTyVars` (both lists should have identical length). If no injectivity annotation was provided `famTcInj` is Nothing. From this follows an invariant that if `famTcInj` is a Just then at least one element in the list must be True. See also: * [Injectivity annotation] in GHC.Hs.Decls * [Renaming injectivity annotation] in GHC.Rename.Module * [Verifying injectivity annotation] in GHC.Core.FamInstEnv * [Type inference for type families with injectivity] in GHC.Tc.Solver.Interact Note [Sharing nullary TyConApps] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Nullary type constructor applications are extremely common. For this reason each TyCon carries with it a @TyConApp tycon []@. This ensures that 'mkTyConTy' does not need to allocate and eliminates quite a bit of heap residency. 
Furthermore, we use 'mkTyConTy' in the nullary case of 'mkTyConApp',
ensuring that this function also benefits from sharing.

This optimisation improves allocations in the Cabal test by around 0.3%
and decreased cache misses measurably.

See #19367.


************************************************************************
*                                                                      *
                  TyConBinder, TyConTyCoBinder
*                                                                      *
************************************************************************
-}

type TyConBinder = VarBndr TyVar TyConBndrVis

type TyConTyCoBinder = VarBndr TyCoVar TyConBndrVis
     -- Only PromotedDataCon has TyConTyCoBinders
     -- See Note [Promoted GADT data constructors]

-- | Visibility of a 'TyCon' binder: either a dependent (named) binder
-- or an anonymous (arrow-like) one.
data TyConBndrVis
  = NamedTCB ArgFlag
  | AnonTCB  AnonArgFlag

instance Outputable TyConBndrVis where
  ppr (NamedTCB flag) = text "NamedTCB" <> ppr flag
  ppr (AnonTCB af)    = text "AnonTCB"  <> ppr af

mkAnonTyConBinder :: AnonArgFlag -> TyVar -> TyConBinder
mkAnonTyConBinder af tv = assert (isTyVar tv) $
                          Bndr tv (AnonTCB af)

mkAnonTyConBinders :: AnonArgFlag -> [TyVar] -> [TyConBinder]
mkAnonTyConBinders af tvs = map (mkAnonTyConBinder af) tvs

mkNamedTyConBinder :: ArgFlag -> TyVar -> TyConBinder
-- The odd argument order supports currying
mkNamedTyConBinder vis tv = assert (isTyVar tv) $
                            Bndr tv (NamedTCB vis)

mkNamedTyConBinders :: ArgFlag -> [TyVar] -> [TyConBinder]
-- The odd argument order supports currying
mkNamedTyConBinders vis tvs = map (mkNamedTyConBinder vis) tvs

-- | Make a Required TyConBinder. It chooses between NamedTCB and
-- AnonTCB based on whether the tv is mentioned in the dependent set
mkRequiredTyConBinder :: TyCoVarSet  -- these are used dependently
                      -> TyVar
                      -> TyConBinder
mkRequiredTyConBinder dep_set tv
  | tv `elemVarSet` dep_set = mkNamedTyConBinder Required tv
  | otherwise               = mkAnonTyConBinder  VisArg   tv

tyConBinderArgFlag :: TyConBinder -> ArgFlag
tyConBinderArgFlag (Bndr _ vis) = tyConBndrVisArgFlag vis

tyConBndrVisArgFlag :: TyConBndrVis -> ArgFlag
tyConBndrVisArgFlag (NamedTCB vis)     = vis
tyConBndrVisArgFlag (AnonTCB VisArg)   = Required
tyConBndrVisArgFlag (AnonTCB InvisArg) = Inferred   -- See Note [AnonTCB InvisArg]

isNamedTyConBinder :: TyConBinder -> Bool
-- Identifies kind variables
-- E.g. data T k (a:k) = blah
-- Here 'k' is a NamedTCB, a variable used in the kind of other binders
isNamedTyConBinder (Bndr _ (NamedTCB {})) = True
isNamedTyConBinder _                      = False

isVisibleTyConBinder :: VarBndr tv TyConBndrVis -> Bool
-- Works for IfaceTyConBinder too
isVisibleTyConBinder (Bndr _ tcb_vis) = isVisibleTcbVis tcb_vis

isVisibleTcbVis :: TyConBndrVis -> Bool
isVisibleTcbVis (NamedTCB vis)     = isVisibleArgFlag vis
isVisibleTcbVis (AnonTCB VisArg)   = True
isVisibleTcbVis (AnonTCB InvisArg) = False

isInvisibleTyConBinder :: VarBndr tv TyConBndrVis -> Bool
-- Works for IfaceTyConBinder too
isInvisibleTyConBinder tcb = not (isVisibleTyConBinder tcb)

-- Build the 'tyConKind' from the binders and the result kind.
-- Keep in sync with 'mkTyConKind' in GHC.Iface.Type.
mkTyConKind :: [TyConBinder] -> Kind -> Kind
mkTyConKind bndrs res_kind = foldr mk res_kind bndrs
  where
    mk :: TyConBinder -> Kind -> Kind
    mk (Bndr tv (AnonTCB af))   k = mkFunTyMany af (varType tv) k
    mk (Bndr tv (NamedTCB vis)) k = mkForAllTy tv vis k

tyConInvisTVBinders :: [TyConBinder]   -- From the TyCon
                    -> [InvisTVBinder] -- Suitable for the foralls of a term function
-- See Note [Building TyVarBinders from TyConBinders]
tyConInvisTVBinders tc_bndrs
 = map mk_binder tc_bndrs
 where
   mk_binder (Bndr tv tc_vis) = mkTyVarBinder vis tv
      where
        vis = case tc_vis of
                AnonTCB VisArg           -> SpecifiedSpec
                AnonTCB InvisArg         -> InferredSpec   -- See Note [AnonTCB InvisArg]
                NamedTCB Required        -> SpecifiedSpec
                NamedTCB (Invisible vis) -> vis

-- Returns only tyvars, as covars are always inferred
tyConVisibleTyVars :: TyCon -> [TyVar]
tyConVisibleTyVars tc = [ tv | Bndr tv vis <- tyConBinders tc
                             , isVisibleTcbVis vis ]

{- Note [AnonTCB InvisArg]
~~~~~~~~~~~~~~~~~~~~~~~~~~
It's pretty rare to have an (AnonTCB InvisArg) binder.  The only way
it can occur is through equality constraints in kinds. These can
arise in one of two ways:

* In a PromotedDataCon whose kind has an equality constraint:

    'MkT :: forall a b. (a~b) => blah

  See Note [Constraints in kinds] in GHC.Core.TyCo.Rep, and
  Note [Promoted data constructors] in this module.

* In a data type whose kind has an equality constraint, as in the
  following example from #12102:

    data T :: forall a. (IsTypeLit a ~ 'True) => a -> Type

When mapping an (AnonTCB InvisArg) to an ArgFlag, in
tyConBndrVisArgFlag, we use "Inferred" to mean "the user cannot
specify this arguments, even with visible type/kind application;
instead the type checker must fill it in.

We map (AnonTCB VisArg) to Required, of course: the user must
provide it. It would be utterly wrong to do this for constraint
arguments, which is why AnonTCB must have the AnonArgFlag in
the first place.
Note [Building TyVarBinders from TyConBinders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We sometimes need to build the quantified type of a value from the TyConBinders of a type or class. For that we need not TyConBinders but TyVarBinders (used in forall-type) E.g: * From data T a = MkT (Maybe a) we are going to make a data constructor with type MkT :: forall a. Maybe a -> T a See the TyCoVarBinders passed to buildDataCon * From class C a where { op :: a -> Maybe a } we are going to make a default method $dmop :: forall a. C a => a -> Maybe a See the TyCoVarBinders passed to mkSigmaTy in mkDefaultMethodType Both of these are user-callable. (NB: default methods are not callable directly by the user but rather via the code generated by 'deriving', which uses visible type application; see mkDefMethBind.) Since they are user-callable we must get their type-argument visibility information right; and that info is in the TyConBinders. Here is an example: data App a b = MkApp (a b) -- App :: forall {k}. (k->*) -> k -> * The TyCon has tyConTyBinders = [ Named (Bndr (k :: *) Inferred), Anon (k->*), Anon k ] The TyConBinders for App line up with App's kind, given above. But the DataCon MkApp has the type MkApp :: forall {k} (a:k->*) (b:k). 
a b -> App k a b That is, its TyCoVarBinders should be dataConUnivTyVarBinders = [ Bndr (k:*) Inferred , Bndr (a:k->*) Specified , Bndr (b:k) Specified ] So tyConTyVarBinders converts TyCon's TyConBinders into TyVarBinders: - variable names from the TyConBinders - but changing Anon/Required to Specified The last part about Required->Specified comes from this: data T k (a:k) b = MkT (a b) Here k is Required in T's kind, but we don't have Required binders in the TyCoBinders for a term (see Note [No Required TyCoBinder in terms] in GHC.Core.TyCo.Rep), so we change it to Specified when making MkT's TyCoBinders -} {- Note [The binders/kind/arity fields of a TyCon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All TyCons have this group of fields tyConBinders :: [TyConBinder/TyConTyCoBinder] tyConResKind :: Kind tyConTyVars :: [TyVar] -- Cached = binderVars tyConBinders -- NB: Currently (Aug 2018), TyCons that own this -- field really only contain TyVars. So it is -- [TyVar] instead of [TyCoVar]. tyConKind :: Kind -- Cached = mkTyConKind tyConBinders tyConResKind tyConArity :: Arity -- Cached = length tyConBinders They fit together like so: * tyConBinders gives the telescope of type/coercion variables on the LHS of the type declaration. For example: type App a (b :: k) = a b tyConBinders = [ Bndr (k::*) (NamedTCB Inferred) , Bndr (a:k->*) AnonTCB , Bndr (b:k) AnonTCB ] Note that there are three binders here, including the kind variable k. See Note [tyConBinders and lexical scoping] * See Note [VarBndrs, TyCoVarBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep for what the visibility flag means. * Each TyConBinder tyConBinders has a TyVar (sometimes it is TyCoVar), and that TyVar may scope over some other part of the TyCon's definition. 
Eg type T a = a -> a we have tyConBinders = [ Bndr (a:*) AnonTCB ] synTcRhs = a -> a So the 'a' scopes over the synTcRhs * From the tyConBinders and tyConResKind we can get the tyConKind E.g for our App example: App :: forall k. (k->*) -> k -> * We get a 'forall' in the kind for each NamedTCB, and an arrow for each AnonTCB tyConKind is the full kind of the TyCon, not just the result kind * For type families, tyConArity is the arguments this TyCon must be applied to, to be considered saturated. Here we mean "applied to in the actual Type", not surface syntax; i.e. including implicit kind variables. So it's just (length tyConBinders) * For an algebraic data type, or data instance, the tyConResKind is always (TYPE r); that is, the tyConBinders are enough to saturate the type constructor. I'm not quite sure why we have this invariant, but it's enforced by splitTyConKind Note [tyConBinders and lexical scoping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a TyCon, and a PolyTcTyCon, we obey the following rule: The Name of the TyConBinder is precisely the lexically scoped Name from the original declaration (precisely = both OccName and Unique) For example, data T a (b :: wombat) = MkT We will get tyConBinders of [k, wombat, a::k, b::wombat] The 'k' is made up; the user didn't specify it. But for the kind of 'b' we must use 'wombat'. Why do we have this invariant? * Similarly, when typechecking default definitions for class methods, in GHC.Tc.TyCl.Class.tcClassDecl2, we only have the (final) Class available; but the variables bound in that class must be in scope. Eample (#19738): type P :: k -> Type data P a = MkP type T :: k -> Constraint class T (a :: j) where f :: P a f = MkP @j @a -- 'j' must be in scope when we typecheck 'f' * When typechecking `deriving` clauses for top-level data declarations, the tcTyConScopedTyVars are brought into scope in through the `di_scoped_tvs` field of GHC.Tc.Deriv.DerivInfo. 
Example (#16731): class C x1 x2 type T :: a -> Type data T (x :: z) deriving (C z) When typechecking `C z`, we want `z` to map to `a`, which is exactly what the tcTyConScopedTyVars for T give us. -} instance OutputableBndr tv => Outputable (VarBndr tv TyConBndrVis) where ppr (Bndr v bi) = ppr_bi bi <+> parens (pprBndr LetBind v) where ppr_bi (AnonTCB VisArg) = text "anon-vis" ppr_bi (AnonTCB InvisArg) = text "anon-invis" ppr_bi (NamedTCB Required) = text "req" ppr_bi (NamedTCB Specified) = text "spec" ppr_bi (NamedTCB Inferred) = text "inf" instance Binary TyConBndrVis where put_ bh (AnonTCB af) = do { putByte bh 0; put_ bh af } put_ bh (NamedTCB vis) = do { putByte bh 1; put_ bh vis } get bh = do { h <- getByte bh ; case h of 0 -> do { af <- get bh; return (AnonTCB af) } _ -> do { vis <- get bh; return (NamedTCB vis) } } {- ********************************************************************* * * The TyCon type * * ************************************************************************ -} -- | TyCons represent type constructors. Type constructors are introduced by -- things such as: -- -- 1) Data declarations: @data Foo = ...@ creates the @Foo@ type constructor of -- kind @*@ -- -- 2) Type synonyms: @type Foo = ...@ creates the @Foo@ type constructor -- -- 3) Newtypes: @newtype Foo a = MkFoo ...@ creates the @Foo@ type constructor -- of kind @* -> *@ -- -- 4) Class declarations: @class Foo where@ creates the @Foo@ type constructor -- of kind @*@ -- -- This data type also encodes a number of primitive, built in type constructors -- such as those for function and tuple types. -- If you edit this type, you may need to update the GHC formalism -- See Note [GHC Formalism] in GHC.Core.Lint data TyCon = -- | The function type constructor, @(->)@ FunTyCon { tyConUnique :: Unique, -- ^ A Unique of this TyCon. Invariant: -- identical to Unique of Name stored in -- tyConName field. 
tyConName :: Name, -- ^ Name of the constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, tcRepName :: TyConRepName } -- | Algebraic data types, from -- - @data@ declarations -- - @newtype@ declarations -- - data instance declarations -- - type instance declarations -- - the TyCon generated by a class declaration -- - boxed tuples -- - unboxed tuples -- - constraint tuples -- - unboxed sums -- Data/newtype/type /families/ are handled by 'FamilyTyCon'. -- See 'AlgTyConRhs' for more information. | AlgTyCon { tyConUnique :: Unique, -- ^ A Unique of this TyCon. Invariant: -- identical to Unique of Name stored in -- tyConName field. tyConName :: Name, -- ^ Name of the constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConTyVars :: [TyVar], -- ^ TyVar binders tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ -- The tyConTyVars scope over: -- -- 1. The 'algTcStupidTheta' -- 2. The cached types in algTyConRhs.NewTyCon -- 3. The family instance types if present -- -- Note that it does /not/ scope over the data -- constructors. tcRoles :: [Role], -- ^ The role for each type variable -- This list has length = tyConArity -- See also Note [TyCon Role signatures] tyConCType :: Maybe CType,-- ^ The C type that should be used -- for this type when using the FFI -- and CAPI algTcGadtSyntax :: Bool, -- ^ Was the data type declared with GADT -- syntax? If so, that doesn't mean it's a -- true GADT; only that the "where" form -- was used. This field is used only to -- guide pretty-printing algTcStupidTheta :: [PredType], -- ^ The \"stupid theta\" for the data -- type (always empty for GADTs). 
A -- \"stupid theta\" is the context to -- the left of an algebraic type -- declaration, e.g. @Eq a@ in the -- declaration @data Eq a => T a ...@. -- See @Note [The stupid context]@ in -- "GHC.Core.DataCon". algTcRhs :: AlgTyConRhs, -- ^ Contains information about the -- data constructors of the algebraic type algTcFields :: FieldLabelEnv, -- ^ Maps a label to information -- about the field algTcFlavour :: AlgTyConFlav -- ^ The flavour of this algebraic tycon. -- Gives the class or family declaration -- 'TyCon' for derived 'TyCon's representing -- class or family instances, respectively. } -- | Represents type synonyms | SynonymTyCon { tyConUnique :: Unique, -- ^ A Unique of this TyCon. Invariant: -- identical to Unique of Name stored in -- tyConName field. tyConName :: Name, -- ^ Name of the constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConTyVars :: [TyVar], -- ^ TyVar binders tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ -- tyConTyVars scope over: synTcRhs tcRoles :: [Role], -- ^ The role for each type variable -- This list has length = tyConArity -- See also Note [TyCon Role signatures] synTcRhs :: Type, -- ^ Contains information about the expansion -- of the synonym synIsTau :: Bool, -- True <=> the RHS of this synonym does not -- have any foralls, after expanding any -- nested synonyms synIsFamFree :: Bool, -- True <=> the RHS of this synonym does not mention -- any type synonym families (data families -- are fine), again after expanding any -- nested synonyms synIsForgetful :: Bool -- True <= at least one argument is not mentioned -- in the RHS (or is mentioned only under -- forgetful synonyms) -- Test is conservative, so True does not guarantee -- forgetfulness. 
} -- | Represents families (both type and data) -- Argument roles are all Nominal | FamilyTyCon { tyConUnique :: Unique, -- ^ A Unique of this TyCon. Invariant: -- identical to Unique of Name stored in -- tyConName field. tyConName :: Name, -- ^ Name of the constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConTyVars :: [TyVar], -- ^ TyVar binders tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ -- tyConTyVars connect an associated family TyCon -- with its parent class; see GHC.Tc.Validity.checkConsistentFamInst famTcResVar :: Maybe Name, -- ^ Name of result type variable, used -- for pretty-printing with --show-iface -- and for reifying TyCon in Template -- Haskell famTcFlav :: FamTyConFlav, -- ^ Type family flavour: open, closed, -- abstract, built-in. See comments for -- FamTyConFlav famTcParent :: Maybe TyCon, -- ^ For *associated* type/data families -- The class tycon in which the family is declared -- See Note [Associated families and their parent class] famTcInj :: Injectivity -- ^ is this a type family injective in -- its type variables? Nothing if no -- injectivity annotation was given } -- | Primitive types; cannot be defined in Haskell. This includes -- the usual suspects (such as @Int#@) as well as foreign-imported -- types and kinds (@*@, @#@, and @?@) | PrimTyCon { tyConUnique :: Unique, -- ^ A Unique of this TyCon. Invariant: -- identical to Unique of Name stored in -- tyConName field. 
tyConName :: Name, -- ^ Name of the constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ tcRoles :: [Role], -- ^ The role for each type variable -- This list has length = tyConArity -- See also Note [TyCon Role signatures] primRepName :: TyConRepName -- ^ The 'Typeable' representation. -- A cached version of -- @'mkPrelTyConRepName' ('tyConName' tc)@. } -- | Represents promoted data constructor. | PromotedDataCon { -- See Note [Promoted data constructors] tyConUnique :: Unique, -- ^ Same Unique as the data constructor tyConName :: Name, -- ^ Same Name as the data constructor -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConTyCoBinder], -- ^ Full binders -- TyConTyCoBinder: see Note [Promoted GADT data construtors] tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ tcRoles :: [Role], -- ^ Roles: N for kind vars, R for type vars dataCon :: DataCon, -- ^ Corresponding data constructor tcRepName :: TyConRepName, promDcRepInfo :: RuntimeRepInfo -- ^ See comments with 'RuntimeRepInfo' } -- | These exist only during type-checking. 
See Note [How TcTyCons work] -- in "GHC.Tc.TyCl" | TcTyCon { tyConUnique :: Unique, tyConName :: Name, -- See Note [The binders/kind/arity fields of a TyCon] tyConBinders :: [TyConBinder], -- ^ Full binders tyConTyVars :: [TyVar], -- ^ TyVar binders tyConResKind :: Kind, -- ^ Result kind tyConKind :: Kind, -- ^ Kind of this TyCon tyConArity :: Arity, -- ^ Arity tyConNullaryTy :: Type, -- ^ A pre-allocated @TyConApp tycon []@ -- NB: the TyConArity of a TcTyCon must match -- the number of Required (positional, user-specified) -- arguments to the type constructor; see the use -- of tyConArity in generaliseTcTyCon tcTyConScopedTyVars :: [(Name,TcTyVar)], -- ^ Scoped tyvars over the tycon's body -- The range is always a skolem or TcTyVar, be -- MonoTcTyCon only: see Note [Scoped tyvars in a TcTyCon] tcTyConIsPoly :: Bool, -- ^ Is this TcTyCon already generalized? -- Used only to make zonking more efficient tcTyConFlavour :: TyConFlavour -- ^ What sort of 'TyCon' this represents. } {- Note [Scoped tyvars in a TcTyCon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The tcTyConScopedTyVars field records the lexicial-binding connection between the original, user-specified Name (i.e. thing in scope) and the TcTyVar that the Name is bound to. Order *does* matter; the tcTyConScopedTyvars list consists of specified_tvs ++ required_tvs where * specified ones first * required_tvs the same as tyConTyVars * tyConArity = length required_tvs tcTyConScopedTyVars are used only for MonoTcTyCons, not PolyTcTyCons. See Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] in GHC.Tc.Utils.TcType. Note [Promoted GADT data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Any promoted GADT data constructor will have a type with equality constraints in its type; e.g. K :: forall a b. (a ~# [b]) => a -> b -> T a So, when promoted to become a type constructor, the tyConBinders will include CoVars. That is why we use [TyConTyCoBinder] for the tyconBinders field. 
TyConTyCoBinder is a synonym for TyConBinder, but with the clue that the binder can be a CoVar not just a TyVar. Note [Representation-polymorphic TyCons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To check for representation-polymorphism directly in the typechecker, e.g. when using GHC.Tc.Utils.TcMType.checkTypeHasFixedRuntimeRep, we need to compute whether a type has a syntactically fixed RuntimeRep, as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. It's useful to have a quick way to check whether a saturated application of a type constructor has a fixed RuntimeRep. That is, we want to know, given a TyCon 'T' of arity 'n', does T a_1 ... a_n always have a fixed RuntimeRep? That is, is it always the case that this application has a kind of the form T a_1 ... a_n :: TYPE rep in which 'rep' is a concrete 'RuntimeRep'? ('Concrete' in the sense of Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete: it contains no type-family applications or type variables.) To answer this question, we have 'tcHasFixedRuntimeRep'. If 'tcHasFixedRuntimeRep' returns 'True', it means we're sure that every saturated application of `T` has a fixed RuntimeRep. However, if it returns 'False', we don't know: perhaps some application might not have a fixed RuntimeRep. Examples: - For type families, we won't know in general whether an application will have a fixed RuntimeRep: type F :: k -> k type family F a where {..} `tcHasFixedRuntimeRep F = False' - For newtypes, we're usually OK: newtype N a b c = MkN Int No matter what arguments we apply `N` to, we always get something of kind `Type`, which has a fixed RuntimeRep. Thus `tcHasFixedRuntimeRep N = True`. However, with `-XUnliftedNewtypes`, we can have representation-polymorphic newtypes: type UN :: TYPE rep -> TYPE rep newtype UN a = MkUN a `tcHasFixedRuntimeRep UN = False` For example, `UN @Int8Rep Int8#` is represented by an 8-bit value, while `UN @LiftedRep Int` is represented by a heap pointer. 
To distinguish whether we are dealing with a representation-polymorphic newtype, we keep track of which situation we are in using the 'nt_fixed_rep' field of the 'NewTyCon' constructor of 'AlgTyConRhs', and read this field to compute 'tcHasFixedRuntimeRep'. - A similar story can be told for datatypes: we're usually OK, except with `-XUnliftedDatatypes` which allows for levity polymorphism, e.g.: type UC :: TYPE (BoxedRep l) -> TYPE (BoxedRep l) type UC a = MkUC a `tcHasFixedRuntimeRep UC = False` Here, we keep track of whether we are dealing with a levity-polymorphic unlifted datatype using the 'data_fixed_lev' field of the 'DataTyCon' constructor of 'AlgTyConRhs'. N.B.: technically, the representation of a datatype is fixed, as it is always a pointer. However, we currently require that we know the specific `RuntimeRep`: knowing that it's `BoxedRep l` for a type-variable `l` isn't enough. See #15532. -} -- | Represents right-hand-sides of 'TyCon's for algebraic types data AlgTyConRhs -- | Says that we know nothing about this data type, except that -- it's represented by a pointer. Used when we export a data type -- abstractly into an .hi file. = AbstractTyCon -- | Information about those 'TyCon's derived from a @data@ -- declaration. This includes data types with no constructors at -- all. | DataTyCon { data_cons :: [DataCon], -- ^ The data type constructors; can be empty if the -- user declares the type to have no constructors -- -- INVARIANT: Kept in order of increasing 'DataCon' -- tag (see the tag assignment in mkTyConTagMap) data_cons_size :: Int, -- ^ Cached value: length data_cons is_enum :: Bool, -- ^ Cached value: is this an enumeration type? -- See Note [Enumeration types] data_fixed_lev :: Bool -- ^ 'True' if the data type constructor has -- a known, fixed levity when fully applied -- to its arguments, False otherwise. -- -- This can only be 'False' with UnliftedDatatypes, -- e.g. 
-- -- > data A :: TYPE (BoxedRep l) where { MkA :: Int -> A } -- -- This boolean is cached to make it cheaper to check -- for levity and representation-polymorphism in -- tcHasFixedRuntimeRep. } | TupleTyCon { -- A boxed, unboxed, or constraint tuple data_con :: DataCon, -- NB: it can be an *unboxed* tuple tup_sort :: TupleSort -- ^ Is this a boxed, unboxed or constraint -- tuple? } -- | An unboxed sum type. | SumTyCon { data_cons :: [DataCon], data_cons_size :: Int -- ^ Cached value: length data_cons } -- | Information about those 'TyCon's derived from a @newtype@ declaration | NewTyCon { data_con :: DataCon, -- ^ The unique constructor for the @newtype@. -- It has no existentials nt_rhs :: Type, -- ^ Cached value: the argument type of the -- constructor, which is just the representation -- type of the 'TyCon' (remember that @newtype@s -- do not exist at runtime so need a different -- representation type). -- -- The free 'TyVar's of this type are the -- 'tyConTyVars' from the corresponding 'TyCon' nt_etad_rhs :: ([TyVar], Type), -- ^ Same as the 'nt_rhs', but this time eta-reduced. -- Hence the list of 'TyVar's in this field may be -- shorter than the declared arity of the 'TyCon'. -- See Note [Newtype eta] nt_co :: CoAxiom Unbranched, -- The axiom coercion that creates the @newtype@ -- from the representation 'Type'. The axiom witnesses -- a representational coercion: -- nt_co :: N ty1 ~R# rep_tys -- See Note [Newtype coercions] -- Invariant: arity = #tvs in nt_etad_rhs; -- See Note [Newtype eta] -- Watch out! If any newtypes become transparent -- again check #1072. nt_fixed_rep :: Bool -- ^ 'True' if the newtype has a known, fixed representation -- when fully applied to its arguments, 'False' otherwise. -- This can only ever be 'False' with UnliftedNewtypes. 
-- -- Example: -- -- > newtype N (a :: TYPE r) = MkN a -- -- Invariant: nt_fixed_rep nt = tcHasFixedRuntimeRep (nt_rhs nt) -- -- This boolean is cached to make it cheaper to check if a -- variable binding is representation-polymorphic -- in tcHasFixedRuntimeRep. } mkSumTyConRhs :: [DataCon] -> AlgTyConRhs mkSumTyConRhs data_cons = SumTyCon data_cons (length data_cons) -- | Create an 'AlgTyConRhs' from the data constructors, -- for a potentially levity-polymorphic datatype (with `UnliftedDatatypes`). mkLevPolyDataTyConRhs :: Bool -- ^ whether the 'DataCon' has a fixed levity -> [DataCon] -> AlgTyConRhs mkLevPolyDataTyConRhs fixed_lev cons = DataTyCon { data_cons = cons, data_cons_size = length cons, is_enum = not (null cons) && all is_enum_con cons, -- See Note [Enumeration types] in GHC.Core.TyCon data_fixed_lev = fixed_lev } where is_enum_con con | (_univ_tvs, ex_tvs, eq_spec, theta, arg_tys, _res) <- dataConFullSig con = null ex_tvs && null eq_spec && null theta && null arg_tys -- | Create an 'AlgTyConRhs' from the data constructors. -- -- Use 'mkLevPolyDataConRhs' if the datatype can be levity-polymorphic. mkDataTyConRhs :: [DataCon] -> AlgTyConRhs mkDataTyConRhs = mkLevPolyDataTyConRhs False -- | Some promoted datacons signify extra info relevant to GHC. For example, -- the @IntRep@ constructor of @RuntimeRep@ corresponds to the 'IntRep' -- constructor of 'PrimRep'. This data structure allows us to store this -- information right in the 'TyCon'. The other approach would be to look -- up things like @RuntimeRep@'s @PrimRep@ by known-key every time. -- See also Note [Getting from RuntimeRep to PrimRep] in "GHC.Types.RepType" data RuntimeRepInfo = NoRRI -- ^ an ordinary promoted data con | RuntimeRep ([Type] -> [PrimRep]) -- ^ A constructor of @RuntimeRep@. The argument to the function should -- be the list of arguments to the promoted datacon. 
| VecCount Int -- ^ A constructor of @VecCount@ | VecElem PrimElemRep -- ^ A constructor of @VecElem@ | LiftedInfo -- ^ A constructor of @Levity@ | UnliftedInfo -- ^ A constructor of @Levity@ -- | Extract those 'DataCon's that we are able to learn about. Note -- that visibility in this sense does not correspond to visibility in -- the context of any particular user program! visibleDataCons :: AlgTyConRhs -> [DataCon] visibleDataCons (AbstractTyCon {}) = [] visibleDataCons (DataTyCon{ data_cons = cs }) = cs visibleDataCons (NewTyCon{ data_con = c }) = [c] visibleDataCons (TupleTyCon{ data_con = c }) = [c] visibleDataCons (SumTyCon{ data_cons = cs }) = cs -- | Describes the flavour of an algebraic type constructor. For -- classes and data families, this flavour includes a reference to -- the parent 'TyCon'. data AlgTyConFlav = -- | An ordinary algebraic type constructor. This includes unlifted and -- representation-polymorphic datatypes and newtypes and unboxed tuples, -- but NOT unboxed sums; see UnboxedSumTyCon. VanillaAlgTyCon TyConRepName -- For Typeable -- | An unboxed sum type constructor. This is distinct from VanillaAlgTyCon -- because we currently don't allow unboxed sums to be Typeable since -- there are too many of them. See #13276. | UnboxedSumTyCon -- | Type constructors representing a class dictionary. -- See Note [ATyCon for classes] in "GHC.Core.TyCo.Rep" | ClassTyCon Class -- INVARIANT: the classTyCon of this Class is the -- current tycon TyConRepName -- | Type constructors representing an *instance* of a *data* family. -- Parameters: -- -- 1) The type family in question -- -- 2) Instance types; free variables are the 'tyConTyVars' -- of the current 'TyCon' (not the family one). INVARIANT: -- the number of types matches the arity of the family 'TyCon' -- -- 3) A 'CoTyCon' identifying the representation -- type with the type instance family | DataFamInstTyCon -- See Note [Data type families] (CoAxiom Unbranched) -- The coercion axiom. 
-- A *Representational* coercion, -- of kind T ty1 ty2 ~R R:T a b c -- where T is the family TyCon, -- and R:T is the representation TyCon (ie this one) -- and a,b,c are the tyConTyVars of this TyCon -- -- BUT may be eta-reduced; see -- Note [Eta reduction for data families] in -- GHC.Core.Coercion.Axiom -- Cached fields of the CoAxiom, but adjusted to -- use the tyConTyVars of this TyCon TyCon -- The family TyCon [Type] -- Argument types (mentions the tyConTyVars of this TyCon) -- No shorter in length than the tyConTyVars of the family TyCon -- How could it be longer? See [Arity of data families] in GHC.Core.FamInstEnv -- E.g. data instance T [a] = ... -- gives a representation tycon: -- data R:TList a = ... -- axiom co a :: T [a] ~ R:TList a -- with R:TList's algTcFlavour = DataFamInstTyCon T [a] co instance Outputable AlgTyConFlav where ppr (VanillaAlgTyCon {}) = text "Vanilla ADT" ppr (UnboxedSumTyCon {}) = text "Unboxed sum" ppr (ClassTyCon cls _) = text "Class parent" <+> ppr cls ppr (DataFamInstTyCon _ tc tys) = text "Family parent (family instance)" <+> ppr tc <+> sep (map pprType tys) -- | Checks the invariants of a 'AlgTyConFlav' given the appropriate type class -- name, if any okParent :: Name -> AlgTyConFlav -> Bool okParent _ (VanillaAlgTyCon {}) = True okParent _ (UnboxedSumTyCon {}) = True okParent tc_name (ClassTyCon cls _) = tc_name == tyConName (classTyCon cls) okParent _ (DataFamInstTyCon _ fam_tc tys) = tys `lengthAtLeast` tyConArity fam_tc isNoParent :: AlgTyConFlav -> Bool isNoParent (VanillaAlgTyCon {}) = True isNoParent _ = False -------------------- data Injectivity = NotInjective | Injective [Bool] -- 1-1 with tyConTyVars (incl kind vars) deriving( Eq ) -- | Information pertaining to the expansion of a type synonym (@type@) data FamTyConFlav = -- | Represents an open type family without a fixed right hand -- side. Additional instances can appear at any time. 
-- -- These are introduced by either a top level declaration: -- -- > data family T a :: * -- -- Or an associated data type declaration, within a class declaration: -- -- > class C a b where -- > data T b :: * DataFamilyTyCon TyConRepName -- | An open type synonym family e.g. @type family F x y :: * -> *@ | OpenSynFamilyTyCon -- | A closed type synonym family e.g. -- @type family F x where { F Int = Bool }@ | ClosedSynFamilyTyCon (Maybe (CoAxiom Branched)) -- See Note [Closed type families] -- | A closed type synonym family declared in an hs-boot file with -- type family F a where .. | AbstractClosedSynFamilyTyCon -- | Built-in type family used by the TypeNats solver | BuiltInSynFamTyCon BuiltInSynFamily instance Outputable FamTyConFlav where ppr (DataFamilyTyCon n) = text "data family" <+> ppr n ppr OpenSynFamilyTyCon = text "open type family" ppr (ClosedSynFamilyTyCon Nothing) = text "closed type family" ppr (ClosedSynFamilyTyCon (Just coax)) = text "closed type family" <+> ppr coax ppr AbstractClosedSynFamilyTyCon = text "abstract closed type family" ppr (BuiltInSynFamTyCon _) = text "built-in type family" {- Note [Closed type families] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * In an open type family you can add new instances later. This is the usual case. * In a closed type family you can only put equations where the family is defined. A non-empty closed type family has a single axiom with multiple branches, stored in the 'ClosedSynFamilyTyCon' constructor. A closed type family with no equations does not have an axiom, because there is nothing for the axiom to prove! Note [Promoted data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All data constructors can be promoted to become a type constructor, via the PromotedDataCon alternative in GHC.Core.TyCon. * The TyCon promoted from a DataCon has the *same* Name and Unique as the DataCon. Eg. 
If the data constructor Data.Maybe.Just(unique 78, say) is promoted to a TyCon whose name is Data.Maybe.Just(unique 78) * We promote the *user* type of the DataCon. Eg data T = MkT {-# UNPACK #-} !(Bool, Bool) The promoted kind is 'MkT :: (Bool,Bool) -> T *not* 'MkT :: Bool -> Bool -> T * Similarly for GADTs: data G a where MkG :: forall b. b -> G [b] The promoted data constructor has kind 'MkG :: forall b. b -> G [b] *not* 'MkG :: forall a b. (a ~# [b]) => b -> G a Note [Enumeration types] ~~~~~~~~~~~~~~~~~~~~~~~~ We define datatypes with no constructors to *not* be enumerations; this fixes trac #2578, Otherwise we end up generating an empty table for __closure_tbl which is used by tagToEnum# to map Int# to constructors in an enumeration. The empty table apparently upset the linker. Moreover, all the data constructor must be enumerations, meaning they have type (forall abc. T a b c). GADTs are not enumerations. For example consider data T a where T1 :: T Int T2 :: T Bool T3 :: T a What would [T1 ..] be? [T1,T3] :: T Int? Easiest thing is to exclude them. See #4528. Note [Newtype coercions] ~~~~~~~~~~~~~~~~~~~~~~~~ The NewTyCon field nt_co is a CoAxiom which is used for coercing from the representation type of the newtype, to the newtype itself. For example, newtype T a = MkT (a -> a) the NewTyCon for T will contain nt_co = CoT where CoT :: forall a. T a ~ a -> a. We might also eta-contract the axiom: see Note [Newtype eta]. Note [Newtype eta] ~~~~~~~~~~~~~~~~~~ Consider newtype Parser a = MkParser (IO a) deriving Monad Are these two types equal? That is, does a coercion exist between them? Monad Parser Monad IO (We need this coercion to make the derived instance for Monad Parser.) Well, yes. But to see that easily we eta-reduce the RHS type of Parser, in this case to IO, so that even unsaturated applications of Parser will work right. So instead of axParser :: forall a. 
Parser a ~ IO a we generate an eta-reduced axiom axParser :: Parser ~ IO This eta reduction is done when the type constructor is built, in GHC.Tc.TyCl.Build.mkNewTyConRhs, and cached in NewTyCon. Here's an example that I think showed up in practice. Source code: newtype T a = MkT [a] newtype Foo m = MkFoo (forall a. m a -> Int) w1 :: Foo [] w1 = ... w2 :: Foo T w2 = MkFoo (\(MkT x) -> case w1 of MkFoo f -> f x) After desugaring, and discarding the data constructors for the newtypes, we would like to get: w2 = w1 `cast` Foo axT so that w2 and w1 share the same code. To do this, the coercion axiom axT must have kind: axT :: T ~ [] and arity: 0 See also Note [Newtype eta and homogeneous axioms] in GHC.Tc.TyCl.Build. ************************************************************************ * * TyConRepName * * ********************************************************************* -} type TyConRepName = Name -- The Name of the top-level declaration for the Typeable world -- $tcMaybe :: Data.Typeable.Internal.TyCon -- $tcMaybe = TyCon { tyConName = "Maybe", ... } tyConRepName_maybe :: TyCon -> Maybe TyConRepName tyConRepName_maybe (FunTyCon { tcRepName = rep_nm }) = Just rep_nm tyConRepName_maybe (PrimTyCon { primRepName = rep_nm }) = Just rep_nm tyConRepName_maybe (AlgTyCon { algTcFlavour = parent }) = case parent of VanillaAlgTyCon rep_nm -> Just rep_nm UnboxedSumTyCon -> Nothing ClassTyCon _ rep_nm -> Just rep_nm DataFamInstTyCon {} -> Nothing tyConRepName_maybe (FamilyTyCon { famTcFlav = DataFamilyTyCon rep_nm }) = Just rep_nm tyConRepName_maybe (PromotedDataCon { dataCon = dc, tcRepName = rep_nm }) | isUnboxedSumDataCon dc -- see #13276 = Nothing | otherwise = Just rep_nm tyConRepName_maybe _ = Nothing -- | Make a 'Name' for the 'Typeable' representation of the given wired-in type mkPrelTyConRepName :: Name -> TyConRepName -- See Note [Grand plan for Typeable] in "GHC.Tc.Instance.Typeable". 
mkPrelTyConRepName tc_name -- Prelude tc_name is always External, -- so nameModule will work = mkExternalName rep_uniq rep_mod rep_occ (nameSrcSpan tc_name) where name_occ = nameOccName tc_name name_mod = nameModule tc_name name_uniq = nameUnique tc_name rep_uniq | isTcOcc name_occ = tyConRepNameUnique name_uniq | otherwise = dataConTyRepNameUnique name_uniq (rep_mod, rep_occ) = tyConRepModOcc name_mod name_occ -- | The name (and defining module) for the Typeable representation (TyCon) of a -- type constructor. -- -- See Note [Grand plan for Typeable] in "GHC.Tc.Instance.Typeable". tyConRepModOcc :: Module -> OccName -> (Module, OccName) tyConRepModOcc tc_module tc_occ = (rep_module, mkTyConRepOcc tc_occ) where rep_module | tc_module == gHC_PRIM = gHC_TYPES | otherwise = tc_module {- ********************************************************************* * * PrimRep * * ************************************************************************ Note [rep swamp] ~~~~~~~~~~~~~~~~ GHC has a rich selection of types that represent "primitive types" of one kind or another. Each of them makes a different set of distinctions, and mostly the differences are for good reasons, although it's probably true that we could merge some of these. Roughly in order of "includes more information": - A Width ("GHC.Cmm.Type") is simply a binary value with the specified number of bits. It may represent a signed or unsigned integer, a floating-point value, or an address. data Width = W8 | W16 | W32 | W64 | W128 - Size, which is used in the native code generator, is Width + floating point information. data Size = II8 | II16 | II32 | II64 | FF32 | FF64 it is necessary because e.g. the instruction to move a 64-bit float on x86 (movsd) is different from the instruction to move a 64-bit integer (movq), so the mov instruction is parameterised by Size. - CmmType wraps Width with more information: GC ptr, float, or other value. 
data CmmType = CmmType CmmCat Width data CmmCat -- "Category" (not exported) = GcPtrCat -- GC pointer | BitsCat -- Non-pointer | FloatCat -- Float It is important to have GcPtr information in Cmm, since we generate info tables containing pointerhood for the GC from this. As for why we have float (and not signed/unsigned) here, see Note [Signed vs unsigned]. - ArgRep makes only the distinctions necessary for the call and return conventions of the STG machine. It is essentially CmmType + void. - PrimRep makes a few more distinctions than ArgRep: it divides non-GC-pointers into signed/unsigned and addresses, information that is necessary for passing these values to foreign functions. There's another tension here: whether the type encodes its size in bytes, or whether its size depends on the machine word size. Width and CmmType have the size built-in, whereas ArgRep and PrimRep do not. This means to turn an ArgRep/PrimRep into a CmmType requires DynFlags. On the other hand, CmmType includes some "nonsense" values, such as CmmType GcPtrCat W32 on a 64-bit machine. The PrimRep type is closely related to the user-visible RuntimeRep type. See Note [RuntimeRep and PrimRep] in GHC.Types.RepType. -} -- | A 'PrimRep' is an abstraction of a type. It contains information that -- the code generator needs in order to pass arguments, return results, -- and store values of this type. See also Note [RuntimeRep and PrimRep] in -- "GHC.Types.RepType" and Note [VoidRep] in "GHC.Types.RepType". 
data PrimRep
  = VoidRep         -- ^ Zero-width value; erased at runtime. See Note [VoidRep] in "GHC.Types.RepType"
  | LiftedRep       -- ^ Lifted pointer (a thunk-able heap object)
  | UnliftedRep     -- ^ Unlifted pointer
  | Int8Rep         -- ^ Signed, 8-bit value
  | Int16Rep        -- ^ Signed, 16-bit value
  | Int32Rep        -- ^ Signed, 32-bit value
  | Int64Rep        -- ^ Signed, 64 bit value
  | IntRep          -- ^ Signed, word-sized value
  | Word8Rep        -- ^ Unsigned, 8 bit value
  | Word16Rep       -- ^ Unsigned, 16 bit value
  | Word32Rep       -- ^ Unsigned, 32 bit value
  | Word64Rep       -- ^ Unsigned, 64 bit value
  | WordRep         -- ^ Unsigned, word-sized value
  | AddrRep         -- ^ A pointer, but /not/ to a Haskell value (use '(Un)liftedRep')
  | FloatRep        -- ^ Single-precision float
  | DoubleRep       -- ^ Double-precision float
  | VecRep Int PrimElemRep  -- ^ A vector of the given length and element rep
  deriving( Data.Data, Eq, Ord, Show )

-- | The representation of a single element of a 'VecRep' vector.
data PrimElemRep
  = Int8ElemRep
  | Int16ElemRep
  | Int32ElemRep
  | Int64ElemRep
  | Word8ElemRep
  | Word16ElemRep
  | Word32ElemRep
  | Word64ElemRep
  | FloatElemRep
  | DoubleElemRep
   deriving( Data.Data, Eq, Ord, Show, Enum )

instance Outputable PrimRep where
  ppr r = text (show r)

instance Outputable PrimElemRep where
  ppr r = text (show r)

-- Serialisation tags below must stay in sync between put_ and get.
instance Binary PrimRep where
   put_ bh VoidRep        = putByte bh 0
   put_ bh LiftedRep      = putByte bh 1
   put_ bh UnliftedRep    = putByte bh 2
   put_ bh Int8Rep        = putByte bh 3
   put_ bh Int16Rep       = putByte bh 4
   put_ bh Int32Rep       = putByte bh 5
   put_ bh Int64Rep       = putByte bh 6
   put_ bh IntRep         = putByte bh 7
   put_ bh Word8Rep       = putByte bh 8
   put_ bh Word16Rep      = putByte bh 9
   put_ bh Word32Rep      = putByte bh 10
   put_ bh Word64Rep      = putByte bh 11
   put_ bh WordRep        = putByte bh 12
   put_ bh AddrRep        = putByte bh 13
   put_ bh FloatRep       = putByte bh 14
   put_ bh DoubleRep      = putByte bh 15
   put_ bh (VecRep n per) = putByte bh 16 *> put_ bh n *> put_ bh per
   get bh = do
      h <- getByte bh
      case h of
        0  -> pure VoidRep
        1  -> pure LiftedRep
        2  -> pure UnliftedRep
        3  -> pure Int8Rep
        4  -> pure Int16Rep
        5  -> pure Int32Rep
        6  -> pure Int64Rep
        7  -> pure IntRep
        8  -> pure Word8Rep
        9  -> pure Word16Rep
        10 -> pure Word32Rep
        11 -> pure Word64Rep
        12 -> pure WordRep
        13 -> pure AddrRep
        14 -> pure FloatRep
        15 -> pure DoubleRep
        16 -> VecRep <$> get bh <*> get bh
        _  -> pprPanic "Binary:PrimRep" (int (fromIntegral h))

instance Binary PrimElemRep where
  -- Relies on the derived Enum instance; tags are the constructor indices.
  put_ bh per = putByte bh (fromIntegral (fromEnum per))
  get bh = toEnum . fromIntegral <$> getByte bh

-- | Is this the representation of a void value?
isVoidRep :: PrimRep -> Bool
isVoidRep VoidRep = True
isVoidRep _other  = False

-- | Is this a pointer that the garbage collector must follow?
isGcPtrRep :: PrimRep -> Bool
isGcPtrRep LiftedRep   = True
isGcPtrRep UnliftedRep = True
isGcPtrRep _           = False

-- A PrimRep is compatible with another iff one can be coerced to the other.
-- See Note [Bad unsafe coercion] in GHC.Core.Lint for when two types are coercible.
primRepCompatible :: Platform -> PrimRep -> PrimRep -> Bool
primRepCompatible platform rep1 rep2 =
    (isUnboxed rep1 == isUnboxed rep2) &&
    (primRepSizeB platform rep1 == primRepSizeB platform rep2) &&
    (primRepIsFloat rep1 == primRepIsFloat rep2)
  where
    isUnboxed = not . isGcPtrRep

-- More general version of `primRepCompatible` for types represented by zero or
-- more than one PrimReps.
primRepsCompatible :: Platform -> [PrimRep] -> [PrimRep] -> Bool
primRepsCompatible platform reps1 reps2 =
    length reps1 == length reps2 &&
    and (zipWith (primRepCompatible platform) reps1 reps2)

-- | The size of a 'PrimRep' in bytes.
--
-- This applies also when used in a constructor, where we allow packing the
-- fields. For instance, in @data Foo = Foo Float# Float#@ the two fields will
-- take only 8 bytes, which for 64-bit arch will be equal to 1 word.
-- See also mkVirtHeapOffsetsWithPadding for details of how data fields are
-- laid out.
primRepSizeB :: Platform -> PrimRep -> Int
primRepSizeB platform rep = case rep of
  Int8Rep     -> 1
  Int16Rep    -> 2
  Int32Rep    -> 4
  Int64Rep    -> 8
  IntRep      -> word_size
  Word8Rep    -> 1
  Word16Rep   -> 2
  Word32Rep   -> 4
  Word64Rep   -> 8
  WordRep     -> word_size
  FloatRep    -> fLOAT_SIZE
  DoubleRep   -> dOUBLE_SIZE
  AddrRep     -> word_size
  LiftedRep   -> word_size
  UnliftedRep -> word_size
  VoidRep     -> 0
  VecRep len elem_rep -> len * primElemRepSizeB platform elem_rep
  where
    -- Pointers and machine-word-sized reps all occupy one word.
    word_size = platformWordSizeInBytes platform

-- | The size of a 'PrimElemRep' in bytes: the size of the corresponding
-- scalar 'PrimRep'.
primElemRepSizeB :: Platform -> PrimElemRep -> Int
primElemRepSizeB platform elem_rep =
  primRepSizeB platform (primElemRepToPrimRep elem_rep)

-- | The scalar 'PrimRep' corresponding to a vector-element rep.
primElemRepToPrimRep :: PrimElemRep -> PrimRep
primElemRepToPrimRep elem_rep = case elem_rep of
  Int8ElemRep   -> Int8Rep
  Int16ElemRep  -> Int16Rep
  Int32ElemRep  -> Int32Rep
  Int64ElemRep  -> Int64Rep
  Word8ElemRep  -> Word8Rep
  Word16ElemRep -> Word16Rep
  Word32ElemRep -> Word32Rep
  Word64ElemRep -> Word64Rep
  FloatElemRep  -> FloatRep
  DoubleElemRep -> DoubleRep

-- | Return if Rep stands for floating type,
-- returns Nothing for vector types.
primRepIsFloat :: PrimRep -> Maybe Bool
primRepIsFloat rep = case rep of
  FloatRep  -> Just True
  DoubleRep -> Just True
  VecRep {} -> Nothing
  _         -> Just False

-- Rep is one of the word reps.
primRepIsWord :: PrimRep -> Bool
primRepIsWord rep =
  rep `elem` [WordRep, Word8Rep, Word16Rep, Word32Rep, Word64Rep]

-- Rep is one of the int reps.
-- Rep is one of the signed int reps.
primRepIsInt :: PrimRep -> Bool
primRepIsInt (IntRep)   = True
primRepIsInt (Int8Rep)  = True
primRepIsInt (Int16Rep) = True
primRepIsInt (Int32Rep) = True
primRepIsInt (Int64Rep) = True
primRepIsInt _          = False

{-
************************************************************************
*                                                                      *
             Field labels
*                                                                      *
************************************************************************
-}

-- | The labels for the fields of this particular 'TyCon'
tyConFieldLabels :: TyCon -> [FieldLabel]
tyConFieldLabels tc = dFsEnvElts $ tyConFieldLabelEnv tc

-- | The labels for the fields of this particular 'TyCon'
-- (empty for non-algebraic 'TyCon's, which have no fields)
tyConFieldLabelEnv :: TyCon -> FieldLabelEnv
tyConFieldLabelEnv tc
  | isAlgTyCon tc = algTcFields tc
  | otherwise     = emptyDFsEnv

-- | Look up a field label belonging to this 'TyCon'
lookupTyConFieldLabel :: FieldLabelString -> TyCon -> Maybe FieldLabel
lookupTyConFieldLabel lbl tc = lookupDFsEnv (tyConFieldLabelEnv tc) lbl

-- | Make a map from strings to FieldLabels from all the data
-- constructors of this algebraic tycon
fieldsOfAlgTcRhs :: AlgTyConRhs -> FieldLabelEnv
fieldsOfAlgTcRhs rhs = mkDFsEnv [ (flLabel fl, fl)
                                | fl <- dataConsFields (visibleDataCons rhs) ]
  where
    -- Duplicates in this list will be removed by 'mkDFsEnv'
    dataConsFields dcs = concatMap dataConFieldLabels dcs

{-
************************************************************************
*                                                                      *
\subsection{TyCon Construction}
*                                                                      *
************************************************************************

Note: the TyCon constructors all take a Kind as one argument, even though they
could, in principle, work out their Kind from their other arguments. But to do
so they need functions from Types, and that makes a nasty module mutual-recursion.
And they aren't called from many places. So we compromise, and move their Kind
calculation to the call site.
-}

-- | Given the name of the function type constructor and its kind, create the
It is recommended to use 'GHC.Builtin.Types.funTyCon' if you want -- this functionality mkFunTyCon :: Name -> [TyConBinder] -> Name -> TyCon mkFunTyCon name binders rep_nm = let tc = FunTyCon { tyConUnique = nameUnique name, tyConName = name, tyConBinders = binders, tyConResKind = liftedTypeKind, tyConKind = mkTyConKind binders liftedTypeKind, tyConArity = length binders, tyConNullaryTy = mkNakedTyConTy tc, tcRepName = rep_nm } in tc -- | This is the making of an algebraic 'TyCon'. mkAlgTyCon :: Name -> [TyConBinder] -- ^ Binders of the 'TyCon' -> Kind -- ^ Result kind -> [Role] -- ^ The roles for each TyVar -> Maybe CType -- ^ The C type this type corresponds to -- when using the CAPI FFI -> [PredType] -- ^ Stupid theta: see 'algTcStupidTheta' -> AlgTyConRhs -- ^ Information about data constructors -> AlgTyConFlav -- ^ What flavour is it? -- (e.g. vanilla, type family) -> Bool -- ^ Was the 'TyCon' declared with GADT syntax? -> TyCon mkAlgTyCon name binders res_kind roles cType stupid rhs parent gadt_syn = let tc = AlgTyCon { tyConName = name, tyConUnique = nameUnique name, tyConBinders = binders, tyConResKind = res_kind, tyConKind = mkTyConKind binders res_kind, tyConArity = length binders, tyConNullaryTy = mkNakedTyConTy tc, tyConTyVars = binderVars binders, tcRoles = roles, tyConCType = cType, algTcStupidTheta = stupid, algTcRhs = rhs, algTcFields = fieldsOfAlgTcRhs rhs, algTcFlavour = assertPpr (okParent name parent) (ppr name $$ ppr parent) parent, algTcGadtSyntax = gadt_syn } in tc -- | Simpler specialization of 'mkAlgTyCon' for classes mkClassTyCon :: Name -> [TyConBinder] -> [Role] -> AlgTyConRhs -> Class -> Name -> TyCon mkClassTyCon name binders roles rhs clas tc_rep_name = mkAlgTyCon name binders constraintKind roles Nothing [] rhs (ClassTyCon clas tc_rep_name) False mkTupleTyCon :: Name -> [TyConBinder] -> Kind -- ^ Result kind of the 'TyCon' -> Arity -- ^ Arity of the tuple 'TyCon' -> DataCon -> TupleSort -- ^ Whether the tuple is boxed or unboxed -> 
AlgTyConFlav -> TyCon mkTupleTyCon name binders res_kind arity con sort parent = let tc = AlgTyCon { tyConUnique = nameUnique name, tyConName = name, tyConBinders = binders, tyConTyVars = binderVars binders, tyConResKind = res_kind, tyConKind = mkTyConKind binders res_kind, tyConArity = arity, tyConNullaryTy = mkNakedTyConTy tc, tcRoles = replicate arity Representational, tyConCType = Nothing, algTcGadtSyntax = False, algTcStupidTheta = [], algTcRhs = TupleTyCon { data_con = con, tup_sort = sort }, algTcFields = emptyDFsEnv, algTcFlavour = parent } in tc mkSumTyCon :: Name -> [TyConBinder] -> Kind -- ^ Kind of the resulting 'TyCon' -> Arity -- ^ Arity of the sum -> [TyVar] -- ^ 'TyVar's scoped over: see 'tyConTyVars' -> [DataCon] -> AlgTyConFlav -> TyCon mkSumTyCon name binders res_kind arity tyvars cons parent = let tc = AlgTyCon { tyConUnique = nameUnique name, tyConName = name, tyConBinders = binders, tyConTyVars = tyvars, tyConResKind = res_kind, tyConKind = mkTyConKind binders res_kind, tyConArity = arity, tyConNullaryTy = mkNakedTyConTy tc, tcRoles = replicate arity Representational, tyConCType = Nothing, algTcGadtSyntax = False, algTcStupidTheta = [], algTcRhs = mkSumTyConRhs cons, algTcFields = emptyDFsEnv, algTcFlavour = parent } in tc -- | Makes a tycon suitable for use during type-checking. It stores -- a variety of details about the definition of the TyCon, but no -- right-hand side. It lives only during the type-checking of a -- mutually-recursive group of tycons; it is then zonked to a proper -- TyCon in zonkTcTyCon. -- See also Note [Kind checking recursive type and class declarations] -- in "GHC.Tc.TyCl". mkTcTyCon :: Name -> [TyConBinder] -> Kind -- ^ /result/ kind only -> [(Name,TcTyVar)] -- ^ Scoped type variables; -- see Note [How TcTyCons work] in GHC.Tc.TyCl -> Bool -- ^ Is this TcTyCon generalised already? 
-> TyConFlavour -- ^ What sort of 'TyCon' this represents -> TyCon mkTcTyCon name binders res_kind scoped_tvs poly flav = let tc = TcTyCon { tyConUnique = getUnique name , tyConName = name , tyConTyVars = binderVars binders , tyConBinders = binders , tyConResKind = res_kind , tyConKind = mkTyConKind binders res_kind , tyConArity = length binders , tyConNullaryTy = mkNakedTyConTy tc , tcTyConScopedTyVars = scoped_tvs , tcTyConIsPoly = poly , tcTyConFlavour = flav } in tc -- | No scoped type variables (to be used with mkTcTyCon). noTcTyConScopedTyVars :: [(Name, TcTyVar)] noTcTyConScopedTyVars = [] -- | Create an primitive 'TyCon', such as @Int#@, @Type@ or @RealWorld#@ -- Primitive TyCons are marshalable iff not lifted. -- If you'd like to change this, modify marshalablePrimTyCon. mkPrimTyCon :: Name -> [TyConBinder] -> Kind -- ^ /result/ kind -- Must answer 'True' to 'isFixedRuntimeRepKind' (i.e., no representation polymorphism). -- (If you need a representation-polymorphic PrimTyCon, -- change tcHasFixedRuntimeRep, marshalablePrimTyCon, reifyTyCon for PrimTyCons.) 
-> [Role] -> TyCon mkPrimTyCon name binders res_kind roles = let tc = PrimTyCon { tyConName = name, tyConUnique = nameUnique name, tyConBinders = binders, tyConResKind = res_kind, tyConKind = mkTyConKind binders res_kind, tyConArity = length roles, tyConNullaryTy = mkNakedTyConTy tc, tcRoles = roles, primRepName = mkPrelTyConRepName name } in tc -- | Create a type synonym 'TyCon' mkSynonymTyCon :: Name -> [TyConBinder] -> Kind -- ^ /result/ kind -> [Role] -> Type -> Bool -> Bool -> Bool -> TyCon mkSynonymTyCon name binders res_kind roles rhs is_tau is_fam_free is_forgetful = let tc = SynonymTyCon { tyConName = name, tyConUnique = nameUnique name, tyConBinders = binders, tyConResKind = res_kind, tyConKind = mkTyConKind binders res_kind, tyConArity = length binders, tyConNullaryTy = mkNakedTyConTy tc, tyConTyVars = binderVars binders, tcRoles = roles, synTcRhs = rhs, synIsTau = is_tau, synIsFamFree = is_fam_free, synIsForgetful = is_forgetful } in tc -- | Create a type family 'TyCon' mkFamilyTyCon :: Name -> [TyConBinder] -> Kind -- ^ /result/ kind -> Maybe Name -> FamTyConFlav -> Maybe Class -> Injectivity -> TyCon mkFamilyTyCon name binders res_kind resVar flav parent inj = let tc = FamilyTyCon { tyConUnique = nameUnique name , tyConName = name , tyConBinders = binders , tyConResKind = res_kind , tyConKind = mkTyConKind binders res_kind , tyConArity = length binders , tyConNullaryTy = mkNakedTyConTy tc , tyConTyVars = binderVars binders , famTcResVar = resVar , famTcFlav = flav , famTcParent = classTyCon <$> parent , famTcInj = inj } in tc -- | Create a promoted data constructor 'TyCon' -- Somewhat dodgily, we give it the same Name -- as the data constructor itself; when we pretty-print -- the TyCon we add a quote; see the Outputable TyCon instance mkPromotedDataCon :: DataCon -> Name -> TyConRepName -> [TyConTyCoBinder] -> Kind -> [Role] -> RuntimeRepInfo -> TyCon mkPromotedDataCon con name rep_name binders res_kind roles rep_info = let tc = PromotedDataCon { 
tyConUnique = nameUnique name, tyConName = name, tyConArity = length roles, tyConNullaryTy = mkNakedTyConTy tc, tcRoles = roles, tyConBinders = binders, tyConResKind = res_kind, tyConKind = mkTyConKind binders res_kind, dataCon = con, tcRepName = rep_name, promDcRepInfo = rep_info } in tc isFunTyCon :: TyCon -> Bool isFunTyCon (FunTyCon {}) = True isFunTyCon _ = False -- | Test if the 'TyCon' is algebraic but abstract (invisible data constructors) isAbstractTyCon :: TyCon -> Bool isAbstractTyCon (AlgTyCon { algTcRhs = AbstractTyCon {} }) = True isAbstractTyCon _ = False -- | Does this 'TyCon' represent something that cannot be defined in Haskell? isPrimTyCon :: TyCon -> Bool isPrimTyCon (PrimTyCon {}) = True isPrimTyCon _ = False -- | Returns @True@ if the supplied 'TyCon' resulted from either a -- @data@ or @newtype@ declaration isAlgTyCon :: TyCon -> Bool isAlgTyCon (AlgTyCon {}) = True isAlgTyCon _ = False -- | Returns @True@ for vanilla AlgTyCons -- that is, those created -- with a @data@ or @newtype@ declaration. isVanillaAlgTyCon :: TyCon -> Bool isVanillaAlgTyCon (AlgTyCon { algTcFlavour = VanillaAlgTyCon _ }) = True isVanillaAlgTyCon _ = False -- | Returns @True@ for the 'TyCon' of the 'Constraint' kind. {-# INLINE isConstraintKindCon #-} -- See Note [Inlining coreView] in GHC.Core.Type isConstraintKindCon :: TyCon -> Bool -- NB: We intentionally match on AlgTyCon, because 'constraintKindTyCon' is -- always an AlgTyCon (see 'pcTyCon' in TysWiredIn) and the record selector -- for 'tyConUnique' would generate unreachable code for every other data -- constructor of TyCon (see #18026). isConstraintKindCon AlgTyCon { tyConUnique = u } = u == constraintKindTyConKey isConstraintKindCon _ = False isDataTyCon :: TyCon -> Bool -- ^ Returns @True@ for data types that are /definitely/ represented by -- heap-allocated constructors. These are scrutinised by Core-level -- @case@ expressions, and they get info tables allocated for them. 
-- -- Generally, the function will be true for all @data@ types and false -- for @newtype@s, unboxed tuples, unboxed sums and type family -- 'TyCon's. But it is not guaranteed to return @True@ in all cases -- that it could. -- -- NB: for a data type family, only the /instance/ 'TyCon's -- get an info table. The family declaration 'TyCon' does not isDataTyCon (AlgTyCon {algTcRhs = rhs}) = case rhs of TupleTyCon { tup_sort = sort } -> isBoxed (tupleSortBoxity sort) SumTyCon {} -> False DataTyCon {} -> True NewTyCon {} -> False AbstractTyCon {} -> False -- We don't know, so return False isDataTyCon _ = False -- | 'isInjectiveTyCon' is true of 'TyCon's for which this property holds -- (where X is the role passed in): -- If (T a1 b1 c1) ~X (T a2 b2 c2), then (a1 ~X1 a2), (b1 ~X2 b2), and (c1 ~X3 c2) -- (where X1, X2, and X3, are the roles given by tyConRolesX tc X) -- See also Note [Decomposing equality] in "GHC.Tc.Solver.Canonical" isInjectiveTyCon :: TyCon -> Role -> Bool isInjectiveTyCon _ Phantom = False isInjectiveTyCon (FunTyCon {}) _ = True isInjectiveTyCon (AlgTyCon {}) Nominal = True isInjectiveTyCon (AlgTyCon {algTcRhs = rhs}) Representational = isGenInjAlgRhs rhs isInjectiveTyCon (SynonymTyCon {}) _ = False isInjectiveTyCon (FamilyTyCon { famTcFlav = DataFamilyTyCon _ }) Nominal = True isInjectiveTyCon (FamilyTyCon { famTcInj = Injective inj }) Nominal = and inj isInjectiveTyCon (FamilyTyCon {}) _ = False isInjectiveTyCon (PrimTyCon {}) _ = True isInjectiveTyCon (PromotedDataCon {}) _ = True isInjectiveTyCon (TcTyCon {}) _ = True -- Reply True for TcTyCon to minimise knock on type errors -- See Note [How TcTyCons work] item (1) in GHC.Tc.TyCl -- | 'isGenerativeTyCon' is true of 'TyCon's for which this property holds -- (where X is the role passed in): -- If (T tys ~X t), then (t's head ~X T). 
-- See also Note [Decomposing equality] in "GHC.Tc.Solver.Canonical"
isGenerativeTyCon :: TyCon -> Role -> Bool
isGenerativeTyCon (FamilyTyCon { famTcFlav = DataFamilyTyCon _ }) Nominal = True
isGenerativeTyCon (FamilyTyCon {}) _ = False
-- in all other cases, injectivity implies generativity
isGenerativeTyCon tc r = isInjectiveTyCon tc r

-- | Is this an 'AlgTyConRhs' of a 'TyCon' that is generative and injective
-- with respect to representational equality?
isGenInjAlgRhs :: AlgTyConRhs -> Bool
isGenInjAlgRhs (TupleTyCon {})    = True
isGenInjAlgRhs (SumTyCon {})      = True
isGenInjAlgRhs (DataTyCon {})     = True
isGenInjAlgRhs (AbstractTyCon {}) = False
isGenInjAlgRhs (NewTyCon {})      = False

-- | Is this the 'TyCon' for a @newtype@?
isNewTyCon :: TyCon -> Bool
isNewTyCon (AlgTyCon {algTcRhs = NewTyCon {}}) = True
isNewTyCon _                                   = False

-- | Take a 'TyCon' apart into the 'TyVar's it scopes over, the 'Type' it
-- expands into, and (possibly) a coercion from the representation type to the
-- @newtype@.
-- Returns @Nothing@ if this is not possible.
unwrapNewTyCon_maybe :: TyCon -> Maybe ([TyVar], Type, CoAxiom Unbranched)
unwrapNewTyCon_maybe (AlgTyCon { tyConTyVars = tvs,
                                 algTcRhs = NewTyCon { nt_co = co,
                                                       nt_rhs = rhs }})
  = Just (tvs, rhs, co)
unwrapNewTyCon_maybe _ = Nothing

-- | Like 'unwrapNewTyCon_maybe', but uses the eta-reduced RHS and tyvars
-- ('nt_etad_rhs'); see Note [Newtype eta].
unwrapNewTyConEtad_maybe :: TyCon -> Maybe ([TyVar], Type, CoAxiom Unbranched)
unwrapNewTyConEtad_maybe (AlgTyCon { algTcRhs = NewTyCon { nt_co = co,
                                                           nt_etad_rhs = (tvs,rhs) }})
  = Just (tvs, rhs, co)
unwrapNewTyConEtad_maybe _ = Nothing

-- | Is this a 'TyCon' representing a regular H98 type synonym (@type@)?
{-# INLINE isTypeSynonymTyCon #-}  -- See Note [Inlining coreView] in GHC.Core.Type
isTypeSynonymTyCon :: TyCon -> Bool
isTypeSynonymTyCon (SynonymTyCon {}) = True
isTypeSynonymTyCon _                 = False

-- | True unless this is a type synonym whose 'synIsTau' flag is False;
-- conservatively True for all non-synonym 'TyCon's.
isTauTyCon :: TyCon -> Bool
isTauTyCon (SynonymTyCon { synIsTau = is_tau }) = is_tau
isTauTyCon _                                    = True

-- | Is this tycon neither a type family nor a synonym that expands
-- to a type family?
isFamFreeTyCon :: TyCon -> Bool
isFamFreeTyCon (SynonymTyCon { synIsFamFree = fam_free }) = fam_free
isFamFreeTyCon (FamilyTyCon { famTcFlav = flav })         = isDataFamFlav flav
isFamFreeTyCon _                                          = True

-- | Is this a forgetful type synonym? If this is a type synonym whose
-- RHS does not mention one (or more) of its bound variables, returns
-- True. Thus, False means that all bound variables appear on the RHS;
-- True may not mean anything, as the test to set this flag is
-- conservative.
isForgetfulSynTyCon :: TyCon -> Bool
isForgetfulSynTyCon (SynonymTyCon { synIsForgetful = forget }) = forget
isForgetfulSynTyCon _                                          = False

-- As for newtypes, it is in some contexts important to distinguish between
-- closed synonyms and synonym families, as synonym families have no unique
-- right hand side to which a synonym family application can expand.
--

-- | True iff we can decompose (T a b c) into ((T a b) c)
--   I.e. is it injective and generative w.r.t nominal equality?
--   That is, if (T a b) ~N d e f, is it always the case that
--   (T ~N d), (a ~N e) and (b ~N f)?
-- Specifically NOT true of synonyms (open and otherwise)
--
-- It'd be unusual to call mustBeSaturated on a regular H98
-- type synonym, because you should probably have expanded it first
-- But regardless, it's not decomposable
mustBeSaturated :: TyCon -> Bool
mustBeSaturated = tcFlavourMustBeSaturated . tyConFlavour

-- | Is this an algebraic 'TyCon' declared with the GADT syntax?
isGadtSyntaxTyCon :: TyCon -> Bool
isGadtSyntaxTyCon (AlgTyCon { algTcGadtSyntax = res }) = res
isGadtSyntaxTyCon _                                    = False

-- | Is this an algebraic 'TyCon' which is just an enumeration of values?
isEnumerationTyCon :: TyCon -> Bool
-- See Note [Enumeration types] in GHC.Core.TyCon
isEnumerationTyCon (AlgTyCon { tyConArity = arity, algTcRhs = rhs })
  = case rhs of
       DataTyCon { is_enum = res } -> res
       TupleTyCon {}               -> arity == 0  -- Unit tuple only
       _                           -> False
isEnumerationTyCon _ = False

-- | Is this a 'TyCon', synonym or otherwise, that defines a family?
isFamilyTyCon :: TyCon -> Bool
isFamilyTyCon (FamilyTyCon {}) = True
isFamilyTyCon _                = False

-- | Is this a 'TyCon', synonym or otherwise, that defines a family with
-- instances?
isOpenFamilyTyCon :: TyCon -> Bool
isOpenFamilyTyCon (FamilyTyCon {famTcFlav = flav })
  | OpenSynFamilyTyCon <- flav = True
  | DataFamilyTyCon {} <- flav = True
isOpenFamilyTyCon _            = False

-- | Is this a synonym 'TyCon' that may have further instances appear?
isTypeFamilyTyCon :: TyCon -> Bool
isTypeFamilyTyCon (FamilyTyCon { famTcFlav = flav }) = not (isDataFamFlav flav)
isTypeFamilyTyCon _                                  = False

-- | Is this a data family 'TyCon' that may have further instances appear?
isDataFamilyTyCon :: TyCon -> Bool
isDataFamilyTyCon (FamilyTyCon { famTcFlav = flav }) = isDataFamFlav flav
isDataFamilyTyCon _                                  = False

-- | Is this an open type family TyCon?
isOpenTypeFamilyTyCon :: TyCon -> Bool
isOpenTypeFamilyTyCon (FamilyTyCon {famTcFlav = OpenSynFamilyTyCon }) = True
isOpenTypeFamilyTyCon _                                               = False

-- | Is this a non-empty closed type family? Returns 'Nothing' for
-- abstract or empty closed families.
isClosedSynFamilyTyConWithAxiom_maybe :: TyCon -> Maybe (CoAxiom Branched)
isClosedSynFamilyTyConWithAxiom_maybe
  (FamilyTyCon {famTcFlav = ClosedSynFamilyTyCon mb}) = mb
isClosedSynFamilyTyConWithAxiom_maybe _               = Nothing

-- | @'tyConInjectivityInfo' tc@ returns @'Injective' is@ if @tc@ is an
-- injective tycon (where @is@ states for which 'tyConBinders' @tc@ is
-- injective), or 'NotInjective' otherwise.
tyConInjectivityInfo :: TyCon -> Injectivity
tyConInjectivityInfo tc
  | FamilyTyCon { famTcInj = inj } <- tc
  = inj
  | isInjectiveTyCon tc Nominal
  -- Injective in every argument
  = Injective (replicate (tyConArity tc) True)
  | otherwise
  = NotInjective

-- | Extract the built-in implementation of a built-in type family
-- 'TyCon', if this is one.
isBuiltInSynFamTyCon_maybe :: TyCon -> Maybe BuiltInSynFamily
isBuiltInSynFamTyCon_maybe (FamilyTyCon {famTcFlav = BuiltInSynFamTyCon ops }) = Just ops
isBuiltInSynFamTyCon_maybe _                                                   = Nothing

-- | Is this family flavour a data family (as opposed to a type synonym family)?
isDataFamFlav :: FamTyConFlav -> Bool
isDataFamFlav (DataFamilyTyCon {}) = True   -- Data family
isDataFamFlav _                    = False  -- Type synonym family

-- | Is this TyCon for an associated type?
isTyConAssoc :: TyCon -> Bool
isTyConAssoc = isJust . tyConAssoc_maybe

-- | Get the enclosing class TyCon (if there is one) for the given TyCon.
tyConAssoc_maybe :: TyCon -> Maybe TyCon
tyConAssoc_maybe = tyConFlavourAssoc_maybe . tyConFlavour

-- | Get the enclosing class TyCon (if there is one) for the given TyConFlavour
tyConFlavourAssoc_maybe :: TyConFlavour -> Maybe TyCon
tyConFlavourAssoc_maybe (DataFamilyFlavour mb_parent)     = mb_parent
tyConFlavourAssoc_maybe (OpenTypeFamilyFlavour mb_parent) = mb_parent
tyConFlavourAssoc_maybe _                                 = Nothing

-- The unit tycon didn't used to be classed as a tuple tycon
-- but I thought that was silly so I've undone it
-- If it can't be for some reason, it should be a AlgTyCon
isTupleTyCon :: TyCon -> Bool
-- ^ Does this 'TyCon' represent a tuple?
--
-- NB: when compiling @Data.Tuple@, the tycons won't reply @True@ to
-- 'isTupleTyCon', because they are built as 'AlgTyCons'.  However they
-- get spat into the interface file as tuple tycons, so I don't think
-- it matters.
isTupleTyCon (AlgTyCon { algTcRhs = TupleTyCon {} }) = True
isTupleTyCon _                                       = False

-- | Return the sort of a tuple 'TyCon', or 'Nothing' for non-tuples.
tyConTuple_maybe :: TyCon -> Maybe TupleSort
tyConTuple_maybe (AlgTyCon { algTcRhs = rhs })
  | TupleTyCon { tup_sort = sort} <- rhs
  = Just sort
tyConTuple_maybe _ = Nothing

-- | Is this the 'TyCon' for an unboxed tuple?
isUnboxedTupleTyCon :: TyCon -> Bool
isUnboxedTupleTyCon (AlgTyCon { algTcRhs = rhs })
  | TupleTyCon { tup_sort = sort } <- rhs
  = not (isBoxed (tupleSortBoxity sort))
isUnboxedTupleTyCon _ = False

-- | Is this the 'TyCon' for a boxed tuple?
isBoxedTupleTyCon :: TyCon -> Bool
isBoxedTupleTyCon (AlgTyCon { algTcRhs = rhs })
  | TupleTyCon { tup_sort = sort } <- rhs
  = isBoxed (tupleSortBoxity sort)
isBoxedTupleTyCon _ = False

-- | Is this the 'TyCon' for an unboxed sum?
isUnboxedSumTyCon :: TyCon -> Bool
isUnboxedSumTyCon (AlgTyCon { algTcRhs = rhs })
  | SumTyCon {} <- rhs
  = True
isUnboxedSumTyCon _ = False

-- | Does this algebraic 'TyCon' have a lifted result kind?
isLiftedAlgTyCon :: TyCon -> Bool
isLiftedAlgTyCon (AlgTyCon { tyConResKind = res_kind })
  = isLiftedTypeKind res_kind
isLiftedAlgTyCon _ = False

-- | Is this the 'TyCon' for a /promoted/ tuple?
isPromotedTupleTyCon :: TyCon -> Bool
isPromotedTupleTyCon tyCon
  | Just dataCon <- isPromotedDataCon_maybe tyCon
  , isTupleTyCon (dataConTyCon dataCon) = True
  | otherwise                           = False

-- | Is this a PromotedDataCon?
isPromotedDataCon :: TyCon -> Bool
isPromotedDataCon (PromotedDataCon {}) = True
isPromotedDataCon _                    = False

-- | Retrieves the promoted DataCon if this is a PromotedDataCon;
-- 'Nothing' otherwise.
isPromotedDataCon_maybe :: TyCon -> Maybe DataCon
isPromotedDataCon_maybe (PromotedDataCon { dataCon = dc }) = Just dc
isPromotedDataCon_maybe _ = Nothing

-- | Is this tycon really meant for use at the kind level? That is,
-- should it be permitted without -XDataKinds?
isKindTyCon :: TyCon -> Bool
isKindTyCon tc = getUnique tc `elementOfUniqSet` kindTyConKeys

-- | These TyCons should be allowed at the kind level, even without
-- -XDataKinds.
kindTyConKeys :: UniqSet Unique
kindTyConKeys = unionManyUniqSets
  ( mkUniqSet [ liftedTypeKindTyConKey, liftedRepTyConKey, constraintKindTyConKey, tYPETyConKey ]
  : map (mkUniqSet . tycon_with_datacons) [ runtimeRepTyCon, levityTyCon
                                          , multiplicityTyCon
                                          , vecCountTyCon, vecElemTyCon ] )
  where
    -- A kind-level TyCon contributes its own key plus the keys of all
    -- of its (promoted) data constructors.
    tycon_with_datacons tc = getUnique tc : map getUnique (tyConDataCons tc)

isLiftedTypeKindTyConName :: Name -> Bool
isLiftedTypeKindTyConName = (`hasKey` liftedTypeKindTyConKey)

-- | Identifies implicit tycons that, in particular, do not go into interface
-- files (because they are implicitly reconstructed when the interface is
-- read).
--
-- Note that:
--
-- * Associated families are implicit, as they are re-constructed from
--   the class declaration in which they reside, and
--
-- * Family instances are /not/ implicit as they represent the instance body
--   (similar to a @dfun@ does that for a class instance).
--
-- * Tuples are implicit iff they have a wired-in name
--   (namely: boxed and unboxed tuples are wired-in and implicit,
--            but constraint tuples are not)
isImplicitTyCon :: TyCon -> Bool
isImplicitTyCon (FunTyCon {})        = True
isImplicitTyCon (PrimTyCon {})       = True
isImplicitTyCon (PromotedDataCon {}) = True
isImplicitTyCon (AlgTyCon { algTcRhs = rhs, tyConName = name })
  | TupleTyCon {} <- rhs = isWiredInName name
  | SumTyCon {} <- rhs   = True
  | otherwise            = False
isImplicitTyCon (FamilyTyCon { famTcParent = parent }) = isJust parent
isImplicitTyCon (SynonymTyCon {})    = False
isImplicitTyCon (TcTyCon {})         = False

-- | The @CTYPE@ pragma information of an algebraic 'TyCon', if any.
tyConCType_maybe :: TyCon -> Maybe CType
tyConCType_maybe tc@(AlgTyCon {}) = tyConCType tc
tyConCType_maybe _ = Nothing

-- | Is this a TcTyCon? (That is, one only used during type-checking?)
isTcTyCon :: TyCon -> Bool
isTcTyCon (TcTyCon {}) = True
isTcTyCon _            = False

setTcTyConKind :: TyCon -> Kind -> TyCon
-- Update the Kind of a TcTyCon
-- The new kind is always a zonked version of its previous
-- kind, so we don't need to update any other fields.
-- See Note [The Purely Kinded Type Invariant (PKTI)] in GHC.Tc.Gen.HsType
setTcTyConKind tc@(TcTyCon {}) kind
  = let tc' = tc { tyConKind      = kind
                 , tyConNullaryTy = mkNakedTyConTy tc'
                   -- see Note [Sharing nullary TyCons]
                 }
    in tc'
setTcTyConKind tc _ = pprPanic "setTcTyConKind" (ppr tc)

-- | Does this 'TyCon' have a syntactically fixed RuntimeRep when fully applied,
-- as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete?
--
-- False is safe. True means we're sure.
-- Does only a quick check, based on the TyCon's category.
--
-- See Note [Representation-polymorphic TyCons]
tcHasFixedRuntimeRep :: TyCon -> Bool
tcHasFixedRuntimeRep FunTyCon{} = True
tcHasFixedRuntimeRep (AlgTyCon { algTcRhs = rhs }) = case rhs of
  AbstractTyCon {} -> False
    -- An abstract TyCon might not have a fixed runtime representation.
    -- Note that this is an entirely different matter from the concreteness
    -- of the 'TyCon', in the sense of 'isConcreteTyCon'.

  DataTyCon { data_fixed_lev = fixed_lev } -> fixed_lev
    -- A datatype might not have a fixed levity with UnliftedDatatypes (#20423).
    -- NB: the current representation-polymorphism checks require that
    -- the representation be fully-known, including levity variables.
    -- This might be relaxed in the future (#15532).

  TupleTyCon { tup_sort = tuple_sort } -> isBoxed (tupleSortBoxity tuple_sort)

  SumTyCon {} -> False -- only unboxed sums here

  NewTyCon { nt_fixed_rep = fixed_rep } -> fixed_rep
    -- A newtype might not have a fixed runtime representation
    -- with UnliftedNewtypes (#17360)

tcHasFixedRuntimeRep SynonymTyCon{} = False  -- conservative choice
tcHasFixedRuntimeRep FamilyTyCon{}  = False
tcHasFixedRuntimeRep PrimTyCon{}    = True
tcHasFixedRuntimeRep TcTyCon{}      = False
tcHasFixedRuntimeRep tc@PromotedDataCon{} = pprPanic "tcHasFixedRuntimeRep datacon" (ppr tc)

-- | Is this 'TyCon' concrete (i.e. not a synonym/type family)?
--
-- Used for representation polymorphism checks.
isConcreteTyCon :: TyCon -> Bool
isConcreteTyCon = isConcreteTyConFlavour . tyConFlavour

-- | Is this 'TyConFlavour' concrete (i.e. not a synonym/type family)?
--
-- Used for representation polymorphism checks.
isConcreteTyConFlavour :: TyConFlavour -> Bool
isConcreteTyConFlavour = \case
  ClassFlavour             -> True
  TupleFlavour {}          -> True
  SumFlavour               -> True
  DataTypeFlavour          -> True
  NewtypeFlavour           -> True
  AbstractTypeFlavour      -> True
    -- See Note [Concrete types] in GHC.Tc.Utils.Concrete
  DataFamilyFlavour {}     -> False
  OpenTypeFamilyFlavour {} -> False
  ClosedTypeFamilyFlavour  -> False
  TypeSynonymFlavour       -> False
  BuiltInTypeFlavour       -> True
  PromotedDataConFlavour   -> True

{-
-----------------------------------------------
--      Expand type-constructor applications
-----------------------------------------------
-}

expandSynTyCon_maybe
        :: TyCon
        -> [tyco]            -- ^ Arguments to 'TyCon'
        -> Maybe ([(TyVar,tyco)], Type, [tyco])
                             -- ^ Returns a 'TyVar' substitution, the body
                             --   type of the synonym (not yet substituted)
                             --   and any arguments remaining from the
                             --   application
-- ^ Expand a type synonym application
-- Return Nothing if the TyCon is not a synonym,
-- or if not enough arguments are supplied
expandSynTyCon_maybe tc tys
  | SynonymTyCon { tyConTyVars = tvs, synTcRhs = rhs, tyConArity = arity } <- tc
  = if arity == 0
    then Just ([], rhs, tys)
         -- Avoid a bit of work in the case of nullary synonyms
    else case tys `listLengthCmp` arity of
              GT -> Just (tvs `zip` tys, rhs, drop arity tys)
              EQ -> Just (tvs `zip` tys, rhs, [])
              LT -> Nothing
  | otherwise
  = Nothing

----------------

-- | Check if the tycon actually refers to a proper `data` or `newtype`
-- with user defined constructors rather than one from a class or other
-- construction.
-- NB: This is only used in GHC.Tc.Gen.Export.checkPatSynParent to determine if an
-- exported tycon can have a pattern synonym bundled with it, e.g.,
--   module Foo (TyCon(.., PatSyn)) where
isTyConWithSrcDataCons :: TyCon -> Bool
isTyConWithSrcDataCons (AlgTyCon { algTcRhs = rhs, algTcFlavour = parent })
  | DataTyCon {}  <- rhs = src_parent
  | NewTyCon {}   <- rhs = src_parent
  | TupleTyCon {} <- rhs = src_parent
  | otherwise            = False
  where
    -- User-defined constructors only when there is no parent
    -- (i.e. not generated for a class or family instance)
    src_parent = isNoParent parent
isTyConWithSrcDataCons (FamilyTyCon { famTcFlav = DataFamilyTyCon {} })
  = True -- #14058
isTyConWithSrcDataCons _ = False

-- | As 'tyConDataCons_maybe', but returns the empty list of constructors if no
-- constructors could be found
tyConDataCons :: TyCon -> [DataCon]
-- It's convenient for tyConDataCons to return the
-- empty list for type synonyms etc
tyConDataCons tycon = case tyConDataCons_maybe tycon of
  Just cons -> cons
  Nothing   -> []

-- | Determine the 'DataCon's originating from the given 'TyCon', if the 'TyCon'
-- is the sort that can have any constructors (note: this does not include
-- abstract algebraic types)
tyConDataCons_maybe :: TyCon -> Maybe [DataCon]
tyConDataCons_maybe (AlgTyCon { algTcRhs = rhs })
  | DataTyCon { data_cons = cons } <- rhs = Just cons
  | NewTyCon { data_con = con }    <- rhs = Just [con]
  | TupleTyCon { data_con = con }  <- rhs = Just [con]
  | SumTyCon { data_cons = cons }  <- rhs = Just cons
tyConDataCons_maybe _ = Nothing

-- | If the given 'TyCon' has a /single/ data constructor, i.e. it is a @data@
-- type with one alternative, a tuple type or a @newtype@ then that constructor
-- is returned. If the 'TyCon' has more than one constructor, or represents a
-- primitive or function type constructor then @Nothing@ is returned.
tyConSingleDataCon_maybe :: TyCon -> Maybe DataCon
tyConSingleDataCon_maybe (AlgTyCon { algTcRhs = rhs })
  | DataTyCon { data_cons = [c] } <- rhs = Just c
  | TupleTyCon { data_con = c }   <- rhs = Just c
  | NewTyCon { data_con = c }     <- rhs = Just c
tyConSingleDataCon_maybe _ = Nothing

-- | Like 'tyConSingleDataCon_maybe', but panics if 'Nothing'.
tyConSingleDataCon :: TyCon -> DataCon
tyConSingleDataCon tc = case tyConSingleDataCon_maybe tc of
  Nothing -> pprPanic "tyConDataCon" (ppr tc)
  Just c  -> c

-- | Like 'tyConSingleDataCon_maybe', but returns 'Nothing' for newtypes.
tyConSingleAlgDataCon_maybe :: TyCon -> Maybe DataCon
tyConSingleAlgDataCon_maybe tycon =
  if isNewTyCon tycon then Nothing else tyConSingleDataCon_maybe tycon

-- | Returns @Just dcs@ if the given 'TyCon' is a @data@ type, a tuple type
-- or a sum type with data constructors dcs. If the 'TyCon' has more than one
-- constructor, or represents a primitive or function type constructor then
-- @Nothing@ is returned.
--
-- Like 'tyConDataCons_maybe', but returns 'Nothing' for newtypes.
tyConAlgDataCons_maybe :: TyCon -> Maybe [DataCon]
tyConAlgDataCons_maybe tycon =
  if isNewTyCon tycon then Nothing else tyConDataCons_maybe tycon

-- | Determine the number of value constructors a 'TyCon' has. Panics if the
-- 'TyCon' is not algebraic or a tuple
tyConFamilySize :: TyCon -> Int
tyConFamilySize tc@(AlgTyCon { algTcRhs = rhs })
  | DataTyCon { data_cons_size = size } <- rhs = size
  | NewTyCon {}                         <- rhs = 1
  | TupleTyCon {}                       <- rhs = 1
  | SumTyCon { data_cons_size = size }  <- rhs = size
  | otherwise = pprPanic "tyConFamilySize 1" (ppr tc)
tyConFamilySize tc = pprPanic "tyConFamilySize 2" (ppr tc)

-- | Extract an 'AlgTyConRhs' with information about data constructors from an
-- algebraic or tuple 'TyCon'. Panics for any other sort of 'TyCon'
algTyConRhs :: TyCon -> AlgTyConRhs
algTyConRhs tc = case tc of
  AlgTyCon { algTcRhs = rhs } -> rhs
  other                       -> pprPanic "algTyConRhs" (ppr other)

-- | Extract type variable naming the result of injective type family
tyConFamilyResVar_maybe :: TyCon -> Maybe Name
tyConFamilyResVar_maybe tc = case tc of
  FamilyTyCon { famTcResVar = res } -> res
  _                                 -> Nothing

-- | Get the list of roles for the type parameters of a TyCon
tyConRoles :: TyCon -> [Role]
-- See also Note [TyCon Role signatures]
tyConRoles (FunTyCon {})
  = [Nominal, Nominal, Nominal, Representational, Representational]
tyConRoles (AlgTyCon { tcRoles = roles })        = roles
tyConRoles (SynonymTyCon { tcRoles = roles })    = roles
tyConRoles tc@(FamilyTyCon {})                   = replicate (tyConArity tc) Nominal
tyConRoles (PrimTyCon { tcRoles = roles })       = roles
tyConRoles (PromotedDataCon { tcRoles = roles }) = roles
tyConRoles tc@(TcTyCon {})                       = replicate (tyConArity tc) Nominal

-- | Extract the bound type variables and the representation type of a
-- @newtype@ 'TyCon'. Panics if the 'TyCon' is not a newtype.
newTyConRhs :: TyCon -> ([TyVar], Type)
newTyConRhs tycon = case tycon of
  AlgTyCon { tyConTyVars = tvs, algTcRhs = NewTyCon { nt_rhs = rhs } }
    -> (tvs, rhs)
  _ -> pprPanic "newTyConRhs" (ppr tycon)

-- | The number of type parameters that need to be passed to a newtype to
-- resolve it. May be less than in the definition if it can be eta-contracted.
newTyConEtadArity :: TyCon -> Int
newTyConEtadArity (AlgTyCon { algTcRhs = NewTyCon { nt_etad_rhs = (tvs, _) } })
  = length tvs
newTyConEtadArity tycon = pprPanic "newTyConEtadArity" (ppr tycon)

-- | Extract the bound type variables and representation type of an
-- eta-contracted @newtype@ 'TyCon'. Panics if the 'TyCon' is not a newtype.
newTyConEtadRhs :: TyCon -> ([TyVar], Type)
newTyConEtadRhs (AlgTyCon { algTcRhs = NewTyCon { nt_etad_rhs = tvs_rhs } })
  = tvs_rhs
newTyConEtadRhs tycon = pprPanic "newTyConEtadRhs" (ppr tycon)

-- | Extracts the @newtype@ coercion from such a 'TyCon', which can be used to
-- construct something with the @newtype@s type from its representation type
-- (right hand side). If the supplied 'TyCon' is not a @newtype@, returns
-- @Nothing@
newTyConCo_maybe :: TyCon -> Maybe (CoAxiom Unbranched)
newTyConCo_maybe tc = case tc of
  AlgTyCon { algTcRhs = NewTyCon { nt_co = co } } -> Just co
  _                                               -> Nothing

-- | Like 'newTyConCo_maybe', but panics if the 'TyCon' is not a @newtype@.
newTyConCo :: TyCon -> CoAxiom Unbranched
newTyConCo tc = case newTyConCo_maybe tc of
  Nothing -> pprPanic "newTyConCo" (ppr tc)
  Just co -> co

-- | The data constructor of a @newtype@ 'TyCon', if it is one.
newTyConDataCon_maybe :: TyCon -> Maybe DataCon
newTyConDataCon_maybe tc = case tc of
  AlgTyCon { algTcRhs = NewTyCon { data_con = con } } -> Just con
  _                                                   -> Nothing

-- | Find the \"stupid theta\" of the 'TyCon'. A \"stupid theta\" is the context
-- to the left of an algebraic type declaration, e.g. @Eq a@ in the declaration
-- @data Eq a => T a ...@. See @Note [The stupid context]@ in "GHC.Core.DataCon".
tyConStupidTheta :: TyCon -> [PredType]
tyConStupidTheta tycon = case tycon of
  AlgTyCon { algTcStupidTheta = stupid } -> stupid
  FunTyCon {}                            -> []
  _ -> pprPanic "tyConStupidTheta" (ppr tycon)

-- | Extract the 'TyVar's bound by a vanilla type synonym
-- and the corresponding (unsubstituted) right hand side.
synTyConDefn_maybe :: TyCon -> Maybe ([TyVar], Type)
synTyConDefn_maybe tc = case tc of
  SynonymTyCon { tyConTyVars = tyvars, synTcRhs = ty } -> Just (tyvars, ty)
  _                                                    -> Nothing

-- | Extract the information pertaining to the right hand side of a type synonym
-- (@type@) declaration.
synTyConRhs_maybe :: TyCon -> Maybe Type
synTyConRhs_maybe (SynonymTyCon {synTcRhs = rhs}) = Just rhs
synTyConRhs_maybe _                               = Nothing

-- | Extract the flavour of a type family (with all the extra information that
-- it carries)
famTyConFlav_maybe :: TyCon -> Maybe FamTyConFlav
famTyConFlav_maybe (FamilyTyCon {famTcFlav = flav}) = Just flav
famTyConFlav_maybe _                                = Nothing

-- | Is this 'TyCon' that for a class instance?
isClassTyCon :: TyCon -> Bool
isClassTyCon (AlgTyCon {algTcFlavour = ClassTyCon {}}) = True
isClassTyCon _                                         = False

-- | If this 'TyCon' is that for a class instance, return the class it is for.
-- Otherwise returns @Nothing@
tyConClass_maybe :: TyCon -> Maybe Class
tyConClass_maybe (AlgTyCon {algTcFlavour = ClassTyCon clas _}) = Just clas
tyConClass_maybe _                                             = Nothing

-- | Return the associated types of the 'TyCon', if any
tyConATs :: TyCon -> [TyCon]
tyConATs (AlgTyCon {algTcFlavour = ClassTyCon clas _}) = classATs clas
tyConATs _                                             = []

----------------------------------------------------------------------------
-- | Is this 'TyCon' that for a data family instance?
isFamInstTyCon :: TyCon -> Bool
isFamInstTyCon (AlgTyCon {algTcFlavour = DataFamInstTyCon {} }) = True
isFamInstTyCon _                                                = False

-- | If this 'TyCon' is a representation TyCon for a data family instance,
-- return the family TyCon, the instance types, and the representation
-- coercion axiom. Otherwise, return @Nothing@.
tyConFamInstSig_maybe :: TyCon -> Maybe (TyCon, [Type], CoAxiom Unbranched)
tyConFamInstSig_maybe (AlgTyCon {algTcFlavour = DataFamInstTyCon ax f ts })
  = Just (f, ts, ax)
tyConFamInstSig_maybe _ = Nothing

-- | If this 'TyCon' is that of a data family instance, return the family in question
-- and the instance types. Otherwise, return @Nothing@
tyConFamInst_maybe :: TyCon -> Maybe (TyCon, [Type])
tyConFamInst_maybe (AlgTyCon {algTcFlavour = DataFamInstTyCon _ f ts })
  = Just (f, ts)
tyConFamInst_maybe _ = Nothing

-- | If this 'TyCon' is that of a data family instance, return a 'TyCon' which
-- represents a coercion identifying the representation type with the type
-- instance family. Otherwise, return @Nothing@
tyConFamilyCoercion_maybe :: TyCon -> Maybe (CoAxiom Unbranched)
tyConFamilyCoercion_maybe (AlgTyCon {algTcFlavour = DataFamInstTyCon ax _ _ })
  = Just ax
tyConFamilyCoercion_maybe _ = Nothing

-- | Extract any 'RuntimeRepInfo' from this TyCon
tyConRuntimeRepInfo :: TyCon -> RuntimeRepInfo
tyConRuntimeRepInfo (PromotedDataCon { promDcRepInfo = rri }) = rri
tyConRuntimeRepInfo _                                         = NoRRI
  -- could panic in that second case. But Douglas Adams told me not to.

{- Note [Constructor tag allocation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When typechecking we need to allocate constructor tags to constructors.
They are allocated based on the position in the data_cons field of TyCon,
with the first constructor getting fIRST_TAG.

We used to pay linear cost per constructor, with each constructor looking up
its relative index in the constructor list. That was quadratic and prohibitive
for large data types with more than 10k constructors.

The current strategy is to build a NameEnv with a mapping from constructor's
Name to ConTag and pass it down to buildDataCon for efficient lookup.

Relevant ticket: #14657
-}
mkTyConTagMap :: TyCon -> NameEnv ConTag
mkTyConTagMap tycon
  = mkNameEnv $ map getName (tyConDataCons tycon) `zip` [fIRST_TAG..]
    -- See Note [Constructor tag allocation]

{-
************************************************************************
*                                                                      *
\subsection[TyCon-instances]{Instance declarations for @TyCon@}
*                                                                      *
************************************************************************

@TyCon@s are compared by comparing their @Unique@s.
-}

instance Eq TyCon where
    a == b = getUnique a == getUnique b
    a /= b = getUnique a /= getUnique b

instance Uniquable TyCon where
    getUnique tc = tyConUnique tc

instance Outputable TyCon where
  -- At the moment a promoted TyCon has the same Name as its
  -- corresponding TyCon, so we add the quote to distinguish it here
  ppr tc = pprPromotionQuote tc <> ppr (tyConName tc) <> pp_tc
    where
      -- Tag TcTyCons with "[tc]" in debug/dump output only
      pp_tc = getPprStyle $ \sty ->
              getPprDebug $ \debug ->
               if ((debug || dumpStyle sty) && isTcTyCon tc)
                  then text "[tc]"
                  else empty

-- | Paints a picture of what a 'TyCon' represents, in broad strokes.
-- This is used towards more informative error messages.
data TyConFlavour
  = ClassFlavour
  | TupleFlavour Boxity
  | SumFlavour
  | DataTypeFlavour
  | NewtypeFlavour
  | AbstractTypeFlavour
  | DataFamilyFlavour (Maybe TyCon)     -- Just tc <=> (tc == associated class)
  | OpenTypeFamilyFlavour (Maybe TyCon) -- Just tc <=> (tc == associated class)
  | ClosedTypeFamilyFlavour
  | TypeSynonymFlavour
  | BuiltInTypeFlavour -- ^ e.g., the @(->)@ 'TyCon'.
  | PromotedDataConFlavour
  deriving Eq

instance Outputable TyConFlavour where
  ppr = text . go
    where
      go ClassFlavour = "class"
      go (TupleFlavour boxed) | isBoxed boxed = "tuple"
                              | otherwise     = "unboxed tuple"
      go SumFlavour              = "unboxed sum"
      go DataTypeFlavour         = "data type"
      go NewtypeFlavour          = "newtype"
      go AbstractTypeFlavour     = "abstract type"
      go (DataFamilyFlavour (Just _))     = "associated data family"
      go (DataFamilyFlavour Nothing)      = "data family"
      go (OpenTypeFamilyFlavour (Just _)) = "associated type family"
      go (OpenTypeFamilyFlavour Nothing)  = "type family"
      go ClosedTypeFamilyFlavour = "type family"
      go TypeSynonymFlavour      = "type synonym"
      go BuiltInTypeFlavour      = "built-in type"
      go PromotedDataConFlavour  = "promoted data constructor"

-- | Classify a 'TyCon' into a 'TyConFlavour' for error messages and
-- saturation/openness checks.
tyConFlavour :: TyCon -> TyConFlavour
tyConFlavour (AlgTyCon { algTcFlavour = parent, algTcRhs = rhs })
  | ClassTyCon _ _ <- parent = ClassFlavour
  | otherwise = case rhs of
                  TupleTyCon { tup_sort = sort }
                                   -> TupleFlavour (tupleSortBoxity sort)
                  SumTyCon {}      -> SumFlavour
                  DataTyCon {}     -> DataTypeFlavour
                  NewTyCon {}      -> NewtypeFlavour
                  AbstractTyCon {} -> AbstractTypeFlavour
tyConFlavour (FamilyTyCon { famTcFlav = flav, famTcParent = parent })
  = case flav of
      DataFamilyTyCon{}            -> DataFamilyFlavour parent
      OpenSynFamilyTyCon           -> OpenTypeFamilyFlavour parent
      ClosedSynFamilyTyCon{}       -> ClosedTypeFamilyFlavour
      AbstractClosedSynFamilyTyCon -> ClosedTypeFamilyFlavour
      BuiltInSynFamTyCon{}         -> ClosedTypeFamilyFlavour
tyConFlavour (SynonymTyCon {})    = TypeSynonymFlavour
tyConFlavour (FunTyCon {})        = BuiltInTypeFlavour
tyConFlavour (PrimTyCon {})       = BuiltInTypeFlavour
tyConFlavour (PromotedDataCon {}) = PromotedDataConFlavour
tyConFlavour (TcTyCon { tcTyConFlavour = flav }) = flav

-- | Can this flavour of 'TyCon' appear unsaturated?
tcFlavourMustBeSaturated :: TyConFlavour -> Bool
tcFlavourMustBeSaturated ClassFlavour            = False
tcFlavourMustBeSaturated DataTypeFlavour         = False
tcFlavourMustBeSaturated NewtypeFlavour          = False
tcFlavourMustBeSaturated DataFamilyFlavour{}     = False
tcFlavourMustBeSaturated TupleFlavour{}          = False
tcFlavourMustBeSaturated SumFlavour              = False
tcFlavourMustBeSaturated AbstractTypeFlavour {}  = False
tcFlavourMustBeSaturated BuiltInTypeFlavour      = False
tcFlavourMustBeSaturated PromotedDataConFlavour  = False
tcFlavourMustBeSaturated TypeSynonymFlavour      = True
tcFlavourMustBeSaturated OpenTypeFamilyFlavour{} = True
tcFlavourMustBeSaturated ClosedTypeFamilyFlavour = True

-- | Is this flavour of 'TyCon' an open type family or a data family?
tcFlavourIsOpen :: TyConFlavour -> Bool
tcFlavourIsOpen DataFamilyFlavour{}     = True
tcFlavourIsOpen OpenTypeFamilyFlavour{} = True
tcFlavourIsOpen ClosedTypeFamilyFlavour = False
tcFlavourIsOpen ClassFlavour            = False
tcFlavourIsOpen DataTypeFlavour         = False
tcFlavourIsOpen NewtypeFlavour          = False
tcFlavourIsOpen TupleFlavour{}          = False
tcFlavourIsOpen SumFlavour              = False
tcFlavourIsOpen AbstractTypeFlavour {}  = False
tcFlavourIsOpen BuiltInTypeFlavour      = False
tcFlavourIsOpen PromotedDataConFlavour  = False
tcFlavourIsOpen TypeSynonymFlavour      = False

pprPromotionQuote :: TyCon -> SDoc
-- Promoted data constructors already have a tick in their OccName
pprPromotionQuote tc
  = case tc of
      PromotedDataCon {} -> char '\'' -- Always quote promoted DataCons in types
      _                  -> empty

instance NamedThing TyCon where
    getName = tyConName

instance Data.Data TyCon where
    -- don't traverse?
    toConstr _   = abstractConstr "TyCon"
    gunfold _ _  = error "gunfold"
    dataTypeOf _ = mkNoRepType "TyCon"

instance Binary Injectivity where
    put_ bh NotInjective   = putByte bh 0
    put_ bh (Injective xs) = putByte bh 1 >> put_ bh xs

    get bh = do { h <- getByte bh
                ; case h of
                    0 -> return NotInjective
                    _ -> do { xs <- get bh
                            ; return (Injective xs) } }

-- | Returns whether or not this 'TyCon' is definite, or a hole
-- that may be filled in at some later point.  See Note [Skolem abstract data]
tyConSkolem :: TyCon -> Bool
tyConSkolem = isHoleName . tyConName

-- Note [Skolem abstract data]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Skolem abstract data arises from data declarations in an hsig file.
--
-- The best analogy is to interpret the types declared in signature files as
-- elaborating to universally quantified type variables; e.g.,
--
--    unit p where
--        signature H where
--            data T
--            data S
--        module M where
--            import H
--            f :: (T ~ S) => a -> b
--            f x = x
--
-- elaborates as (with some fake structural types):
--
--    p :: forall t s. { f :: forall a b. t ~ s => a -> b }
--    p = { f = \x -> x } -- ill-typed
--
-- It is clear that inside p, t ~ s is not provable (and
-- if we tried to write a function to cast t to s, that
-- would not work), but if we call p @Int @Int, clearly Int ~ Int
-- is provable.  The skolem variables are all distinct from
-- one another, but we can't make assumptions like "f is
-- inaccessible", because the skolem variables will get
-- instantiated eventually!
--
-- Skolem abstractness can apply to "non-abstract" data as well:
--
--    unit p where
--        signature H1 where
--            data T = MkT
--        signature H2 where
--            data T = MkT
--        module M where
--            import qualified H1
--            import qualified H2
--            f :: (H1.T ~ H2.T) => a -> b
--            f x = x
--
-- This is why the test is on the original name of the TyCon,
-- not whether it is abstract or not.
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCon/Env.hs0000644000000000000000000001242714472400112020651 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[TyConEnv]{@TyConEnv@: tyCon environments} -} {-# LANGUAGE ScopedTypeVariables #-} module GHC.Core.TyCon.Env ( -- * TyCon environment (map) TyConEnv, -- ** Manipulating these environments mkTyConEnv, mkTyConEnvWith, emptyTyConEnv, isEmptyTyConEnv, unitTyConEnv, nonDetTyConEnvElts, extendTyConEnv_C, extendTyConEnv_Acc, extendTyConEnv, extendTyConEnvList, extendTyConEnvList_C, filterTyConEnv, anyTyConEnv, plusTyConEnv, plusTyConEnv_C, plusTyConEnv_CD, plusTyConEnv_CD2, alterTyConEnv, lookupTyConEnv, lookupTyConEnv_NF, delFromTyConEnv, delListFromTyConEnv, elemTyConEnv, mapTyConEnv, disjointTyConEnv, DTyConEnv, emptyDTyConEnv, isEmptyDTyConEnv, lookupDTyConEnv, delFromDTyConEnv, filterDTyConEnv, mapDTyConEnv, mapMaybeDTyConEnv, adjustDTyConEnv, alterDTyConEnv, extendDTyConEnv, foldDTyConEnv ) where import GHC.Prelude import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Core.TyCon (TyCon) import GHC.Data.Maybe {- ************************************************************************ * * \subsection{TyCon environment} * * ************************************************************************ -} -- | TyCon Environment type TyConEnv a = UniqFM TyCon a -- Domain is TyCon emptyTyConEnv :: TyConEnv a isEmptyTyConEnv :: TyConEnv a -> Bool mkTyConEnv :: [(TyCon,a)] -> TyConEnv a mkTyConEnvWith :: (a -> TyCon) -> [a] -> TyConEnv a nonDetTyConEnvElts :: TyConEnv a -> [a] alterTyConEnv :: (Maybe a-> Maybe a) -> TyConEnv a -> TyCon -> TyConEnv a extendTyConEnv_C :: (a->a->a) -> TyConEnv a -> TyCon -> a -> TyConEnv a extendTyConEnv_Acc :: (a->b->b) -> (a->b) -> TyConEnv b -> TyCon -> a -> TyConEnv b extendTyConEnv :: TyConEnv a -> TyCon -> a -> TyConEnv a plusTyConEnv :: TyConEnv a -> TyConEnv a -> TyConEnv a plusTyConEnv_C :: 
(a->a->a) -> TyConEnv a -> TyConEnv a -> TyConEnv a plusTyConEnv_CD :: (a->a->a) -> TyConEnv a -> a -> TyConEnv a -> a -> TyConEnv a plusTyConEnv_CD2 :: (Maybe a->Maybe a->a) -> TyConEnv a -> TyConEnv a -> TyConEnv a extendTyConEnvList :: TyConEnv a -> [(TyCon,a)] -> TyConEnv a extendTyConEnvList_C :: (a->a->a) -> TyConEnv a -> [(TyCon,a)] -> TyConEnv a delFromTyConEnv :: TyConEnv a -> TyCon -> TyConEnv a delListFromTyConEnv :: TyConEnv a -> [TyCon] -> TyConEnv a elemTyConEnv :: TyCon -> TyConEnv a -> Bool unitTyConEnv :: TyCon -> a -> TyConEnv a lookupTyConEnv :: TyConEnv a -> TyCon -> Maybe a lookupTyConEnv_NF :: TyConEnv a -> TyCon -> a filterTyConEnv :: (elt -> Bool) -> TyConEnv elt -> TyConEnv elt anyTyConEnv :: (elt -> Bool) -> TyConEnv elt -> Bool mapTyConEnv :: (elt1 -> elt2) -> TyConEnv elt1 -> TyConEnv elt2 disjointTyConEnv :: TyConEnv a -> TyConEnv a -> Bool nonDetTyConEnvElts x = nonDetEltsUFM x emptyTyConEnv = emptyUFM isEmptyTyConEnv = isNullUFM unitTyConEnv x y = unitUFM x y extendTyConEnv x y z = addToUFM x y z extendTyConEnvList x l = addListToUFM x l lookupTyConEnv x y = lookupUFM x y alterTyConEnv = alterUFM mkTyConEnv l = listToUFM l mkTyConEnvWith f = mkTyConEnv . map (\a -> (f a, a)) elemTyConEnv x y = elemUFM x y plusTyConEnv x y = plusUFM x y plusTyConEnv_C f x y = plusUFM_C f x y plusTyConEnv_CD f x d y b = plusUFM_CD f x d y b plusTyConEnv_CD2 f x y = plusUFM_CD2 f x y extendTyConEnv_C f x y z = addToUFM_C f x y z mapTyConEnv f x = mapUFM f x extendTyConEnv_Acc x y z a b = addToUFM_Acc x y z a b extendTyConEnvList_C x y z = addListToUFM_C x y z delFromTyConEnv x y = delFromUFM x y delListFromTyConEnv x y = delListFromUFM x y filterTyConEnv x y = filterUFM x y anyTyConEnv f x = foldUFM ((||) . 
f) False x disjointTyConEnv x y = disjointUFM x y lookupTyConEnv_NF env n = expectJust "lookupTyConEnv_NF" (lookupTyConEnv env n) -- | Deterministic TyCon Environment -- -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why -- we need DTyConEnv. type DTyConEnv a = UniqDFM TyCon a emptyDTyConEnv :: DTyConEnv a emptyDTyConEnv = emptyUDFM isEmptyDTyConEnv :: DTyConEnv a -> Bool isEmptyDTyConEnv = isNullUDFM lookupDTyConEnv :: DTyConEnv a -> TyCon -> Maybe a lookupDTyConEnv = lookupUDFM delFromDTyConEnv :: DTyConEnv a -> TyCon -> DTyConEnv a delFromDTyConEnv = delFromUDFM filterDTyConEnv :: (a -> Bool) -> DTyConEnv a -> DTyConEnv a filterDTyConEnv = filterUDFM mapDTyConEnv :: (a -> b) -> DTyConEnv a -> DTyConEnv b mapDTyConEnv = mapUDFM mapMaybeDTyConEnv :: (a -> Maybe b) -> DTyConEnv a -> DTyConEnv b mapMaybeDTyConEnv = mapMaybeUDFM adjustDTyConEnv :: (a -> a) -> DTyConEnv a -> TyCon -> DTyConEnv a adjustDTyConEnv = adjustUDFM alterDTyConEnv :: (Maybe a -> Maybe a) -> DTyConEnv a -> TyCon -> DTyConEnv a alterDTyConEnv = alterUDFM extendDTyConEnv :: DTyConEnv a -> TyCon -> a -> DTyConEnv a extendDTyConEnv = addToUDFM foldDTyConEnv :: (elt -> a -> a) -> a -> DTyConEnv elt -> a foldDTyConEnv = foldUDFM ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCon/RecWalk.hs0000644000000000000000000000632214472400112021446 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Check for recursive type constructors. 
-}

module GHC.Core.TyCon.RecWalk (
        -- * Recursion breaking
        RecTcChecker, initRecTc, defaultRecTcMaxBound,
        setRecTcMaxBound, checkRecTc
    ) where

import GHC.Prelude

import GHC.Core.TyCon
import GHC.Core.TyCon.Env
import GHC.Utils.Outputable

{-
************************************************************************
*                                                                      *
                 Walking over recursive TyCons
*                                                                      *
************************************************************************

Note [Expanding newtypes and products]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When expanding a type to expose a data-type constructor, we need to be
careful about newtypes, lest we fall into an infinite loop. Here are
the key examples:

  newtype Id  x = MkId x
  newtype Fix f = MkFix (f (Fix f))
  newtype T     = MkT (T -> T)

  Type           Expansion
 --------------------------
  T              T -> T
  Fix Maybe      Maybe (Fix Maybe)
  Id (Id Int)    Int
  Fix Id         NO NO NO

Notice that
 * We can expand T, even though it's recursive.
 * We can expand Id (Id Int), even though the Id shows up
   twice at the outer level, because Id is non-recursive

So, when expanding, we keep track of when we've seen a recursive
newtype at outermost level; and bail out if we see it again.

We sometimes want to do the same for product types, so that the
strictness analyser doesn't unbox infinitely deeply.

More precisely, we keep a *count* of how many times we've seen it.
This is to account for

  data instance T (a,b) = MkT (T a) (T b)

Then (#10482) if we have a type like
        T (Int,(Int,(Int,(Int,Int))))
we can still unbox deeply enough during strictness analysis.
We have to treat T as potentially recursive, but it's still
good to be able to unwrap multiple layers.

The function that manages all this is checkRecTc.
-}

data RecTcChecker = RC !Int (TyConEnv Int)
  -- The upper bound, and the number of times
  -- we have encountered each TyCon

instance Outputable RecTcChecker where
  ppr (RC n env) = text "RC:" <> int n <+> ppr env

-- | Initialise a 'RecTcChecker' with 'defaultRecTcMaxBound'.
initRecTc :: RecTcChecker initRecTc = RC defaultRecTcMaxBound emptyTyConEnv -- | The default upper bound (100) for the number of times a 'RecTcChecker' is -- allowed to encounter each 'TyCon'. defaultRecTcMaxBound :: Int defaultRecTcMaxBound = 100 -- Should we have a flag for this? -- | Change the upper bound for the number of times a 'RecTcChecker' is allowed -- to encounter each 'TyCon'. setRecTcMaxBound :: Int -> RecTcChecker -> RecTcChecker setRecTcMaxBound new_bound (RC _old_bound rec_nts) = RC new_bound rec_nts checkRecTc :: RecTcChecker -> TyCon -> Maybe RecTcChecker -- Nothing => Recursion detected -- Just rec_tcs => Keep going checkRecTc (RC bound rec_nts) tc = case lookupTyConEnv rec_nts tc of Just n | n >= bound -> Nothing | otherwise -> Just (RC bound (extendTyConEnv rec_nts tc (n+1))) Nothing -> Just (RC bound (extendTyConEnv rec_nts tc 1)) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Type.hs0000644000000000000000000046014014472400112020005 0ustar0000000000000000-- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1998 -- -- Type - public interface {-# LANGUAGE FlexibleContexts, PatternSynonyms, ViewPatterns #-} {-# OPTIONS_GHC -fno-warn-orphans #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Main functions for manipulating types and type-related things module GHC.Core.Type ( -- Note some of this is just re-exports from TyCon.. 
-- * Main data types representing Types -- $type_classification -- $representation_types Type, ArgFlag(..), AnonArgFlag(..), Specificity(..), KindOrType, PredType, ThetaType, FRRType, Var, TyVar, isTyVar, TyCoVar, TyCoBinder, TyCoVarBinder, TyVarBinder, Mult, Scaled, KnotTied, -- ** Constructing and deconstructing types mkTyVarTy, mkTyVarTys, getTyVar, getTyVar_maybe, repGetTyVar_maybe, getCastedTyVar_maybe, tyVarKind, varType, mkAppTy, mkAppTys, splitAppTy, splitAppTys, repSplitAppTys, splitAppTy_maybe, repSplitAppTy_maybe, tcRepSplitAppTy_maybe, mkFunTy, mkVisFunTy, mkInvisFunTy, mkVisFunTys, mkVisFunTyMany, mkInvisFunTyMany, mkVisFunTysMany, mkInvisFunTysMany, splitFunTy, splitFunTy_maybe, splitFunTys, funResultTy, funArgTy, mkTyConApp, mkTyConTy, mkTYPEapp, tyConAppTyCon_maybe, tyConAppTyConPicky_maybe, tyConAppArgs_maybe, tyConAppTyCon, tyConAppArgs, splitTyConApp_maybe, splitTyConApp, tyConAppArgN, tcSplitTyConApp_maybe, splitListTyConApp_maybe, repSplitTyConApp_maybe, tcRepSplitTyConApp_maybe, mkForAllTy, mkForAllTys, mkInvisForAllTys, mkTyCoInvForAllTys, mkSpecForAllTy, mkSpecForAllTys, mkVisForAllTys, mkTyCoInvForAllTy, mkInfForAllTy, mkInfForAllTys, splitForAllTyCoVars, splitForAllTyVars, splitForAllReqTVBinders, splitForAllInvisTVBinders, splitForAllTyCoVarBinders, splitForAllTyCoVar_maybe, splitForAllTyCoVar, splitForAllTyVar_maybe, splitForAllCoVar_maybe, splitPiTy_maybe, splitPiTy, splitPiTys, getRuntimeArgTys, mkTyConBindersPreferAnon, mkPiTy, mkPiTys, piResultTy, piResultTys, applyTysX, dropForAlls, mkFamilyTyConApp, buildSynTyCon, mkNumLitTy, isNumLitTy, mkStrLitTy, isStrLitTy, mkCharLitTy, isCharLitTy, isLitTy, isPredTy, getRuntimeRep_maybe, kindRep_maybe, kindRep, mkCastTy, mkCoercionTy, splitCastTy_maybe, userTypeError_maybe, pprUserTypeErrorTy, coAxNthLHS, stripCoercionTy, splitInvisPiTys, splitInvisPiTysN, invisibleTyBndrCount, filterOutInvisibleTypes, filterOutInferredTypes, partitionInvisibleTypes, partitionInvisibles, tyConArgFlags, 
appTyArgFlags, -- ** Analyzing types TyCoMapper(..), mapTyCo, mapTyCoX, TyCoFolder(..), foldTyCo, noView, -- (Newtypes) newTyConInstRhs, -- ** Binders sameVis, mkTyCoVarBinder, mkTyCoVarBinders, mkTyVarBinder, mkTyVarBinders, tyVarSpecToBinders, mkAnonBinder, isAnonTyCoBinder, binderVar, binderVars, binderType, binderArgFlag, tyCoBinderType, tyCoBinderVar_maybe, tyBinderType, binderRelevantType_maybe, isVisibleArgFlag, isInvisibleArgFlag, isVisibleBinder, isInvisibleBinder, isNamedBinder, tyConBindersTyCoBinders, -- ** Common type constructors funTyCon, unrestrictedFunTyCon, -- ** Predicates on types isTyVarTy, isFunTy, isCoercionTy, isCoercionTy_maybe, isForAllTy, isForAllTy_ty, isForAllTy_co, isPiTy, isTauTy, isFamFreeTy, isCoVarType, isAtomicTy, isValidJoinPointType, tyConAppNeedsKindSig, -- *** Levity and boxity typeLevity_maybe, isLiftedTypeKind, isUnliftedTypeKind, isBoxedTypeKind, pickyIsLiftedTypeKind, isLiftedRuntimeRep, isUnliftedRuntimeRep, runtimeRepLevity_maybe, isBoxedRuntimeRep, isLiftedLevity, isUnliftedLevity, isUnliftedType, isBoxedType, isUnboxedTupleType, isUnboxedSumType, mightBeLiftedType, mightBeUnliftedType, isStateType, isAlgType, isDataFamilyAppType, isPrimitiveType, isStrictType, isLevityTy, isLevityVar, isRuntimeRepTy, isRuntimeRepVar, isRuntimeRepKindedTy, dropRuntimeRepArgs, getRuntimeRep, getLevity, getLevity_maybe, -- * Multiplicity isMultiplicityTy, isMultiplicityVar, unrestricted, linear, tymult, mkScaled, irrelevantMult, scaledSet, pattern One, pattern Many, isOneDataConTy, isManyDataConTy, isLinearType, -- * Main data types representing Kinds Kind, -- ** Finding the kind of a type typeKind, tcTypeKind, typeHasFixedRuntimeRep, resultHasFixedRuntimeRep, tcIsLiftedTypeKind, tcIsConstraintKind, tcReturnsConstraintKind, tcIsBoxedTypeKind, tcIsRuntimeTypeKind, -- ** Common Kind liftedTypeKind, unliftedTypeKind, -- * Type free variables tyCoFVsOfType, tyCoFVsBndr, tyCoFVsVarBndr, tyCoFVsVarBndrs, tyCoVarsOfType, tyCoVarsOfTypes, 
tyCoVarsOfTypeDSet, coVarsOfType, coVarsOfTypes, anyFreeVarsOfType, anyFreeVarsOfTypes, noFreeVarsOfType, splitVisVarsOfType, splitVisVarsOfTypes, expandTypeSynonyms, typeSize, occCheckExpand, -- ** Closing over kinds closeOverKindsDSet, closeOverKindsList, closeOverKinds, -- * Well-scoped lists of variables scopedSort, tyCoVarsOfTypeWellScoped, tyCoVarsOfTypesWellScoped, -- * Type comparison eqType, eqTypeX, eqTypes, nonDetCmpType, nonDetCmpTypes, nonDetCmpTypeX, nonDetCmpTypesX, nonDetCmpTc, eqVarBndrs, -- * Forcing evaluation of types seqType, seqTypes, -- * Other views onto Types coreView, tcView, tyConsOfType, -- * Main type substitution data types TvSubstEnv, -- Representation widely visible TCvSubst(..), -- Representation visible to a few friends -- ** Manipulating type substitutions emptyTvSubstEnv, emptyTCvSubst, mkEmptyTCvSubst, mkTCvSubst, zipTvSubst, mkTvSubstPrs, zipTCvSubst, notElemTCvSubst, getTvSubstEnv, setTvSubstEnv, zapTCvSubst, getTCvInScope, getTCvSubstRangeFVs, extendTCvInScope, extendTCvInScopeList, extendTCvInScopeSet, extendTCvSubst, extendCvSubst, extendTvSubst, extendTvSubstBinderAndInScope, extendTvSubstList, extendTvSubstAndInScope, extendTCvSubstList, extendTvSubstWithClone, extendTCvSubstWithClone, isInScope, composeTCvSubstEnv, composeTCvSubst, zipTyEnv, zipCoEnv, isEmptyTCvSubst, unionTCvSubst, -- ** Performing substitution on types and kinds substTy, substTys, substScaledTy, substScaledTys, substTyWith, substTysWith, substTheta, substTyAddInScope, substTyUnchecked, substTysUnchecked, substScaledTyUnchecked, substScaledTysUnchecked, substThetaUnchecked, substTyWithUnchecked, substCoUnchecked, substCoWithUnchecked, substTyVarBndr, substTyVarBndrs, substTyVar, substTyVars, substVarBndr, substVarBndrs, substTyCoBndr, cloneTyVarBndr, cloneTyVarBndrs, lookupTyVar, -- * Tidying type related things up for printing tidyType, tidyTypes, tidyOpenType, tidyOpenTypes, tidyVarBndr, tidyVarBndrs, tidyFreeTyCoVars, tidyOpenTyCoVar, 
tidyOpenTyCoVars, tidyTyCoVarOcc, tidyTopType, tidyTyCoVarBinder, tidyTyCoVarBinders, -- * Kinds isConstraintKindCon, classifiesTypeWithValues, isConcrete, isFixedRuntimeRepKind, ) where import GHC.Prelude import GHC.Types.Basic -- We import the representation and primitive functions from GHC.Core.TyCo.Rep. -- Many things are reexported, but not the representation! import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Subst import GHC.Core.TyCo.Tidy import GHC.Core.TyCo.FVs -- friends: import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Unique.Set import GHC.Core.TyCon import GHC.Builtin.Types.Prim import {-# SOURCE #-} GHC.Builtin.Types ( charTy, naturalTy, listTyCon , typeSymbolKind, liftedTypeKind, unliftedTypeKind , liftedRepTy, unliftedRepTy, zeroBitRepTy , boxedRepDataConTyCon , constraintKind, zeroBitTypeKind , unrestrictedFunTyCon , manyDataConTy, oneDataConTy ) import GHC.Types.Name( Name ) import GHC.Builtin.Names import GHC.Core.Coercion.Axiom import {-# SOURCE #-} GHC.Core.Coercion ( mkNomReflCo, mkGReflCo, mkReflCo , mkTyConAppCo, mkAppCo, mkCoVarCo, mkAxiomRuleCo , mkForAllCo, mkFunCo, mkAxiomInstCo, mkUnivCo , mkSymCo, mkTransCo, mkNthCo, mkLRCo, mkInstCo , mkKindCo, mkSubCo , decomposePiCos, coercionKind, coercionLKind , coercionRKind, coercionType , isReflexiveCo, seqCo , topNormaliseNewType_maybe ) import {-# SOURCE #-} GHC.Tc.Utils.TcType ( isConcreteTyVar ) -- others import GHC.Utils.Misc import GHC.Utils.FV import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.FastString import GHC.Data.Pair import GHC.Data.List.SetOps import GHC.Types.Unique ( nonDetCmpUnique ) import GHC.Data.Maybe ( orElse, expectJust ) import Data.Maybe ( isJust ) import Control.Monad ( guard ) -- import GHC.Utils.Trace -- $type_classification -- #type_classification# -- -- Types are any, but at least one, of: -- -- [Boxed] Iff its representation is a pointer to an object on the -- GC'd heap. 
Operationally, heap objects can be entered as -- a means of evaluation. -- -- [Lifted] Iff it has bottom as an element: An instance of a -- lifted type might diverge when evaluated. -- GHC Haskell's unboxed types are unlifted. -- An unboxed, but lifted type is not very useful. -- (Example: A byte-represented type, where evaluating 0xff -- computes the 12345678th collatz number modulo 0xff.) -- Only lifted types may be unified with a type variable. -- -- [Algebraic] Iff it is a type with one or more constructors, whether -- declared with @data@ or @newtype@. -- An algebraic type is one that can be deconstructed -- with a case expression. There are algebraic types that -- are not lifted types, like unlifted data types or -- unboxed tuples. -- -- [Data] Iff it is a type declared with @data@, or a boxed tuple. -- There are also /unlifted/ data types. -- -- [Primitive] Iff it is a built-in type that can't be expressed in Haskell. -- -- [Unlifted] Anything that isn't lifted is considered unlifted. -- -- Currently, all primitive types are unlifted, but that's not necessarily -- the case: for example, @Int@ could be primitive. -- -- Some primitive types are unboxed, such as @Int#@, whereas some are boxed -- but unlifted (such as @ByteArray#@). The only primitive types that we -- classify as algebraic are the unboxed tuples. -- -- Some examples of type classifications that may make this a bit clearer are: -- -- @ -- Type primitive boxed lifted algebraic -- ----------------------------------------------------------------------------- -- Int# Yes No No No -- ByteArray# Yes Yes No No -- (\# a, b \#) Yes No No Yes -- (\# a | b \#) Yes No No Yes -- ( a, b ) No Yes Yes Yes -- [a] No Yes Yes Yes -- @ -- $representation_types -- A /source type/ is a type that is a separate type as far as the type checker is -- concerned, but which has a more low-level representation as far as Core-to-Core -- passes and the rest of the back end is concerned. 
-- -- You don't normally have to worry about this, as the utility functions in -- this module will automatically convert a source into a representation type -- if they are spotted, to the best of its abilities. If you don't want this -- to happen, use the equivalent functions from the "TcType" module. {- ************************************************************************ * * Type representation * * ************************************************************************ Note [coreView vs tcView] ~~~~~~~~~~~~~~~~~~~~~~~~~ So far as the typechecker is concerned, 'Constraint' and 'TYPE LiftedRep' are distinct kinds. But in Core these two are treated as identical. We implement this by making 'coreView' convert 'Constraint' to 'TYPE LiftedRep' on the fly. The function tcView (used in the type checker) does not do this. Accordingly, tcView is used in type-checker-oriented functions (including the pure unifier, used in instance resolution), while coreView is used during e.g. optimisation passes. See also #11715, which tracks removing this inconsistency. In order to prevent users from discerning between Type and Constraint (which could create inconsistent axioms -- see #21092), we say that Type and Constraint are not SurelyApart in the pure unifier. See GHC.Core.Unify.unify_ty, where this case produces MaybeApart. One annoying consequence of this inconsistency is that we can get ill-kinded updates to metavariables. #20356 is a case in point. Simplifying somewhat, we end up with [W] (alpha :: Constraint) ~ (Int :: Type) This is heterogeneous, so we produce [W] co :: (Constraint ~ Type) and transform our original wanted to become [W] alpha ~ Int |> sym co in accordance with Note [Equalities with incompatible kinds] in GHC.Tc.Solver.Canonical. Our transformed wanted is now homogeneous (both sides have kind Constraint) and so we unify alpha := Int |> sym co. However, it's not so easy: when we build the cast (Int |> sym co), we actually just get Int back. 
This is because we forbid reflexive casts (invariant (EQ2) of
Note [Respecting definitional equality] in GHC.Core.TyCo.Rep), and
co looks reflexive: it relates Type and Constraint, even though these
are considered identical in Core. Above, when we tried to say
alpha := Int |> sym co, we really ended up doing alpha := Int -- even
though alpha :: Constraint and Int :: Type have different kinds. Nothing
has really gone wrong, though: we still emitted
[W] co :: (Constraint ~ Type), which will be insoluble and lead to a
decent error message. We simply need not to fall over at the moment of
unification, because all will be OK in the end. We thus use the Core
eqType, not the Haskell tcEqType, in the kind check for a meta-tyvar
unification in GHC.Tc.Utils.TcMType.writeMetaTyVarRef.
-}

-- | Gives the typechecker view of a type. This unwraps synonyms but
-- leaves 'Constraint' alone. c.f. 'coreView', which turns 'Constraint' into
-- 'Type'. Returns 'Nothing' if no unwrapping happens.
-- See also Note [coreView vs tcView]
tcView :: Type -> Maybe Type
-- Only a TyConApp can possibly be a synonym application, so matching on
-- that one constructor keeps the common non-synonym path cheap.
tcView (TyConApp tc tys) | res@(Just _) <- expandSynTyConApp_maybe tc tys = res
tcView _ = Nothing
-- See Note [Inlining coreView].
{-# INLINE tcView #-}

coreView :: Type -> Maybe Type
-- ^ This function strips off the /top layer only/ of a type synonym
-- application (if any) to expose its underlying representation type.
-- Returns 'Nothing' if there is nothing to look through.
-- This function considers 'Constraint' to be a synonym of @Type@.
--
-- This function does not look through type family applications.
--
-- By being non-recursive and inlined, this case analysis gets efficiently
-- joined onto the case analysis that the caller is already doing
coreView ty@(TyConApp tc tys)
  | res@(Just _) <- expandSynTyConApp_maybe tc tys = res

  -- At the Core level, Constraint = Type
  -- See Note [coreView vs tcView]
  | isConstraintKindCon tc
  = assertPpr (null tys) (ppr ty) $ Just liftedTypeKind

coreView _ = Nothing
-- See Note [Inlining coreView].
{-# INLINE coreView #-}

-----------------------------------------------

-- | @expandSynTyConApp_maybe tc tys@ expands the RHS of type synonym @tc@
-- instantiated at arguments @tys@, or returns 'Nothing' if @tc@ is not a
-- synonym.
expandSynTyConApp_maybe :: TyCon -> [Type] -> Maybe Type
{-# INLINE expandSynTyConApp_maybe #-}
-- This INLINE will inline the call to expandSynTyConApp_maybe in coreView,
-- which will eliminate the allocation of the Just/Nothing in the result
-- Don't be tempted to make `expand_syn` (which is NOINLINE) return the
-- Just/Nothing, else you'll increase allocation
expandSynTyConApp_maybe tc arg_tys
  | Just (tvs, rhs) <- synTyConDefn_maybe tc
  , arg_tys `lengthAtLeast` (tyConArity tc)
    -- Saturated or over-saturated only: see precondition of expand_syn
  = Just (expand_syn tvs rhs arg_tys)
  | otherwise
  = Nothing

-- | A helper for 'expandSynTyConApp_maybe' to avoid inlining this cold path
-- into call-sites.
--
-- Precondition: the call is saturated or over-saturated;
-- i.e. length tvs <= length arg_tys
expand_syn :: [TyVar]  -- ^ the variables bound by the synonym
           -> Type     -- ^ the RHS of the synonym
           -> [Type]   -- ^ the type arguments the synonym is instantiated at.
           -> Type
{-# NOINLINE expand_syn #-} -- We never want to inline this cold-path.
expand_syn tvs rhs arg_tys
  -- No substitution necessary if either tvs or tys is empty
  -- This is both more efficient, and steers clear of an infinite
  -- loop; see Note [Care using synonyms to compress types]
  | null arg_tys = assert (null tvs) rhs
  | null tvs     = mkAppTys rhs arg_tys
  | otherwise    = go empty_subst tvs arg_tys
  where
    empty_subst = mkEmptyTCvSubst in_scope
    in_scope = mkInScopeSet $ shallowTyCoVarsOfTypes $ arg_tys
      -- The free vars of 'rhs' should all be bound by 'tenv',
      -- so we only need the free vars of tys
      -- See also Note [The substitution invariant] in GHC.Core.TyCo.Subst.

    go subst [] tys
      | null tys  = rhs'  -- Exactly Saturated
      | otherwise = mkAppTys rhs' tys
          -- It's important to use mkAppTys, rather than (foldl AppTy),
          -- because the function part might well return a
          -- partially-applied type constructor; indeed, usually will!
      where
        rhs' = substTy subst rhs

    go subst (tv:tvs) (ty:tys) = go (extendTvSubst subst tv ty) tvs tys

    -- Under-saturated, precondition failed
    go _ (_:_) [] = pprPanic "expand_syn" (ppr tvs $$ ppr rhs $$ ppr arg_tys)

coreFullView :: Type -> Type
-- ^ Iterates 'coreView' until there is no more synonym to expand.
-- See Note [Inlining coreView].
coreFullView ty@(TyConApp tc _)
  | isTypeSynonymTyCon tc || isConstraintKindCon tc = go ty
  where
    go ty
      | Just ty' <- coreView ty = go ty'
      | otherwise = ty
coreFullView ty = ty
{-# INLINE coreFullView #-}

{- Note [Inlining coreView]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is very common to have a function

    f :: Type -> ...
    f ty | Just ty' <- coreView ty = f ty'
    f (TyVarTy ...) = ...
    f ...           = ...

If f is not otherwise recursive, the initial call to coreView
causes f to become recursive, which kills the possibility of
inlining. Instead, for non-recursive functions, we prefer to
use coreFullView, which guarantees to unwrap top-level type
synonyms. It can be inlined and is efficient and non-allocating
in its fast path. For this to really be fast, all calls made
on its fast path must also be inlined, linked back to this
Note.
-}

-----------------------------------------------

expandTypeSynonyms :: Type -> Type
-- ^ Expand out all type synonyms.  Actually, it'd suffice to expand out
-- just the ones that discard type variables (e.g.  type Funny a = Int)
-- But we don't know which those are currently, so we just expand all.
--
-- 'expandTypeSynonyms' only expands out type synonyms mentioned in the type,
-- not in the kinds of any TyCon or TyVar mentioned in the type.
--
-- Keep this synchronized with 'synonymTyConsOfType'
expandTypeSynonyms ty
  = go (mkEmptyTCvSubst in_scope) ty
  where
    in_scope = mkInScopeSet (tyCoVarsOfType ty)

    -- 'go' walks the type top-down, expanding any saturated synonym
    -- application it meets; 'go_co' does the same for coercions.
    go subst (TyConApp tc tys)
      | Just (tenv, rhs, tys') <- expandSynTyCon_maybe tc expanded_tys
      = let subst' = mkTvSubst in_scope (mkVarEnv tenv)
            -- Make a fresh substitution; rhs has nothing to
            -- do with anything that has happened so far
            -- NB: if you make changes here, be sure to build an
            --     /idempotent/ substitution, even in the nested case
            --        type T a b = a -> b
            --        type S x y = T y x
            -- (#11665)
        in  mkAppTys (go subst' rhs) tys'
      | otherwise
      = TyConApp tc expanded_tys
      where
        -- Arguments are expanded first, so the expanded RHS is applied
        -- to already-expanded arguments
        expanded_tys = (map (go subst) tys)

    go _ (LitTy l)     = LitTy l
    go subst (TyVarTy tv)   = substTyVar subst tv
    go subst (AppTy t1 t2)  = mkAppTy (go subst t1) (go subst t2)
    go subst ty@(FunTy _ mult arg res)
      = ty { ft_mult = go subst mult, ft_arg = go subst arg, ft_res = go subst res }
    go subst (ForAllTy (Bndr tv vis) t)
      = let (subst', tv') = substVarBndrUsing go subst tv
        in ForAllTy (Bndr tv' vis) (go subst' t)
    go subst (CastTy ty co)  = mkCastTy (go subst ty) (go_co subst co)
    go subst (CoercionTy co) = mkCoercionTy (go_co subst co)

    go_mco _     MRefl    = MRefl
    go_mco subst (MCo co) = MCo (go_co subst co)

    go_co subst (Refl ty)        = mkNomReflCo (go subst ty)
    go_co subst (GRefl r ty mco) = mkGReflCo r (go subst ty) (go_mco subst mco)
    -- NB: coercions are always expanded upon creation
    go_co subst (TyConAppCo r tc args) = mkTyConAppCo r tc (map (go_co subst) args)
    go_co subst (AppCo co arg)         = mkAppCo (go_co subst co) (go_co subst arg)
    go_co subst (ForAllCo tv kind_co co)
      = let (subst', tv', kind_co') = go_cobndr subst tv kind_co
        in mkForAllCo tv' kind_co' (go_co subst' co)
    go_co subst (FunCo r w co1 co2)
      = mkFunCo r (go_co subst w) (go_co subst co1) (go_co subst co2)
    go_co subst (CoVarCo cv)              = substCoVar subst cv
    go_co subst (AxiomInstCo ax ind args) = mkAxiomInstCo ax ind (map (go_co subst) args)
    go_co subst (UnivCo p r t1 t2)        = mkUnivCo (go_prov subst p) r
                                                     (go subst t1) (go subst t2)
    go_co subst (SymCo co)        = mkSymCo (go_co subst co)
    go_co subst (TransCo co1 co2) = mkTransCo (go_co subst co1) (go_co subst co2)
    go_co subst (NthCo r n co)    = mkNthCo r n (go_co subst co)
    go_co subst (LRCo lr co)      = mkLRCo lr (go_co subst co)
    go_co subst (InstCo co arg)   = mkInstCo (go_co subst co) (go_co subst arg)
    go_co subst (KindCo co)       = mkKindCo (go_co subst co)
    go_co subst (SubCo co)        = mkSubCo (go_co subst co)
    go_co subst (AxiomRuleCo ax cs) = AxiomRuleCo ax (map (go_co subst) cs)
    go_co _ (HoleCo h) = pprPanic "expandTypeSynonyms hit a hole" (ppr h)

    go_prov subst (PhantomProv co)    = PhantomProv (go_co subst co)
    go_prov subst (ProofIrrelProv co) = ProofIrrelProv (go_co subst co)
    go_prov _     p@(PluginProv _)    = p
    go_prov _     p@(CorePrepProv _)  = p

    -- the "False" and "const" are to accommodate the type of
    -- substForAllCoBndrUsing, which is general enough to
    -- handle coercion optimization (which sometimes swaps the
    -- order of a coercion)
    go_cobndr subst = substForAllCoBndrUsing False (go_co subst) subst

-- | An INLINE helper for functions such as 'kindRep_maybe' below.
--
-- @isTyConKeyApp_maybe key ty@ returns @Just tys@ iff
-- the type @ty = T tys@, where T's unique = key
isTyConKeyApp_maybe :: Unique -> Type -> Maybe [Type]
isTyConKeyApp_maybe key ty
  | TyConApp tc args <- coreFullView ty  -- Look through synonyms first
  , tc `hasKey` key
  = Just args
  | otherwise
  = Nothing
{-# INLINE isTyConKeyApp_maybe #-}

-- | Extract the RuntimeRep classifier of a type from its kind. For example,
-- @kindRep * = LiftedRep@; Panics if this is not possible.
-- Treats * and Constraint as the same
kindRep :: HasDebugCallStack => Kind -> Type
kindRep k = case kindRep_maybe k of
              Just r  -> r
              Nothing -> pprPanic "kindRep" (ppr k)

-- | Given a kind (TYPE rr), extract its RuntimeRep classifier rr.
-- For example, @kindRep_maybe * = Just LiftedRep@
-- Returns 'Nothing' if the kind is not of form (TYPE rr)
-- Treats * and Constraint as the same
kindRep_maybe :: HasDebugCallStack => Kind -> Maybe Type
kindRep_maybe kind
  | Just [arg] <- isTyConKeyApp_maybe tYPETyConKey kind = Just arg
  | otherwise                                           = Nothing

-- | Returns True if the kind classifies types which are allocated on
-- the GC'd heap and False otherwise. Note that this returns False for
-- representation-polymorphic kinds, which may be specialized to a kind that
-- classifies AddrRep or even unboxed kinds.
isBoxedTypeKind :: Kind -> Bool
isBoxedTypeKind kind
  = case kindRep_maybe kind of
      Just rep -> isBoxedRuntimeRep rep
      Nothing  -> False

-- | This version considers Constraint to be the same as *. Returns True
-- if the argument is equivalent to Type/Constraint and False otherwise.
-- See Note [Kind Constraint and kind Type]
isLiftedTypeKind :: Kind -> Bool
isLiftedTypeKind kind
  = case kindRep_maybe kind of
      Just rep -> isLiftedRuntimeRep rep
      Nothing  -> False

pickyIsLiftedTypeKind :: Kind -> Bool
-- Checks whether the kind is literally
--      TYPE LiftedRep
-- or   TYPE ('BoxedRep 'Lifted)
-- or   Type
-- without expanding type synonyms or anything
-- Used only when deciding whether to suppress the ":: *" in
-- (a :: *) when printing kinded type variables
-- See Note [Suppressing * kinds] in GHC.Core.TyCo.Ppr
pickyIsLiftedTypeKind kind
  | TyConApp tc [arg] <- kind
  , tc `hasKey` tYPETyConKey
  , TyConApp rr_tc rr_args <- arg
  = case rr_args of
      [] -> rr_tc `hasKey` liftedRepTyConKey
      [rr_arg] | rr_tc `hasKey` boxedRepDataConKey
               , TyConApp lev [] <- rr_arg
               , lev `hasKey` liftedDataConKey -> True
      _ -> False
  | TyConApp tc [] <- kind
  , tc `hasKey` liftedTypeKindTyConKey
  = True
  | otherwise
  = False

-- | Returns True if the kind classifies unlifted types (like 'Int#') and False
-- otherwise. Note that this returns False for representation-polymorphic
-- kinds, which may be specialized to a kind that classifies unlifted types.
isUnliftedTypeKind :: Kind -> Bool
isUnliftedTypeKind kind
  = case kindRep_maybe kind of
      Just rep -> isUnliftedRuntimeRep rep
      Nothing  -> False

-- | See 'isBoxedRuntimeRep_maybe'.
isBoxedRuntimeRep :: Type -> Bool
isBoxedRuntimeRep rep = isJust (isBoxedRuntimeRep_maybe rep)

-- | `isBoxedRuntimeRep_maybe (rep :: RuntimeRep)` returns `Just lev` if `rep`
-- expands to `Boxed lev` and returns `Nothing` otherwise.
--
-- Types with this runtime rep are represented by pointers on the GC'd heap.
isBoxedRuntimeRep_maybe :: Type -> Maybe Type
isBoxedRuntimeRep_maybe rep
  | Just [lev] <- isTyConKeyApp_maybe boxedRepDataConKey rep
  = Just lev
  | otherwise
  = Nothing

-- | Check whether a type of kind 'RuntimeRep' is lifted, unlifted, or unknown.
--
-- @runtimeRepLevity_maybe rr@ returns:
--
-- * @Just Lifted@ if @rr@ is @LiftedRep :: RuntimeRep@
-- * @Just Unlifted@ if @rr@ is definitely unlifted, e.g. @IntRep@
-- * @Nothing@ if not known (e.g. it's a type variable or a type family application).
runtimeRepLevity_maybe :: Type -> Maybe Levity
runtimeRepLevity_maybe rep
  | TyConApp rr_tc args <- coreFullView rep
  , isPromotedDataCon rr_tc
  =      -- NB: args might be non-empty e.g. TupleRep [r1, .., rn]
    if (rr_tc `hasKey` boxedRepDataConKey)
    then case args of
           [lev] | isLiftedLevity lev   -> Just Lifted
                 | isUnliftedLevity lev -> Just Unlifted
           _                            -> Nothing
    else Just Unlifted
         -- Avoid searching all the unlifted RuntimeRep type cons
         -- In the RuntimeRep data type, only LiftedRep is lifted
         -- But be careful of type families (F tys) :: RuntimeRep,
         -- hence the isPromotedDataCon rr_tc
runtimeRepLevity_maybe _ = Nothing

-- | Check whether a type of kind 'RuntimeRep' is lifted.
--
-- 'isLiftedRuntimeRep' is:
--
-- * True of @LiftedRep :: RuntimeRep@
-- * False of type variables, type family applications,
--   and of other reps such as @IntRep :: RuntimeRep@.
isLiftedRuntimeRep :: Type -> Bool
isLiftedRuntimeRep rep = runtimeRepLevity_maybe rep == Just Lifted

-- | Check whether a type of kind 'RuntimeRep' is unlifted.
--
-- * True of definitely unlifted 'RuntimeRep's such as
--   'UnliftedRep', 'IntRep', 'FloatRep', ...
-- * False of 'LiftedRep',
-- * False for type variables and type family applications.
isUnliftedRuntimeRep :: Type -> Bool
isUnliftedRuntimeRep rep = runtimeRepLevity_maybe rep == Just Unlifted

-- | An INLINE helper for functions such as 'isLiftedLevity' and 'isUnliftedLevity'.
--
-- Checks whether the type is a nullary 'TyCon' application,
-- for a 'TyCon' with the given 'Unique'.
isNullaryTyConKeyApp :: Unique -> Type -> Bool
isNullaryTyConKeyApp key ty
  | Just args <- isTyConKeyApp_maybe key ty
  = assert (null args) True
  | otherwise
  = False
{-# INLINE isNullaryTyConKeyApp #-}

isLiftedLevity :: Type -> Bool
isLiftedLevity = isNullaryTyConKeyApp liftedDataConKey

isUnliftedLevity :: Type -> Bool
isUnliftedLevity = isNullaryTyConKeyApp unliftedDataConKey

-- | Is this the type 'Levity'?
isLevityTy :: Type -> Bool
isLevityTy = isNullaryTyConKeyApp levityTyConKey

-- | Is this the type 'RuntimeRep'?
isRuntimeRepTy :: Type -> Bool
isRuntimeRepTy = isNullaryTyConKeyApp runtimeRepTyConKey

-- | Is a tyvar of type 'RuntimeRep'?
isRuntimeRepVar :: TyVar -> Bool
isRuntimeRepVar = isRuntimeRepTy . tyVarKind

-- | Is a tyvar of type 'Levity'?
isLevityVar :: TyVar -> Bool
isLevityVar = isLevityTy . tyVarKind

-- | Is this the type 'Multiplicity'?
isMultiplicityTy :: Type -> Bool
isMultiplicityTy = isNullaryTyConKeyApp multiplicityTyConKey

-- | Is a tyvar of type 'Multiplicity'?
isMultiplicityVar :: TyVar -> Bool
isMultiplicityVar = isMultiplicityTy . tyVarKind

{- *********************************************************************
*                                                                      *
                           mapType
*                                                                      *
************************************************************************

These functions do a map-like operation over types, performing some operation
on all variables and binding sites. Primarily used for zonking.

Note [Efficiency for ForAllCo case of mapTyCoX]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As noted in Note [Forall coercions] in GHC.Core.TyCo.Rep, a ForAllCo is a
bit redundant. It stores a TyCoVar and a Coercion, where the kind of the
TyCoVar always matches the left-hand kind of the coercion. This is
convenient lots of the time, but not when mapping a function over a
coercion.

The problem is that tcm_tybinder will affect the TyCoVar's kind and
mapCoercion will affect the Coercion, and we hope that the results will be
the same. Even if they are the same (which should generally happen with
correct algorithms), then there is an efficiency issue. In particular,
this problem seems to make what should be a linear algorithm into a
potentially exponential one. But it's only going to be bad in the case
where there's lots of foralls in the kinds of other foralls. Like this:

  forall a : (forall b : (forall c : ...). ...). ...

This construction seems unlikely. So we'll do the inefficient, easy way
for now.

Note [Specialising mappers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
These INLINE pragmas are indispensable. mapTyCo and mapTyCoX are used
to implement zonking, and it's vital that they get specialised to the TcM
monad and the particular mapper in use. Even specialising to the monad
alone made a 20% allocation difference in perf/compiler/T5030.

See Note [Specialising foldType] in "GHC.Core.TyCo.Rep" for more details
of this idiom.
-}

-- | This describes how a "map" operation over a type/coercion should behave
data TyCoMapper env m
  = TyCoMapper
      { tcm_tyvar :: env -> TyVar -> m Type
      , tcm_covar :: env -> CoVar -> m Coercion
      , tcm_hole  :: env -> CoercionHole -> m Coercion
          -- ^ What to do with coercion holes.
          -- See Note [Coercion holes] in "GHC.Core.TyCo.Rep".

      , tcm_tycobinder :: env -> TyCoVar -> ArgFlag -> m (env, TyCoVar)
          -- ^ The returned env is used in the extended scope

      , tcm_tycon :: TyCon -> m TyCon
          -- ^ This is used only for TcTyCons
          -- a) To zonk TcTyCons
          -- b) To turn TcTyCons into TyCons.
          --    See Note [Type checking recursive type and class declarations]
          --    in "GHC.Tc.TyCl"
      }

{-# INLINE mapTyCo #-}  -- See Note [Specialising mappers]
-- | Specialise 'mapTyCoX' to an environment-free mapper, returning the
-- four traversal functions (type, types, coercion, coercions).
mapTyCo :: Monad m => TyCoMapper () m
        -> ( Type       -> m Type
           , [Type]     -> m [Type]
           , Coercion   -> m Coercion
           , [Coercion] -> m [Coercion])
mapTyCo mapper
  = case mapTyCoX mapper of
      (go_ty, go_tys, go_co, go_cos)
         -> (go_ty (), go_tys (), go_co (), go_cos ())

{-# INLINE mapTyCoX #-}  -- See Note [Specialising mappers]
-- | The workhorse: build the four mutually recursive traversal functions
-- from a 'TyCoMapper', threading an environment through binders.
mapTyCoX :: Monad m => TyCoMapper env m
         -> ( env -> Type       -> m Type
            , env -> [Type]     -> m [Type]
            , env -> Coercion   -> m Coercion
            , env -> [Coercion] -> m [Coercion])
mapTyCoX (TyCoMapper { tcm_tyvar = tyvar
                     , tcm_tycobinder = tycobinder
                     , tcm_tycon = tycon
                     , tcm_covar = covar
                     , tcm_hole = cohole })
  = (go_ty, go_tys, go_co, go_cos)
  where
    go_tys _   []       = return []
    go_tys env (ty:tys) = (:) <$> go_ty env ty <*> go_tys env tys

    go_ty env (TyVarTy tv)    = tyvar env tv
    go_ty env (AppTy t1 t2)   = mkAppTy <$> go_ty env t1 <*> go_ty env t2
    go_ty _   ty@(LitTy {})   = return ty
    go_ty env (CastTy ty co)  = mkCastTy <$> go_ty env ty <*> go_co env co
    go_ty env (CoercionTy co) = CoercionTy <$> go_co env co

    go_ty env ty@(FunTy _ w arg res)
      = do { w' <- go_ty env w; arg' <- go_ty env arg; res' <- go_ty env res
           ; return (ty { ft_mult = w', ft_arg = arg', ft_res = res' }) }

    go_ty env ty@(TyConApp tc tys)
      | isTcTyCon tc
      = do { tc' <- tycon tc
           ; mkTyConApp tc' <$> go_tys env tys }

      -- Not a TcTyCon
      | null tys    -- Avoid allocation in this very
      = return ty   -- common case (E.g. Int, LiftedRep etc)

      | otherwise
      = mkTyConApp tc <$> go_tys env tys

    go_ty env (ForAllTy (Bndr tv vis) inner)
      = do { (env', tv') <- tycobinder env tv vis
           ; inner' <- go_ty env' inner
           ; return $ ForAllTy (Bndr tv' vis) inner' }

    go_cos _   []       = return []
    go_cos env (co:cos) = (:) <$> go_co env co <*> go_cos env cos

    go_mco _   MRefl    = return MRefl
    go_mco env (MCo co) = MCo <$> (go_co env co)

    go_co env (Refl ty)           = Refl <$> go_ty env ty
    go_co env (GRefl r ty mco)    = mkGReflCo r <$> go_ty env ty <*> go_mco env mco
    go_co env (AppCo c1 c2)       = mkAppCo <$> go_co env c1 <*> go_co env c2
    go_co env (FunCo r cw c1 c2)  = mkFunCo r <$> go_co env cw <*> go_co env c1
                                              <*> go_co env c2
    go_co env (CoVarCo cv)        = covar env cv
    go_co env (HoleCo hole)       = cohole env hole
    go_co env (UnivCo p r t1 t2)  = mkUnivCo <$> go_prov env p <*> pure r
                                             <*> go_ty env t1 <*> go_ty env t2
    go_co env (SymCo co)          = mkSymCo <$> go_co env co
    go_co env (TransCo c1 c2)     = mkTransCo <$> go_co env c1 <*> go_co env c2
    go_co env (AxiomRuleCo r cos) = AxiomRuleCo r <$> go_cos env cos
    go_co env (NthCo r i co)      = mkNthCo r i <$> go_co env co
    go_co env (LRCo lr co)        = mkLRCo lr <$> go_co env co
    go_co env (InstCo co arg)     = mkInstCo <$> go_co env co <*> go_co env arg
    go_co env (KindCo co)         = mkKindCo <$> go_co env co
    go_co env (SubCo co)          = mkSubCo <$> go_co env co
    go_co env (AxiomInstCo ax i cos) = mkAxiomInstCo ax i <$> go_cos env cos

    go_co env co@(TyConAppCo r tc cos)
      | isTcTyCon tc
      = do { tc' <- tycon tc
           ; mkTyConAppCo r tc' <$> go_cos env cos }

      -- Not a TcTyCon
      | null cos    -- Avoid allocation in this very
      = return co   -- common case (E.g. Int, LiftedRep etc)

      | otherwise
      = mkTyConAppCo r tc <$> go_cos env cos

    go_co env (ForAllCo tv kind_co co)
      = do { kind_co' <- go_co env kind_co
           ; (env', tv') <- tycobinder env tv Inferred
           ; co' <- go_co env' co
           ; return $ mkForAllCo tv' kind_co' co' }
        -- See Note [Efficiency for ForAllCo case of mapTyCoX]

    go_prov env (PhantomProv co)    = PhantomProv <$> go_co env co
    go_prov env (ProofIrrelProv co) = ProofIrrelProv <$> go_co env co
    go_prov _   p@(PluginProv _)    = return p
    go_prov _   p@(CorePrepProv _)  = return p

{-
************************************************************************
*                                                                      *
\subsection{Constructor-specific functions}
*                                                                      *
************************************************************************


---------------------------------------------------------------------
                                TyVarTy
                                ~~~~~~~
-}

-- | Attempts to obtain the type variable underlying a 'Type', and panics with the
-- given message if this is not a type variable type. See also 'getTyVar_maybe'
getTyVar :: String -> Type -> TyVar
getTyVar msg ty = case getTyVar_maybe ty of
                    Just tv -> tv
                    Nothing -> panic ("getTyVar: " ++ msg)

-- | True iff the type (after expanding synonyms) is a type variable.
isTyVarTy :: Type -> Bool
isTyVarTy ty = isJust (getTyVar_maybe ty)

-- | Attempts to obtain the type variable underlying a 'Type'
getTyVar_maybe :: Type -> Maybe TyVar
getTyVar_maybe = repGetTyVar_maybe . coreFullView

-- | If the type is a tyvar, possibly under a cast, returns it, along
-- with the coercion.
-- Thus, the co is :: kind tv ~N kind ty
getCastedTyVar_maybe :: Type -> Maybe (TyVar, CoercionN)
getCastedTyVar_maybe ty = case coreFullView ty of
  CastTy (TyVarTy tv) co -> Just (tv, co)
  -- A bare tyvar is reported with a reflexive cast over its kind
  TyVarTy tv             -> Just (tv, mkReflCo Nominal (tyVarKind tv))
  _                      -> Nothing

-- | Attempts to obtain the type variable underlying a 'Type', without
-- any expansion
repGetTyVar_maybe :: Type -> Maybe TyVar
repGetTyVar_maybe (TyVarTy tv) = Just tv
repGetTyVar_maybe _            = Nothing

{-
---------------------------------------------------------------------
                                AppTy
                                ~~~~~
We need to be pretty careful with AppTy to make sure we obey the
invariant that a TyConApp is always visibly so.  mkAppTy maintains the
invariant: use it.

Note [Decomposing fat arrow c=>t]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Can we unify (a b) with (Eq a => ty)?   If we do so, we end up with
a partial application like ((=>) Eq a) which doesn't make sense in
source Haskell.  In contrast, we *can* unify (a b) with (t1 -> t2).
Here's an example (#9858) of how you might do it:
   i :: (Typeable a, Typeable b) => Proxy (a b) -> TypeRep
   i p = typeRep p

   j = i (Proxy :: Proxy (Eq Int => Int))
The type (Proxy (Eq Int => Int)) is only accepted with -XImpredicativeTypes,
but suppose we want that.  But then in the call to 'i', we end
up decomposing (Eq Int => Int), and we definitely don't want that.

This really only applies to the type checker; in Core, '=>' and '->'
are the same, as are 'Constraint' and '*'.  But for now I've put
the test in repSplitAppTy_maybe, which applies throughout, because
the other calls to splitAppTy are in GHC.Core.Unify, which is also used by
the type checker (e.g. when matching type-function equations).
-}

-- | Applies a type to another, as in e.g. @k a@
mkAppTy :: Type -> Type -> Type
  -- See Note [Respecting definitional equality], invariant (EQ1).
mkAppTy (CastTy fun_ty co) arg_ty
  -- Push the application under the cast: cast the argument by the
  -- argument coercion and re-cast the result (invariant (EQ1))
  | ([arg_co], res_co) <- decomposePiCos co (coercionKind co) [arg_ty]
  = (fun_ty `mkAppTy` (arg_ty `mkCastTy` arg_co)) `mkCastTy` res_co

mkAppTy (TyConApp tc tys) ty2 = mkTyConApp tc (tys ++ [ty2])
mkAppTy ty1               ty2 = AppTy ty1 ty2
        -- Note that the TyConApp could be an
        -- under-saturated type synonym.  GHC allows that; e.g.
        --      type Foo k = k a -> k a
        --      type Id x = x
        --      foo :: Foo Id -> Foo Id
        --
        -- Here Id is partially applied in the type sig for Foo,
        -- but once the type synonyms are expanded all is well
        --
        -- Moreover in GHC.Tc.Types.tcInferTyApps we build up a type
        --   (T t1 t2 t3) one argument at a type, thus forming
        --   (T t1), (T t1 t2), etc

mkAppTys :: Type -> [Type] -> Type
mkAppTys ty1                []   = ty1
mkAppTys (CastTy fun_ty co) arg_tys  -- much more efficient than nested mkAppTy
                                     -- Why do this? See (EQ1) of
                                     -- Note [Respecting definitional equality]
                                     -- in GHC.Core.TyCo.Rep
  = foldl' AppTy ((mkAppTys fun_ty casted_arg_tys) `mkCastTy` res_co) leftovers
  where
    (arg_cos, res_co) = decomposePiCos co (coercionKind co) arg_tys
    -- decomposePiCos may produce fewer coercions than there are args;
    -- the uncasted leftovers are applied outside the result cast
    (args_to_cast, leftovers) = splitAtList arg_cos arg_tys
    casted_arg_tys = zipWith mkCastTy args_to_cast arg_cos
mkAppTys (TyConApp tc tys1) tys2 = mkTyConApp tc (tys1 ++ tys2)
mkAppTys ty1                tys2 = foldl' AppTy ty1 tys2

-------------
splitAppTy_maybe :: Type -> Maybe (Type, Type)
-- ^ Attempt to take a type application apart, whether it is a
-- function, type constructor, or plain type application. Note
-- that type family applications are NEVER unsaturated by this!
splitAppTy_maybe = repSplitAppTy_maybe . coreFullView

-------------
repSplitAppTy_maybe :: HasDebugCallStack => Type -> Maybe (Type,Type)
-- ^ Does the AppTy split as in 'splitAppTy_maybe', but assumes that
-- any Core view stuff is already done
repSplitAppTy_maybe (FunTy _ w ty1 ty2)
  -- A FunTy is a saturated (->): rebuild the TyConApp form, exposing
  -- the implicit RuntimeRep arguments
  = Just (TyConApp funTyCon [w, rep1, rep2, ty1], ty2)
  where
    rep1 = getRuntimeRep ty1
    rep2 = getRuntimeRep ty2

repSplitAppTy_maybe (AppTy ty1 ty2)
  = Just (ty1, ty2)

repSplitAppTy_maybe (TyConApp tc tys)
  | not (mustBeSaturated tc) || tys `lengthExceeds` tyConArity tc
  , Just (tys', ty') <- snocView tys
  = Just (TyConApp tc tys', ty')    -- Never create unsaturated type family apps!

repSplitAppTy_maybe _other = Nothing

-- This one doesn't break apart (c => t).
-- See Note [Decomposing fat arrow c=>t]
-- Defined here to avoid module loops between Unify and TcType.
tcRepSplitAppTy_maybe :: Type -> Maybe (Type,Type)
-- ^ Does the AppTy split as in 'tcSplitAppTy_maybe', but assumes that
-- any coreView stuff is already done. Refuses to look through (c => t)
tcRepSplitAppTy_maybe (FunTy { ft_af = af, ft_mult = w, ft_arg = ty1, ft_res = ty2 })
  | VisArg <- af  -- See Note [Decomposing fat arrow c=>t]

  -- See Note [The Purely Kinded Type Invariant (PKTI)] in GHC.Tc.Gen.HsType,
  -- Wrinkle around FunTy
  , Just rep1 <- getRuntimeRep_maybe ty1
  , Just rep2 <- getRuntimeRep_maybe ty2
  = Just (TyConApp funTyCon [w, rep1, rep2, ty1], ty2)

  | otherwise
  = Nothing

tcRepSplitAppTy_maybe (AppTy ty1 ty2) = Just (ty1, ty2)

tcRepSplitAppTy_maybe (TyConApp tc tys)
  | not (mustBeSaturated tc) || tys `lengthExceeds` tyConArity tc
  , Just (tys', ty') <- snocView tys
  = Just (TyConApp tc tys', ty')    -- Never create unsaturated type family apps!

tcRepSplitAppTy_maybe _other = Nothing

-------------
splitAppTy :: Type -> (Type, Type)
-- ^ Attempts to take a type application apart, as in 'splitAppTy_maybe',
-- and panics if this is not possible
splitAppTy ty = case splitAppTy_maybe ty of
  Just pr -> pr
  Nothing -> panic "splitAppTy"

-------------
splitAppTys :: Type -> (Type, [Type])
-- ^ Recursively splits a type as far as is possible, leaving a residual
-- type being applied to and the type arguments applied to it. Never fails,
-- even if that means returning an empty list of type applications.
splitAppTys ty = split ty ty []
  where
    split orig_ty ty args | Just ty' <- coreView ty = split orig_ty ty' args
    split _       (AppTy ty arg)        args = split ty ty (arg:args)
    split _       (TyConApp tc tc_args) args
      = let -- keep type families saturated
            n | mustBeSaturated tc = tyConArity tc
              | otherwise          = 0
            (tc_args1, tc_args2) = splitAt n tc_args
        in
        (TyConApp tc tc_args1, tc_args2 ++ args)
    split _       (FunTy _ w ty1 ty2) args
      = assert (null args )
        (TyConApp funTyCon [], [w, rep1, rep2, ty1, ty2])
      where
        rep1 = getRuntimeRep ty1
        rep2 = getRuntimeRep ty2

    split orig_ty _                     args  = (orig_ty, args)

-- | Like 'splitAppTys', but doesn't look through type synonyms
repSplitAppTys :: HasDebugCallStack => Type -> (Type, [Type])
repSplitAppTys ty = split ty []
  where
    split (AppTy ty arg) args = split ty (arg:args)
    split (TyConApp tc tc_args) args
      = let n | mustBeSaturated tc = tyConArity tc
              | otherwise          = 0
            (tc_args1, tc_args2) = splitAt n tc_args
        in
        (TyConApp tc tc_args1, tc_args2 ++ args)
    split (FunTy _ w ty1 ty2) args
      = assert (null args )
        (TyConApp funTyCon [], [w, rep1, rep2, ty1, ty2])
      where
        rep1 = getRuntimeRep ty1
        rep2 = getRuntimeRep ty2

    split ty args = (ty, args)

{-
                      LitTy
                      ~~~~~
-}

mkNumLitTy :: Integer -> Type
mkNumLitTy n = LitTy (NumTyLit n)

-- | Is this a numeric literal. We also look through type synonyms.
isNumLitTy :: Type -> Maybe Integer isNumLitTy ty | LitTy (NumTyLit n) <- coreFullView ty = Just n | otherwise = Nothing mkStrLitTy :: FastString -> Type mkStrLitTy s = LitTy (StrTyLit s) -- | Is this a symbol literal. We also look through type synonyms. isStrLitTy :: Type -> Maybe FastString isStrLitTy ty | LitTy (StrTyLit s) <- coreFullView ty = Just s | otherwise = Nothing mkCharLitTy :: Char -> Type mkCharLitTy c = LitTy (CharTyLit c) -- | Is this a char literal? We also look through type synonyms. isCharLitTy :: Type -> Maybe Char isCharLitTy ty | LitTy (CharTyLit s) <- coreFullView ty = Just s | otherwise = Nothing -- | Is this a type literal (symbol, numeric, or char)? isLitTy :: Type -> Maybe TyLit isLitTy ty | LitTy l <- coreFullView ty = Just l | otherwise = Nothing -- | Is this type a custom user error? -- If so, give us the kind and the error message. userTypeError_maybe :: Type -> Maybe Type userTypeError_maybe t = do { (tc, _kind : msg : _) <- splitTyConApp_maybe t -- There may be more than 2 arguments, if the type error is -- used as a type constructor (e.g. at kind `Type -> Type`). ; guard (tyConName tc == errorMessageTypeErrorFamName) ; return msg } -- | Render a type corresponding to a user type error into a SDoc. 
pprUserTypeErrorTy :: Type -> SDoc
pprUserTypeErrorTy ty =
  case splitTyConApp_maybe ty of

    -- Text "Something"
    Just (tc,[txt])
      | tyConName tc == typeErrorTextDataConName
      , Just str <- isStrLitTy txt -> ftext str

    -- ShowType t
    Just (tc,[_k,t])
      | tyConName tc == typeErrorShowTypeDataConName -> ppr t

    -- t1 :<>: t2
    Just (tc,[t1,t2])
      | tyConName tc == typeErrorAppendDataConName ->
        pprUserTypeErrorTy t1 <> pprUserTypeErrorTy t2

    -- t1 :$$: t2
    Just (tc,[t1,t2])
      | tyConName tc == typeErrorVAppendDataConName ->
        pprUserTypeErrorTy t1 $$ pprUserTypeErrorTy t2

    -- An unevaluated type function
    _ -> ppr ty

{- ---------------------------------------------------------------------
                                FunTy
                                ~~~~~

Note [Representation of function types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Functions (e.g. Int -> Char) can be thought of as being applications
of funTyCon (known in Haskell surface syntax as (->)), (note that
`RuntimeRep' quantifiers are left inferred)

    (->) :: forall {r1 :: RuntimeRep} {r2 :: RuntimeRep}
                   (a :: TYPE r1) (b :: TYPE r2).
            a -> b -> Type

However, for efficiency's sake we represent saturated applications of (->)
with FunTy. For instance, the type,

    (->) r1 r2 a b

is equivalent to,

    FunTy (Anon a) b

Note how the RuntimeReps are implied in the FunTy representation. For this
reason we must be careful when reconstructing the TyConApp representation (see,
for instance, splitTyConApp_maybe).

In the compiler we maintain the invariant that all saturated applications of
(->) are represented with FunTy.

See #11714.
-}

splitFunTy :: Type -> (Mult, Type, Type)
-- ^ Attempts to extract the multiplicity, argument and result types from a type,
-- and panics if that is not possible. See also 'splitFunTy_maybe'
splitFunTy = expectJust "splitFunTy" . splitFunTy_maybe

{-# INLINE splitFunTy_maybe #-}
splitFunTy_maybe :: Type -> Maybe (Mult, Type, Type)
-- ^ Attempts to extract the multiplicity, argument and result types from a type
splitFunTy_maybe ty
  | FunTy _ w arg res <- coreFullView ty = Just (w, arg, res)
  | otherwise                            = Nothing

splitFunTys :: Type -> ([Scaled Type], Type)
splitFunTys ty = split [] ty ty
  where
      -- common case first
    split args _       (FunTy _ w arg res) = split ((Scaled w arg):args) res res
    split args orig_ty ty | Just ty' <- coreView ty = split args orig_ty ty'
    split args orig_ty _                   = (reverse args, orig_ty)

funResultTy :: HasDebugCallStack => Type -> Type
-- ^ Extract the function result type and panic if that is not possible
funResultTy ty
  | FunTy { ft_res = res } <- coreFullView ty = res
  | otherwise                                 = pprPanic "funResultTy" (ppr ty)

funArgTy :: Type -> Type
-- ^ Extract the function argument type and panic if that is not possible
funArgTy ty
  | FunTy { ft_arg = arg } <- coreFullView ty = arg
  | otherwise                                 = pprPanic "funArgTy" (ppr ty)

-- ^ Just like 'piResultTys' but for a single argument
-- Try not to iterate 'piResultTy', because it's inefficient to substitute
-- one variable at a time; instead use 'piResultTys"
piResultTy :: HasDebugCallStack => Type -> Type -> Type
piResultTy ty arg = case piResultTy_maybe ty arg of
  Just res -> res
  Nothing  -> pprPanic "piResultTy" (ppr ty $$ ppr arg)

piResultTy_maybe :: Type -> Type -> Maybe Type
-- We don't need a 'tc' version, because
-- this function behaves the same for Type and Constraint
piResultTy_maybe ty arg = case coreFullView ty of
  -- Non-dependent arrow: the argument does not appear in the result
  FunTy { ft_res = res } -> Just res

  -- Dependent quantifier: substitute the argument for the binder
  ForAllTy (Bndr tv _) res
    -> let empty_subst = mkEmptyTCvSubst $ mkInScopeSet $
                         tyCoVarsOfTypes [arg,res]
       in Just (substTy (extendTCvSubst empty_subst tv arg) res)

  _ -> Nothing

-- | (piResultTys f_ty [ty1, .., tyn]) gives the type of (f ty1 .. tyn)
--   where f :: f_ty
-- 'piResultTys' is interesting because:
--     1. 'f_ty' may have more for-alls than there are args
--     2.
-- Less obviously, it may have fewer for-alls
-- For case 2. think of:
--   piResultTys (forall a.a) [forall b.b, Int]
-- This really can happen, but only (I think) in situations involving
-- undefined.  For example:
--       undefined :: forall a. a
-- Term: undefined @(forall b. b->b) @Int
-- This term should have type (Int -> Int), but notice that
-- there are more type args than foralls in 'undefined's type.

-- If you edit this function, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint

-- This is a heavily used function (e.g. from typeKind),
-- so we pay attention to efficiency, especially in the special case
-- where there are no for-alls so we are just dropping arrows from
-- a function type/kind.
piResultTys :: HasDebugCallStack => Type -> [Type] -> Type
piResultTys ty [] = ty
piResultTys ty orig_args@(arg:args)
  | FunTy { ft_res = res } <- ty
  = piResultTys res args

  | ForAllTy (Bndr tv _) res <- ty
  = go (extendTCvSubst init_subst tv arg) res args

  | Just ty' <- coreView ty
  = piResultTys ty' orig_args

  | otherwise
  = pprPanic "piResultTys1" (ppr ty $$ ppr orig_args)
  where
    init_subst = mkEmptyTCvSubst $ mkInScopeSet (tyCoVarsOfTypes (ty:orig_args))

    go :: TCvSubst -> Type -> [Type] -> Type
    go subst ty [] = substTyUnchecked subst ty

    go subst ty all_args@(arg:args)
      | FunTy { ft_res = res } <- ty
      = go subst res args

      | ForAllTy (Bndr tv _) res <- ty
      = go (extendTCvSubst subst tv arg) res args

      | Just ty' <- coreView ty
      = go subst ty' all_args

      | not (isEmptyTCvSubst subst)  -- See Note [Care with kind instantiation]
      -- Apply the accumulated substitution (it may expose a fresh forall)
      -- and restart with the initial (empty) substitution
      = go init_subst (substTy subst ty) all_args

      | otherwise
      = -- We have not run out of arguments, but the function doesn't
        -- have the right kind to apply to them; so panic.
        -- Without the explicit isEmptyVarEnv test, an ill-kinded type
        -- would give an infinite loop, which is very unhelpful
        -- c.f. #15473
        pprPanic "piResultTys2" (ppr ty $$ ppr orig_args $$ ppr all_args)

applyTysX :: [TyVar] -> Type -> [Type] -> Type
-- applyTyxX beta-reduces (/\tvs. body_ty) arg_tys
-- Assumes that (/\tvs. body_ty) is closed
applyTysX tvs body_ty arg_tys
  = assertPpr (tvs `leLength` arg_tys) pp_stuff $
    assertPpr (tyCoVarsOfType body_ty `subVarSet` mkVarSet tvs) pp_stuff $
    mkAppTys (substTyWith tvs arg_tys_prefix body_ty) arg_tys_rest
  where
    pp_stuff = vcat [ppr tvs, ppr body_ty, ppr arg_tys]
    -- Surplus arguments (beyond the binders) stay as ordinary applications
    (arg_tys_prefix, arg_tys_rest) = splitAtList tvs arg_tys

{- Note [Care with kind instantiation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have
  T :: forall k. k
and we are finding the kind of
  T (forall b. b -> b) * Int
Then
  T (forall b. b->b) :: k[ k :-> forall b. b->b]
                     :: forall b. b -> b
So
  T (forall b. b->b) * :: (b -> b)[ b :-> *]
                       :: * -> *

In other words we must instantiate the forall!

Similarly (#15428)
  S :: forall k f. k -> f k
and we are finding the kind of
  S * (* ->) Int Bool
We have
  S * (* ->) :: (k -> f k)[ k :-> *, f :-> (* ->)]
             :: * -> * -> *
So again we must instantiate.

The same thing happens in GHC.CoreToIface.toIfaceAppArgsX.

---------------------------------------------------------------------
                                TyConApp
                                ~~~~~~~~
-}

-- splitTyConApp "looks through" synonyms, because they don't
-- mean a distinct type, but all other type-constructor applications
-- including functions are returned as Just ..

-- | Retrieve the tycon heading this type, if there is one. Does /not/
-- look through synonyms.
tyConAppTyConPicky_maybe :: Type -> Maybe TyCon
tyConAppTyConPicky_maybe (TyConApp tc _) = Just tc
tyConAppTyConPicky_maybe (FunTy {})      = Just funTyCon
tyConAppTyConPicky_maybe _               = Nothing

-- | The same as @fst .
splitTyConApp@ {-# INLINE tyConAppTyCon_maybe #-} tyConAppTyCon_maybe :: Type -> Maybe TyCon tyConAppTyCon_maybe ty = case coreFullView ty of TyConApp tc _ -> Just tc FunTy {} -> Just funTyCon _ -> Nothing tyConAppTyCon :: HasDebugCallStack => Type -> TyCon tyConAppTyCon ty = tyConAppTyCon_maybe ty `orElse` pprPanic "tyConAppTyCon" (ppr ty) -- | The same as @snd . splitTyConApp@ tyConAppArgs_maybe :: Type -> Maybe [Type] tyConAppArgs_maybe ty = case coreFullView ty of TyConApp _ tys -> Just tys FunTy _ w arg res | Just rep1 <- getRuntimeRep_maybe arg , Just rep2 <- getRuntimeRep_maybe res -> Just [w, rep1, rep2, arg, res] _ -> Nothing tyConAppArgs :: HasCallStack => Type -> [Type] tyConAppArgs ty = tyConAppArgs_maybe ty `orElse` pprPanic "tyConAppArgs" (ppr ty) tyConAppArgN :: Int -> Type -> Type -- Executing Nth tyConAppArgN n ty = case tyConAppArgs_maybe ty of Just tys -> tys `getNth` n Nothing -> pprPanic "tyConAppArgN" (ppr n <+> ppr ty) -- | Attempts to tease a type apart into a type constructor and the application -- of a number of arguments to that constructor. Panics if that is not possible. -- See also 'splitTyConApp_maybe' splitTyConApp :: Type -> (TyCon, [Type]) splitTyConApp ty = case splitTyConApp_maybe ty of Just stuff -> stuff Nothing -> pprPanic "splitTyConApp" (ppr ty) -- | Attempts to tease a type apart into a type constructor and the application -- of a number of arguments to that constructor splitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type]) splitTyConApp_maybe = repSplitTyConApp_maybe . coreFullView -- | Split a type constructor application into its type constructor and -- applied types. Note that this may fail in the case of a 'FunTy' with an -- argument of unknown kind 'FunTy' (e.g. @FunTy (a :: k) Int@. since the kind -- of @a@ isn't of the form @TYPE rep@). Consequently, you may need to zonk your -- type before using this function. 
--
-- This does *not* split types headed with (=>), as that's not a TyCon in the
-- type-checker.
--
-- If you only need the 'TyCon', consider using 'tcTyConAppTyCon_maybe'.
tcSplitTyConApp_maybe :: HasCallStack => Type -> Maybe (TyCon, [Type])
-- Defined here to avoid module loops between Unify and TcType.
tcSplitTyConApp_maybe ty
  | Just ty' <- tcView ty = tcSplitTyConApp_maybe ty'
  | otherwise             = tcRepSplitTyConApp_maybe ty

-------------------
repSplitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type])
-- ^ Like 'splitTyConApp_maybe', but doesn't look through synonyms. This
-- assumes the synonyms have already been dealt with.
repSplitTyConApp_maybe (TyConApp tc tys) = Just (tc, tys)
repSplitTyConApp_maybe (FunTy _ w arg res)
  -- NB: we're in Core, so no check for VisArg
  = Just (funTyCon, [w, arg_rep, res_rep, arg, res])
  where
    arg_rep = getRuntimeRep arg
    res_rep = getRuntimeRep res
repSplitTyConApp_maybe _ = Nothing

tcRepSplitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type])
-- ^ Like 'tcSplitTyConApp_maybe', but doesn't look through synonyms. This
-- assumes the synonyms have already been dealt with.
--
-- Moreover, for a FunTy, it only succeeds if the argument types
-- have enough info to extract the runtime-rep arguments that
-- the funTyCon requires.  This will usually be true;
-- but may be temporarily false during canonicalization:
--     see Note [Decomposing FunTy] in GHC.Tc.Solver.Canonical
--     and Note [The Purely Kinded Type Invariant (PKTI)] in GHC.Tc.Gen.HsType,
--         Wrinkle around FunTy
tcRepSplitTyConApp_maybe (TyConApp tc tys) = Just (tc, tys)
tcRepSplitTyConApp_maybe (FunTy VisArg w arg res)
  -- NB: VisArg. See Note [Decomposing fat arrow c=>t]
  | Just arg_rep <- getRuntimeRep_maybe arg
  , Just res_rep <- getRuntimeRep_maybe res
  = Just (funTyCon, [w, arg_rep, res_rep, arg, res])
tcRepSplitTyConApp_maybe _ = Nothing

-------------------
-- | Attempts to tease a list type apart and gives the type of the elements if
-- successful (looks through type synonyms)
splitListTyConApp_maybe :: Type -> Maybe Type
splitListTyConApp_maybe ty = case splitTyConApp_maybe ty of
  Just (tc,[e]) | tc == listTyCon -> Just e
  _other                          -> Nothing

newTyConInstRhs :: TyCon -> [Type] -> Type
-- ^ Unwrap one 'layer' of newtype on a type constructor and its
-- arguments, using an eta-reduced version of the @newtype@ if possible.
-- This requires tys to have at least @newTyConInstArity tycon@ elements.
newTyConInstRhs tycon tys
    = assertPpr (tvs `leLength` tys) (ppr tycon $$ ppr tys $$ ppr tvs) $
      applyTysX tvs rhs tys
  where
    (tvs, rhs) = newTyConEtadRhs tycon

{-
---------------------------------------------------------------------
                           CastTy
                           ~~~~~~
A casted type has its *kind* casted into something new.
-}

splitCastTy_maybe :: Type -> Maybe (Type, Coercion)
splitCastTy_maybe ty
  | CastTy ty' co <- coreFullView ty = Just (ty', co)
  | otherwise                        = Nothing

-- | Make a 'CastTy'. The Coercion must be nominal. Checks the
-- Coercion for reflexivity, dropping it if it's reflexive.
-- See @Note [Respecting definitional equality]@ in "GHC.Core.TyCo.Rep"
mkCastTy :: Type -> Coercion -> Type
mkCastTy orig_ty co | isReflexiveCo co = orig_ty  -- (EQ2) from the Note
-- NB: Do the slow check here. This is important to keep the splitXXX
-- functions working properly. Otherwise, we may end up with something
-- like (((->) |> something_reflexive_but_not_obviously_so) biz baz)
-- fails under splitFunTy_maybe. This happened with the cheaper check
-- in test dependent/should_compile/dynamic-paper.
mkCastTy orig_ty co = mk_cast_ty orig_ty co

-- | Like 'mkCastTy', but avoids checking the coercion for reflexivity,
-- as that can be expensive.
mk_cast_ty :: Type -> Coercion -> Type
mk_cast_ty orig_ty co = go orig_ty
  where
    go :: Type -> Type
    -- See Note [Using coreView in mk_cast_ty]
    go ty | Just ty' <- coreView ty = go ty'

    go (CastTy ty co1)
      -- (EQ3) from the Note: collapse nested casts into one
      = mkCastTy ty (co1 `mkTransCo` co)
          -- call mkCastTy again for the reflexivity check

    go (ForAllTy (Bndr tv vis) inner_ty)
      -- (EQ4) from the Note: push the cast under a tyvar forall
      -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep.
      | isTyVar tv
      , let fvs = tyCoVarsOfCo co
      = -- have to make sure that pushing the co in doesn't capture the bound var!
        if tv `elemVarSet` fvs
        then let empty_subst  = mkEmptyTCvSubst (mkInScopeSet fvs)
                 (subst, tv') = substVarBndr empty_subst tv
             in ForAllTy (Bndr tv' vis) (substTy subst inner_ty `mk_cast_ty` co)
        else ForAllTy (Bndr tv vis) (inner_ty `mk_cast_ty` co)

    go _ = CastTy orig_ty co -- NB: orig_ty: preserve synonyms if possible

{- Note [Using coreView in mk_cast_ty]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Invariants (EQ3) and (EQ4) of Note [Respecting definitional equality] in
GHC.Core.TyCo.Rep must apply regardless of type synonyms. For instance,
consider this example (#19742):

   type EqSameNat = () |> co
   useNatEq :: EqSameNat |> sym co

(Those casts aren't visible in the user-source code, of course; see #19742 for
what the user might write.)

The type `EqSameNat |> sym co` looks as if it satisfies (EQ3), as it has no
nested casts, but if we expand EqSameNat, we see that it doesn't.
And then Bad Things happen.

The solution is easy: just use `coreView` when establishing (EQ3) and (EQ4) in
`mk_cast_ty`.
-}

tyConBindersTyCoBinders :: [TyConBinder] -> [TyCoBinder]
-- Return the tyConBinders in TyCoBinder form
tyConBindersTyCoBinders = map to_tyb
  where
    to_tyb (Bndr tv (NamedTCB vis)) = Named (Bndr tv vis)
    to_tyb (Bndr tv (AnonTCB af))   = Anon af (tymult (varType tv))

-- | (mkTyConTy tc) returns (TyConApp tc [])
-- but arranges to share that TyConApp among all calls
-- See Note [Sharing nullary TyConApps] in GHC.Core.TyCon
mkTyConTy :: TyCon -> Type
mkTyConTy tycon = tyConNullaryTy tycon

-- | A key function: builds a 'TyConApp' or 'FunTy' as appropriate to
-- its arguments.  Applies its arguments to the constructor from left to right.
mkTyConApp :: TyCon -> [Type] -> Type
mkTyConApp tycon []
  = -- See Note [Sharing nullary TyConApps] in GHC.Core.TyCon
    mkTyConTy tycon

mkTyConApp tycon tys@(ty1:rest)
  -- Saturated (->): build the FunTy form instead of a TyConApp
  | key == funTyConKey
  = case tys of
      [w, _rep1,_rep2,arg,res] -> FunTy { ft_af = VisArg, ft_mult = w
                                        , ft_arg = arg, ft_res = res }
      _ -> bale_out

  -- See Note [Using synonyms to compress types]
  | key == tYPETyConKey
  = assert (null rest) $
    -- mkTYPEapp_maybe ty1 `orElse` bale_out
    case mkTYPEapp_maybe ty1 of
      Just ty -> ty       -- pprTrace "mkTYPEapp:yes" (ppr ty) ty
      Nothing -> bale_out -- pprTrace "mkTYPEapp:no" (ppr bale_out) bale_out

  -- See Note [Using synonyms to compress types]
  | key == boxedRepDataConTyConKey
  = assert (null rest) $
    -- mkBoxedRepApp_maybe ty1 `orElse` bale_out
    case mkBoxedRepApp_maybe ty1 of
      Just ty -> ty       -- pprTrace "mkBoxedRepApp:yes" (ppr ty) ty
      Nothing -> bale_out -- pprTrace "mkBoxedRepApp:no" (ppr bale_out) bale_out

  | key == tupleRepDataConTyConKey
  = case mkTupleRepApp_maybe ty1 of
      Just ty -> ty       -- pprTrace "mkTupleRepApp:yes" (ppr ty) ty
      Nothing -> bale_out -- pprTrace "mkTupleRepApp:no" (ppr bale_out) bale_out

  -- The catch-all case
  | otherwise
  = bale_out
  where
    key = tyConUnique tycon
    bale_out = TyConApp tycon tys

mkTYPEapp :: Type -> Type
mkTYPEapp rr
  = case mkTYPEapp_maybe rr of
      Just ty -> ty
      Nothing -> TyConApp tYPETyCon [rr]

mkTYPEapp_maybe :: Type -> Maybe Type
-- ^ Given a @RuntimeRep@, applies @TYPE@ to it.
-- On the fly it rewrites
--      TYPE LiftedRep      -->   liftedTypeKind    (a synonym)
--      TYPE UnliftedRep    -->   unliftedTypeKind  (ditto)
--      TYPE ZeroBitRep     -->   zeroBitTypeKind   (ditto)
-- NB: no need to check for TYPE (BoxedRep Lifted), TYPE (BoxedRep Unlifted)
--     because those inner types should already have been rewritten
--     to LiftedRep and UnliftedRep respectively, by mkTyConApp
--
-- see Note [TYPE and RuntimeRep] in GHC.Builtin.Types.Prim.
-- See Note [Using synonyms to compress types] in GHC.Core.Type
{-# NOINLINE mkTYPEapp_maybe #-}
mkTYPEapp_maybe (TyConApp tc args)
  | key == liftedRepTyConKey   = assert (null args) $ Just liftedTypeKind   -- TYPE LiftedRep
  | key == unliftedRepTyConKey = assert (null args) $ Just unliftedTypeKind -- TYPE UnliftedRep
  | key == zeroBitRepTyConKey  = assert (null args) $ Just zeroBitTypeKind  -- TYPE ZeroBitRep
  where
    key = tyConUnique tc
mkTYPEapp_maybe _ = Nothing

mkBoxedRepApp_maybe :: Type -> Maybe Type
-- ^ Given a `Levity`, apply `BoxedRep` to it
-- On the fly, rewrite
--      BoxedRep Lifted     -->   liftedRepTy    (a synonym)
--      BoxedRep Unlifted   -->   unliftedRepTy  (ditto)
-- See Note [TYPE and RuntimeRep] in GHC.Builtin.Types.Prim.
-- See Note [Using synonyms to compress types] in GHC.Core.Type
{-# NOINLINE mkBoxedRepApp_maybe #-}
mkBoxedRepApp_maybe (TyConApp tc args)
  | key == liftedDataConKey   = assert (null args) $ Just liftedRepTy    -- BoxedRep Lifted
  | key == unliftedDataConKey = assert (null args) $ Just unliftedRepTy  -- BoxedRep Unlifted
  where
    key = tyConUnique tc
mkBoxedRepApp_maybe _ = Nothing

mkTupleRepApp_maybe :: Type -> Maybe Type
-- ^ Given a `[RuntimeRep]`, apply `TupleRep` to it
-- On the fly, rewrite
--      TupleRep [] -> zeroBitRepTy   (a synonym)
-- See Note [TYPE and RuntimeRep] in GHC.Builtin.Types.Prim.
-- See Note [Using synonyms to compress types] in GHC.Core.Type
{-# NOINLINE mkTupleRepApp_maybe #-}
mkTupleRepApp_maybe (TyConApp tc args)
  | key == nilDataConKey = assert (isSingleton args) $ Just zeroBitRepTy  -- ZeroBitRep
  where
    key = tyConUnique tc
mkTupleRepApp_maybe _ = Nothing

{- Note [Using synonyms to compress types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Was: Prefer Type over TYPE (BoxedRep Lifted)]

The Core of nearly any program will have numerous occurrences of the Types

   TyConApp BoxedRep [TyConApp Lifted []]    -- Synonym LiftedRep
   TyConApp BoxedRep [TyConApp Unlifted []]  -- Synonym UnliftedREp
   TyConApp TYPE [TyConApp LiftedRep []]     -- Synonym Type
   TyConApp TYPE [TyConApp UnliftedRep []]   -- Synonym UnliftedType

While investigating #17292 we found that these constituted a majority
of all TyConApp constructors on the heap:

    (From a sample of 100000 TyConApp closures)
    0x45f3523    - 28732 - `Type`
    0x420b840702 - 9629  - generic type constructors
    0x42055b7e46 - 9596
    0x420559b582 - 9511
    0x420bb15a1e - 9509
    0x420b86c6ba - 9501
    0x42055bac1e - 9496
    0x45e68fd    - 538   - `TYPE ...`

Consequently, we try hard to ensure that operations on such types are
efficient. Specifically, we strive to

 a. Avoid heap allocation of such types; use a single static TyConApp
 b. Use a small (shallow in the tree-depth sense) representation
    for such types

Goal (b) is particularly useful as it makes traversals (e.g. free variable
traversal, substitution, and comparison) more efficient.
Comparison in particular takes special advantage of nullary type synonym
applications (e.g. things like @TyConApp typeTyCon []@), Note [Comparing
nullary type synonyms] in "GHC.Core.Type".

To accomplish these we use a number of tricks, implemented by mkTyConApp.

 1. Instead of (TyConApp BoxedRep [TyConApp Lifted []]),
    we prefer a statically-allocated (TyConApp LiftedRep [])
    where `LiftedRep` is a type synonym:
       type LiftedRep = BoxedRep Lifted
    Similarly for UnliftedRep

 2.
    Instead of (TyConApp TYPE [TyConApp LiftedRep []])
    we prefer the statically-allocated (TyConApp Type [])
    where `Type` is a type synonym
       type Type = TYPE LiftedRep
    Similarly for UnliftedType

These serve goal (b) since there are no applied type arguments to traverse,
e.g., during comparison.

 3. We have a single, statically allocated top-level binding to
    represent `TyConApp GHC.Types.Type []` (namely
    'GHC.Builtin.Types.Prim.liftedTypeKind'), ensuring that we don't
    need to allocate such types (goal (a)).  See functions
    mkTYPEapp and mkBoxedRepApp

 4. We use the sharing mechanism described in Note [Sharing nullary TyConApps]
    in GHC.Core.TyCon to ensure that we never need to allocate such
    nullary applications (goal (a)).

See #17958, #20541

Note [Care using synonyms to compress types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using a synonym to compress a types has a tricky wrinkle. Consider
coreView applied to (TyConApp LiftedRep [])

* coreView expands the LiftedRep synonym:
     type LiftedRep = BoxedRep Lifted

* Danger: we might apply the empty substitution to the RHS of the
  synonym.  And substTy calls mkTyConApp BoxedRep [Lifted]. And
  mkTyConApp compresses that back to LiftedRep.  Loop!

* Solution: in expandSynTyConApp_maybe, don't call substTy for nullary
  type synonyms.  That's more efficient anyway.
-}

{- --------------------------------------------------------------------
                            CoercionTy
                            ~~~~~~~~~~
CoercionTy allows us to inject coercions into types. A CoercionTy
should appear only in the right-hand side of an application.
-}

mkCoercionTy :: Coercion -> Type
mkCoercionTy = CoercionTy

isCoercionTy :: Type -> Bool
isCoercionTy (CoercionTy _) = True
isCoercionTy _              = False

isCoercionTy_maybe :: Type -> Maybe Coercion
isCoercionTy_maybe (CoercionTy co) = Just co
isCoercionTy_maybe _               = Nothing

stripCoercionTy :: Type -> Coercion
stripCoercionTy (CoercionTy co) = co
stripCoercionTy ty              = pprPanic "stripCoercionTy" (ppr ty)

{- ---------------------------------------------------------------------
                                SynTy
                                ~~~~~

Notes on type synonyms
~~~~~~~~~~~~~~~~~~~~~~
The various "split" functions (splitFunTy, splitRhoTy, splitForAllTy) try
to return type synonyms wherever possible. Thus

        type Foo a = a -> a

we want
        splitFunTys (a -> Foo a) = ([a], Foo a)
not                                ([a], a -> a)

The reason is that we then get better (shorter) type signatures in
interfaces.  Notably this plays a role in tcTySigs in GHC.Tc.Gen.Bind.

---------------------------------------------------------------------
                                ForAllTy
                                ~~~~~~~~
-}

-- | Make a dependent forall over an 'Inferred' variable
mkTyCoInvForAllTy :: TyCoVar -> Type -> Type
mkTyCoInvForAllTy tv ty
  -- A coercion variable not free in the body becomes a plain
  -- (many-multiplicity, visible) function arrow instead
  | isCoVar tv
  , not (tv `elemVarSet` tyCoVarsOfType ty)
  = mkVisFunTyMany (varType tv) ty
  | otherwise
  = ForAllTy (Bndr tv Inferred) ty

-- | Like 'mkTyCoInvForAllTy', but tv should be a tyvar
mkInfForAllTy :: TyVar -> Type -> Type
mkInfForAllTy tv ty = assert (isTyVar tv )
                      ForAllTy (Bndr tv Inferred) ty

-- | Like 'mkForAllTys', but assumes all variables are dependent and
-- 'Inferred', a common case
mkTyCoInvForAllTys :: [TyCoVar] -> Type -> Type
mkTyCoInvForAllTys tvs ty = foldr mkTyCoInvForAllTy ty tvs

-- | Like 'mkTyCoInvForAllTys', but tvs should be a list of tyvar
mkInfForAllTys :: [TyVar] -> Type -> Type
mkInfForAllTys tvs ty = foldr mkInfForAllTy ty tvs

-- | Like 'mkForAllTy', but assumes the variable is dependent and 'Specified',
-- a common case
mkSpecForAllTy :: TyVar -> Type -> Type
mkSpecForAllTy tv ty = assert (isTyVar tv )
                       -- covar is always Inferred, so input should be tyvar
                       ForAllTy (Bndr tv Specified) ty

-- | Like 'mkForAllTys', but assumes all variables are dependent and
-- 'Specified', a common case
mkSpecForAllTys :: [TyVar] -> Type -> Type
mkSpecForAllTys tvs ty = foldr mkSpecForAllTy ty tvs

-- | Like mkForAllTys, but assumes all variables are dependent and visible
mkVisForAllTys :: [TyVar] -> Type -> Type
mkVisForAllTys tvs = assert (all isTyVar tvs )
                     -- covar is always Inferred, so all inputs should be tyvar
                     mkForAllTys [ Bndr tv Required | tv <- tvs ]

-- | Given a list of type-level vars and the free vars of a result kind,
-- makes TyCoBinders, preferring anonymous binders
-- if the variable is, in fact, not dependent.
-- e.g.    mkTyConBindersPreferAnon [(k:*),(b:k),(c:k)] (k->k)
-- We want (k:*) Named, (b:k) Anon, (c:k) Anon
--
-- All non-coercion binders are /visible/.
mkTyConBindersPreferAnon :: [TyVar]      -- ^ binders
                         -> TyCoVarSet   -- ^ free variables of result
                         -> [TyConBinder]
mkTyConBindersPreferAnon vars inner_tkvs = assert (all isTyVar vars)
                                           fst (go vars)
  where
    go :: [TyVar] -> ([TyConBinder], VarSet)  -- also returns the free vars
    go [] = ([], inner_tkvs)
    go (v:vs) | v `elemVarSet` fvs
              -- v occurs free later on, so it must be a named (dependent) binder
              = ( Bndr v (NamedTCB Required) : binders
                , fvs `delVarSet` v `unionVarSet` kind_vars )
              | otherwise
              = ( Bndr v (AnonTCB VisArg) : binders
                , fvs `unionVarSet` kind_vars )
      where
        (binders, fvs) = go vs
        kind_vars      = tyCoVarsOfType $ tyVarKind v

-- | Take a ForAllTy apart, returning the list of tycovars and the result type.
-- This always succeeds, even if it returns only an empty list. Note that the
-- result type returned may have free variables that were bound by a forall.
splitForAllTyCoVars :: Type -> ([TyCoVar], Type) splitForAllTyCoVars ty = split ty ty [] where split _ (ForAllTy (Bndr tv _) ty) tvs = split ty ty (tv:tvs) split orig_ty ty tvs | Just ty' <- coreView ty = split orig_ty ty' tvs split orig_ty _ tvs = (reverse tvs, orig_ty) -- | Splits the longest initial sequence of 'ForAllTy's that satisfy -- @argf_pred@, returning the binders transformed by @argf_pred@ splitSomeForAllTyCoVarBndrs :: (ArgFlag -> Maybe af) -> Type -> ([VarBndr TyCoVar af], Type) splitSomeForAllTyCoVarBndrs argf_pred ty = split ty ty [] where split _ (ForAllTy (Bndr tcv argf) ty) tvs | Just argf' <- argf_pred argf = split ty ty (Bndr tcv argf' : tvs) split orig_ty ty tvs | Just ty' <- coreView ty = split orig_ty ty' tvs split orig_ty _ tvs = (reverse tvs, orig_ty) -- | Like 'splitForAllTyCoVars', but only splits 'ForAllTy's with 'Required' type -- variable binders. Furthermore, each returned tyvar is annotated with '()'. splitForAllReqTVBinders :: Type -> ([ReqTVBinder], Type) splitForAllReqTVBinders ty = splitSomeForAllTyCoVarBndrs argf_pred ty where argf_pred :: ArgFlag -> Maybe () argf_pred Required = Just () argf_pred (Invisible {}) = Nothing -- | Like 'splitForAllTyCoVars', but only splits 'ForAllTy's with 'Invisible' type -- variable binders. Furthermore, each returned tyvar is annotated with its -- 'Specificity'. splitForAllInvisTVBinders :: Type -> ([InvisTVBinder], Type) splitForAllInvisTVBinders ty = splitSomeForAllTyCoVarBndrs argf_pred ty where argf_pred :: ArgFlag -> Maybe Specificity argf_pred Required = Nothing argf_pred (Invisible spec) = Just spec -- | Like 'splitForAllTyCoVars', but split only for tyvars. -- This always succeeds, even if it returns only an empty list. Note that the -- result type returned may have free variables that were bound by a forall. 
splitForAllTyVars :: Type -> ([TyVar], Type)
splitForAllTyVars ty = split ty ty []
  where
    -- Stop at the first binder that is not a tyvar (i.e. a covar binder).
    split _ (ForAllTy (Bndr tv _) body) acc
      | isTyVar tv                              = split body body (tv : acc)
    split orig_ty t acc | Just t' <- coreView t = split orig_ty t' acc
    split orig_ty _ acc                         = (reverse acc, orig_ty)

-- | Checks whether this is a proper forall (with a named binder)
isForAllTy :: Type -> Bool
isForAllTy ty
  | ForAllTy {} <- coreFullView ty = True
  | otherwise                      = False

-- | Like `isForAllTy`, but returns True only if it is a tyvar binder
isForAllTy_ty :: Type -> Bool
isForAllTy_ty ty
  | ForAllTy (Bndr tv _) _ <- coreFullView ty
  , isTyVar tv = True
  | otherwise  = False

-- | Like `isForAllTy`, but returns True only if it is a covar binder
isForAllTy_co :: Type -> Bool
isForAllTy_co ty
  | ForAllTy (Bndr tv _) _ <- coreFullView ty
  , isCoVar tv = True
  | otherwise  = False

-- | Is this a function or forall?
isPiTy :: Type -> Bool
isPiTy ty = case coreFullView ty of
  ForAllTy {} -> True
  FunTy {}    -> True
  _           -> False

-- | Is this a function?
isFunTy :: Type -> Bool
isFunTy ty
  | FunTy {} <- coreFullView ty = True
  | otherwise                   = False

-- | Take a forall type apart, or panics if that is not possible.
splitForAllTyCoVar :: Type -> (TyCoVar, Type)
splitForAllTyCoVar ty
  | Just answer <- splitForAllTyCoVar_maybe ty = answer
  | otherwise                                  = pprPanic "splitForAllTyCoVar" (ppr ty)

-- | Drops all ForAllTys
dropForAlls :: Type -> Type
dropForAlls ty = go ty
  where
    go (ForAllTy _ res)        = go res
    go t | Just t' <- coreView t = go t'
    go res                     = res

-- | Attempts to take a forall type apart, but only if it's a proper forall,
-- with a named binder
splitForAllTyCoVar_maybe :: Type -> Maybe (TyCoVar, Type)
splitForAllTyCoVar_maybe ty
  | ForAllTy (Bndr tv _) inner_ty <- coreFullView ty = Just (tv, inner_ty)
  | otherwise                                        = Nothing

-- | Like 'splitForAllTyCoVar_maybe', but only returns Just if it is a tyvar binder.
splitForAllTyVar_maybe :: Type -> Maybe (TyCoVar, Type)
splitForAllTyVar_maybe ty
  | ForAllTy (Bndr tv _) inner_ty <- coreFullView ty
  , isTyVar tv = Just (tv, inner_ty)
  | otherwise  = Nothing

-- | Like 'splitForAllTyCoVar_maybe', but only returns Just if it is a covar binder.
splitForAllCoVar_maybe :: Type -> Maybe (TyCoVar, Type)
splitForAllCoVar_maybe ty
  | ForAllTy (Bndr tv _) inner_ty <- coreFullView ty
  , isCoVar tv = Just (tv, inner_ty)
  | otherwise  = Nothing

-- | Attempts to take a forall type apart; works with proper foralls and
-- functions
{-# INLINE splitPiTy_maybe #-}  -- callers will immediately deconstruct
splitPiTy_maybe :: Type -> Maybe (TyCoBinder, Type)
splitPiTy_maybe ty = case coreFullView ty of
  ForAllTy bndr body -> Just (Named bndr, body)
  FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res }
                     -> Just (Anon af (mkScaled w arg), res)
  _                  -> Nothing

-- | Takes a forall type apart, or panics
splitPiTy :: Type -> (TyCoBinder, Type)
splitPiTy ty
  | Just answer <- splitPiTy_maybe ty = answer
  | otherwise                         = pprPanic "splitPiTy" (ppr ty)

-- | Split off all TyCoBinders to a type, splitting both proper foralls
-- and functions
splitPiTys :: Type -> ([TyCoBinder], Type)
splitPiTys ty = split ty ty []
  where
    split _ (ForAllTy b res) bs = split res res (Named b : bs)
    split _ (FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res }) bs
      = split res res (Anon af (Scaled w arg) : bs)
    split orig_ty t bs | Just t' <- coreView t = split orig_ty t' bs
    split orig_ty _ bs                         = (reverse bs, orig_ty)

-- | Extracts a list of run-time arguments from a function type,
-- looking through newtypes to the right of arrows.
--
-- Examples:
--
-- @
--    newtype Identity a = I a
--
--    getRuntimeArgTys (Int -> Bool -> Double) == [(Int, VisArg), (Bool, VisArg)]
--    getRuntimeArgTys (Identity Int -> Bool -> Double) == [(Identity Int, VisArg), (Bool, VisArg)]
--    getRuntimeArgTys (Int -> Identity (Bool -> Identity Double)) == [(Int, VisArg), (Bool, VisArg)]
--    getRuntimeArgTys (forall a. Show a => Identity a -> a -> Int -> Bool)
--       == [(Show a, InvisArg), (Identity a, VisArg), (a, VisArg), (Int, VisArg)]
-- @
--
-- Note that, in the last case, the returned types might mention an out-of-scope
-- type variable. This function is used only when we really care about the /kinds/
-- of the returned types, so this is OK.
--
-- **Warning**: this function can return an infinite list. For example:
--
-- @
--    newtype N a = MkN (a -> N a)
--    getRuntimeArgTys (N a) == repeat (a, VisArg)
-- @
getRuntimeArgTys :: Type -> [(Type, AnonArgFlag)]
getRuntimeArgTys = go
  where
    go :: Type -> [(Type, AnonArgFlag)]
    go (ForAllTy _ res) = go res
    go (FunTy { ft_arg = arg, ft_res = res, ft_af = af }) = (arg, af) : go res
    go ty
      | Just ty' <- coreView ty                      = go ty'
      | Just (_, ty') <- topNormaliseNewType_maybe ty = go ty'
      | otherwise                                    = []

-- | Like 'splitPiTys' but split off only /named/ binders
-- and returns 'TyCoVarBinder's rather than 'TyCoBinder's
splitForAllTyCoVarBinders :: Type -> ([TyCoVarBinder], Type)
splitForAllTyCoVarBinders ty = split ty ty []
  where
    -- NB: expand synonyms before matching, unlike the sibling splitters above
    split orig_ty t bs | Just t' <- coreView t = split orig_ty t' bs
    split _ (ForAllTy b res) bs                = split res res (b : bs)
    split orig_ty _ bs                         = (reverse bs, orig_ty)
{-# INLINE splitForAllTyCoVarBinders #-}

invisibleTyBndrCount :: Type -> Int
-- Returns the number of leading invisible forall'd binders in the type
-- Includes invisible predicate arguments; e.g. for
--    e.g.  forall {k}. (k ~ *) => k -> k
-- returns 2 not 1
invisibleTyBndrCount ty = length (fst (splitInvisPiTys ty))

-- | Like 'splitPiTys', but returns only *invisible* binders, including constraints.
-- Stops at the first visible binder.
splitInvisPiTys :: Type -> ([TyCoBinder], Type)
splitInvisPiTys ty = split ty ty []
  where
    split _ (ForAllTy b res) bs
      | Bndr _ vis <- b
      , isInvisibleArgFlag vis = split res res (Named b : bs)
    split _ (FunTy { ft_af = InvisArg, ft_mult = mult, ft_arg = arg, ft_res = res }) bs
      = split res res (Anon InvisArg (mkScaled mult arg) : bs)
    split orig_ty t bs | Just t' <- coreView t = split orig_ty t' bs
    split orig_ty _ bs                         = (reverse bs, orig_ty)

splitInvisPiTysN :: Int -> Type -> ([TyCoBinder], Type)
-- ^ Same as 'splitInvisPiTys', but stop when
--   - you have found @n@ 'TyCoBinder's,
--   - or you run out of invisible binders
splitInvisPiTysN n ty = split n ty ty []
  where
    split n orig_ty ty bs
      | n == 0                  = (reverse bs, orig_ty)
      | Just ty' <- coreView ty = split n orig_ty ty' bs
      | ForAllTy b res <- ty
      , Bndr _ vis <- b
      , isInvisibleArgFlag vis  = split (n-1) res res (Named b : bs)
      | FunTy { ft_af = InvisArg, ft_mult = mult, ft_arg = arg, ft_res = res } <- ty
                                = split (n-1) res res (Anon InvisArg (Scaled mult arg) : bs)
      | otherwise               = (reverse bs, orig_ty)

-- | Given a 'TyCon' and a list of argument types, filter out any invisible
-- (i.e., 'Inferred' or 'Specified') arguments.
filterOutInvisibleTypes :: TyCon -> [Type] -> [Type]
filterOutInvisibleTypes tc tys = snd $ partitionInvisibleTypes tc tys

-- | Given a 'TyCon' and a list of argument types, filter out any 'Inferred'
-- arguments.
filterOutInferredTypes :: TyCon -> [Type] -> [Type]
filterOutInferredTypes tc tys
  = filterByList (map (/= Inferred) $ tyConArgFlags tc tys) tys

-- | Given a 'TyCon' and a list of argument types, partition the arguments
-- into:
--
-- 1. 'Inferred' or 'Specified' (i.e., invisible) arguments and
--
-- 2. 'Required' (i.e., visible) arguments
partitionInvisibleTypes :: TyCon -> [Type] -> ([Type], [Type])
partitionInvisibleTypes tc tys
  = partitionByList (map isInvisibleArgFlag $ tyConArgFlags tc tys) tys

-- | Given a list of things paired with their visibilities, partition the
-- things into (invisible things, visible things).
partitionInvisibles :: [(a, ArgFlag)] -> ([a], [a])
partitionInvisibles = partitionWith pick_invis
  where
    pick_invis :: (a, ArgFlag) -> Either a a
    pick_invis (thing, vis)
      | isInvisibleArgFlag vis = Left thing
      | otherwise              = Right thing

-- | Given a 'TyCon' and a list of argument types to which the 'TyCon' is
-- applied, determine each argument's visibility
-- ('Inferred', 'Specified', or 'Required').
--
-- Wrinkle: consider the following scenario:
--
-- > T :: forall k. k -> k
-- > tyConArgFlags T [forall m. m -> m -> m, S, R, Q]
--
-- After substituting, we get
--
-- > T (forall m. m -> m -> m) :: (forall m. m -> m -> m) -> forall n. n -> n -> n
--
-- Thus, the first argument is invisible, @S@ is visible, @R@ is invisible again,
-- and @Q@ is visible.
tyConArgFlags :: TyCon -> [Type] -> [ArgFlag]
tyConArgFlags tc = fun_kind_arg_flags (tyConKind tc)

-- | Given a 'Type' and a list of argument types to which the 'Type' is
-- applied, determine each argument's visibility
-- ('Inferred', 'Specified', or 'Required').
--
-- Most of the time, the arguments will be 'Required', but not always. Consider
-- @f :: forall a. a -> Type@. In @f Type Bool@, the first argument (@Type@) is
-- 'Specified' and the second argument (@Bool@) is 'Required'. It is precisely
-- this sort of higher-rank situation in which 'appTyArgFlags' comes in handy,
-- since @f Type Bool@ would be represented in Core using 'AppTy's.
-- (See also #15792).
appTyArgFlags :: Type -> [Type] -> [ArgFlag]
appTyArgFlags ty = fun_kind_arg_flags (typeKind ty)

-- | Given a function kind and a list of argument types (where each argument's
-- kind aligns with the corresponding position in the argument kind), determine
-- each argument's visibility ('Inferred', 'Specified', or 'Required').
fun_kind_arg_flags :: Kind -> [Type] -> [ArgFlag]
fun_kind_arg_flags = go emptyTCvSubst
  where
    go subst ki arg_tys
      | Just ki' <- coreView ki = go subst ki' arg_tys
    go _ _ [] = []
    go subst (ForAllTy (Bndr tv argf) res_ki) (arg_ty:arg_tys)
      = argf : go subst' res_ki arg_tys
      where
        subst' = extendTvSubst subst tv arg_ty
    go subst (TyVarTy tv) arg_tys
      | Just ki <- lookupTyVar subst tv = go subst ki arg_tys
    -- This FunTy case is important to handle kinds with nested foralls, such
    -- as this kind (inspired by #16518):
    --
    --   forall {k1} k2. k1 -> k2 -> forall k3. k3 -> Type
    --
    -- Here, we want to get the following ArgFlags:
    --
    -- [Inferred,   Specified, Required, Required, Specified, Required]
    -- forall {k1}. forall k2. k1 ->     k2 ->     forall k3. k3 ->     Type
    go subst (FunTy { ft_af = af, ft_res = res_ki }) (_:arg_tys)
      = argf : go subst res_ki arg_tys
      where
        argf = case af of
                 VisArg   -> Required
                 InvisArg -> Inferred
    go _ _ arg_tys = map (const Required) arg_tys
      -- something is ill-kinded. But this can happen
      -- when printing errors. Assume everything is Required.
-- @isTauTy@ tests if a type has no foralls or (=>)
isTauTy :: Type -> Bool
isTauTy ty | Just ty' <- coreView ty = isTauTy ty'
isTauTy (TyVarTy _)       = True
isTauTy (LitTy {})        = True
isTauTy (TyConApp tc tys) = all isTauTy tys && isTauTyCon tc
isTauTy (AppTy a b)       = isTauTy a && isTauTy b
isTauTy (FunTy af w a b)  = case af of
                              InvisArg -> False  -- e.g., Eq a => b
                              VisArg   -> isTauTy w && isTauTy a && isTauTy b  -- e.g., a -> b
isTauTy (ForAllTy {})     = False
isTauTy (CastTy ty _)     = isTauTy ty
isTauTy (CoercionTy _)    = False  -- Not sure about this

isAtomicTy :: Type -> Bool
-- True if the type is just a single token, and can be printed compactly
-- Used when deciding how to lay out type error messages; see the
-- call in GHC.Tc.Errors
isAtomicTy (TyVarTy {})    = True
isAtomicTy (LitTy {})      = True
isAtomicTy (TyConApp _ []) = True
isAtomicTy ty | isLiftedTypeKind ty = True
   -- 'Type' prints compactly as *
   -- See GHC.Iface.Type.ppr_kind_type
isAtomicTy _ = False

{-
%************************************************************************
%*                                                                      *
                      TyCoBinders
%*                                                                      *
%************************************************************************
-}

-- | Make an anonymous binder
mkAnonBinder :: AnonArgFlag -> Scaled Type -> TyCoBinder
mkAnonBinder = Anon

-- | Does this binder bind a variable that is /not/ erased? Returns
-- 'True' for anonymous binders.
isAnonTyCoBinder :: TyCoBinder -> Bool
isAnonTyCoBinder (Named {}) = False
isAnonTyCoBinder (Anon {})  = True

tyCoBinderVar_maybe :: TyCoBinder -> Maybe TyCoVar
tyCoBinderVar_maybe (Named tv) = Just $ binderVar tv
tyCoBinderVar_maybe _          = Nothing

tyCoBinderType :: TyCoBinder -> Type
tyCoBinderType (Named tvb) = binderType tvb
tyCoBinderType (Anon _ ty) = scaledThing ty

tyBinderType :: TyBinder -> Type
tyBinderType (Named (Bndr tv _)) = assert (isTyVar tv) tyVarKind tv
tyBinderType (Anon _ ty)         = scaledThing ty

-- | Extract a relevant type, if there is one.
binderRelevantType_maybe :: TyCoBinder -> Maybe Type
binderRelevantType_maybe (Named {})  = Nothing
binderRelevantType_maybe (Anon _ ty) = Just (scaledThing ty)

{-
************************************************************************
*                                                                      *
\subsection{Type families}
*                                                                      *
************************************************************************
-}

mkFamilyTyConApp :: TyCon -> [Type] -> Type
-- ^ Given a family instance TyCon and its arg types, return the
-- corresponding family type.  E.g:
--
-- > data family T a
-- > data instance T (Maybe b) = MkT b
--
-- Where the instance tycon is :RTL, so:
--
-- > mkFamilyTyConApp :RTL Int  =  T (Maybe Int)
mkFamilyTyConApp tc tys
  | Just (fam_tc, fam_tys) <- tyConFamInst_maybe tc
  , let tvs = tyConTyVars tc
        fam_subst = assertPpr (tvs `equalLength` tys) (ppr tc <+> ppr tys) $
                    zipTvSubst tvs tys
  = mkTyConApp fam_tc (substTys fam_subst fam_tys)
  | otherwise
  = mkTyConApp tc tys

-- | Get the type on the LHS of a coercion induced by a type/data
-- family instance.
coAxNthLHS :: CoAxiom br -> Int -> Type
coAxNthLHS ax ind
  = mkTyConApp (coAxiomTyCon ax) (coAxBranchLHS (coAxiomNthBranch ax ind))

isFamFreeTy :: Type -> Bool
isFamFreeTy ty | Just ty' <- coreView ty = isFamFreeTy ty'
isFamFreeTy (TyVarTy _)       = True
isFamFreeTy (LitTy {})        = True
isFamFreeTy (TyConApp tc tys) = all isFamFreeTy tys && isFamFreeTyCon tc
isFamFreeTy (AppTy a b)       = isFamFreeTy a && isFamFreeTy b
isFamFreeTy (FunTy _ w a b)   = isFamFreeTy w && isFamFreeTy a && isFamFreeTy b
isFamFreeTy (ForAllTy _ ty)   = isFamFreeTy ty
isFamFreeTy (CastTy ty _)     = isFamFreeTy ty
isFamFreeTy (CoercionTy _)    = False  -- Not sure about this

-- | Does this type classify a core (unlifted) Coercion?
-- At either role nominal or representational
--    (t1 ~# t2) or (t1 ~R# t2)
-- See Note [Types for coercions, predicates, and evidence] in "GHC.Core.TyCo.Rep"
isCoVarType :: Type -> Bool
  -- ToDo: should we check saturation?
isCoVarType ty
  | Just tc <- tyConAppTyCon_maybe ty
  = tc `hasKey` eqPrimTyConKey || tc `hasKey` eqReprPrimTyConKey
  | otherwise
  = False

buildSynTyCon :: Name -> [KnotTied TyConBinder] -> Kind   -- ^ /result/ kind
              -> [Role] -> KnotTied Type -> TyCon
-- This function is here because here is where we have
-- isFamFree and isTauTy
buildSynTyCon name binders res_kind roles rhs
  = mkSynonymTyCon name binders res_kind roles rhs
                   is_tau is_fam_free is_forgetful
  where
    is_tau       = isTauTy rhs
    is_fam_free  = isFamFreeTy rhs
    is_forgetful = any (not . (`elemVarSet` tyCoVarsOfType rhs) . binderVar) binders
                || uniqSetAny isForgetfulSynTyCon (tyConsOfType rhs)
         -- NB: This is allowed to be conservative, returning True more often
         -- than it should. See comments on GHC.Core.TyCon.isForgetfulSynTyCon

{-
************************************************************************
*                                                                      *
\subsection{Liftedness}
*                                                                      *
************************************************************************
-}

-- | Tries to compute the 'Levity' of the given type. Returns either
-- a definite 'Levity', or 'Nothing' if we aren't sure (e.g. the
-- type is representation-polymorphic).
--
-- Panics if the kind does not have the shape @TYPE r@.
typeLevity_maybe :: HasDebugCallStack => Type -> Maybe Levity
typeLevity_maybe ty = runtimeRepLevity_maybe (getRuntimeRep ty)

-- | Is the given type definitely unlifted?
-- See "Type#type_classification" for what an unlifted type is.
--
-- Panics on representation-polymorphic types; See 'mightBeUnliftedType' for
-- a more approximate predicate that behaves better in the presence of
-- representation polymorphism.
isUnliftedType :: HasDebugCallStack => Type -> Bool
-- isUnliftedType returns True for forall'd unlifted types:
--      x :: forall a. Int#
-- I found bindings like these were getting floated to the top level.
-- They are pretty bogus types, mind you. It would be better never to
-- construct them
isUnliftedType ty
  = case typeLevity_maybe ty of
      Just Lifted   -> False
      Just Unlifted -> True
      Nothing       -> pprPanic "isUnliftedType" (ppr ty <+> dcolon <+> ppr (typeKind ty))

-- | State token type.
isStateType :: Type -> Bool
isStateType ty
  = case tyConAppTyCon_maybe ty of
      Just tycon -> tycon == statePrimTyCon
      _          -> False

-- | Returns:
--
-- * 'False' if the type is /guaranteed/ unlifted or
-- * 'True' if it lifted, OR we aren't sure
--    (e.g. in a representation-polymorphic case)
mightBeLiftedType :: Type -> Bool
mightBeLiftedType = mightBeLifted . typeLevity_maybe

-- | Returns:
--
-- * 'False' if the type is /guaranteed/ lifted or
-- * 'True' if it is unlifted, OR we aren't sure
--    (e.g. in a representation-polymorphic case)
mightBeUnliftedType :: Type -> Bool
mightBeUnliftedType = mightBeUnlifted . typeLevity_maybe

-- | See "Type#type_classification" for what a boxed type is.
-- Panics on representation-polymorphic types; See 'mightBeUnliftedType' for
-- a more approximate predicate that behaves better in the presence of
-- representation polymorphism.
isBoxedType :: Type -> Bool
isBoxedType ty = isBoxedRuntimeRep (getRuntimeRep ty)

-- | Is this a type of kind RuntimeRep? (e.g. LiftedRep)
isRuntimeRepKindedTy :: Type -> Bool
isRuntimeRepKindedTy = isRuntimeRepTy . typeKind

-- | Drops prefix of RuntimeRep constructors in 'TyConApp's. Useful for e.g.
-- dropping 'LiftedRep arguments of unboxed tuple TyCon applications:
--
--   dropRuntimeRepArgs [ 'LiftedRep, 'IntRep
--                      , String, Int# ] == [String, Int#]
--
dropRuntimeRepArgs :: [Type] -> [Type]
dropRuntimeRepArgs = dropWhile isRuntimeRepKindedTy

-- | Extract the RuntimeRep classifier of a type. For instance,
-- @getRuntimeRep_maybe Int = Just LiftedRep@. Returns 'Nothing' if this is not
-- possible.
getRuntimeRep_maybe :: HasDebugCallStack => Type -> Maybe Type
getRuntimeRep_maybe = kindRep_maybe . typeKind

-- | Extract the RuntimeRep classifier of a type.
-- For instance, @getRuntimeRep Int = LiftedRep@. Panics if this is not possible.
getRuntimeRep :: HasDebugCallStack => Type -> Type
getRuntimeRep ty
  = case getRuntimeRep_maybe ty of
      Just r  -> r
      Nothing -> pprPanic "getRuntimeRep" (ppr ty <+> dcolon <+> ppr (typeKind ty))

-- | Extract the 'Levity' of a type. For example, @getLevity_maybe Int = Just Lifted@,
-- @getLevity (Array# Int) = Just Unlifted@, @getLevity Float# = Nothing@.
--
-- Returns 'Nothing' if this is not possible. Does not look through type family applications.
getLevity_maybe :: HasDebugCallStack => Type -> Maybe Type
getLevity_maybe ty
  | Just rep <- getRuntimeRep_maybe ty
  , Just (tc, [lev]) <- splitTyConApp_maybe rep
  , tc == boxedRepDataConTyCon
  = Just lev
  | otherwise
  = Nothing

-- | Extract the 'Levity' of a type. For example, @getLevity Int = Lifted@,
-- or @getLevity (Array# Int) = Unlifted@.
--
-- Panics if this is not possible. Does not look through type family applications.
getLevity :: HasDebugCallStack => Type -> Type
getLevity ty
  | Just lev <- getLevity_maybe ty = lev
  | otherwise                      = pprPanic "getLevity" (ppr ty <+> dcolon <+> ppr (typeKind ty))

isUnboxedTupleType :: Type -> Bool
isUnboxedTupleType ty
  = tyConAppTyCon (getRuntimeRep ty) `hasKey` tupleRepDataConKey
  -- NB: Do not use typePrimRep, as that can't tell the difference between
  -- unboxed tuples and unboxed sums

isUnboxedSumType :: Type -> Bool
isUnboxedSumType ty
  = tyConAppTyCon (getRuntimeRep ty) `hasKey` sumRepDataConKey

-- | See "Type#type_classification" for what an algebraic type is.
-- Should only be applied to /types/, as opposed to e.g.
-- partially saturated type constructors
isAlgType :: Type -> Bool
isAlgType ty
  = case splitTyConApp_maybe ty of
      Just (tc, ty_args) -> assert (ty_args `lengthIs` tyConArity tc) $
                            isAlgTyCon tc
      _other             -> False

-- | Check whether a type is a data family type
isDataFamilyAppType :: Type -> Bool
isDataFamilyAppType ty
  = case tyConAppTyCon_maybe ty of
      Just tc -> isDataFamilyTyCon tc
      _       -> False

-- | Computes whether an argument (or let right hand side) should
-- be computed strictly or lazily, based only on its type.
-- Currently, it's just 'isUnliftedType'.
-- Panics on representation-polymorphic types.
isStrictType :: HasDebugCallStack => Type -> Bool
isStrictType = isUnliftedType

isPrimitiveType :: Type -> Bool
-- ^ Returns true of types that are opaque to Haskell.
isPrimitiveType ty
  = case splitTyConApp_maybe ty of
      Just (tc, ty_args) -> assert (ty_args `lengthIs` tyConArity tc) $
                            isPrimTyCon tc
      _                  -> False

{-
************************************************************************
*                                                                      *
\subsection{Join points}
*                                                                      *
************************************************************************
-}

-- | Determine whether a type could be the type of a join point of given total
-- arity, according to the polymorphism rule. A join point cannot be polymorphic
-- in its return type, since given
--   join j @a @b x y z = e1 in e2,
-- the types of e1 and e2 must be the same, and a and b are not in scope for e2.
-- (See Note [The polymorphism rule of join points] in "GHC.Core".) Returns False
-- also if the type simply doesn't have enough arguments.
--
-- Note that we need to know how many arguments (type *and* value) the putative
-- join point takes; for instance, if
--   j :: forall a. a -> Int
-- then j could be a binary join point returning an Int, but it could *not* be a
-- unary join point returning a -> Int.
--
-- TODO: See Note [Excess polymorphism and join points]
isValidJoinPointType :: JoinArity -> Type -> Bool
isValidJoinPointType arity ty
  = valid_under emptyVarSet arity ty
  where
    valid_under tvs arity ty
      | arity == 0
      = tvs `disjointVarSet` tyCoVarsOfType ty
      | Just (t, ty') <- splitForAllTyCoVar_maybe ty
      = valid_under (tvs `extendVarSet` t) (arity-1) ty'
      | Just (_, _, res_ty) <- splitFunTy_maybe ty
      = valid_under tvs (arity-1) res_ty
      | otherwise
      = False

{- Note [Excess polymorphism and join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In principle, if a function would be a join point except that it fails
the polymorphism rule (see Note [The polymorphism rule of join points] in
GHC.Core), it can still be made a join point with some effort. This is because
all tail calls must return the same type (they return to the same context!), and
thus if the return type depends on an argument, that argument must always be the
same.

For instance, consider:

  let f :: forall a. a -> Char -> [a]
      f @a x c = ... f @a y 'a' ...
  in ... f @Int 1 'b' ... f @Int 2 'c' ...

(where the calls are tail calls). `f` fails the polymorphism rule because its
return type is [a], where [a] is bound. But since the type argument is always
'Int', we can rewrite it as:

  let f' :: Int -> Char -> [Int]
      f' x c = ... f' y 'a' ...
  in ... f' 1 'b' ... f 2 'c' ...

and now we can make f' a join point:

  join f' :: Int -> Char -> [Int]
       f' x c = ... jump f' y 'a' ...
  in ... jump f' 1 'b' ... jump f' 2 'c' ...

It's not clear that this comes up often, however. TODO: Measure how often and
add this analysis if necessary. See #14620.
************************************************************************
*                                                                      *
\subsection{Sequencing on types}
*                                                                      *
************************************************************************
-}

seqType :: Type -> ()
seqType (LitTy n)                   = n `seq` ()
seqType (TyVarTy tv)                = tv `seq` ()
seqType (AppTy t1 t2)               = seqType t1 `seq` seqType t2
seqType (FunTy _ w t1 t2)           = seqType w `seq` seqType t1 `seq` seqType t2
seqType (TyConApp tc tys)           = tc `seq` seqTypes tys
seqType (ForAllTy (Bndr tv _) ty)   = seqType (varType tv) `seq` seqType ty
seqType (CastTy ty co)              = seqType ty `seq` seqCo co
seqType (CoercionTy co)             = seqCo co

seqTypes :: [Type] -> ()
seqTypes []       = ()
seqTypes (ty:tys) = seqType ty `seq` seqTypes tys

{-
************************************************************************
*                                                                      *
                Comparison for types
        (We don't use instances so that we know where it happens)
*                                                                      *
************************************************************************

Note [Equality on AppTys]
~~~~~~~~~~~~~~~~~~~~~~~~~
In our cast-ignoring equality, we want to say that the following two
are equal:

  (Maybe |> co) (Int |> co')   ~?       Maybe Int

But the left is an AppTy while the right is a TyConApp. The solution is
to use repSplitAppTy_maybe to break up the TyConApp into its pieces and
then continue. Easy to do, but also easy to forget to do.

Note [Comparing nullary type synonyms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider the task of testing equality between two 'Type's of the form

  TyConApp tc []

where @tc@ is a type synonym. A naive way to perform this comparison these
would first expand the synonym and then compare the resulting expansions.

However, this is obviously wasteful and the RHS of @tc@ may be large; it is
much better to rather compare the TyCons directly. Consequently, before
expanding type synonyms in type comparisons we first look for a nullary
TyConApp and simply compare the TyCons if we find one.
Of course, if we find that the TyCons are *not* equal then we still need
to perform the expansion as their RHSs may still be equal.

We perform this optimisation in a number of places:

 * GHC.Core.Types.eqType
 * GHC.Core.Types.nonDetCmpType
 * GHC.Core.Unify.unify_ty
 * TcCanonical.can_eq_nc'
 * TcUnify.uType

This optimisation is especially helpful for the ubiquitous GHC.Types.Type,
since GHC prefers to use the type synonym over @TYPE 'LiftedRep@ applications
whenever possible. See Note [Using synonyms to compress types] in
GHC.Core.Type for details.
-}

eqType :: Type -> Type -> Bool
-- ^ Type equality on source types. Does not look through @newtypes@,
-- 'PredType's or type families, but it does look through type synonyms.
-- This first checks that the kinds of the types are equal and then
-- checks whether the types are equal, ignoring casts and coercions.
-- (The kind check is a recursive call, but since all kinds have type
-- @Type@, there is no need to check the types of kinds.)
-- See also Note [Non-trivial definitional equality] in "GHC.Core.TyCo.Rep".
eqType t1 t2 = isEqual $ nonDetCmpType t1 t2
  -- It's OK to use nonDetCmpType here and eqType is deterministic,
  -- nonDetCmpType does equality deterministically

-- | Compare types with respect to a (presumably) non-empty 'RnEnv2'.
eqTypeX :: RnEnv2 -> Type -> Type -> Bool
eqTypeX env t1 t2 = isEqual $ nonDetCmpTypeX env t1 t2
  -- It's OK to use nonDetCmpType here and eqTypeX is deterministic,
  -- nonDetCmpTypeX does equality deterministically

-- | Type equality on lists of types, looking through type synonyms
-- but not newtypes.
eqTypes :: [Type] -> [Type] -> Bool
eqTypes tys1 tys2 = isEqual $ nonDetCmpTypes tys1 tys2
  -- It's OK to use nonDetCmpType here and eqTypes is deterministic,
  -- nonDetCmpTypes does equality deterministically

eqVarBndrs :: RnEnv2 -> [Var] -> [Var] -> Maybe RnEnv2
-- Check that the var lists are the same length
-- and have matching kinds; if so, extend the RnEnv2
-- Returns Nothing if they don't match
eqVarBndrs env [] [] = Just env
eqVarBndrs env (tv1:tvs1) (tv2:tvs2)
  | eqTypeX env (varType tv1) (varType tv2)
  = eqVarBndrs (rnBndr2 env tv1 tv2) tvs1 tvs2
eqVarBndrs _ _ _ = Nothing

-- Now here comes the real worker

{- Note [nonDetCmpType nondeterminism]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
nonDetCmpType is implemented in terms of nonDetCmpTypeX. nonDetCmpTypeX
uses nonDetCmpTc which compares TyCons by their Unique value. Using Uniques for
ordering leads to nondeterminism. We hit the same problem in the TyVarTy case,
comparing type variables is nondeterministic, note the call to nonDetCmpVar in
nonDetCmpTypeX.
See Note [Unique Determinism] for more details.

Note [Computing equality on types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are several places within GHC that depend on the precise choice of
definitional equality used. If we change that definition, all these places
must be updated. This Note merely serves as a place for all these places
to refer to, so searching for references to this Note will find every place
that needs to be updated.

See also Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep.
-}

nonDetCmpType :: Type -> Type -> Ordering
-- See Note [Comparing nullary type synonyms]
nonDetCmpType (TyConApp tc1 []) (TyConApp tc2 [])
  | tc1 == tc2
  = EQ
nonDetCmpType t1 t2
  -- we know k1 and k2 have the same kind, because they both have kind *.
  = nonDetCmpTypeX rn_env t1 t2
  where
    rn_env = mkRnEnv2 (mkInScopeSet (tyCoVarsOfTypes [t1, t2]))
{-# INLINE nonDetCmpType #-}

nonDetCmpTypes :: [Type] -> [Type] -> Ordering
nonDetCmpTypes ts1 ts2 = nonDetCmpTypesX rn_env ts1 ts2
  where
    rn_env = mkRnEnv2 (mkInScopeSet (tyCoVarsOfTypes (ts1 ++ ts2)))

-- | An ordering relation between two 'Type's (known below as @t1 :: k1@
-- and @t2 :: k2@)
data TypeOrdering
  = TLT   -- ^ @t1 < t2@
  | TEQ   -- ^ @t1 ~ t2@ and there are no casts in either,
          -- therefore we can conclude @k1 ~ k2@
  | TEQX  -- ^ @t1 ~ t2@ yet one of the types contains a cast so
          -- they may differ in kind.
  | TGT   -- ^ @t1 > t2@
  deriving (Eq, Ord, Enum, Bounded)

nonDetCmpTypeX :: RnEnv2 -> Type -> Type -> Ordering  -- Main workhorse
    -- See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep
    -- See Note [Computing equality on types]
nonDetCmpTypeX env orig_t1 orig_t2
  = case go env orig_t1 orig_t2 of
      -- If there are casts then we also need to do a comparison of
      -- the kinds of the types being compared
      TEQX        -> toOrdering $ go env k1 k2
      ty_ordering -> toOrdering ty_ordering
  where
    k1 = typeKind orig_t1
    k2 = typeKind orig_t2

    toOrdering :: TypeOrdering -> Ordering
    toOrdering TLT  = LT
    toOrdering TEQ  = EQ
    toOrdering TEQX = EQ
    toOrdering TGT  = GT

    liftOrdering :: Ordering -> TypeOrdering
    liftOrdering LT = TLT
    liftOrdering EQ = TEQ
    liftOrdering GT = TGT

    thenCmpTy :: TypeOrdering -> TypeOrdering -> TypeOrdering
    thenCmpTy TEQ  rel = rel
    thenCmpTy TEQX rel = hasCast rel
    thenCmpTy rel  _   = rel

    hasCast :: TypeOrdering -> TypeOrdering
    hasCast TEQ = TEQX
    hasCast rel = rel

    -- Returns both the resulting ordering relation between
    -- the two types and whether either contains a cast.
    go :: RnEnv2 -> Type -> Type -> TypeOrdering
    -- See Note [Comparing nullary type synonyms]
    go _ (TyConApp tc1 []) (TyConApp tc2 [])
      | tc1 == tc2
      = TEQ
    go env t1 t2
      | Just t1' <- coreView t1 = go env t1' t2
      | Just t2' <- coreView t2 = go env t1 t2'
    go env (TyVarTy tv1) (TyVarTy tv2)
      = liftOrdering $ rnOccL env tv1 `nonDetCmpVar` rnOccR env tv2
    go env (ForAllTy (Bndr tv1 _) t1) (ForAllTy (Bndr tv2 _) t2)
      = go env (varType tv1) (varType tv2)
        `thenCmpTy` go (rnBndr2 env tv1 tv2) t1 t2
        -- See Note [Equality on AppTys]
    go env (AppTy s1 t1) ty2
      | Just (s2, t2) <- repSplitAppTy_maybe ty2
      = go env s1 s2 `thenCmpTy` go env t1 t2
    go env ty1 (AppTy s2 t2)
      | Just (s1, t1) <- repSplitAppTy_maybe ty1
      = go env s1 s2 `thenCmpTy` go env t1 t2
    go env (FunTy _ w1 s1 t1) (FunTy _ w2 s2 t2)
        -- NB: nonDepCmpTypeX does the kind check requested by
        -- Note [Equality on FunTys] in GHC.Core.TyCo.Rep
      = liftOrdering (nonDetCmpTypeX env s1 s2 `thenCmp` nonDetCmpTypeX env t1 t2)
          `thenCmpTy` go env w1 w2
        -- Comparing multiplicities last because the test is usually true
    go env (TyConApp tc1 tys1) (TyConApp tc2 tys2)
      = liftOrdering (tc1 `nonDetCmpTc` tc2) `thenCmpTy` gos env tys1 tys2
    go _   (LitTy l1)        (LitTy l2)        = liftOrdering (nonDetCmpTyLit l1 l2)
    go env (CastTy t1 _)     t2                = hasCast $ go env t1 t2
    go env t1                (CastTy t2 _)     = hasCast $ go env t1 t2
    go _   (CoercionTy {})   (CoercionTy {})   = TEQ

        -- Deal with the rest: TyVarTy < CoercionTy < AppTy < LitTy < TyConApp < ForAllTy
    go _ ty1 ty2
      = liftOrdering $ (get_rank ty1) `compare` (get_rank ty2)
      where
        get_rank :: Type -> Int
        get_rank (CastTy {})
          = pprPanic "nonDetCmpTypeX.get_rank" (ppr [ty1,ty2])
        get_rank (TyVarTy {})    = 0
        get_rank (CoercionTy {}) = 1
        get_rank (AppTy {})      = 3
        get_rank (LitTy {})      = 4
        get_rank (TyConApp {})   = 5
        get_rank (FunTy {})      = 6
        get_rank (ForAllTy {})   = 7

    gos :: RnEnv2 -> [Type] -> [Type] -> TypeOrdering
    gos _   []         []         = TEQ
    gos _   []         _          = TLT
    gos _   _          []         = TGT
    gos env (ty1:tys1) (ty2:tys2) = go env ty1 ty2 `thenCmpTy` gos env tys1 tys2

-------------
nonDetCmpTypesX :: RnEnv2 -> [Type] -> [Type] -> Ordering
nonDetCmpTypesX _   []         []         = EQ
nonDetCmpTypesX env (t1:tys1)  (t2:tys2)  = nonDetCmpTypeX env t1 t2 `thenCmp`
                                            nonDetCmpTypesX env tys1 tys2
nonDetCmpTypesX _   []         _          = LT
nonDetCmpTypesX _   _          []         = GT

-------------
-- | Compare two 'TyCon's. NB: This should /never/ see 'Constraint' (as
-- recognized by Kind.isConstraintKindCon) which is considered a synonym for
-- 'Type' in Core.
-- See Note [Kind Constraint and kind Type] in "GHC.Core.Type".
-- See Note [nonDetCmpType nondeterminism]
nonDetCmpTc :: TyCon -> TyCon -> Ordering
nonDetCmpTc tc1 tc2
  = assert (not (isConstraintKindCon tc1) && not (isConstraintKindCon tc2)) $
    u1 `nonDetCmpUnique` u2
  where
    u1 = tyConUnique tc1
    u2 = tyConUnique tc2

{-
************************************************************************
*                                                                      *
                The kind of a type
*                                                                      *
************************************************************************

Note [typeKind vs tcTypeKind]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We have two functions to get the kind of a type

  * typeKind   ignores  the distinction between Constraint and *
  * tcTypeKind respects the distinction between Constraint and *

tcTypeKind is used by the type inference engine, for which Constraint
and * are different; after that we use typeKind.

See also Note [coreView vs tcView]

Note [Kinding rules for types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In typeKind we consider Constraint and (TYPE LiftedRep) to be identical.
We then have

         t1 : TYPE rep1
         t2 : TYPE rep2
   (FUN) ----------------
         t1 -> t2 : Type

         ty : TYPE rep
         `a` is not free in rep
(FORALL) -----------------------
         forall a. ty : TYPE rep

In tcTypeKind we consider Constraint and (TYPE LiftedRep) to be distinct:

          t1 : TYPE rep1
          t2 : TYPE rep2
    (FUN) ----------------
          t1 -> t2 : Type

          t1 : Constraint
          t2 : TYPE rep
  (PRED1) ----------------
          t1 => t2 : Type

          t1 : Constraint
          t2 : Constraint
  (PRED2) ---------------------
          t1 => t2 : Constraint

          ty : TYPE rep
          `a` is not free in rep
(FORALL1) -----------------------
          forall a. ty : TYPE rep

          ty : Constraint
(FORALL2) -------------------------
          forall a.
ty : Constraint Note that: * The only way we distinguish '->' from '=>' is by the fact that the argument is a PredTy. Both are FunTys Note [Phantom type variables in kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider type K (r :: RuntimeRep) = Type -- Note 'r' is unused data T r :: K r -- T :: forall r -> K r foo :: forall r. T r The body of the forall in foo's type has kind (K r), and normally it would make no sense to have forall r. (ty :: K r) because the kind of the forall would escape the binding of 'r'. But in this case it's fine because (K r) exapands to Type, so we explicitly /permit/ the type forall r. T r To accommodate such a type, in typeKind (forall a.ty) we use occCheckExpand to expand any type synonyms in the kind of 'ty' to eliminate 'a'. See kinding rule (FORALL) in Note [Kinding rules for types] See also * GHC.Core.Type.occCheckExpand * GHC.Core.Utils.coreAltsType * GHC.Tc.Validity.checkEscapingKind all of which grapple with the same problem. See #14939. -} ----------------------------- typeKind :: HasDebugCallStack => Type -> Kind -- No need to expand synonyms typeKind (TyConApp tc tys) = piResultTys (tyConKind tc) tys typeKind (LitTy l) = typeLiteralKind l typeKind (FunTy {}) = liftedTypeKind typeKind (TyVarTy tyvar) = tyVarKind tyvar typeKind (CastTy _ty co) = coercionRKind co typeKind (CoercionTy co) = coercionType co typeKind (AppTy fun arg) = go fun [arg] where -- Accumulate the type arguments, so we can call piResultTys, -- rather than a succession of calls to piResultTy (which is -- asymptotically costly as the number of arguments increases) go (AppTy fun arg) args = go fun (arg:args) go fun args = piResultTys (typeKind fun) args typeKind ty@(ForAllTy {}) = case occCheckExpand tvs body_kind of -- We must make sure tv does not occur in kind -- As it is already out of scope! 
-- See Note [Phantom type variables in kinds] Just k' -> k' Nothing -> pprPanic "typeKind" (ppr ty $$ ppr tvs $$ ppr body <+> dcolon <+> ppr body_kind) where (tvs, body) = splitForAllTyVars ty body_kind = typeKind body --------------------------------------------- -- Utilities to be used in GHC.Core.Unify, -- which uses "tc" functions --------------------------------------------- tcTypeKind :: HasDebugCallStack => Type -> Kind -- No need to expand synonyms tcTypeKind (TyConApp tc tys) = piResultTys (tyConKind tc) tys tcTypeKind (LitTy l) = typeLiteralKind l tcTypeKind (TyVarTy tyvar) = tyVarKind tyvar tcTypeKind (CastTy _ty co) = coercionRKind co tcTypeKind (CoercionTy co) = coercionType co tcTypeKind (FunTy { ft_af = af, ft_res = res }) | InvisArg <- af , tcIsConstraintKind (tcTypeKind res) = constraintKind -- Eq a => Ord a :: Constraint | otherwise -- Eq a => a -> a :: TYPE LiftedRep = liftedTypeKind -- Eq a => Array# Int :: Type LiftedRep (not TYPE PtrRep) tcTypeKind (AppTy fun arg) = go fun [arg] where -- Accumulate the type arguments, so we can call piResultTys, -- rather than a succession of calls to piResultTy (which is -- asymptotically costly as the number of arguments increases) go (AppTy fun arg) args = go fun (arg:args) go fun args = piResultTys (tcTypeKind fun) args tcTypeKind ty@(ForAllTy {}) | tcIsConstraintKind body_kind = constraintKind | otherwise = case occCheckExpand tvs body_kind of -- We must make sure tv does not occur in kind -- As it is already out of scope! 
-- See Note [Phantom type variables in kinds] Just k' -> k' Nothing -> pprPanic "tcTypeKind" (ppr ty $$ ppr tvs $$ ppr body <+> dcolon <+> ppr body_kind) where (tvs, body) = splitForAllTyVars ty body_kind = tcTypeKind body isPredTy :: HasDebugCallStack => Type -> Bool -- See Note [Types for coercions, predicates, and evidence] in GHC.Core.TyCo.Rep isPredTy ty = tcIsConstraintKind (tcTypeKind ty) -- tcIsConstraintKind stuff only makes sense in the typechecker -- After that Constraint = Type -- See Note [coreView vs tcView] -- Defined here because it is used in isPredTy and tcRepSplitAppTy_maybe (sigh) tcIsConstraintKind :: Kind -> Bool tcIsConstraintKind ty | Just (tc, args) <- tcSplitTyConApp_maybe ty -- Note: tcSplit here , isConstraintKindCon tc = assertPpr (null args) (ppr ty) True | otherwise = False -- | Like 'kindRep_maybe', but considers 'Constraint' to be distinct -- from 'Type'. For a version that treats them as the same type, see -- 'kindRep_maybe'. tcKindRep_maybe :: HasDebugCallStack => Kind -> Maybe Type tcKindRep_maybe kind | Just (tc, [arg]) <- tcSplitTyConApp_maybe kind -- Note: tcSplit here , tc `hasKey` tYPETyConKey = Just arg | otherwise = Nothing -- | Is this kind equivalent to 'Type'? -- -- This considers 'Constraint' to be distinct from 'Type'. For a version that -- treats them as the same type, see 'isLiftedTypeKind'. tcIsLiftedTypeKind :: Kind -> Bool tcIsLiftedTypeKind kind = case tcKindRep_maybe kind of Just rep -> isLiftedRuntimeRep rep Nothing -> False -- | Is this kind equivalent to @TYPE (BoxedRep l)@ for some @l :: Levity@? -- -- This considers 'Constraint' to be distinct from 'Type'. For a version that -- treats them as the same type, see 'isLiftedTypeKind'. tcIsBoxedTypeKind :: Kind -> Bool tcIsBoxedTypeKind kind = case tcKindRep_maybe kind of Just rep -> isBoxedRuntimeRep rep Nothing -> False -- | Is this kind equivalent to @TYPE r@ (for some unknown r)? -- -- This considers 'Constraint' to be distinct from @*@. 
tcIsRuntimeTypeKind :: Kind -> Bool
tcIsRuntimeTypeKind kind = isJust (tcKindRep_maybe kind)

tcReturnsConstraintKind :: Kind -> Bool
-- True <=> the Kind ultimately returns a Constraint
--   E.g.  * -> Constraint
--         forall k. k -> Constraint
tcReturnsConstraintKind kind
  | Just kind' <- tcView kind = tcReturnsConstraintKind kind'
tcReturnsConstraintKind (ForAllTy _ ty)         = tcReturnsConstraintKind ty
tcReturnsConstraintKind (FunTy { ft_res = ty }) = tcReturnsConstraintKind ty
tcReturnsConstraintKind (TyConApp tc _)         = isConstraintKindCon tc
tcReturnsConstraintKind _                       = False

--------------------------
typeLiteralKind :: TyLit -> Kind
typeLiteralKind (NumTyLit {})  = naturalTy
typeLiteralKind (StrTyLit {})  = typeSymbolKind
typeLiteralKind (CharTyLit {}) = charTy

-- | Returns True if a type has a syntactically fixed runtime rep,
-- as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete.
--
-- This function is equivalent to @('isFixedRuntimeRepKind' . 'typeKind')@,
-- but much faster.
--
-- __Precondition:__ The type has kind @('TYPE' blah)@
typeHasFixedRuntimeRep :: Type -> Bool
typeHasFixedRuntimeRep = go
  where
    go (TyConApp tc _)
      | tcHasFixedRuntimeRep tc = True
    go (FunTy {})               = True
    go (LitTy {})               = True
    go (ForAllTy _ ty)          = go ty
    go ty                       = isFixedRuntimeRepKind (typeKind ty)

-- | Looking past all pi-types, does the end result have a
-- fixed runtime rep, as per Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete?
--
-- Examples:
--
--   * False for @(forall r (a :: TYPE r). String -> a)@
--   * True for @(forall r1 r2 (a :: TYPE r1) (b :: TYPE r2). a -> b -> Type)@
resultHasFixedRuntimeRep :: Type -> Bool
resultHasFixedRuntimeRep = typeHasFixedRuntimeRep . snd . splitPiTys

{-
**********************************************************************
*                                                                    *
           Occurs check expansion
%*                                                                   *
%*********************************************************************
-}

{- Note [Occurs check expansion]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(occurCheckExpand tv xi) expands synonyms in xi just enough to get rid
of occurrences of tv outside type function arguments, if that is
possible; otherwise, it returns Nothing.

For example, suppose we have
  type F a b = [a]
Then
  occCheckExpand b (F Int b) = Just [Int]
but
  occCheckExpand a (F a Int) = Nothing

We don't promise to do the absolute minimum amount of expanding
necessary, but we try not to do expansions we don't need to.
We prefer doing inner expansions first.  For example,
  type F a b = (a, Int, a, [a])
  type G b   = Char
We have
  occCheckExpand b (F (G b)) = Just (F Char)
even though we could also expand F to get rid of b.

Note [Occurrence checking: look inside kinds]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we are considering unifying
   (alpha :: *) ~ Int -> (beta :: alpha -> alpha)
This may be an error (what is that alpha doing inside beta's kind?),
but we must not make the mistake of actually unifying or we'll
build an infinite data structure.  So when looking for occurrences
of alpha in the rhs, we must look in the kinds of type variables
that occur there.

occCheckExpand tries to expand type synonyms to remove
unnecessary occurrences of a variable, and thereby get past an
occurs-check failure.  This is good; but
     we can't do it in the /kind/ of a variable /occurrence/

For example #18451 built an infinite type:
    type Const a b = a
    data SameKind :: k -> k -> Type
    type T (k :: Const Type a) = forall (b :: k). SameKind a b

We have
  b :: k
  k :: Const Type a
  a :: k   (must be same as b)

So if we aren't careful, a's kind mentions a, which is bad.
And expanding an /occurrence/ of 'a' doesn't help, because the
/binding site/ is the master copy and all the occurrences should
match it.

Here's a related example:
   f :: forall a b (c :: Const Type b). Proxy '[a, c]

The list means that 'a' gets the same kind as 'c'; but that
kind mentions 'b', so the binders are out of order.

Bottom line: in occCheckExpand, do not expand inside the kinds
of occurrences.  See bad_var_occ in occCheckExpand.  And
see #18451 for more debate.
-}

occCheckExpand :: [Var] -> Type -> Maybe Type
-- See Note [Occurs check expansion]
-- We may have needed to do some type synonym unfolding in order to
-- get rid of the variable (or forall), so we also return the unfolded
-- version of the type, which is guaranteed to be syntactically free
-- of the given type variable.  If the type is already syntactically
-- free of the variable, then the same type is returned.
occCheckExpand vs_to_avoid ty
  | null vs_to_avoid  -- Efficient shortcut
  = Just ty           -- Can happen, eg. GHC.Core.Utils.mkSingleAltCase

  | otherwise
  = go (mkVarSet vs_to_avoid, emptyVarEnv) ty
  where
    go :: (VarSet, VarEnv TyCoVar) -> Type -> Maybe Type
          -- The VarSet is the set of variables we are trying to avoid
          -- The VarEnv carries mappings necessary
          -- because of kind expansion
    go (as, env) ty@(TyVarTy tv)
      | Just tv' <- lookupVarEnv env tv = return (mkTyVarTy tv')
      | bad_var_occ as tv               = Nothing
      | otherwise                       = return ty

    go _ ty@(LitTy {}) = return ty
    go cxt (AppTy ty1 ty2) = do { ty1' <- go cxt ty1
                                ; ty2' <- go cxt ty2
                                ; return (mkAppTy ty1' ty2') }
    go cxt ty@(FunTy _ w ty1 ty2)
       = do { w'   <- go cxt w
            ; ty1' <- go cxt ty1
            ; ty2' <- go cxt ty2
            ; return (ty { ft_mult = w', ft_arg = ty1', ft_res = ty2' }) }
    go cxt@(as, env) (ForAllTy (Bndr tv vis) body_ty)
       = do { ki' <- go cxt (varType tv)
            ; let tv'  = setVarType tv ki'
                  env' = extendVarEnv env tv tv'
                  as'  = as `delVarSet` tv
            ; body' <- go (as', env') body_ty
            ; return (ForAllTy (Bndr tv' vis) body') }

    -- For a type constructor application, first try expanding away the
    -- offending variable from the arguments.  If that doesn't work, next
    -- see if the type constructor is a type synonym, and if so, expand
    -- it and try again.
    go cxt ty@(TyConApp tc tys)
      = case mapM (go cxt) tys of
          Just tys' -> return (mkTyConApp tc tys')
          Nothing | Just ty' <- tcView ty -> go cxt ty'
                  | otherwise             -> Nothing
                      -- Failing that, try to expand a synonym

    go cxt (CastTy ty co) =  do { ty' <- go cxt ty
                                ; co' <- go_co cxt co
                                ; return (mkCastTy ty' co') }
    go cxt (CoercionTy co) = do { co' <- go_co cxt co
                                ; return (mkCoercionTy co') }

    ------------------
    bad_var_occ :: VarSet -> Var -> Bool
    -- Works for TyVar and CoVar
    -- See Note [Occurrence checking: look inside kinds]
    bad_var_occ vs_to_avoid v
       =  v                          `elemVarSet`       vs_to_avoid
       || tyCoVarsOfType (varType v) `intersectsVarSet` vs_to_avoid

    ------------------
    -- Expansion inside an optional coercion (MCoercion)
    go_mco _   MRefl    = return MRefl
    go_mco ctx (MCo co) = MCo <$> go_co ctx co

    ------------------
    go_co cxt (Refl ty)                 = do { ty' <- go cxt ty
                                             ; return (mkNomReflCo ty') }
    go_co cxt (GRefl r ty mco)          = do { mco' <- go_mco cxt mco
                                             ; ty' <- go cxt ty
                                             ; return (mkGReflCo r ty' mco') }
      -- Note: Coercions do not contain type synonyms
    go_co cxt (TyConAppCo r tc args)    = do { args' <- mapM (go_co cxt) args
                                             ; return (mkTyConAppCo r tc args') }
    go_co cxt (AppCo co arg)            = do { co' <- go_co cxt co
                                             ; arg' <- go_co cxt arg
                                             ; return (mkAppCo co' arg') }
    go_co cxt@(as, env) (ForAllCo tv kind_co body_co)
      = do { kind_co' <- go_co cxt kind_co
           ; let tv' = setVarType tv $
                       coercionLKind kind_co'
                 env' = extendVarEnv env tv tv'
                 as'  = as `delVarSet` tv
           ; body' <- go_co (as', env') body_co
           ; return (ForAllCo tv' kind_co' body') }
    go_co cxt (FunCo r w co1 co2)       = do { co1' <- go_co cxt co1
                                             ; co2' <- go_co cxt co2
                                             ; w' <- go_co cxt w
                                             ; return (mkFunCo r w' co1' co2') }
    go_co (as,env) co@(CoVarCo c)
      | Just c' <- lookupVarEnv env c   = return (mkCoVarCo c')
      | bad_var_occ as c                = Nothing
      | otherwise                       = return co

    go_co (as,_) co@(HoleCo h)
      | bad_var_occ as (ch_co_var h)    = Nothing
      | otherwise                       = return co

    go_co cxt (AxiomInstCo ax ind args) = do { args' <- mapM (go_co cxt) args
                                             ; return (mkAxiomInstCo ax ind args') }
    go_co cxt (UnivCo p r ty1 ty2)      = do { p' <- go_prov cxt p
                                             ; ty1' <- go cxt ty1
                                             ; ty2' <- go cxt ty2
                                             ; return (mkUnivCo p' r ty1' ty2') }
    go_co cxt (SymCo co)                = do { co' <- go_co cxt co
                                             ; return (mkSymCo co') }
    go_co cxt (TransCo co1 co2)         = do { co1' <- go_co cxt co1
                                             ; co2' <- go_co cxt co2
                                             ; return (mkTransCo co1' co2') }
    go_co cxt (NthCo r n co)            = do { co' <- go_co cxt co
                                             ; return (mkNthCo r n co') }
    go_co cxt (LRCo lr co)              = do { co' <- go_co cxt co
                                             ; return (mkLRCo lr co') }
    go_co cxt (InstCo co arg)           = do { co' <- go_co cxt co
                                             ; arg' <- go_co cxt arg
                                             ; return (mkInstCo co' arg') }
    go_co cxt (KindCo co)               = do { co' <- go_co cxt co
                                             ; return (mkKindCo co') }
    go_co cxt (SubCo co)                = do { co' <- go_co cxt co
                                             ; return (mkSubCo co') }
    go_co cxt (AxiomRuleCo ax cs)       = do { cs' <- mapM (go_co cxt) cs
                                             ; return (mkAxiomRuleCo ax cs') }

    ------------------
    go_prov cxt (PhantomProv co)    = PhantomProv <$> go_co cxt co
    go_prov cxt (ProofIrrelProv co) = ProofIrrelProv <$> go_co cxt co
    go_prov _   p@(PluginProv _)    = return p
    go_prov _   p@(CorePrepProv _)  = return p

{-
%************************************************************************
%*                                                                      *
        Miscellaneous functions
%*                                                                      *
%************************************************************************

-}

-- | All type constructors occurring in the type; looking through type
-- synonyms, but not newtypes.
-- When it finds a Class, it returns the class TyCon.
tyConsOfType :: Type -> UniqSet TyCon
tyConsOfType ty
  = go ty
  where
     go :: Type -> UniqSet TyCon  -- The UniqSet does duplicate elim
     go ty | Just ty' <- coreView ty = go ty'
     go (TyVarTy {})                = emptyUniqSet
     go (LitTy {})                  = emptyUniqSet
     go (TyConApp tc tys)           = go_tc tc `unionUniqSets` go_s tys
     go (AppTy a b)                 = go a `unionUniqSets` go b
     go (FunTy _ w a b)             = go w `unionUniqSets`
                                      go a `unionUniqSets` go b
                                      `unionUniqSets` go_tc funTyCon
     go (ForAllTy (Bndr tv _) ty)   = go ty `unionUniqSets` go (varType tv)
     go (CastTy ty co)              = go ty `unionUniqSets` go_co co
     go (CoercionTy co)             = go_co co

     go_co (Refl ty)               = go ty
     go_co (GRefl _ ty mco)        = go ty `unionUniqSets` go_mco mco
     go_co (TyConAppCo _ tc args)  = go_tc tc `unionUniqSets` go_cos args
     go_co (AppCo co arg)          = go_co co `unionUniqSets` go_co arg
     go_co (ForAllCo _ kind_co co) = go_co kind_co `unionUniqSets` go_co co
     go_co (FunCo _ co_mult co1 co2) = go_co co_mult `unionUniqSets`
                                       go_co co1 `unionUniqSets` go_co co2
     go_co (AxiomInstCo ax _ args) = go_ax ax `unionUniqSets` go_cos args
     go_co (UnivCo p _ t1 t2)      = go_prov p `unionUniqSets` go t1 `unionUniqSets` go t2
     go_co (CoVarCo {})            = emptyUniqSet
     go_co (HoleCo {})             = emptyUniqSet
     go_co (SymCo co)              = go_co co
     go_co (TransCo co1 co2)       = go_co co1 `unionUniqSets` go_co co2
     go_co (NthCo _ _ co)          = go_co co
     go_co (LRCo _ co)             = go_co co
     go_co (InstCo co arg)         = go_co co `unionUniqSets` go_co arg
     go_co (KindCo co)             = go_co co
     go_co (SubCo co)              = go_co co
     go_co (AxiomRuleCo _ cs)      = go_cos cs

     go_mco MRefl    = emptyUniqSet
     go_mco (MCo co) = go_co co

     go_prov (PhantomProv co)    = go_co co
     go_prov (ProofIrrelProv co) = go_co co
     go_prov (PluginProv _)      = emptyUniqSet
     go_prov (CorePrepProv _)    = emptyUniqSet
        -- this last case can happen from the tyConsOfType used from
        -- checkTauTvUpdate

     go_s tys   = foldr (unionUniqSets . go)    emptyUniqSet tys
     go_cos cos = foldr (unionUniqSets . go_co) emptyUniqSet cos

     go_tc tc = unitUniqSet tc
     go_ax ax = go_tc $ coAxiomTyCon ax

-- | Retrieve the free variables in this type, splitting them based
-- on whether they are used visibly or invisibly. Invisible ones come
-- first.
splitVisVarsOfType :: Type -> Pair TyCoVarSet
splitVisVarsOfType orig_ty = Pair invis_vars vis_vars
  where
    Pair invis_vars1 vis_vars = go orig_ty
    -- A variable counted as visible anywhere is removed from the
    -- invisible set, so the two halves of the Pair are disjoint.
    invis_vars = invis_vars1 `minusVarSet` vis_vars

    go (TyVarTy tv)      = Pair (tyCoVarsOfType $ tyVarKind tv) (unitVarSet tv)
    go (AppTy t1 t2)     = go t1 `mappend` go t2
    go (TyConApp tc tys) = go_tc tc tys
    go (FunTy _ w t1 t2) = go w `mappend` go t1 `mappend` go t2
    go (ForAllTy (Bndr tv _) ty)
      = ((`delVarSet` tv) <$> go ty)
        `mappend`
        (invisible (tyCoVarsOfType $ varType tv))
    go (LitTy {})      = mempty
    go (CastTy ty co)  = go ty `mappend` invisible (tyCoVarsOfCo co)
    go (CoercionTy co) = invisible $ tyCoVarsOfCo co

    invisible vs = Pair vs emptyVarSet

    go_tc tc tys = let (invis, vis) = partitionInvisibleTypes tc tys in
                   invisible (tyCoVarsOfTypes invis) `mappend` foldMap go vis

splitVisVarsOfTypes :: [Type] -> Pair TyCoVarSet
splitVisVarsOfTypes = foldMap splitVisVarsOfType

{-
************************************************************************
*                                                                      *
             Functions over Kinds
*                                                                      *
************************************************************************

Note [Kind Constraint and kind Type]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The kind Constraint is the kind of classes and other type constraints.
The special thing about types of kind Constraint is that
 * They are displayed with double arrow:
     f :: Ord a => a -> a
 * They are implicitly instantiated at call sites; so the type inference
   engine inserts an extra argument of type (Ord a) at every call site
   to f.

However, once type inference is over, there is *no* distinction between
Constraint and Type. Indeed we can have coercions between the two. Consider
   class C a where
     op :: a -> a
For this single-method class we may generate a newtype, which in turn
generates an axiom witnessing
    C a ~ (a -> a)
so on the left we have Constraint, and on the right we have Type.
See #7451.

Because we treat Constraint/Type differently during and after type inference,
GHC has two notions of equality that differ in whether they equate
Constraint/Type or not:

* GHC.Tc.Utils.TcType.tcEqType implements typechecker equality (see
  Note [Typechecker equality vs definitional equality] in
  GHC.Tc.Utils.TcType), which treats Constraint and Type as distinct. This
  is used during type inference. See #11715 for issues that arise from this.
* GHC.Core.TyCo.Rep.eqType implements definitional equality (see
  Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep), which
  treats Constraint and Type as equal. This is used after type inference.

Bottom line: although 'Type' and 'Constraint' are distinct TyCons, with
distinct uniques, they are treated as equal at all times except
during type inference.
-}

-- | Checks that a kind of the form 'Type', 'Constraint'
-- or @'TYPE r@ is concrete. See 'isConcrete'.
--
-- __Precondition:__ The type has kind @('TYPE' blah)@.
isFixedRuntimeRepKind :: HasDebugCallStack => Kind -> Bool
isFixedRuntimeRepKind k
  = assertPpr (isLiftedTypeKind k || _is_type) (ppr k) $
    -- the isLiftedTypeKind check is necessary b/c of Constraint
    isConcrete k
  where
    _is_type = classifiesTypeWithValues k

-- | Tests whether the given type is concrete, i.e. whether it consists
-- only of concrete type constructors, concrete type variables,
-- and applications.
--
-- See Note [Concrete types] in GHC.Tc.Utils.Concrete.
isConcrete :: Type -> Bool isConcrete = go where go ty | Just ty' <- coreView ty = go ty' go (TyVarTy tv) = isConcreteTyVar tv go (AppTy ty1 ty2) = go ty1 && go ty2 go (TyConApp tc tys) | isConcreteTyCon tc = all go tys | otherwise = False go ForAllTy{} = False go (FunTy _ w t1 t2) = go w && go (typeKind t1) && go t1 && go (typeKind t2) && go t2 go LitTy{} = True go CastTy{} = False go CoercionTy{} = False ----------------------------------------- -- | Does this classify a type allowed to have values? Responds True to things -- like *, TYPE Lifted, TYPE IntRep, TYPE v, Constraint. classifiesTypeWithValues :: Kind -> Bool -- ^ True of any sub-kind of OpenTypeKind classifiesTypeWithValues k = isJust (kindRep_maybe k) {- %************************************************************************ %* * Pretty-printing %* * %************************************************************************ Most pretty-printing is either in GHC.Core.TyCo.Rep or GHC.Iface.Type. -} -- | Does a 'TyCon' (that is applied to some number of arguments) need to be -- ascribed with an explicit kind signature to resolve ambiguity if rendered as -- a source-syntax type? -- (See @Note [When does a tycon application need an explicit kind signature?]@ -- for a full explanation of what this function checks for.) tyConAppNeedsKindSig :: Bool -- ^ Should specified binders count towards injective positions in -- the kind of the TyCon? (If you're using visible kind -- applications, then you want True here. -> TyCon -> Int -- ^ The number of args the 'TyCon' is applied to. -> Bool -- ^ Does @T t_1 ... t_n@ need a kind signature? 
(Where @n@ is the -- number of arguments) tyConAppNeedsKindSig spec_inj_pos tc n_args | LT <- listLengthCmp tc_binders n_args = False | otherwise = let (dropped_binders, remaining_binders) = splitAt n_args tc_binders result_kind = mkTyConKind remaining_binders tc_res_kind result_vars = tyCoVarsOfType result_kind dropped_vars = fvVarSet $ mapUnionFV injective_vars_of_binder dropped_binders in not (subVarSet result_vars dropped_vars) where tc_binders = tyConBinders tc tc_res_kind = tyConResKind tc -- Returns the variables that would be fixed by knowing a TyConBinder. See -- Note [When does a tycon application need an explicit kind signature?] -- for a more detailed explanation of what this function does. injective_vars_of_binder :: TyConBinder -> FV injective_vars_of_binder (Bndr tv vis) = case vis of AnonTCB VisArg -> injectiveVarsOfType False -- conservative choice (varType tv) NamedTCB argf | source_of_injectivity argf -> unitFV tv `unionFV` injectiveVarsOfType False (varType tv) _ -> emptyFV source_of_injectivity Required = True source_of_injectivity Specified = spec_inj_pos source_of_injectivity Inferred = False {- Note [When does a tycon application need an explicit kind signature?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a couple of places in GHC where we convert Core Types into forms that more closely resemble user-written syntax. These include: 1. Template Haskell Type reification (see, for instance, GHC.Tc.Gen.Splice.reify_tc_app) 2. Converting Types to LHsTypes (such as in Haddock.Convert in haddock) This conversion presents a challenge: how do we ensure that the resulting type has enough kind information so as not to be ambiguous? To better motivate this question, consider the following Core type: -- Foo :: Type -> Type type Foo = Proxy Type There is nothing ambiguous about the RHS of Foo in Core. 
But if we were to, say, reify it into a TH Type, then it's tempting to just drop the invisible Type argument and simply return `Proxy`. But now we've lost crucial kind information: we don't know if we're dealing with `Proxy Type` or `Proxy Bool` or `Proxy Int` or something else! We've inadvertently introduced ambiguity. Unlike in other situations in GHC, we can't just turn on -fprint-explicit-kinds, as we need to produce something which has the same structure as a source-syntax type. Moreover, we can't rely on visible kind application, since the first kind argument to Proxy is inferred, not specified. Our solution is to annotate certain tycons with their kinds whenever they appear in applied form in order to resolve the ambiguity. For instance, we would reify the RHS of Foo like so: type Foo = (Proxy :: Type -> Type) We need to devise an algorithm that determines precisely which tycons need these explicit kind signatures. We certainly don't want to annotate _every_ tycon with a kind signature, or else we might end up with horribly bloated types like the following: (Either :: Type -> Type -> Type) (Int :: Type) (Char :: Type) We only want to annotate tycons that absolutely require kind signatures in order to resolve some sort of ambiguity, and nothing more. Suppose we have a tycon application (T ty_1 ... ty_n). Why might this type require a kind signature? It might require it when we need to fill in any of T's omitted arguments. By "omitted argument", we mean one that is dropped when reifying ty_1 ... ty_n. Sometimes, the omitted arguments are inferred and specified arguments (e.g., TH reification in GHC.Tc.Gen.Splice), and sometimes the omitted arguments are only the inferred ones (e.g., in situations where specified arguments are reified through visible kind application). 
Regardless, the key idea is that _some_ arguments are going to be omitted after reification, and the only mechanism we have at our disposal for filling them in is through explicit kind signatures. What do we mean by "fill in"? Let's consider this small example: T :: forall {k}. Type -> (k -> Type) -> k Moreover, we have this application of T: T @{j} Int aty When we reify this type, we omit the inferred argument @{j}. Is it fixed by the other (non-inferred) arguments? Yes! If we know the kind of (aty :: blah), then we'll generate an equality constraint (kappa -> Type) and, assuming we can solve it, that will fix `kappa`. (Here, `kappa` is the unification variable that we instantiate `k` with.) Therefore, for any application of a tycon T to some arguments, the Question We Must Answer is: * Given the first n arguments of T, do the kinds of the non-omitted arguments fill in the omitted arguments? (This is still a bit hand-wavey, but we'll refine this question incrementally as we explain more of the machinery underlying this process.) Answering this question is precisely the role that the `injectiveVarsOfType` and `injective_vars_of_binder` functions exist to serve. If an omitted argument `a` appears in the set returned by `injectiveVarsOfType ty`, then knowing `ty` determines (i.e., fills in) `a`. (More on `injective_vars_of_binder` in a bit.) More formally, if `a` is in `injectiveVarsOfType ty` and S1(ty) ~ S2(ty), then S1(a) ~ S2(a), where S1 and S2 are arbitrary substitutions. For example, is `F` is a non-injective type family, then injectiveVarsOfType(Either c (Maybe (a, F b c))) = {a, c} Now that we know what this function does, here is a second attempt at the Question We Must Answer: * Given the first n arguments of T (ty_1 ... ty_n), consider the binders of T that are instantiated by non-omitted arguments. Do the injective variables of these binders fill in the remainder of T's kind? Alright, we're getting closer. 
Next, we need to clarify what the injective variables of a tycon binder are. This the role that the `injective_vars_of_binder` function serves. Here is what this function does for each form of tycon binder: * Anonymous binders are injective positions. For example, in the promoted data constructor '(:): '(:) :: forall a. a -> [a] -> [a] The second and third tyvar binders (of kinds `a` and `[a]`) are both anonymous, so if we had '(:) 'True '[], then the kinds of 'True and '[] would contribute to the kind of '(:) 'True '[]. Therefore, injective_vars_of_binder(_ :: a) = injectiveVarsOfType(a) = {a}. (Similarly, injective_vars_of_binder(_ :: [a]) = {a}.) * Named binders: - Inferred binders are never injective positions. For example, in this data type: data Proxy a Proxy :: forall {k}. k -> Type If we had Proxy 'True, then the kind of 'True would not contribute to the kind of Proxy 'True. Therefore, injective_vars_of_binder(forall {k}. ...) = {}. - Required binders are injective positions. For example, in this data type: data Wurble k (a :: k) :: k Wurble :: forall k -> k -> k The first tyvar binder (of kind `forall k`) has required visibility, so if we had Wurble (Maybe a) Nothing, then the kind of Maybe a would contribute to the kind of Wurble (Maybe a) Nothing. Hence, injective_vars_of_binder(forall a -> ...) = {a}. - Specified binders /might/ be injective positions, depending on how you approach things. Continuing the '(:) example: '(:) :: forall a. a -> [a] -> [a] Normally, the (forall a. ...) tyvar binder wouldn't contribute to the kind of '(:) 'True '[], since it's not explicitly instantiated by the user. But if visible kind application is enabled, then this is possible, since the user can write '(:) @Bool 'True '[]. (In that case, injective_vars_of_binder(forall a. ...) = {a}.) 
There are some situations where using visible kind application is appropriate and others where it is not (e.g., TH reification), so the `injective_vars_of_binder` function is parametrized by a Bool which decides if specified binders should be counted towards injective positions or not. Now that we've defined injective_vars_of_binder, we can refine the Question We Must Answer once more: * Given the first n arguments of T (ty_1 ... ty_n), consider the binders of T that are instantiated by non-omitted arguments. For each such binder b_i, take the union of all injective_vars_of_binder(b_i). Is this set a superset of the free variables of the remainder of T's kind? If the answer to this question is "no", then (T ty_1 ... ty_n) needs an explicit kind signature, since T's kind has kind variables leftover that aren't fixed by the non-omitted arguments. One last sticking point: what does "the remainder of T's kind" mean? You might be tempted to think that it corresponds to all of the arguments in the kind of T that would normally be instantiated by omitted arguments. But this isn't quite right, strictly speaking. Consider the following (silly) example: S :: forall {k}. Type -> Type And suppose we have this application of S: S Int Bool The Int argument would be omitted, and injective_vars_of_binder(_ :: Type) = {}. This is not a superset of {k}, which might suggest that (S Bool) needs an explicit kind signature. But (S Bool :: Type) doesn't actually fix `k`! This is because the kind signature only affects the /result/ of the application, not all of the individual arguments. So adding a kind signature here won't make a difference. Therefore, the fourth (and final) iteration of the Question We Must Answer is: * Given the first n arguments of T (ty_1 ... ty_n), consider the binders of T that are instantiated by non-omitted arguments. For each such binder b_i, take the union of all injective_vars_of_binder(b_i). 
  Is this set a superset of the free variables of the kind of (T ty_1 ... ty_n)?

Phew, that was a lot of work!

How can we be sure that this is correct? That is, how can we be sure that in
the event that we leave off a kind annotation, that one could infer the kind
of the tycon application from its arguments? It's essentially a proof by
induction: if we can infer the kinds of every subtree of a type, then the
whole tycon application will have an inferrable kind--unless, of course, the
remainder of the tycon application's kind has uninstantiated kind variables.

What happens if T is oversaturated? That is, if T's kind has fewer than n
arguments, in the case that the concrete application instantiates a result
kind variable with an arrow kind? If we run out of arguments, we do not
attach a kind annotation. This should be a rare case, indeed. Here is an
example:

   data T1 :: k1 -> k2 -> *
   data T2 :: k1 -> k2 -> *

   type family G (a :: k) :: k
   type instance G T1 = T2

   type instance F Char = (G T1 Bool :: (* -> *) -> *) -- F from above

Here G's kind is (forall k. k -> k), and the desugared RHS of that last
instance of F is (G (* -> (* -> *) -> *) (T1 * (* -> *)) Bool). According to
the algorithm above, there are 3 arguments to G so we should peel off 3
arguments in G's kind. But G's kind has only two arguments. This is the rare
special case, and we choose not to annotate the application of G with a kind
signature. After all, we needn't do this, since that instance would be
reified as:

   type instance F Char = G (T1 :: * -> (* -> *) -> *) Bool

So the kind of G isn't ambiguous anymore due to the explicit kind annotation
on its argument. See #8953 and test th/T8953.
-}

{- ************************************************************************
*                                                                        *
                           Multiplicities
*                                                                        *
************************************************************************

These functions would prefer to be in GHC.Core.Multiplicity, but some of
them are used elsewhere in this module, and wanted to bring their friends
here with them.
-}

unrestricted, linear, tymult :: a -> Scaled a

-- | Scale a payload by Many (the unrestricted multiplicity)
unrestricted = Scaled Many

-- | Scale a payload by One (the linear multiplicity)
linear = Scaled One

-- | Scale a payload by Many; used for type arguments in core
tymult = Scaled Many

-- | Project the payload out of a 'Scaled' thing, ignoring its multiplicity
irrelevantMult :: Scaled a -> a
irrelevantMult = scaledThing

-- | Attach a multiplicity to a payload
mkScaled :: Mult -> a -> Scaled a
mkScaled = Scaled

-- | Replace the payload of a 'Scaled' thing, keeping its multiplicity
scaledSet :: Scaled a -> b -> Scaled b
scaledSet (Scaled m _) b = Scaled m b

-- | Pattern synonym for the 'One' (linear) multiplicity
pattern One :: Mult
pattern One <- (isOneDataConTy -> True)
  where One = oneDataConTy

-- | Pattern synonym for the 'Many' (unrestricted) multiplicity
pattern Many :: Mult
pattern Many <- (isManyDataConTy -> True)
  where Many = manyDataConTy

-- | True if the multiplicity is (an application of) the 'Many' data con
isManyDataConTy :: Mult -> Bool
isManyDataConTy ty
  | Just tc <- tyConAppTyCon_maybe ty
  = tc `hasKey` manyDataConKey
isManyDataConTy _ = False

-- | True if the multiplicity is (an application of) the 'One' data con
isOneDataConTy :: Mult -> Bool
isOneDataConTy ty
  | Just tc <- tyConAppTyCon_maybe ty
  = tc `hasKey` oneDataConKey
isOneDataConTy _ = False

isLinearType :: Type -> Bool
-- ^ @isLinearType t@ returns @True@ if @t@ is a type of a (curried) function
-- where at least one argument is linear (or otherwise non-unrestricted). We use
-- this function to check whether it is safe to eta reduce an Id in CorePrep. It
-- is always safe to return 'True', because 'True' deactivates the optimisation.
isLinearType ty = case ty of FunTy _ Many _ res -> isLinearType res FunTy _ _ _ _ -> True ForAllTy _ res -> isLinearType res _ -> False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Unfold.hs0000644000000000000000000016750214472400112020321 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1994-1998 Core-syntax unfoldings Unfoldings (which can travel across module boundaries) are in Core syntax (namely @CoreExpr@s). The type @Unfolding@ sits ``above'' simply-Core-expressions unfoldings, capturing ``higher-level'' things we know about a binding, usually things that the simplifier found out (e.g., ``it's a literal''). In the corner of a @CoreUnfolding@ unfolding, you will find, unsurprisingly, a Core expression. -} {-# LANGUAGE BangPatterns #-} module GHC.Core.Unfold ( Unfolding, UnfoldingGuidance, -- Abstract types UnfoldingOpts (..), defaultUnfoldingOpts, updateCreationThreshold, updateUseThreshold, updateFunAppDiscount, updateDictDiscount, updateVeryAggressive, updateCaseScaling, updateCaseThreshold, updateReportPrefix, ArgSummary(..), couldBeSmallEnoughToInline, inlineBoringOk, smallEnoughToInline, callSiteInline, CallCtxt(..), calcUnfoldingGuidance ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Core import GHC.Core.Utils import GHC.Types.Id import GHC.Core.DataCon import GHC.Types.Literal import GHC.Builtin.PrimOps import GHC.Types.Id.Info import GHC.Types.Basic ( Arity ) import GHC.Core.Type import GHC.Builtin.Names import GHC.Builtin.Types.Prim ( realWorldStatePrimTy ) import GHC.Data.Bag import GHC.Utils.Logger import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Types.ForeignCall import GHC.Types.Name import GHC.Types.Tickish import qualified Data.ByteString as BS import Data.List (isPrefixOf) -- | Unfolding options data UnfoldingOpts = UnfoldingOpts { unfoldingCreationThreshold :: !Int -- ^ Threshold above which unfoldings are not *created* , unfoldingUseThreshold :: !Int -- ^ 
Threshold above which unfoldings are not *inlined* , unfoldingFunAppDiscount :: !Int -- ^ Discount for lambdas that are used (applied) , unfoldingDictDiscount :: !Int -- ^ Discount for dictionaries , unfoldingVeryAggressive :: !Bool -- ^ Force inlining in many more cases , unfoldingCaseThreshold :: !Int -- ^ Don't consider depth up to x , unfoldingCaseScaling :: !Int -- ^ Penalize depth with 1/x , unfoldingReportPrefix :: !(Maybe String) -- ^ Only report inlining decisions for names with this prefix } defaultUnfoldingOpts :: UnfoldingOpts defaultUnfoldingOpts = UnfoldingOpts { unfoldingCreationThreshold = 750 -- The unfoldingCreationThreshold threshold must be reasonably high -- to take account of possible discounts. -- E.g. 450 is not enough in 'fulsom' for Interval.sqr to -- inline into Csg.calc (The unfolding for sqr never makes it -- into the interface file.) , unfoldingUseThreshold = 90 -- Last adjusted upwards in #18282, when I reduced -- the result discount for constructors. , unfoldingFunAppDiscount = 60 -- Be fairly keen to inline a function if that means -- we'll be able to pick the right method from a dictionary , unfoldingDictDiscount = 30 -- Be fairly keen to inline a function if that means -- we'll be able to pick the right method from a dictionary , unfoldingVeryAggressive = False -- Only apply scaling once we are deeper than threshold cases -- in an RHS. 
, unfoldingCaseThreshold = 2 -- Penalize depth with (size*depth)/scaling , unfoldingCaseScaling = 30 -- Don't filter inlining decision reports , unfoldingReportPrefix = Nothing } -- Helpers for "GHC.Driver.Session" updateCreationThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateCreationThreshold n opts = opts { unfoldingCreationThreshold = n } updateUseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateUseThreshold n opts = opts { unfoldingUseThreshold = n } updateFunAppDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateFunAppDiscount n opts = opts { unfoldingFunAppDiscount = n } updateDictDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateDictDiscount n opts = opts { unfoldingDictDiscount = n } updateVeryAggressive :: Bool -> UnfoldingOpts -> UnfoldingOpts updateVeryAggressive n opts = opts { unfoldingVeryAggressive = n } updateCaseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateCaseThreshold n opts = opts { unfoldingCaseThreshold = n } updateCaseScaling :: Int -> UnfoldingOpts -> UnfoldingOpts updateCaseScaling n opts = opts { unfoldingCaseScaling = n } updateReportPrefix :: Maybe String -> UnfoldingOpts -> UnfoldingOpts updateReportPrefix n opts = opts { unfoldingReportPrefix = n } {- Note [Occurrence analysis of unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We do occurrence-analysis of unfoldings once and for all, when the unfolding is built, rather than each time we inline them. But given this decision it's vital that we do *always* do it. Consider this unfolding \x -> letrec { f = ...g...; g* = f } in body where g* is (for some strange reason) the loop breaker. If we don't occ-anal it when reading it in, we won't mark g as a loop breaker, and we may inline g entirely in body, dropping its binding, and leaving the occurrence in f out of scope. This happened in #8892, where the unfolding in question was a DFun unfolding. 
But more generally, the simplifier is designed on the basis that it is looking at occurrence-analysed expressions, so better ensure that they actually are. Note [Calculate unfolding guidance on the non-occ-anal'd expression] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notice that we give the non-occur-analysed expression to calcUnfoldingGuidance. In some ways it'd be better to occur-analyse first; for example, sometimes during simplification, there's a large let-bound thing which has been substituted, and so is now dead; so 'expr' contains two copies of the thing while the occurrence-analysed expression doesn't. Nevertheless, we *don't* and *must not* occ-analyse before computing the size because a) The size computation bales out after a while, whereas occurrence analysis does not. b) Residency increases sharply if you occ-anal first. I'm not 100% sure why, but it's a large effect. Compiling Cabal went from residency of 534M to over 800M with this one change. This can occasionally mean that the guidance is very pessimistic; it gets fixed up next round. And it should be rare, because large let-bound things that are dead are usually caught by preInlineUnconditionally ************************************************************************ * * \subsection{The UnfoldingGuidance type} * * ************************************************************************ -} inlineBoringOk :: CoreExpr -> Bool -- See Note [INLINE for small functions] -- True => the result of inlining the expression is -- no bigger than the expression itself -- eg (\x y -> f y x) -- This is a quick and dirty version. 
It doesn't attempt -- to deal with (\x y z -> x (y z)) -- The really important one is (x `cast` c) inlineBoringOk e = go 0 e where go :: Int -> CoreExpr -> Bool go credit (Lam x e) | isId x = go (credit+1) e | otherwise = go credit e -- See Note [Count coercion arguments in boring contexts] go credit (App f (Type {})) = go credit f go credit (App f a) | credit > 0 , exprIsTrivial a = go (credit-1) f go credit (Tick _ e) = go credit e -- dubious go credit (Cast e _) = go credit e go credit (Case scrut _ _ [Alt _ _ rhs]) -- See Note [Inline unsafeCoerce] | isUnsafeEqualityProof scrut = go credit rhs go _ (Var {}) = boringCxtOk go _ _ = boringCxtNotOk calcUnfoldingGuidance :: UnfoldingOpts -> Bool -- Definitely a top-level, bottoming binding -> CoreExpr -- Expression to look at -> UnfoldingGuidance calcUnfoldingGuidance opts is_top_bottoming (Tick t expr) | not (tickishIsCode t) -- non-code ticks don't matter for unfolding = calcUnfoldingGuidance opts is_top_bottoming expr calcUnfoldingGuidance opts is_top_bottoming expr = case sizeExpr opts bOMB_OUT_SIZE val_bndrs body of TooBig -> UnfNever SizeIs size cased_bndrs scrut_discount | uncondInline expr n_val_bndrs size -> UnfWhen { ug_unsat_ok = unSaturatedOk , ug_boring_ok = boringCxtOk , ug_arity = n_val_bndrs } -- Note [INLINE for small functions] | is_top_bottoming -> UnfNever -- See Note [Do not inline top-level bottoming functions] | otherwise -> UnfIfGoodArgs { ug_args = map (mk_discount cased_bndrs) val_bndrs , ug_size = size , ug_res = scrut_discount } where (bndrs, body) = collectBinders expr bOMB_OUT_SIZE = unfoldingCreationThreshold opts -- Bomb out if size gets bigger than this val_bndrs = filter isId bndrs n_val_bndrs = length val_bndrs mk_discount :: Bag (Id,Int) -> Id -> Int mk_discount cbs bndr = foldl' combine 0 cbs where combine acc (bndr', disc) | bndr == bndr' = acc `plus_disc` disc | otherwise = acc plus_disc :: Int -> Int -> Int plus_disc | isFunTy (idType bndr) = max | otherwise = (+) -- See Note 
[Function and non-function discounts] {- Note [Inline unsafeCoerce] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We really want to inline unsafeCoerce, even when applied to boring arguments. It doesn't look as if its RHS is smaller than the call unsafeCoerce x = case unsafeEqualityProof @a @b of UnsafeRefl -> x but that case is discarded -- see Note [Implementing unsafeCoerce] in base:Unsafe.Coerce. Moreover, if we /don't/ inline it, we may be left with f (unsafeCoerce x) which will build a thunk -- bad, bad, bad. Conclusion: we really want inlineBoringOk to be True of the RHS of unsafeCoerce. This is (U4) in Note [Implementing unsafeCoerce]. Note [Computing the size of an expression] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The basic idea of sizeExpr is obvious enough: count nodes. But getting the heuristics right has taken a long time. Here's the basic strategy: * Variables, literals: 0 (Exception for string literals, see litSize.) * Function applications (f e1 .. en): 1 + #value args * Constructor applications: 1, regardless of #args * Let(rec): 1 + size of components * Note, cast: 0 Examples Size Term -------------- 0 42# 0 x 0 True 2 f x 1 Just x 4 f (g x) Notice that 'x' counts 0, while (f x) counts 2. That's deliberate: there's a function call to account for. Notice also that constructor applications are very cheap, because exposing them to a caller is so valuable. [25/5/11] All sizes are now multiplied by 10, except for primops (which have sizes like 1 or 4. This makes primops look fantastically cheap, and seems to be almost universally beneficial. Done partly as a result of #4978. Note [Do not inline top-level bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The FloatOut pass has gone to some trouble to float out calls to 'error' and similar friends. See Note [Bottoming floats] in GHC.Core.Opt.SetLevels. Do not re-inline them! But we *do* still inline if they are very small (the uncondInline stuff). 
Note [INLINE for small functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider        {-# INLINE f #-}
                f x = Just x
                g y = f y
Then f's RHS is no larger than its LHS, so we should inline it into
even the most boring context.  In general, if the function is
sufficiently small that its body is as small as the call itself, then
inline unconditionally, regardless of how boring the context is.

Things to note:

(1) We inline *unconditionally* if inlined thing is smaller (using sizeExpr)
    than the thing it's replacing.  Notice that
      (f x) --> (g 3)             -- YES, unconditionally
      (f x) --> x : []            -- YES, *even though* there are two
                                  --      arguments to the cons
      x     --> g 3               -- NO
      x     --> Just v            -- NO

    It's very important not to unconditionally replace a variable by
    a non-atomic term.

(2) We do this even if the thing isn't saturated, else we end up with the
    silly situation that
       f x y = x
       ...map (f 3)...
    doesn't inline.  Even in a boring context, inlining without being
    saturated will give a lambda instead of a PAP, and will be more
    efficient at runtime.

(3) However, when the function's arity > 0, we do insist that it has at
    least one value argument at the call site.  (This check is made in
    the UnfWhen case of callSiteInline.)  Otherwise we find this:
         f = /\a \x:a. x
         d = /\b. MkD (f b)
    If we inline f here we get
         d = /\b. MkD (\x:b. x)
    and then prepareRhs floats out the argument, abstracting the type
    variables, so we end up with the original again!

(4) We must be much more cautious about arity-zero things. Consider
       let x = y +# z in ...
    In *size* terms primops look very small, because they generate a
    single instruction, but we do not want to unconditionally replace
    every occurrence of x with (y +# z).  So we only do the
    unconditional-inline thing for *trivial* expressions.
NB: you might think that PostInlineUnconditionally would do this but it doesn't fire for top-level things; see GHC.Core.Opt.Simplify.Utils Note [Top level and postInlineUnconditionally] Note [Count coercion arguments in boring contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In inlineBoringOK, we ignore type arguments when deciding whether an expression is okay to inline into boring contexts. This is good, since if we have a definition like let y = x @Int in f y y there’s no reason not to inline y at both use sites — no work is actually duplicated. It may seem like the same reasoning applies to coercion arguments, and indeed, in #17182 we changed inlineBoringOK to treat coercions the same way. However, this isn’t a good idea: unlike type arguments, which have no runtime representation, coercion arguments *do* have a runtime representation (albeit the zero-width VoidRep, see Note [Coercion tokens] in "GHC.CoreToStg"). This caused trouble in #17787 for DataCon wrappers for nullary GADT constructors: the wrappers would be inlined and each use of the constructor would lead to a separate allocation instead of just sharing the wrapper closure. The solution: don’t ignore coercion arguments after all. -} uncondInline :: CoreExpr -> Arity -> Int -> Bool -- Inline unconditionally if there no size increase -- Size of call is arity (+1 for the function) -- See Note [INLINE for small functions] uncondInline rhs arity size | arity > 0 = size <= 10 * (arity + 1) -- See Note [INLINE for small functions] (1) | otherwise = exprIsTrivial rhs -- See Note [INLINE for small functions] (4) sizeExpr :: UnfoldingOpts -> Int -- Bomb out if it gets bigger than this -> [Id] -- Arguments; we're interested in which of these -- get case'd -> CoreExpr -> ExprSize -- Note [Computing the size of an expression] -- Forcing bOMB_OUT_SIZE early prevents repeated -- unboxing of the Int argument. 
sizeExpr opts !bOMB_OUT_SIZE top_args expr = size_up expr where size_up (Cast e _) = size_up e size_up (Tick _ e) = size_up e size_up (Type _) = sizeZero -- Types cost nothing size_up (Coercion _) = sizeZero size_up (Lit lit) = sizeN (litSize lit) size_up (Var f) | isRealWorldId f = sizeZero -- Make sure we get constructor discounts even -- on nullary constructors | otherwise = size_up_call f [] 0 size_up (App fun arg) | isTyCoArg arg = size_up fun | otherwise = size_up arg `addSizeNSD` size_up_app fun [arg] (if isRealWorldExpr arg then 1 else 0) size_up (Lam b e) | isId b && not (isRealWorldId b) = lamScrutDiscount opts (size_up e `addSizeN` 10) | otherwise = size_up e size_up (Let (NonRec binder rhs) body) = size_up_rhs (binder, rhs) `addSizeNSD` size_up body `addSizeN` size_up_alloc binder size_up (Let (Rec pairs) body) = foldr (addSizeNSD . size_up_rhs) (size_up body `addSizeN` sum (map (size_up_alloc . fst) pairs)) pairs size_up (Case e _ _ alts) | null alts = size_up e -- case e of {} never returns, so take size of scrutinee size_up (Case e _ _ alts) -- Now alts is non-empty | Just v <- is_top_arg e -- We are scrutinising an argument variable = let alt_sizes = map size_up_alt alts -- alts_size tries to compute a good discount for -- the case when we are scrutinising an argument variable alts_size (SizeIs tot tot_disc tot_scrut) -- Size of all alternatives (SizeIs max _ _) -- Size of biggest alternative = SizeIs tot (unitBag (v, 20 + tot - max) `unionBags` tot_disc) tot_scrut -- If the variable is known, we produce a -- discount that will take us back to 'max', -- the size of the largest alternative The -- 1+ is a little discount for reduced -- allocation in the caller -- -- Notice though, that we return tot_disc, -- the total discount from all branches. I -- think that's right. 
alts_size tot_size _ = tot_size in alts_size (foldr1 addAltSize alt_sizes) -- alts is non-empty (foldr1 maxSize alt_sizes) -- Good to inline if an arg is scrutinised, because -- that may eliminate allocation in the caller -- And it eliminates the case itself where is_top_arg (Var v) | v `elem` top_args = Just v is_top_arg (Cast e _) = is_top_arg e is_top_arg _ = Nothing size_up (Case e _ _ alts) = size_up e `addSizeNSD` foldr (addAltSize . size_up_alt) case_size alts where case_size | is_inline_scrut e, lengthAtMost alts 1 = sizeN (-10) | otherwise = sizeZero -- Normally we don't charge for the case itself, but -- we charge one per alternative (see size_up_alt, -- below) to account for the cost of the info table -- and comparisons. -- -- However, in certain cases (see is_inline_scrut -- below), no code is generated for the case unless -- there are multiple alts. In these cases we -- subtract one, making the first alt free. -- e.g. case x# +# y# of _ -> ... should cost 1 -- case touch# x# of _ -> ... should cost 0 -- (see #4978) -- -- I would like to not have the "lengthAtMost alts 1" -- condition above, but without that some programs got worse -- (spectral/hartel/event and spectral/para). I don't fully -- understand why. 
(SDM 24/5/11) -- unboxed variables, inline primops and unsafe foreign calls -- are all "inline" things: is_inline_scrut (Var v) = isUnliftedType (idType v) -- isUnliftedType is OK here: scrutinees have a fixed RuntimeRep (search for FRRCase) is_inline_scrut scrut | (Var f, _) <- collectArgs scrut = case idDetails f of FCallId fc -> not (isSafeForeignCall fc) PrimOpId op -> not (primOpOutOfLine op) _other -> False | otherwise = False size_up_rhs (bndr, rhs) | Just join_arity <- isJoinId_maybe bndr -- Skip arguments to join point , (_bndrs, body) <- collectNBinders join_arity rhs = size_up body | otherwise = size_up rhs ------------ -- size_up_app is used when there's ONE OR MORE value args size_up_app (App fun arg) args voids | isTyCoArg arg = size_up_app fun args voids | isRealWorldExpr arg = size_up_app fun (arg:args) (voids + 1) | otherwise = size_up arg `addSizeNSD` size_up_app fun (arg:args) voids size_up_app (Var fun) args voids = size_up_call fun args voids size_up_app (Tick _ expr) args voids = size_up_app expr args voids size_up_app (Cast expr _) args voids = size_up_app expr args voids size_up_app other args voids = size_up other `addSizeN` callSize (length args) voids -- if the lhs is not an App or a Var, or an invisible thing like a -- Tick or Cast, then we should charge for a complete call plus the -- size of the lhs itself. 
------------ size_up_call :: Id -> [CoreExpr] -> Int -> ExprSize size_up_call fun val_args voids = case idDetails fun of FCallId _ -> sizeN (callSize (length val_args) voids) DataConWorkId dc -> conSize dc (length val_args) PrimOpId op -> primOpSize op (length val_args) ClassOpId _ -> classOpSize opts top_args val_args _ -> funSize opts top_args fun (length val_args) voids ------------ size_up_alt (Alt _con _bndrs rhs) = size_up rhs `addSizeN` 10 -- Don't charge for args, so that wrappers look cheap -- (See comments about wrappers with Case) -- -- IMPORTANT: *do* charge 1 for the alternative, else we -- find that giant case nests are treated as practically free -- A good example is Foreign.C.Error.errnoToIOError ------------ -- Cost to allocate binding with given binder size_up_alloc bndr | isTyVar bndr -- Doesn't exist at runtime || isJoinId bndr -- Not allocated at all || isUnliftedType (idType bndr) -- Doesn't live in heap -- OK to call isUnliftedType: binders have a fixed RuntimeRep (search for FRRBinder) = 0 | otherwise = 10 ------------ -- These addSize things have to be here because -- I don't want to give them bOMB_OUT_SIZE as an argument addSizeN TooBig _ = TooBig addSizeN (SizeIs n xs d) m = mkSizeIs bOMB_OUT_SIZE (n + m) xs d -- addAltSize is used to add the sizes of case alternatives addAltSize TooBig _ = TooBig addAltSize _ TooBig = TooBig addAltSize (SizeIs n1 xs d1) (SizeIs n2 ys d2) = mkSizeIs bOMB_OUT_SIZE (n1 + n2) (xs `unionBags` ys) (d1 + d2) -- Note [addAltSize result discounts] -- This variant ignores the result discount from its LEFT argument -- It's used when the second argument isn't part of the result addSizeNSD TooBig _ = TooBig addSizeNSD _ TooBig = TooBig addSizeNSD (SizeIs n1 xs _) (SizeIs n2 ys d2) = mkSizeIs bOMB_OUT_SIZE (n1 + n2) (xs `unionBags` ys) d2 -- Ignore d1 isRealWorldId id = idType id `eqType` realWorldStatePrimTy -- an expression of type State# RealWorld must be a variable isRealWorldExpr (Var id) = isRealWorldId id 
isRealWorldExpr (Tick _ e) = isRealWorldExpr e isRealWorldExpr _ = False -- | Finds a nominal size of a string literal. litSize :: Literal -> Int -- Used by GHC.Core.Unfold.sizeExpr litSize (LitNumber LitNumBigNat _) = 100 litSize (LitString str) = 10 + 10 * ((BS.length str + 3) `div` 4) -- If size could be 0 then @f "x"@ might be too small -- [Sept03: make literal strings a bit bigger to avoid fruitless -- duplication of little strings] litSize _other = 0 -- Must match size of nullary constructors -- Key point: if x |-> 4, then x must inline unconditionally -- (eg via case binding) classOpSize :: UnfoldingOpts -> [Id] -> [CoreExpr] -> ExprSize -- See Note [Conlike is interesting] classOpSize _ _ [] = sizeZero classOpSize opts top_args (arg1 : other_args) = SizeIs size arg_discount 0 where size = 20 + (10 * length other_args) -- If the class op is scrutinising a lambda bound dictionary then -- give it a discount, to encourage the inlining of this function -- The actual discount is rather arbitrarily chosen arg_discount = case arg1 of Var dict | dict `elem` top_args -> unitBag (dict, unfoldingDictDiscount opts) _other -> emptyBag -- | The size of a function call callSize :: Int -- ^ number of value args -> Int -- ^ number of value args that are void -> Int callSize n_val_args voids = 10 * (1 + n_val_args - voids) -- The 1+ is for the function itself -- Add 1 for each non-trivial arg; -- the allocation cost, as in let(rec) -- | The size of a jump to a join point jumpSize :: Int -- ^ number of value args -> Int -- ^ number of value args that are void -> Int jumpSize n_val_args voids = 2 * (1 + n_val_args - voids) -- A jump is 20% the size of a function call. Making jumps free reopens -- bug #6048, but making them any more expensive loses a 21% improvement in -- spectral/puzzle. TODO Perhaps adjusting the default threshold would be a -- better solution? 
funSize :: UnfoldingOpts -> [Id] -> Id -> Int -> Int -> ExprSize -- Size for functions that are not constructors or primops -- Note [Function applications] funSize opts top_args fun n_val_args voids | fun `hasKey` buildIdKey = buildSize | fun `hasKey` augmentIdKey = augmentSize | otherwise = SizeIs size arg_discount res_discount where some_val_args = n_val_args > 0 is_join = isJoinId fun size | is_join = jumpSize n_val_args voids | not some_val_args = 0 | otherwise = callSize n_val_args voids -- DISCOUNTS -- See Note [Function and non-function discounts] arg_discount | some_val_args && fun `elem` top_args = unitBag (fun, unfoldingFunAppDiscount opts) | otherwise = emptyBag -- If the function is an argument and is applied -- to some values, give it an arg-discount res_discount | idArity fun > n_val_args = unfoldingFunAppDiscount opts | otherwise = 0 -- If the function is partially applied, show a result discount -- XXX maybe behave like ConSize for eval'd variable conSize :: DataCon -> Int -> ExprSize conSize dc n_val_args | n_val_args == 0 = SizeIs 0 emptyBag 10 -- Like variables -- See Note [Unboxed tuple size and result discount] | isUnboxedTupleDataCon dc = SizeIs 0 emptyBag 10 -- See Note [Constructor size and result discount] | otherwise = SizeIs 10 emptyBag 10 {- Note [Constructor size and result discount] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Treat a constructors application as size 10, regardless of how many arguments it has; we are keen to expose them (and we charge separately for their args). We can't treat them as size zero, else we find that (Just x) has size 0, which is the same as a lone variable; and hence 'v' will always be replaced by (Just x), where v is bound to Just x. The "result discount" is applied if the result of the call is scrutinised (say by a case). For a constructor application that will mean the constructor application will disappear, so we don't need to charge it to the function. 
So the discount should at least match the cost of the constructor application, namely 10. Historical note 1: Until Jun 2020 we gave it a "bit of extra incentive" via a discount of 10*(1 + n_val_args), but that was FAR too much (#18282). In particular, consider a huge case tree like let r = case y1 of Nothing -> B1 a b c Just v1 -> case y2 of Nothing -> B1 c b a Just v2 -> ... If conSize gives a cost of 10 (regardless of n_val_args) and a discount of 10, that'll make each alternative RHS cost zero. We charge 10 for each case alternative (see size_up_alt). If we give a bigger discount (say 20) in conSize, we'll make the case expression cost *nothing*, and that can make a huge case tree cost nothing. This leads to massive, sometimes exponenial inlinings (#18282). In short, don't give a discount that give a negative size to a sub-expression! Historical note 2: Much longer ago, Simon M tried a MUCH bigger discount: (10 * (10 + n_val_args)), and said it was an "unambiguous win", but its terribly dangerous because a function with many many case branches, each finishing with a constructor, can have an arbitrarily large discount. This led to terrible code bloat: see #6099. Note [Unboxed tuple size and result discount] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ However, unboxed tuples count as size zero. I found occasions where we had f x y z = case op# x y z of { s -> (# s, () #) } and f wasn't getting inlined. I tried giving unboxed tuples a *result discount* of zero (see the commented-out line). Why? When returned as a result they do not allocate, so maybe we don't want to charge so much for them. If you have a non-zero discount here, we find that workers often get inlined back into wrappers, because it look like f x = case $wf x of (# a,b #) -> (a,b) and we are keener because of the case. However while this change shrank binary sizes by 0.5% it also made spectral/boyer allocate 5% more. All other changes were very small. 
So it's not a big deal but I didn't adopt the idea. When fixing #18282 (see Note [Constructor size and result discount]) I changed the result discount to be just 10, not 10*(1+n_val_args). Note [Function and non-function discounts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We want a discount if the function is applied. A good example is monadic combinators with continuation arguments, where inlining is quite important. But we don't want a big discount when a function is called many times (see the detailed comments with #6048) because if the function is big it won't be inlined at its many call sites and no benefit results. Indeed, we can get exponentially big inlinings this way; that is what #6048 is about. On the other hand, for data-valued arguments, if there are lots of case expressions in the body, each one will get smaller if we apply the function to a constructor application, so we *want* a big discount if the argument is scrutinised by many case expressions. Conclusion: - For functions, take the max of the discounts - For data values, take the sum of the discounts Note [Literal integer size] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Literal integers *can* be big (mkInteger [...coefficients...]), but need not be (IS n). We just use an arbitrary big-ish constant here so that, in particular, we don't inline top-level defns like n = IS 5 There's no point in doing so -- any optimisations will see the IS through n's unfolding. Nor will a big size inhibit unfoldings functions that mention a literal Integer, because the float-out pass will float all those constants to top level. 
-}

-- | Size of a primop application.  Out-of-line primops are additionally
-- charged for their value arguments, since they entail a real call;
-- inline primops cost only their (small) code size.  See
-- Note [Computing the size of an expression] for why primops look cheap.
primOpSize :: PrimOp -> Int -> ExprSize
primOpSize op n_val_args
 = if primOpOutOfLine op
      then sizeN (op_size + n_val_args)
      else sizeN op_size
 where
   op_size = primOpCodeSize op

buildSize :: ExprSize
buildSize = SizeIs 0 emptyBag 40
        -- We really want to inline applications of build
        -- build t (\cn -> e) should cost only the cost of e (because build will be inlined later)
        -- Indeed, we should add a result_discount because build is
        -- very like a constructor.  We don't bother to check that the
        -- build is saturated (it usually is).  The size of 0 makes the
        -- \c n and the build application free; the result discount of
        -- 40 is somewhat arbitrary.

augmentSize :: ExprSize
augmentSize = SizeIs 0 emptyBag 40
        -- Ditto (augment t (\cn -> e) ys) should cost only the cost of
        -- e plus ys.  The size of 0 accounts for the \cn; the result
        -- discount of 40 mirrors buildSize above.

-- When we return a lambda, give a discount if it's used (applied)
lamScrutDiscount :: UnfoldingOpts -> ExprSize -> ExprSize
lamScrutDiscount opts (SizeIs n vs _) = SizeIs n vs (unfoldingFunAppDiscount opts)
lamScrutDiscount _      TooBig        = TooBig

{- Note [addAltSize result discounts]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When adding the size of alternatives, we *add* the result discounts
too, rather than take the *maximum*.  For a multi-branch case, this
gives a discount for each branch that returns a constructor, making us
keener to inline.  I did try using 'max' instead, but it makes nofib
'rewrite' and 'puzzle' allocate significantly more, and didn't make
binary sizes shrink significantly either.

Note [Discounts and thresholds]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Constants for discounts and thresholds are defined in 'UnfoldingOpts'. They are:

unfoldingCreationThreshold
     At a definition site, if the unfolding is bigger than this, we
     may discard it altogether

unfoldingUseThreshold
     At a call site, if the unfolding, less discounts, is smaller than
     this, then it's small enough to inline

unfoldingDictDiscount
     The discount for each occurrence of a dictionary argument
     as an argument of a class method.
Should be pretty small else big functions may get inlined unfoldingFunAppDiscount Discount for a function argument that is applied. Quite large, because if we inline we avoid the higher-order call. unfoldingVeryAggressive If True, the compiler ignores all the thresholds and inlines very aggressively. It still adheres to arity, simplifier phase control and loop breakers. Historical Note: Before April 2020 we had another factor, ufKeenessFactor, which would scale the discounts before they were subtracted from the size. This was justified with the following comment: -- We multiply the raw discounts (args_discount and result_discount) -- ty opt_UnfoldingKeenessFactor because the former have to do with -- *size* whereas the discounts imply that there's some extra -- *efficiency* to be gained (e.g. beta reductions, case reductions) -- by inlining. However, this is highly suspect since it means that we subtract a *scaled* size from an absolute size, resulting in crazy (e.g. negative) scores in some cases (#15304). We consequently killed off ufKeenessFactor and bumped up the ufUseThreshold to compensate. 
Note [Function applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a function application (f a b) - If 'f' is an argument to the function being analysed, and there's at least one value arg, record a FunAppDiscount for f - If the application if a PAP (arity > 2 in this example) record a *result* discount (because inlining with "extra" args in the call may mean that we now get a saturated application) Code for manipulating sizes -} -- | The size of a candidate expression for unfolding data ExprSize = TooBig | SizeIs { _es_size_is :: {-# UNPACK #-} !Int -- ^ Size found , _es_args :: !(Bag (Id,Int)) -- ^ Arguments cased herein, and discount for each such , _es_discount :: {-# UNPACK #-} !Int -- ^ Size to subtract if result is scrutinised by a case -- expression } instance Outputable ExprSize where ppr TooBig = text "TooBig" ppr (SizeIs a _ c) = brackets (int a <+> int c) -- subtract the discount before deciding whether to bale out. eg. we -- want to inline a large constructor application into a selector: -- tup = (a_1, ..., a_99) -- x = case tup of ... -- mkSizeIs :: Int -> Int -> Bag (Id, Int) -> Int -> ExprSize mkSizeIs max n xs d | (n - d) > max = TooBig | otherwise = SizeIs n xs d maxSize :: ExprSize -> ExprSize -> ExprSize maxSize TooBig _ = TooBig maxSize _ TooBig = TooBig maxSize s1@(SizeIs n1 _ _) s2@(SizeIs n2 _ _) | n1 > n2 = s1 | otherwise = s2 sizeZero :: ExprSize sizeN :: Int -> ExprSize sizeZero = SizeIs 0 emptyBag 0 sizeN n = SizeIs n emptyBag 0 {- ************************************************************************ * * \subsection[considerUnfolding]{Given all the info, do (not) do the unfolding} * * ************************************************************************ We use 'couldBeSmallEnoughToInline' to avoid exporting inlinings that we ``couldn't possibly use'' on the other side. Can be overridden w/ flaggery. Just the same as smallEnoughToInline, except that it has no actual arguments. 
-} couldBeSmallEnoughToInline :: UnfoldingOpts -> Int -> CoreExpr -> Bool couldBeSmallEnoughToInline opts threshold rhs = case sizeExpr opts threshold [] body of TooBig -> False _ -> True where (_, body) = collectBinders rhs ---------------- smallEnoughToInline :: UnfoldingOpts -> Unfolding -> Bool smallEnoughToInline opts (CoreUnfolding {uf_guidance = guidance}) = case guidance of UnfIfGoodArgs {ug_size = size} -> size <= unfoldingUseThreshold opts UnfWhen {} -> True UnfNever -> False smallEnoughToInline _ _ = False {- ************************************************************************ * * \subsection{callSiteInline} * * ************************************************************************ This is the key function. It decides whether to inline a variable at a call site callSiteInline is used at call sites, so it is a bit more generous. It's a very important function that embodies lots of heuristics. A non-WHNF can be inlined if it doesn't occur inside a lambda, and occurs exactly once or occurs once in each branch of a case and is small If the thing is in WHNF, there's no danger of duplicating work, so we can inline if it occurs once, or is small NOTE: we don't want to inline top-level functions that always diverge. It just makes the code bigger. Tt turns out that the convenient way to prevent them inlining is to give them a NOINLINE pragma, which we do in StrictAnal.addStrictnessInfoToTopId -} data ArgSummary = TrivArg -- Nothing interesting | NonTrivArg -- Arg has structure | ValueArg -- Arg is a con-app or PAP -- ..or con-like. 
Note [Conlike is interesting] instance Outputable ArgSummary where ppr TrivArg = text "TrivArg" ppr NonTrivArg = text "NonTrivArg" ppr ValueArg = text "ValueArg" nonTriv :: ArgSummary -> Bool nonTriv TrivArg = False nonTriv _ = True data CallCtxt = BoringCtxt | RhsCtxt -- Rhs of a let-binding; see Note [RHS of lets] | DiscArgCtxt -- Argument of a function with non-zero arg discount | RuleArgCtxt -- We are somewhere in the argument of a function with rules | ValAppCtxt -- We're applied to at least one value arg -- This arises when we have ((f x |> co) y) -- Then the (f x) has argument 'x' but in a ValAppCtxt | CaseCtxt -- We're the scrutinee of a case -- that decomposes its scrutinee instance Outputable CallCtxt where ppr CaseCtxt = text "CaseCtxt" ppr ValAppCtxt = text "ValAppCtxt" ppr BoringCtxt = text "BoringCtxt" ppr RhsCtxt = text "RhsCtxt" ppr DiscArgCtxt = text "DiscArgCtxt" ppr RuleArgCtxt = text "RuleArgCtxt" callSiteInline :: Logger -> UnfoldingOpts -> Int -- Case depth -> Id -- The Id -> Bool -- True <=> unfolding is active -> Bool -- True if there are no arguments at all (incl type args) -> [ArgSummary] -- One for each value arg; True if it is interesting -> CallCtxt -- True <=> continuation is interesting -> Maybe CoreExpr -- Unfolding, if any callSiteInline logger opts !case_depth id active_unfolding lone_variable arg_infos cont_info = case idUnfolding id of -- idUnfolding checks for loop-breakers, returning NoUnfolding -- Things with an INLINE pragma may have an unfolding *and* -- be a loop breaker (maybe the knot is not yet untied) CoreUnfolding { uf_tmpl = unf_template , uf_cache = unf_cache , uf_guidance = guidance } | active_unfolding -> tryUnfolding logger opts case_depth id lone_variable arg_infos cont_info unf_template unf_cache guidance | otherwise -> traceInline logger opts id "Inactive unfolding:" (ppr id) Nothing NoUnfolding -> Nothing BootUnfolding -> Nothing OtherCon {} -> Nothing DFunUnfolding {} -> Nothing -- Never unfold a DFun -- | 
Report the inlining of an identifier's RHS to the user, if requested. traceInline :: Logger -> UnfoldingOpts -> Id -> String -> SDoc -> a -> a traceInline logger opts inline_id str doc result -- We take care to ensure that doc is used in only one branch, ensuring that -- the simplifier can push its allocation into the branch. See Note [INLINE -- conditional tracing utilities]. | enable = logTraceMsg logger str doc result | otherwise = result where enable | logHasDumpFlag logger Opt_D_dump_verbose_inlinings = True | Just prefix <- unfoldingReportPrefix opts = prefix `isPrefixOf` occNameString (getOccName inline_id) | otherwise = False {-# INLINE traceInline #-} -- see Note [INLINE conditional tracing utilities] {- Note [Avoid inlining into deeply nested cases] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a function f like this: f arg1 arg2 = case ... ... -> g arg1 ... -> g arg2 This function is small. So should be safe to inline. However sometimes this doesn't quite work out like that. Consider this code: f1 arg1 arg2 ... = ... case _foo of alt1 -> ... f2 arg1 ... alt2 -> ... f2 arg2 ... f2 arg1 arg2 ... = ... case _foo of alt1 -> ... f3 arg1 ... alt2 -> ... f3 arg2 ... f3 arg1 arg2 ... = ... ... repeats up to n times. And then f1 is applied to some arguments: foo = ... f1 ... Initially f2..fn are not interesting to inline so we don't. However we see that f1 is applied to interesting args. So it's an obvious choice to inline those: foo = ... case _foo of alt1 -> ... f2 ... alt2 -> ... f2 ... As a result we go and inline f2 both mentions of f2 in turn are now applied to interesting arguments and f2 is small: foo = ... case _foo of alt1 -> ... case _foo of alt1 -> ... f3 ... alt2 -> ... f3 ... alt2 -> ... case _foo of alt1 -> ... f3 ... alt2 -> ... f3 ... The same thing happens for each binding up to f_n, duplicating the amount of inlining done in each step. Until at some point we are either done or run out of simplifier ticks/RAM. 
This pattern happened in #18730. To combat this we introduce one more
heuristic when weighing inlining decisions. We keep track of a "case-depth",
which increases each time we look inside a case expression with more than one
alternative. We then apply a penalty to inlinings based on the case-depth at
which they would be inlined, bounding the number of inlinings in such a
scenario.

The heuristic can be tuned in two ways:

* We can ignore the first n levels of case nestings for inlining decisions
  using -funfolding-case-threshold.
* The penalty grows linearly with the depth. It's computed as
  size*(depth-threshold)/scaling. Scaling can be set with
  -funfolding-case-scaling.

Some guidance on setting these defaults:

* A low threshold (<= 2) is needed to prevent exponential cases from
  spiraling out of control. We picked 2 for no particular reason.
* Scaling the penalty by any more than 30 means the reproducer from T18730
  won't compile even with reasonably small values of n. Instead it will run
  out of runs/ticks. This means to positively affect the reproducer a scaling
  <= 30 is required.
* A scaling of >= 15 still causes a few very large regressions on some nofib
  benchmarks. (+80% for gc/fulsom, +90% for real/ben-raytrace, +20% for
  spectral/fibheaps)
* A scaling of >= 25 showed no regressions on nofib. However it showed a
  number of (small) regressions for compiler perf benchmarks.

The end result is that we are settling for a scaling of 30, with a threshold
of 2. This gives us minimal compiler perf regressions, no nofib runtime
regressions, and will still avoid this pattern sometimes. This is a "safe"
default, where we err on the side of compiler blowup instead of risking
runtime regressions.

For cases where the default falls short the flag can be changed to allow
more/less inlining as needed on a per-module basis.
-} tryUnfolding :: Logger -> UnfoldingOpts -> Int -> Id -> Bool -> [ArgSummary] -> CallCtxt -> CoreExpr -> UnfoldingCache -> UnfoldingGuidance -> Maybe CoreExpr tryUnfolding logger opts !case_depth id lone_variable arg_infos cont_info unf_template unf_cache guidance = case guidance of UnfNever -> traceInline logger opts id str (text "UnfNever") Nothing UnfWhen { ug_arity = uf_arity, ug_unsat_ok = unsat_ok, ug_boring_ok = boring_ok } | enough_args && (boring_ok || some_benefit || unfoldingVeryAggressive opts) -- See Note [INLINE for small functions (3)] -> traceInline logger opts id str (mk_doc some_benefit empty True) (Just unf_template) | otherwise -> traceInline logger opts id str (mk_doc some_benefit empty False) Nothing where some_benefit = calc_some_benefit uf_arity enough_args = (n_val_args >= uf_arity) || (unsat_ok && n_val_args > 0) UnfIfGoodArgs { ug_args = arg_discounts, ug_res = res_discount, ug_size = size } | unfoldingVeryAggressive opts -> traceInline logger opts id str (mk_doc some_benefit extra_doc True) (Just unf_template) | is_wf && some_benefit && small_enough -> traceInline logger opts id str (mk_doc some_benefit extra_doc True) (Just unf_template) | otherwise -> traceInline logger opts id str (mk_doc some_benefit extra_doc False) Nothing where some_benefit = calc_some_benefit (length arg_discounts) -- See Note [Avoid inlining into deeply nested cases] depth_treshold = unfoldingCaseThreshold opts depth_scaling = unfoldingCaseScaling opts depth_penalty | case_depth <= depth_treshold = 0 | otherwise = (size * (case_depth - depth_treshold)) `div` depth_scaling adjusted_size = size + depth_penalty - discount small_enough = adjusted_size <= unfoldingUseThreshold opts discount = computeDiscount arg_discounts res_discount arg_infos cont_info extra_doc = vcat [ text "case depth =" <+> int case_depth , text "depth based penalty =" <+> int depth_penalty , text "discounted size =" <+> int adjusted_size ] where -- Unpack the UnfoldingCache lazily because it 
may not be needed, and all -- its fields are strict; so evaluating unf_cache at all forces all the -- isWorkFree etc computations to take place. That risks wasting effort for -- Ids that are never going to inline anyway. -- See Note [UnfoldingCache] in GHC.Core UnfoldingCache{ uf_is_work_free = is_wf, uf_expandable = is_exp } = unf_cache mk_doc some_benefit extra_doc yes_or_no = vcat [ text "arg infos" <+> ppr arg_infos , text "interesting continuation" <+> ppr cont_info , text "some_benefit" <+> ppr some_benefit , text "is exp:" <+> ppr is_exp , text "is work-free:" <+> ppr is_wf , text "guidance" <+> ppr guidance , extra_doc , text "ANSWER =" <+> if yes_or_no then text "YES" else text "NO"] ctx = log_default_dump_context (logFlags logger) str = "Considering inlining: " ++ renderWithContext ctx (ppr id) n_val_args = length arg_infos -- some_benefit is used when the RHS is small enough -- and the call has enough (or too many) value -- arguments (ie n_val_args >= arity). But there must -- be *something* interesting about some argument, or the -- result context, to make it worth inlining calc_some_benefit :: Arity -> Bool -- The Arity is the number of args -- expected by the unfolding calc_some_benefit uf_arity | not saturated = interesting_args -- Under-saturated -- Note [Unsaturated applications] | otherwise = interesting_args -- Saturated or over-saturated || interesting_call where saturated = n_val_args >= uf_arity over_saturated = n_val_args > uf_arity interesting_args = any nonTriv arg_infos -- NB: (any nonTriv arg_infos) looks at the -- over-saturated args too which is "wrong"; -- but if over-saturated we inline anyway. 
interesting_call | over_saturated = True | otherwise = case cont_info of CaseCtxt -> not (lone_variable && is_exp) -- Note [Lone variables] ValAppCtxt -> True -- Note [Cast then apply] RuleArgCtxt -> uf_arity > 0 -- See Note [Unfold info lazy contexts] DiscArgCtxt -> uf_arity > 0 -- Note [Inlining in ArgCtxt] RhsCtxt -> uf_arity > 0 -- _other -> False -- See Note [Nested functions] {- Note [Unfold into lazy contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Merged into Note [RHS of lets]. Note [RHS of lets] ~~~~~~~~~~~~~~~~~~ When the call is the argument of a function with a RULE, or the RHS of a let, we are a little bit keener to inline. For example f y = (y,y,y) g y = let x = f y in ...(case x of (a,b,c) -> ...) ... We'd inline 'f' if the call was in a case context, and it kind-of-is, only we can't see it. Also x = f v could be expensive whereas x = case v of (a,b) -> a is patently cheap and may allow more eta expansion. So we treat the RHS of a let as not-totally-boring. Note [Unsaturated applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When a call is not saturated, we *still* inline if one of the arguments has interesting structure. That's sometimes very important. A good example is the Ord instance for Bool in Base: Rec { $fOrdBool =GHC.Classes.D:Ord @ Bool ... $cmin_ajX $cmin_ajX [Occ=LoopBreaker] :: Bool -> Bool -> Bool $cmin_ajX = GHC.Classes.$dmmin @ Bool $fOrdBool } But the defn of GHC.Classes.$dmmin is: $dmmin :: forall a. GHC.Classes.Ord a => a -> a -> a {- Arity: 3, HasNoCafRefs, Strictness: SLL, Unfolding: (\ @ a $dOrd :: GHC.Classes.Ord a x :: a y :: a -> case @ a GHC.Classes.<= @ a $dOrd x y of wild { GHC.Types.False -> y GHC.Types.True -> x }) -} We *really* want to inline $dmmin, even though it has arity 3, in order to unravel the recursion. Note [Things to watch] ~~~~~~~~~~~~~~~~~~~~~~ * { y = I# 3; x = y `cast` co; ...case (x `cast` co) of ... } Assume x is exported, so not inlined unconditionally. 
Then we want x to inline unconditionally; no reason for it not to, and doing so avoids an indirection. * { x = I# 3; ....f x.... } Make sure that x does not inline unconditionally! Lest we get extra allocation. Note [Inlining an InlineRule] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An InlineRules is used for (a) programmer INLINE pragmas (b) inlinings from worker/wrapper For (a) the RHS may be large, and our contract is that we *only* inline when the function is applied to all the arguments on the LHS of the source-code defn. (The uf_arity in the rule.) However for worker/wrapper it may be worth inlining even if the arity is not satisfied (as we do in the CoreUnfolding case) so we don't require saturation. Note [Nested functions] ~~~~~~~~~~~~~~~~~~~~~~~ At one time we treated a call of a non-top-level function as "interesting" (regardless of how boring the context) in the hope that inlining it would eliminate the binding, and its allocation. Specifically, in the default case of interesting_call we had _other -> not is_top && uf_arity > 0 But actually postInlineUnconditionally does some of this and overall it makes virtually no difference to nofib. So I simplified away this special case Note [Cast then apply] ~~~~~~~~~~~~~~~~~~~~~~ Consider myIndex = __inline_me ( (/\a. ) |> co ) co :: (forall a. a -> a) ~ (forall a. T a) ... /\a.\x. case ((myIndex a) |> sym co) x of { ... } ... We need to inline myIndex to unravel this; but the actual call (myIndex a) has no value arguments. The ValAppCtxt gives it enough incentive to inline. Note [Inlining in ArgCtxt] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The condition (arity > 0) here is very important, because otherwise we end up inlining top-level stuff into useless places; eg x = I# 3# f = \y. g x This can make a very big difference: it adds 16% to nofib 'integer' allocs, and 20% to 'power'. At one stage I replaced this condition by 'True' (leading to the above slow-down). The motivation was test eyeball/inline1.hs; but that seems to work ok now. 
NOTE: arguably, we should inline in ArgCtxt only if the result of the call is at least CONLIKE. At least for the cases where we use ArgCtxt for the RHS of a 'let', we only profit from the inlining if we get a CONLIKE thing (modulo lets). Note [Lone variables] See also Note [Interaction of exprIsWorkFree and lone variables] ~~~~~~~~~~~~~~~~~~~~~ which appears below The "lone-variable" case is important. I spent ages messing about with unsatisfactory variants, but this is nice. The idea is that if a variable appears all alone as an arg of lazy fn, or rhs BoringCtxt as scrutinee of a case CaseCtxt as arg of a fn ArgCtxt AND it is bound to a cheap expression then we should not inline it (unless there is some other reason, e.g. it is the sole occurrence). That is what is happening at the use of 'lone_variable' in 'interesting_call'. Why? At least in the case-scrutinee situation, turning let x = (a,b) in case x of y -> ... into let x = (a,b) in case (a,b) of y -> ... and thence to let x = (a,b) in let y = (a,b) in ... is bad if the binding for x will remain. Another example: I discovered that strings were getting inlined straight back into applications of 'error' because the latter is strict. s = "foo" f = \x -> ...(error s)... Fundamentally such contexts should not encourage inlining because, provided the RHS is "expandable" (see Note [exprIsExpandable] in GHC.Core.Utils) the context can ``see'' the unfolding of the variable (e.g. case or a RULE) so there's no gain. However, watch out: * Consider this: foo = _inline_ (\n. [n]) bar = _inline_ (foo 20) baz = \n. case bar of { (m:_) -> m + n } Here we really want to inline 'bar' so that we can inline 'foo' and the whole thing unravels as it should obviously do. This is important: in the NDP project, 'bar' generates a closure data structure rather than a list. So the non-inlining of lone_variables should only apply if the unfolding is regarded as cheap; because that is when exprIsConApp_maybe looks through the unfolding. 
Hence the "&& is_wf" in the InlineRule branch.

* Even a type application or coercion isn't a lone variable.
  Consider
        case $fMonadST @ RealWorld of { :DMonad a b c -> c }
  We had better inline that sucker!  The case won't see through it.

  For now, I'm treating a variable applied to types in a *lazy* context
  "lone". The motivating example was
        f = /\a. \x. BIG
        g = /\a. \y. h (f a)
  There's no advantage in inlining f here, and perhaps a significant
  disadvantage. Hence some_val_args in the Stop case

Note [Interaction of exprIsWorkFree and lone variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The lone-variable test says "don't inline if a case expression scrutinises a
lone variable whose unfolding is cheap". It's very important that, under
these circumstances, exprIsConApp_maybe can spot a constructor application.
So, for example, we don't consider
        let x = e in (x,x)
to be cheap, and that's good because exprIsConApp_maybe doesn't think that
expression is a constructor application.

In the 'not (lone_variable && is_wf)' test, I used to test is_value rather
than is_wf, which was utterly wrong, because the above expression responds
True to exprIsHNF, which is what sets is_value.

This kind of thing can occur if you have
        {-# INLINE foo #-}
        foo = let x = e in (x,x)
which Roman did.
-} computeDiscount :: [Int] -> Int -> [ArgSummary] -> CallCtxt -> Int computeDiscount arg_discounts res_discount arg_infos cont_info = 10 -- Discount of 10 because the result replaces the call -- so we count 10 for the function itself + 10 * length actual_arg_discounts -- Discount of 10 for each arg supplied, -- because the result replaces the call + total_arg_discount + res_discount' where actual_arg_discounts = zipWith mk_arg_discount arg_discounts arg_infos total_arg_discount = sum actual_arg_discounts mk_arg_discount _ TrivArg = 0 mk_arg_discount _ NonTrivArg = 10 mk_arg_discount discount ValueArg = discount res_discount' | LT <- arg_discounts `compareLength` arg_infos = res_discount -- Over-saturated | otherwise = case cont_info of BoringCtxt -> 0 CaseCtxt -> res_discount -- Presumably a constructor ValAppCtxt -> res_discount -- Presumably a function _ -> 40 `min` res_discount -- ToDo: this 40 `min` res_discount doesn't seem right -- for DiscArgCtxt it shouldn't matter because the function will -- get the arg discount for any non-triv arg -- for RuleArgCtxt we do want to be keener to inline; but not only -- constructor results -- for RhsCtxt I suppose that exposing a data con is good in general -- And 40 seems very arbitrary -- -- res_discount can be very large when a function returns -- constructors; but we only want to invoke that large discount -- when there's a case continuation. -- Otherwise we, rather arbitrarily, threshold it. Yuk. 
-- But we want to avoid inlining large functions that return -- constructors into contexts that are simply "interesting" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Unfold/Make.hs0000644000000000000000000004603614472400112021174 0ustar0000000000000000{-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Unfolding creation module GHC.Core.Unfold.Make ( noUnfolding , mkUnfolding , mkCoreUnfolding , mkFinalUnfolding , mkFinalUnfolding' , mkSimpleUnfolding , mkWorkerUnfolding , mkInlineUnfolding , mkInlineUnfoldingWithArity , mkInlinableUnfolding , mkWrapperUnfolding , mkCompulsoryUnfolding , mkCompulsoryUnfolding' , mkDFunUnfolding , specUnfolding , certainlyWillInline ) where import GHC.Prelude import GHC.Core import GHC.Core.Unfold import GHC.Core.Opt.OccurAnal ( occurAnalyseExpr ) import GHC.Core.Opt.Arity ( manifestArity ) import GHC.Core.DataCon import GHC.Core.Utils import GHC.Types.Basic import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand ( DmdSig, isDeadEndSig ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import Data.Maybe ( fromMaybe ) -- the very simple optimiser is used to optimise unfoldings import {-# SOURCE #-} GHC.Core.SimpleOpt mkFinalUnfolding :: UnfoldingOpts -> UnfoldingSource -> DmdSig -> CoreExpr -> Maybe UnfoldingCache -> Unfolding -- "Final" in the sense that this is a GlobalId that will not be further -- simplified; so the unfolding should be occurrence-analysed mkFinalUnfolding opts src strict_sig expr = mkFinalUnfolding' opts src strict_sig expr -- See Note [Tying the 'CoreUnfolding' knot] for why interfaces need -- to pass a precomputed 'UnfoldingCache' mkFinalUnfolding' :: UnfoldingOpts -> UnfoldingSource -> DmdSig -> CoreExpr -> Maybe UnfoldingCache -> Unfolding -- "Final" in the sense that this is a GlobalId that will not be further -- simplified; so the unfolding should be occurrence-analysed mkFinalUnfolding' opts src strict_sig expr = mkUnfolding opts src True {- Top level -} 
(isDeadEndSig strict_sig) expr -- | Used for things that absolutely must be unfolded mkCompulsoryUnfolding :: SimpleOpts -> CoreExpr -> Unfolding mkCompulsoryUnfolding opts expr = mkCompulsoryUnfolding' (simpleOptExpr opts expr) -- | Same as 'mkCompulsoryUnfolding' but no simple optimiser pass is performed -- on the unfolding. mkCompulsoryUnfolding' :: CoreExpr -> Unfolding mkCompulsoryUnfolding' expr = mkCoreUnfolding InlineCompulsory True expr Nothing (UnfWhen { ug_arity = 0 -- Arity of unfolding doesn't matter , ug_unsat_ok = unSaturatedOk, ug_boring_ok = boringCxtOk }) -- Note [Top-level flag on inline rules] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Slight hack: note that mk_inline_rules conservatively sets the -- top-level flag to True. It gets set more accurately by the simplifier -- Simplify.simplUnfolding. mkSimpleUnfolding :: UnfoldingOpts -> CoreExpr -> Unfolding mkSimpleUnfolding !opts rhs = mkUnfolding opts InlineRhs False False rhs Nothing mkDFunUnfolding :: [Var] -> DataCon -> [CoreExpr] -> Unfolding mkDFunUnfolding bndrs con ops = DFunUnfolding { df_bndrs = bndrs , df_con = con , df_args = map occurAnalyseExpr ops } -- See Note [Occurrence analysis of unfoldings] mkWrapperUnfolding :: SimpleOpts -> CoreExpr -> Arity -> Unfolding -- Make the unfolding for the wrapper in a worker/wrapper split -- after demand/CPR analysis mkWrapperUnfolding opts expr arity = mkCoreUnfolding InlineStable True (simpleOptExpr opts expr) Nothing (UnfWhen { ug_arity = arity , ug_unsat_ok = unSaturatedOk , ug_boring_ok = boringCxtNotOk }) mkWorkerUnfolding :: SimpleOpts -> (CoreExpr -> CoreExpr) -> Unfolding -> Unfolding -- See Note [Worker/wrapper for INLINABLE functions] in GHC.Core.Opt.WorkWrap mkWorkerUnfolding opts work_fn (CoreUnfolding { uf_src = src, uf_tmpl = tmpl , uf_is_top = top_lvl }) | isStableSource src = mkCoreUnfolding src top_lvl new_tmpl Nothing guidance where new_tmpl = simpleOptExpr opts (work_fn tmpl) guidance = calcUnfoldingGuidance (so_uf_opts 
opts) False new_tmpl mkWorkerUnfolding _ _ _ = noUnfolding -- | Make an unfolding that may be used unsaturated -- (ug_unsat_ok = unSaturatedOk) and that is reported as having its -- manifest arity (the number of outer lambdas applications will -- resolve before doing any work). mkInlineUnfolding :: SimpleOpts -> CoreExpr -> Unfolding mkInlineUnfolding opts expr = mkCoreUnfolding InlineStable True -- Note [Top-level flag on inline rules] expr' Nothing guide where expr' = simpleOptExpr opts expr guide = UnfWhen { ug_arity = manifestArity expr' , ug_unsat_ok = unSaturatedOk , ug_boring_ok = boring_ok } boring_ok = inlineBoringOk expr' -- | Make an unfolding that will be used once the RHS has been saturated -- to the given arity. mkInlineUnfoldingWithArity :: Arity -> SimpleOpts -> CoreExpr -> Unfolding mkInlineUnfoldingWithArity arity opts expr = mkCoreUnfolding InlineStable True -- Note [Top-level flag on inline rules] expr' Nothing guide where expr' = simpleOptExpr opts expr guide = UnfWhen { ug_arity = arity , ug_unsat_ok = needSaturated , ug_boring_ok = boring_ok } -- See Note [INLINE pragmas and boring contexts] as to why we need to look -- at the arity here. boring_ok | arity == 0 = True | otherwise = inlineBoringOk expr' mkInlinableUnfolding :: SimpleOpts -> CoreExpr -> Unfolding mkInlinableUnfolding opts expr = mkUnfolding (so_uf_opts opts) InlineStable False False expr' Nothing where expr' = simpleOptExpr opts expr specUnfolding :: SimpleOpts -> [Var] -> (CoreExpr -> CoreExpr) -> [CoreArg] -- LHS arguments in the RULE -> Unfolding -> Unfolding -- See Note [Specialising unfoldings] -- specUnfolding spec_bndrs spec_args unf -- = \spec_bndrs. 
unf spec_args -- specUnfolding opts spec_bndrs spec_app rule_lhs_args df@(DFunUnfolding { df_bndrs = old_bndrs, df_con = con, df_args = args }) = assertPpr (rule_lhs_args `equalLength` old_bndrs) (ppr df $$ ppr rule_lhs_args) $ -- For this ASSERT see Note [DFunUnfoldings] in GHC.Core.Opt.Specialise mkDFunUnfolding spec_bndrs con (map spec_arg args) -- For DFunUnfoldings we transform -- \obs. MkD ... -- to -- \sbs. MkD ((\obs. ) spec_args) ... ditto where spec_arg arg = simpleOptExpr opts $ spec_app (mkLams old_bndrs arg) -- The beta-redexes created by spec_app will be -- simplified away by simplOptExpr specUnfolding opts spec_bndrs spec_app rule_lhs_args (CoreUnfolding { uf_src = src, uf_tmpl = tmpl , uf_is_top = top_lvl , uf_guidance = old_guidance }) | isStableSource src -- See Note [Specialising unfoldings] , UnfWhen { ug_arity = old_arity } <- old_guidance = mkCoreUnfolding src top_lvl new_tmpl Nothing (old_guidance { ug_arity = old_arity - arity_decrease }) where new_tmpl = simpleOptExpr opts $ mkLams spec_bndrs $ spec_app tmpl -- The beta-redexes created by spec_app -- will be simplified away by simplOptExpr arity_decrease = count isValArg rule_lhs_args - count isId spec_bndrs specUnfolding _ _ _ _ _ = noUnfolding {- Note [Specialising unfoldings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we specialise a function for some given type-class arguments, we use specUnfolding to specialise its unfolding. Some important points: * If the original function has a DFunUnfolding, the specialised one must do so too! Otherwise we lose the magic rules that make it interact with ClassOps * There is a bit of hack for INLINABLE functions: f :: Ord a => .... f = {- INLINABLE f #-} Now if we specialise f, should the specialised version still have an INLINABLE pragma? If it does, we'll capture a specialised copy of as its unfolding, and that probably won't inline. But if we don't, the specialised version of might be small enough to inline at a call site. 
This happens with Control.Monad.liftM3, and can cause a lot more allocation
as a result (nofib n-body shows this). Moreover, keeping the INLINABLE thing
isn't much help, because the specialised function (probably) isn't overloaded
any more.

Conclusion: drop the INLINABLE pragma. In practice what this means is:
  if a stable unfolding has UnfoldingGuidance of UnfWhen,
     we keep it (so the specialised thing too will always inline)
  if a stable unfolding has UnfoldingGuidance of UnfIfGoodArgs
     (which arises from INLINABLE), we discard it

Note [Honour INLINE on 0-ary bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
   x = <expensive>
   {-# INLINE x #-}
   f y = ...x...

The semantics of an INLINE pragma is
   inline x at every call site, provided it is saturated;
   that is, applied to at least as many arguments as appear
   on the LHS of the Haskell source definition.

(This source-code-derived arity is stored in the `ug_arity` field of the
`UnfoldingGuidance`.)

In the example, x's ug_arity is 0, so we should inline it at every use site.
It's rare to have such an INLINE pragma (usually INLINE is on functions), but
it's occasionally very important (#15578, #15519). In #15519 we had something
like
   x = case (g a b) of I# r -> T r
   {-# INLINE x #-}
   f y = ...(h x)....
where h is strict. So we got
   f y = ...(case g a b of I# r -> h (T r))...
and that in turn allowed SpecConstr to ramp up performance.

How do we deliver on this? By adjusting the ug_boring_ok flag in
mkInlineUnfoldingWithArity; see Note [INLINE pragmas and boring contexts]

NB: there is a real risk that full laziness will float it right back out
again. Consider again
   x = factorial 200
   {-# INLINE x #-}
   f y = ...x...
After inlining we get
   f y = ...(factorial 200)...
but it's entirely possible that full laziness will do
   lvl23 = factorial 200
   f y = ...lvl23...
That's a problem for another day.
Note [INLINE pragmas and boring contexts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An INLINE pragma uses mkInlineUnfoldingWithArity to build the unfolding. That sets the ug_boring_ok flag to False if the function is not tiny (inlineBoringOK), so that even INLINE functions are not inlined in an utterly boring context. E.g. \x y. Just (f y x) Nothing is gained by inlining f here, even if it has an INLINE pragma. But for 0-ary bindings, we want to inline regardless; see Note [Honour INLINE on 0-ary bindings]. I'm a bit worried that it's possible for the same kind of problem to arise for non-0-ary functions too, but let's wait and see. -} mkUnfolding :: UnfoldingOpts -> UnfoldingSource -> Bool -- Is top-level -> Bool -- Definitely a bottoming binding -- (only relevant for top-level bindings) -> CoreExpr -> Maybe UnfoldingCache -> Unfolding -- Calculates unfolding guidance -- Occurrence-analyses the expression before capturing it mkUnfolding opts src top_lvl is_bottoming expr cache = mkCoreUnfolding src top_lvl expr cache guidance where is_top_bottoming = top_lvl && is_bottoming guidance = calcUnfoldingGuidance opts is_top_bottoming expr -- NB: *not* (calcUnfoldingGuidance (occurAnalyseExpr expr))! -- See Note [Calculate unfolding guidance on the non-occ-anal'd expression] mkCoreUnfolding :: UnfoldingSource -> Bool -> CoreExpr -> Maybe UnfoldingCache -> UnfoldingGuidance -> Unfolding mkCoreUnfolding src top_lvl expr precomputed_cache guidance = CoreUnfolding { uf_tmpl = cache `seq` occurAnalyseExpr expr -- occAnalyseExpr: see Note [Occurrence analysis of unfoldings] -- See #20905 for what a discussion of this 'seq'. -- We are careful to make sure we only -- have one copy of an unfolding around at once. 
-- Note [Thoughtful forcing in mkCoreUnfolding] , uf_src = src , uf_is_top = top_lvl , uf_cache = cache , uf_guidance = guidance } where is_value = exprIsHNF expr is_conlike = exprIsConLike expr is_work_free = exprIsWorkFree expr is_expandable = exprIsExpandable expr recomputed_cache = UnfoldingCache { uf_is_value = is_value , uf_is_conlike = is_conlike , uf_is_work_free = is_work_free , uf_expandable = is_expandable } cache = fromMaybe recomputed_cache precomputed_cache ---------------- certainlyWillInline :: UnfoldingOpts -> IdInfo -> CoreExpr -> Maybe Unfolding -- ^ Sees if the unfolding is pretty certain to inline. -- If so, return a *stable* unfolding for it, that will always inline. -- The CoreExpr is the WW'd and simplified RHS. In contrast, the unfolding -- template might not have been WW'd yet. certainlyWillInline opts fn_info rhs' = case fn_unf of CoreUnfolding { uf_guidance = guidance, uf_src = src } | noinline -> Nothing -- See Note [Worker/wrapper for NOINLINE functions] | otherwise -> case guidance of UnfNever -> Nothing UnfWhen {} -> Just (fn_unf { uf_src = src', uf_tmpl = tmpl' }) -- INLINE functions have UnfWhen UnfIfGoodArgs { ug_size = size, ug_args = args } -> do_cunf size args src' tmpl' where src' = -- Do not change InlineCompulsory! case src of InlineCompulsory -> InlineCompulsory _ -> InlineStable tmpl' = -- Do not overwrite stable unfoldings! case src of InlineRhs -> occurAnalyseExpr rhs' _ -> uf_tmpl fn_unf DFunUnfolding {} -> Just fn_unf -- Don't w/w DFuns; it never makes sense -- to do so, and even if it is currently a -- loop breaker, it may not be later _other_unf -> Nothing where noinline = isNoInlinePragma (inlinePragInfo fn_info) fn_unf = unfoldingInfo fn_info -- NB: loop-breakers never inline -- The UnfIfGoodArgs case seems important. If we w/w small functions -- binary sizes go up by 10%! (This is with SplitObjs.) -- I'm not totally sure why. 
-- INLINABLE functions come via this path -- See Note [certainlyWillInline: INLINABLE] do_cunf size args src' tmpl' | arityInfo fn_info > 0 -- See Note [certainlyWillInline: be careful of thunks] , not (isDeadEndSig (dmdSigInfo fn_info)) -- Do not unconditionally inline a bottoming functions even if -- it seems smallish. We've carefully lifted it out to top level, -- so we don't want to re-inline it. , let unf_arity = length args , size - (10 * (unf_arity + 1)) <= unfoldingUseThreshold opts = Just (fn_unf { uf_src = src' , uf_tmpl = tmpl' , uf_guidance = UnfWhen { ug_arity = unf_arity , ug_unsat_ok = unSaturatedOk , ug_boring_ok = inlineBoringOk tmpl' } }) -- Note the "unsaturatedOk". A function like f = \ab. a -- will certainly inline, even if partially applied (f e), so we'd -- better make sure that the transformed inlining has the same property | otherwise = Nothing {- Note [certainlyWillInline: be careful of thunks] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Don't claim that thunks will certainly inline, because that risks work duplication. Even if the work duplication is not great (eg is_cheap holds), it can make a big difference in an inner loop In #5623 we found that the WorkWrap phase thought that y = case x of F# v -> F# (v +# v) was certainlyWillInline, so the addition got duplicated. Note that we check arityInfo instead of the arity of the unfolding to detect this case. This is so that we don't accidentally fail to inline small partial applications, like `f = g 42` (where `g` recurses into `f`) where g has arity 2 (say). Here there is no risk of work duplication, and the RHS is tiny, so certainlyWillInline should return True. But `unf_arity` is zero! However f's arity, gotten from `arityInfo fn_info`, is 1. Failing to say that `f` will inline forces W/W to generate a potentially huge worker for f that will immediately cancel with `g`'s wrapper anyway, causing unnecessary churn in the Simplifier while arriving at the same result. 
Note [certainlyWillInline: INLINABLE] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ certainlyWillInline /must/ return Nothing for a large INLINABLE thing, even though we have a stable inlining, so that strictness w/w takes place. It makes a big difference to efficiency, and the w/w pass knows how to transfer the INLINABLE info to the worker; see WorkWrap Note [Worker/wrapper for INLINABLE functions] Note [Thoughtful forcing in mkCoreUnfolding] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Core expressions retained in unfoldings is one of biggest uses of memory when compiling a program. Therefore we have to be careful about retaining copies of old or redundant templates (see !6202 for a particularlly bad case). With that in mind we want to maintain the invariant that each unfolding only references a single CoreExpr. One place where we have to be careful is in mkCoreUnfolding. * The template of the unfolding is the result of performing occurence analysis (Note [Occurrence analysis of unfoldings]) * Predicates are applied to the unanalysed expression Therefore if we are not thoughtful about forcing you can end up in a situation where the template is forced but not all the predicates are forced so the unfolding will retain both the old and analysed expressions. I investigated this using ghc-debug and it was clear this situation did often arise: ``` (["ghc:GHC.Core:Lam","ghc-prim:GHC.Types:True","THUNK_1_0","THUNK_1_0","THUNK_1_0"],Count 4307) ``` Here the predicates are unforced but the template is forced. Therefore we basically had two options in order to fix this: 1. Perform the predicates on the analysed expression. 2. Force the predicates to remove retainer to the old expression if we force the template. Option 1 is bad because occurence analysis is expensive and destroys any sharing of the unfolding with the actual program. 
(Testing this approach showed peak 25G memory usage) Therefore we got for Option 2 which performs a little more work but compensates by reducing memory pressure. The result of fixing this led to a 1G reduction in peak memory usage (12G -> 11G) when compiling a very large module (peak 3 million terms). For more discussion see #20905. -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Unify.hs0000644000000000000000000025455414472400112020170 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE ScopedTypeVariables, PatternSynonyms, MultiWayIf #-} {-# LANGUAGE DeriveFunctor #-} module GHC.Core.Unify ( tcMatchTy, tcMatchTyKi, tcMatchTys, tcMatchTyKis, tcMatchTyX, tcMatchTysX, tcMatchTyKisX, tcMatchTyX_BM, ruleMatchTyKiX, -- * Rough matching RoughMatchTc(..), roughMatchTcs, roughMatchTcsLookup, instanceCantMatch, typesCantMatch, isRoughWildcard, -- Side-effect free unification tcUnifyTy, tcUnifyTyKi, tcUnifyTys, tcUnifyTyKis, tcUnifyTysFG, tcUnifyTyWithTFs, BindFun, BindFlag(..), matchBindFun, alwaysBindFun, UnifyResult, UnifyResultM(..), MaybeApartReason(..), -- Matching a type against a lifted type (coercion) liftCoMatch, -- The core flattening algorithm flattenTys, flattenTysX ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name( Name, mkSysTvName, mkSystemVarName ) import GHC.Core.Type hiding ( getTvSubstEnv ) import GHC.Core.Coercion hiding ( getCvSubstEnv ) import GHC.Core.TyCon import GHC.Core.TyCo.Rep import GHC.Core.TyCo.FVs ( tyCoVarsOfCoList, tyCoFVsOfTypes ) import GHC.Core.TyCo.Subst ( mkTvSubst ) import GHC.Core.RoughMap import GHC.Core.Map.Type import GHC.Utils.FV( FV, fvVarSet, fvVarList ) import GHC.Utils.Misc import GHC.Data.Pair import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import {-# SOURCE #-} GHC.Tc.Utils.TcType ( tcEqType ) import GHC.Exts( oneShot ) import GHC.Utils.Panic import GHC.Utils.Panic.Plain 
import GHC.Data.FastString import Data.List ( mapAccumL ) import Control.Monad import qualified Data.Semigroup as S import GHC.Builtin.Names (constraintKindTyConKey, liftedTypeKindTyConKey) {- Unification is much tricker than you might think. 1. The substitution we generate binds the *template type variables* which are given to us explicitly. 2. We want to match in the presence of foralls; e.g (forall a. t1) ~ (forall b. t2) That is what the RnEnv2 is for; it does the alpha-renaming that makes it as if a and b were the same variable. Initialising the RnEnv2, so that it can generate a fresh binder when necessary, entails knowing the free variables of both types. 3. We must be careful not to bind a template type variable to a locally bound variable. E.g. (forall a. x) ~ (forall b. b) where x is the template type variable. Then we do not want to bind x to a/b! This is a kind of occurs check. The necessary locals accumulate in the RnEnv2. Note [tcMatchTy vs tcMatchTyKi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module offers two variants of matching: with kinds and without. The TyKi variant takes two types, of potentially different kinds, and matches them. Along the way, it necessarily also matches their kinds. The Ty variant instead assumes that the kinds are already eqType and so skips matching up the kinds. How do you choose between them? 1. If you know that the kinds of the two types are eqType, use the Ty variant. It is more efficient, as it does less work. 2. If the kinds of variables in the template type might mention type families, use the Ty variant (and do other work to make sure the kinds work out). These pure unification functions do a straightforward syntactic unification and do no complex reasoning about type families. Note that the types of the variables in instances can indeed mention type families, so instance lookup must use the Ty variant. (Nothing goes terribly wrong -- no panics -- if there might be type families in kinds in the TyKi variant. 
You just might get match failure even though a reducing a type family would lead to success.) 3. Otherwise, if you're sure that the variable kinds do not mention type families and you're not already sure that the kind of the template equals the kind of the target, then use the TyKi version. -} -- | Some unification functions are parameterised by a 'BindFun', which -- says whether or not to allow a certain unification to take place. -- A 'BindFun' takes the 'TyVar' involved along with the 'Type' it will -- potentially be bound to. -- -- It is possible for the variable to actually be a coercion variable -- (Note [Matching coercion variables]), but only when one-way matching. -- In this case, the 'Type' will be a 'CoercionTy'. type BindFun = TyCoVar -> Type -> BindFlag -- | @tcMatchTy t1 t2@ produces a substitution (over fvs(t1)) -- @s@ such that @s(t1)@ equals @t2@. -- The returned substitution might bind coercion variables, -- if the variable is an argument to a GADT constructor. -- -- Precondition: typeKind ty1 `eqType` typeKind ty2 -- -- We don't pass in a set of "template variables" to be bound -- by the match, because tcMatchTy (and similar functions) are -- always used on top-level types, so we can bind any of the -- free variables of the LHS. -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTy :: Type -> Type -> Maybe TCvSubst tcMatchTy ty1 ty2 = tcMatchTys [ty1] [ty2] tcMatchTyX_BM :: BindFun -> TCvSubst -> Type -> Type -> Maybe TCvSubst tcMatchTyX_BM bind_me subst ty1 ty2 = tc_match_tys_x bind_me False subst [ty1] [ty2] -- | Like 'tcMatchTy', but allows the kinds of the types to differ, -- and thus matches them as well. 
-- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyKi :: Type -> Type -> Maybe TCvSubst tcMatchTyKi ty1 ty2 = tc_match_tys alwaysBindFun True [ty1] [ty2] -- | This is similar to 'tcMatchTy', but extends a substitution -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyX :: TCvSubst -- ^ Substitution to extend -> Type -- ^ Template -> Type -- ^ Target -> Maybe TCvSubst tcMatchTyX subst ty1 ty2 = tc_match_tys_x alwaysBindFun False subst [ty1] [ty2] -- | Like 'tcMatchTy' but over a list of types. -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTys :: [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe TCvSubst -- ^ One-shot; in principle the template -- variables could be free in the target tcMatchTys tys1 tys2 = tc_match_tys alwaysBindFun False tys1 tys2 -- | Like 'tcMatchTyKi' but over a list of types. -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyKis :: [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe TCvSubst -- ^ One-shot substitution tcMatchTyKis tys1 tys2 = tc_match_tys alwaysBindFun True tys1 tys2 -- | Like 'tcMatchTys', but extending a substitution -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTysX :: TCvSubst -- ^ Substitution to extend -> [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe TCvSubst -- ^ One-shot substitution tcMatchTysX subst tys1 tys2 = tc_match_tys_x alwaysBindFun False subst tys1 tys2 -- | Like 'tcMatchTyKis', but extending a substitution -- See also Note [tcMatchTy vs tcMatchTyKi] tcMatchTyKisX :: TCvSubst -- ^ Substitution to extend -> [Type] -- ^ Template -> [Type] -- ^ Target -> Maybe TCvSubst -- ^ One-shot substitution tcMatchTyKisX subst tys1 tys2 = tc_match_tys_x alwaysBindFun True subst tys1 tys2 -- | Same as tc_match_tys_x, but starts with an empty substitution tc_match_tys :: BindFun -> Bool -- ^ match kinds? 
-> [Type] -> [Type] -> Maybe TCvSubst tc_match_tys bind_me match_kis tys1 tys2 = tc_match_tys_x bind_me match_kis (mkEmptyTCvSubst in_scope) tys1 tys2 where in_scope = mkInScopeSet (tyCoVarsOfTypes tys1 `unionVarSet` tyCoVarsOfTypes tys2) -- | Worker for 'tcMatchTysX' and 'tcMatchTyKisX' tc_match_tys_x :: BindFun -> Bool -- ^ match kinds? -> TCvSubst -> [Type] -> [Type] -> Maybe TCvSubst tc_match_tys_x bind_me match_kis (TCvSubst in_scope tv_env cv_env) tys1 tys2 = case tc_unify_tys bind_me False -- Matching, not unifying False -- Not an injectivity check match_kis (mkRnEnv2 in_scope) tv_env cv_env tys1 tys2 of Unifiable (tv_env', cv_env') -> Just $ TCvSubst in_scope tv_env' cv_env' _ -> Nothing -- | This one is called from the expression matcher, -- which already has a MatchEnv in hand ruleMatchTyKiX :: TyCoVarSet -- ^ template variables -> RnEnv2 -> TvSubstEnv -- ^ type substitution to extend -> Type -- ^ Template -> Type -- ^ Target -> Maybe TvSubstEnv ruleMatchTyKiX tmpl_tvs rn_env tenv tmpl target -- See Note [Kind coercions in Unify] = case tc_unify_tys (matchBindFun tmpl_tvs) False False True -- <-- this means to match the kinds rn_env tenv emptyCvSubstEnv [tmpl] [target] of Unifiable (tenv', _) -> Just tenv' _ -> Nothing -- | Allow binding only for any variable in the set. Variables may -- be bound to any type. -- Used when doing simple matching; e.g. 
can we find a substitution -- -- @ -- S = [a :-> t1, b :-> t2] such that -- S( Maybe (a, b->Int ) = Maybe (Bool, Char -> Int) -- @ matchBindFun :: TyCoVarSet -> BindFun matchBindFun tvs tv _ty | tv `elemVarSet` tvs = BindMe | otherwise = Apart -- | Allow the binding of any variable to any type alwaysBindFun :: BindFun alwaysBindFun _tv _ty = BindMe {- ********************************************************************* * * Rough matching * * ********************************************************************* -} {- Note [Rough matching in class and family instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider instance C (Maybe [Tree a]) Bool and suppose we are looking up C Bool Bool We can very quickly rule the instance out, because the first argument is headed by Maybe, whereas in the constraint we are looking up has first argument headed by Bool. These "headed by" TyCons are called the "rough match TyCons" of the constraint or instance. They are used for a quick filter, to check when an instance cannot possibly match. The main motivation is to avoid sucking in whole instance declarations that are utterly useless. See GHC.Core.InstEnv Note [ClsInst laziness and the rough-match fields]. INVARIANT: a rough-match TyCons `tc` is always a real, generative tycon, like Maybe or Either, including a newtype or a data family, both of which are generative. It replies True to `isGenerativeTyCon tc Nominal`. But it is never - A type synonym E.g. Int and (S Bool) might match if (S Bool) is a synonym for Int - A type family (#19336) E.g. (Just a) and (F a) might match if (F a) reduces to (Just a) albeit perhaps only after 'a' is instantiated. 
-} roughMatchTcs :: [Type] -> [RoughMatchTc] roughMatchTcs tys = map typeToRoughMatchTc tys roughMatchTcsLookup :: [Type] -> [RoughMatchLookupTc] roughMatchTcsLookup tys = map typeToRoughMatchLookupTc tys instanceCantMatch :: [RoughMatchTc] -> [RoughMatchTc] -> Bool -- (instanceCantMatch tcs1 tcs2) returns True if tcs1 cannot -- possibly be instantiated to actual, nor vice versa; -- False is non-committal instanceCantMatch (mt : ts) (ma : as) = itemCantMatch mt ma || instanceCantMatch ts as instanceCantMatch _ _ = False -- Safe itemCantMatch :: RoughMatchTc -> RoughMatchTc -> Bool itemCantMatch (RM_KnownTc t) (RM_KnownTc a) = t /= a itemCantMatch _ _ = False {- ************************************************************************ * * GADTs * * ************************************************************************ Note [Pruning dead case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T a where T1 :: T Int T2 :: T a newtype X = MkX Int newtype Y = MkY Char type family F a type instance F Bool = Int Now consider case x of { T1 -> e1; T2 -> e2 } The question before the house is this: if I know something about the type of x, can I prune away the T1 alternative? Suppose x::T Char. It's impossible to construct a (T Char) using T1, Answer = YES we can prune the T1 branch (clearly) Suppose x::T (F a), where 'a' is in scope. Then 'a' might be instantiated to 'Bool', in which case x::T Int, so ANSWER = NO (clearly) We see here that we want precisely the apartness check implemented within tcUnifyTysFG. So that's what we do! Two types cannot match if they are surely apart. Note that since we are simply dropping dead code, a conservative test suffices. -} -- | Given a list of pairs of types, are any two members of a pair surely -- apart, even after arbitrary type function evaluation and substitution? 
typesCantMatch :: [(Type,Type)] -> Bool -- See Note [Pruning dead case alternatives] typesCantMatch prs = any (uncurry cant_match) prs where cant_match :: Type -> Type -> Bool cant_match t1 t2 = case tcUnifyTysFG alwaysBindFun [t1] [t2] of SurelyApart -> True _ -> False {- ************************************************************************ * * Unification * * ************************************************************************ Note [Fine-grained unification] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do the types (x, x) and ([y], y) unify? The answer is seemingly "no" -- no substitution to finite types makes these match. But, a substitution to *infinite* types can unify these two types: [x |-> [[[...]]], y |-> [[[...]]] ]. Why do we care? Consider these two type family instances: type instance F x x = Int type instance F [y] y = Bool If we also have type instance Looper = [Looper] then the instances potentially overlap. The solution is to use unification over infinite terms. This is possible (see [1] for lots of gory details), but a full algorithm is a little more power than we need. Instead, we make a conservative approximation and just omit the occurs check. [1]: http://research.microsoft.com/en-us/um/people/simonpj/papers/ext-f/axioms-extended.pdf tcUnifyTys considers an occurs-check problem as the same as general unification failure. tcUnifyTysFG ("fine-grained") returns one of three results: success, occurs-check failure ("MaybeApart"), or general failure ("SurelyApart"). See also #8162. It's worth noting that unification in the presence of infinite types is not complete. This means that, sometimes, a closed type family does not reduce when it should. See test case indexed-types/should_fail/Overlap15 for an example. Note [Unification result] ~~~~~~~~~~~~~~~~~~~~~~~~~ When unifying t1 ~ t2, we return * Unifiable s, if s is a substitution such that s(t1) is syntactically the same as s(t2), modulo type-synonym expansion. 
* SurelyApart, if there is no substitution s such that s(t1) = s(t2), where "=" includes type-family reductions. * MaybeApart mar s, when we aren't sure. `mar` is a MaybeApartReason. Examples * [a] ~ Maybe b: SurelyApart, because [] and Maybe can't unify * [(a,Int)] ~ [(Bool,b)]: Unifiable * [F Int] ~ [Bool]: MaybeApart MARTypeFamily, because F Int might reduce to Bool (the unifier does not try this) * a ~ Maybe a: MaybeApart MARInfinite. Not Unifiable clearly, but not SurelyApart either; consider a := Loop where type family Loop where Loop = Maybe Loop There is the possibility that two types are MaybeApart for *both* reasons: * (a, F Int) ~ (Maybe a, Bool) What reason should we use? The *only* consumer of the reason is described in Note [Infinitary substitution in lookup] in GHC.Core.InstEnv. The goal there is identify which instances might match a target later (but don't match now) -- except that we want to ignore the possibility of infinitary substitutions. So let's examine a concrete scenario: class C a b c instance C a (Maybe a) Bool -- other instances, including one that will actually match [W] C b b (F Int) Do we want the instance as a future possibility? No. The only way that instance can match is in the presence of an infinite type (infinitely nested Maybes). We thus say that MARInfinite takes precedence, so that InstEnv treats this case as an infinitary substitution case; the fact that a type family is involved is only incidental. We thus define the Semigroup instance for MaybeApartReason to prefer MARInfinite. Note [The substitution in MaybeApart] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The constructor MaybeApart carries data with it, typically a TvSubstEnv. Why? Because consider unifying these: (a, a, Int) ~ (b, [b], Bool) If we go left-to-right, we start with [a |-> b]. Then, on the middle terms, we apply the subst we have so far and discover that we need [b |-> [b]]. 
Because this fails the occurs check, we say that the types are MaybeApart (see above Note [Fine-grained unification]). But, we can't stop there! Because if we continue, we discover that Int is SurelyApart from Bool, and therefore the types are apart. This has practical consequences for the ability for closed type family applications to reduce. See test case indexed-types/should_compile/Overlap14. -} -- | Simple unification of two types; all type variables are bindable -- Precondition: the kinds are already equal tcUnifyTy :: Type -> Type -- All tyvars are bindable -> Maybe TCvSubst -- A regular one-shot (idempotent) substitution tcUnifyTy t1 t2 = tcUnifyTys alwaysBindFun [t1] [t2] -- | Like 'tcUnifyTy', but also unifies the kinds tcUnifyTyKi :: Type -> Type -> Maybe TCvSubst tcUnifyTyKi t1 t2 = tcUnifyTyKis alwaysBindFun [t1] [t2] -- | Unify two types, treating type family applications as possibly unifying -- with anything and looking through injective type family applications. -- Precondition: kinds are the same tcUnifyTyWithTFs :: Bool -- ^ True <=> do two-way unification; -- False <=> do one-way matching. -- See end of sec 5.2 from the paper -> Type -> Type -> Maybe TCvSubst -- This algorithm is an implementation of the "Algorithm U" presented in -- the paper "Injective type families for Haskell", Figures 2 and 3. -- The code is incorporated with the standard unifier for convenience, but -- its operation should match the specification in the paper. tcUnifyTyWithTFs twoWay t1 t2 = case tc_unify_tys alwaysBindFun twoWay True False rn_env emptyTvSubstEnv emptyCvSubstEnv [t1] [t2] of Unifiable (subst, _) -> Just $ maybe_fix subst MaybeApart _reason (subst, _) -> Just $ maybe_fix subst -- we want to *succeed* in questionable cases. This is a -- pre-unification algorithm. 
SurelyApart -> Nothing where in_scope = mkInScopeSet $ tyCoVarsOfTypes [t1, t2] rn_env = mkRnEnv2 in_scope maybe_fix | twoWay = niFixTCvSubst | otherwise = mkTvSubst in_scope -- when matching, don't confuse -- domain with range ----------------- tcUnifyTys :: BindFun -> [Type] -> [Type] -> Maybe TCvSubst -- ^ A regular one-shot (idempotent) substitution -- that unifies the erased types. See comments -- for 'tcUnifyTysFG' -- The two types may have common type variables, and indeed do so in the -- second call to tcUnifyTys in GHC.Tc.Instance.FunDeps.checkClsFD tcUnifyTys bind_fn tys1 tys2 = case tcUnifyTysFG bind_fn tys1 tys2 of Unifiable result -> Just result _ -> Nothing -- | Like 'tcUnifyTys' but also unifies the kinds tcUnifyTyKis :: BindFun -> [Type] -> [Type] -> Maybe TCvSubst tcUnifyTyKis bind_fn tys1 tys2 = case tcUnifyTyKisFG bind_fn tys1 tys2 of Unifiable result -> Just result _ -> Nothing -- This type does double-duty. It is used in the UM (unifier monad) and to -- return the final result. See Note [Fine-grained unification] type UnifyResult = UnifyResultM TCvSubst -- | See Note [Unification result] data UnifyResultM a = Unifiable a -- the subst that unifies the types | MaybeApart MaybeApartReason a -- the subst has as much as we know -- it must be part of a most general unifier -- See Note [The substitution in MaybeApart] | SurelyApart deriving Functor -- | Why are two types 'MaybeApart'? 'MARInfinite' takes precedence: -- This is used (only) in Note [Infinitary substitution in lookup] in GHC.Core.InstEnv -- As of Feb 2022, we never differentiate between MARTypeFamily and MARTypeVsConstraint; -- it's really only MARInfinite that's interesting here. data MaybeApartReason = MARTypeFamily -- ^ matching e.g. F Int ~? Bool | MARInfinite -- ^ matching e.g. a ~? Maybe a | MARTypeVsConstraint -- ^ matching Type ~? 
Constraint -- See Note [coreView vs tcView] in GHC.Core.Type instance Outputable MaybeApartReason where ppr MARTypeFamily = text "MARTypeFamily" ppr MARInfinite = text "MARInfinite" ppr MARTypeVsConstraint = text "MARTypeVsConstraint" instance Semigroup MaybeApartReason where -- see end of Note [Unification result] for why MARTypeFamily <> r = r MARInfinite <> _ = MARInfinite MARTypeVsConstraint <> r = r instance Applicative UnifyResultM where pure = Unifiable (<*>) = ap instance Monad UnifyResultM where SurelyApart >>= _ = SurelyApart MaybeApart r1 x >>= f = case f x of Unifiable y -> MaybeApart r1 y MaybeApart r2 y -> MaybeApart (r1 S.<> r2) y SurelyApart -> SurelyApart Unifiable x >>= f = f x -- | @tcUnifyTysFG bind_tv tys1 tys2@ attempts to find a substitution @s@ (whose -- domain elements all respond 'BindMe' to @bind_tv@) such that -- @s(tys1)@ and that of @s(tys2)@ are equal, as witnessed by the returned -- Coercions. This version requires that the kinds of the types are the same, -- if you unify left-to-right. tcUnifyTysFG :: BindFun -> [Type] -> [Type] -> UnifyResult tcUnifyTysFG bind_fn tys1 tys2 = tc_unify_tys_fg False bind_fn tys1 tys2 tcUnifyTyKisFG :: BindFun -> [Type] -> [Type] -> UnifyResult tcUnifyTyKisFG bind_fn tys1 tys2 = tc_unify_tys_fg True bind_fn tys1 tys2 tc_unify_tys_fg :: Bool -> BindFun -> [Type] -> [Type] -> UnifyResult tc_unify_tys_fg match_kis bind_fn tys1 tys2 = do { (env, _) <- tc_unify_tys bind_fn True False match_kis env emptyTvSubstEnv emptyCvSubstEnv tys1 tys2 ; return $ niFixTCvSubst env } where vars = tyCoVarsOfTypes tys1 `unionVarSet` tyCoVarsOfTypes tys2 env = mkRnEnv2 $ mkInScopeSet vars -- | This function is actually the one to call the unifier -- a little -- too general for outside clients, though. 
-- | Worker shared by the @tcUnifyTys@/@tcMatchTys@ entry points: runs the
-- core unifier over two lists of types, starting from (and extending) the
-- given substitution pair.  When the kinds flag is set, a separate pass
-- unifies the kinds of the two lists first.
tc_unify_tys :: BindFun
             -> AmIUnifying -- ^ True <=> unify; False <=> match
             -> Bool        -- ^ True <=> doing an injectivity check
             -> Bool        -- ^ True <=> treat the kinds as well
             -> RnEnv2
             -> TvSubstEnv  -- ^ substitution to extend
             -> CvSubstEnv
             -> [Type] -> [Type]
             -> UnifyResultM (TvSubstEnv, CvSubstEnv)
-- NB: It's tempting to ASSERT here that, if we're not matching kinds, then
-- the kinds of the types should be the same. However, this doesn't work,
-- as the types may be a dependent telescope, where later types have kinds
-- that mention variables occurring earlier in the list of types. Here's an
-- example (from typecheck/should_fail/T12709):
--   template: [rep :: RuntimeRep,       a :: TYPE rep]
--   target:   [LiftedRep :: RuntimeRep, Int :: TYPE LiftedRep]
-- We can see that matching the first pair will make the kinds of the second
-- pair equal. Yet, we still don't need a separate pass to unify the kinds
-- of these types, so it's appropriate to use the Ty variant of unification.
-- See also Note [tcMatchTy vs tcMatchTyKi].
tc_unify_tys bind_fn unif inj_check match_kis rn_env tv_env cv_env tys1 tys2
  = initUM tv_env cv_env $
    do { when match_kis $
         unify_tys env kis1 kis2
       ; unify_tys env tys1 tys2
       ; (,) <$> getTvSubstEnv <*> getCvSubstEnv }
  where
    env = UMEnv { um_bind_fun = bind_fn
                , um_skols    = emptyVarSet
                , um_unif     = unif
                , um_inj_tf   = inj_check
                , um_rn_env   = rn_env }

    -- Kinds of the argument lists, unified first when match_kis is set;
    -- see Note [Kind coercions in Unify]
    kis1 = map typeKind tys1
    kis2 = map typeKind tys2

instance Outputable a => Outputable (UnifyResultM a) where
  ppr SurelyApart      = text "SurelyApart"
  ppr (Unifiable x)    = text "Unifiable" <+> ppr x
  ppr (MaybeApart r x) = text "MaybeApart" <+> ppr r <+> ppr x

{-
************************************************************************
*                                                                      *
                Non-idempotent substitution
*                                                                      *
************************************************************************

Note [Non-idempotent substitution]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
During unification we use a TvSubstEnv/CvSubstEnv pair that is
  (a) non-idempotent
  (b) loop-free; ie repeatedly applying it yields a fixed point

Note [Finding the substitution fixpoint]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Finding the fixpoint of a non-idempotent substitution arising from a
unification is much trickier than it looks, because of kinds.  Consider
   T k (H k (f:k)) ~ T * (g:*)
If we unify, we get the substitution
   [ k -> *
   , g -> H k (f:k) ]
To make it idempotent we don't want to get just
   [ k -> *
   , g -> H * (f:k) ]
We also want to substitute inside f's kind, to get
   [ k -> *
   , g -> H k (f:*) ]
If we don't do this, we may apply the substitution to something,
and get an ill-formed type, i.e. one where typeKind will fail.
This happened, for example, in #9106.

It gets worse.  In #14164 we wanted to take the fixpoint of
this substitution
   [ xs_asV :-> F a_aY6 (z_aY7 :: a_aY6)
                        (rest_aWF :: G a_aY6 (z_aY7 :: a_aY6))
   , a_aY6  :-> a_aXQ ]

We have to apply the substitution for a_aY6 two levels deep inside
the invocation of F!

We don't have a function that recursively applies substitutions
inside the kinds of variable occurrences (and probably rightly so).

So, we work as follows:

 1. Start with the current substitution (which we are trying
    to fixpoint)
       [ xs :-> F a (z :: a) (rest :: G a (z :: a))
       , a :-> b ]

 2. Take all the free vars of the range of the substitution:
       {a, z, rest, b}
    NB: the free variable finder closes over the kinds of
    variable occurrences

 3. If none are in the domain of the substitution, stop.
    We have found a fixpoint.

 4. Remove the variables that are bound by the substitution, leaving
       {z, rest, b}

 5. Do a topo-sort to put them in dependency order:
       [ b :: *, z :: a, rest :: G a z ]

 6. Apply the substitution left-to-right to the kinds of these
    tyvars, extending it each time with a new binding, so we
    finish up with
       [ xs :-> ..as before..
       , a :-> b
       , b :-> b :: *
       , z :-> z :: b
       , rest :-> rest :: G b (z :: b) ]
    Note that rest now has the right kind

 7. Apply this extended substitution (once) to the range of
    the /original/ substitution.  (Note that the extended
    substitution would go on forever if you tried to find its
    fixpoint, because it maps z to z.)

 8. And go back to step 1

In Step 6 we use the free vars from Step 2 as the initial
in-scope set, because all of those variables appear in the range
of the substitution, so they must all be in the in-scope set.
But NB that the type substitution engine does not look up
variables in the in-scope set; it is used only to ensure no
shadowing.
-}

niFixTCvSubst :: TvSubstEnv -> TCvSubst
-- Find the idempotent fixed point of the non-idempotent substitution
-- This is surprisingly tricky:
--   see Note [Finding the substitution fixpoint]
-- ToDo: use laziness instead of iteration?
niFixTCvSubst tenv
  -- Iterate (steps 1-8 of the Note) until no range variable is
  -- still bound in the domain; termination relies on the
  -- substitution being loop-free (Note [Non-idempotent substitution]).
  | not_fixpoint = niFixTCvSubst (mapVarEnv (substTy subst) tenv)
  | otherwise    = subst
  where
    range_fvs :: FV
    range_fvs = tyCoFVsOfTypes (nonDetEltsUFM tenv)
          -- It's OK to use nonDetEltsUFM here because the
          -- order of range_fvs, range_tvs is immaterial

    range_tvs :: [TyVar]
    range_tvs = fvVarList range_fvs

    not_fixpoint  = any in_domain range_tvs
    in_domain tv  = tv `elemVarEnv` tenv

    -- Step 4-5 of the Note: the free-but-unbound range variables,
    -- topo-sorted into dependency order
    free_tvs = scopedSort (filterOut in_domain range_tvs)

    -- See Note [Finding the substitution fixpoint], Step 6
    init_in_scope = mkInScopeSet (fvVarSet range_fvs)
    subst = foldl' add_free_tv
                   (mkTvSubst init_in_scope tenv)
                   free_tvs

    -- Step 6: extend the substitution with (tv :-> tv-with-substed-kind)
    add_free_tv :: TCvSubst -> TyVar -> TCvSubst
    add_free_tv subst tv
      = extendTvSubst subst tv (mkTyVarTy tv')
      where
        tv' = updateTyVarKind (substTy subst) tv

niSubstTvSet :: TvSubstEnv -> TyCoVarSet -> TyCoVarSet
-- Apply the non-idempotent substitution to a set of type variables,
-- remembering that the substitution isn't necessarily idempotent
-- This is used in the occurs check, before extending the substitution
niSubstTvSet tsubst tvs
  = nonDetStrictFoldUniqSet (unionVarSet . get) emptyVarSet tvs
  -- It's OK to use a non-deterministic fold here because we immediately
  -- forget the ordering by creating a set.
  where
    -- A bound variable contributes the (recursively substed) free vars
    -- of its image; an unbound one contributes just itself.
    get tv
      | Just ty <- lookupVarEnv tsubst tv
      = niSubstTvSet tsubst (tyCoVarsOfType ty)

      | otherwise
      = unitVarSet tv

{-
************************************************************************
*                                                                      *
                unify_ty: the main workhorse
*                                                                      *
************************************************************************

Note [Specification of unification]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The pure unifier, unify_ty, defined in this module, tries to work out
a substitution to make two types say True to eqType.
NB: eqType is itself not purely syntactic; it accounts for CastTys; see Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep Unlike the "impure unifiers" in the typechecker (the eager unifier in GHC.Tc.Utils.Unify, and the constraint solver itself in GHC.Tc.Solver.Canonical), the pure unifier does /not/ work up to ~. The algorithm implemented here is rather delicate, and we depend on it to uphold certain properties. This is a summary of these required properties. Notation: θ,φ substitutions ξ type-function-free types τ,σ other types τ♭ type τ, flattened ≡ eqType (U1) Soundness. If (unify τ₁ τ₂) = Unifiable θ, then θ(τ₁) ≡ θ(τ₂). θ is a most general unifier for τ₁ and τ₂. (U2) Completeness. If (unify ξ₁ ξ₂) = SurelyApart, then there exists no substitution θ such that θ(ξ₁) ≡ θ(ξ₂). These two properties are stated as Property 11 in the "Closed Type Families" paper (POPL'14). Below, this paper is called [CTF]. (U3) Apartness under substitution. If (unify ξ τ♭) = SurelyApart, then (unify ξ θ(τ)♭) = SurelyApart, for any θ. (Property 12 from [CTF]) (U4) Apart types do not unify. If (unify ξ τ♭) = SurelyApart, then there exists no θ such that θ(ξ) = θ(τ). (Property 13 from [CTF]) THEOREM. Completeness w.r.t ~ If (unify τ₁♭ τ₂♭) = SurelyApart, then there exists no proof that (τ₁ ~ τ₂). PROOF. See appendix of [CTF]. The unification algorithm is used for type family injectivity, as described in the "Injective Type Families" paper (Haskell'15), called [ITF]. When run in this mode, it has the following properties. (I1) If (unify σ τ) = SurelyApart, then σ and τ are not unifiable, even after arbitrary type family reductions. Note that σ and τ are not flattened here. (I2) If (unify σ τ) = MaybeApart θ, and if some φ exists such that φ(σ) ~ φ(τ), then φ extends θ. Furthermore, the RULES matching algorithm requires this property, but only when using this algorithm for matching: (M1) If (match σ τ) succeeds with θ, then all matchable tyvars in σ are bound in θ. 
Property M1 means that we must extend the substitution with, say (a ↦ a) when appropriate during matching. See also Note [Self-substitution when matching]. (M2) Completeness of matching. If θ(σ) = τ, then (match σ τ) = Unifiable φ, where θ is an extension of φ. Sadly, property M2 and I2 conflict. Consider type family F1 a b where F1 Int Bool = Char F1 Double String = Char Consider now two matching problems: P1. match (F1 a Bool) (F1 Int Bool) P2. match (F1 a Bool) (F1 Double String) In case P1, we must find (a ↦ Int) to satisfy M2. In case P2, we must /not/ find (a ↦ Double), in order to satisfy I2. (Note that the correct mapping for I2 is (a ↦ Int). There is no way to discover this, but we mustn't map a to anything else!) We thus must parameterize the algorithm over whether it's being used for an injectivity check (refrain from looking at non-injective arguments to type families) or not (do indeed look at those arguments). This is implemented by the um_inj_tf field of UMEnv. (It's all a question of whether or not to include equation (7) from Fig. 2 of [ITF].) This extra parameter is a bit fiddly, perhaps, but seemingly less so than having two separate, almost-identical algorithms. Note [Self-substitution when matching] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ What should happen when we're *matching* (not unifying) a1 with a1? We should get a substitution [a1 |-> a1]. A successful match should map all the template variables (except ones that disappear when expanding synonyms). But when unifying, we don't want to do this, because we'll then fall into a loop. This arrangement affects the code in three places: - If we're matching a refined template variable, don't recur. Instead, just check for equality. That is, if we know [a |-> Maybe a] and are matching (a ~? Maybe Int), we want to just fail. - Skip the occurs check when matching. This comes up in two places, because matching against variables is handled separately from matching against full-on types. 
Note that this arrangement was provoked by a real failure, where the same unique ended up in the template as in the target. (It was a rule firing when compiling Data.List.NonEmpty.) Note [Matching coercion variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this: type family F a data G a where MkG :: F a ~ Bool => G a type family Foo (x :: G a) :: F a type instance Foo MkG = False We would like that to be accepted. For that to work, we need to introduce a coercion variable on the left and then use it on the right. Accordingly, at use sites of Foo, we need to be able to use matching to figure out the value for the coercion. (See the desugared version: axFoo :: [a :: *, c :: F a ~ Bool]. Foo (MkG c) = False |> (sym c) ) We never want this action to happen during *unification* though, when all bets are off. Note [Kind coercions in Unify] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We wish to match/unify while ignoring casts. But, we can't just ignore them completely, or we'll end up with ill-kinded substitutions. For example, say we're matching `a` with `ty |> co`. If we just drop the cast, we'll return [a |-> ty], but `a` and `ty` might have different kinds. We can't just match/unify their kinds, either, because this might gratuitously fail. After all, `co` is the witness that the kinds are the same -- they may look nothing alike. So, we pass a kind coercion to the match/unify worker. This coercion witnesses the equality between the substed kind of the left-hand type and the substed kind of the right-hand type. Note that we do not unify kinds at the leaves (as we did previously). We thus have INVARIANT: In the call unify_ty ty1 ty2 kco it must be that subst(kco) :: subst(kind(ty1)) ~N subst(kind(ty2)), where `subst` is the ambient substitution in the UM monad. To get this coercion, we first have to match/unify the kinds before looking at the types. Happily, we need look only one level up, as all kinds are guaranteed to have kind *. 
When we're working with type applications (either TyConApp or AppTy) we need to worry about establishing INVARIANT, as the kinds of the function & arguments aren't (necessarily) included in the kind of the result. When unifying two TyConApps, this is easy, because the two TyCons are the same. Their kinds are thus the same. As long as we unify left-to-right, we'll be sure to unify types' kinds before the types themselves. (For example, think about Proxy :: forall k. k -> *. Unifying the first args matches up the kinds of the second args.) For AppTy, we must unify the kinds of the functions, but once these are unified, we can continue unifying arguments without worrying further about kinds. The interface to this module includes both "...Ty" functions and "...TyKi" functions. The former assume that INVARIANT is already established, either because the kinds are the same or because the list of types being passed in are the well-typed arguments to some type constructor (see two paragraphs above). The latter take a separate pre-pass over the kinds to establish INVARIANT. Sometimes, it's important not to take the second pass, as it caused #12442. We thought, at one point, that this was all unnecessary: why should casts be in types in the first place? But they are sometimes. In dependent/should_compile/KindEqualities2, we see, for example the constraint Num (Int |> (blah ; sym blah)). We naturally want to find a dictionary for that constraint, which requires dealing with coercions in this manner. Note [Matching in the presence of casts (1)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When matching, it is crucial that no variables from the template end up in the range of the matching substitution (obviously!). When unifying, that's not a constraint; instead we take the fixpoint of the substitution at the end. So what should we do with this, when matching? 
     unify_ty (tmpl |> co) tgt kco

Previously, wrongly, we pushed 'co' in the (horrid) accumulating
'kco' argument like this:

   unify_ty (tmpl |> co) tgt kco
     = unify_ty tmpl tgt (kco ; co)

But that is obviously wrong because 'co' (from the template) ends
up in 'kco', which in turn ends up in the range of the substitution.

This all came up in #13910.  Because we match tycon arguments
left-to-right, the ambient substitution will already have a matching
substitution for any kinds; so there is an easy fix: just apply
the substitution-so-far to the coercion from the LHS.

Note that

* When matching, the first arg of unify_ty is always the template;
  we never swap round.

* The above argument is distressingly indirect.  We seek a
  better way.

* One better way is to ensure that type patterns (the template
  in the matching process) have no casts.  See #14119.

Note [Matching in the presence of casts (2)]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is another wrinkle (#17395).  Suppose (T :: forall k. k -> Type)
and we are matching
   tcMatchTy (T k (a::k))  (T j (b::j))

Then we'll match k :-> j, as expected. But then in unify_tys
we invoke
   unify_tys env (a::k) (b::j) (Refl j)

Although we have unified k and j, it's very important that we put
(Refl j), /not/ (Refl k) as the fourth argument to unify_tys. If
we put (Refl k) we'd end up with the substitution
  a :-> b |> Refl k
which is bogus because one of the template variables, k,
appears in the range of the substitution.  Eek.

Similar care is needed in unify_ty_app.


Note [Polykinded tycon applications]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose  T :: forall k. Type -> k
and we are unifying
  ty1:  T @Type         Int       :: Type
  ty2:  T @(Type->Type) Int Int   :: Type

These two TyConApps have the same TyCon at the front but they
(legitimately) have different numbers of arguments.
They are surelyApart, so we can report that without looking any
further (see #15704).
Note [Unifying type applications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unifying type applications is quite subtle, as we found in #23134 and #22647, when type families are involved. Suppose type family F a :: Type -> Type type family G k :: k = r | r -> k and consider these examples: * F Int ~ F Char, where F is injective Since F is injective, we can reduce this to Int ~ Char, therefore SurelyApart. * F Int ~ F Char, where F is not injective Without injectivity, return MaybeApart. * G Type ~ G (Type -> Type) Int Even though G is injective and the arguments to G are different, we cannot deduce apartness because the RHS is oversaturated. For example, G might be defined as G Type = Maybe Int G (Type -> Type) = Maybe So we return MaybeApart. * F Int Bool ~ F Int Char -- SurelyApart (since Bool is apart from Char) F Int Bool ~ Maybe a -- MaybeApart F Int Bool ~ a b -- MaybeApart F Int Bool ~ Char -> Bool -- MaybeApart An oversaturated type family can match an application, whether it's a TyConApp, AppTy or FunTy. Decompose. * F Int ~ a b We cannot decompose a saturated, or under-saturated type family application. We return MaybeApart. To handle all those conditions, unify_ty goes through the following checks in sequence, where Fn is a type family of arity n: * (C1) Fn x_1 ... x_n ~ Fn y_1 .. y_n A saturated application. Here we can unify arguments in which Fn is injective. * (C2) Fn x_1 ... x_n ~ anything, anything ~ Fn x_1 ... x_n A saturated type family can match anything - we return MaybeApart. * (C3) Fn x_1 ... x_m ~ a b, a b ~ Fn x_1 ... x_m where m > n An oversaturated type family can be decomposed. * (C4) Fn x_1 ... x_m ~ anything, anything ~ Fn x_1 ... x_m, where m > n If we couldn't decompose in the previous step, we return SurelyApart. Afterwards, the rest of the code doesn't have to worry about type families. 
-}

-------------- unify_ty: the main workhorse -----------

type AmIUnifying = Bool   -- True  <=> Unifying
                          -- False <=> Matching

unify_ty :: UMEnv
         -> Type -> Type  -- Types to be unified and a co
         -> CoercionN     -- A coercion between their kinds
                          -- See Note [Kind coercions in Unify]
         -> UM ()
-- See Note [Specification of unification]
-- Respects newtypes, PredTypes
-- See Note [Computing equality on types] in GHC.Core.Type

unify_ty _env (TyConApp tc1 []) (TyConApp tc2 []) _kco
  -- See Note [Comparing nullary type synonyms] in GHC.Core.Type.
  | tc1 == tc2
  = return ()

  -- See Note [coreView vs tcView] in GHC.Core.Type.
  -- Type vs Constraint is only MaybeApart, in either direction.
  | tc1 `hasKey` constraintKindTyConKey
  , tc2 `hasKey` liftedTypeKindTyConKey
  = maybeApart MARTypeVsConstraint

  | tc2 `hasKey` constraintKindTyConKey
  , tc1 `hasKey` liftedTypeKindTyConKey
  = maybeApart MARTypeVsConstraint

unify_ty env ty1 ty2 kco
    -- Now handle the cases we can "look through": synonyms and casts.
  | Just ty1' <- tcView ty1 = unify_ty env ty1' ty2 kco
  | Just ty2' <- tcView ty2 = unify_ty env ty1 ty2' kco
  | CastTy ty1' co <- ty1   = if um_unif env
                              then unify_ty env ty1' ty2 (co `mkTransCo` kco)
                              else -- See Note [Matching in the presence of casts (1)]
                                   do { subst <- getSubst env
                                      ; let co' = substCo subst co
                                      ; unify_ty env ty1' ty2 (co' `mkTransCo` kco) }
  | CastTy ty2' co <- ty2   = unify_ty env ty1 ty2' (kco `mkTransCo` mkSymCo co)

unify_ty env (TyVarTy tv1) ty2 kco
  = uVar env tv1 ty2 kco
unify_ty env ty1 (TyVarTy tv2) kco
  | um_unif env  -- If unifying, can swap args
  = uVar (umSwapRn env) tv2 ty1 (mkSymCo kco)

unify_ty env ty1 ty2 _kco
    -- Handle non-oversaturated type families first
    -- See Note [Unifying type applications]
    --
    -- (C1) If we have T x1 ... xn ~ T y1 ... yn, use injectivity information of T
    -- Note that both sides must not be oversaturated
  | Just (tc1, tys1) <- isSatTyFamApp mb_tc_app1
  , Just (tc2, tys2) <- isSatTyFamApp mb_tc_app2
  , tc1 == tc2
  = do { let inj = case tyConInjectivityInfo tc1 of
                          NotInjective -> repeat False
                          Injective bs -> bs

             (inj_tys1, noninj_tys1) = partitionByList inj tys1
             (inj_tys2, noninj_tys2) = partitionByList inj tys2

         -- Injective arguments are unified outright; non-injective
         -- arguments only when not doing an injectivity check, and then
         -- any SurelyApart is weakened to MaybeApart.
       ; unify_tys env inj_tys1 inj_tys2
       ; unless (um_inj_tf env) $ -- See (end of) Note [Specification of unification]
         don'tBeSoSure MARTypeFamily $ unify_tys env noninj_tys1 noninj_tys2 }

  | Just _ <- isSatTyFamApp mb_tc_app1  -- (C2) A (not-over-saturated) type-family application
  = maybeApart MARTypeFamily            -- behaves like a type variable; might match

  | Just _ <- isSatTyFamApp mb_tc_app2  -- (C2) A (not-over-saturated) type-family application
                                        -- behaves like a type variable; might unify
                                        -- but doesn't match (as in the TyVarTy case)
  = if um_unif env then maybeApart MARTypeFamily else surelyApart

    -- Handle oversaturated type families.
    --
    -- They can match an application (TyConApp/FunTy/AppTy), this is handled
    -- the same way as in the AppTy case below.
    --
    -- If there is no application, an oversaturated type family can only
    -- match a type variable or a saturated type family,
    -- both of which we handled earlier. So we can say surelyApart.
  | Just (tc1, _) <- mb_tc_app1
  , isTypeFamilyTyCon tc1
  = if | Just (ty1a, ty1b) <- tcRepSplitAppTy_maybe ty1
       , Just (ty2a, ty2b) <- tcRepSplitAppTy_maybe ty2
       -> unify_ty_app env ty1a [ty1b] ty2a [ty2b]   -- (C3)
       | otherwise -> surelyApart                    -- (C4)

  | Just (tc2, _) <- mb_tc_app2
  , isTypeFamilyTyCon tc2
  = if | Just (ty1a, ty1b) <- tcRepSplitAppTy_maybe ty1
       , Just (ty2a, ty2b) <- tcRepSplitAppTy_maybe ty2
       -> unify_ty_app env ty1a [ty1b] ty2a [ty2b]   -- (C3)
       | otherwise -> surelyApart                    -- (C4)

    -- At this point, neither tc1 nor tc2 can be a type family.
  | Just (tc1, tys1) <- mb_tc_app1
  , Just (tc2, tys2) <- mb_tc_app2
  , tc1 == tc2
  = do { massertPpr (isInjectiveTyCon tc1 Nominal) (ppr tc1)
       ; unify_tys env tys1 tys2
       }

  where
    mb_tc_app1 = tcSplitTyConApp_maybe ty1
    mb_tc_app2 = tcSplitTyConApp_maybe ty2

        -- Applications need a bit of care!
        -- They can match FunTy and TyConApp, so use splitAppTy_maybe
        -- NB: we've already dealt with type variables,
        -- so if one type is an App the other one jolly well better be too
unify_ty env (AppTy ty1a ty1b) ty2 _kco
  | Just (ty2a, ty2b) <- tcRepSplitAppTy_maybe ty2
  = unify_ty_app env ty1a [ty1b] ty2a [ty2b]

unify_ty env ty1 (AppTy ty2a ty2b) _kco
  | Just (ty1a, ty1b) <- tcRepSplitAppTy_maybe ty1
  = unify_ty_app env ty1a [ty1b] ty2a [ty2b]

-- tcSplitTyConApp won't split a (=>), so we handle this separately.
unify_ty env (FunTy InvisArg _w1 arg1 res1) (FunTy InvisArg _w2 arg2 res2) _kco
  -- Look at result representations, but arg representations would be redundant
  -- as anything that can appear to the left of => is lifted.
  -- And anything that can appear to the left of => is unrestricted, so skip the
  -- multiplicities.
  | Just res_rep1 <- getRuntimeRep_maybe res1
  , Just res_rep2 <- getRuntimeRep_maybe res2
  = unify_tys env [res_rep1, arg1, res1] [res_rep2, arg2, res2]

unify_ty _ (LitTy x) (LitTy y) _kco
  | x == y
  = return ()

unify_ty env (ForAllTy (Bndr tv1 _) ty1) (ForAllTy (Bndr tv2 _) ty2) kco
  = do { -- Unify the binders' kinds first, then the bodies with the
         -- two binders lined up in the renaming environment
         unify_ty env (varType tv1) (varType tv2) (mkNomReflCo liftedTypeKind)
       ; let env' = umRnBndr2 env tv1 tv2
       ; unify_ty env' ty1 ty2 kco }

-- See Note [Matching coercion variables]
unify_ty env (CoercionTy co1) (CoercionTy co2) kco
  = do { c_subst <- getCvSubstEnv
       ; case co1 of
           CoVarCo cv
             | not (um_unif env)
             , not (cv `elemVarEnv` c_subst)
             , let (_, co_l, co_r) = decomposeFunCo Nominal kco
                      -- Because the coercion is used in a type, it should be
                      -- safe to ignore the multiplicity coercion.
                      -- cv   :: t1 ~ t2
                      -- co2  :: s1 ~ s2
                      -- co_l :: t1 ~ s1
                      -- co_r :: t2 ~ s2
                   rhs_co = co_l `mkTransCo` co2 `mkTransCo` mkSymCo co_r
             , BindMe <- tvBindFlag env cv (CoercionTy rhs_co)
             -> do { checkRnEnv env (tyCoVarsOfCo co2)
                   ; extendCvEnv cv rhs_co }

           _ -> return () }

unify_ty _ _ _ _ = surelyApart

unify_ty_app :: UMEnv -> Type -> [Type] -> Type -> [Type] -> UM ()
-- Decompose both applications as far as possible, then unify
-- kinds-before-types; the argument lists are built up in lockstep,
-- so they have the same length at every call site in this module.
unify_ty_app env ty1 ty1args ty2 ty2args
  | Just (ty1', ty1a) <- repSplitAppTy_maybe ty1
  , Just (ty2', ty2a) <- repSplitAppTy_maybe ty2
  = unify_ty_app env ty1' (ty1a : ty1args) ty2' (ty2a : ty2args)

  | otherwise
  = do { let ki1 = typeKind ty1
             ki2 = typeKind ty2
             -- See Note [Kind coercions in Unify]
       ; unify_ty env ki1 ki2 (mkNomReflCo liftedTypeKind)
       ; unify_ty env ty1 ty2 (mkNomReflCo ki2)
                  -- Very important: 'ki2' not 'ki1'
                  -- See Note [Matching in the presence of casts (2)]
       ; unify_tys env ty1args ty2args }

unify_tys :: UMEnv -> [Type] -> [Type] -> UM ()
unify_tys env orig_xs orig_ys
  = go orig_xs orig_ys
  where
    go []     []     = return ()
    go (x:xs) (y:ys)
      -- See Note [Kind coercions in Unify]
      = do { unify_ty env x y (mkNomReflCo $ typeKind y)
                 -- Very important: 'y' not 'x'
                 -- See Note [Matching in the presence of casts (2)]
           ; go xs ys }
    go _ _ = surelyApart
      -- Possibly different saturations of a polykinded tycon
      -- See Note [Polykinded tycon applications]

isSatTyFamApp :: Maybe (TyCon, [Type]) -> Maybe (TyCon, [Type])
-- Return the argument if we have a saturated type family application
-- If it is /over/ saturated then we return Nothing.  E.g.
--     unify_ty (F a b) (c d)    where F has arity 1
-- we definitely want to decompose that type application! (#22647)
isSatTyFamApp tapp@(Just (tc, tys))
  |  isTypeFamilyTyCon tc
     && not (tys `lengthExceeds` tyConArity tc)  -- Not over-saturated
  = tapp
isSatTyFamApp _ = Nothing

---------------------------------
uVar :: UMEnv
     -> InTyVar         -- Variable to be unified
     -> Type            -- with this Type
     -> Coercion        -- :: kind tv ~N kind ty
     -> UM ()

uVar env tv1 ty kco
 = do { -- Apply the ambient renaming
        let tv1' = umRnOccL env tv1

        -- Check to see whether tv1 is refined by the substitution
      ; subst <- getTvSubstEnv
      ; case (lookupVarEnv subst tv1') of
          Just ty' | um_unif env                -- Unifying, so call
                   -> unify_ty env ty' ty kco   -- back into unify
                   | otherwise
                   ->   -- Matching, we don't want to just recur here.
                        -- this is because the range of the subst is the target
                        -- type, not the template type. So, just check for
                        -- normal type equality.
                        unless ((ty' `mkCastTy` kco) `tcEqType` ty) $
                        surelyApart
                        -- NB: it's important to use `tcEqType` instead of
                        -- `eqType` here, otherwise we might not reject a
                        -- substitution which unifies `Type` with `Constraint`,
                        -- e.g. a call to tc_unify_tys with arguments
                        --
                        --   tys1 = [k,k]
                        --   tys2 = [Type, Constraint]
                        --
                        -- See test cases: T11715b, T20521.
          Nothing  -> uUnrefined env tv1' ty ty kco }  -- No, continue

uUnrefined :: UMEnv
           -> OutTyVar          -- variable to be unified
           -> Type              -- with this Type
           -> Type              -- (version w/ expanded synonyms)
           -> Coercion          -- :: kind tv ~N kind ty
           -> UM ()

-- We know that tv1 isn't refined

uUnrefined env tv1' ty2 ty2' kco
  -- Use tcView, not coreView. See Note [coreView vs tcView] in GHC.Core.Type.
  | Just ty2'' <- tcView ty2'
  = uUnrefined env tv1' ty2 ty2'' kco    -- Unwrap synonyms
        -- This is essential, in case we have
        --      type Foo a = a
        -- and then unify  a ~ Foo a

  | TyVarTy tv2 <- ty2'
  = do { let tv2' = umRnOccR env tv2
       ; unless (tv1' == tv2' && um_unif env) $ do
           -- If we are unifying a ~ a, just return immediately
           -- Do not extend the substitution
           -- See Note [Self-substitution when matching]

          -- Check to see whether tv2 is refined
       { subst <- getTvSubstEnv
       ; case lookupVarEnv subst tv2 of
         {  Just ty' | um_unif env -> uUnrefined env tv1' ty' ty' kco
         ;  _ ->

    do {   -- So both are unrefined
           -- Bind one or the other, depending on which is bindable
           -- NB: the casts (with kco / sym kco) keep both candidate
           -- right-hand sides well-kinded; see Note [Kind coercions in Unify]
       ; let rhs1 = ty2 `mkCastTy` mkSymCo kco
             rhs2 = ty1 `mkCastTy` kco
             b1  = tvBindFlag env tv1' rhs1
             b2  = tvBindFlag env tv2' rhs2
             ty1 = mkTyVarTy tv1'
       ; case (b1, b2) of
           (BindMe, _) -> bindTv env tv1' rhs1
           (_, BindMe) | um_unif env
                       -> bindTv (umSwapRn env) tv2 rhs2

           _ | tv1' == tv2' -> return ()
             -- How could this happen? If we're only matching and if
             -- we're comparing forall-bound variables.

           _ -> surelyApart
  }}}}

uUnrefined env tv1' ty2 _ kco -- ty2 is not a type variable
  = case tvBindFlag env tv1' rhs of
      Apart  -> surelyApart
      BindMe -> bindTv env tv1' rhs
  where
    -- Cast so that the bound type has tv1's kind;
    -- see Note [Kind coercions in Unify]
    rhs = ty2 `mkCastTy` mkSymCo kco

bindTv :: UMEnv -> OutTyVar -> Type -> UM ()
-- OK, so we want to extend the substitution with tv := ty
-- But first, we must do a couple of checks
bindTv env tv1 ty2
  = do  { let free_tvs2 = tyCoVarsOfType ty2

        -- Make sure tys mentions no local variables
        -- E.g.  (forall a. b) ~ (forall a. [a])
        -- We should not unify b := [a]!
        ; checkRnEnv env free_tvs2

        -- Occurs check, see Note [Fine-grained unification]
        -- Make sure you include 'kco' (which ty2 does) #14846
        ; occurs <- occursCheck env tv1 free_tvs2

          -- An occurs-check failure is only MaybeApart: a type-family
          -- reduction might still make the types equal (MARInfinite)
        ; if occurs then maybeApart MARInfinite
                    else extendTvEnv tv1 ty2 }

occursCheck :: UMEnv -> TyVar -> VarSet -> UM Bool
occursCheck env tv free_tvs
  | um_unif env
  = do { tsubst <- getTvSubstEnv
       ; return (tv `elemVarSet` niSubstTvSet tsubst free_tvs) }

  | otherwise      -- Matching; no occurs check
  = return False   -- See Note [Self-substitution when matching]

{-
%************************************************************************
%*                                                                      *
                Binding decisions
*                                                                      *
************************************************************************
-}

data BindFlag
  = BindMe      -- ^ A regular type variable
  | Apart       -- ^ Declare that this type variable is /apart/ from the
                -- type provided. That is, the type variable will never
                -- be instantiated to that type.
                -- See also Note [Binding when looking up instances]
                -- in GHC.Core.InstEnv.
  deriving Eq
-- NB: It would be conceivable to have an analogue to MaybeApart here,
-- but there is not yet a need.

{-
************************************************************************
*                                                                      *
                Unification monad
*                                                                      *
************************************************************************
-}

data UMEnv
  = UMEnv { um_unif :: AmIUnifying

          , um_inj_tf :: Bool
            -- Checking for injectivity?
            -- See (end of) Note [Specification of unification]

          , um_rn_env :: RnEnv2
            -- Renaming InTyVars to OutTyVars; this eliminates
            -- shadowing, and lines up matching foralls on the left
            -- and right

          , um_skols :: TyVarSet
            -- OutTyVars bound by a forall in this unification;
            -- Do not bind these in the substitution!
            -- See the function tvBindFlag

          , um_bind_fun :: BindFun
            -- User-supplied BindFlag function,
            -- for variables not in um_skols
          }

data UMState = UMState
                   { um_tv_env   :: TvSubstEnv
                   , um_cv_env   :: CvSubstEnv }

-- | The unification monad: a state monad over 'UMState' whose results
-- are wrapped in 'UnifyResultM' (so failure is 'SurelyApart' and
-- soft failure is 'MaybeApart').
newtype UM a
  = UM' { unUM :: UMState -> UnifyResultM (UMState, a) }
    -- See Note [The one-shot state monad trick] in GHC.Utils.Monad
  deriving (Functor)

pattern UM :: (UMState -> UnifyResultM (UMState, a)) -> UM a
-- See Note [The one-shot state monad trick] in GHC.Utils.Monad
pattern UM m <- UM' m
  where
    UM m = UM' (oneShot m)

instance Applicative UM where
      pure a = UM (\s -> pure (s, a))
      (<*>)  = ap

instance Monad UM where
  {-# INLINE (>>=) #-}
  -- See Note [INLINE pragmas and (>>)] in GHC.Utils.Monad
  m >>= k  = UM (\state ->
                  do { (state', v) <- unUM m state
                     ; unUM (k v) state' })

instance MonadFail UM where
    fail _   = UM (\_ -> SurelyApart) -- failed pattern match

initUM :: TvSubstEnv  -- subst to extend
       -> CvSubstEnv
       -> UM a -> UnifyResultM a
-- Run a UM computation from the given starting substitutions,
-- discarding the final state and keeping only the result.
initUM subst_env cv_subst_env um
  = case unUM um state of
      Unifiable (_, subst)    -> Unifiable subst
      MaybeApart r (_, subst) -> MaybeApart r subst
      SurelyApart             -> SurelyApart
  where
    state = UMState { um_tv_env = subst_env
                    , um_cv_env = cv_subst_env }

tvBindFlag :: UMEnv -> OutTyVar -> Type -> BindFlag
-- Skolems (forall-bound in this unification) are never bindable;
-- everything else is decided by the user-supplied BindFun.
tvBindFlag env tv rhs
  | tv `elemVarSet` um_skols env = Apart
  | otherwise                    = um_bind_fun env tv rhs

getTvSubstEnv :: UM TvSubstEnv
getTvSubstEnv = UM $ \state -> Unifiable (state, um_tv_env state)

getCvSubstEnv :: UM CvSubstEnv
getCvSubstEnv = UM $ \state -> Unifiable (state, um_cv_env state)

getSubst :: UMEnv -> UM TCvSubst
-- Package the current tv/cv substitution environments into a TCvSubst,
-- using the renaming environment's in-scope set
getSubst env = do { tv_env <- getTvSubstEnv
                  ; cv_env <- getCvSubstEnv
                  ; let in_scope = rnInScopeSet (um_rn_env env)
                  ; return (mkTCvSubst in_scope (tv_env, cv_env)) }

extendTvEnv :: TyVar -> Type -> UM ()
extendTvEnv tv ty = UM $ \state ->
  Unifiable (state { um_tv_env = extendVarEnv (um_tv_env state) tv ty }, ())

extendCvEnv :: CoVar -> Coercion -> UM ()
extendCvEnv cv co = UM $ \state ->
  Unifiable (state { um_cv_env = extendVarEnv (um_cv_env state) cv co }, ())

umRnBndr2 :: UMEnv -> TyCoVar -> TyCoVar -> UMEnv
-- Line up two forall binders in the renaming env, and record the
-- fresh variable as a skolem (not bindable in the substitution)
umRnBndr2 env v1 v2
  = env { um_rn_env = rn_env', um_skols = um_skols env `extendVarSet` v' }
  where
    (rn_env', v') = rnBndr2_var (um_rn_env env) v1 v2

checkRnEnv :: UMEnv -> VarSet -> UM ()
-- Fail (surelyApart) if the variable set mentions any skolem
checkRnEnv env varset
  | isEmptyVarSet skol_vars           = return ()
  | varset `disjointVarSet` skol_vars = return ()
  | otherwise                         = surelyApart
  where
    skol_vars = um_skols env
    -- NB: That isEmptyVarSet guard is a critical optimization;
    -- it means we don't have to calculate the free vars of
    -- the type, often saving quite a bit of allocation.

-- | Converts any SurelyApart to a MaybeApart
don'tBeSoSure :: MaybeApartReason -> UM () -> UM ()
don'tBeSoSure r um = UM $ \ state ->
  case unUM um state of
    SurelyApart -> MaybeApart r (state, ())
    other       -> other

umRnOccL :: UMEnv -> TyVar -> TyVar
umRnOccL env v = rnOccL (um_rn_env env) v

umRnOccR :: UMEnv -> TyVar -> TyVar
umRnOccR env v = rnOccR (um_rn_env env) v

umSwapRn :: UMEnv -> UMEnv
umSwapRn env = env { um_rn_env = rnSwap (um_rn_env env) }

maybeApart :: MaybeApartReason -> UM ()
maybeApart r = UM (\state -> MaybeApart r (state, ()))

surelyApart :: UM a
surelyApart = UM (\_ -> SurelyApart)

{-
%************************************************************************
%*                                                                      *
            Matching a (lifted) type against a coercion
%*                                                                      *
%************************************************************************

This section defines essentially an inverse to liftCoSubst. It is defined
here to avoid a dependency from Coercion on this module.

-}

data MatchEnv = ME { me_tmpls :: TyVarSet
                   , me_env   :: RnEnv2 }

-- | 'liftCoMatch' is sort of inverse to 'liftCoSubst'.  In particular, if
--   @liftCoMatch vars ty co == Just s@, then @liftCoSubst s ty == co@,
--   where @==@ there means that the result of 'liftCoSubst' has the same
--   type as the original co; but may be different under the hood.
--   That is, it matches a type against a coercion of the same
--   "shape", and returns a lifting substitution which could have been
--   used to produce the given coercion from the given type.
--   Note that this function is incomplete -- it might return Nothing
--   when there does indeed exist a possible lifting context.
--
--   This function is incomplete in that it doesn't respect the equality
--   in `eqType`. That is, it's possible that this will succeed for t1 and
--   fail for t2, even when t1 `eqType` t2. That's because it depends on
--   there being a very similar structure between the type and the coercion.
--   This incompleteness shouldn't be all that surprising, especially because
--   it depends on the structure of the coercion, which is a silly thing to do.
--
--   The lifting context produced doesn't have to be exacting in the roles
--   of the mappings. This is because any use of the lifting context will
--   also require a desired role. Thus, this algorithm prefers mapping to
--   nominal coercions where it can do so.
liftCoMatch :: TyCoVarSet -> Type -> Coercion -> Maybe LiftingContext
liftCoMatch tmpls ty co
  = do { -- First match at the kind level, then at the type level
         cenv1 <- ty_co_match menv emptyVarEnv ki ki_co ki_ki_co ki_ki_co
       ; cenv2 <- ty_co_match menv cenv1       ty co
                              (mkNomReflCo co_lkind) (mkNomReflCo co_rkind)
       ; return (LC (mkEmptyTCvSubst in_scope) cenv2) }
  where
    menv     = ME { me_tmpls = tmpls, me_env = mkRnEnv2 in_scope }
    in_scope = mkInScopeSet (tmpls `unionVarSet` tyCoVarsOfCo co)
    -- Like tcMatchTy, assume all the interesting variables
    -- in ty are in tmpls

    ki       = typeKind ty
    ki_co    = promoteCoercion co
    ki_ki_co = mkNomReflCo liftedTypeKind

    Pair co_lkind co_rkind = coercionKind ki_co

-- | 'ty_co_match' does all the actual work for 'liftCoMatch'.
ty_co_match :: MatchEnv   -- ^ ambient helpful info
            -> LiftCoEnv  -- ^ incoming subst
            -> Type       -- ^ ty, type to match
            -> Coercion   -- ^ co :: lty ~r rty, coercion to match against
            -> Coercion   -- ^ :: kind(lsubst(ty)) ~N kind(lty)
            -> Coercion   -- ^ :: kind(rsubst(ty)) ~N kind(rty)
            -> Maybe LiftCoEnv
   -- ^ Just env ==> liftCoSubst Nominal env ty == co, modulo roles.
   -- Also: Just env ==> lsubst(ty) == lty and rsubst(ty) == rty,
   -- where lsubst = lcSubstLeft(env) and rsubst = lcSubstRight(env)

-- First: look through type synonyms, and handle the reflexive case.
ty_co_match menv subst ty co lkco rkco
  | Just ty' <- coreView ty = ty_co_match menv subst ty' co lkco rkco
    -- why coreView here, not tcView? Because we're firmly after type-checking.
    -- This function is used only during coercion optimisation.

  -- handle Refl case:
  | tyCoVarsOfType ty `isNotInDomainOf` subst
  , Just (ty', _) <- isReflCo_maybe co
  , ty `eqType` ty'
    -- Why `eqType` and not `tcEqType`? Because this function is only used
    -- during coercion optimisation, after type-checking has finished.
  = Just subst

  where
    -- True iff no variable of 'set' is mapped by 'env'.
    isNotInDomainOf :: VarSet -> VarEnv a -> Bool
    isNotInDomainOf set env
      = noneSet (\v -> elemVarEnv v env) set

    -- True iff 'f' holds of no element of the set.
    noneSet :: (Var -> Bool) -> VarSet -> Bool
    noneSet f = allVarSet (not . f)

-- Casts on the type, and SymCo on the coercion.
ty_co_match menv subst ty co lkco rkco
  | CastTy ty' co' <- ty
     -- See Note [Matching in the presence of casts (1)]
     -- Strip the cast off the type, compensating by composing the
     -- (lifted, substituted) cast onto the ambient kind coercions.
  = let empty_subst  = mkEmptyTCvSubst (rnInScopeSet (me_env menv))
        substed_co_l = substCo (liftEnvSubstLeft empty_subst subst)  co'
        substed_co_r = substCo (liftEnvSubstRight empty_subst subst) co'
    in
    ty_co_match menv subst ty' co (substed_co_l `mkTransCo` lkco)
                                  (substed_co_r `mkTransCo` rkco)

  | SymCo co' <- co
    -- Matching against (SymCo co'): swap the two sides of the lifting
    -- environment (and the two kind coercions), match, then swap back.
  = swapLiftCoEnv <$> ty_co_match menv (swapLiftCoEnv subst) ty co' rkco lkco

-- Match a type variable against a non-refl coercion
ty_co_match menv subst (TyVarTy tv1) co lkco rkco
  | Just co1' <- lookupVarEnv subst tv1' -- tv1' is already bound to co1
  = if eqCoercionX (nukeRnEnvL rn_env) co1' co
    then Just subst
    else Nothing       -- no match since tv1 matches two different coercions

  | tv1' `elemVarSet` me_tmpls menv           -- tv1' is a template var
  = if any (inRnEnvR rn_env) (tyCoVarsOfCoList co)
    then Nothing      -- occurs check failed
    else Just $ extendVarEnv subst tv1' $
                -- The kind coercions must be cast away so that the stored
                -- coercion has the kind of tv1' itself.
                castCoercionKind co (mkSymCo lkco) (mkSymCo rkco)

  | otherwise
  = Nothing

  where
    rn_env = me_env menv
    tv1' = rnOccL rn_env tv1

-- just look through SubCo's. We don't really care about roles here.
ty_co_match menv subst ty (SubCo co) lkco rkco
  = ty_co_match menv subst ty co lkco rkco

-- Applications: decompose one argument at a time and hand off to
-- 'ty_co_match_app', which peels the rest.
ty_co_match menv subst (AppTy ty1a ty1b) co _lkco _rkco
  | Just (co2, arg2) <- splitAppCo_maybe co     -- c.f. Unify.match on AppTy
  = ty_co_match_app menv subst ty1a [ty1b] co2 [arg2]
ty_co_match menv subst ty1 (AppCo co2 arg2) _lkco _rkco
  | Just (ty1a, ty1b) <- repSplitAppTy_maybe ty1
       -- yes, the one from Type, not TcType; this is for coercion optimization
  = ty_co_match_app menv subst ty1a [ty1b] co2 [arg2]

ty_co_match menv subst (TyConApp tc1 tys) (TyConAppCo _ tc2 cos) _lkco _rkco
  = ty_co_match_tc menv subst tc1 tys tc2 cos

-- Function types: match multiplicity, both RuntimeReps, argument and result.
ty_co_match menv subst (FunTy _ w ty1 ty2) co _lkco _rkco
  | Just (tc, [co_mult,rrco1,rrco2,co1,co2]) <- splitTyConAppCo_maybe co
  , tc == funTyCon
  = let rr1 = getRuntimeRep ty1
        rr2 = getRuntimeRep ty2
        Pair lkcos rkcos
          = traverse (fmap (mkNomReflCo . typeKind) . coercionKind)
                     [co_mult,rrco1,rrco2,co1,co2]
    in
    -- NB: we include the RuntimeRep arguments in the matching; not doing so
    -- caused #21205.
    ty_co_match_args menv subst [w, rr1, rr2, ty1, ty2]
                     [co_mult, rrco1, rrco2, co1, co2]
                     lkcos rkcos

-- Foralls over *type* variables: match the binder kinds, then extend the
-- rename-env with the two binders and match the bodies.
ty_co_match menv subst (ForAllTy (Bndr tv1 _) ty1)
                       (ForAllCo tv2 kind_co2 co2)
                       lkco rkco
  | isTyVar tv1 && isTyVar tv2
  = do { subst1 <- ty_co_match menv subst (tyVarKind tv1) kind_co2
                               ki_ki_co ki_ki_co
       ; let rn_env0 = me_env menv
             rn_env1 = rnBndr2 rn_env0 tv1 tv2
             menv'   = menv { me_env = rn_env1 }
       ; ty_co_match menv' subst1 ty1 co2 lkco rkco }
  where
    ki_ki_co = mkNomReflCo liftedTypeKind

-- ty_co_match menv subst (ForAllTy (Bndr cv1 _) ty1)
--                        (ForAllCo cv2 kind_co2 co2)
--                        lkco rkco
--   | isCoVar cv1 && isCoVar cv2
--   We seems not to have enough information for this case
--   1. Given:
--        cv1      :: (s1 :: k1) ~r (s2 :: k2)
--        kind_co2 :: (s1' ~ s2') ~N (t1 ~ t2)
--        eta1     = mkNthCo role 2 (downgradeRole r Nominal kind_co2)
--                 :: s1' ~ t1
--        eta2     = mkNthCo role 3 (downgradeRole r Nominal kind_co2)
--                 :: s2' ~ t2
--      Wanted:
--        subst1 <- ty_co_match menv subst s1 eta1 kco1 kco2
--        subst2 <- ty_co_match menv subst1 s2 eta2 kco3 kco4
--      Question: How do we get kcoi?
--   2. Given:
--        lkco :: <*>    -- See Note [Weird typing rule for ForAllTy] in GHC.Core.TyCo.Rep
--        rkco :: <*>
--      Wanted:
--        ty_co_match menv' subst2 ty1 co2 lkco' rkco'
--      Question: How do we get lkco' and rkco'?

ty_co_match _ subst (CoercionTy {}) _ _ _
  = Just subst -- don't inspect coercions

-- Drop the MCo cast on a GRefl, folding it into the right-hand kind coercion.
ty_co_match menv subst ty (GRefl r t (MCo co)) lkco rkco
  = ty_co_match menv subst ty (GRefl r t MRefl) lkco (rkco `mkTransCo` mkSymCo co)

ty_co_match menv subst ty co1 lkco rkco
  | Just (CastTy t co, r) <- isReflCo_maybe co1
  -- In @pushRefl@, pushing reflexive coercion inside CastTy will give us
  -- t |> co ~ t ; <t> ; t ~ t |> co
  -- But transitive coercions are not helpful. Therefore we deal
  -- with it here: we do recursion on the smaller reflexive coercion,
  -- while propagating the correct kind coercions.
  = let kco' = mkSymCo co
    in ty_co_match menv subst ty (mkReflCo r t) (lkco `mkTransCo` kco')
                                                (rkco `mkTransCo` kco')

-- Last resort: push a reflexive coercion inward one level and retry.
ty_co_match menv subst ty co lkco rkco
  | Just co' <- pushRefl co = ty_co_match menv subst ty co' lkco rkco
  | otherwise               = Nothing

-- | Match a saturated 'TyConApp' against a 'TyConAppCo', argument-wise.
-- Fails unless the two tycons are the same.
ty_co_match_tc :: MatchEnv -> LiftCoEnv
               -> TyCon -> [Type]
               -> TyCon -> [Coercion]
               -> Maybe LiftCoEnv
ty_co_match_tc menv subst tc1 tys1 tc2 cos2
  = do { guard (tc1 == tc2)
       ; ty_co_match_args menv subst tys1 cos2 lkcos rkcos }
  where
    Pair lkcos rkcos
      = traverse (fmap (mkNomReflCo . typeKind) . coercionKind) cos2

-- | Match a (possibly nested) type application against a (possibly nested)
-- coercion application, peeling arguments off both sides in lockstep.
ty_co_match_app :: MatchEnv -> LiftCoEnv
                -> Type -> [Type] -> Coercion -> [Coercion]
                -> Maybe LiftCoEnv
ty_co_match_app menv subst ty1 ty1args co2 co2args
  | Just (ty1', ty1a) <- repSplitAppTy_maybe ty1
  , Just (co2', co2a) <- splitAppCo_maybe co2
  = ty_co_match_app menv subst ty1' (ty1a : ty1args) co2' (co2a : co2args)

  | otherwise
  -- Can't decompose further: match the kinds, then the heads, then the
  -- accumulated argument lists.
  = do { subst1 <- ty_co_match menv subst ki1 ki2 ki_ki_co ki_ki_co
       ; let Pair lkco rkco = mkNomReflCo <$> coercionKind ki2
       ; subst2 <- ty_co_match menv subst1 ty1 co2 lkco rkco
       ; let Pair lkcos rkcos
               = traverse (fmap (mkNomReflCo . typeKind) . coercionKind) co2args
       ; ty_co_match_args menv subst2 ty1args co2args lkcos rkcos }
  where
    ki1 = typeKind ty1
    ki2 = promoteCoercion co2
    ki_ki_co = mkNomReflCo liftedTypeKind

-- | Match a list of types against a list of coercions, pairwise, threading
-- the lifting environment through. Fails if the lists have different lengths.
ty_co_match_args :: MatchEnv -> LiftCoEnv -> [Type]
                 -> [Coercion] -> [Coercion] -> [Coercion]
                 -> Maybe LiftCoEnv
ty_co_match_args _    subst []       []         _ _ = Just subst
ty_co_match_args menv subst (ty:tys) (arg:args) (lkco:lkcos) (rkco:rkcos)
  = do { subst' <- ty_co_match menv subst ty arg lkco rkco
       ; ty_co_match_args menv subst' tys args lkcos rkcos }
ty_co_match_args _    _     _        _          _ _ = Nothing

-- | Push a reflexive coercion one level into the structure of its type,
-- e.g. @Refl (AppTy t1 t2)@ becomes @AppCo (Refl t1) (Refl t2)@.
-- Returns Nothing if the coercion is not reflexive, or its type cannot
-- be decomposed.
pushRefl :: Coercion -> Maybe Coercion
pushRefl co =
  case (isReflCo_maybe co) of
    Just (AppTy ty1 ty2, Nominal)
      -> Just (AppCo (mkReflCo Nominal ty1) (mkNomReflCo ty2))
    Just (FunTy _ w ty1 ty2, r)
      | Just rep1 <- getRuntimeRep_maybe ty1
      , Just rep2 <- getRuntimeRep_maybe ty2
      ->  Just (TyConAppCo r funTyCon [ multToCo w, mkReflCo r rep1, mkReflCo r rep2
                                      , mkReflCo r ty1,  mkReflCo r ty2 ])
    Just (TyConApp tc tys, r)
      -> Just (TyConAppCo r tc (zipWith mkReflCo (tyConRolesX r tc) tys))
    Just (ForAllTy (Bndr tv _) ty, r)
      -> Just (ForAllCo tv (mkNomReflCo (varType tv)) (mkReflCo r ty))
    -- NB: NoRefl variant. Otherwise, we get a loop!
    _ -> Nothing

{-
************************************************************************
*                                                                      *
             Flattening
*                                                                      *
************************************************************************

Note [Flattening type-family applications when matching instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As described in "Closed type families with overlapping equations"
http://research.microsoft.com/en-us/um/people/simonpj/papers/ext-f/axioms-extended.pdf
we need to flatten core types before unifying them, when checking for "surely-apart"
against earlier equations of a closed type family.
Flattening means replacing all top-level uses of type functions with
fresh variables, *taking care to preserve sharing*.
That is, the type (Either (F a b) (F a b)) should flatten to (Either c c), never (Either c d). Here is a nice example of why it's all necessary: type family F a b where F Int Bool = Char F a b = Double type family G a -- open, no instances How do we reduce (F (G Float) (G Float))? The first equation clearly doesn't match, while the second equation does. But, before reducing, we must make sure that the target can never become (F Int Bool). Well, no matter what G Float becomes, it certainly won't become *both* Int and Bool, so indeed we're safe reducing (F (G Float) (G Float)) to Double. This is necessary not only to get more reductions (which we might be willing to give up on), but for substitutivity. If we have (F x x), we can see that (F x x) can reduce to Double. So, it had better be the case that (F blah blah) can reduce to Double, no matter what (blah) is! Flattening as done below ensures this. We also use this flattening operation to check for class instances. If we have instance C (Maybe b) instance {-# OVERLAPPING #-} C (Maybe Bool) [W] C (Maybe (F a)) we want to know that the second instance might match later. So we flatten the (F a) in the target before trying to unify with instances. (This is done in GHC.Core.InstEnv.lookupInstEnv'.) The algorithm works by building up a TypeMap TyVar, mapping type family applications to fresh variables. This mapping must be threaded through all the function calls, as any entry in the mapping must be propagated to all future nodes in the tree. The algorithm also must track the set of in-scope variables, in order to make fresh variables as it flattens. (We are far from a source of fresh Uniques.) See Wrinkle 2, below. There are wrinkles, of course: 1. The flattening algorithm must account for the possibility of inner `forall`s. (A `forall` seen here can happen only because of impredicativity. However, the flattening operation is an algorithm in Core, which is impredicative.) Suppose we have (forall b. F b) -> (forall b. 
F b). Of course, those two bs are entirely unrelated, and so we should certainly not flatten the two calls F b to the same variable. Instead, they must be treated separately. We thus carry a substitution that freshens variables; we must apply this substitution (in `coreFlattenTyFamApp`) before looking up an application in the environment. Note that the range of the substitution contains only TyVars, never anything else. For the sake of efficiency, we only apply this substitution when absolutely necessary. Namely: * We do not perform the substitution at all if it is empty. * We only need to worry about the arguments of a type family that are within the arity of said type family, so we can get away with not applying the substitution to any oversaturated type family arguments. * Importantly, we do /not/ achieve this substitution by recursively flattening the arguments, as this would be wrong. Consider `F (G a)`, where F and G are type families. We might decide that `F (G a)` flattens to `beta`. Later, the substitution is non-empty (but does not map `a`) and so we flatten `G a` to `gamma` and try to flatten `F gamma`. Of course, `F gamma` is unknown, and so we flatten it to `delta`, but it really should have been `beta`! Argh! Moral of the story: instead of flattening the arguments, just substitute them directly. 2. There are two different reasons we might add a variable to the in-scope set as we work: A. We have just invented a new flattening variable. B. We have entered a `forall`. Annoying here is that in-scope variable source (A) must be threaded through the calls. For example, consider (F b -> forall c. F c). Suppose that, when flattening F b, we invent a fresh variable c. Now, when we encounter (forall c. F c), we need to know c is already in scope so that we locally rename c to c'. However, if we don't thread through the in-scope set from one argument of (->) to the other, we won't know this and might get very confused. 
In contrast, source (B) increases only as we go deeper, as in-scope sets normally do. However, even here we must be careful. The TypeMap TyVar that contains mappings from type family applications to freshened variables will be threaded through both sides of (forall b. F b) -> (forall b. F b). We thus must make sure that the two `b`s don't get renamed to the same b1. (If they did, then looking up `F b1` would yield the same flatten var for each.) So, even though `forall`-bound variables should really be in the in-scope set only when they are in scope, we retain these variables even outside of their scope. This ensures that, if we encounter a fresh `forall`-bound b, we will rename it to b2, not b1. Note that keeping a larger in-scope set than strictly necessary is always OK, as in-scope sets are only ever used to avoid collisions. Sadly, the freshening substitution described in (1) really mustn't bind variables outside of their scope: note that its domain is the *unrenamed* variables. This means that the substitution gets "pushed down" (like a reader monad) while the in-scope set gets threaded (like a state monad). Because a TCvSubst contains its own in-scope set, we don't carry a TCvSubst; instead, we just carry a TvSubstEnv down, tying it to the InScopeSet traveling separately as necessary. 3. Consider `F ty_1 ... ty_n`, where F is a type family with arity k: type family F ty_1 ... ty_k :: res_k It's tempting to just flatten `F ty_1 ... ty_n` to `alpha`, where alpha is a flattening skolem. But we must instead flatten it to `alpha ty_(k+1) ... ty_n`—that is, by only flattening up to the arity of the type family. Why is this better? Consider the following concrete example from #16995: type family Param :: Type -> Type type family LookupParam (a :: Type) :: Type where LookupParam (f Char) = Bool LookupParam x = Int foo :: LookupParam (Param ()) foo = 42 In order for `foo` to typecheck, `LookupParam (Param ())` must reduce to `Int`. 
But if we flatten `Param ()` to `alpha`, then GHC can't be sure if `alpha` is apart from `f Char`, so it won't fall through to the second equation. But since the `Param` type family has arity 0, we can instead flatten `Param ()` to `alpha ()`, about which GHC knows with confidence is apart from `f Char`, permitting the second equation to be reached. Not only does this allow more programs to be accepted, it's also important for correctness. Not doing this was the root cause of the Core Lint error in #16995. flattenTys is defined here because of module dependencies. -} data FlattenEnv = FlattenEnv { fe_type_map :: TypeMap (TyVar, TyCon, [Type]) -- domain: exactly-saturated type family applications -- range: (fresh variable, type family tycon, args) , fe_in_scope :: InScopeSet } -- See Note [Flattening type-family applications when matching instances] emptyFlattenEnv :: InScopeSet -> FlattenEnv emptyFlattenEnv in_scope = FlattenEnv { fe_type_map = emptyTypeMap , fe_in_scope = in_scope } updateInScopeSet :: FlattenEnv -> (InScopeSet -> InScopeSet) -> FlattenEnv updateInScopeSet env upd = env { fe_in_scope = upd (fe_in_scope env) } flattenTys :: InScopeSet -> [Type] -> [Type] -- See Note [Flattening type-family applications when matching instances] flattenTys in_scope tys = fst (flattenTysX in_scope tys) flattenTysX :: InScopeSet -> [Type] -> ([Type], TyVarEnv (TyCon, [Type])) -- See Note [Flattening type-family applications when matching instances] -- NB: the returned types mention the fresh type variables -- in the domain of the returned env, whose range includes -- the original type family applications. Building a substitution -- from this information and applying it would yield the original -- types -- almost. The problem is that the original type might -- have something like (forall b. F a b); the returned environment -- can't really sensibly refer to that b. So it may include a locally- -- bound tyvar in its range. 
Currently, the only usage of this env't -- checks whether there are any meta-variables in it -- (in GHC.Tc.Solver.Monad.mightEqualLater), so this is all OK. flattenTysX in_scope tys = let (env, result) = coreFlattenTys emptyTvSubstEnv (emptyFlattenEnv in_scope) tys in (result, build_env (fe_type_map env)) where build_env :: TypeMap (TyVar, TyCon, [Type]) -> TyVarEnv (TyCon, [Type]) build_env env_in = foldTM (\(tv, tc, tys) env_out -> extendVarEnv env_out tv (tc, tys)) env_in emptyVarEnv coreFlattenTys :: TvSubstEnv -> FlattenEnv -> [Type] -> (FlattenEnv, [Type]) coreFlattenTys subst = mapAccumL (coreFlattenTy subst) coreFlattenTy :: TvSubstEnv -> FlattenEnv -> Type -> (FlattenEnv, Type) coreFlattenTy subst = go where go env ty | Just ty' <- coreView ty = go env ty' go env (TyVarTy tv) | Just ty <- lookupVarEnv subst tv = (env, ty) | otherwise = let (env', ki) = go env (tyVarKind tv) in (env', mkTyVarTy $ setTyVarKind tv ki) go env (AppTy ty1 ty2) = let (env1, ty1') = go env ty1 (env2, ty2') = go env1 ty2 in (env2, AppTy ty1' ty2') go env (TyConApp tc tys) -- NB: Don't just check if isFamilyTyCon: this catches *data* families, -- which are generative and thus can be preserved during flattening | not (isGenerativeTyCon tc Nominal) = coreFlattenTyFamApp subst env tc tys | otherwise = let (env', tys') = coreFlattenTys subst env tys in (env', mkTyConApp tc tys') go env ty@(FunTy { ft_mult = mult, ft_arg = ty1, ft_res = ty2 }) = let (env1, ty1') = go env ty1 (env2, ty2') = go env1 ty2 (env3, mult') = go env2 mult in (env3, ty { ft_mult = mult', ft_arg = ty1', ft_res = ty2' }) go env (ForAllTy (Bndr tv vis) ty) = let (env1, subst', tv') = coreFlattenVarBndr subst env tv (env2, ty') = coreFlattenTy subst' env1 ty in (env2, ForAllTy (Bndr tv' vis) ty') go env ty@(LitTy {}) = (env, ty) go env (CastTy ty co) = let (env1, ty') = go env ty (env2, co') = coreFlattenCo subst env1 co in (env2, CastTy ty' co') go env (CoercionTy co) = let (env', co') = coreFlattenCo subst env co in 
(env', CoercionTy co') -- when flattening, we don't care about the contents of coercions. -- so, just return a fresh variable of the right (flattened) type coreFlattenCo :: TvSubstEnv -> FlattenEnv -> Coercion -> (FlattenEnv, Coercion) coreFlattenCo subst env co = (env2, mkCoVarCo covar) where (env1, kind') = coreFlattenTy subst env (coercionType co) covar = mkFlattenFreshCoVar (fe_in_scope env1) kind' -- Add the covar to the FlattenEnv's in-scope set. -- See Note [Flattening type-family applications when matching instances], wrinkle 2A. env2 = updateInScopeSet env1 (flip extendInScopeSet covar) coreFlattenVarBndr :: TvSubstEnv -> FlattenEnv -> TyCoVar -> (FlattenEnv, TvSubstEnv, TyVar) coreFlattenVarBndr subst env tv = (env2, subst', tv') where -- See Note [Flattening type-family applications when matching instances], wrinkle 2B. kind = varType tv (env1, kind') = coreFlattenTy subst env kind tv' = uniqAway (fe_in_scope env1) (setVarType tv kind') subst' = extendVarEnv subst tv (mkTyVarTy tv') env2 = updateInScopeSet env1 (flip extendInScopeSet tv') coreFlattenTyFamApp :: TvSubstEnv -> FlattenEnv -> TyCon -- type family tycon -> [Type] -- args, already flattened -> (FlattenEnv, Type) coreFlattenTyFamApp tv_subst env fam_tc fam_args = case lookupTypeMap type_map fam_ty of Just (tv, _, _) -> (env', mkAppTys (mkTyVarTy tv) leftover_args') Nothing -> let tyvar_name = mkFlattenFreshTyName fam_tc tv = uniqAway in_scope $ mkTyVar tyvar_name (typeKind fam_ty) ty' = mkAppTys (mkTyVarTy tv) leftover_args' env'' = env' { fe_type_map = extendTypeMap type_map fam_ty (tv, fam_tc, sat_fam_args) , fe_in_scope = extendInScopeSet in_scope tv } in (env'', ty') where arity = tyConArity fam_tc tcv_subst = TCvSubst (fe_in_scope env) tv_subst emptyVarEnv (sat_fam_args, leftover_args) = assert (arity <= length fam_args) $ splitAt arity fam_args -- Apply the substitution before looking up an application in the -- environment. 
See Note [Flattening type-family applications when matching instances], -- wrinkle 1. -- NB: substTys short-cuts the common case when the substitution is empty. sat_fam_args' = substTys tcv_subst sat_fam_args (env', leftover_args') = coreFlattenTys tv_subst env leftover_args -- `fam_tc` may be over-applied to `fam_args` (see -- Note [Flattening type-family applications when matching instances] -- wrinkle 3), so we split it into the arguments needed to saturate it -- (sat_fam_args') and the rest (leftover_args') fam_ty = mkTyConApp fam_tc sat_fam_args' FlattenEnv { fe_type_map = type_map , fe_in_scope = in_scope } = env' mkFlattenFreshTyName :: Uniquable a => a -> Name mkFlattenFreshTyName unq = mkSysTvName (getUnique unq) (fsLit "flt") mkFlattenFreshCoVar :: InScopeSet -> Kind -> CoVar mkFlattenFreshCoVar in_scope kind = let uniq = unsafeGetFreshLocalUnique in_scope name = mkSystemVarName uniq (fsLit "flc") in mkCoVar name kind ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/UsageEnv.hs0000644000000000000000000000605514472400112020602 0ustar0000000000000000module GHC.Core.UsageEnv ( Usage(..) , UsageEnv , addUE , addUsage , bottomUE , deleteUE , lookupUE , scaleUE , scaleUsage , supUE , supUEs , unitUE , zeroUE ) where import Data.Foldable import GHC.Prelude import GHC.Core.Multiplicity import GHC.Types.Name import GHC.Types.Name.Env import GHC.Utils.Outputable import GHC.Utils.Panic -- -- * Usage environments -- -- The typechecker and the linter output usage environments. See Note [Usages] -- in Multiplicity. Every absent name being considered to map to 'Zero' of -- 'Bottom' depending on a flag. See Note [Zero as a usage] in Multiplicity, see -- Note [Bottom as a usage] in Multiplicity. 
-- | The usage of a single name: either absent ('Zero' or 'Bottom'; which of
-- the two an absent name denotes is a property of the enclosing 'UsageEnv'
-- below) or used with some multiplicity ('MUsage').
data Usage = Zero | Bottom | MUsage Mult

instance Outputable Usage where
  ppr Zero = text "0"
  ppr Bottom = text "Bottom"
  ppr (MUsage x) = ppr x

-- | Sum of two usages. 'Zero' and 'Bottom' are both neutral for addition;
-- two proper multiplicities are combined with 'mkMultAdd'.
addUsage :: Usage -> Usage -> Usage
addUsage Zero x = x
addUsage x Zero = x
addUsage Bottom x = x
addUsage x Bottom = x
addUsage (MUsage x) (MUsage y) = MUsage $ mkMultAdd x y

-- | Scale a usage by a multiplicity. 'Bottom' is preserved only when
-- scaling by 'One'; scaling 'Bottom' by any other multiplicity x yields
-- @MUsage x@. 'Zero' absorbs any scaling.
scaleUsage :: Mult -> Usage -> Usage
scaleUsage One Bottom     = Bottom
scaleUsage _   Zero       = Zero
scaleUsage x   Bottom     = MUsage x
scaleUsage x   (MUsage y) = MUsage $ mkMultMul x y

-- For now, we use extra multiplicity Bottom for empty case.
-- | A finite map from names to multiplicities, plus a flag saying whether
-- names absent from the map count as 'Bottom' (True) or 'Zero' (False);
-- see 'lookupUE'.
data UsageEnv = UsageEnv !(NameEnv Mult) Bool

-- | The usage environment in which a single name is used with
-- multiplicity @w@ (absent names count as 'Zero').
unitUE :: NamedThing n => n -> Mult -> UsageEnv
unitUE x w = UsageEnv (unitNameEnv (getName x) w) False

-- | The two empty environments: 'zeroUE' maps every name to 'Zero',
-- 'bottomUE' maps every name to 'Bottom'.
zeroUE, bottomUE :: UsageEnv
zeroUE = UsageEnv emptyNameEnv False

bottomUE = UsageEnv emptyNameEnv True

-- | Pointwise sum of two usage environments; the bottom flags are OR-ed,
-- mirroring 'addUsage' where 'Bottom' is neutral for addition.
addUE :: UsageEnv -> UsageEnv -> UsageEnv
addUE (UsageEnv e1 b1) (UsageEnv e2 b2) =
  UsageEnv (plusNameEnv_C mkMultAdd e1 e2) (b1 || b2)

-- | Scale every multiplicity in the environment by @w@. Scaling by 'One'
-- is the identity; scaling by anything else drops the bottom flag,
-- mirroring 'scaleUsage'.
scaleUE :: Mult -> UsageEnv -> UsageEnv
scaleUE One ue = ue
scaleUE w (UsageEnv e _) =
  UsageEnv (mapNameEnv (mkMultMul w) e) False

-- | Pointwise least upper bound of two usage environments. A name absent
-- from one side counts as 'Many' unless that side has the bottom flag set,
-- in which case the other side's multiplicity wins.
supUE :: UsageEnv -> UsageEnv -> UsageEnv
supUE (UsageEnv e1 False) (UsageEnv e2 False) =
  UsageEnv (plusNameEnv_CD mkMultSup e1 Many e2 Many) False
supUE (UsageEnv e1 b1) (UsageEnv e2 b2) = UsageEnv (plusNameEnv_CD2 combineUsage e1 e2) (b1 && b2)
   where combineUsage (Just x) (Just y) = mkMultSup x y
         combineUsage Nothing  (Just x) | b1        = x
                                        | otherwise = Many
         combineUsage (Just x) Nothing  | b2        = x
                                        | otherwise = Many
         combineUsage Nothing  Nothing  = pprPanic "supUE" (ppr e1 <+> ppr e2)
-- Note: If you are changing this logic, check 'mkMultSup' in Multiplicity as well.

-- | Least upper bound of a list of environments; 'bottomUE' is the unit
-- of 'supUE', so it is the fold's starting value.
supUEs :: [UsageEnv] -> UsageEnv
supUEs = foldr supUE bottomUE

-- | Remove a name from the environment (e.g. when leaving its binder's
-- scope); the bottom flag is unchanged.
deleteUE :: NamedThing n => UsageEnv -> n -> UsageEnv
deleteUE (UsageEnv e b) x = UsageEnv (delFromNameEnv e (getName x)) b

-- | |lookupUE x env| returns the multiplicity assigned to |x| in |env|, if |x| is not
-- bound in |env|, then returns |Zero| or |Bottom|.
lookupUE :: NamedThing n => UsageEnv -> n -> Usage lookupUE (UsageEnv e has_bottom) x = case lookupNameEnv e (getName x) of Just w -> MUsage w Nothing -> if has_bottom then Bottom else Zero instance Outputable UsageEnv where ppr (UsageEnv ne b) = text "UsageEnv:" <+> ppr ne <+> ppr b ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Utils.hs0000644000000000000000000034163514472400112020173 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Utility functions on @Core@ syntax -} -- | Commonly useful utilities for manipulating the Core language module GHC.Core.Utils ( -- * Constructing expressions mkCast, mkCastMCo, mkPiMCo, mkTick, mkTicks, mkTickNoHNF, tickHNFArgs, bindNonRec, needsCaseBinding, mkAltExpr, mkDefaultCase, mkSingleAltCase, -- * Taking expressions apart findDefault, addDefault, findAlt, isDefaultAlt, mergeAlts, trimConArgs, filterAlts, combineIdenticalAlts, refineDefaultAlt, scaleAltsBy, -- * Properties of expressions exprType, coreAltType, coreAltsType, mkLamType, mkLamTypes, mkFunctionType, exprIsDupable, exprIsTrivial, getIdFromTrivialExpr, exprIsDeadEnd, getIdFromTrivialExpr_maybe, exprIsCheap, exprIsExpandable, exprIsCheapX, CheapAppFun, exprIsHNF, exprOkForSpeculation, exprOkForSideEffects, exprOkForSpecEval, exprIsWorkFree, exprIsConLike, isCheapApp, isExpandableApp, isSaturatedConApp, exprIsTickedString, exprIsTickedString_maybe, exprIsTopLevelBindable, altsAreExhaustive, -- * Equality cheapEqExpr, cheapEqExpr', eqExpr, diffBinds, -- * Lambdas and eta reduction tryEtaReduce, canEtaReduceToArity, -- * Manipulating data constructors and types exprToType, applyTypeToArgs, dataConRepInstPat, dataConRepFSInstPat, isEmptyTy, normSplitTyConApp_maybe, -- * Working with ticks stripTicksTop, stripTicksTopE, stripTicksTopT, stripTicksE, stripTicksT, -- * StaticPtr collectMakeStaticArgs, -- * Join points isJoinBind, -- * Tag inference mkStrictFieldSeqs, shouldStrictifyIdForCbv, 
shouldUseCbvForId, -- * unsafeEqualityProof isUnsafeEqualityProof, -- * Dumping stuff dumpIdInfoOfProgram ) where import GHC.Prelude import GHC.Platform import GHC.Core import GHC.Core.Ppr import GHC.Core.FVs( exprFreeVars ) import GHC.Core.DataCon import GHC.Core.Type as Type import GHC.Core.FamInstEnv import GHC.Core.Predicate import GHC.Core.TyCo.Rep( TyCoBinder(..), TyBinder ) import GHC.Core.Coercion import GHC.Core.Reduction import GHC.Core.TyCon import GHC.Core.Multiplicity import GHC.Core.Map.Expr ( eqCoreExpr ) import GHC.Builtin.Names ( makeStaticName, unsafeEqualityProofIdKey ) import GHC.Builtin.PrimOps import GHC.Data.Graph.UnVar import GHC.Types.Var import GHC.Types.SrcLoc import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Name import GHC.Types.Literal import GHC.Types.Tickish import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Basic( Arity, Levity(..) ) import GHC.Types.Unique import GHC.Types.Unique.Set import GHC.Data.FastString import GHC.Data.Maybe import GHC.Data.List.SetOps( minusList ) import GHC.Data.Pair import GHC.Data.OrdList import GHC.Utils.Constants (debugIsOn) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Misc import GHC.Utils.Trace import Data.ByteString ( ByteString ) import Data.Function ( on ) import Data.List ( sort, sortBy, partition, zipWith4, mapAccumL ) import Data.Ord ( comparing ) import qualified Data.Set as Set import GHC.Types.RepType (isZeroBitTy) import GHC.Types.Demand (isStrictDmd, isAbsDmd, isDeadEndAppSig) {- ************************************************************************ * * \subsection{Find the type of a Core atom/expression} * * ************************************************************************ -} exprType :: HasDebugCallStack => CoreExpr -> Type -- ^ Recover the type of a well-typed Core expression. 
Fails when -- applied to the actual 'GHC.Core.Type' expression as it cannot -- really be said to have a type exprType (Var var) = idType var exprType (Lit lit) = literalType lit exprType (Coercion co) = coercionType co exprType (Let bind body) | NonRec tv rhs <- bind -- See Note [Type bindings] , Type ty <- rhs = substTyWithUnchecked [tv] [ty] (exprType body) | otherwise = exprType body exprType (Case _ _ ty _) = ty exprType (Cast _ co) = pSnd (coercionKind co) exprType (Tick _ e) = exprType e exprType (Lam binder expr) = mkLamType binder (exprType expr) exprType e@(App _ _) = case collectArgs e of (fun, args) -> applyTypeToArgs (pprCoreExpr e) (exprType fun) args exprType other = pprPanic "exprType" (pprCoreExpr other) coreAltType :: CoreAlt -> Type -- ^ Returns the type of the alternatives right hand side coreAltType alt@(Alt _ bs rhs) = case occCheckExpand bs rhs_ty of -- Note [Existential variables and silly type synonyms] Just ty -> ty Nothing -> pprPanic "coreAltType" (pprCoreAlt alt $$ ppr rhs_ty) where rhs_ty = exprType rhs coreAltsType :: [CoreAlt] -> Type -- ^ Returns the type of the first alternative, which should be the same as for all alternatives coreAltsType (alt:_) = coreAltType alt coreAltsType [] = panic "coreAltsType" mkLamType :: HasDebugCallStack => Var -> Type -> Type -- ^ Makes a @(->)@ type or an implicit forall type, depending -- on whether it is given a type variable or a term variable. -- This is used, for example, when producing the type of a lambda. -- Always uses Inferred binders. 
mkLamTypes :: [Var] -> Type -> Type -- ^ 'mkLamType' for multiple type or value arguments mkLamType v body_ty | isTyVar v = mkForAllTy v Inferred body_ty | isCoVar v , v `elemVarSet` tyCoVarsOfType body_ty = mkForAllTy v Required body_ty | otherwise = mkFunctionType (varMult v) (varType v) body_ty mkFunctionType :: Mult -> Type -> Type -> Type -- This one works out the AnonArgFlag from the argument type -- See GHC.Types.Var Note [AnonArgFlag] mkFunctionType mult arg_ty res_ty | isPredTy arg_ty -- See GHC.Types.Var Note [AnonArgFlag] = assert (eqType mult Many) $ mkInvisFunTy mult arg_ty res_ty | otherwise = mkVisFunTy mult arg_ty res_ty mkLamTypes vs ty = foldr mkLamType ty vs {- Note [Type bindings] ~~~~~~~~~~~~~~~~~~~~ Core does allow type bindings, although such bindings are not much used, except in the output of the desugarer. Example: let a = Int in (\x:a. x) Given this, exprType must be careful to substitute 'a' in the result type (#8522). Note [Existential variables and silly type synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = forall a. T (Funny a) type Funny a = Bool f :: T -> Bool f (T x) = x Now, the type of 'x' is (Funny a), where 'a' is existentially quantified. That means that 'exprType' and 'coreAltsType' may give a result that *appears* to mention an out-of-scope type variable. See #3409 for a more real-world example. Various possibilities suggest themselves: - Ignore the problem, and make Lint not complain about such variables - Expand all type synonyms (or at least all those that discard arguments) This is tricky, because at least for top-level things we want to retain the type the user originally specified. - Expand synonyms on the fly, when the problem arises. That is what we are doing here. It's not too expensive, I think. Note that there might be existentially quantified coercion variables, too. 
-}

applyTypeToArgs :: HasDebugCallStack => SDoc -> Type -> [CoreExpr] -> Type
-- ^ Determines the type resulting from applying an expression with given type
-- to given argument expressions.
-- The first argument is just for debugging, and gives some context
applyTypeToArgs pp_e op_ty args
  = go op_ty args
  where
    go op_ty []                   = op_ty
    go op_ty (Type ty : args)     = go_ty_args op_ty [ty] args
    go op_ty (Coercion co : args) = go_ty_args op_ty [mkCoercionTy co] args
    go op_ty (_ : args)           | Just (_, _, res_ty) <- splitFunTy_maybe op_ty
                                  = go res_ty args
    go _ args = pprPanic "applyTypeToArgs" (panic_msg args)

    -- go_ty_args: accumulate type arguments so we can
    -- instantiate all at once with piResultTys
    go_ty_args op_ty rev_tys (Type ty : args)
      = go_ty_args op_ty (ty:rev_tys) args
    go_ty_args op_ty rev_tys (Coercion co : args)
      = go_ty_args op_ty (mkCoercionTy co : rev_tys) args
    go_ty_args op_ty rev_tys args
      = go (piResultTys op_ty (reverse rev_tys)) args

    panic_msg as = vcat [ text "Expression:" <+> pp_e
                        , text "Type:" <+> ppr op_ty
                        , text "Args:" <+> ppr args
                        , text "Args':" <+> ppr as ]

-- | Cast an expression by a 'MCoercion'; 'MRefl' adds no 'Cast' at all
mkCastMCo :: CoreExpr -> MCoercionR -> CoreExpr
mkCastMCo e MRefl    = e
mkCastMCo e (MCo co) = Cast e co
  -- We are careful to use (MCo co) only when co is not reflexive
  -- Hence (Cast e co) rather than (mkCast e co)

mkPiMCo :: Var -> MCoercionR -> MCoercionR
mkPiMCo _  MRefl   = MRefl
mkPiMCo v (MCo co) = MCo (mkPiCo Representational v co)

{- *********************************************************************
*                                                                      *
             Casts
*                                                                      *
********************************************************************* -}

-- | Wrap the given expression in the coercion safely, dropping
-- identity coercions and coalescing nested coercions
mkCast :: CoreExpr -> CoercionR -> CoreExpr
mkCast e co
  | assertPpr (coercionRole co == Representational)
              (text "coercion" <+> ppr co <+> text "passed to mkCast"
               <+> ppr e <+> text "has wrong role" <+> ppr (coercionRole co)) $
    isReflCo co
  = e

mkCast (Coercion e_co) co
  | isCoVarType (coercionRKind co)
       -- The guard here checks that g has a (~#) on both sides,
       -- otherwise decomposeCo fails.  Can in principle happen
       -- with unsafeCoerce
  = Coercion (mkCoCast e_co co)

mkCast (Cast expr co2) co
  = warnPprTrace (let { from_ty = coercionLKind co;
                        to_ty2  = coercionRKind co2 } in
                     not (from_ty `eqType` to_ty2))
       "mkCast"
       (vcat ([ text "expr:" <+> ppr expr
              , text "co2:" <+> ppr co2
              , text "co:" <+> ppr co ])) $
    mkCast expr (mkTransCo co2 co)

mkCast (Tick t expr) co
  = Tick t (mkCast expr co)

mkCast expr co
  = let from_ty = coercionLKind co in
    warnPprTrace (not (from_ty `eqType` exprType expr))
        "Trying to coerce" (text "(" <> ppr expr
        $$ text "::" <+> ppr (exprType expr)
        <> text ")"
        $$ ppr co $$ ppr (coercionType co)
        $$ callStackDoc) $
    (Cast expr co)

{- *********************************************************************
*                                                                      *
             Attaching ticks
*                                                                      *
********************************************************************* -}

-- | Wraps the given expression in the source annotation, dropping the
-- annotation if possible.
mkTick :: CoreTickish -> CoreExpr -> CoreExpr
mkTick t orig_expr = mkTick' id id orig_expr
 where
  -- Some ticks (cost-centres) can be split in two, with the
  -- non-counting part having laxer placement properties.
  canSplit = tickishCanSplit t && tickishPlace (mkNoCount t) /= tickishPlace t

  mkTick' :: (CoreExpr -> CoreExpr) -- apply after adding tick (float through)
          -> (CoreExpr -> CoreExpr) -- apply before adding tick (float with)
          -> CoreExpr               -- current expression
          -> CoreExpr
  mkTick' top rest expr = case expr of

    -- Cost centre ticks should never be reordered relative to each
    -- other.  Therefore we can stop whenever two collide.
    Tick t2 e
      | ProfNote{} <- t2, ProfNote{} <- t -> top $ Tick t $ rest expr

      -- Otherwise we assume that ticks of different placements float
      -- through each other.
      | tickishPlace t2 /= tickishPlace t -> mkTick' (top . Tick t2) rest e

      -- For annotations this is where we make sure to not introduce
      -- redundant ticks.
      | tickishContains t t2              -> mkTick' top rest e
      | tickishContains t2 t              -> orig_expr
      | otherwise                         -> mkTick' top (rest . Tick t2) e

    -- Ticks don't care about types, so we just float all ticks
    -- through them.  Note that it's not enough to check for these
    -- cases top-level.  While mkTick will never produce Core with type
    -- expressions below ticks, such constructs can be the result of
    -- unfoldings.  We therefore make an effort to put everything into
    -- the right place no matter what we start with.
    Cast e co   -> mkTick' (top . flip Cast co) rest e
    Coercion co -> Coercion co

    Lam x e
      -- Always float through type lambdas.  Even for non-type lambdas,
      -- floating is allowed for all but the most strict placement rule.
      | not (isRuntimeVar x) || tickishPlace t /= PlaceRuntime
      -> mkTick' (top . Lam x) rest e

      -- If it is both counting and scoped, we split the tick into its
      -- two components, often allowing us to keep the counting tick on
      -- the outside of the lambda and push the scoped tick inside.
      -- The point of this is that the counting tick can probably be
      -- floated, and the lambda may then be in a position to be
      -- beta-reduced.
      | canSplit
      -> top $ Tick (mkNoScope t) $ rest $ Lam x $ mkTick (mkNoCount t) e

    App f arg
      -- Always float through type applications.
      | not (isRuntimeArg arg)
      -> mkTick' (top . flip App arg) rest f

      -- We can also float through constructor applications, placement
      -- permitting.  Again we can split.
      | isSaturatedConApp expr && (tickishPlace t==PlaceCostCentre || canSplit)
      -> if tickishPlace t == PlaceCostCentre
         then top $ rest $ tickHNFArgs t expr
         else top $ Tick (mkNoScope t) $ rest $ tickHNFArgs (mkNoCount t) expr

    Var x
      | notFunction && tickishPlace t == PlaceCostCentre
      -> orig_expr
      | notFunction && canSplit
      -> top $ Tick (mkNoScope t) $ rest expr
      where
        -- SCCs can be eliminated on variables provided the variable
        -- is not a function.  In these cases the SCC makes no difference:
        -- the cost of evaluating the variable will be attributed to its
        -- definition site.  When the variable refers to a function, however,
        -- an SCC annotation on the variable affects the cost-centre stack
        -- when the function is called, so we must retain those.
        notFunction = not (isFunTy (idType x))

    Lit{}
      | tickishPlace t == PlaceCostCentre
      -> orig_expr

    -- Catch-all: Annotate where we stand
    _any -> top $ Tick t $ rest expr

mkTicks :: [CoreTickish] -> CoreExpr -> CoreExpr
mkTicks ticks expr = foldr mkTick expr ticks

-- | Is this a saturated application of a constructor-like 'Id'
-- (looking through casts)?
isSaturatedConApp :: CoreExpr -> Bool
isSaturatedConApp e = go e []
  where
    go (App f a) as = go f (a:as)
    go (Var fun) args
       = isConLikeId fun && idArity fun == valArgCount args
    go (Cast f _) as = go f as
    go _ _ = False

mkTickNoHNF :: CoreTickish -> CoreExpr -> CoreExpr
mkTickNoHNF t e
  | exprIsHNF e = tickHNFArgs t e
  | otherwise   = mkTick t e

-- push a tick into the arguments of a HNF (call or constructor app)
tickHNFArgs :: CoreTickish -> CoreExpr -> CoreExpr
tickHNFArgs t e = push t e
  where
    push t (App f (Type u)) = App (push t f) (Type u)
    push t (App f arg)      = App (push t f) (mkTick t arg)
    push _t e               = e

-- | Strip ticks satisfying a predicate from top of an expression
stripTicksTop :: (CoreTickish -> Bool) -> Expr b -> ([CoreTickish], Expr b)
stripTicksTop p = go []
  where go ts (Tick t e) | p t = go (t:ts) e
        go ts other            = (reverse ts, other)

-- | Strip ticks satisfying a predicate from top of an expression,
-- returning the remaining expression
stripTicksTopE :: (CoreTickish -> Bool) -> Expr b -> Expr b
stripTicksTopE p = go
  where go (Tick t e) | p t = go e
        go other            = other

-- | Strip ticks satisfying a predicate from top of an expression,
-- returning the ticks
stripTicksTopT :: (CoreTickish -> Bool) -> Expr b -> [CoreTickish]
stripTicksTopT p = go []
  where go ts (Tick t e) | p t = go (t:ts) e
        go ts _                = ts

-- | Completely strip ticks satisfying a predicate from an
-- expression.
-- Note this is O(n) in the size of the expression!
stripTicksE :: (CoreTickish -> Bool) -> Expr b -> Expr b
stripTicksE p expr = go expr
  where go (App e a)       = App (go e) (go a)
        go (Lam b e)       = Lam b (go e)
        go (Let b e)       = Let (go_bs b) (go e)
        go (Case e b t as) = Case (go e) b t (map go_a as)
        go (Cast e c)      = Cast (go e) c
        go (Tick t e)
          | p t            = go e
          | otherwise      = Tick t (go e)
        go other           = other
        go_bs (NonRec b e) = NonRec b (go e)
        go_bs (Rec bs)     = Rec (map go_b bs)
        go_b (b, e)        = (b, go e)
        go_a (Alt c bs e)  = Alt c bs (go e)

-- | Collect all ticks in an expression that satisfy a predicate,
-- in left-to-right order.  Also O(n) in the size of the expression.
stripTicksT :: (CoreTickish -> Bool) -> Expr b -> [CoreTickish]
stripTicksT p expr = fromOL $ go expr
  where go (App e a)       = go e `appOL` go a
        go (Lam _ e)       = go e
        go (Let b e)       = go_bs b `appOL` go e
        go (Case e _ _ as) = go e `appOL` concatOL (map go_a as)
        go (Cast e _)      = go e
        go (Tick t e)
          | p t            = t `consOL` go e
          | otherwise      = go e
        go _               = nilOL
        go_bs (NonRec _ e) = go e
        go_bs (Rec bs)     = concatOL (map go_b bs)
        go_b (_, e)        = go e
        go_a (Alt _ _ e)   = go e

{-
************************************************************************
*                                                                      *
\subsection{Other expression construction}
*                                                                      *
************************************************************************
-}

bindNonRec :: Id -> CoreExpr -> CoreExpr -> CoreExpr
-- ^ @bindNonRec x r b@ produces either:
--
-- > let x = r in b
--
-- or:
--
-- > case r of x { _DEFAULT_ -> b }
--
-- depending on whether we have to use a @case@ or @let@
-- binding for the expression (see 'needsCaseBinding').
-- It's used by the desugarer to avoid building bindings
-- that give Core Lint a heart attack, although actually
-- the simplifier deals with them perfectly well.
-- See also 'GHC.Core.Make.mkCoreLet'
bindNonRec bndr rhs body
  | isTyVar bndr                       = let_bind
  | isCoVar bndr                       = if isCoArg rhs then let_bind
    {- See Note [Binding coercions] -}                  else case_bind
  | isJoinId bndr                      = let_bind
  | needsCaseBinding (idType bndr) rhs = case_bind
  | otherwise                          = let_bind
  where
    case_bind = mkDefaultCase rhs bndr body
    let_bind  = Let (NonRec bndr rhs) body

-- | Tests whether we have to use a @case@ rather than @let@ binding for this
-- expression as per the invariants of 'CoreExpr':
-- see "GHC.Core#let_app_invariant"
needsCaseBinding :: Type -> CoreExpr -> Bool
needsCaseBinding ty rhs =
  mightBeUnliftedType ty && not (exprOkForSpeculation rhs)
        -- Make a case expression instead of a let
        -- These can arise either from the desugarer,
        -- or from beta reductions: (\x.e) (x +# y)

mkAltExpr :: AltCon     -- ^ Case alternative constructor
          -> [CoreBndr] -- ^ Things bound by the pattern match
          -> [Type]     -- ^ The type arguments to the case alternative
          -> CoreExpr
-- ^ This guy constructs the value that the scrutinee must have
-- given that you are in one particular branch of a case
mkAltExpr (DataAlt con) args inst_tys
  = mkConApp con (map Type inst_tys ++ varsToCoreExprs args)
mkAltExpr (LitAlt lit) [] []
  = Lit lit
mkAltExpr (LitAlt _) _ _ = panic "mkAltExpr LitAlt"
mkAltExpr DEFAULT _ _ = panic "mkAltExpr DEFAULT"

mkDefaultCase :: CoreExpr -> Id -> CoreExpr -> CoreExpr
-- Make (case x of y { DEFAULT -> e })
mkDefaultCase scrut case_bndr body
  = Case scrut case_bndr (exprType body) [Alt DEFAULT [] body]

mkSingleAltCase :: CoreExpr -> Id -> AltCon -> [Var] -> CoreExpr -> CoreExpr
-- Use this function if possible, when building a case,
-- because it ensures that the type on the Case itself
-- doesn't mention variables bound by the case
-- See Note [Care with the type of a case expression]
mkSingleAltCase scrut case_bndr con bndrs body
  = Case scrut case_bndr case_ty [Alt con bndrs body]
  where
    body_ty = exprType body

    case_ty -- See Note [Care with the type of a case expression]
      | Just body_ty' <- occCheckExpand bndrs body_ty
      = body_ty'

      | otherwise
      = pprPanic "mkSingleAltCase" (ppr scrut $$ ppr bndrs $$ ppr body_ty)

{- Note [Care with the type of a case expression]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider a phantom type synonym
   type S a = Int
and we want to form the case expression
   case x of K (a::*) -> (e :: S a)
We must not make the type field of the case-expression (S a) because
'a' isn't in scope.  Hence the call to occCheckExpand.  This caused
issue #17056.

NB: this situation can only arise with type synonyms, which can
falsely "mention" type variables that aren't "really there", and which
can be eliminated by expanding the synonym.

Note [Binding coercions]
~~~~~~~~~~~~~~~~~~~~~~~~
Consider binding a CoVar, c = e.  Then, we must satisfy
Note [Core type and coercion invariant] in GHC.Core,
which allows only (Coercion co) on the RHS.

************************************************************************
*                                                                      *
             Operations over case alternatives
*                                                                      *
************************************************************************

The default alternative must be first, if it exists at all.
This makes it easy to find, though it makes matching marginally harder.
-}

-- | Extract the default case alternative
findDefault :: [Alt b] -> ([Alt b], Maybe (Expr b))
findDefault (Alt DEFAULT args rhs : alts) = assert (null args) (alts, Just rhs)
findDefault alts                          = (alts, Nothing)

-- | Re-attach a default alternative, if any, keeping it first
addDefault :: [Alt b] -> Maybe (Expr b) -> [Alt b]
addDefault alts Nothing    = alts
addDefault alts (Just rhs) = Alt DEFAULT [] rhs : alts

isDefaultAlt :: Alt b -> Bool
isDefaultAlt (Alt DEFAULT _ _) = True
isDefaultAlt _                 = False

-- | Find the case alternative corresponding to a particular
-- constructor: panics if no such constructor exists
findAlt :: AltCon -> [Alt b] -> Maybe (Alt b)
    -- A "Nothing" result *is* legitimate
    -- See Note [Unreachable code]
findAlt con alts
  = case alts of
        (deflt@(Alt DEFAULT _ _):alts) -> go alts (Just deflt)
        _                              -> go alts Nothing
  where
    go []                          deflt = deflt
    go (alt@(Alt con1 _ _) : alts) deflt
      = case con `cmpAltCon` con1 of
          LT -> deflt   -- Missed it already; the alts are in increasing order
          EQ -> Just alt
          GT -> assert (not (con1 == DEFAULT)) $ go alts deflt

{- Note [Unreachable code]
~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible (although unusual) for GHC to find a case expression
that cannot match.  For example:

     data Col = Red | Green | Blue
     x = Red
     f v = case x of
              Red -> ...
              _ -> ...(case x of { Green -> e1; Blue -> e2 })...

Suppose that for some silly reason, x isn't substituted in the case
expression.  (Perhaps there's a NOINLINE on it, or profiling SCC stuff
gets in the way; cf #3118.)  Then the full-laziness pass might produce
this

     x = Red
     lvl = case x of { Green -> e1; Blue -> e2 })
     f v = case x of
             Red -> ...
             _ -> ...lvl...

Now if x gets inlined, we won't be able to find a matching alternative
for 'Red'.  That's because 'lvl' is unreachable.  So rather than crashing
we generate (error "Inaccessible alternative").

Similar things can happen (augmented by GADTs) when the Simplifier
filters down the matching alternatives in
GHC.Core.Opt.Simplify.rebuildCase.
-}

---------------------------------
mergeAlts :: [Alt a] -> [Alt a] -> [Alt a]
-- ^ Merge alternatives preserving order; alternatives in
-- the first argument shadow ones in the second
mergeAlts [] as2 = as2
mergeAlts as1 [] = as1
mergeAlts (a1:as1) (a2:as2)
  = case a1 `cmpAlt` a2 of
        LT -> a1 : mergeAlts as1      (a2:as2)
        EQ -> a1 : mergeAlts as1      as2       -- Discard a2
        GT -> a2 : mergeAlts (a1:as1) as2

---------------------------------
trimConArgs :: AltCon -> [CoreArg] -> [CoreArg]
-- ^ Given:
--
-- > case (C a b x y) of
-- >        C b x y -> ...
--
-- We want to drop the leading type argument of the scrutinee
-- leaving the arguments to match against the pattern
trimConArgs DEFAULT      args = assert (null args) []
trimConArgs (LitAlt _)   args = assert (null args) []
trimConArgs (DataAlt dc) args = dropList (dataConUnivTyVars dc) args

filterAlts :: TyCon      -- ^ Type constructor of scrutinee's type (used to prune possibilities)
           -> [Type]     -- ^ And its type arguments
           -> [AltCon]   -- ^ 'imposs_cons': constructors known to be impossible due to the form of the scrutinee
           -> [Alt b]    -- ^ Alternatives
           -> ([AltCon], [Alt b])
-- Returns:
--  1. Constructors that will never be encountered by the
--     *default* case (if any).  A superset of imposs_cons
--  2. The new alternatives, trimmed by
--        a) remove imposs_cons
--        b) remove constructors which can't match because of GADTs
--
-- NB: the final list of alternatives may be empty:
-- This is a tricky corner case.  If the data type has no constructors,
-- which GHC allows, or if the imposs_cons covers all constructors (after taking
-- account of GADTs), then no alternatives can match.
--
-- If callers need to preserve the invariant that there is always at least one branch
-- in a "case" statement then they will need to manually add a dummy case branch that just
-- calls "error" or similar.
filterAlts _tycon inst_tys imposs_cons alts
  = (imposs_deflt_cons, addDefault trimmed_alts maybe_deflt)
  where
    (alts_wo_default, maybe_deflt) = findDefault alts
    alt_cons = [con | Alt con _ _ <- alts_wo_default]

    trimmed_alts = filterOut (impossible_alt inst_tys) alts_wo_default

    imposs_cons_set = Set.fromList imposs_cons
    imposs_deflt_cons =
      imposs_cons ++ filterOut (`Set.member` imposs_cons_set) alt_cons
         -- "imposs_deflt_cons" are handled
         --   EITHER by the context,
         --   OR by a non-DEFAULT branch in this case expression.

    impossible_alt :: [Type] -> Alt b -> Bool
    impossible_alt _ (Alt con _ _)
      | con `Set.member` imposs_cons_set = True
    impossible_alt inst_tys (Alt (DataAlt con) _ _)
      = dataConCannotMatch inst_tys con
    impossible_alt _ _ = False

-- | Refine the default alternative to a 'DataAlt', if there is a unique way to do so.
-- See Note [Refine DEFAULT case alternatives]
refineDefaultAlt :: [Unique]          -- ^ Uniques for constructing new binders
                 -> Mult              -- ^ Multiplicity annotation of the case expression
                 -> TyCon             -- ^ Type constructor of scrutinee's type
                 -> [Type]            -- ^ Type arguments of scrutinee's type
                 -> [AltCon]          -- ^ Constructors that cannot match the DEFAULT (if any)
                 -> [CoreAlt]
                 -> (Bool, [CoreAlt]) -- ^ 'True', if a default alt was replaced with a 'DataAlt'
refineDefaultAlt us mult tycon tys imposs_deflt_cons all_alts
  | Alt DEFAULT _ rhs : rest_alts <- all_alts
  , isAlgTyCon tycon            -- It's a data type, tuple, or unboxed tuples.
  , not (isNewTyCon tycon)      -- We can have a newtype, if we are just doing an eval:
                                --      case x of { DEFAULT -> e }
                                -- and we don't want to fill in a default for them!
  , Just all_cons <- tyConDataCons_maybe tycon
  , let imposs_data_cons = mkUniqSet [con | DataAlt con <- imposs_deflt_cons]
                             -- We now know it's a data type, so we can use
                             -- UniqSet rather than Set (more efficient)
        impossible con   = con `elementOfUniqSet` imposs_data_cons
                             || dataConCannotMatch tys con
  = case filterOut impossible all_cons of
       -- Eliminate the default alternative
       -- altogether if it can't match:
       []    -> (False, rest_alts)

       -- It matches exactly one constructor, so fill it in:
       [con] -> (True, mergeAlts rest_alts
                        [Alt (DataAlt con) (ex_tvs ++ arg_ids) rhs])
                       -- We need the mergeAlts to keep the alternatives in the right order
             where
                (ex_tvs, arg_ids) = dataConRepInstPat us mult con tys

       -- It matches more than one, so do nothing
       _  -> (False, all_alts)

  | debugIsOn, isAlgTyCon tycon, null (tyConDataCons tycon)
  , not (isFamilyTyCon tycon || isAbstractTyCon tycon)
        -- Check for no data constructors
        -- This can legitimately happen for abstract types and type families,
        -- so don't report that
  = (False, all_alts)

  | otherwise      -- The common case
  = (False, all_alts)

{- Note [Refine DEFAULT case alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
refineDefaultAlt replaces the DEFAULT alt with a constructor if there
is one possible value it could be.

The simplest example being
    foo :: () -> ()
    foo x = case x of !_ -> ()
which rewrites to
    foo :: () -> ()
    foo x = case x of () -> ()

There are two reasons in general why replacing a DEFAULT alternative
with a specific constructor is desirable.

1. We can simplify inner expressions.  For example

       data Foo = Foo1 ()

       test :: Foo -> ()
       test x = case x of
                  DEFAULT -> mid (case x of
                                    Foo1 x1 -> x1)

   refineDefaultAlt fills in the DEFAULT here with `Foo ip1` and then
   x becomes bound to `Foo ip1` so is inlined into the other case
   which causes the KnownBranch optimisation to kick in.  If we don't
   refine DEFAULT to `Foo ip1`, we are left with both case expressions.

2. combineIdenticalAlts does a better job.
For example (Simon Jacobi)

       data D = C0 | C1 | C2

       case e of
         DEFAULT -> e0
         C0      -> e1
         C1      -> e1

   When we apply combineIdenticalAlts to this expression, it can't
   combine the alts for C0 and C1, as we already have a default case.
   But if we apply refineDefaultAlt first, we get

       case e of
         C0 -> e1
         C1 -> e1
         C2 -> e0

   and combineIdenticalAlts can turn that into

       case e of
         DEFAULT -> e1
         C2      -> e0

   It isn't obvious that refineDefaultAlt does this but if you look
   at its one call site in GHC.Core.Opt.Simplify.Utils then the
   `imposs_deflt_cons` argument is populated with constructors which
   are matched elsewhere.

Note [Combine identical alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If several alternatives are identical, merge them into a single
DEFAULT alternative.  I've occasionally seen this making a big
difference:

     case e of               =====>     case e of
       C _ -> f x                         D v -> ....v....
       D v -> ....v....                   DEFAULT -> f x
       DEFAULT -> f x

The point is that we merge common RHSs, at least for the DEFAULT case.
[One could do something more elaborate but I've never seen it needed.]
To avoid an expensive test, we just merge branches equal to the *first*
alternative; this picks up the common cases
     a) all branches equal
     b) some branches equal to the DEFAULT (which occurs first)

The case where the Combine Identical Alternatives transformation showed
up was like this (base/Foreign/C/Err/Error.hs):

        x | p `is` 1 -> e1
          | p `is` 2 -> e2
        ...etc...

where @is@ was something like

        p `is` n = p /= (-1) && p == n

This gave rise to a horrible sequence of cases

        case p of
          (-1) -> $j p
          1    -> e1
          DEFAULT -> $j p

and similarly in cascade for all the join points!

Note [Combine identical alternatives: wrinkles]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* It's important that we try to combine alternatives *before*
  simplifying them, rather than after.  Reason: because
  Simplify.simplAlt may zap the occurrence info on the binders in the
  alternatives, which in turn defeats combineIdenticalAlts' use of
  isDeadBinder (see #7360).

  You can see this in the call to combineIdenticalAlts in
  GHC.Core.Opt.Simplify.Utils.prepareAlts.  Here the alternatives have
  type InAlt (the "In" meaning input) rather than OutAlt.

* combineIdenticalAlts does not work well for nullary constructors
      case x of y
         []    -> f []
         (_:_) -> f y
  Here we won't see that [] and y are the same.  Sigh! This problem
  is solved in CSE, in GHC.Core.Opt.CSE.combineAlts, which does a
  better version of combineIdenticalAlts.  But sadly it doesn't have
  the occurrence info we have here.
  See Note [Combine case alts: awkward corner] in GHC.Core.Opt.CSE).

Note [Care with impossible-constructors when combining alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have (#10538)
   data T = A | B | C | D

      case x::T of   (Imposs-default-cons {A,B})
         DEFAULT -> e1
         A -> e2
         B -> e1

When calling combineIdenticalAlts, we'll have computed that the
"impossible constructors" for the DEFAULT alt is {A,B}, since if x is
A or B we'll take the other alternatives.  But suppose we combine B
into the DEFAULT, to get

      case x::T of   (Imposs-default-cons {A})
         DEFAULT -> e1
         A -> e2

Then we must be careful to trim the impossible constructors to just
{A}, else we risk compiling 'e1' wrong!

Not only that, but we take care when there is no DEFAULT beforehand,
because we are introducing one.  Consider

   case x of   (Imposs-default-cons {A,B,C})
     A -> e1
     B -> e2
     C -> e1

Then when combining the A and C alternatives we get

   case x of   (Imposs-default-cons {B})
     DEFAULT -> e1
     B -> e2

Note that we have a new DEFAULT branch that we didn't have before.  So
we need delete from the "impossible-default-constructors" all the
known-con alternatives that we have eliminated.  (In #11172 we missed
the first one.)
-}

combineIdenticalAlts :: [AltCon]    -- Constructors that cannot match DEFAULT
                     -> [CoreAlt]
                     -> (Bool,      -- True <=> something happened
                         [AltCon],  -- New constructors that cannot match DEFAULT
                         [CoreAlt]) -- New alternatives
-- See Note [Combine identical alternatives]
-- True <=> we did some combining, result is a single DEFAULT alternative
combineIdenticalAlts imposs_deflt_cons (Alt con1 bndrs1 rhs1 : rest_alts)
  | all isDeadBinder bndrs1    -- Remember the default
  , not (null elim_rest)       -- alternative comes first
  = (True, imposs_deflt_cons', deflt_alt : filtered_rest)
  where
    (elim_rest, filtered_rest) = partition identical_to_alt1 rest_alts
    deflt_alt = Alt DEFAULT [] (mkTicks (concat tickss) rhs1)

    -- See Note [Care with impossible-constructors when combining alternatives]
    imposs_deflt_cons' = imposs_deflt_cons `minusList` elim_cons
    elim_cons = elim_con1 ++ map (\(Alt con _ _) -> con) elim_rest
    elim_con1 = case con1 of     -- Don't forget con1!
                  DEFAULT -> []  -- See Note [Care with impossible-constructors
                                 --           when combining alternatives]
                  _       -> [con1]

    cheapEqTicked e1 e2 = cheapEqExpr' tickishFloatable e1 e2
    identical_to_alt1 (Alt _con bndrs rhs)
      = all isDeadBinder bndrs && rhs `cheapEqTicked` rhs1
    tickss = map (\(Alt _ _ rhs) -> stripTicksT tickishFloatable rhs) elim_rest

combineIdenticalAlts imposs_cons alts
  = (False, imposs_cons, alts)

-- Scales the multiplicity of the binders of a list of case alternatives. That
-- is, in [C x1…xn -> u], the multiplicity of x1…xn is scaled.
scaleAltsBy :: Mult -> [CoreAlt] -> [CoreAlt]
scaleAltsBy w alts = map scaleAlt alts
  where
    scaleAlt :: CoreAlt -> CoreAlt
    scaleAlt (Alt con bndrs rhs) = Alt con (map scaleBndr bndrs) rhs

    scaleBndr :: CoreBndr -> CoreBndr
    scaleBndr b = scaleVarBy w b


{- *********************************************************************
*                                                                      *
             exprIsTrivial
*                                                                      *
************************************************************************

Note [exprIsTrivial]
~~~~~~~~~~~~~~~~~~~~
@exprIsTrivial@ is true of expressions we are unconditionally happy to
duplicate; simple variables and constants, and type applications.
Note that primop Ids aren't considered trivial unless

Note [Variables are trivial]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There used to be a gruesome test for (hasNoBinding v) in the
Var case:
        exprIsTrivial (Var v) | hasNoBinding v = idArity v == 0
The idea here is that a constructor worker, like \$wJust, is
really short for (\x -> \$wJust x), because \$wJust has no binding.
So it should be treated like a lambda.  Ditto unsaturated primops.
But now constructor workers are not "have-no-binding" Ids.  And
completely un-applied primops and foreign-call Ids are sufficiently
rare that I plan to allow them to be duplicated and put up with
saturating them.

Note [Tick trivial]
~~~~~~~~~~~~~~~~~~~
Ticks are only trivial if they are pure annotations.  If we treat
"tick x" as trivial, it will be inlined inside lambdas and the
entry count will be skewed, for example.  Furthermore "scc x" will
turn into just "x" in mkTick.

Note [Empty case is trivial]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The expression (case (x::Int) Bool of {}) is just a type-changing
case used when we are sure that 'x' will not return.  See
Note [Empty case alternatives] in GHC.Core.

If the scrutinee is trivial, then so is the whole expression; and the
CoreToSTG pass in fact drops the case expression leaving only the
scrutinee.

Having more trivial expressions is good.
Moreover, if we don't treat it as trivial we may land up with
let-bindings like
        let v = case x of {} in ...
and after CoreToSTG that gives
        let v = x in ...
and that confuses the code generator (#11155).  So best to kill
it off at source.
-}

exprIsTrivial :: CoreExpr -> Bool
-- If you modify this function, you may also
-- need to modify getIdFromTrivialExpr
exprIsTrivial (Var _)         = True        -- See Note [Variables are trivial]
exprIsTrivial (Type _)        = True
exprIsTrivial (Coercion _)    = True
exprIsTrivial (Lit lit)       = litIsTrivial lit
exprIsTrivial (App e arg)     = not (isRuntimeArg arg) && exprIsTrivial e
exprIsTrivial (Lam b e)       = not (isRuntimeVar b) && exprIsTrivial e
exprIsTrivial (Tick t e)      = not (tickishIsCode t) && exprIsTrivial e
                                -- See Note [Tick trivial]
exprIsTrivial (Cast e _)      = exprIsTrivial e
exprIsTrivial (Case e _ _ []) = exprIsTrivial e  -- See Note [Empty case is trivial]
exprIsTrivial _               = False

{- Note [getIdFromTrivialExpr]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
When substituting in a breakpoint we need to strip away the type cruft
from a trivial expression and get back to the Id.  The invariant is
that the expression we're substituting was originally trivial
according to exprIsTrivial, AND the expression is not a literal.
See Note [substTickish] for how breakpoint substitution preserves
this extra invariant.

We also need this functionality in CorePrep to extract out Id of a
function which we are saturating.  However, in this case we don't know
if the variable actually refers to a literal; thus we use
'getIdFromTrivialExpr_maybe' to handle this case.  See test
T12076lit for an example where this matters.
-}

getIdFromTrivialExpr :: HasDebugCallStack => CoreExpr -> Id
getIdFromTrivialExpr e
    = fromMaybe (pprPanic "getIdFromTrivialExpr" (ppr e))
                (getIdFromTrivialExpr_maybe e)

getIdFromTrivialExpr_maybe :: CoreExpr -> Maybe Id
-- See Note [getIdFromTrivialExpr]
-- The equations for this should line up with those for exprIsTrivial
getIdFromTrivialExpr_maybe e
  = go e
  where
    go (App f t) | not (isRuntimeArg t)   = go f
    go (Tick t e) | not (tickishIsCode t) = go e
    go (Cast e _)                         = go e
    go (Lam b e) | not (isRuntimeVar b)   = go e
    go (Case e _ _ [])                    = go e
    go (Var v) = Just v
    go _       = Nothing

{-
exprIsDeadEnd is a very cheap and cheerful function; it may return
False for bottoming expressions, but it never costs much to ask.  See
also GHC.Core.Opt.Arity.exprBotStrictness_maybe, but that's a bit more
expensive.
-}

exprIsDeadEnd :: CoreExpr -> Bool
-- See Note [Bottoming expressions]
exprIsDeadEnd e
  | isEmptyTy (exprType e)
  = True
  | otherwise
  = go 0 e
  where
    -- n counts the value arguments the head is applied to
    go n (Var v)                 = isDeadEndAppSig (idDmdSig v) n
    go n (App e a) | isTypeArg a = go n e
                   | otherwise   = go (n+1) e
    go n (Tick _ e)              = go n e
    go n (Cast e _)              = go n e
    go n (Let _ e)               = go n e
    go n (Lam v e) | isTyVar v   = go n e
    go _ (Case _ _ _ alts)       = null alts
        -- See Note [Empty case alternatives] in GHC.Core
    go _ _                       = False

{- Note [Bottoming expressions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A bottoming expression is guaranteed to diverge, or raise an
exception.  We can test for it in two different ways, and exprIsDeadEnd
checks for both of these situations:

* Visibly-bottom computations.  For example
      (error Int "Hello")
  is visibly bottom.  The strictness analyser also finds out if
  a function diverges or raises an exception, and puts that info
  in its strictness signature.

* Empty types.  If a type is empty, its only inhabitant is bottom.
  For example:
      data T
      f :: T -> Bool
      f = \(x:t). case x of Bool {}
  Since T has no data constructors, the case alternatives are of
  course empty.
However note that 'x' is not bound to a visibly-bottom value; it's
the *type* that tells us it's going to diverge.

A GADT may also be empty even though it has constructors:
        data T a where
          T1 :: a -> T Bool
          T2 :: T Int
        ...(case (x::T Char) of {})...
Here (T Char) is uninhabited.  A more realistic case is (Int ~ Bool),
which is likewise uninhabited.

************************************************************************
*                                                                      *
             exprIsDupable
*                                                                      *
************************************************************************

Note [exprIsDupable]
~~~~~~~~~~~~~~~~~~~~
@exprIsDupable@ is true of expressions that can be duplicated at a
modest cost in code size.  This will only happen in different case
branches, so there's no issue about duplicating work.

That is, exprIsDupable returns True of (f x) even if
f is very very expensive to call.

Its only purpose is to avoid fruitless let-binding
and then inlining of case join points
-}

exprIsDupable :: Platform -> CoreExpr -> Bool
exprIsDupable platform e
  = isJust (go dupAppSize e)
  where
    -- The Int is the remaining size budget; Nothing means "too big"
    go :: Int -> CoreExpr -> Maybe Int
    go n (Type {})     = Just n
    go n (Coercion {}) = Just n
    go n (Var {})      = decrement n
    go n (Tick _ e)    = go n e
    go n (Cast e _)    = go n e
    go n (App f a) | Just n' <- go n a
                       = go n' f
    go n (Lit lit) | litIsDupable platform lit
                       = decrement n
    go _ _             = Nothing

    decrement :: Int -> Maybe Int
    decrement 0 = Nothing
    decrement n = Just (n-1)

dupAppSize :: Int
dupAppSize = 8   -- Size of term we are prepared to duplicate
                 -- This is *just* big enough to make test MethSharing
                 -- inline enough join points.  Really it should be
                 -- smaller, and could be if we fixed #4960.
{- ************************************************************************ * * exprIsCheap, exprIsExpandable * * ************************************************************************ Note [exprIsWorkFree] ~~~~~~~~~~~~~~~~~~~~~ exprIsWorkFree is used when deciding whether to inline something; we don't inline it if doing so might duplicate work, by peeling off a complete copy of the expression. Here we do not want even to duplicate a primop (#5623): eg let x = a #+ b in x +# x we do not want to inline/duplicate x Previously we were a bit more liberal, which led to the primop-duplicating problem. However, being more conservative did lead to a big regression in one nofib benchmark, wheel-sieve1. The situation looks like this: let noFactor_sZ3 :: GHC.Types.Int -> GHC.Types.Bool noFactor_sZ3 = case s_adJ of _ { GHC.Types.I# x_aRs -> case GHC.Prim.<=# x_aRs 2 of _ { GHC.Types.False -> notDivBy ps_adM qs_adN; GHC.Types.True -> lvl_r2Eb }} go = \x. ...(noFactor (I# y))....(go x')... The function 'noFactor' is heap-allocated and then called. Turns out that 'notDivBy' is strict in its THIRD arg, but that is invisible to the caller of noFactor, which therefore cannot do w/w and heap-allocates noFactor's argument. At the moment (May 12) we are just going to put up with this, because the previous more aggressive inlining (which treated 'noFactor' as work-free) was duplicating primops, which in turn was making inner loops of array calculations runs slow (#5623) Note [Case expressions are work-free] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Are case-expressions work-free? Consider let v = case x of (p,q) -> p go = \y -> ...case v of ... Should we inline 'v' at its use site inside the loop? At the moment we do. I experimented with saying that case are *not* work-free, but that increased allocation slightly. It's a fairly small effect, and at the moment we go for the slightly more aggressive version which treats (case x of ....) as work-free if the alternatives are. 
Moreover it improves arities of overloaded functions where there is only dictionary selection (no construction) involved Note [exprIsCheap] ~~~~~~~~~~~~~~~~~~ See also Note [Interaction of exprIsWorkFree and lone variables] in GHC.Core.Unfold @exprIsCheap@ looks at a Core expression and returns \tr{True} if it is obviously in weak head normal form, or is cheap to get to WHNF. Note that that's not the same as exprIsDupable; an expression might be big, and hence not dupable, but still cheap. By ``cheap'' we mean a computation we're willing to: push inside a lambda, or inline at more than one place That might mean it gets evaluated more than once, instead of being shared. The main examples of things which aren't WHNF but are ``cheap'' are: * case e of pi -> ei (where e, and all the ei are cheap) * let x = e in b (where e and b are cheap) * op x1 ... xn (where op is a cheap primitive operator) * error "foo" (because we are happy to substitute it inside a lambda) Notice that a variable is considered 'cheap': we can push it inside a lambda, because sharing will make sure it is only evaluated once. Note [exprIsCheap and exprIsHNF] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that exprIsHNF does not imply exprIsCheap. Eg let x = fac 20 in Just x This responds True to exprIsHNF (you can discard a seq), but False to exprIsCheap. Note [Arguments and let-bindings exprIsCheapX] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ What predicate should we apply to the argument of an application, or the RHS of a let-binding? We used to say "exprIsTrivial arg" due to concerns about duplicating nested constructor applications, but see #4978. So now we just recursively use exprIsCheapX. We definitely want to treat let and app the same. The principle here is that let x = blah in f x should behave equivalently to f blah This in turn means that the 'letrec g' does not prevent eta expansion in this (which it previously was): f = \x. let v = case x of True -> letrec g = \w. blah in g False -> \x. 
x
              in \w. v True
-}

--------------------
-- | True of expressions whose duplication cannot duplicate work.
-- See Note [exprIsWorkFree].
exprIsWorkFree :: CoreExpr -> Bool
-- See Note [exprIsWorkFree]
exprIsWorkFree e = exprIsCheapX isWorkFreeApp e

-- | True of expressions that are cheap to evaluate (possibly more than
-- once).  See Note [exprIsCheap].
exprIsCheap :: CoreExpr -> Bool
exprIsCheap e = exprIsCheapX isCheapApp e

-- | Shared worker for 'exprIsWorkFree' and 'exprIsCheap'; the
-- 'CheapAppFun' argument decides whether a head-variable application
-- counts as cheap for the given number of value arguments.
exprIsCheapX :: CheapAppFun -> CoreExpr -> Bool
{-# INLINE exprIsCheapX #-}
-- allow specialization of exprIsCheap and exprIsWorkFree
-- instead of having an unknown call to ok_app
exprIsCheapX ok_app e
  = ok e
  where
    -- 'ok' restarts the value-argument count at zero, e.g. for
    -- case scrutinees and let right-hand sides
    ok e = go 0 e

    -- n is the number of value arguments
    go n (Var v)                      = ok_app v n
    go _ (Lit {})                     = True
    go _ (Type {})                    = True
    go _ (Coercion {})                = True
    go n (Cast e _)                   = go n e
    go n (Case scrut _ _ alts)        = ok scrut &&
                                        and [ go n rhs | Alt _ _ rhs <- alts ]
    go n (Tick t e) | tickishCounts t = False
                    | otherwise       = go n e
    go n (Lam x e)  | isRuntimeVar x  = n==0 || go (n-1) e
                    | otherwise       = go n e
    go n (App f e)  | isRuntimeArg e  = go (n+1) f && ok e
                    | otherwise       = go n f
    go n (Let (NonRec _ r) e)         = go n e && ok r
    go n (Let (Rec prs) e)            = go n e && all (ok . snd) prs

      -- Case: see Note [Case expressions are work-free]
      -- App, Let: see Note [Arguments and let-bindings exprIsCheapX]

{- Note [exprIsExpandable]
~~~~~~~~~~~~~~~~~~~~~~~~~~
An expression is "expandable" if we are willing to duplicate it, if doing
so might make a RULE or case-of-constructor fire.  Consider
   let x = (a,b)
       y = build g
   in ....(case x of (p,q) -> rhs)....(foldr k z y)....

We don't inline 'x' or 'y' (see Note [Lone variables] in GHC.Core.Unfold),
but we do want

 * the case-expression to simplify
   (via exprIsConApp_maybe, exprIsLiteral_maybe)

 * the foldr/build RULE to fire
   (by expanding the unfolding during rule matching)

So we classify the unfolding of a let-binding as "expandable" (via the
uf_expandable field) if we want to do this kind of on-the-fly
expansion.  Specifically:

* True of constructor applications (K a b)

* True of applications of a "CONLIKE" Id; see Note [CONLIKE pragma] in GHC.Types.Basic.
  (NB: exprIsCheap might not be true of this)

* False of case-expressions.
If we have
     let x = case ... in ...(case x of ...)...
  we won't simplify.  We have to inline x.  See #14688.

* False of let-expressions (same reason); and in any case we
  float lets out of an RHS if doing so will reveal an expandable
  application (see SimplEnv.doFloatFromRhs).

* Take care: exprIsExpandable should /not/ be true of primops.  I
  found this in test T5623a:
    let q = /\a. Ptr a (a +# b)
    in case q @ Float of Ptr v -> ...q...

  q's inlining should not be expandable, else exprIsConApp_maybe will
  say that (q @ Float) expands to (Ptr a (a +# b)), and that will
  duplicate the (a +# b) primop, which we should not do lightly.
  (It's quite hard to trigger this bug, but T13155 does so for GHC 8.0.)
-}

-------------------------------------
-- | True of expressions we are willing to expand on the fly during rule
-- matching or case-of-known-constructor.  See Note [exprIsExpandable].
-- Structurally this mirrors 'exprIsCheapX' with 'isExpandableApp' as the
-- head test, except that Case and Let are never expandable (see the
-- bullet points in the Note above).
exprIsExpandable :: CoreExpr -> Bool
-- See Note [exprIsExpandable]
exprIsExpandable e
  = ok e
  where
    ok e = go 0 e

    -- n is the number of value arguments
    go n (Var v)                      = isExpandableApp v n
    go _ (Lit {})                     = True
    go _ (Type {})                    = True
    go _ (Coercion {})                = True
    go n (Cast e _)                   = go n e
    go n (Tick t e) | tickishCounts t = False
                    | otherwise       = go n e
    go n (Lam x e)  | isRuntimeVar x  = n==0 || go (n-1) e
                    | otherwise       = go n e
    go n (App f e)  | isRuntimeArg e  = go (n+1) f && ok e
                    | otherwise       = go n f
    go _ (Case {})                    = False
    go _ (Let {})                     = False


-------------------------------------
type CheapAppFun = Id -> Arity -> Bool
  -- Is an application of this function to n *value* args
  -- always cheap, assuming the arguments are cheap?
  -- True mainly of data constructors, partial applications;
  -- but with minor variations:
  --    isWorkFreeApp
  --    isCheapApp

-- | The most conservative 'CheapAppFun': only saturated-or-partial
-- applications and data-constructor workers are work-free.
isWorkFreeApp :: CheapAppFun
isWorkFreeApp fn n_val_args
  | n_val_args == 0           -- No value args
  = True
  | n_val_args < idArity fn   -- Partial application
  = True
  | otherwise
  = case idDetails fn of
      DataConWorkId {} -> True
      _                -> False

-- | Like 'isWorkFreeApp' but additionally treats bottoming functions,
-- record selectors, class-method selectors and cheap primops as cheap.
isCheapApp :: CheapAppFun
isCheapApp fn n_val_args
  | isWorkFreeApp fn n_val_args = True
  | isDeadEndId fn              = True  -- See Note [isCheapApp: bottoming functions]
  | otherwise
  = case idDetails fn of
      DataConWorkId {} -> True  -- Actually handled by isWorkFreeApp
      RecSelId {}      -> n_val_args == 1  -- See Note [Record selection]
      ClassOpId {}     -> n_val_args == 1
      PrimOpId op      -> primOpIsCheap op
      _                -> False
        -- In principle we should worry about primops
        -- that return a type variable, since the result
        -- might be applied to something, but I'm not going
        -- to bother to check the number of args

-- | The head test used by 'exprIsExpandable'.  Note the differences from
-- 'isCheapApp': primops are never expandable, bottoming functions are
-- never expandable, but CONLIKE functions and fully-dictionary-applied
-- functions are.
isExpandableApp :: CheapAppFun
isExpandableApp fn n_val_args
  | isWorkFreeApp fn n_val_args = True
  | otherwise
  = case idDetails fn of
      RecSelId {}  -> n_val_args == 1  -- See Note [Record selection]
      ClassOpId {} -> n_val_args == 1
      PrimOpId {}  -> False
      _ | isDeadEndId fn     -> False
          -- See Note [isExpandableApp: bottoming functions]
        | isConLikeId fn     -> True
        | all_args_are_preds -> True
        | otherwise          -> False

  where
     -- See if all the arguments are PredTys (implicit params or classes)
     -- If so we'll regard it as expandable; see Note [Expandable overloadings]
     all_args_are_preds = all_pred_args n_val_args (idType fn)

     all_pred_args n_val_args ty
       | n_val_args == 0
       = True
       | Just (bndr, ty) <- splitPiTy_maybe ty
       = case bndr of
           Named {}        -> all_pred_args n_val_args ty
           Anon InvisArg _ -> all_pred_args (n_val_args-1) ty
           Anon VisArg _   -> False
       | otherwise
       = False

{- Note [isCheapApp: bottoming functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
I'm not sure why we have a special case for bottoming
functions in isCheapApp.  Maybe we don't need it.
Note [isExpandableApp: bottoming functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's important that isExpandableApp does not respond True to bottoming functions. Recall undefined :: HasCallStack => a Suppose isExpandableApp responded True to (undefined d), and we had: x = undefined Then Simplify.prepareRhs would ANF the RHS: d = x = undefined d This is already bad: we gain nothing from having x bound to (undefined var), unlike the case for data constructors. Worse, we get the simplifier loop described in OccurAnal Note [Cascading inlines]. Suppose x occurs just once; OccurAnal.occAnalNonRecRhs decides x will certainly_inline; so we end up inlining d right back into x; but in the end x doesn't inline because it is bottom (preInlineUnconditionally); so the process repeats.. We could elaborate the certainly_inline logic some more, but it's better just to treat bottoming bindings as non-expandable, because ANFing them is a bad idea in the first place. Note [Record selection] ~~~~~~~~~~~~~~~~~~~~~~~~~~ I'm experimenting with making record selection look cheap, so we will substitute it inside a lambda. Particularly for dictionary field selection. BUT: Take care with (sel d x)! The (sel d) might be cheap, but there's no guarantee that (sel d x) will be too. Hence (n_val_args == 1) Note [Expandable overloadings] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose the user wrote this {-# RULE forall x. foo (negate x) = h x #-} f x = ....(foo (negate x)).... They'd expect the rule to fire. But since negate is overloaded, we might get this: f = \d -> let n = negate d in \x -> ...foo (n x)... So we treat the application of a function (negate in this case) to a *dictionary* as expandable. In effect, every function is CONLIKE when it's applied only to dictionaries. 
************************************************************************ * * exprOkForSpeculation * * ************************************************************************ -} ----------------------------- -- | 'exprOkForSpeculation' returns True of an expression that is: -- -- * Safe to evaluate even if normal order eval might not -- evaluate the expression at all, or -- -- * Safe /not/ to evaluate even if normal order would do so -- -- It is usually called on arguments of unlifted type, but not always -- In particular, Simplify.rebuildCase calls it on lifted types -- when a 'case' is a plain 'seq'. See the example in -- Note [exprOkForSpeculation: case expressions] below -- -- Precisely, it returns @True@ iff: -- a) The expression guarantees to terminate, -- b) soon, -- c) without causing a write side effect (e.g. writing a mutable variable) -- d) without throwing a Haskell exception -- e) without risking an unchecked runtime exception (array out of bounds, -- divide by zero) -- -- For @exprOkForSideEffects@ the list is the same, but omitting (e). -- -- Note that -- exprIsHNF implies exprOkForSpeculation -- exprOkForSpeculation implies exprOkForSideEffects -- -- See Note [PrimOp can_fail and has_side_effects] in "GHC.Builtin.PrimOps" -- and Note [Transformations affected by can_fail and has_side_effects] -- -- As an example of the considerations in this test, consider: -- -- > let x = case y# +# 1# of { r# -> I# r# } -- > in E -- -- being translated to: -- -- > case y# +# 1# of { r# -> -- > let x = I# r# -- > in E -- > } -- -- We can only do this if the @y + 1@ is ok for speculation: it has no -- side effects, and can't diverge or raise an exception. 
exprOkForSpeculation, exprOkForSideEffects :: CoreExpr -> Bool
exprOkForSpeculation = expr_ok fun_always_ok primOpOkForSpeculation
exprOkForSideEffects = expr_ok fun_always_ok primOpOkForSideEffects

-- | Trivial function predicate used by the two entry points above:
-- every function head is acceptable.
fun_always_ok :: Id -> Bool
fun_always_ok _ = True

-- | A special version of 'exprOkForSpeculation' used during
-- Note [Speculative evaluation]. When the predicate arg `fun_ok` returns False
-- for `b`, then `b` is never considered ok-for-spec.
exprOkForSpecEval :: (Id -> Bool) -> CoreExpr -> Bool
exprOkForSpecEval fun_ok = expr_ok fun_ok primOpOkForSpeculation

-- | Shared worker for 'exprOkForSpeculation', 'exprOkForSideEffects' and
-- 'exprOkForSpecEval'; parameterised over the function-head predicate and
-- the primop predicate (speculation vs side-effects-only).
expr_ok :: (Id -> Bool) -> (PrimOp -> Bool) -> CoreExpr -> Bool
expr_ok _ _ (Lit _)      = True
expr_ok _ _ (Type _)     = True
expr_ok _ _ (Coercion _) = True

expr_ok fun_ok primop_ok (Var v)    = app_ok fun_ok primop_ok v []
expr_ok fun_ok primop_ok (Cast e _) = expr_ok fun_ok primop_ok e
expr_ok fun_ok primop_ok (Lam b e)
  | isTyVar b = expr_ok fun_ok primop_ok e
  | otherwise = True   -- A value lambda is a value; evaluating it is free

-- Tick annotations that *tick* cannot be speculated, because these
-- are meant to identify whether or not (and how often) the particular
-- source expression was evaluated at runtime.
expr_ok fun_ok primop_ok (Tick tickish e)
  | tickishCounts tickish = False
  | otherwise             = expr_ok fun_ok primop_ok e

expr_ok _ _ (Let {}) = False
  -- Lets can be stacked deeply, so just give up.
  -- In any case, the argument of exprOkForSpeculation is
  -- usually in a strict context, so any lets will have been
  -- floated away.

expr_ok fun_ok primop_ok (Case scrut bndr _ alts)
  =  -- See Note [exprOkForSpeculation: case expressions]
     expr_ok fun_ok primop_ok scrut
  && isUnliftedType (idType bndr)
      -- OK to call isUnliftedType: binders always have a fixed RuntimeRep
  && all (\(Alt _ _ rhs) -> expr_ok fun_ok primop_ok rhs) alts
  && altsAreExhaustive alts

expr_ok fun_ok primop_ok other_expr
  -- Application: take the head apart and check it with app_ok
  | (expr, args) <- collectArgs other_expr
  = case stripTicksTopE (not . tickishCounts) expr of
        Var f -> app_ok fun_ok primop_ok f args

        -- 'LitRubbish' is the only literal that can occur in the head of an
        -- application and will not be matched by the above case (Var /= Lit).
        -- See Note [How a rubbish literal can be the head of an application]
        -- in GHC.Types.Literal
        Lit lit | debugIsOn, not (isLitRubbish lit)
                -> pprPanic "Non-rubbish lit in app head" (ppr lit)
                | otherwise
                -> True

        _ -> False

-----------------------------
-- | Is an application of 'fun' to the given arguments ok to speculate
-- (with the same two predicates as 'expr_ok')?  'args' are the
-- arguments collected so far; [] for a bare occurrence of the variable.
app_ok :: (Id -> Bool) -> (PrimOp -> Bool) -> Id -> [CoreExpr] -> Bool
app_ok fun_ok primop_ok fun args
  | not (fun_ok fun)
  = False -- This code path is only taken for Note [Speculative evaluation]

  | otherwise
  = case idDetails fun of
      DFunId new_type -> not new_type
        -- DFuns terminate, unless the dict is implemented
        -- with a newtype in which case they may not

      DataConWorkId {} -> True
        -- The strictness of the constructor has already
        -- been expressed by its "wrapper", so we don't need
        -- to take the arguments into account

      PrimOpId op
        | primOpIsDiv op
        , [arg1, Lit lit] <- args
        -> not (isZeroLit lit) && expr_ok fun_ok primop_ok arg1
              -- Special case for dividing operations that fail
              -- In general they are NOT ok-for-speculation
              -- (which primop_ok will catch), but they ARE OK
              -- if the divisor is definitely non-zero.
              -- Often there is a literal divisor, and this
              -- can get rid of a thunk in an inner loop

        | SeqOp <- op  -- See Note [exprOkForSpeculation and SeqOp/DataToTagOp]
        -> False       --   for the special cases for SeqOp and DataToTagOp

        | DataToTagOp <- op
        -> False

        | KeepAliveOp <- op
        -> False

        | otherwise
        -> primop_ok op  -- Check the primop itself
        && and (zipWith primop_arg_ok arg_tys args)  -- Check the arguments

      _  -- Unlifted types
         -- c.f. the Var case of exprIsHNF
         | Just Unlifted <- typeLevity_maybe (idType fun)
         -> assertPpr (n_val_args == 0) (ppr fun $$ ppr args)
            True   -- Our only unlifted types are Int# etc, so will have
                   -- no value args.  The assert is just to check this.
                   -- If we added unlifted function types this would change,
                   -- and we'd need to actually test n_val_args == 0.

         -- Partial applications
         | idArity fun > n_val_args
         -> True

         -- Functions that terminate fast without raising exceptions etc
         -- See Note [Discarding unnecessary unsafeEqualityProofs]
         | fun `hasKey` unsafeEqualityProofIdKey
         -> True

         | otherwise
         -> False
             -- NB: even in the nullary case, do /not/ check
             --     for evaluated-ness of the fun;
             --     see Note [exprOkForSpeculation and evaluated variables]
  where
    n_val_args   = valArgCount args
    (arg_tys, _) = splitPiTys (idType fun)

    primop_arg_ok :: TyBinder -> CoreExpr -> Bool
    primop_arg_ok (Named _) _ = True   -- A type argument
    primop_arg_ok (Anon _ ty) arg      -- A term argument
       | Just Lifted <- typeLevity_maybe (scaledThing ty)
       = True  -- See Note [Primops with lifted arguments]
       | otherwise
       = expr_ok fun_ok primop_ok arg

-----------------------------
altsAreExhaustive :: [Alt b] -> Bool
-- True  <=> the case alternatives are definitely exhaustive
-- False <=> they may or may not be
altsAreExhaustive []
  = False    -- Should not happen
altsAreExhaustive (Alt con1 _ _ : alts)
  = case con1 of
      DEFAULT   -> True
      LitAlt {} -> False
      DataAlt c -> alts `lengthIs` (tyConFamilySize (dataConTyCon c) - 1)
      -- It is possible to have an exhaustive case that does not
      -- enumerate all constructors, notably in a GADT match, but
      -- we behave conservatively here -- I don't think it's important
      -- enough to deserve special treatment

{- Note [exprOkForSpeculation: case expressions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
exprOkForSpeculation accepts very special case expressions.
Reason: (a ==# b) is ok-for-speculation, but the litEq rules
in GHC.Core.Opt.ConstantFold convert it (a ==# 3#) to
   case a of { DEFAULT -> 0#; 3# -> 1# }
for excellent reasons described in
  GHC.Core.Opt.ConstantFold Note [The litEq rule: converting equality to case].
So, annoyingly, we want that case expression to be
ok-for-speculation too. Bother.
But we restrict it sharply:

* We restrict it to unlifted scrutinees. Consider this:
     case x of y {
       DEFAULT -> ... (let v::Int# = case y of { True  -> e1
                                               ; False -> e2 }
                       in ...) ...

  Does the RHS of v satisfy the let/app invariant?  Previously we said
  yes, on the grounds that y is evaluated.  But the binder-swap done
  by GHC.Core.Opt.SetLevels would transform the inner alternative to
     DEFAULT -> ... (let v::Int# = case x of { ... }
                     in ...) ....
  which does /not/ satisfy the let/app invariant, because x is
  not evaluated. See Note [Binder-swap during float-out]
  in GHC.Core.Opt.SetLevels.  To avoid this awkwardness it seems simpler
  to stick to unlifted scrutinees where the issue does not arise.

* We restrict it to exhaustive alternatives. A non-exhaustive
  case manifestly isn't ok-for-speculation. For example, this is
  a valid program (albeit a slightly dodgy one)
    let v = case x of { B -> ...; C -> ... }
    in case x of
         A -> ...
         _ -> ...v...v....
  Should v be considered ok-for-speculation?  Its scrutinee may be
  evaluated, but the alternatives are incomplete so we should not
  evaluate it strictly.

  Now, all this is for lifted types, but it'd be the same for any
  finite unlifted type. We don't have many of them, but we might
  add unlifted algebraic types in due course.


----- Historical note: #15696: --------
  Previously GHC.Core.Opt.SetLevels used exprOkForSpeculation to guide
  floating of single-alternative cases; it now uses exprIsHNF
  Note [Floating single-alternative cases].

  But in those days, consider
    case e of x { DEFAULT ->
      ...(case x of y
            A -> ...
            _ -> ...(case (case x of { B -> p; C -> p }) of
                       I# r -> blah)...
  If GHC.Core.Opt.SetLevels considers the inner nested case as
  ok-for-speculation it can do case-floating (in GHC.Core.Opt.SetLevels).
  So we'd float to:
    case e of x { DEFAULT ->
      case (case x of { B -> p; C -> p }) of I# r ->
      ...(case x of y
            A -> ...
            _ -> ...blah...)...
  which is utterly bogus (seg fault); see #5453.
----- Historical note: #3717: -------- foo :: Int -> Int foo 0 = 0 foo n = (if n < 5 then 1 else 2) `seq` foo (n-1) In earlier GHCs, we got this: T.$wfoo = \ (ww :: GHC.Prim.Int#) -> case ww of ds { __DEFAULT -> case (case <# ds 5 of _ { GHC.Types.False -> lvl1; GHC.Types.True -> lvl}) of _ { __DEFAULT -> T.$wfoo (GHC.Prim.-# ds_XkE 1) }; 0 -> 0 } Before join-points etc we could only get rid of two cases (which are redundant) by recognising that the (case <# ds 5 of { ... }) is ok-for-speculation, even though it has /lifted/ type. But now join points do the job nicely. ------- End of historical note ------------ Note [Primops with lifted arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Is this ok-for-speculation (see #13027)? reallyUnsafePtrEquality# a b Well, yes. The primop accepts lifted arguments and does not evaluate them. Indeed, in general primops are, well, primitive and do not perform evaluation. Bottom line: * In exprOkForSpeculation we simply ignore all lifted arguments. * In the rare case of primops that /do/ evaluate their arguments, (namely DataToTagOp and SeqOp) return False; see Note [exprOkForSpeculation and evaluated variables] Note [exprOkForSpeculation and SeqOp/DataToTagOp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most primops with lifted arguments don't evaluate them (see Note [Primops with lifted arguments]), so we can ignore that argument entirely when doing exprOkForSpeculation. But DataToTagOp and SeqOp are exceptions to that rule. For reasons described in Note [exprOkForSpeculation and evaluated variables], we simply return False for them. Not doing this made #5129 go bad. Lots of discussion in #15696. Note [exprOkForSpeculation and evaluated variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Recall that seq# :: forall a s. a -> State# s -> (# State# s, a #) dataToTag# :: forall a. a -> Int# must always evaluate their first argument. Now consider these examples: * case x of y { DEFAULT -> ....y.... 
} Should 'y' (alone) be considered ok-for-speculation? * case x of y { DEFAULT -> ....f (dataToTag# y)... } Should (dataToTag# y) be considered ok-for-spec? You could argue 'yes', because in the case alternative we know that 'y' is evaluated. But the binder-swap transformation, which is extremely useful for float-out, changes these expressions to case x of y { DEFAULT -> ....x.... } case x of y { DEFAULT -> ....f (dataToTag# x)... } And now the expression does not obey the let/app invariant! Yikes! Moreover we really might float (f (dataToTag# x)) outside the case, and then it really, really doesn't obey the let/app invariant. The solution is simple: exprOkForSpeculation does not try to take advantage of the evaluated-ness of (lifted) variables. And it returns False (always) for DataToTagOp and SeqOp. Note that exprIsHNF /can/ and does take advantage of evaluated-ness; it doesn't have the trickiness of the let/app invariant to worry about. Note [Discarding unnecessary unsafeEqualityProofs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In #20143 we found case unsafeEqualityProof @t1 @t2 of UnsafeRefl cv[dead] -> blah where 'blah' didn't mention 'cv'. We'd like to discard this redundant use of unsafeEqualityProof, via GHC.Core.Opt.Simplify.rebuildCase. To do this we need to know (a) that cv is unused (done by OccAnal), and (b) that unsafeEqualityProof terminates rapidly without side effects. At the moment we check that explicitly here in exprOkForSideEffects, but one might imagine a more systematic check in future. ************************************************************************ * * exprIsHNF, exprIsConLike * * ************************************************************************ -} -- Note [exprIsHNF] See also Note [exprIsCheap and exprIsHNF] -- ~~~~~~~~~~~~~~~~ -- | exprIsHNF returns true for expressions that are certainly /already/ -- evaluated to /head/ normal form. 
-- This is used to decide
-- whether it's ok to change:
--
-- > case x of _ -> e
--
-- into:
--
-- > e
--
-- and to decide whether it's safe to discard a 'seq'.
--
-- So, it does /not/ treat variables as evaluated, unless they say they are.
-- However, it /does/ treat partial applications and constructor applications
-- as values, even if their arguments are non-trivial, provided the argument
-- type is lifted. For example, both of these are values:
--
-- > (:) (f x) (map f xs)
-- > map (...redex...)
--
-- because 'seq' on such things completes immediately.
--
-- For unlifted argument types, we have to be careful:
--
-- > C (f x :: Int#)
--
-- Suppose @f x@ diverges; then @C (f x)@ is not a value. However this can't
-- happen: see "GHC.Core#let_app_invariant". This invariant states that arguments of
-- unboxed type must be ok-for-speculation (or trivial).
exprIsHNF :: CoreExpr -> Bool           -- True => Value-lambda, constructor, PAP
exprIsHNF = exprIsHNFlike isDataConWorkId isEvaldUnfolding

-- | Similar to 'exprIsHNF' but includes CONLIKE functions as well as
-- data constructors. Conlike arguments are considered interesting by the
-- inliner.
exprIsConLike :: CoreExpr -> Bool       -- True => lambda, conlike, PAP
exprIsConLike = exprIsHNFlike isConLikeId isConLikeUnfolding

-- | Returns true for values or value-like expressions. These are lambdas,
-- constructors / CONLIKE functions (as determined by the function argument)
-- or PAPs.
--
exprIsHNFlike :: (Var -> Bool) -> (Unfolding -> Bool) -> CoreExpr -> Bool
exprIsHNFlike is_con is_con_unf = is_hnf_like
  where
    is_hnf_like (Var v) -- NB: There are no value args at this point
      =  id_app_is_value v 0 -- Catches nullary constructors,
                             --      so that [] and () are values, for example
                             -- and (e.g.) primops that don't have unfoldings
      || is_con_unf (idUnfolding v)
         -- Check the thing's unfolding; it might be bound to a value
         --   or to a guaranteed-evaluated variable (isEvaldUnfolding)
         --   Contrast with Note [exprOkForSpeculation and evaluated variables]
         -- We don't look through loop breakers here, which is a bit conservative
         -- but otherwise I worry that if an Id's unfolding is just itself,
         -- we could get an infinite loop
      || ( typeLevity_maybe (idType v) == Just Unlifted )
         -- Unlifted binders are always evaluated (#20140)

    is_hnf_like (Lit l)          = not (isLitRubbish l)
        -- Regarding a LitRubbish as ConLike leads to unproductive inlining in
        -- WWRec, see #20035

    is_hnf_like (Type _)         = True       -- Types are honorary Values;
                                              -- we don't mind copying them
    is_hnf_like (Coercion _)     = True       -- Same for coercions
    is_hnf_like (Lam b e)        = isRuntimeVar b || is_hnf_like e
    is_hnf_like (Tick tickish e) = not (tickishCounts tickish)
                                   && is_hnf_like e
                                      -- See Note [exprIsHNF Tick]
    is_hnf_like (Cast e _)       = is_hnf_like e
    is_hnf_like (App e a)
      | isValArg a               = app_is_value e 1
      | otherwise                = is_hnf_like e
    is_hnf_like (Let _ e)        = is_hnf_like e  -- Lazy let(rec)s don't affect us
    is_hnf_like _                = False          -- Case expressions are not HNF

    -- 'n' is the number of value args to which the expression is applied
    -- And n>0: there is at least one value argument
    app_is_value :: CoreExpr -> Int -> Bool
    app_is_value (Var f)    nva = id_app_is_value f nva
    app_is_value (Tick _ f) nva = app_is_value f nva
    app_is_value (Cast f _) nva = app_is_value f nva
    app_is_value (App f a)  nva
      | isValArg a              = app_is_value f (nva + 1)
      | otherwise               = app_is_value f nva
    app_is_value _          _   = False

    -- A head variable applied to n_val_args value args is a value if it
    -- is a constructor/conlike (per 'is_con') or a partial application
    id_app_is_value id n_val_args
       = is_con id
       || idArity id > n_val_args

{- Note [exprIsHNF Tick]
~~~~~~~~~~~~~~~~~~~~~
We can discard source annotations on HNFs as long as they aren't
tick-like:

  scc c (\x . e)    =>  \x . e
  scc c (C x1..xn)  =>  C x1..xn

So we regard these as HNFs.
Tick annotations that tick are not regarded as HNF if the expression they
surround is HNF, because the tick is there to tell us that the expression
was evaluated, so we don't want to discard a seq on it.
-}

-- | Can we bind this 'CoreExpr' at the top level?
exprIsTopLevelBindable :: CoreExpr -> Type -> Bool
-- See Note [Core top-level string literals]
-- Precondition: exprType expr = ty
-- Top-level literal strings can't even be wrapped in ticks
--   see Note [Core top-level string literals] in "GHC.Core"
exprIsTopLevelBindable expr ty
  = not (mightBeUnliftedType ty)
    -- Note that 'expr' may not have a fixed runtime representation here,
    -- consequently we must use 'mightBeUnliftedType' rather than 'isUnliftedType',
    -- as the latter would panic.
  || exprIsTickedString expr

-- | Check if the expression is zero or more Ticks wrapped around a literal
-- string.
exprIsTickedString :: CoreExpr -> Bool
exprIsTickedString = isJust . exprIsTickedString_maybe

-- | Extract a literal string from an expression that is zero or more Ticks
-- wrapped around a literal string. Returns Nothing if the expression has a
-- different shape.
-- Used to "look through" Ticks in places that need to handle literal strings.
exprIsTickedString_maybe :: CoreExpr -> Maybe ByteString
exprIsTickedString_maybe (Lit (LitString bs)) = Just bs
exprIsTickedString_maybe (Tick t e)
  -- we don't tick literals with CostCentre ticks, compare to mkTick
  | tickishPlace t == PlaceCostCentre = Nothing
  | otherwise = exprIsTickedString_maybe e
exprIsTickedString_maybe _ = Nothing

{-
************************************************************************
*                                                                      *
             Instantiating data constructors
*                                                                      *
************************************************************************

These InstPat functions go here to avoid circularity between DataCon and Id
-}

-- | 'dataConRepInstPat' uses the fixed name "ipv" for every binder;
-- 'dataConRepFSInstPat' takes the caller's list of names.
dataConRepInstPat   ::                 [Unique] -> Mult -> DataCon -> [Type] -> ([TyCoVar], [Id])
dataConRepFSInstPat :: [FastString] -> [Unique] -> Mult -> DataCon -> [Type] -> ([TyCoVar], [Id])
dataConRepInstPat   = dataConInstPat (repeat ((fsLit "ipv")))
dataConRepFSInstPat = dataConInstPat

dataConInstPat :: [FastString]          -- A long enough list of FSs to use for names
               -> [Unique]              -- An equally long list of uniques, at least one for each binder
               -> Mult                  -- The multiplicity annotation of the case expression: scales the multiplicity of variables
               -> DataCon
               -> [Type]                -- Types to instantiate the universally quantified tyvars
               -> ([TyCoVar], [Id])     -- Return instantiated variables
-- dataConInstPat arg_fun fss us mult con inst_tys returns a tuple
-- (ex_tvs, arg_ids),
--
--   ex_tvs are intended to be used as binders for existential type args
--
--   arg_ids are intended to be used as binders for value arguments,
--     and their types have been instantiated with inst_tys and ex_tys
--     The arg_ids include both evidence and
--     programmer-specified arguments (both after rep-ing)
--
-- Example.
--  The following constructor T1
--
--  data T a where
--    T1 :: forall b. Int -> b -> T(a,b)
--    ...
--
--  has representation type
--   forall a. forall a1. forall b. (a ~ (a1,b)) =>
--     Int -> b -> T a
--
--  dataConInstPat fss us T1 (a1',b') will return
--
--  ([a1'', b''], [c :: (a1', b')~(a1'', b''), x :: Int, y :: b''])
--
--  where the double-primed variables are created with the FastStrings and
--  Uniques given as fss and us
dataConInstPat fss uniqs mult con inst_tys
  = assert (univ_tvs `equalLength` inst_tys) $
    (ex_bndrs, arg_ids)
  where
    univ_tvs = dataConUnivTyVars con
    ex_tvs   = dataConExTyCoVars con
    arg_tys  = dataConRepArgTys con
    arg_strs = dataConRepStrictness con  -- 1-1 with arg_tys
    n_ex = length ex_tvs

    -- split the Uniques and FastStrings
    (ex_uniqs, id_uniqs) = splitAt n_ex uniqs
    (ex_fss,   id_fss)   = splitAt n_ex fss

    -- Make the instantiating substitution for universals
    univ_subst = zipTvSubst univ_tvs inst_tys

    -- Make existential type variables, applying and extending the substitution
    (full_subst, ex_bndrs) = mapAccumL mk_ex_var univ_subst
                                       (zip3 ex_tvs ex_fss ex_uniqs)

    -- Clone one existential tyvar/covar under the accumulated substitution,
    -- extending the substitution with the clone
    mk_ex_var :: TCvSubst -> (TyCoVar, FastString, Unique) -> (TCvSubst, TyCoVar)
    mk_ex_var subst (tv, fs, uniq) = (Type.extendTCvSubstWithClone subst tv new_tv
                                     , new_tv)
      where
        new_tv | isTyVar tv
               = mkTyVar (mkSysTvName uniq fs) kind
               | otherwise
               = mkCoVar (mkSystemVarName uniq fs) kind
        kind   = Type.substTyUnchecked subst (varType tv)

    -- Make value vars, instantiating types
    arg_ids = zipWith4 mk_id_var id_uniqs id_fss arg_tys arg_strs

    mk_id_var uniq fs (Scaled m ty) str
      = setCaseBndrEvald str $  -- See Note [Mark evaluated arguments]
        mkLocalIdOrCoVar name (mult `mkMultMul` m) (Type.substTy full_subst ty)
      where
        name = mkInternalName uniq (mkVarOccFS fs) noSrcSpan

{- Note [Mark evaluated arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When pattern matching on a constructor with strict fields, the binder
can have an 'evaldUnfolding'.  Moreover, it *should* have one, so that
when loading an interface file unfolding like:
  data T = MkT !Int
  f x = case x of { MkT y ->
        let v::Int# = case y of I# n -> n+1
        in ... }
we don't want Lint to complain.
The 'y' is evaluated, so the case in the RHS of the binding for 'v' is
fine.  But only if we *know* that 'y' is evaluated.

c.f. add_evals in GHC.Core.Opt.Simplify.simplAlt

************************************************************************
*                                                                      *
             Equality
*                                                                      *
************************************************************************
-}

-- | A cheap equality test which bales out fast!
--      If it returns @True@ the arguments are definitely equal,
--      otherwise, they may or may not be equal.
cheapEqExpr :: Expr b -> Expr b -> Bool
cheapEqExpr = cheapEqExpr' (const False)

-- | Cheap expression equality test, can ignore ticks by type.
cheapEqExpr' :: (CoreTickish -> Bool) -> Expr b -> Expr b -> Bool
{-# INLINE cheapEqExpr' #-}
cheapEqExpr' ignoreTick e1 e2
  = go e1 e2
  where
    -- NB: clause order matters — the two one-sided Tick clauses (which
    -- skip ignorable ticks on either side) must come before the
    -- two-sided Tick clause that compares ticks for equality.
    go (Var v1)   (Var v2)         = v1 == v2
    go (Lit lit1) (Lit lit2)       = lit1 == lit2
    go (Type t1)  (Type t2)        = t1 `eqType` t2
    go (Coercion c1) (Coercion c2) = c1 `eqCoercion` c2
    go (App f1 a1)  (App f2 a2)    = f1 `go` f2 && a1 `go` a2
    go (Cast e1 t1) (Cast e2 t2)   = e1 `go` e2 && t1 `eqCoercion` t2
    go (Tick t1 e1) e2 | ignoreTick t1 = go e1 e2
    go e1 (Tick t2 e2) | ignoreTick t2 = go e1 e2
    go (Tick t1 e1) (Tick t2 e2)   = t1 == t2 && e1 `go` e2
    go _ _ = False

eqExpr :: InScopeSet -> CoreExpr -> CoreExpr -> Bool
-- Compares for equality, modulo alpha
-- TODO: remove eqExpr once GHC 9.4 is released
eqExpr _ = eqCoreExpr
{-# DEPRECATED eqExpr "Use 'GHC.Core.Map.Expr.eqCoreExpr', 'eqExpr' will be removed in GHC 9.6" #-}

-- Used by diffBinds, which is itself only used in GHC.Core.Lint.lintAnnots
eqTickish :: RnEnv2 -> CoreTickish -> CoreTickish -> Bool
eqTickish env (Breakpoint lext lid lids) (Breakpoint rext rid rids)
      = lid == rid  &&
        map (rnOccL env) lids == map (rnOccR env) rids &&
        lext == rext
eqTickish _ l r = l == r

-- | Finds differences between core bindings, see @diffExpr@.
--
-- The main problem here is that while we expect the binds to have the
-- same order in both lists, this is not guaranteed.
To do this -- properly we'd either have to do some sort of unification or check -- all possible mappings, which would be seriously expensive. So -- instead we simply match single bindings as far as we can. This -- leaves us just with mutually recursive and/or mismatching bindings, -- which we then speculatively match by ordering them. It's by no means -- perfect, but gets the job done well enough. -- -- Only used in GHC.Core.Lint.lintAnnots diffBinds :: Bool -> RnEnv2 -> [(Var, CoreExpr)] -> [(Var, CoreExpr)] -> ([SDoc], RnEnv2) diffBinds top env binds1 = go (length binds1) env binds1 where go _ env [] [] = ([], env) go fuel env binds1 binds2 -- No binds left to compare? Bail out early. | null binds1 || null binds2 = (warn env binds1 binds2, env) -- Iterated over all binds without finding a match? Then -- try speculatively matching binders by order. | fuel == 0 = if not $ env `inRnEnvL` fst (head binds1) then let env' = uncurry (rnBndrs2 env) $ unzip $ zip (sort $ map fst binds1) (sort $ map fst binds2) in go (length binds1) env' binds1 binds2 -- If we have already tried that, give up else (warn env binds1 binds2, env) go fuel env ((bndr1,expr1):binds1) binds2 | let matchExpr (bndr,expr) = (isTyVar bndr || not top || null (diffIdInfo env bndr bndr1)) && null (diffExpr top (rnBndr2 env bndr1 bndr) expr1 expr) , (binds2l, (bndr2,_):binds2r) <- break matchExpr binds2 = go (length binds1) (rnBndr2 env bndr1 bndr2) binds1 (binds2l ++ binds2r) | otherwise -- No match, so push back (FIXME O(n^2)) = go (fuel-1) env (binds1++[(bndr1,expr1)]) binds2 go _ _ _ _ = panic "diffBinds: impossible" -- GHC isn't smart enough -- We have tried everything, but couldn't find a good match. So -- now we just return the comparison results when we pair up -- the binds in a pseudo-random order. 
warn env binds1 binds2 = concatMap (uncurry (diffBind env)) (zip binds1' binds2') ++ unmatched "unmatched left-hand:" (drop l binds1') ++ unmatched "unmatched right-hand:" (drop l binds2') where binds1' = sortBy (comparing fst) binds1 binds2' = sortBy (comparing fst) binds2 l = min (length binds1') (length binds2') unmatched _ [] = [] unmatched txt bs = [text txt $$ ppr (Rec bs)] diffBind env (bndr1,expr1) (bndr2,expr2) | ds@(_:_) <- diffExpr top env expr1 expr2 = locBind "in binding" bndr1 bndr2 ds -- Special case for TyVar, which we checked were bound to the same types in -- diffExpr, but don't have any IdInfo we would panic if called diffIdInfo. -- These let-bound types are created temporarily by the simplifier but inlined -- immediately. | isTyVar bndr1 && isTyVar bndr2 = [] | otherwise = diffIdInfo env bndr1 bndr2 -- | Finds differences between core expressions, modulo alpha and -- renaming. Setting @top@ means that the @IdInfo@ of bindings will be -- checked for differences as well. diffExpr :: Bool -> RnEnv2 -> CoreExpr -> CoreExpr -> [SDoc] diffExpr _ env (Var v1) (Var v2) | rnOccL env v1 == rnOccR env v2 = [] diffExpr _ _ (Lit lit1) (Lit lit2) | lit1 == lit2 = [] diffExpr _ env (Type t1) (Type t2) | eqTypeX env t1 t2 = [] diffExpr _ env (Coercion co1) (Coercion co2) | eqCoercionX env co1 co2 = [] diffExpr top env (Cast e1 co1) (Cast e2 co2) | eqCoercionX env co1 co2 = diffExpr top env e1 e2 diffExpr top env (Tick n1 e1) e2 | not (tickishIsCode n1) = diffExpr top env e1 e2 diffExpr top env e1 (Tick n2 e2) | not (tickishIsCode n2) = diffExpr top env e1 e2 diffExpr top env (Tick n1 e1) (Tick n2 e2) | eqTickish env n1 n2 = diffExpr top env e1 e2 -- The error message of failed pattern matches will contain -- generated names, which are allowed to differ. 
diffExpr _ _ (App (App (Var absent) _) _) (App (App (Var absent2) _) _) | isDeadEndId absent && isDeadEndId absent2 = [] diffExpr top env (App f1 a1) (App f2 a2) = diffExpr top env f1 f2 ++ diffExpr top env a1 a2 diffExpr top env (Lam b1 e1) (Lam b2 e2) | eqTypeX env (varType b1) (varType b2) -- False for Id/TyVar combination = diffExpr top (rnBndr2 env b1 b2) e1 e2 diffExpr top env (Let bs1 e1) (Let bs2 e2) = let (ds, env') = diffBinds top env (flattenBinds [bs1]) (flattenBinds [bs2]) in ds ++ diffExpr top env' e1 e2 diffExpr top env (Case e1 b1 t1 a1) (Case e2 b2 t2 a2) | equalLength a1 a2 && not (null a1) || eqTypeX env t1 t2 -- See Note [Empty case alternatives] in GHC.Data.TrieMap = diffExpr top env e1 e2 ++ concat (zipWith diffAlt a1 a2) where env' = rnBndr2 env b1 b2 diffAlt (Alt c1 bs1 e1) (Alt c2 bs2 e2) | c1 /= c2 = [text "alt-cons " <> ppr c1 <> text " /= " <> ppr c2] | otherwise = diffExpr top (rnBndrs2 env' bs1 bs2) e1 e2 diffExpr _ _ e1 e2 = [fsep [ppr e1, text "/=", ppr e2]] -- | Find differences in @IdInfo@. We will especially check whether -- the unfoldings match, if present (see @diffUnfold@). diffIdInfo :: RnEnv2 -> Var -> Var -> [SDoc] diffIdInfo env bndr1 bndr2 | arityInfo info1 == arityInfo info2 && cafInfo info1 == cafInfo info2 && oneShotInfo info1 == oneShotInfo info2 && inlinePragInfo info1 == inlinePragInfo info2 && occInfo info1 == occInfo info2 && demandInfo info1 == demandInfo info2 && callArityInfo info1 == callArityInfo info2 && levityInfo info1 == levityInfo info2 = locBind "in unfolding of" bndr1 bndr2 $ diffUnfold env (realUnfoldingInfo info1) (realUnfoldingInfo info2) | otherwise = locBind "in Id info of" bndr1 bndr2 [fsep [pprBndr LetBind bndr1, text "/=", pprBndr LetBind bndr2]] where info1 = idInfo bndr1; info2 = idInfo bndr2 -- | Find differences in unfoldings. Note that we will not check for -- differences of @IdInfo@ in unfoldings, as this is generally -- redundant, and can lead to an exponential blow-up in complexity. 
diffUnfold :: RnEnv2 -> Unfolding -> Unfolding -> [SDoc] diffUnfold _ NoUnfolding NoUnfolding = [] diffUnfold _ BootUnfolding BootUnfolding = [] diffUnfold _ (OtherCon cs1) (OtherCon cs2) | cs1 == cs2 = [] diffUnfold env (DFunUnfolding bs1 c1 a1) (DFunUnfolding bs2 c2 a2) | c1 == c2 && equalLength bs1 bs2 = concatMap (uncurry (diffExpr False env')) (zip a1 a2) where env' = rnBndrs2 env bs1 bs2 diffUnfold env (CoreUnfolding t1 _ _ c1 g1) (CoreUnfolding t2 _ _ c2 g2) | c1 == c2 && g1 == g2 = diffExpr False env t1 t2 diffUnfold _ uf1 uf2 = [fsep [ppr uf1, text "/=", ppr uf2]] -- | Add location information to diff messages locBind :: String -> Var -> Var -> [SDoc] -> [SDoc] locBind loc b1 b2 diffs = map addLoc diffs where addLoc d = d $$ nest 2 (parens (text loc <+> bindLoc)) bindLoc | b1 == b2 = ppr b1 | otherwise = ppr b1 <> char '/' <> ppr b2 {- ************************************************************************ * * Eta reduction * * ************************************************************************ Note [Eta reduction conditions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We try for eta reduction here, but *only* if we get all the way to an trivial expression. We don't want to remove extra lambdas unless we are going to avoid allocating this thing altogether. There are some particularly delicate points here: * We want to eta-reduce if doing so leaves a trivial expression, *including* a cast. For example \x. f |> co --> f |> co (provided co doesn't mention x) * Eta reduction is not valid in general: \x. bot /= bot This matters, partly for old-fashioned correctness reasons but, worse, getting it wrong can yield a seg fault. Consider f = \x.f x h y = case (case y of { True -> f `seq` True; False -> False }) of True -> ...; False -> ... If we (unsoundly) eta-reduce f to get f=f, the strictness analyser says f=bottom, and replaces the (f `seq` True) with just (f `cast` unsafe-co). BUT, as thing stand, 'f' got arity 1, and it *keeps* arity 1 (perhaps also wrongly). 
So CorePrep eta-expands the definition again, so that it does not terminate after all. Result: seg-fault because the boolean case actually gets a function value. See #1947. So it's important to do the right thing. * With linear types, eta-reduction can break type-checking: f :: A ⊸ B g :: A -> B g = \x. f x The above is correct, but eta-reducing g would yield g=f, the linter will complain that g and f don't have the same type. * Note [Arity care] ~~~~~~~~~~~~~~~~~ We need to be careful if we just look at f's arity. Currently (Dec07), f's arity is visible in its own RHS (see Note [Arity robustness] in GHC.Core.Opt.Simplify.Env) so we must *not* trust the arity when checking that 'f' is a value. Otherwise we will eta-reduce f = \x. f x to f = f Which might change a terminating program (think (f `seq` e)) to a non-terminating one. So we check for being a loop breaker first. However for GlobalIds we can look at the arity; and for primops we must, since they have no unfolding. * Regardless of whether 'f' is a value, we always want to reduce (/\a -> f a) to f This came up in a RULE: foldr (build (/\a -> g a)) did not match foldr (build (/\b -> ...something complex...)) The type checker can insert these eta-expanded versions, with both type and dictionary lambdas; hence the slightly ad-hoc isDictId * Never *reduce* arity. For example f = \xy. g x y Then if h has arity 1 we don't want to eta-reduce because then f's arity would decrease, and that is bad These delicacies are why we don't use exprIsTrivial and exprIsHNF here. Alas. Note [Eta reduction with casted arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (\(x:t3). 
f (x |> g)) :: t3 -> t2 where f :: t1 -> t2 g :: t3 ~ t1 This should be eta-reduced to f |> (sym g -> t2) So we need to accumulate a coercion, pushing it inward (past variable arguments only) thus: f (x |> co_arg) |> co --> (f |> (sym co_arg -> co)) x f (x:t) |> co --> (f |> (t -> co)) x f @ a |> co --> (f |> (forall a.co)) @ a f @ (g:t1~t2) |> co --> (f |> (t1~t2 => co)) @ (g:t1~t2) These are the equations for ok_arg. It's true that we could also hope to eta reduce these: (\xy. (f x |> g) y) (\xy. (f x y) |> g) But the simplifier pushes those casts outwards, so we don't need to address that here. -} -- When updating this function, make sure to update -- CorePrep.tryEtaReducePrep as well! tryEtaReduce :: UnVarSet -> [Var] -> CoreExpr -> Maybe CoreExpr tryEtaReduce rec_ids bndrs body = go (reverse bndrs) body (mkRepReflCo (exprType body)) where incoming_arity = count isId bndrs go :: [Var] -- Binders, innermost first, types [a3,a2,a1] -> CoreExpr -- Of type tr -> Coercion -- Of type tr ~ ts -> Maybe CoreExpr -- Of type a1 -> a2 -> a3 -> ts -- See Note [Eta reduction with casted arguments] -- for why we have an accumulating coercion go [] fun co | ok_fun fun , let used_vars = exprFreeVars fun `unionVarSet` tyCoVarsOfCo co , not (any (`elemVarSet` used_vars) bndrs) = Just (mkCast fun co) -- Check for any of the binders free in the result -- including the accumulated coercion go bs (Tick t e) co | tickishFloatable t = fmap (Tick t) $ go bs e co -- Float app ticks: \x -> Tick t (e x) ==> Tick t e go (b : bs) (App fun arg) co | Just (co', ticks) <- ok_arg b arg co (exprType fun) = fmap (flip (foldr mkTick) ticks) $ go bs fun co' -- Float arg ticks: \x -> e (Tick t x) ==> Tick t e go _ _ _ = Nothing -- Failure! 
--------------- -- Note [Eta reduction conditions] ok_fun (App fun (Type {})) = ok_fun fun ok_fun (Cast fun _) = ok_fun fun ok_fun (Tick _ expr) = ok_fun expr ok_fun (Var fun_id) = ok_fun_id fun_id || all ok_lam bndrs ok_fun _fun = False --------------- ok_fun_id fun = -- Don't eta-reduce in fun in its own recursive RHSs not (fun `elemUnVarSet` rec_ids) && -- criterion (R) -- There are arguments to reduce... fun_arity fun >= incoming_arity && -- ... and the function can be eta reduced to arity 0 canEtaReduceToArity fun 0 0 --------------- fun_arity fun -- See Note [Arity care] | arity > 0 = arity | isEvaldUnfolding (idUnfolding fun) = 1 -- See Note [Eta reduction of an eval'd function] | otherwise = 0 where arity = idArity fun --------------- ok_lam v = isTyVar v || isEvVar v --------------- ok_arg :: Var -- Of type bndr_t -> CoreExpr -- Of type arg_t -> Coercion -- Of kind (t1~t2) -> Type -- Type of the function to which the argument is applied -> Maybe (Coercion -- Of type (arg_t -> t1 ~ bndr_t -> t2) -- (and similarly for tyvars, coercion args) , [CoreTickish]) -- See Note [Eta reduction with casted arguments] ok_arg bndr (Type ty) co _ | Just tv <- getTyVar_maybe ty , bndr == tv = Just (mkHomoForAllCos [tv] co, []) ok_arg bndr (Var v) co fun_ty | bndr == v , let mult = idMult bndr , Just (fun_mult, _, _) <- splitFunTy_maybe fun_ty , mult `eqType` fun_mult -- There is no change in multiplicity, otherwise we must abort = Just (mkFunResCo Representational (idScaledType bndr) co, []) ok_arg bndr (Cast e co_arg) co fun_ty | (ticks, Var v) <- stripTicksTop tickishFloatable e , Just (fun_mult, _, _) <- splitFunTy_maybe fun_ty , bndr == v , fun_mult `eqType` idMult bndr = Just (mkFunCo Representational (multToCo fun_mult) (mkSymCo co_arg) co, ticks) -- The simplifier combines multiple casts into one, -- so we can have a simple-minded pattern match here ok_arg bndr (Tick t arg) co fun_ty | tickishFloatable t, Just (co', ticks) <- ok_arg bndr arg co fun_ty = Just (co', 
t:ticks) ok_arg _ _ _ _ = Nothing -- | Can we eta-reduce the given function to the specified arity? -- See Note [Eta reduction conditions]. canEtaReduceToArity :: Id -> JoinArity -> Arity -> Bool canEtaReduceToArity fun dest_join_arity dest_arity = not $ hasNoBinding fun -- Don't undersaturate functions with no binding. || ( isJoinId fun && dest_join_arity < idJoinArity fun ) -- Don't undersaturate join points. -- See Note [Invariants on join points] in GHC.Core, and #20599 || ( dest_arity < idCbvMarkArity fun ) -- Don't undersaturate StrictWorkerIds. -- See Note [CBV Function Ids] in GHC.CoreToStg.Prep. || isLinearType (idType fun) -- Don't perform eta reduction on linear types. -- If `f :: A %1-> B` and `g :: A -> B`, -- then `g x = f x` is OK but `g = f` is not. -- See Note [Eta reduction conditions]. {- Note [Eta reduction of an eval'd function] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Haskell it is not true that f = \x. f x because f might be bottom, and 'seq' can distinguish them. But it *is* true that f = f `seq` \x. f x and we'd like to simplify the latter to the former. This amounts to the rule that * when there is just *one* value argument, * f is not bottom we can eta-reduce \x. f x ===> f This turned up in #7542. -} {- ********************************************************************* * * \subsection{Determining non-updatable right-hand-sides} * * ************************************************************************ Top-level constructor applications can usually be allocated statically, but they can't if the constructor, or any of the arguments, come from another DLL (because we can't refer to static labels in other DLLs). If this happens we simply make the RHS into an updatable thunk, and 'execute' it rather than allocating it statically. 
-} {- ************************************************************************ * * \subsection{Type utilities} * * ************************************************************************ -} -- | True if the type has no non-bottom elements, e.g. when it is an empty -- datatype, or a GADT with non-satisfiable type parameters, e.g. Int :~: Bool. -- See Note [Bottoming expressions] -- -- See Note [No alternatives lint check] for another use of this function. isEmptyTy :: Type -> Bool isEmptyTy ty -- Data types where, given the particular type parameters, no data -- constructor matches, are empty. -- This includes data types with no constructors, e.g. Data.Void.Void. | Just (tc, inst_tys) <- splitTyConApp_maybe ty , Just dcs <- tyConDataCons_maybe tc , all (dataConCannotMatch inst_tys) dcs = True | otherwise = False -- | If @normSplitTyConApp_maybe _ ty = Just (tc, tys, co)@ -- then @ty |> co = tc tys@. It's 'splitTyConApp_maybe', but looks through -- coercions via 'topNormaliseType_maybe'. Hence the \"norm\" prefix. normSplitTyConApp_maybe :: FamInstEnvs -> Type -> Maybe (TyCon, [Type], Coercion) normSplitTyConApp_maybe fam_envs ty | let Reduction co ty1 = topNormaliseType_maybe fam_envs ty `orElse` (mkReflRedn Representational ty) , Just (tc, tc_args) <- splitTyConApp_maybe ty1 = Just (tc, tc_args, co) normSplitTyConApp_maybe _ _ = Nothing {- ***************************************************** * * StaticPtr * ***************************************************** -} -- | @collectMakeStaticArgs (makeStatic t srcLoc e)@ yields -- @Just (makeStatic, t, srcLoc, e)@. -- -- Returns @Nothing@ for every other expression. 
collectMakeStaticArgs :: CoreExpr -> Maybe (CoreExpr, Type, CoreExpr, CoreExpr) collectMakeStaticArgs e | (fun@(Var b), [Type t, loc, arg], _) <- collectArgsTicks (const True) e , idName b == makeStaticName = Just (fun, t, loc, arg) collectMakeStaticArgs _ = Nothing {- ************************************************************************ * * \subsection{Join points} * * ************************************************************************ -} -- | Does this binding bind a join point (or a recursive group of join points)? isJoinBind :: CoreBind -> Bool isJoinBind (NonRec b _) = isJoinId b isJoinBind (Rec ((b, _) : _)) = isJoinId b isJoinBind _ = False dumpIdInfoOfProgram :: Bool -> (IdInfo -> SDoc) -> CoreProgram -> SDoc dumpIdInfoOfProgram dump_locals ppr_id_info binds = vcat (map printId ids) where ids = sortBy (stableNameCmp `on` getName) (concatMap getIds binds) getIds (NonRec i _) = [ i ] getIds (Rec bs) = map fst bs -- By default only include full info for exported ids, unless we run in the verbose -- pprDebug mode. printId id | isExportedId id || dump_locals = ppr id <> colon <+> (ppr_id_info (idInfo id)) | otherwise = empty {- ************************************************************************ * * \subsection{Tag inference things} * * ************************************************************************ -} {- Note [Call-by-value for worker args] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we unbox a constructor with strict fields we want to preserve the information that some of the arguments came out of strict fields and therefore should be already properly tagged, however we can't express this directly in core. Instead what we do is generate a worker like this: data T = MkT A !B foo = case T of MkT a b -> $wfoo a b $wfoo a b = case b of b' -> rhs[b/b'] This makes the worker strict in b causing us to use a more efficient calling convention for `b` where the caller needs to ensure `b` is properly tagged and evaluated before it's passed to $wfoo. 
See Note [CBV Function Ids]. Usually the argument will be known to be properly tagged at the call site so there is no additional work for the caller and the worker can be more efficient since it can assume the presence of a tag. This is especially true for recursive functions like this: -- myPred expect it's argument properly tagged myPred !x = ... loop :: MyPair -> Int loop (MyPair !x !y) = case x of A -> 1 B -> 2 _ -> loop (MyPair (myPred x) (myPred y)) Here we would ordinarily not be strict in y after unboxing. However if we pass it as a regular argument then this means on every iteration of loop we will incur an extra seq on y before we can pass it to `myPred` which isn't great! That is in STG after tag inference we get: Rec { Find.$wloop [InlPrag=[2], Occ=LoopBreaker] :: Find.MyEnum -> Find.MyEnum -> GHC.Prim.Int# [GblId[StrictWorker([!, ~])], Arity=2, Str=<1L>, Unf=OtherCon []] = {} \r [x y] case x of x' [Occ=Once1] { __DEFAULT -> case y of y' [Occ=Once1] { __DEFAULT -> case Find.$wmyPred y' of pred_y [Occ=Once1] { __DEFAULT -> case Find.$wmyPred x' of pred_x [Occ=Once1] { __DEFAULT -> Find.$wloop pred_x pred_y; }; }; Find.A -> 1#; Find.B -> 2#; }; end Rec } Here comes the tricky part: If we make $wloop strict in both x/y and we get: Rec { Find.$wloop [InlPrag=[2], Occ=LoopBreaker] :: Find.MyEnum -> Find.MyEnum -> GHC.Prim.Int# [GblId[StrictWorker([!, !])], Arity=2, Str=<1L>, Unf=OtherCon []] = {} \r [x y] case y of y' [Occ=Once1] { __DEFAULT -> case x of x' [Occ=Once1] { __DEFAULT -> case Find.$wmyPred y' of pred_y [Occ=Once1] { __DEFAULT -> case Find.$wmyPred x' of pred_x [Occ=Once1] { __DEFAULT -> Find.$wloop pred_x pred_y; }; }; Find.A -> 1#; Find.B -> 2#; }; end Rec } Here both x and y are known to be tagged in the function body since we pass strict worker args using unlifted cbv. This means the seqs on x and y both become no-ops and compared to the first version the seq on `y` disappears at runtime. 
The downside is that the caller of $wfoo potentially has to evaluate `y` once if we can't prove it isn't already evaluated. But y coming out of a strict field is in WHNF so safe to evaluated. And most of the time it will be properly tagged+evaluated already at the call site because of the Strict Field Invariant! See Note [Strict Field Invariant] for more in this. This makes GHC itself around 1% faster despite doing slightly more work! So this is generally quite good. We only apply this when we think there is a benefit in doing so however. There are a number of cases in which it would be useless to insert an extra seq. ShouldStrictifyIdForCbv tries to identify these to avoid churn in the simplifier. See Note [Which Ids should be strictified] for details on this. -} mkStrictFieldSeqs :: [(Id,StrictnessMark)] -> CoreExpr -> (CoreExpr) mkStrictFieldSeqs args rhs = foldr addEval rhs args where case_ty = exprType rhs addEval :: (Id,StrictnessMark) -> (CoreExpr) -> (CoreExpr) addEval (arg_id,arg_cbv) (rhs) -- Argument representing strict field. | isMarkedStrict arg_cbv , shouldStrictifyIdForCbv arg_id -- Make sure to remove unfoldings here to avoid the simplifier dropping those for OtherCon[] unfoldings. = Case (Var $! zapIdUnfolding arg_id) arg_id case_ty ([Alt DEFAULT [] rhs]) -- Normal argument | otherwise = do rhs {- Note [Which Ids should be strictified] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For some arguments we would like to convince GHC to pass them call by value. One way to achieve this is described in see Note [Call-by-value for worker args]. We separate the concerns of "should we pass this argument using cbv" and "should we do so by making the rhs strict in this argument". This note deals with the second part. There are multiple reasons why we might not want to insert a seq in the rhs to strictify a functions argument: 1) The argument doesn't exist at runtime. 
For zero width types (like Types) there is no benefit as we don't operate on them at runtime at all. This includes things like void#, coercions and state tokens. 2) The argument is a unlifted type. If the argument is a unlifted type the calling convention already is explicitly cbv. This means inserting a seq on this argument wouldn't do anything as the seq would be a no-op *and* it wouldn't affect the calling convention. 3) The argument is absent. If the argument is absent in the body there is no advantage to it being passed as cbv to the function. The function won't ever look at it so we don't safe any work. This mostly happens for join point. For example we might have: data T = MkT ![Int] [Char] f t = case t of MkT xs{strict} ys-> snd (xs,ys) and abstract the case alternative to: f t = join j1 = \xs ys -> snd (xs,ys) in case t of MkT xs{strict} ys-> j1 xs xy While we "use" xs inside `j1` it's not used inside the function `snd` we pass it to. In short a absent demand means neither our RHS, nor any function we pass the argument to will inspect it. So there is no work to be saved by forcing `xs` early. NB: There is an edge case where if we rebox we *can* end up seqing an absent value. Note [Absent fillers] has an example of this. However this is so rare it's not worth caring about here. 4) The argument is already strict. Consider this code: data T = MkT ![Int] f t = case t of MkT xs{strict} -> reverse xs The `xs{strict}` indicates that `xs` is used strictly by the `reverse xs`. If we do a w/w split, and add the extra eval on `xs`, we'll get $wf xs = case xs of xs1 -> let t = MkT xs1 in case t of MkT xs2 -> reverse xs2 That's not wrong; but the w/w body will simplify to $wf xs = case xs of xs1 -> reverse xs1 and now we'll drop the `case xs` because `xs1` is used strictly in its scope. Adding that eval was a waste of time. So don't add it for strictly-demanded Ids. 5) Functions Functions are tricky (see Note [TagInfo of functions] in InferTags). 
But the gist of it even if we make a higher order function argument strict we can't avoid the tag check when it's used later in the body. So there is no benefit. -} -- | Do we expect there to be any benefit if we make this var strict -- in order for it to get treated as as cbv argument? -- See Note [Which Ids should be strictified] -- See Note [CBV Function Ids] for more background. shouldStrictifyIdForCbv :: Var -> Bool shouldStrictifyIdForCbv = wantCbvForId False -- Like shouldStrictifyIdForCbv but also wants to use cbv for strict args. shouldUseCbvForId :: Var -> Bool shouldUseCbvForId = wantCbvForId True -- When we strictify we want to skip strict args otherwise the logic is the same -- as for shouldUseCbvForId so we common up the logic here. -- Basically returns true if it would be benefitial for runtime to pass this argument -- as CBV independent of weither or not it's correct. E.g. it might return true for lazy args -- we are not allowed to force. wantCbvForId :: Bool -> Var -> Bool wantCbvForId cbv_for_strict v -- Must be a runtime var. -- See Note [Which Ids should be strictified] point 1) | isId v , not $ isZeroBitTy ty -- Unlifted things don't need special measures to be treated as cbv -- See Note [Which Ids should be strictified] point 2) , mightBeLiftedType ty -- Functions sometimes get a zero tag so we can't eliminate the tag check. -- See Note [TagInfo of functions] in InferTags. -- See Note [Which Ids should be strictified] point 5) , not $ isFunTy ty -- If the var is strict already a seq is redundant. -- See Note [Which Ids should be strictified] point 4) , not (isStrictDmd dmd) || cbv_for_strict -- If the var is absent a seq is almost always useless. 
-- See Note [Which Ids should be strictified] point 3) , not (isAbsDmd dmd) = True | otherwise = False where ty = idType v dmd = idDemandInfo v {- ********************************************************************* * * unsafeEqualityProof * * ********************************************************************* -} isUnsafeEqualityProof :: CoreExpr -> Bool -- See (U3) and (U4) in -- Note [Implementing unsafeCoerce] in base:Unsafe.Coerce isUnsafeEqualityProof e | Var v `App` Type _ `App` Type _ `App` Type _ <- e = v `hasKey` unsafeEqualityProofIdKey | otherwise = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/CoreToIface.hs0000644000000000000000000006761414472400112020330 0ustar0000000000000000 {-# LANGUAGE Strict #-} -- See Note [Avoiding space leaks in toIface*] -- | Functions for converting Core things to interface file things. module GHC.CoreToIface ( -- * Binders toIfaceTvBndr , toIfaceTvBndrs , toIfaceIdBndr , toIfaceBndr , toIfaceForAllBndr , toIfaceTyCoVarBinders , toIfaceTyVar -- * Types , toIfaceType, toIfaceTypeX , toIfaceKind , toIfaceTcArgs , toIfaceTyCon , toIfaceTyCon_name , toIfaceTyLit -- * Tidying types , tidyToIfaceType , tidyToIfaceContext , tidyToIfaceTcArgs -- * Coercions , toIfaceCoercion, toIfaceCoercionX -- * Pattern synonyms , patSynToIfaceDecl -- * Expressions , toIfaceExpr , toIfaceBang , toIfaceSrcBang , toIfaceLetBndr , toIfaceIdDetails , toIfaceIdInfo , toIfUnfolding , toIfaceTickish , toIfaceBind , toIfaceAlt , toIfaceCon , toIfaceApp , toIfaceVar -- * Other stuff , toIfaceLFInfo -- * CgBreakInfo , dehydrateCgBreakInfo ) where import GHC.Prelude import Data.Word import GHC.StgToCmm.Types import GHC.ByteCode.Types import GHC.Core import GHC.Core.TyCon hiding ( pprPromotionQuote ) import GHC.Core.Coercion.Axiom import GHC.Core.DataCon import GHC.Core.Type import GHC.Core.Multiplicity import GHC.Core.PatSyn import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Tidy ( tidyCo ) import GHC.Builtin.Types.Prim ( eqPrimTyCon, eqReprPrimTyCon ) 
import GHC.Builtin.Types ( heqTyCon ) import GHC.Builtin.Names import GHC.Iface.Syntax import GHC.Data.FastString import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Id.Make ( noinlineIdName ) import GHC.Types.Literal import GHC.Types.Name import GHC.Types.Basic import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Types.Tickish import GHC.Types.Demand ( isNopSig ) import GHC.Types.Cpr ( topCprSig ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Utils.Trace import Data.Maybe ( catMaybes ) {- Note [Avoiding space leaks in toIface*] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Building a interface file depends on the output of the simplifier. If we build these lazily this would mean keeping the Core AST alive much longer than necessary causing a space "leak". This happens for example when we only write the interface file to disk after code gen has run, in which case we might carry megabytes of core AST in the heap which is no longer needed. We avoid this in two ways. * First we use -XStrict in GHC.CoreToIface which avoids many thunks to begin with. * Second we define NFData instance for Iface syntax and use them to force any remaining thunks. -XStrict is not sufficient as patterns of the form `f (g x)` would still result in a thunk being allocated for `g x`. NFData is sufficient for the space leak, but using -XStrict reduces allocation by ~0.1% when compiling with -O. (nofib/spectral/simple, T10370). It's essentially free performance hence we use -XStrict on top of NFData. MR !1633 on gitlab, has more discussion on the topic. 
-} ---------------- toIfaceTvBndr :: TyVar -> IfaceTvBndr toIfaceTvBndr = toIfaceTvBndrX emptyVarSet toIfaceTvBndrX :: VarSet -> TyVar -> IfaceTvBndr toIfaceTvBndrX fr tyvar = ( occNameFS (getOccName tyvar) , toIfaceTypeX fr (tyVarKind tyvar) ) toIfaceTvBndrs :: [TyVar] -> [IfaceTvBndr] toIfaceTvBndrs = map toIfaceTvBndr toIfaceIdBndr :: Id -> IfaceIdBndr toIfaceIdBndr = toIfaceIdBndrX emptyVarSet toIfaceIdBndrX :: VarSet -> CoVar -> IfaceIdBndr toIfaceIdBndrX fr covar = ( toIfaceType (idMult covar) , occNameFS (getOccName covar) , toIfaceTypeX fr (varType covar) ) toIfaceBndr :: Var -> IfaceBndr toIfaceBndr var | isId var = IfaceIdBndr (toIfaceIdBndr var) | otherwise = IfaceTvBndr (toIfaceTvBndr var) toIfaceBndrX :: VarSet -> Var -> IfaceBndr toIfaceBndrX fr var | isId var = IfaceIdBndr (toIfaceIdBndrX fr var) | otherwise = IfaceTvBndr (toIfaceTvBndrX fr var) toIfaceTyCoVarBinder :: VarBndr Var vis -> VarBndr IfaceBndr vis toIfaceTyCoVarBinder (Bndr tv vis) = Bndr (toIfaceBndr tv) vis toIfaceTyCoVarBinders :: [VarBndr Var vis] -> [VarBndr IfaceBndr vis] toIfaceTyCoVarBinders = map toIfaceTyCoVarBinder {- ************************************************************************ * * Conversion from Type to IfaceType * * ************************************************************************ -} toIfaceKind :: Type -> IfaceType toIfaceKind = toIfaceType --------------------- toIfaceType :: Type -> IfaceType toIfaceType = toIfaceTypeX emptyVarSet toIfaceTypeX :: VarSet -> Type -> IfaceType -- (toIfaceTypeX free ty) -- translates the tyvars in 'free' as IfaceFreeTyVars -- -- Synonyms are retained in the interface type toIfaceTypeX fr (TyVarTy tv) -- See Note [Free tyvars in IfaceType] in GHC.Iface.Type | tv `elemVarSet` fr = IfaceFreeTyVar tv | otherwise = IfaceTyVar (toIfaceTyVar tv) toIfaceTypeX fr ty@(AppTy {}) = -- Flatten as many argument AppTys as possible, then turn them into an -- IfaceAppArgs list. 
-- See Note [Suppressing invisible arguments] in GHC.Iface.Type. let (head, args) = splitAppTys ty in IfaceAppTy (toIfaceTypeX fr head) (toIfaceAppTyArgsX fr head args) toIfaceTypeX _ (LitTy n) = IfaceLitTy (toIfaceTyLit n) toIfaceTypeX fr (ForAllTy b t) = IfaceForAllTy (toIfaceForAllBndrX fr b) (toIfaceTypeX (fr `delVarSet` binderVar b) t) toIfaceTypeX fr (FunTy { ft_arg = t1, ft_mult = w, ft_res = t2, ft_af = af }) = IfaceFunTy af (toIfaceTypeX fr w) (toIfaceTypeX fr t1) (toIfaceTypeX fr t2) toIfaceTypeX fr (CastTy ty co) = IfaceCastTy (toIfaceTypeX fr ty) (toIfaceCoercionX fr co) toIfaceTypeX fr (CoercionTy co) = IfaceCoercionTy (toIfaceCoercionX fr co) toIfaceTypeX fr (TyConApp tc tys) -- tuples | Just sort <- tyConTuple_maybe tc , n_tys == arity = IfaceTupleTy sort NotPromoted (toIfaceTcArgsX fr tc tys) | Just dc <- isPromotedDataCon_maybe tc , isBoxedTupleDataCon dc , n_tys == 2*arity = IfaceTupleTy BoxedTuple IsPromoted (toIfaceTcArgsX fr tc (drop arity tys)) | tc `elem` [ eqPrimTyCon, eqReprPrimTyCon, heqTyCon ] , (k1:k2:_) <- tys = let info = mkIfaceTyConInfo NotPromoted sort sort | k1 `eqType` k2 = IfaceEqualityTyCon | otherwise = IfaceNormalTyCon in IfaceTyConApp (IfaceTyCon (tyConName tc) info) (toIfaceTcArgsX fr tc tys) -- other applications | otherwise = IfaceTyConApp (toIfaceTyCon tc) (toIfaceTcArgsX fr tc tys) where arity = tyConArity tc n_tys = length tys toIfaceTyVar :: TyVar -> FastString toIfaceTyVar = occNameFS . getOccName toIfaceCoVar :: CoVar -> FastString toIfaceCoVar = occNameFS . 
getOccName toIfaceForAllBndr :: (VarBndr TyCoVar flag) -> (VarBndr IfaceBndr flag) toIfaceForAllBndr = toIfaceForAllBndrX emptyVarSet toIfaceForAllBndrX :: VarSet -> (VarBndr TyCoVar flag) -> (VarBndr IfaceBndr flag) toIfaceForAllBndrX fr (Bndr v vis) = Bndr (toIfaceBndrX fr v) vis ---------------- toIfaceTyCon :: TyCon -> IfaceTyCon toIfaceTyCon tc = IfaceTyCon tc_name info where tc_name = tyConName tc info = mkIfaceTyConInfo promoted sort promoted | isPromotedDataCon tc = IsPromoted | otherwise = NotPromoted tupleSort :: TyCon -> Maybe IfaceTyConSort tupleSort tc' = case tyConTuple_maybe tc' of Just UnboxedTuple -> let arity = tyConArity tc' `div` 2 in Just $ IfaceTupleTyCon arity UnboxedTuple Just sort -> let arity = tyConArity tc' in Just $ IfaceTupleTyCon arity sort Nothing -> Nothing sort | Just tsort <- tupleSort tc = tsort | Just dcon <- isPromotedDataCon_maybe tc , let tc' = dataConTyCon dcon , Just tsort <- tupleSort tc' = tsort | isUnboxedSumTyCon tc , Just cons <- tyConDataCons_maybe tc = IfaceSumTyCon (length cons) | otherwise = IfaceNormalTyCon toIfaceTyCon_name :: Name -> IfaceTyCon toIfaceTyCon_name n = IfaceTyCon n info where info = mkIfaceTyConInfo NotPromoted IfaceNormalTyCon -- Used for the "rough-match" tycon stuff, -- where pretty-printing is not an issue toIfaceTyLit :: TyLit -> IfaceTyLit toIfaceTyLit (NumTyLit x) = IfaceNumTyLit x toIfaceTyLit (StrTyLit x) = IfaceStrTyLit x toIfaceTyLit (CharTyLit x) = IfaceCharTyLit x ---------------- toIfaceCoercion :: Coercion -> IfaceCoercion toIfaceCoercion = toIfaceCoercionX emptyVarSet toIfaceCoercionX :: VarSet -> Coercion -> IfaceCoercion -- (toIfaceCoercionX free ty) -- translates the tyvars in 'free' as IfaceFreeTyVars toIfaceCoercionX fr co = go co where go_mco MRefl = IfaceMRefl go_mco (MCo co) = IfaceMCo $ go co go (Refl ty) = IfaceReflCo (toIfaceTypeX fr ty) go (GRefl r ty mco) = IfaceGReflCo r (toIfaceTypeX fr ty) (go_mco mco) go (CoVarCo cv) -- See Note [Free tyvars in IfaceType] in 
GHC.Iface.Type | cv `elemVarSet` fr = IfaceFreeCoVar cv | otherwise = IfaceCoVarCo (toIfaceCoVar cv) go (HoleCo h) = IfaceHoleCo (coHoleCoVar h) go (AppCo co1 co2) = IfaceAppCo (go co1) (go co2) go (SymCo co) = IfaceSymCo (go co) go (TransCo co1 co2) = IfaceTransCo (go co1) (go co2) go (NthCo _r d co) = IfaceNthCo d (go co) go (LRCo lr co) = IfaceLRCo lr (go co) go (InstCo co arg) = IfaceInstCo (go co) (go arg) go (KindCo c) = IfaceKindCo (go c) go (SubCo co) = IfaceSubCo (go co) go (AxiomRuleCo co cs) = IfaceAxiomRuleCo (coaxrName co) (map go cs) go (AxiomInstCo c i cs) = IfaceAxiomInstCo (coAxiomName c) i (map go cs) go (UnivCo p r t1 t2) = IfaceUnivCo (go_prov p) r (toIfaceTypeX fr t1) (toIfaceTypeX fr t2) go (TyConAppCo r tc cos) | tc `hasKey` funTyConKey , [_,_,_,_, _] <- cos = panic "toIfaceCoercion" | otherwise = IfaceTyConAppCo r (toIfaceTyCon tc) (map go cos) go (FunCo r w co1 co2) = IfaceFunCo r (go w) (go co1) (go co2) go (ForAllCo tv k co) = IfaceForAllCo (toIfaceBndr tv) (toIfaceCoercionX fr' k) (toIfaceCoercionX fr' co) where fr' = fr `delVarSet` tv go_prov :: UnivCoProvenance -> IfaceUnivCoProv go_prov (PhantomProv co) = IfacePhantomProv (go co) go_prov (ProofIrrelProv co) = IfaceProofIrrelProv (go co) go_prov (PluginProv str) = IfacePluginProv str go_prov (CorePrepProv b) = IfaceCorePrepProv b toIfaceTcArgs :: TyCon -> [Type] -> IfaceAppArgs toIfaceTcArgs = toIfaceTcArgsX emptyVarSet toIfaceTcArgsX :: VarSet -> TyCon -> [Type] -> IfaceAppArgs toIfaceTcArgsX fr tc ty_args = toIfaceAppArgsX fr (tyConKind tc) ty_args toIfaceAppTyArgsX :: VarSet -> Type -> [Type] -> IfaceAppArgs toIfaceAppTyArgsX fr ty ty_args = toIfaceAppArgsX fr (typeKind ty) ty_args toIfaceAppArgsX :: VarSet -> Kind -> [Type] -> IfaceAppArgs -- See Note [Suppressing invisible arguments] in GHC.Iface.Type -- We produce a result list of args describing visibility -- The awkward case is -- T :: forall k. * -> k -- And consider -- T (forall j. blah) * blib -- Is 'blib' visible? 
It depends on the visibility flag on j, -- so we have to substitute for k. Annoying! toIfaceAppArgsX fr kind ty_args = go (mkEmptyTCvSubst in_scope) kind ty_args where in_scope = mkInScopeSet (tyCoVarsOfTypes ty_args) go _ _ [] = IA_Nil go env ty ts | Just ty' <- coreView ty = go env ty' ts go env (ForAllTy (Bndr tv vis) res) (t:ts) = IA_Arg t' vis ts' where t' = toIfaceTypeX fr t ts' = go (extendTCvSubst env tv t) res ts go env (FunTy { ft_af = af, ft_res = res }) (t:ts) = IA_Arg (toIfaceTypeX fr t) argf (go env res ts) where argf = case af of VisArg -> Required InvisArg -> Inferred -- It's rare for a kind to have a constraint argument, but -- it can happen. See Note [AnonTCB InvisArg] in GHC.Core.TyCon. go env ty ts@(t1:ts1) | not (isEmptyTCvSubst env) = go (zapTCvSubst env) (substTy env ty) ts -- See Note [Care with kind instantiation] in GHC.Core.Type | otherwise = -- There's a kind error in the type we are trying to print -- e.g. kind = k, ty_args = [Int] -- This is probably a compiler bug, so we print a trace and -- carry on as if it were FunTy. 
Without the test for -- isEmptyTCvSubst we'd get an infinite loop (#15473) warnPprTrace True "toIfaceAppArgsX" (ppr kind $$ ppr ty_args) $ IA_Arg (toIfaceTypeX fr t1) Required (go env ty ts1) tidyToIfaceType :: TidyEnv -> Type -> IfaceType tidyToIfaceType env ty = toIfaceType (tidyType env ty) tidyToIfaceTcArgs :: TidyEnv -> TyCon -> [Type] -> IfaceAppArgs tidyToIfaceTcArgs env tc tys = toIfaceTcArgs tc (tidyTypes env tys) tidyToIfaceContext :: TidyEnv -> ThetaType -> IfaceContext tidyToIfaceContext env theta = map (tidyToIfaceType env) theta {- ************************************************************************ * * Conversion of pattern synonyms * * ************************************************************************ -} patSynToIfaceDecl :: PatSyn -> IfaceDecl patSynToIfaceDecl ps = IfacePatSyn { ifName = getName $ ps , ifPatMatcher = to_if_pr (patSynMatcher ps) , ifPatBuilder = fmap to_if_pr (patSynBuilder ps) , ifPatIsInfix = patSynIsInfix ps , ifPatUnivBndrs = map toIfaceForAllBndr univ_bndrs' , ifPatExBndrs = map toIfaceForAllBndr ex_bndrs' , ifPatProvCtxt = tidyToIfaceContext env2 prov_theta , ifPatReqCtxt = tidyToIfaceContext env2 req_theta , ifPatArgs = map (tidyToIfaceType env2 . 
scaledThing) args , ifPatTy = tidyToIfaceType env2 rhs_ty , ifFieldLabels = (patSynFieldLabels ps) } where (_univ_tvs, req_theta, _ex_tvs, prov_theta, args, rhs_ty) = patSynSig ps univ_bndrs = patSynUnivTyVarBinders ps ex_bndrs = patSynExTyVarBinders ps (env1, univ_bndrs') = tidyTyCoVarBinders emptyTidyEnv univ_bndrs (env2, ex_bndrs') = tidyTyCoVarBinders env1 ex_bndrs to_if_pr (name, _type, needs_dummy) = (name, needs_dummy) {- ************************************************************************ * * Conversion of other things * * ************************************************************************ -} toIfaceBang :: TidyEnv -> HsImplBang -> IfaceBang toIfaceBang _ HsLazy = IfNoBang toIfaceBang _ (HsUnpack Nothing) = IfUnpack toIfaceBang env (HsUnpack (Just co)) = IfUnpackCo (toIfaceCoercion (tidyCo env co)) toIfaceBang _ HsStrict = IfStrict toIfaceSrcBang :: HsSrcBang -> IfaceSrcBang toIfaceSrcBang (HsSrcBang _ unpk bang) = IfSrcBang unpk bang toIfaceLetBndr :: Id -> IfaceLetBndr toIfaceLetBndr id = IfLetBndr (occNameFS (getOccName id)) (toIfaceType (idType id)) (toIfaceIdInfo (idInfo id)) (toIfaceJoinInfo (isJoinId_maybe id)) -- Put into the interface file any IdInfo that GHC.Core.Tidy.tidyLetBndr -- has left on the Id. 
See Note [IdInfo on nested let-bindings] in GHC.Iface.Syntax toIfaceIdDetails :: IdDetails -> IfaceIdDetails toIfaceIdDetails VanillaId = IfVanillaId toIfaceIdDetails (WorkerLikeId dmds) = IfWorkerLikeId dmds toIfaceIdDetails (DFunId {}) = IfDFunId toIfaceIdDetails (RecSelId { sel_naughty = n , sel_tycon = tc }) = let iface = case tc of RecSelData ty_con -> Left (toIfaceTyCon ty_con) RecSelPatSyn pat_syn -> Right (patSynToIfaceDecl pat_syn) in IfRecSelId iface n -- The remaining cases are all "implicit Ids" which don't -- appear in interface files at all toIfaceIdDetails other = pprTrace "toIfaceIdDetails" (ppr other) IfVanillaId -- Unexpected; the other toIfaceIdInfo :: IdInfo -> IfaceIdInfo toIfaceIdInfo id_info = catMaybes [arity_hsinfo, caf_hsinfo, strict_hsinfo, cpr_hsinfo, inline_hsinfo, unfold_hsinfo, levity_hsinfo] -- NB: strictness and arity must appear in the list before unfolding -- See GHC.IfaceToCore.tcUnfolding where ------------ Arity -------------- arity_info = arityInfo id_info arity_hsinfo | arity_info == 0 = Nothing | otherwise = Just (HsArity arity_info) ------------ Caf Info -------------- caf_info = cafInfo id_info caf_hsinfo = case caf_info of NoCafRefs -> Just HsNoCafRefs _other -> Nothing ------------ Strictness -------------- -- No point in explicitly exporting TopSig sig_info = dmdSigInfo id_info strict_hsinfo | not (isNopSig sig_info) = Just (HsDmdSig sig_info) | otherwise = Nothing ------------ CPR -------------- cpr_info = cprSigInfo id_info cpr_hsinfo | cpr_info /= topCprSig = Just (HsCprSig cpr_info) | otherwise = Nothing ------------ Unfolding -------------- unfold_hsinfo = toIfUnfolding loop_breaker (realUnfoldingInfo id_info) loop_breaker = isStrongLoopBreaker (occInfo id_info) ------------ Inline prag -------------- inline_prag = inlinePragInfo id_info inline_hsinfo | isDefaultInlinePragma inline_prag = Nothing | otherwise = Just (HsInline inline_prag) ------------ Representation polymorphism ---------- levity_hsinfo | 
isNeverRepPolyIdInfo id_info = Just HsLevity | otherwise = Nothing toIfaceJoinInfo :: Maybe JoinArity -> IfaceJoinInfo toIfaceJoinInfo (Just ar) = IfaceJoinPoint ar toIfaceJoinInfo Nothing = IfaceNotJoinPoint -------------------------- toIfUnfolding :: Bool -> Unfolding -> Maybe IfaceInfoItem toIfUnfolding lb (CoreUnfolding { uf_tmpl = rhs , uf_src = src , uf_cache = cache , uf_guidance = guidance }) = Just $ HsUnfold lb $ case src of InlineStable -> case guidance of UnfWhen {ug_arity = arity, ug_unsat_ok = unsat_ok, ug_boring_ok = boring_ok } -> IfInlineRule arity unsat_ok boring_ok if_rhs _other -> IfCoreUnfold True cache if_rhs InlineCompulsory -> IfCompulsory if_rhs InlineRhs -> IfCoreUnfold False cache if_rhs -- Yes, even if guidance is UnfNever, expose the unfolding -- If we didn't want to expose the unfolding, GHC.Iface.Tidy would -- have stuck in NoUnfolding. For supercompilation we want -- to see that unfolding! where if_rhs = toIfaceExpr rhs toIfUnfolding lb (DFunUnfolding { df_bndrs = bndrs, df_args = args }) = Just (HsUnfold lb (IfDFunUnfold (map toIfaceBndr bndrs) (map toIfaceExpr args))) -- No need to serialise the data constructor; -- we can recover it from the type of the dfun toIfUnfolding _ (OtherCon {}) = Nothing -- The binding site of an Id doesn't have OtherCon, except perhaps -- where we have called trimUnfolding; and that evald'ness info is -- not needed by importing modules toIfUnfolding _ BootUnfolding = Nothing -- Can't happen; we only have BootUnfolding for imported binders toIfUnfolding _ NoUnfolding = Nothing {- ************************************************************************ * * Conversion of expressions * * ************************************************************************ -} toIfaceExpr :: CoreExpr -> IfaceExpr toIfaceExpr (Var v) = toIfaceVar v toIfaceExpr (Lit (LitRubbish r)) = IfaceLitRubbish (toIfaceType r) toIfaceExpr (Lit l) = IfaceLit l toIfaceExpr (Type ty) = IfaceType (toIfaceType ty) toIfaceExpr (Coercion co) 
= IfaceCo (toIfaceCoercion co) toIfaceExpr (Lam x b) = IfaceLam (toIfaceBndr x, toIfaceOneShot x) (toIfaceExpr b) toIfaceExpr (App f a) = toIfaceApp f [a] toIfaceExpr (Case s x ty as) | null as = IfaceECase (toIfaceExpr s) (toIfaceType ty) | otherwise = IfaceCase (toIfaceExpr s) (getOccFS x) (map toIfaceAlt as) toIfaceExpr (Let b e) = IfaceLet (toIfaceBind b) (toIfaceExpr e) toIfaceExpr (Cast e co) = IfaceCast (toIfaceExpr e) (toIfaceCoercion co) toIfaceExpr (Tick t e) | Just t' <- toIfaceTickish t = IfaceTick t' (toIfaceExpr e) | otherwise = toIfaceExpr e toIfaceOneShot :: Id -> IfaceOneShot toIfaceOneShot id | isId id , OneShotLam <- oneShotInfo (idInfo id) = IfaceOneShot | otherwise = IfaceNoOneShot --------------------- toIfaceTickish :: CoreTickish -> Maybe IfaceTickish toIfaceTickish (ProfNote cc tick push) = Just (IfaceSCC cc tick push) toIfaceTickish (HpcTick modl ix) = Just (IfaceHpcTick modl ix) toIfaceTickish (SourceNote src names) = Just (IfaceSource src names) toIfaceTickish (Breakpoint {}) = Nothing -- Ignore breakpoints, since they are relevant only to GHCi, and -- should not be serialised (#8333) --------------------- toIfaceBind :: Bind Id -> IfaceBinding toIfaceBind (NonRec b r) = IfaceNonRec (toIfaceLetBndr b) (toIfaceExpr r) toIfaceBind (Rec prs) = IfaceRec [(toIfaceLetBndr b, toIfaceExpr r) | (b,r) <- prs] --------------------- toIfaceAlt :: CoreAlt -> IfaceAlt toIfaceAlt (Alt c bs r) = IfaceAlt (toIfaceCon c) (map getOccFS bs) (toIfaceExpr r) --------------------- toIfaceCon :: AltCon -> IfaceConAlt toIfaceCon (DataAlt dc) = IfaceDataAlt (getName dc) toIfaceCon (LitAlt l) = assertPpr (not (isLitRubbish l)) (ppr l) $ -- assert: see Note [Rubbish literals] wrinkle (b) IfaceLitAlt l toIfaceCon DEFAULT = IfaceDefault --------------------- toIfaceApp :: Expr CoreBndr -> [Arg CoreBndr] -> IfaceExpr toIfaceApp (App f a) as = toIfaceApp f (a:as) toIfaceApp (Var v) as = case isDataConWorkId_maybe v of -- We convert the *worker* for tuples into 
IfaceTuples Just dc | saturated , Just tup_sort <- tyConTuple_maybe tc -> IfaceTuple tup_sort tup_args where val_args = dropWhile isTypeArg as saturated = val_args `lengthIs` idArity v tup_args = map toIfaceExpr val_args tc = dataConTyCon dc _ -> mkIfaceApps (toIfaceVar v) as toIfaceApp e as = mkIfaceApps (toIfaceExpr e) as mkIfaceApps :: IfaceExpr -> [CoreExpr] -> IfaceExpr mkIfaceApps f as = foldl' (\f a -> IfaceApp f (toIfaceExpr a)) f as --------------------- toIfaceVar :: Id -> IfaceExpr toIfaceVar v | isBootUnfolding (idUnfolding v) = -- See Note [Inlining and hs-boot files] IfaceApp (IfaceApp (IfaceExt noinlineIdName) (IfaceType (toIfaceType (idType v)))) (IfaceExt name) -- don't use mkIfaceApps, or infinite loop | Just fcall <- isFCallId_maybe v = IfaceFCall fcall (toIfaceType (idType v)) -- Foreign calls have special syntax | isExternalName name = IfaceExt name | otherwise = IfaceLcl (getOccFS name) where name = idName v --------------------- toIfaceLFInfo :: Name -> LambdaFormInfo -> IfaceLFInfo toIfaceLFInfo nm lfi = case lfi of LFReEntrant top_lvl arity no_fvs _arg_descr -> -- Exported LFReEntrant closures are top level, and top-level closures -- don't have free variables assertPpr (isTopLevel top_lvl) (ppr nm) $ assertPpr no_fvs (ppr nm) $ IfLFReEntrant arity LFThunk top_lvl no_fvs updatable sfi mb_fun -> -- Exported LFThunk closures are top level (which don't have free -- variables) and non-standard (see cgTopRhsClosure) assertPpr (isTopLevel top_lvl) (ppr nm) $ assertPpr no_fvs (ppr nm) $ assertPpr (sfi == NonStandardThunk) (ppr nm) $ IfLFThunk updatable mb_fun LFCon dc -> IfLFCon (dataConName dc) LFUnknown mb_fun -> IfLFUnknown mb_fun LFUnlifted -> IfLFUnlifted LFLetNoEscape -> panic "toIfaceLFInfo: LFLetNoEscape" -- Dehydrating CgBreakInfo dehydrateCgBreakInfo :: [TyVar] -> [Maybe (Id, Word16)] -> Type -> CgBreakInfo dehydrateCgBreakInfo ty_vars idOffSets tick_ty = CgBreakInfo { cgb_tyvars = map toIfaceTvBndr ty_vars , cgb_vars = map (fmap (\(i, 
offset) -> (toIfaceIdBndr i, offset))) idOffSets , cgb_resty = toIfaceType tick_ty } {- Note [Inlining and hs-boot files] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this example (#10083, #12789): ---------- RSR.hs-boot ------------ module RSR where data RSR eqRSR :: RSR -> RSR -> Bool ---------- SR.hs ------------ module SR where import {-# SOURCE #-} RSR data SR = MkSR RSR eqSR (MkSR r1) (MkSR r2) = eqRSR r1 r2 ---------- RSR.hs ------------ module RSR where import SR data RSR = MkRSR SR -- deriving( Eq ) eqRSR (MkRSR s1) (MkRSR s2) = (eqSR s1 s2) foo x y = not (eqRSR x y) When compiling RSR we get this code RSR.eqRSR :: RSR -> RSR -> Bool RSR.eqRSR = \ (ds1 :: RSR.RSR) (ds2 :: RSR.RSR) -> case ds1 of _ { RSR.MkRSR s1 -> case ds2 of _ { RSR.MkRSR s2 -> SR.eqSR s1 s2 }} RSR.foo :: RSR -> RSR -> Bool RSR.foo = \ (x :: RSR) (y :: RSR) -> not (RSR.eqRSR x y) Now, when optimising foo: Inline eqRSR (small, non-rec) Inline eqSR (small, non-rec) but the result of inlining eqSR from SR is another call to eqRSR, so everything repeats. Neither eqSR nor eqRSR are (apparently) loop breakers. Solution: in the unfolding of eqSR in SR.hi, replace `eqRSR` in SR with `noinline eqRSR`, so that eqRSR doesn't get inlined. This means that when GHC inlines `eqSR`, it will not also inline `eqRSR`, exactly as would have been the case if `foo` had been defined in SR.hs (and marked as a loop-breaker). But how do we arrange for this to happen? There are two ingredients: 1. When we serialize out unfoldings to IfaceExprs (toIfaceVar), for every variable reference we see if we are referring to an 'Id' that came from an hs-boot file. If so, we add a `noinline` to the reference. 2. But how do we know if a reference came from an hs-boot file or not? 
We could record this directly in the 'IdInfo', but actually we deduce this by looking at the unfolding: 'Id's that come from boot files are given a special unfolding (upon typechecking) 'BootUnfolding' which say that there is no unfolding, and the reason is because the 'Id' came from a boot file. Here is a solution that doesn't work: when compiling RSR, add a NOINLINE pragma to every function exported by the boot-file for RSR (if it exists). Doing so makes the bootstrapped GHC itself slower by 8% overall (on #9872a-d, and T1969: the reason is that these NOINLINE'd functions now can't be profitably inlined outside of the hs-boot loop. -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Bag.hs0000644000000000000000000003156714472400112017545 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Bag: an unordered collection with duplicates -} {-# LANGUAGE ScopedTypeVariables, DeriveFunctor, TypeFamilies #-} module GHC.Data.Bag ( Bag, -- abstract type emptyBag, unitBag, unionBags, unionManyBags, mapBag, elemBag, lengthBag, filterBag, partitionBag, partitionBagWith, concatBag, catBagMaybes, foldBag, isEmptyBag, isSingletonBag, consBag, snocBag, anyBag, allBag, listToBag, nonEmptyToBag, bagToList, headMaybe, mapAccumBagL, concatMapBag, concatMapBagPair, mapMaybeBag, mapBagM, mapBagM_, flatMapBagM, flatMapBagPairM, mapAndUnzipBagM, mapAccumBagLM, anyBagM, filterBagM ) where import GHC.Prelude import GHC.Exts ( IsList(..) ) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Monad import Control.Monad import Data.Data import Data.Maybe( mapMaybe, listToMaybe ) import Data.List ( partition, mapAccumL ) import Data.List.NonEmpty ( NonEmpty(..) 
) import qualified Data.Foldable as Foldable import qualified Data.Semigroup ( (<>) ) infixr 3 `consBag` infixl 3 `snocBag` data Bag a = EmptyBag | UnitBag a | TwoBags (Bag a) (Bag a) -- INVARIANT: neither branch is empty | ListBag [a] -- INVARIANT: the list is non-empty deriving (Functor) emptyBag :: Bag a emptyBag = EmptyBag unitBag :: a -> Bag a unitBag = UnitBag lengthBag :: Bag a -> Int lengthBag EmptyBag = 0 lengthBag (UnitBag {}) = 1 lengthBag (TwoBags b1 b2) = lengthBag b1 + lengthBag b2 lengthBag (ListBag xs) = length xs elemBag :: Eq a => a -> Bag a -> Bool elemBag _ EmptyBag = False elemBag x (UnitBag y) = x == y elemBag x (TwoBags b1 b2) = x `elemBag` b1 || x `elemBag` b2 elemBag x (ListBag ys) = any (x ==) ys unionManyBags :: [Bag a] -> Bag a unionManyBags xs = foldr unionBags EmptyBag xs -- This one is a bit stricter! The bag will get completely evaluated. unionBags :: Bag a -> Bag a -> Bag a unionBags EmptyBag b = b unionBags b EmptyBag = b unionBags b1 b2 = TwoBags b1 b2 consBag :: a -> Bag a -> Bag a snocBag :: Bag a -> a -> Bag a consBag elt bag = (unitBag elt) `unionBags` bag snocBag bag elt = bag `unionBags` (unitBag elt) isEmptyBag :: Bag a -> Bool isEmptyBag EmptyBag = True isEmptyBag _ = False isSingletonBag :: Bag a -> Bool isSingletonBag EmptyBag = False isSingletonBag (UnitBag _) = True isSingletonBag (TwoBags _ _) = False -- Neither is empty isSingletonBag (ListBag xs) = isSingleton xs filterBag :: (a -> Bool) -> Bag a -> Bag a filterBag _ EmptyBag = EmptyBag filterBag pred b@(UnitBag val) = if pred val then b else EmptyBag filterBag pred (TwoBags b1 b2) = sat1 `unionBags` sat2 where sat1 = filterBag pred b1 sat2 = filterBag pred b2 filterBag pred (ListBag vs) = listToBag (filter pred vs) filterBagM :: Monad m => (a -> m Bool) -> Bag a -> m (Bag a) filterBagM _ EmptyBag = return EmptyBag filterBagM pred b@(UnitBag val) = do flag <- pred val if flag then return b else return EmptyBag filterBagM pred (TwoBags b1 b2) = do sat1 <- filterBagM 
pred b1 sat2 <- filterBagM pred b2 return (sat1 `unionBags` sat2) filterBagM pred (ListBag vs) = do sat <- filterM pred vs return (listToBag sat) allBag :: (a -> Bool) -> Bag a -> Bool allBag _ EmptyBag = True allBag p (UnitBag v) = p v allBag p (TwoBags b1 b2) = allBag p b1 && allBag p b2 allBag p (ListBag xs) = all p xs anyBag :: (a -> Bool) -> Bag a -> Bool anyBag _ EmptyBag = False anyBag p (UnitBag v) = p v anyBag p (TwoBags b1 b2) = anyBag p b1 || anyBag p b2 anyBag p (ListBag xs) = any p xs anyBagM :: Monad m => (a -> m Bool) -> Bag a -> m Bool anyBagM _ EmptyBag = return False anyBagM p (UnitBag v) = p v anyBagM p (TwoBags b1 b2) = do flag <- anyBagM p b1 if flag then return True else anyBagM p b2 anyBagM p (ListBag xs) = anyM p xs concatBag :: Bag (Bag a) -> Bag a concatBag bss = foldr add emptyBag bss where add bs rs = bs `unionBags` rs catBagMaybes :: Bag (Maybe a) -> Bag a catBagMaybes bs = foldr add emptyBag bs where add Nothing rs = rs add (Just x) rs = x `consBag` rs partitionBag :: (a -> Bool) -> Bag a -> (Bag a {- Satisfy predicate -}, Bag a {- Don't -}) partitionBag _ EmptyBag = (EmptyBag, EmptyBag) partitionBag pred b@(UnitBag val) = if pred val then (b, EmptyBag) else (EmptyBag, b) partitionBag pred (TwoBags b1 b2) = (sat1 `unionBags` sat2, fail1 `unionBags` fail2) where (sat1, fail1) = partitionBag pred b1 (sat2, fail2) = partitionBag pred b2 partitionBag pred (ListBag vs) = (listToBag sats, listToBag fails) where (sats, fails) = partition pred vs partitionBagWith :: (a -> Either b c) -> Bag a -> (Bag b {- Left -}, Bag c {- Right -}) partitionBagWith _ EmptyBag = (EmptyBag, EmptyBag) partitionBagWith pred (UnitBag val) = case pred val of Left a -> (UnitBag a, EmptyBag) Right b -> (EmptyBag, UnitBag b) partitionBagWith pred (TwoBags b1 b2) = (sat1 `unionBags` sat2, fail1 `unionBags` fail2) where (sat1, fail1) = partitionBagWith pred b1 (sat2, fail2) = partitionBagWith pred b2 partitionBagWith pred (ListBag vs) = (listToBag sats, listToBag fails) 
where (sats, fails) = partitionWith pred vs foldBag :: (r -> r -> r) -- Replace TwoBags with this; should be associative -> (a -> r) -- Replace UnitBag with this -> r -- Replace EmptyBag with this -> Bag a -> r {- Standard definition foldBag t u e EmptyBag = e foldBag t u e (UnitBag x) = u x foldBag t u e (TwoBags b1 b2) = (foldBag t u e b1) `t` (foldBag t u e b2) foldBag t u e (ListBag xs) = foldr (t.u) e xs -} -- More tail-recursive definition, exploiting associativity of "t" foldBag _ _ e EmptyBag = e foldBag t u e (UnitBag x) = u x `t` e foldBag t u e (TwoBags b1 b2) = foldBag t u (foldBag t u e b2) b1 foldBag t u e (ListBag xs) = foldr (t.u) e xs mapBag :: (a -> b) -> Bag a -> Bag b mapBag = fmap concatMapBag :: (a -> Bag b) -> Bag a -> Bag b concatMapBag _ EmptyBag = EmptyBag concatMapBag f (UnitBag x) = f x concatMapBag f (TwoBags b1 b2) = unionBags (concatMapBag f b1) (concatMapBag f b2) concatMapBag f (ListBag xs) = foldr (unionBags . f) emptyBag xs concatMapBagPair :: (a -> (Bag b, Bag c)) -> Bag a -> (Bag b, Bag c) concatMapBagPair _ EmptyBag = (EmptyBag, EmptyBag) concatMapBagPair f (UnitBag x) = f x concatMapBagPair f (TwoBags b1 b2) = (unionBags r1 r2, unionBags s1 s2) where (r1, s1) = concatMapBagPair f b1 (r2, s2) = concatMapBagPair f b2 concatMapBagPair f (ListBag xs) = foldr go (emptyBag, emptyBag) xs where go a (s1, s2) = (unionBags r1 s1, unionBags r2 s2) where (r1, r2) = f a mapMaybeBag :: (a -> Maybe b) -> Bag a -> Bag b mapMaybeBag _ EmptyBag = EmptyBag mapMaybeBag f (UnitBag x) = case f x of Nothing -> EmptyBag Just y -> UnitBag y mapMaybeBag f (TwoBags b1 b2) = unionBags (mapMaybeBag f b1) (mapMaybeBag f b2) mapMaybeBag f (ListBag xs) = ListBag (mapMaybe f xs) mapBagM :: Monad m => (a -> m b) -> Bag a -> m (Bag b) mapBagM _ EmptyBag = return EmptyBag mapBagM f (UnitBag x) = do r <- f x return (UnitBag r) mapBagM f (TwoBags b1 b2) = do r1 <- mapBagM f b1 r2 <- mapBagM f b2 return (TwoBags r1 r2) mapBagM f (ListBag xs) = do rs <- mapM f xs 
return (ListBag rs) mapBagM_ :: Monad m => (a -> m b) -> Bag a -> m () mapBagM_ _ EmptyBag = return () mapBagM_ f (UnitBag x) = f x >> return () mapBagM_ f (TwoBags b1 b2) = mapBagM_ f b1 >> mapBagM_ f b2 mapBagM_ f (ListBag xs) = mapM_ f xs flatMapBagM :: Monad m => (a -> m (Bag b)) -> Bag a -> m (Bag b) flatMapBagM _ EmptyBag = return EmptyBag flatMapBagM f (UnitBag x) = f x flatMapBagM f (TwoBags b1 b2) = do r1 <- flatMapBagM f b1 r2 <- flatMapBagM f b2 return (r1 `unionBags` r2) flatMapBagM f (ListBag xs) = foldrM k EmptyBag xs where k x b2 = do { b1 <- f x; return (b1 `unionBags` b2) } flatMapBagPairM :: Monad m => (a -> m (Bag b, Bag c)) -> Bag a -> m (Bag b, Bag c) flatMapBagPairM _ EmptyBag = return (EmptyBag, EmptyBag) flatMapBagPairM f (UnitBag x) = f x flatMapBagPairM f (TwoBags b1 b2) = do (r1,s1) <- flatMapBagPairM f b1 (r2,s2) <- flatMapBagPairM f b2 return (r1 `unionBags` r2, s1 `unionBags` s2) flatMapBagPairM f (ListBag xs) = foldrM k (EmptyBag, EmptyBag) xs where k x (r2,s2) = do { (r1,s1) <- f x ; return (r1 `unionBags` r2, s1 `unionBags` s2) } mapAndUnzipBagM :: Monad m => (a -> m (b,c)) -> Bag a -> m (Bag b, Bag c) mapAndUnzipBagM _ EmptyBag = return (EmptyBag, EmptyBag) mapAndUnzipBagM f (UnitBag x) = do (r,s) <- f x return (UnitBag r, UnitBag s) mapAndUnzipBagM f (TwoBags b1 b2) = do (r1,s1) <- mapAndUnzipBagM f b1 (r2,s2) <- mapAndUnzipBagM f b2 return (TwoBags r1 r2, TwoBags s1 s2) mapAndUnzipBagM f (ListBag xs) = do ts <- mapM f xs let (rs,ss) = unzip ts return (ListBag rs, ListBag ss) mapAccumBagL ::(acc -> x -> (acc, y)) -- ^ combining function -> acc -- ^ initial state -> Bag x -- ^ inputs -> (acc, Bag y) -- ^ final state, outputs mapAccumBagL _ s EmptyBag = (s, EmptyBag) mapAccumBagL f s (UnitBag x) = let (s1, x1) = f s x in (s1, UnitBag x1) mapAccumBagL f s (TwoBags b1 b2) = let (s1, b1') = mapAccumBagL f s b1 (s2, b2') = mapAccumBagL f s1 b2 in (s2, TwoBags b1' b2') mapAccumBagL f s (ListBag xs) = let (s', xs') = mapAccumL f s xs in 
(s', ListBag xs') mapAccumBagLM :: Monad m => (acc -> x -> m (acc, y)) -- ^ combining function -> acc -- ^ initial state -> Bag x -- ^ inputs -> m (acc, Bag y) -- ^ final state, outputs mapAccumBagLM _ s EmptyBag = return (s, EmptyBag) mapAccumBagLM f s (UnitBag x) = do { (s1, x1) <- f s x; return (s1, UnitBag x1) } mapAccumBagLM f s (TwoBags b1 b2) = do { (s1, b1') <- mapAccumBagLM f s b1 ; (s2, b2') <- mapAccumBagLM f s1 b2 ; return (s2, TwoBags b1' b2') } mapAccumBagLM f s (ListBag xs) = do { (s', xs') <- mapAccumLM f s xs ; return (s', ListBag xs') } listToBag :: [a] -> Bag a listToBag [] = EmptyBag listToBag [x] = UnitBag x listToBag vs = ListBag vs nonEmptyToBag :: NonEmpty a -> Bag a nonEmptyToBag (x :| []) = UnitBag x nonEmptyToBag (x :| xs) = ListBag (x : xs) bagToList :: Bag a -> [a] bagToList b = foldr (:) [] b headMaybe :: Bag a -> Maybe a headMaybe EmptyBag = Nothing headMaybe (UnitBag v) = Just v headMaybe (TwoBags b1 _) = headMaybe b1 headMaybe (ListBag l) = listToMaybe l instance (Outputable a) => Outputable (Bag a) where ppr bag = braces (pprWithCommas ppr (bagToList bag)) instance Data a => Data (Bag a) where gfoldl k z b = z listToBag `k` bagToList b -- traverse abstract type abstractly toConstr _ = abstractConstr $ "Bag("++show (typeOf (undefined::a))++")" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Bag" dataCast1 x = gcast1 x instance Foldable.Foldable Bag where foldr _ z EmptyBag = z foldr k z (UnitBag x) = k x z foldr k z (TwoBags b1 b2) = foldr k (foldr k z b2) b1 foldr k z (ListBag xs) = foldr k z xs foldl _ z EmptyBag = z foldl k z (UnitBag x) = k z x foldl k z (TwoBags b1 b2) = foldl k (foldl k z b1) b2 foldl k z (ListBag xs) = foldl k z xs foldl' _ z EmptyBag = z foldl' k z (UnitBag x) = k z x foldl' k z (TwoBags b1 b2) = let r1 = foldl' k z b1 in seq r1 $ foldl' k r1 b2 foldl' k z (ListBag xs) = foldl' k z xs instance Traversable Bag where traverse _ EmptyBag = pure EmptyBag traverse f (UnitBag x) = UnitBag <$> f x 
traverse f (TwoBags b1 b2) = TwoBags <$> traverse f b1 <*> traverse f b2 traverse f (ListBag xs) = ListBag <$> traverse f xs instance IsList (Bag a) where type Item (Bag a) = a fromList = listToBag toList = bagToList instance Semigroup (Bag a) where (<>) = unionBags instance Monoid (Bag a) where mempty = emptyBag ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Bool.hs0000644000000000000000000000071714472400112017740 0ustar0000000000000000module GHC.Data.Bool ( OverridingBool(..) , overrideWith ) where import GHC.Prelude data OverridingBool = Auto | Never | Always deriving ( Show , Read -- ^ @since 9.4.1 , Eq -- ^ @since 9.4.1 , Ord -- ^ @since 9.4.1 , Enum -- ^ @since 9.4.1 , Bounded -- ^ @since 9.4.1 ) overrideWith :: Bool -> OverridingBool -> Bool overrideWith b Auto = b overrideWith _ Never = False overrideWith _ Always = True ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/BooleanFormula.hs0000644000000000000000000002326214472400112021752 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} -------------------------------------------------------------------------------- -- | Boolean formulas without quantifiers and without negation. -- Such a formula consists of variables, conjunctions (and), and disjunctions (or). -- -- This module is used to represent minimal complete definitions for classes. 
-- module GHC.Data.BooleanFormula ( BooleanFormula(..), LBooleanFormula, mkFalse, mkTrue, mkAnd, mkOr, mkVar, isFalse, isTrue, eval, simplify, isUnsatisfied, implies, impliesAtom, pprBooleanFormula, pprBooleanFormulaNice ) where import GHC.Prelude import Data.List ( nub, intersperse ) import Data.Data import GHC.Utils.Monad import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Parser.Annotation ( LocatedL ) import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Types.Unique.Set ---------------------------------------------------------------------- -- Boolean formula type and smart constructors ---------------------------------------------------------------------- type LBooleanFormula a = LocatedL (BooleanFormula a) data BooleanFormula a = Var a | And [LBooleanFormula a] | Or [LBooleanFormula a] | Parens (LBooleanFormula a) deriving (Eq, Data, Functor, Foldable, Traversable) mkVar :: a -> BooleanFormula a mkVar = Var mkFalse, mkTrue :: BooleanFormula a mkFalse = Or [] mkTrue = And [] -- Convert a Bool to a BooleanFormula mkBool :: Bool -> BooleanFormula a mkBool False = mkFalse mkBool True = mkTrue -- Make a conjunction, and try to simplify mkAnd :: Eq a => [LBooleanFormula a] -> BooleanFormula a mkAnd = maybe mkFalse (mkAnd' . nub) . concatMapM fromAnd where -- See Note [Simplification of BooleanFormulas] fromAnd :: LBooleanFormula a -> Maybe [LBooleanFormula a] fromAnd (L _ (And xs)) = Just xs -- assume that xs are already simplified -- otherwise we would need: fromAnd (And xs) = concat <$> traverse fromAnd xs fromAnd (L _ (Or [])) = Nothing -- in case of False we bail out, And [..,mkFalse,..] == mkFalse fromAnd x = Just [x] mkAnd' [x] = unLoc x mkAnd' xs = And xs mkOr :: Eq a => [LBooleanFormula a] -> BooleanFormula a mkOr = maybe mkTrue (mkOr' . nub) . 
concatMapM fromOr where -- See Note [Simplification of BooleanFormulas] fromOr (L _ (Or xs)) = Just xs fromOr (L _ (And [])) = Nothing fromOr x = Just [x] mkOr' [x] = unLoc x mkOr' xs = Or xs {- Note [Simplification of BooleanFormulas] ~~~~~~~~~~~~~~~~~~~~~~ The smart constructors (`mkAnd` and `mkOr`) do some attempt to simplify expressions. In particular, 1. Collapsing nested ands and ors, so `(mkAnd [x, And [y,z]]` is represented as `And [x,y,z]` Implemented by `fromAnd`/`fromOr` 2. Collapsing trivial ands and ors, so `mkAnd [x]` becomes just `x`. Implemented by mkAnd' / mkOr' 3. Conjunction with false, disjunction with true is simplified, i.e. `mkAnd [mkFalse,x]` becomes `mkFalse`. 4. Common subexpression elimination: `mkAnd [x,x,y]` is reduced to just `mkAnd [x,y]`. This simplification is not exhaustive, in the sense that it will not produce the smallest possible equivalent expression. For example, `Or [And [x,y], And [x]]` could be simplified to `And [x]`, but it currently is not. A general simplifier would need to use something like BDDs. The reason behind the (crude) simplifier is to make for more user friendly error messages. E.g. for the code > class Foo a where > {-# MINIMAL bar, (foo, baq | foo, quux) #-} > instance Foo Int where > bar = ... > baz = ... > quux = ... We don't show a ridiculous error message like Implement () and (either (`foo' and ()) or (`foo' and ())) -} ---------------------------------------------------------------------- -- Evaluation and simplification ---------------------------------------------------------------------- isFalse :: BooleanFormula a -> Bool isFalse (Or []) = True isFalse _ = False isTrue :: BooleanFormula a -> Bool isTrue (And []) = True isTrue _ = False eval :: (a -> Bool) -> BooleanFormula a -> Bool eval f (Var x) = f x eval f (And xs) = all (eval f . unLoc) xs eval f (Or xs) = any (eval f . unLoc) xs eval f (Parens x) = eval f (unLoc x) -- Simplify a boolean formula. 
-- The argument function should give the truth of the atoms, or Nothing if undecided. simplify :: Eq a => (a -> Maybe Bool) -> BooleanFormula a -> BooleanFormula a simplify f (Var a) = case f a of Nothing -> Var a Just b -> mkBool b simplify f (And xs) = mkAnd (map (\(L l x) -> L l (simplify f x)) xs) simplify f (Or xs) = mkOr (map (\(L l x) -> L l (simplify f x)) xs) simplify f (Parens x) = simplify f (unLoc x) -- Test if a boolean formula is satisfied when the given values are assigned to the atoms -- if it is, returns Nothing -- if it is not, return (Just remainder) isUnsatisfied :: Eq a => (a -> Bool) -> BooleanFormula a -> Maybe (BooleanFormula a) isUnsatisfied f bf | isTrue bf' = Nothing | otherwise = Just bf' where f' x = if f x then Just True else Nothing bf' = simplify f' bf -- prop_simplify: -- eval f x == True <==> isTrue (simplify (Just . f) x) -- eval f x == False <==> isFalse (simplify (Just . f) x) -- If the boolean formula holds, does that mean that the given atom is always true? 
impliesAtom :: Eq a => BooleanFormula a -> a -> Bool Var x `impliesAtom` y = x == y And xs `impliesAtom` y = any (\x -> (unLoc x) `impliesAtom` y) xs -- we have all of xs, so one of them implying y is enough Or xs `impliesAtom` y = all (\x -> (unLoc x) `impliesAtom` y) xs Parens x `impliesAtom` y = (unLoc x) `impliesAtom` y implies :: Uniquable a => BooleanFormula a -> BooleanFormula a -> Bool implies e1 e2 = go (Clause emptyUniqSet [e1]) (Clause emptyUniqSet [e2]) where go :: Uniquable a => Clause a -> Clause a -> Bool go l@Clause{ clauseExprs = hyp:hyps } r = case hyp of Var x | memberClauseAtoms x r -> True | otherwise -> go (extendClauseAtoms l x) { clauseExprs = hyps } r Parens hyp' -> go l { clauseExprs = unLoc hyp':hyps } r And hyps' -> go l { clauseExprs = map unLoc hyps' ++ hyps } r Or hyps' -> all (\hyp' -> go l { clauseExprs = unLoc hyp':hyps } r) hyps' go l r@Clause{ clauseExprs = con:cons } = case con of Var x | memberClauseAtoms x l -> True | otherwise -> go l (extendClauseAtoms r x) { clauseExprs = cons } Parens con' -> go l r { clauseExprs = unLoc con':cons } And cons' -> all (\con' -> go l r { clauseExprs = unLoc con':cons }) cons' Or cons' -> go l r { clauseExprs = map unLoc cons' ++ cons } go _ _ = False -- A small sequent calculus proof engine. 
data Clause a = Clause { clauseAtoms :: UniqSet a, clauseExprs :: [BooleanFormula a] } extendClauseAtoms :: Uniquable a => Clause a -> a -> Clause a extendClauseAtoms c x = c { clauseAtoms = addOneToUniqSet (clauseAtoms c) x } memberClauseAtoms :: Uniquable a => a -> Clause a -> Bool memberClauseAtoms x c = x `elementOfUniqSet` clauseAtoms c ---------------------------------------------------------------------- -- Pretty printing ---------------------------------------------------------------------- -- Pretty print a BooleanFormula, -- using the arguments as pretty printers for Var, And and Or respectively pprBooleanFormula' :: (Rational -> a -> SDoc) -> (Rational -> [SDoc] -> SDoc) -> (Rational -> [SDoc] -> SDoc) -> Rational -> BooleanFormula a -> SDoc pprBooleanFormula' pprVar pprAnd pprOr = go where go p (Var x) = pprVar p x go p (And []) = cparen (p > 0) $ empty go p (And xs) = pprAnd p (map (go 3 . unLoc) xs) go _ (Or []) = keyword $ text "FALSE" go p (Or xs) = pprOr p (map (go 2 . unLoc) xs) go p (Parens x) = go p (unLoc x) -- Pretty print in source syntax, "a | b | c,d,e" pprBooleanFormula :: (Rational -> a -> SDoc) -> Rational -> BooleanFormula a -> SDoc pprBooleanFormula pprVar = pprBooleanFormula' pprVar pprAnd pprOr where pprAnd p = cparen (p > 3) . fsep . punctuate comma pprOr p = cparen (p > 2) . fsep . intersperse vbar -- Pretty print human in readable format, "either `a' or `b' or (`c', `d' and `e')"? pprBooleanFormulaNice :: Outputable a => BooleanFormula a -> SDoc pprBooleanFormulaNice = pprBooleanFormula' pprVar pprAnd pprOr 0 where pprVar _ = quotes . ppr pprAnd p = cparen (p > 1) . 
pprAnd' pprAnd' [] = empty pprAnd' [x,y] = x <+> text "and" <+> y pprAnd' xs@(_:_) = fsep (punctuate comma (init xs)) <> text ", and" <+> last xs pprOr p xs = cparen (p > 1) $ text "either" <+> sep (intersperse (text "or") xs) instance (OutputableBndr a) => Outputable (BooleanFormula a) where ppr = pprBooleanFormulaNormal pprBooleanFormulaNormal :: (OutputableBndr a) => BooleanFormula a -> SDoc pprBooleanFormulaNormal = go where go (Var x) = pprPrefixOcc x go (And xs) = fsep $ punctuate comma (map (go . unLoc) xs) go (Or []) = keyword $ text "FALSE" go (Or xs) = fsep $ intersperse vbar (map (go . unLoc) xs) go (Parens x) = parens (go $ unLoc x) ---------------------------------------------------------------------- -- Binary ---------------------------------------------------------------------- instance Binary a => Binary (BooleanFormula a) where put_ bh (Var x) = putByte bh 0 >> put_ bh x put_ bh (And xs) = putByte bh 1 >> put_ bh xs put_ bh (Or xs) = putByte bh 2 >> put_ bh xs put_ bh (Parens x) = putByte bh 3 >> put_ bh x get bh = do h <- getByte bh case h of 0 -> Var <$> get bh 1 -> And <$> get bh 2 -> Or <$> get bh _ -> Parens <$> get bh ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/EnumSet.hs0000644000000000000000000000375614472400112020433 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} -- | A tiny wrapper around 'IntSet.IntSet' for representing sets of 'Enum' -- things. 
module GHC.Data.EnumSet ( EnumSet , member , insert , delete , toList , fromList , empty , difference ) where import GHC.Prelude import GHC.Utils.Binary import qualified Data.IntSet as IntSet newtype EnumSet a = EnumSet IntSet.IntSet deriving (Semigroup, Monoid) member :: Enum a => a -> EnumSet a -> Bool member x (EnumSet s) = IntSet.member (fromEnum x) s insert :: Enum a => a -> EnumSet a -> EnumSet a insert x (EnumSet s) = EnumSet $ IntSet.insert (fromEnum x) s delete :: Enum a => a -> EnumSet a -> EnumSet a delete x (EnumSet s) = EnumSet $ IntSet.delete (fromEnum x) s toList :: Enum a => EnumSet a -> [a] toList (EnumSet s) = map toEnum $ IntSet.toList s fromList :: Enum a => [a] -> EnumSet a fromList = EnumSet . IntSet.fromList . map fromEnum empty :: EnumSet a empty = EnumSet IntSet.empty difference :: EnumSet a -> EnumSet a -> EnumSet a difference (EnumSet a) (EnumSet b) = EnumSet (IntSet.difference a b) -- | Represents the 'EnumSet' as a bit set. -- -- Assumes that all elements are non-negative. -- -- This is only efficient for values that are sufficiently small, -- for example in the lower hundreds. instance Binary (EnumSet a) where put_ bh = put_ bh . enumSetToBitArray get bh = bitArrayToEnumSet <$> get bh -- TODO: Using 'Natural' instead of 'Integer' should be slightly more efficient -- but we don't currently have a 'Binary' instance for 'Natural'. 
type BitArray = Integer enumSetToBitArray :: EnumSet a -> BitArray enumSetToBitArray (EnumSet int_set) = IntSet.foldl' setBit 0 int_set bitArrayToEnumSet :: BitArray -> EnumSet a bitArrayToEnumSet ba = EnumSet (go (popCount ba) 0 IntSet.empty) where go 0 _ !int_set = int_set go n i !int_set = if ba `testBit` i then go (pred n) (succ i) (IntSet.insert i int_set) else go n (succ i) int_set ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/FastMutInt.hs0000644000000000000000000000237414472400112021104 0ustar0000000000000000{-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples #-} {-# OPTIONS_GHC -O2 #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected -- -- (c) The University of Glasgow 2002-2006 -- -- Unboxed mutable Ints module GHC.Data.FastMutInt( FastMutInt, newFastMutInt, readFastMutInt, writeFastMutInt, atomicFetchAddFastMut ) where import GHC.Prelude import GHC.Base data FastMutInt = FastMutInt !(MutableByteArray# RealWorld) newFastMutInt :: Int -> IO FastMutInt newFastMutInt n = do x <- create writeFastMutInt x n return x where !(I# size) = finiteBitSize (0 :: Int) `unsafeShiftR` 3 create = IO $ \s -> case newByteArray# size s of (# s, arr #) -> (# s, FastMutInt arr #) readFastMutInt :: FastMutInt -> IO Int readFastMutInt (FastMutInt arr) = IO $ \s -> case readIntArray# arr 0# s of (# s, i #) -> (# s, I# i #) writeFastMutInt :: FastMutInt -> Int -> IO () writeFastMutInt (FastMutInt arr) (I# i) = IO $ \s -> case writeIntArray# arr 0# i s of s -> (# s, () #) atomicFetchAddFastMut :: FastMutInt -> Int -> IO Int atomicFetchAddFastMut (FastMutInt arr) (I# i) = IO $ \s -> case fetchAddIntArray# arr 0# i s of (# s, n #) -> (# s, I# n #) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/FastString.hs0000644000000000000000000006147014472400112021134 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE 
GeneralizedNewtypeDeriving #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE UnliftedFFITypes #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected -- | -- There are two principal string types used internally by GHC: -- -- ['FastString'] -- -- * A compact, hash-consed, representation of character strings. -- * Generated by 'fsLit'. -- * You can get a 'GHC.Types.Unique.Unique' from them. -- * Equality test is O(1) (it uses the Unique). -- * Comparison is O(1) or O(n): -- * O(n) but deterministic with lexical comparison (`lexicalCompareFS`) -- * O(1) but non-deterministic with Unique comparison (`uniqCompareFS`) -- * Turn into 'GHC.Utils.Outputable.SDoc' with 'GHC.Utils.Outputable.ftext'. -- -- ['PtrString'] -- -- * Pointer and size of a Latin-1 encoded string. -- * Practically no operations. -- * Outputting them is fast. -- * Generated by 'mkPtrString'. -- * Length of string literals (mkPtrString "abc") is computed statically -- * Turn into 'GHC.Utils.Outputable.SDoc' with 'GHC.Utils.Outputable.ptext' -- * Requires manual memory management. -- Improper use may lead to memory leaks or dangling pointers. -- * It assumes Latin-1 as the encoding, therefore it cannot represent -- arbitrary Unicode strings. -- -- Use 'PtrString' unless you want the facilities of 'FastString'. module GHC.Data.FastString ( -- * ByteString bytesFS, fastStringToByteString, mkFastStringByteString, fastZStringToByteString, unsafeMkByteString, -- * ShortByteString fastStringToShortByteString, mkFastStringShortByteString, -- * FastZString FastZString, hPutFZS, zString, lengthFZS, -- * FastStrings FastString(..), -- not abstract, for now. 
NonDetFastString (..), LexicalFastString (..), -- ** Construction fsLit, mkFastString, mkFastStringBytes, mkFastStringByteList, mkFastString#, -- ** Deconstruction unpackFS, -- :: FastString -> String unconsFS, -- :: FastString -> Maybe (Char, FastString) -- ** Encoding zEncodeFS, -- ** Operations uniqueOfFS, lengthFS, nullFS, appendFS, headFS, concatFS, consFS, nilFS, isUnderscoreFS, lexicalCompareFS, uniqCompareFS, -- ** Outputting hPutFS, -- ** Internal getFastStringTable, getFastStringZEncCounter, -- * PtrStrings PtrString (..), -- ** Construction mkPtrString#, mkPtrString, -- ** Deconstruction unpackPtrString, -- ** Operations lengthPS ) where import GHC.Prelude as Prelude import GHC.Utils.Encoding import GHC.Utils.IO.Unsafe import GHC.Utils.Panic.Plain import GHC.Utils.Misc import GHC.Data.FastMutInt import Control.Concurrent.MVar import Control.DeepSeq import Control.Monad import Data.ByteString (ByteString) import Data.ByteString.Short (ShortByteString) import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BSC import qualified Data.ByteString.Unsafe as BS import qualified Data.ByteString.Short as SBS #if !MIN_VERSION_bytestring(0,11,0) import qualified Data.ByteString.Short.Internal as SBS #endif import Foreign.C import System.IO import Data.Data import Data.IORef import Data.Char import Data.Semigroup as Semi import Foreign #if MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) import GHC.Conc.Sync (sharedCAF) #endif #if __GLASGOW_HASKELL__ < 811 import GHC.Base (unpackCString#,unpackNBytes#) #endif import GHC.Exts import GHC.IO -- | Gives the Modified UTF-8 encoded bytes corresponding to a 'FastString' bytesFS, fastStringToByteString :: FastString -> ByteString {-# INLINE[1] bytesFS #-} bytesFS f = SBS.fromShort $ fs_sbs f {-# DEPRECATED fastStringToByteString "Use `bytesFS` instead" #-} fastStringToByteString = bytesFS fastStringToShortByteString :: FastString -> ShortByteString fastStringToShortByteString = fs_sbs fastZStringToByteString :: 
FastZString -> ByteString fastZStringToByteString (FastZString bs) = bs -- This will drop information if any character > '\xFF' unsafeMkByteString :: String -> ByteString unsafeMkByteString = BSC.pack hashFastString :: FastString -> Int hashFastString fs = hashStr $ fs_sbs fs -- ----------------------------------------------------------------------------- newtype FastZString = FastZString ByteString deriving NFData hPutFZS :: Handle -> FastZString -> IO () hPutFZS handle (FastZString bs) = BS.hPut handle bs zString :: FastZString -> String zString (FastZString bs) = inlinePerformIO $ BS.unsafeUseAsCStringLen bs peekCAStringLen lengthFZS :: FastZString -> Int lengthFZS (FastZString bs) = BS.length bs mkFastZStringString :: String -> FastZString mkFastZStringString str = FastZString (BSC.pack str) -- ----------------------------------------------------------------------------- {-| A 'FastString' is a UTF-8 encoded string together with a unique ID. All 'FastString's are stored in a global hashtable to support fast O(1) comparison. It is also associated with a lazy reference to the Z-encoding of this string which is used by the compiler internally. -} data FastString = FastString { uniq :: {-# UNPACK #-} !Int, -- unique id n_chars :: {-# UNPACK #-} !Int, -- number of chars fs_sbs :: {-# UNPACK #-} !ShortByteString, fs_zenc :: FastZString -- ^ Lazily computed Z-encoding of this string. See Note [Z-Encoding] in -- GHC.Utils.Encoding. -- -- Since 'FastString's are globally memoized this is computed at most -- once for any given string. } instance Eq FastString where f1 == f2 = uniq f1 == uniq f2 -- We don't provide any "Ord FastString" instance to force you to think about -- which ordering you want: -- * lexical: deterministic, O(n). Cf lexicalCompareFS and LexicalFastString. -- * by unique: non-deterministic, O(1). Cf uniqCompareFS and NonDetFastString. 
instance IsString FastString where fromString = fsLit instance Semi.Semigroup FastString where (<>) = appendFS instance Monoid FastString where mempty = nilFS mappend = (Semi.<>) mconcat = concatFS instance Show FastString where show fs = show (unpackFS fs) instance Data FastString where -- don't traverse? toConstr _ = abstractConstr "FastString" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "FastString" instance NFData FastString where rnf fs = seq fs () -- | Compare FastString lexically -- -- If you don't care about the lexical ordering, use `uniqCompareFS` instead. lexicalCompareFS :: FastString -> FastString -> Ordering lexicalCompareFS fs1 fs2 = if uniq fs1 == uniq fs2 then EQ else utf8CompareShortByteString (fs_sbs fs1) (fs_sbs fs2) -- perform a lexical comparison taking into account the Modified UTF-8 -- encoding we use (cf #18562) -- | Compare FastString by their Unique (not lexically). -- -- Much cheaper than `lexicalCompareFS` but non-deterministic! uniqCompareFS :: FastString -> FastString -> Ordering uniqCompareFS fs1 fs2 = compare (uniq fs1) (uniq fs2) -- | Non-deterministic FastString -- -- This is a simple FastString wrapper with an Ord instance using -- `uniqCompareFS` (i.e. which compares FastStrings on their Uniques). Hence it -- is not deterministic from one run to the other. newtype NonDetFastString = NonDetFastString FastString deriving newtype (Eq, Show) deriving stock Data instance Ord NonDetFastString where compare (NonDetFastString fs1) (NonDetFastString fs2) = uniqCompareFS fs1 fs2 -- | Lexical FastString -- -- This is a simple FastString wrapper with an Ord instance using -- `lexicalCompareFS` (i.e. which compares FastStrings on their String -- representation). Hence it is deterministic from one run to the other. 
newtype LexicalFastString = LexicalFastString FastString deriving newtype (Eq, Show) deriving stock Data instance Ord LexicalFastString where compare (LexicalFastString fs1) (LexicalFastString fs2) = lexicalCompareFS fs1 fs2 -- ----------------------------------------------------------------------------- -- Construction {- Internally, the compiler will maintain a fast string symbol table, providing sharing and fast comparison. Creation of new @FastString@s then covertly does a lookup, re-using the @FastString@ if there was a hit. The design of the FastString hash table allows for lockless concurrent reads and updates to multiple buckets with low synchronization overhead. See Note [Updating the FastString table] on how it's updated. -} data FastStringTable = FastStringTable {-# UNPACK #-} !FastMutInt -- the unique ID counter shared with all buckets {-# UNPACK #-} !FastMutInt -- number of computed z-encodings for all buckets (Array# (IORef FastStringTableSegment)) -- concurrent segments data FastStringTableSegment = FastStringTableSegment {-# UNPACK #-} !(MVar ()) -- the lock for write in each segment {-# UNPACK #-} !FastMutInt -- the number of elements (MutableArray# RealWorld [FastString]) -- buckets in this segment {- Following parameters are determined based on: * Benchmark based on testsuite/tests/utils/should_run/T14854.hs * Stats of @echo :browse | ghc --interactive -dfaststring-stats >/dev/null@: on 2018-10-24, we have 13920 entries. 
-} segmentBits, numSegments, segmentMask, initialNumBuckets :: Int segmentBits = 8 numSegments = 256 -- bit segmentBits segmentMask = 0xff -- bit segmentBits - 1 initialNumBuckets = 64 hashToSegment# :: Int# -> Int# hashToSegment# hash# = hash# `andI#` segmentMask# where !(I# segmentMask#) = segmentMask hashToIndex# :: MutableArray# RealWorld [FastString] -> Int# -> Int# hashToIndex# buckets# hash# = (hash# `uncheckedIShiftRL#` segmentBits#) `remInt#` size# where !(I# segmentBits#) = segmentBits size# = sizeofMutableArray# buckets# maybeResizeSegment :: IORef FastStringTableSegment -> IO FastStringTableSegment maybeResizeSegment segmentRef = do segment@(FastStringTableSegment lock counter old#) <- readIORef segmentRef let oldSize# = sizeofMutableArray# old# newSize# = oldSize# *# 2# (I# n#) <- readFastMutInt counter if isTrue# (n# <# newSize#) -- maximum load of 1 then return segment else do resizedSegment@(FastStringTableSegment _ _ new#) <- IO $ \s1# -> case newArray# newSize# [] s1# of (# s2#, arr# #) -> (# s2#, FastStringTableSegment lock counter arr# #) forM_ [0 .. (I# oldSize#) - 1] $ \(I# i#) -> do fsList <- IO $ readArray# old# i# forM_ fsList $ \fs -> do let -- Shall we store in hash value in FastString instead? 
!(I# hash#) = hashFastString fs idx# = hashToIndex# new# hash# IO $ \s1# -> case readArray# new# idx# s1# of (# s2#, bucket #) -> case writeArray# new# idx# (fs: bucket) s2# of s3# -> (# s3#, () #) writeIORef segmentRef resizedSegment return resizedSegment {-# NOINLINE stringTable #-} stringTable :: FastStringTable stringTable = unsafePerformIO $ do let !(I# numSegments#) = numSegments !(I# initialNumBuckets#) = initialNumBuckets loop a# i# s1# | isTrue# (i# ==# numSegments#) = s1# | otherwise = case newMVar () `unIO` s1# of (# s2#, lock #) -> case newFastMutInt 0 `unIO` s2# of (# s3#, counter #) -> case newArray# initialNumBuckets# [] s3# of (# s4#, buckets# #) -> case newIORef (FastStringTableSegment lock counter buckets#) `unIO` s4# of (# s5#, segment #) -> case writeArray# a# i# segment s5# of s6# -> loop a# (i# +# 1#) s6# uid <- newFastMutInt 603979776 -- ord '$' * 0x01000000 n_zencs <- newFastMutInt 0 tab <- IO $ \s1# -> case newArray# numSegments# (panic "string_table") s1# of (# s2#, arr# #) -> case loop arr# 0# s2# of s3# -> case unsafeFreezeArray# arr# s3# of (# s4#, segments# #) -> (# s4#, FastStringTable uid n_zencs segments# #) -- use the support wired into the RTS to share this CAF among all images of -- libHSghc #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) return tab #else sharedCAF tab getOrSetLibHSghcFastStringTable -- from the 9.3 RTS; the previouss RTS before might not have this symbol. The -- right way to do this however would be to define some HAVE_FAST_STRING_TABLE -- or similar rather than use (odd parity) development versions. foreign import ccall unsafe "getOrSetLibHSghcFastStringTable" getOrSetLibHSghcFastStringTable :: Ptr a -> IO (Ptr a) #endif {- We include the FastString table in the `sharedCAF` mechanism because we'd like FastStrings created by a Core plugin to have the same uniques as corresponding strings created by the host compiler itself. 
For example, this allows plugins to lookup known names (eg `mkTcOcc "MySpecialType"`) in the GlobalRdrEnv or even re-invoke the parser. In particular, the following little sanity test was failing in a plugin prototyping safe newtype-coercions: GHC.NT.Type.NT was imported, but could not be looked up /by the plugin/. let rdrName = mkModuleName "GHC.NT.Type" `mkRdrQual` mkTcOcc "NT" putMsgS $ showSDoc dflags $ ppr $ lookupGRE_RdrName rdrName $ mg_rdr_env guts `mkTcOcc` involves the lookup (or creation) of a FastString. Since the plugin's FastString.string_table is empty, constructing the RdrName also allocates new uniques for the FastStrings "GHC.NT.Type" and "NT". These uniques are almost certainly unequal to the ones that the host compiler originally assigned to those FastStrings. Thus the lookup fails since the domain of the GlobalRdrEnv is affected by the RdrName's OccName's FastString's unique. Maintaining synchronization of the two instances of this global is rather difficult because of the uses of `unsafePerformIO` in this module. Not synchronizing them risks breaking the rather major invariant that two FastStrings with the same unique have the same string. Thus we use the lower-level `sharedCAF` mechanism that relies on Globals.c. -} mkFastString# :: Addr# -> FastString {-# INLINE mkFastString# #-} mkFastString# a# = mkFastStringBytes ptr (ptrStrLength ptr) where ptr = Ptr a# {- Note [Updating the FastString table] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use a concurrent hashtable which contains multiple segments, each hash value always maps to the same segment. Read is lock-free, write to the a segment should acquire a lock for that segment to avoid race condition, writes to different segments are independent. The procedure goes like this: 1. Find out which segment to operate on based on the hash value 2. Read the relevant bucket and perform a look up of the string. 3. If it exists, return it. 4. 
Otherwise grab a unique ID, create a new FastString and atomically attempt to update the relevant segment with this FastString: * Resize the segment by doubling the number of buckets when the number of FastStrings in this segment grows beyond the threshold. * Double check that the string is not in the bucket. Another thread may have inserted it while we were creating our string. * Return the existing FastString if it exists. The one we preemptively created will get GCed. * Otherwise, insert and return the string we created. -} mkFastStringWith :: (Int -> FastMutInt-> IO FastString) -> ShortByteString -> IO FastString mkFastStringWith mk_fs sbs = do FastStringTableSegment lock _ buckets# <- readIORef segmentRef let idx# = hashToIndex# buckets# hash# bucket <- IO $ readArray# buckets# idx# res <- bucket_match bucket sbs case res of Just found -> return found Nothing -> do -- The withMVar below is not dupable. It can lead to deadlock if it is -- only run partially and putMVar is not called after takeMVar. noDuplicate n <- get_uid new_fs <- mk_fs n n_zencs withMVar lock $ \_ -> insert new_fs where !(FastStringTable uid n_zencs segments#) = stringTable get_uid = atomicFetchAddFastMut uid 1 !(I# hash#) = hashStr sbs (# segmentRef #) = indexArray# segments# (hashToSegment# hash#) insert fs = do FastStringTableSegment _ counter buckets# <- maybeResizeSegment segmentRef let idx# = hashToIndex# buckets# hash# bucket <- IO $ readArray# buckets# idx# res <- bucket_match bucket sbs case res of -- The FastString was added by another thread after previous read and -- before we acquired the write lock. 
Just found -> return found Nothing -> do IO $ \s1# -> case writeArray# buckets# idx# (fs : bucket) s1# of s2# -> (# s2#, () #) _ <- atomicFetchAddFastMut counter 1 return fs bucket_match :: [FastString] -> ShortByteString -> IO (Maybe FastString) bucket_match [] _ = return Nothing bucket_match (fs@(FastString {fs_sbs=fs_sbs}) : ls) sbs | fs_sbs == sbs = return (Just fs) | otherwise = bucket_match ls sbs mkFastStringBytes :: Ptr Word8 -> Int -> FastString mkFastStringBytes !ptr !len = -- NB: Might as well use unsafeDupablePerformIO, since mkFastStringWith is -- idempotent. unsafeDupablePerformIO $ do sbs <- newSBSFromPtr ptr len mkFastStringWith (mkNewFastStringShortByteString sbs) sbs newSBSFromPtr :: Ptr a -> Int -> IO ShortByteString newSBSFromPtr (Ptr src#) (I# len#) = IO $ \s -> case newByteArray# len# s of { (# s, dst# #) -> case copyAddrToByteArray# src# dst# 0# len# s of { s -> case unsafeFreezeByteArray# dst# s of { (# s, ba# #) -> (# s, SBS.SBS ba# #) }}} -- | Create a 'FastString' by copying an existing 'ByteString' mkFastStringByteString :: ByteString -> FastString mkFastStringByteString bs = let sbs = SBS.toShort bs in inlinePerformIO $ mkFastStringWith (mkNewFastStringShortByteString sbs) sbs -- | Create a 'FastString' from an existing 'ShortByteString' without -- copying. mkFastStringShortByteString :: ShortByteString -> FastString mkFastStringShortByteString sbs = inlinePerformIO $ mkFastStringWith (mkNewFastStringShortByteString sbs) sbs -- | Creates a UTF-8 encoded 'FastString' from a 'String' mkFastString :: String -> FastString {-# NOINLINE[1] mkFastString #-} mkFastString str = inlinePerformIO $ do sbs <- utf8EncodeShortByteString str mkFastStringWith (mkNewFastStringShortByteString sbs) sbs -- The following rule is used to avoid polluting the non-reclaimable FastString -- table with transient strings when we only want their encoding. {-# RULES "bytesFS/mkFastString" forall x. 
bytesFS (mkFastString x) = utf8EncodeString x #-} -- | Creates a 'FastString' from a UTF-8 encoded @[Word8]@ mkFastStringByteList :: [Word8] -> FastString mkFastStringByteList str = mkFastStringShortByteString (SBS.pack str) -- | Creates a (lazy) Z-encoded 'FastString' from a 'ShortByteString' and -- account the number of forced z-strings into the passed 'FastMutInt'. mkZFastString :: FastMutInt -> ShortByteString -> FastZString mkZFastString n_zencs sbs = unsafePerformIO $ do _ <- atomicFetchAddFastMut n_zencs 1 return $ mkFastZStringString (zEncodeString (utf8DecodeShortByteString sbs)) mkNewFastStringShortByteString :: ShortByteString -> Int -> FastMutInt -> IO FastString mkNewFastStringShortByteString sbs uid n_zencs = do let zstr = mkZFastString n_zencs sbs chars <- countUTF8Chars sbs return (FastString uid chars sbs zstr) hashStr :: ShortByteString -> Int -- produce a hash value between 0 & m (inclusive) hashStr sbs@(SBS.SBS ba#) = loop 0# 0# where !(I# len#) = SBS.length sbs loop h n = if isTrue# (n ==# len#) then I# h else let -- DO NOT move this let binding! indexCharOffAddr# reads from the -- pointer so we need to evaluate this based on the length check -- above. Not doing this right caused #17909. #if __GLASGOW_HASKELL__ >= 901 !c = int8ToInt# (indexInt8Array# ba# n) #else !c = indexInt8Array# ba# n #endif !h2 = (h *# 16777619#) `xorI#` c in loop h2 (n +# 1#) -- ----------------------------------------------------------------------------- -- Operations -- | Returns the length of the 'FastString' in characters lengthFS :: FastString -> Int lengthFS fs = n_chars fs -- | Returns @True@ if the 'FastString' is empty nullFS :: FastString -> Bool nullFS fs = SBS.null $ fs_sbs fs -- | Unpacks and decodes the FastString unpackFS :: FastString -> String unpackFS fs = utf8DecodeShortByteString $ fs_sbs fs -- | Returns a Z-encoded version of a 'FastString'. This might be the -- original, if it was already Z-encoded. 
The first time this -- function is applied to a particular 'FastString', the results are -- memoized. -- zEncodeFS :: FastString -> FastZString zEncodeFS fs = fs_zenc fs appendFS :: FastString -> FastString -> FastString appendFS fs1 fs2 = mkFastStringShortByteString $ (Semi.<>) (fs_sbs fs1) (fs_sbs fs2) concatFS :: [FastString] -> FastString concatFS = mkFastStringShortByteString . mconcat . map fs_sbs headFS :: FastString -> Char headFS fs | SBS.null $ fs_sbs fs = panic "headFS: Empty FastString" headFS fs = head $ unpackFS fs consFS :: Char -> FastString -> FastString consFS c fs = mkFastString (c : unpackFS fs) unconsFS :: FastString -> Maybe (Char, FastString) unconsFS fs = case unpackFS fs of [] -> Nothing (chr : str) -> Just (chr, mkFastString str) uniqueOfFS :: FastString -> Int uniqueOfFS fs = uniq fs nilFS :: FastString nilFS = mkFastString "" isUnderscoreFS :: FastString -> Bool isUnderscoreFS fs = fs == fsLit "_" -- ----------------------------------------------------------------------------- -- Stats getFastStringTable :: IO [[[FastString]]] getFastStringTable = forM [0 .. numSegments - 1] $ \(I# i#) -> do let (# segmentRef #) = indexArray# segments# i# FastStringTableSegment _ _ buckets# <- readIORef segmentRef let bucketSize = I# (sizeofMutableArray# buckets#) forM [0 .. bucketSize - 1] $ \(I# j#) -> IO $ readArray# buckets# j# where !(FastStringTable _ _ segments#) = stringTable getFastStringZEncCounter :: IO Int getFastStringZEncCounter = readFastMutInt n_zencs where !(FastStringTable _ n_zencs _) = stringTable -- ----------------------------------------------------------------------------- -- Outputting 'FastString's -- |Outputs a 'FastString' with /no decoding at all/, that is, you -- get the actual bytes in the 'FastString' written to the 'Handle'. 
hPutFS :: Handle -> FastString -> IO () hPutFS handle fs = BS.hPut handle $ bytesFS fs -- ToDo: we'll probably want an hPutFSLocal, or something, to output -- in the current locale's encoding (for error messages and suchlike). -- ----------------------------------------------------------------------------- -- PtrStrings, here for convenience only. -- | A 'PtrString' is a pointer to some array of Latin-1 encoded chars. data PtrString = PtrString !(Ptr Word8) !Int -- | Wrap an unboxed address into a 'PtrString'. mkPtrString# :: Addr# -> PtrString {-# INLINE mkPtrString# #-} mkPtrString# a# = PtrString (Ptr a#) (ptrStrLength (Ptr a#)) -- | Encode a 'String' into a newly allocated 'PtrString' using Latin-1 -- encoding. The original string must not contain non-Latin-1 characters -- (above codepoint @0xff@). {-# NOINLINE[0] mkPtrString #-} -- see rules below mkPtrString :: String -> PtrString mkPtrString s = -- we don't use `unsafeDupablePerformIO` here to avoid potential memory leaks -- and because someone might be using `eqAddr#` to check for string equality. unsafePerformIO (do let len = length s p <- mallocBytes len let loop :: Int -> String -> IO () loop !_ [] = return () loop n (c:cs) = do pokeByteOff p n (fromIntegral (ord c) :: Word8) loop (1+n) cs loop 0 s return (PtrString p len) ) {-# RULES "mkPtrString" forall x . mkPtrString (unpackCString# x) = mkPtrString# x #-} -- | Decode a 'PtrString' back into a 'String' using Latin-1 encoding. -- This does not free the memory associated with 'PtrString'. 
unpackPtrString :: PtrString -> String unpackPtrString (PtrString (Ptr p#) (I# n#)) = unpackNBytes# p# n# -- | Return the length of a 'PtrString' lengthPS :: PtrString -> Int lengthPS (PtrString _ n) = n -- ----------------------------------------------------------------------------- -- under the carpet #if !MIN_VERSION_GLASGOW_HASKELL(9,0,0,0) foreign import ccall unsafe "strlen" cstringLength# :: Addr# -> Int# #endif ptrStrLength :: Ptr Word8 -> Int {-# INLINE ptrStrLength #-} ptrStrLength (Ptr a) = I# (cstringLength# a) {-# NOINLINE fsLit #-} fsLit :: String -> FastString fsLit x = mkFastString x {-# RULES "fslit" forall x . fsLit (unpackCString# x) = mkFastString# x #-} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/FastString/Env.hs0000644000000000000000000000753014472400112021661 0ustar0000000000000000{- % % (c) The University of Glasgow 2006 % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 % -} -- | FastStringEnv: FastString environments module GHC.Data.FastString.Env ( -- * FastString environments (maps) FastStringEnv, -- ** Manipulating these environments mkFsEnv, emptyFsEnv, unitFsEnv, extendFsEnv_C, extendFsEnv_Acc, extendFsEnv, extendFsEnvList, extendFsEnvList_C, filterFsEnv, plusFsEnv, plusFsEnv_C, alterFsEnv, lookupFsEnv, lookupFsEnv_NF, delFromFsEnv, delListFromFsEnv, elemFsEnv, mapFsEnv, -- * Deterministic FastString environments (maps) DFastStringEnv, -- ** Manipulating these environments mkDFsEnv, emptyDFsEnv, dFsEnvElts, lookupDFsEnv ) where import GHC.Prelude import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Data.Maybe import GHC.Data.FastString -- | A non-deterministic set of FastStrings. -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why it's not -- deterministic and why it matters. Use DFastStringEnv if the set eventually -- gets converted into a list or folded over in a way where the order -- changes the generated code. 
type FastStringEnv a = UniqFM FastString a -- Domain is FastString emptyFsEnv :: FastStringEnv a mkFsEnv :: [(FastString,a)] -> FastStringEnv a alterFsEnv :: (Maybe a-> Maybe a) -> FastStringEnv a -> FastString -> FastStringEnv a extendFsEnv_C :: (a->a->a) -> FastStringEnv a -> FastString -> a -> FastStringEnv a extendFsEnv_Acc :: (a->b->b) -> (a->b) -> FastStringEnv b -> FastString -> a -> FastStringEnv b extendFsEnv :: FastStringEnv a -> FastString -> a -> FastStringEnv a plusFsEnv :: FastStringEnv a -> FastStringEnv a -> FastStringEnv a plusFsEnv_C :: (a->a->a) -> FastStringEnv a -> FastStringEnv a -> FastStringEnv a extendFsEnvList :: FastStringEnv a -> [(FastString,a)] -> FastStringEnv a extendFsEnvList_C :: (a->a->a) -> FastStringEnv a -> [(FastString,a)] -> FastStringEnv a delFromFsEnv :: FastStringEnv a -> FastString -> FastStringEnv a delListFromFsEnv :: FastStringEnv a -> [FastString] -> FastStringEnv a elemFsEnv :: FastString -> FastStringEnv a -> Bool unitFsEnv :: FastString -> a -> FastStringEnv a lookupFsEnv :: FastStringEnv a -> FastString -> Maybe a lookupFsEnv_NF :: FastStringEnv a -> FastString -> a filterFsEnv :: (elt -> Bool) -> FastStringEnv elt -> FastStringEnv elt mapFsEnv :: (elt1 -> elt2) -> FastStringEnv elt1 -> FastStringEnv elt2 emptyFsEnv = emptyUFM unitFsEnv x y = unitUFM x y extendFsEnv x y z = addToUFM x y z extendFsEnvList x l = addListToUFM x l lookupFsEnv x y = lookupUFM x y alterFsEnv = alterUFM mkFsEnv l = listToUFM l elemFsEnv x y = elemUFM x y plusFsEnv x y = plusUFM x y plusFsEnv_C f x y = plusUFM_C f x y extendFsEnv_C f x y z = addToUFM_C f x y z mapFsEnv f x = mapUFM f x extendFsEnv_Acc x y z a b = addToUFM_Acc x y z a b extendFsEnvList_C x y z = addListToUFM_C x y z delFromFsEnv x y = delFromUFM x y delListFromFsEnv x y = delListFromUFM x y filterFsEnv x y = filterUFM x y lookupFsEnv_NF env n = expectJust "lookupFsEnv_NF" (lookupFsEnv env n) -- Deterministic FastStringEnv -- See Note [Deterministic UniqFM] in 
GHC.Types.Unique.DFM for explanation why we need -- DFastStringEnv. type DFastStringEnv a = UniqDFM FastString a -- Domain is FastString emptyDFsEnv :: DFastStringEnv a emptyDFsEnv = emptyUDFM dFsEnvElts :: DFastStringEnv a -> [a] dFsEnvElts = eltsUDFM mkDFsEnv :: [(FastString,a)] -> DFastStringEnv a mkDFsEnv l = listToUDFM l lookupDFsEnv :: DFastStringEnv a -> FastString -> Maybe a lookupDFsEnv = lookupUDFM ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/FiniteMap.hs0000644000000000000000000000163614472400112020722 0ustar0000000000000000-- Some extra functions to extend Data.Map module GHC.Data.FiniteMap ( insertList, insertListWith, deleteList, foldRight, foldRightWithKey ) where import GHC.Prelude import Data.Map (Map) import qualified Data.Map as Map insertList :: Ord key => [(key,elt)] -> Map key elt -> Map key elt insertList xs m = foldl' (\m (k, v) -> Map.insert k v m) m xs insertListWith :: Ord key => (elt -> elt -> elt) -> [(key,elt)] -> Map key elt -> Map key elt insertListWith f xs m0 = foldl' (\m (k, v) -> Map.insertWith f k v m) m0 xs deleteList :: Ord key => [key] -> Map key elt -> Map key elt deleteList ks m = foldl' (flip Map.delete) m ks foldRight :: (elt -> a -> a) -> a -> Map key elt -> a foldRight = Map.foldr foldRightWithKey :: (key -> elt -> a -> a) -> a -> Map key elt -> a foldRightWithKey = Map.foldrWithKey ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Graph/Directed.hs0000644000000000000000000005111714472400112021631 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} module GHC.Data.Graph.Directed ( Graph, graphFromEdgedVerticesOrd, graphFromEdgedVerticesUniq, SCC(..), Node(..), flattenSCC, flattenSCCs, stronglyConnCompG, topologicalSortG, verticesG, edgesG, hasVertexG, reachableG, reachablesG, transposeG, allReachable, outgoingG, emptyG, findCycle, -- For backwards compatibility with the simpler version of Digraph 
stronglyConnCompFromEdgedVerticesOrd, stronglyConnCompFromEdgedVerticesOrdR, stronglyConnCompFromEdgedVerticesUniq, stronglyConnCompFromEdgedVerticesUniqR, -- Simple way to classify edges EdgeType(..), classifyEdges ) where ------------------------------------------------------------------------------ -- A version of the graph algorithms described in: -- -- ``Lazy Depth-First Search and Linear IntGraph Algorithms in Haskell'' -- by David King and John Launchbury -- -- Also included is some additional code for printing tree structures ... -- -- If you ever find yourself in need of algorithms for classifying edges, -- or finding connected/biconnected components, consult the history; Sigbjorn -- Finne contributed some implementations in 1997, although we've since -- removed them since they were not used anywhere in GHC. ------------------------------------------------------------------------------ import GHC.Prelude import GHC.Utils.Misc ( minWith, count ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.Maybe ( expectJust ) -- std interfaces import Data.Maybe import Data.Array import Data.List ( sort ) import qualified Data.Map as Map import qualified Data.Set as Set import qualified Data.Graph as G import Data.Graph hiding (Graph, Edge, transposeG, reachable) import Data.Tree import GHC.Types.Unique import GHC.Types.Unique.FM import qualified Data.IntMap as IM import qualified Data.IntSet as IS import qualified Data.Map as M import qualified Data.Set as S {- ************************************************************************ * * * Graphs and Graph Construction * * ************************************************************************ Note [Nodes, keys, vertices] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * A 'node' is a big blob of client-stuff * Each 'node' has a unique (client) 'key', but the latter is in Ord and has fast comparison * Digraph then maps each 'key' to a Vertex (Int) which is arranged densely in 0.n -} data Graph node = Graph { 
gr_int_graph :: IntGraph, gr_vertex_to_node :: Vertex -> node, gr_node_to_vertex :: node -> Maybe Vertex } data Edge node = Edge node node {-| Representation for nodes of the Graph. * The @payload@ is user data, just carried around in this module * The @key@ is the node identifier. Key has an Ord instance for performance reasons. * The @[key]@ are the dependencies of the node; it's ok to have extra keys in the dependencies that are not the key of any Node in the graph -} data Node key payload = DigraphNode { node_payload :: payload, -- ^ User data node_key :: key, -- ^ User defined node id node_dependencies :: [key] -- ^ Dependencies/successors of the node } instance (Outputable a, Outputable b) => Outputable (Node a b) where ppr (DigraphNode a b c) = ppr (a, b, c) emptyGraph :: Graph a emptyGraph = Graph (array (1, 0) []) (error "emptyGraph") (const Nothing) -- See Note [Deterministic SCC] graphFromEdgedVertices :: ReduceFn key payload -> [Node key payload] -- The graph; its ok for the -- out-list to contain keys which aren't -- a vertex key, they are ignored -> Graph (Node key payload) graphFromEdgedVertices _reduceFn [] = emptyGraph graphFromEdgedVertices reduceFn edged_vertices = Graph graph vertex_fn (key_vertex . 
key_extractor) where key_extractor = node_key (bounds, vertex_fn, key_vertex, numbered_nodes) = reduceFn edged_vertices key_extractor graph = array bounds [ (v, sort $ mapMaybe key_vertex ks) | (v, (node_dependencies -> ks)) <- numbered_nodes] -- We normalize outgoing edges by sorting on node order, so -- that the result doesn't depend on the order of the edges -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] graphFromEdgedVerticesOrd :: Ord key => [Node key payload] -- The graph; its ok for the -- out-list to contain keys which aren't -- a vertex key, they are ignored -> Graph (Node key payload) graphFromEdgedVerticesOrd = graphFromEdgedVertices reduceNodesIntoVerticesOrd -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] graphFromEdgedVerticesUniq :: Uniquable key => [Node key payload] -- The graph; its ok for the -- out-list to contain keys which aren't -- a vertex key, they are ignored -> Graph (Node key payload) graphFromEdgedVerticesUniq = graphFromEdgedVertices reduceNodesIntoVerticesUniq type ReduceFn key payload = [Node key payload] -> (Node key payload -> key) -> (Bounds, Vertex -> Node key payload , key -> Maybe Vertex, [(Vertex, Node key payload)]) {- Note [reduceNodesIntoVertices implementations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ reduceNodesIntoVertices is parameterized by the container type. This is to accommodate key types that don't have an Ord instance and hence preclude the use of Data.Map. An example of such type would be Unique, there's no way to implement Ord Unique deterministically. For such types, there's a version with a Uniquable constraint. This leaves us with two versions of every function that depends on reduceNodesIntoVertices, one with Ord constraint and the other with Uniquable constraint. For example: graphFromEdgedVerticesOrd and graphFromEdgedVerticesUniq. The Uniq version should be a tiny bit more efficient since it uses Data.IntMap internally. 
-} reduceNodesIntoVertices :: ([(key, Vertex)] -> m) -> (key -> m -> Maybe Vertex) -> ReduceFn key payload reduceNodesIntoVertices fromList lookup nodes key_extractor = (bounds, (!) vertex_map, key_vertex, numbered_nodes) where max_v = length nodes - 1 bounds = (0, max_v) :: (Vertex, Vertex) -- Keep the order intact to make the result depend on input order -- instead of key order numbered_nodes = zip [0..] nodes vertex_map = array bounds numbered_nodes key_map = fromList [ (key_extractor node, v) | (v, node) <- numbered_nodes ] key_vertex k = lookup k key_map -- See Note [reduceNodesIntoVertices implementations] reduceNodesIntoVerticesOrd :: Ord key => ReduceFn key payload reduceNodesIntoVerticesOrd = reduceNodesIntoVertices Map.fromList Map.lookup -- See Note [reduceNodesIntoVertices implementations] reduceNodesIntoVerticesUniq :: Uniquable key => ReduceFn key payload reduceNodesIntoVerticesUniq = reduceNodesIntoVertices listToUFM (flip lookupUFM) {- ************************************************************************ * * * SCC * * ************************************************************************ -} type WorkItem key payload = (Node key payload, -- Tip of the path [payload]) -- Rest of the path; -- [a,b,c] means c depends on b, b depends on a -- | Find a reasonably short cycle a->b->c->a, in a strongly -- connected component. The input nodes are presumed to be -- a SCC, so you can start anywhere. findCycle :: forall payload key. Ord key => [Node key payload] -- The nodes. 
The dependencies can -- contain extra keys, which are ignored -> Maybe [payload] -- A cycle, starting with node -- so each depends on the next findCycle graph = go Set.empty (new_work root_deps []) [] where env :: Map.Map key (Node key payload) env = Map.fromList [ (node_key node, node) | node <- graph ] -- Find the node with fewest dependencies among the SCC modules -- This is just a heuristic to find some plausible root module root :: Node key payload root = fst (minWith snd [ (node, count (`Map.member` env) (node_dependencies node)) | node <- graph ]) DigraphNode root_payload root_key root_deps = root -- 'go' implements Dijkstra's algorithm, more or less go :: Set.Set key -- Visited -> [WorkItem key payload] -- Work list, items length n -> [WorkItem key payload] -- Work list, items length n+1 -> Maybe [payload] -- Returned cycle -- Invariant: in a call (go visited ps qs), -- visited = union (map tail (ps ++ qs)) go _ [] [] = Nothing -- No cycles go visited [] qs = go visited qs [] go visited (((DigraphNode payload key deps), path) : ps) qs | key == root_key = Just (root_payload : reverse path) | key `Set.member` visited = go visited ps qs | key `Map.notMember` env = go visited ps qs | otherwise = go (Set.insert key visited) ps (new_qs ++ qs) where new_qs = new_work deps (payload : path) new_work :: [key] -> [payload] -> [WorkItem key payload] new_work deps path = [ (n, path) | Just n <- map (`Map.lookup` env) deps ] {- ************************************************************************ * * * Strongly Connected Component wrappers for Graph * * ************************************************************************ Note: the components are returned topologically sorted: later components depend on earlier ones, but not vice versa i.e. later components only have edges going from them to earlier ones. 
-} {- Note [Deterministic SCC] ~~~~~~~~~~~~~~~~~~~~~~~~ stronglyConnCompFromEdgedVerticesUniq, stronglyConnCompFromEdgedVerticesUniqR, stronglyConnCompFromEdgedVerticesOrd and stronglyConnCompFromEdgedVerticesOrdR provide a following guarantee: Given a deterministically ordered list of nodes it returns a deterministically ordered list of strongly connected components, where the list of vertices in an SCC is also deterministically ordered. Note that the order of edges doesn't need to be deterministic for this to work. We use the order of nodes to normalize the order of edges. -} stronglyConnCompG :: Graph node -> [SCC node] stronglyConnCompG graph = decodeSccs graph forest where forest = {-# SCC "Digraph.scc" #-} scc (gr_int_graph graph) decodeSccs :: Graph node -> Forest Vertex -> [SCC node] decodeSccs Graph { gr_int_graph = graph, gr_vertex_to_node = vertex_fn } forest = map decode forest where decode (Node v []) | mentions_itself v = CyclicSCC [vertex_fn v] | otherwise = AcyclicSCC (vertex_fn v) decode other = CyclicSCC (dec other []) where dec (Node v ts) vs = vertex_fn v : foldr dec vs ts mentions_itself v = v `elem` (graph ! v) -- The following two versions are provided for backwards compatibility: -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesOrd :: Ord key => [Node key payload] -> [SCC payload] stronglyConnCompFromEdgedVerticesOrd = map (fmap node_payload) . stronglyConnCompFromEdgedVerticesOrdR -- The following two versions are provided for backwards compatibility: -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesUniq :: Uniquable key => [Node key payload] -> [SCC payload] stronglyConnCompFromEdgedVerticesUniq = map (fmap node_payload) . 
stronglyConnCompFromEdgedVerticesUniqR -- The "R" interface is used when you expect to apply SCC to -- (some of) the result of SCC, so you don't want to lose the dependency info -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesOrdR :: Ord key => [Node key payload] -> [SCC (Node key payload)] stronglyConnCompFromEdgedVerticesOrdR = stronglyConnCompG . graphFromEdgedVertices reduceNodesIntoVerticesOrd -- The "R" interface is used when you expect to apply SCC to -- (some of) the result of SCC, so you don't want to lose the dependency info -- See Note [Deterministic SCC] -- See Note [reduceNodesIntoVertices implementations] stronglyConnCompFromEdgedVerticesUniqR :: Uniquable key => [Node key payload] -> [SCC (Node key payload)] stronglyConnCompFromEdgedVerticesUniqR = stronglyConnCompG . graphFromEdgedVertices reduceNodesIntoVerticesUniq {- ************************************************************************ * * * Misc wrappers for Graph * * ************************************************************************ -} topologicalSortG :: Graph node -> [node] topologicalSortG graph = map (gr_vertex_to_node graph) result where result = {-# SCC "Digraph.topSort" #-} topSort (gr_int_graph graph) reachableG :: Graph node -> node -> [node] reachableG graph from = map (gr_vertex_to_node graph) result where from_vertex = expectJust "reachableG" (gr_node_to_vertex graph from) result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) [from_vertex] outgoingG :: Graph node -> node -> [node] outgoingG graph from = map (gr_vertex_to_node graph) result where from_vertex = expectJust "reachableG" (gr_node_to_vertex graph from) result = gr_int_graph graph ! from_vertex -- | Given a list of roots return all reachable nodes. 
reachablesG :: Graph node -> [node] -> [node] reachablesG graph froms = map (gr_vertex_to_node graph) result where result = {-# SCC "Digraph.reachable" #-} reachable (gr_int_graph graph) vs vs = [ v | Just v <- map (gr_node_to_vertex graph) froms ] -- | Efficiently construct a map which maps each key to it's set of transitive -- dependencies. allReachable :: Ord key => Graph node -> (node -> key) -> M.Map key (S.Set key) allReachable (Graph g from _) conv = M.fromList [(conv (from v), IS.foldr (\k vs -> conv (from k) `S.insert` vs) S.empty vs) | (v, vs) <- IM.toList int_graph] where int_graph = reachableGraph g hasVertexG :: Graph node -> node -> Bool hasVertexG graph node = isJust $ gr_node_to_vertex graph node verticesG :: Graph node -> [node] verticesG graph = map (gr_vertex_to_node graph) $ vertices (gr_int_graph graph) edgesG :: Graph node -> [Edge node] edgesG graph = map (\(v1, v2) -> Edge (v2n v1) (v2n v2)) $ edges (gr_int_graph graph) where v2n = gr_vertex_to_node graph transposeG :: Graph node -> Graph node transposeG graph = Graph (G.transposeG (gr_int_graph graph)) (gr_vertex_to_node graph) (gr_node_to_vertex graph) emptyG :: Graph node -> Bool emptyG g = graphEmpty (gr_int_graph g) {- ************************************************************************ * * * Showing Graphs * * ************************************************************************ -} instance Outputable node => Outputable (Graph node) where ppr graph = vcat [ hang (text "Vertices:") 2 (vcat (map ppr $ verticesG graph)), hang (text "Edges:") 2 (vcat (map ppr $ edgesG graph)) ] instance Outputable node => Outputable (Edge node) where ppr (Edge from to) = ppr from <+> text "->" <+> ppr to graphEmpty :: G.Graph -> Bool graphEmpty g = lo > hi where (lo, hi) = bounds g {- ************************************************************************ * * * IntGraphs * * ************************************************************************ -} type IntGraph = G.Graph {- 
------------------------------------------------------------ -- Depth first search numbering ------------------------------------------------------------ -} -- Data.Tree has flatten for Tree, but nothing for Forest preorderF :: Forest a -> [a] preorderF ts = concatMap flatten ts {- ------------------------------------------------------------ -- Finding reachable vertices ------------------------------------------------------------ -} -- This generalizes reachable which was found in Data.Graph reachable :: IntGraph -> [Vertex] -> [Vertex] reachable g vs = preorderF (dfs g vs) reachableGraph :: IntGraph -> IM.IntMap IS.IntSet reachableGraph g = res where do_one v = IS.unions (IS.fromList (g ! v) : mapMaybe (flip IM.lookup res) (g ! v)) res = IM.fromList [(v, do_one v) | v <- vertices g] {- ************************************************************************ * * * Classify Edge Types * * ************************************************************************ -} -- Remark: While we could generalize this algorithm this comes at a runtime -- cost and with no advantages. If you find yourself using this with graphs -- not easily represented using Int nodes please consider rewriting this -- using the more general Graph type. -- | Edge direction based on DFS Classification data EdgeType = Forward | Cross | Backward -- ^ Loop back towards the root node. -- Eg backjumps in loops | SelfLoop -- ^ v -> v deriving (Eq,Ord) instance Outputable EdgeType where ppr Forward = text "Forward" ppr Cross = text "Cross" ppr Backward = text "Backward" ppr SelfLoop = text "SelfLoop" newtype Time = Time Int deriving (Eq,Ord,Num,Outputable) --Allow for specialization {-# INLINEABLE classifyEdges #-} -- | Given a start vertex, a way to get successors from a node -- and a list of (directed) edges classify the types of edges. classifyEdges :: forall key. 
Uniquable key => key -> (key -> [key]) -> [(key,key)] -> [((key, key), EdgeType)] classifyEdges root getSucc edges = --let uqe (from,to) = (getUnique from, getUnique to) --in pprTrace "Edges:" (ppr $ map uqe edges) $ zip edges $ map classify edges where (_time, starts, ends) = addTimes (0,emptyUFM,emptyUFM) root classify :: (key,key) -> EdgeType classify (from,to) | startFrom < startTo , endFrom > endTo = Forward | startFrom > startTo , endFrom < endTo = Backward | startFrom > startTo , endFrom > endTo = Cross | getUnique from == getUnique to = SelfLoop | otherwise = pprPanic "Failed to classify edge of Graph" (ppr (getUnique from, getUnique to)) where getTime event node | Just time <- lookupUFM event node = time | otherwise = pprPanic "Failed to classify edge of CFG - not not timed" (text "edges" <> ppr (getUnique from, getUnique to) <+> ppr starts <+> ppr ends ) startFrom = getTime starts from startTo = getTime starts to endFrom = getTime ends from endTo = getTime ends to addTimes :: (Time, UniqFM key Time, UniqFM key Time) -> key -> (Time, UniqFM key Time, UniqFM key Time) addTimes (time,starts,ends) n --Dont reenter nodes | elemUFM n starts = (time,starts,ends) | otherwise = let starts' = addToUFM starts n time time' = time + 1 succs = getSucc n :: [key] (time'',starts'',ends') = foldl' addTimes (time',starts',ends) succs ends'' = addToUFM ends' n time'' in (time'' + 1, starts'', ends'') ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Graph/UnVar.hs0000644000000000000000000001423414472400112021140 0ustar0000000000000000{- Copyright (c) 2014 Joachim Breitner A data structure for undirected graphs of variables (or in plain terms: Sets of unordered pairs of numbers) This is very specifically tailored for the use in CallArity. In particular it stores the graph as a union of complete and complete bipartite graph, which would be very expensive to store as sets of edges or as adjanceny lists. It does not normalize the graphs. 
This means that g `unionUnVarGraph` g is equal to g, but twice as expensive and large. -} module GHC.Data.Graph.UnVar ( UnVarSet , emptyUnVarSet, mkUnVarSet, varEnvDom, unionUnVarSet, unionUnVarSets , extendUnVarSet, extendUnVarSetList, delUnVarSet, delUnVarSetList , elemUnVarSet, isEmptyUnVarSet , UnVarGraph , emptyUnVarGraph , unionUnVarGraph, unionUnVarGraphs , completeGraph, completeBipartiteGraph , neighbors , hasLoopAt , delNode ) where import GHC.Prelude import GHC.Types.Id import GHC.Types.Var.Env import GHC.Types.Unique.FM import GHC.Utils.Outputable import GHC.Types.Unique import qualified Data.IntSet as S -- We need a type for sets of variables (UnVarSet). -- We do not use VarSet, because for that we need to have the actual variable -- at hand, and we do not have that when we turn the domain of a VarEnv into a UnVarSet. -- Therefore, use a IntSet directly (which is likely also a bit more efficient). -- Set of uniques, i.e. for adjancet nodes newtype UnVarSet = UnVarSet (S.IntSet) deriving Eq k :: Var -> Int k v = getKey (getUnique v) emptyUnVarSet :: UnVarSet emptyUnVarSet = UnVarSet S.empty elemUnVarSet :: Var -> UnVarSet -> Bool elemUnVarSet v (UnVarSet s) = k v `S.member` s isEmptyUnVarSet :: UnVarSet -> Bool isEmptyUnVarSet (UnVarSet s) = S.null s delUnVarSet :: UnVarSet -> Var -> UnVarSet delUnVarSet (UnVarSet s) v = UnVarSet $ k v `S.delete` s delUnVarSetList :: UnVarSet -> [Var] -> UnVarSet delUnVarSetList s vs = s `minusUnVarSet` mkUnVarSet vs minusUnVarSet :: UnVarSet -> UnVarSet -> UnVarSet minusUnVarSet (UnVarSet s) (UnVarSet s') = UnVarSet $ s `S.difference` s' sizeUnVarSet :: UnVarSet -> Int sizeUnVarSet (UnVarSet s) = S.size s mkUnVarSet :: [Var] -> UnVarSet mkUnVarSet vs = UnVarSet $ S.fromList $ map k vs varEnvDom :: VarEnv a -> UnVarSet varEnvDom ae = UnVarSet $ ufmToSet_Directly ae extendUnVarSet :: Var -> UnVarSet -> UnVarSet extendUnVarSet v (UnVarSet s) = UnVarSet $ S.insert (k v) s extendUnVarSetList :: [Var] -> UnVarSet -> UnVarSet 
extendUnVarSetList vs s = s `unionUnVarSet` mkUnVarSet vs unionUnVarSet :: UnVarSet -> UnVarSet -> UnVarSet unionUnVarSet (UnVarSet set1) (UnVarSet set2) = UnVarSet (set1 `S.union` set2) unionUnVarSets :: [UnVarSet] -> UnVarSet unionUnVarSets = foldl' (flip unionUnVarSet) emptyUnVarSet instance Outputable UnVarSet where ppr (UnVarSet s) = braces $ hcat $ punctuate comma [ ppr (getUnique i) | i <- S.toList s] data UnVarGraph = CBPG !UnVarSet !UnVarSet -- ^ complete bipartite graph | CG !UnVarSet -- ^ complete graph | Union UnVarGraph UnVarGraph | Del !UnVarSet UnVarGraph emptyUnVarGraph :: UnVarGraph emptyUnVarGraph = CG emptyUnVarSet unionUnVarGraph :: UnVarGraph -> UnVarGraph -> UnVarGraph {- Premature optimisation, it seems. unionUnVarGraph (UnVarGraph [CBPG s1 s2]) (UnVarGraph [CG s3, CG s4]) | s1 == s3 && s2 == s4 = pprTrace "unionUnVarGraph fired" empty $ completeGraph (s1 `unionUnVarSet` s2) unionUnVarGraph (UnVarGraph [CBPG s1 s2]) (UnVarGraph [CG s3, CG s4]) | s2 == s3 && s1 == s4 = pprTrace "unionUnVarGraph fired2" empty $ completeGraph (s1 `unionUnVarSet` s2) -} unionUnVarGraph a b | is_null a = b | is_null b = a | otherwise = Union a b unionUnVarGraphs :: [UnVarGraph] -> UnVarGraph unionUnVarGraphs = foldl' unionUnVarGraph emptyUnVarGraph -- completeBipartiteGraph A B = { {a,b} | a ∈ A, b ∈ B } completeBipartiteGraph :: UnVarSet -> UnVarSet -> UnVarGraph completeBipartiteGraph s1 s2 = prune $ CBPG s1 s2 completeGraph :: UnVarSet -> UnVarGraph completeGraph s = prune $ CG s -- (v' ∈ neighbors G v) <=> v--v' ∈ G neighbors :: UnVarGraph -> Var -> UnVarSet neighbors = go where go (Del d g) v | v `elemUnVarSet` d = emptyUnVarSet | otherwise = go g v `minusUnVarSet` d go (Union g1 g2) v = go g1 v `unionUnVarSet` go g2 v go (CG s) v = if v `elemUnVarSet` s then s else emptyUnVarSet go (CBPG s1 s2) v = (if v `elemUnVarSet` s1 then s2 else emptyUnVarSet) `unionUnVarSet` (if v `elemUnVarSet` s2 then s1 else emptyUnVarSet) -- hasLoopAt G v <=> v--v ∈ G hasLoopAt :: 
UnVarGraph -> Var -> Bool hasLoopAt = go where go (Del d g) v | v `elemUnVarSet` d = False | otherwise = go g v go (Union g1 g2) v = go g1 v || go g2 v go (CG s) v = v `elemUnVarSet` s go (CBPG s1 s2) v = v `elemUnVarSet` s1 && v `elemUnVarSet` s2 delNode :: UnVarGraph -> Var -> UnVarGraph delNode (Del d g) v = Del (extendUnVarSet v d) g delNode g v | is_null g = emptyUnVarGraph | otherwise = Del (mkUnVarSet [v]) g -- | Resolves all `Del`, by pushing them in, and simplifies `∅ ∪ … = …` prune :: UnVarGraph -> UnVarGraph prune = go emptyUnVarSet where go :: UnVarSet -> UnVarGraph -> UnVarGraph go dels (Del dels' g) = go (dels `unionUnVarSet` dels') g go dels (Union g1 g2) | is_null g1' = g2' | is_null g2' = g1' | otherwise = Union g1' g2' where g1' = go dels g1 g2' = go dels g2 go dels (CG s) = CG (s `minusUnVarSet` dels) go dels (CBPG s1 s2) = CBPG (s1 `minusUnVarSet` dels) (s2 `minusUnVarSet` dels) -- | Shallow empty check. is_null :: UnVarGraph -> Bool is_null (CBPG s1 s2) = isEmptyUnVarSet s1 || isEmptyUnVarSet s2 is_null (CG s) = isEmptyUnVarSet s is_null _ = False instance Outputable UnVarGraph where ppr (Del d g) = text "Del" <+> ppr (sizeUnVarSet d) <+> parens (ppr g) ppr (Union a b) = text "Union" <+> parens (ppr a) <+> parens (ppr b) ppr (CG s) = text "CG" <+> ppr (sizeUnVarSet s) ppr (CBPG a b) = text "CBPG" <+> ppr (sizeUnVarSet a) <+> ppr (sizeUnVarSet b) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/IOEnv.hs0000644000000000000000000002101514472400112020017 0ustar0000000000000000 {-# LANGUAGE DerivingVia #-} {-# LANGUAGE PatternSynonyms #-} -- -- (c) The University of Glasgow 2002-2006 -- -- | The IO Monad with an environment -- -- The environment is passed around as a Reader monad but -- as its in the IO monad, mutable references can be used -- for updating state. 
-- module GHC.Data.IOEnv ( IOEnv, -- Instance of Monad -- Monad utilities module GHC.Utils.Monad, -- Errors failM, failWithM, IOEnvFailure(..), -- Getting at the environment getEnv, setEnv, updEnv, runIOEnv, unsafeInterleaveM, uninterruptibleMaskM_, tryM, tryAllM, tryMostM, fixM, -- I/O operations IORef, newMutVar, readMutVar, writeMutVar, updMutVar, updMutVarM, atomicUpdMutVar, atomicUpdMutVar' ) where import GHC.Prelude import GHC.Driver.Session import {-# SOURCE #-} GHC.Driver.Hooks import GHC.IO (catchException) import GHC.Utils.Exception import GHC.Unit.Module import GHC.Utils.Panic import Data.IORef ( IORef, newIORef, readIORef, writeIORef, modifyIORef, atomicModifyIORef, atomicModifyIORef' ) import System.IO.Unsafe ( unsafeInterleaveIO ) import System.IO ( fixIO ) import Control.Monad import Control.Monad.Trans.Reader import Control.Monad.Catch (MonadCatch, MonadMask, MonadThrow) import GHC.Utils.Monad import GHC.Utils.Logger import Control.Applicative (Alternative(..)) import GHC.Exts( oneShot ) import Control.Concurrent.MVar (newEmptyMVar, readMVar, putMVar) import Control.Concurrent (forkIO, killThread) ---------------------------------------------------------------------- -- Defining the monad type ---------------------------------------------------------------------- newtype IOEnv env a = IOEnv' (env -> IO a) deriving (MonadThrow, MonadCatch, MonadMask, MonadFix) via (ReaderT env IO) -- See Note [The one-shot state monad trick] in GHC.Utils.Monad instance Functor (IOEnv env) where fmap f (IOEnv g) = IOEnv $ \env -> fmap f (g env) a <$ IOEnv g = IOEnv $ \env -> g env >> pure a instance MonadIO (IOEnv env) where liftIO f = IOEnv (\_ -> f) pattern IOEnv :: forall env a. 
(env -> IO a) -> IOEnv env a pattern IOEnv m <- IOEnv' m where IOEnv m = IOEnv' (oneShot m) {-# COMPLETE IOEnv #-} unIOEnv :: IOEnv env a -> (env -> IO a) unIOEnv (IOEnv m) = m instance Monad (IOEnv m) where (>>=) = thenM (>>) = (*>) instance MonadFail (IOEnv m) where fail _ = failM -- Ignore the string instance Applicative (IOEnv m) where pure = returnM IOEnv f <*> IOEnv x = IOEnv (\ env -> f env <*> x env ) (*>) = thenM_ returnM :: a -> IOEnv env a returnM a = IOEnv (\ _ -> return a) thenM :: IOEnv env a -> (a -> IOEnv env b) -> IOEnv env b thenM (IOEnv m) f = IOEnv (\ env -> do { r <- m env ; unIOEnv (f r) env }) thenM_ :: IOEnv env a -> IOEnv env b -> IOEnv env b thenM_ (IOEnv m) f = IOEnv (\ env -> do { _ <- m env ; unIOEnv f env }) failM :: IOEnv env a failM = IOEnv (\ _ -> throwIO IOEnvFailure) failWithM :: String -> IOEnv env a failWithM s = IOEnv (\ _ -> ioError (userError s)) data IOEnvFailure = IOEnvFailure instance Show IOEnvFailure where show IOEnvFailure = "IOEnv failure" instance Exception IOEnvFailure instance ContainsDynFlags env => HasDynFlags (IOEnv env) where getDynFlags = do env <- getEnv return $! extractDynFlags env instance ContainsHooks env => HasHooks (IOEnv env) where getHooks = do env <- getEnv return $! extractHooks env instance ContainsLogger env => HasLogger (IOEnv env) where getLogger = do env <- getEnv return $! extractLogger env instance ContainsModule env => HasModule (IOEnv env) where getModule = do env <- getEnv return $ extractModule env ---------------------------------------------------------------------- -- Fundamental combinators specific to the monad ---------------------------------------------------------------------- --------------------------- runIOEnv :: env -> IOEnv env a -> IO a runIOEnv env (IOEnv m) = m env --------------------------- {-# NOINLINE fixM #-} -- Aargh! Not inlining fixM alleviates a space leak problem. 
-- Normally fixM is used with a lazy tuple match: if the optimiser is -- shown the definition of fixM, it occasionally transforms the code -- in such a way that the code generator doesn't spot the selector -- thunks. Sigh. fixM :: (a -> IOEnv env a) -> IOEnv env a fixM f = IOEnv (\ env -> fixIO (\ r -> unIOEnv (f r) env)) --------------------------- tryM :: IOEnv env r -> IOEnv env (Either IOEnvFailure r) -- Reflect UserError exceptions (only) into IOEnv monad -- Other exceptions are not caught; they are simply propagated as exns -- -- The idea is that errors in the program being compiled will give rise -- to UserErrors. But, say, pattern-match failures in GHC itself should -- not be caught here, else they'll be reported as errors in the program -- begin compiled! tryM (IOEnv thing) = IOEnv (\ env -> tryIOEnvFailure (thing env)) tryIOEnvFailure :: IO a -> IO (Either IOEnvFailure a) tryIOEnvFailure = try tryAllM :: IOEnv env r -> IOEnv env (Either SomeException r) -- Catch *all* synchronous exceptions -- This is used when running a Template-Haskell splice, when -- even a pattern-match failure is a programmer error tryAllM (IOEnv thing) = IOEnv (\ env -> safeTry (thing env)) -- | Like 'try', but doesn't catch asynchronous exceptions safeTry :: IO a -> IO (Either SomeException a) safeTry act = do var <- newEmptyMVar -- uninterruptible because we want to mask around 'killThread', which is interruptible. 
uninterruptibleMask $ \restore -> do -- Fork, so that 'act' is safe from all asynchronous exceptions other than the ones we send it t <- forkIO $ try (restore act) >>= putMVar var restore (readMVar var) `catchException` \(e :: SomeException) -> do -- Control reaches this point only if the parent thread was sent an async exception -- In that case, kill the 'act' thread and re-raise the exception killThread t throwIO e tryMostM :: IOEnv env r -> IOEnv env (Either SomeException r) tryMostM (IOEnv thing) = IOEnv (\ env -> tryMost (thing env)) --------------------------- unsafeInterleaveM :: IOEnv env a -> IOEnv env a unsafeInterleaveM (IOEnv m) = IOEnv (\ env -> unsafeInterleaveIO (m env)) uninterruptibleMaskM_ :: IOEnv env a -> IOEnv env a uninterruptibleMaskM_ (IOEnv m) = IOEnv (\ env -> uninterruptibleMask_ (m env)) ---------------------------------------------------------------------- -- Alternative/MonadPlus ---------------------------------------------------------------------- instance Alternative (IOEnv env) where empty = IOEnv (const empty) m <|> n = IOEnv (\env -> unIOEnv m env <|> unIOEnv n env) instance MonadPlus (IOEnv env) ---------------------------------------------------------------------- -- Accessing input/output ---------------------------------------------------------------------- newMutVar :: a -> IOEnv env (IORef a) newMutVar val = liftIO (newIORef val) writeMutVar :: IORef a -> a -> IOEnv env () writeMutVar var val = liftIO (writeIORef var val) readMutVar :: IORef a -> IOEnv env a readMutVar var = liftIO (readIORef var) updMutVar :: IORef a -> (a -> a) -> IOEnv env () updMutVar var upd = liftIO (modifyIORef var upd) updMutVarM :: IORef a -> (a -> IOEnv env a) -> IOEnv env () updMutVarM ref upd = do { contents <- liftIO $ readIORef ref ; new_contents <- upd contents ; liftIO $ writeIORef ref new_contents } -- | Atomically update the reference. Does not force the evaluation of the -- new variable contents. For strict update, use 'atomicUpdMutVar''. 
atomicUpdMutVar :: IORef a -> (a -> (a, b)) -> IOEnv env b atomicUpdMutVar var upd = liftIO (atomicModifyIORef var upd) -- | Strict variant of 'atomicUpdMutVar'. atomicUpdMutVar' :: IORef a -> (a -> (a, b)) -> IOEnv env b atomicUpdMutVar' var upd = liftIO (atomicModifyIORef' var upd) ---------------------------------------------------------------------- -- Accessing the environment ---------------------------------------------------------------------- getEnv :: IOEnv env env {-# INLINE getEnv #-} getEnv = IOEnv (\ env -> return env) -- | Perform a computation with a different environment setEnv :: env' -> IOEnv env' a -> IOEnv env a {-# INLINE setEnv #-} setEnv new_env (IOEnv m) = IOEnv (\ _ -> m new_env) -- | Perform a computation with an altered environment updEnv :: (env -> env') -> IOEnv env' a -> IOEnv env a {-# INLINE updEnv #-} updEnv upd (IOEnv m) = IOEnv (\ env -> m (upd env)) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/List/SetOps.hs0000644000000000000000000001675714472400112021210 0ustar0000000000000000{-# LANGUAGE CPP #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Set-like operations on lists -- -- Avoid using them as much as possible module GHC.Data.List.SetOps ( unionLists, minusList, -- Association lists Assoc, assoc, assocMaybe, assocUsing, assocDefault, assocDefaultUsing, -- Duplicate handling hasNoDups, removeDups, nubOrdBy, findDupsEq, equivClasses, -- Indexing getNth, -- Membership isIn, isn'tIn, ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Utils.Trace import qualified Data.List as L import qualified Data.List.NonEmpty as NE import Data.List.NonEmpty (NonEmpty(..)) import qualified Data.Set as S getNth :: Outputable a => [a] -> Int -> a getNth xs n = assertPpr (xs `lengthExceeds` n) (ppr n $$ ppr xs) $ xs !! 
n {- ************************************************************************ * * Treating lists as sets Assumes the lists contain no duplicates, but are unordered * * ************************************************************************ -} -- | Assumes that the arguments contain no duplicates unionLists :: (HasDebugCallStack, Outputable a, Eq a) => [a] -> [a] -> [a] -- We special case some reasonable common patterns. unionLists xs [] = xs unionLists [] ys = ys unionLists [x] ys | isIn "unionLists" x ys = ys | otherwise = x:ys unionLists xs [y] | isIn "unionLists" y xs = xs | otherwise = y:xs unionLists xs ys = warnPprTrace (lengthExceeds xs 100 || lengthExceeds ys 100) "unionLists" (ppr xs $$ ppr ys) $ [x | x <- xs, isn'tIn "unionLists" x ys] ++ ys -- | Calculate the set difference of two lists. This is -- /O((m + n) log n)/, where we subtract a list of /n/ elements -- from a list of /m/ elements. -- -- Extremely short cases are handled specially: -- When /m/ or /n/ is 0, this takes /O(1)/ time. When /m/ is 1, -- it takes /O(n)/ time. minusList :: Ord a => [a] -> [a] -> [a] -- There's no point building a set to perform just one lookup, so we handle -- extremely short lists specially. It might actually be better to use -- an O(m*n) algorithm when m is a little longer (perhaps up to 4 or even 5). -- The tipping point will be somewhere in the area of where /m/ and /log n/ -- become comparable, but we probably don't want to work too hard on this. minusList [] _ = [] minusList xs@[x] ys | x `elem` ys = [] | otherwise = xs -- Using an empty set or a singleton would also be silly, so let's not. minusList xs [] = xs minusList xs [y] = filter (/= y) xs -- When each list has at least two elements, we build a set from the -- second argument, allowing us to filter the first argument fairly -- efficiently. 
minusList xs ys = filter (`S.notMember` yss) xs where yss = S.fromList ys {- ************************************************************************ * * \subsection[Utils-assoc]{Association lists} * * ************************************************************************ Inefficient finite maps based on association lists and equality. -} -- | A finite mapping based on equality and association lists. type Assoc a b = [(a,b)] assoc :: (Eq a) => String -> Assoc a b -> a -> b assocDefault :: (Eq a) => b -> Assoc a b -> a -> b assocUsing :: (a -> a -> Bool) -> String -> Assoc a b -> a -> b -- | Lookup key, fail gracefully using Nothing if not found. assocMaybe :: (Eq a) => Assoc a b -> a -> Maybe b assocDefaultUsing :: (a -> a -> Bool) -> b -> Assoc a b -> a -> b assocDefaultUsing _ deflt [] _ = deflt assocDefaultUsing eq deflt ((k,v) : rest) key | k `eq` key = v | otherwise = assocDefaultUsing eq deflt rest key assoc crash_msg list key = assocDefaultUsing (==) (panic ("Failed in assoc: " ++ crash_msg)) list key assocDefault deflt list key = assocDefaultUsing (==) deflt list key assocUsing eq crash_msg list key = assocDefaultUsing eq (panic ("Failed in assoc: " ++ crash_msg)) list key assocMaybe alist key = lookup alist where lookup [] = Nothing lookup ((tv,ty):rest) = if key == tv then Just ty else lookup rest {- ************************************************************************ * * \subsection[Utils-dups]{Duplicate-handling} * * ************************************************************************ -} hasNoDups :: (Eq a) => [a] -> Bool hasNoDups xs = f [] xs where f _ [] = True f seen_so_far (x:xs) = if x `is_elem` seen_so_far then False else f (x:seen_so_far) xs is_elem = isIn "hasNoDups" equivClasses :: (a -> a -> Ordering) -- Comparison -> [a] -> [NonEmpty a] equivClasses _ [] = [] equivClasses _ [stuff] = [stuff :| []] equivClasses cmp items = NE.groupBy eq (L.sortBy cmp items) where eq a b = case cmp a b of { EQ -> True; _ -> False } -- | Remove the 
duplicates from a list using the provided -- comparison function. -- -- Returns the list without duplicates, and accumulates -- all the duplicates in the second component of its result. removeDups :: (a -> a -> Ordering) -- Comparison function -> [a] -> ([a], -- List with no duplicates [NonEmpty a]) -- List of duplicate groups. One representative -- from each group appears in the first result removeDups _ [] = ([], []) removeDups _ [x] = ([x],[]) removeDups cmp xs = case L.mapAccumR collect_dups [] (equivClasses cmp xs) of { (dups, xs') -> (xs', dups) } where collect_dups :: [NonEmpty a] -> NonEmpty a -> ([NonEmpty a], a) collect_dups dups_so_far (x :| []) = (dups_so_far, x) collect_dups dups_so_far dups@(x :| _) = (dups:dups_so_far, x) -- | Remove the duplicates from a list using the provided -- comparison function. nubOrdBy :: (a -> a -> Ordering) -> [a] -> [a] nubOrdBy cmp xs = fst (removeDups cmp xs) findDupsEq :: (a->a->Bool) -> [a] -> [NonEmpty a] findDupsEq _ [] = [] findDupsEq eq (x:xs) | L.null eq_xs = findDupsEq eq xs | otherwise = (x :| eq_xs) : findDupsEq eq neq_xs where (eq_xs, neq_xs) = L.partition (eq x) xs -- Debugging/specialising versions of \tr{elem} and \tr{notElem} # if !defined(DEBUG) isIn, isn'tIn :: Eq a => String -> a -> [a] -> Bool isIn _msg x ys = x `elem` ys isn'tIn _msg x ys = x `notElem` ys # else /* DEBUG */ isIn, isn'tIn :: (HasDebugCallStack, Eq a) => String -> a -> [a] -> Bool isIn msg x ys = elem100 0 x ys where elem100 :: Eq a => Int -> a -> [a] -> Bool elem100 _ _ [] = False elem100 i x (y:ys) | i > 100 = warnPprTrace True ("Over-long elem in " ++ msg) empty (x `elem` (y:ys)) | otherwise = x == y || elem100 (i + 1) x ys isn'tIn msg x ys = notElem100 0 x ys where notElem100 :: Eq a => Int -> a -> [a] -> Bool notElem100 _ _ [] = True notElem100 i x (y:ys) | i > 100 = warnPprTrace True ("Over-long notElem in " ++ msg) empty (x `notElem` (y:ys)) | otherwise = x /= y && notElem100 (i + 1) x ys # endif /* DEBUG */ 
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Maybe.hs0000644000000000000000000000756514472400112020112 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE FlexibleContexts #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Data.Maybe ( module Data.Maybe, MaybeErr(..), -- Instance of Monad failME, isSuccess, orElse, firstJust, firstJusts, firstJustsM, whenIsJust, expectJust, rightToMaybe, -- * MaybeT MaybeT(..), liftMaybeT, tryMaybeT ) where import GHC.Prelude import GHC.IO (catchException) import Control.Monad import Control.Monad.Trans.Maybe import Control.Exception (SomeException(..)) import Data.Maybe import Data.Foldable ( foldlM ) import GHC.Utils.Misc (HasCallStack) import Data.List.NonEmpty ( NonEmpty ) infixr 4 `orElse` {- ************************************************************************ * * \subsection[Maybe type]{The @Maybe@ type} * * ************************************************************************ -} firstJust :: Maybe a -> Maybe a -> Maybe a firstJust a b = firstJusts [a, b] -- | Takes a list of @Maybes@ and returns the first @Just@ if there is one, or -- @Nothing@ otherwise. firstJusts :: Foldable f => f (Maybe a) -> Maybe a firstJusts = msum {-# SPECIALISE firstJusts :: [Maybe a] -> Maybe a #-} {-# SPECIALISE firstJusts :: NonEmpty (Maybe a) -> Maybe a #-} -- | Takes computations returnings @Maybes@; tries each one in order. -- The first one to return a @Just@ wins. Returns @Nothing@ if all computations -- return @Nothing@. 
firstJustsM :: (Monad m, Foldable f) => f (m (Maybe a)) -> m (Maybe a) firstJustsM = foldlM go Nothing where go :: Monad m => Maybe a -> m (Maybe a) -> m (Maybe a) go Nothing action = action go result@(Just _) _action = return result expectJust :: HasCallStack => String -> Maybe a -> a {-# INLINE expectJust #-} expectJust _ (Just x) = x expectJust err Nothing = error ("expectJust " ++ err) whenIsJust :: Monad m => Maybe a -> (a -> m ()) -> m () whenIsJust (Just x) f = f x whenIsJust Nothing _ = return () -- | Flipped version of @fromMaybe@, useful for chaining. orElse :: Maybe a -> a -> a orElse = flip fromMaybe rightToMaybe :: Either a b -> Maybe b rightToMaybe (Left _) = Nothing rightToMaybe (Right x) = Just x {- ************************************************************************ * * \subsection[MaybeT type]{The @MaybeT@ monad transformer} * * ************************************************************************ -} -- We had our own MaybeT in the past. Now we reuse transformer's MaybeT liftMaybeT :: Monad m => m a -> MaybeT m a liftMaybeT act = MaybeT $ Just `liftM` act -- | Try performing an 'IO' action, failing on error. 
tryMaybeT :: IO a -> MaybeT IO a tryMaybeT action = MaybeT $ catchException (Just `fmap` action) handler where handler (SomeException _) = return Nothing {- ************************************************************************ * * \subsection[MaybeErr type]{The @MaybeErr@ type} * * ************************************************************************ -} data MaybeErr err val = Succeeded val | Failed err deriving (Functor) instance Applicative (MaybeErr err) where pure = Succeeded (<*>) = ap instance Monad (MaybeErr err) where Succeeded v >>= k = k v Failed e >>= _ = Failed e isSuccess :: MaybeErr err val -> Bool isSuccess (Succeeded {}) = True isSuccess (Failed {}) = False failME :: err -> MaybeErr err val failME e = Failed e ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/OrdList.hs0000644000000000000000000001756014472400112020431 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE UnboxedSums #-} {-# LANGUAGE UnboxedTuples #-} -- | Provide trees (of instructions), so that lists of instructions can be -- appended in linear time. 
module GHC.Data.OrdList ( OrdList, pattern NilOL, pattern ConsOL, pattern SnocOL, nilOL, isNilOL, unitOL, appOL, consOL, snocOL, concatOL, lastOL, headOL, mapOL, mapOL', fromOL, toOL, foldrOL, foldlOL, reverseOL, fromOLReverse, strictlyEqOL, strictlyOrdOL ) where import GHC.Prelude import Data.Foldable import GHC.Utils.Misc (strictMap) import GHC.Utils.Outputable import GHC.Utils.Panic import qualified Data.Semigroup as Semigroup infixl 5 `appOL` infixl 5 `snocOL` infixr 5 `consOL` data OrdList a = None | One a | Many [a] -- Invariant: non-empty | Cons a (OrdList a) | Snoc (OrdList a) a | Two (OrdList a) -- Invariant: non-empty (OrdList a) -- Invariant: non-empty deriving (Functor) instance Outputable a => Outputable (OrdList a) where ppr ol = ppr (fromOL ol) -- Convert to list and print that instance Semigroup (OrdList a) where (<>) = appOL instance Monoid (OrdList a) where mempty = nilOL mappend = (Semigroup.<>) mconcat = concatOL instance Foldable OrdList where foldr = foldrOL foldl' = foldlOL toList = fromOL null = isNilOL length = lengthOL instance Traversable OrdList where traverse f xs = toOL <$> traverse f (fromOL xs) nilOL :: OrdList a isNilOL :: OrdList a -> Bool unitOL :: a -> OrdList a snocOL :: OrdList a -> a -> OrdList a consOL :: a -> OrdList a -> OrdList a appOL :: OrdList a -> OrdList a -> OrdList a concatOL :: [OrdList a] -> OrdList a headOL :: OrdList a -> a lastOL :: OrdList a -> a lengthOL :: OrdList a -> Int nilOL = None unitOL as = One as snocOL as b = Snoc as b consOL a bs = Cons a bs concatOL aas = foldr appOL None aas pattern NilOL :: OrdList a pattern NilOL <- (isNilOL -> True) where NilOL = None -- | An unboxed 'Maybe' type with two unboxed fields in the 'Just' case. -- Useful for defining 'viewCons' and 'viewSnoc' without overhead. 
type VMaybe a b = (# (# a, b #) | (# #) #) pattern VJust :: a -> b -> VMaybe a b pattern VJust a b = (# (# a, b #) | #) pattern VNothing :: VMaybe a b pattern VNothing = (# | (# #) #) {-# COMPLETE VJust, VNothing #-} pattern ConsOL :: a -> OrdList a -> OrdList a pattern ConsOL x xs <- (viewCons -> VJust x xs) where ConsOL x xs = consOL x xs {-# COMPLETE NilOL, ConsOL #-} viewCons :: OrdList a -> VMaybe a (OrdList a) viewCons (One a) = VJust a NilOL viewCons (Cons a as) = VJust a as viewCons (Snoc as a) = case viewCons as of VJust a' as' -> VJust a' (Snoc as' a) VNothing -> VJust a NilOL viewCons (Two as1 as2) = case viewCons as1 of VJust a' as1' -> VJust a' (Two as1' as2) VNothing -> viewCons as2 viewCons _ = VNothing pattern SnocOL :: OrdList a -> a -> OrdList a pattern SnocOL xs x <- (viewSnoc -> VJust xs x) where SnocOL xs x = snocOL xs x {-# COMPLETE NilOL, SnocOL #-} viewSnoc :: OrdList a -> VMaybe (OrdList a) a viewSnoc (One a) = VJust NilOL a viewSnoc (Many (reverse -> a:as)) = VJust (Many (reverse as)) a viewSnoc (Snoc as a) = VJust as a viewSnoc (Cons a as) = case viewSnoc as of VJust as' a' -> VJust (Cons a as') a' VNothing -> VJust NilOL a viewSnoc (Two as1 as2) = case viewSnoc as2 of VJust as2' a' -> VJust (Two as1 as2') a' VNothing -> viewSnoc as1 viewSnoc _ = VNothing headOL None = panic "headOL" headOL (One a) = a headOL (Many as) = head as headOL (Cons a _) = a headOL (Snoc as _) = headOL as headOL (Two as _) = headOL as lastOL None = panic "lastOL" lastOL (One a) = a lastOL (Many as) = last as lastOL (Cons _ as) = lastOL as lastOL (Snoc _ a) = a lastOL (Two _ as) = lastOL as lengthOL None = 0 lengthOL (One _) = 1 lengthOL (Many as) = length as lengthOL (Cons _ as) = 1 + length as lengthOL (Snoc as _) = 1 + length as lengthOL (Two as bs) = length as + length bs isNilOL None = True isNilOL _ = False None `appOL` b = b a `appOL` None = a One a `appOL` b = Cons a b a `appOL` One b = Snoc a b a `appOL` b = Two a b fromOL :: OrdList a -> [a] fromOL a = 
go a [] where go None acc = acc go (One a) acc = a : acc go (Cons a b) acc = a : go b acc go (Snoc a b) acc = go a (b:acc) go (Two a b) acc = go a (go b acc) go (Many xs) acc = xs ++ acc fromOLReverse :: OrdList a -> [a] fromOLReverse a = go a [] -- acc is already in reverse order where go :: OrdList a -> [a] -> [a] go None acc = acc go (One a) acc = a : acc go (Cons a b) acc = go b (a : acc) go (Snoc a b) acc = b : go a acc go (Two a b) acc = go b (go a acc) go (Many xs) acc = reverse xs ++ acc mapOL :: (a -> b) -> OrdList a -> OrdList b mapOL = fmap mapOL' :: (a->b) -> OrdList a -> OrdList b mapOL' _ None = None mapOL' f (One x) = One $! f x mapOL' f (Cons x xs) = let !x1 = f x !xs1 = mapOL' f xs in Cons x1 xs1 mapOL' f (Snoc xs x) = let !x1 = f x !xs1 = mapOL' f xs in Snoc xs1 x1 mapOL' f (Two b1 b2) = let !b1' = mapOL' f b1 !b2' = mapOL' f b2 in Two b1' b2' mapOL' f (Many xs) = Many $! strictMap f xs foldrOL :: (a->b->b) -> b -> OrdList a -> b foldrOL _ z None = z foldrOL k z (One x) = k x z foldrOL k z (Cons x xs) = k x (foldrOL k z xs) foldrOL k z (Snoc xs x) = foldrOL k (k x z) xs foldrOL k z (Two b1 b2) = foldrOL k (foldrOL k z b2) b1 foldrOL k z (Many xs) = foldr k z xs -- | Strict left fold. 
foldlOL :: (b->a->b) -> b -> OrdList a -> b foldlOL _ z None = z foldlOL k z (One x) = k z x foldlOL k z (Cons x xs) = let !z' = (k z x) in foldlOL k z' xs foldlOL k z (Snoc xs x) = let !z' = (foldlOL k z xs) in k z' x foldlOL k z (Two b1 b2) = let !z' = (foldlOL k z b1) in foldlOL k z' b2 foldlOL k z (Many xs) = foldl' k z xs toOL :: [a] -> OrdList a toOL [] = None toOL [x] = One x toOL xs = Many xs reverseOL :: OrdList a -> OrdList a reverseOL None = None reverseOL (One x) = One x reverseOL (Cons a b) = Snoc (reverseOL b) a reverseOL (Snoc a b) = Cons b (reverseOL a) reverseOL (Two a b) = Two (reverseOL b) (reverseOL a) reverseOL (Many xs) = Many (reverse xs) -- | Compare not only the values but also the structure of two lists strictlyEqOL :: Eq a => OrdList a -> OrdList a -> Bool strictlyEqOL None None = True strictlyEqOL (One x) (One y) = x == y strictlyEqOL (Cons a as) (Cons b bs) = a == b && as `strictlyEqOL` bs strictlyEqOL (Snoc as a) (Snoc bs b) = a == b && as `strictlyEqOL` bs strictlyEqOL (Two a1 a2) (Two b1 b2) = a1 `strictlyEqOL` b1 && a2 `strictlyEqOL` b2 strictlyEqOL (Many as) (Many bs) = as == bs strictlyEqOL _ _ = False -- | Compare not only the values but also the structure of two lists strictlyOrdOL :: Ord a => OrdList a -> OrdList a -> Ordering strictlyOrdOL None None = EQ strictlyOrdOL None _ = LT strictlyOrdOL (One x) (One y) = compare x y strictlyOrdOL (One _) _ = LT strictlyOrdOL (Cons a as) (Cons b bs) = compare a b `mappend` strictlyOrdOL as bs strictlyOrdOL (Cons _ _) _ = LT strictlyOrdOL (Snoc as a) (Snoc bs b) = compare a b `mappend` strictlyOrdOL as bs strictlyOrdOL (Snoc _ _) _ = LT strictlyOrdOL (Two a1 a2) (Two b1 b2) = (strictlyOrdOL a1 b1) `mappend` (strictlyOrdOL a2 b2) strictlyOrdOL (Two _ _) _ = LT strictlyOrdOL (Many as) (Many bs) = compare as bs strictlyOrdOL (Many _ ) _ = GT ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Pair.hs0000644000000000000000000000277214472400112017743 0ustar0000000000000000{- A simple homogeneous 
pair type with useful Functor, Applicative, and Traversable instances. -} {-# LANGUAGE DeriveFunctor #-} module GHC.Data.Pair ( Pair(..) , unPair , toPair , swap , pLiftFst , pLiftSnd ) where import GHC.Prelude import GHC.Utils.Outputable import qualified Data.Semigroup as Semi data Pair a = Pair { pFst :: a, pSnd :: a } deriving (Functor) -- Note that Pair is a *unary* type constructor -- whereas (,) is binary -- The important thing about Pair is that it has a *homogeneous* -- Functor instance, so you can easily apply the same function -- to both components instance Applicative Pair where pure x = Pair x x (Pair f g) <*> (Pair x y) = Pair (f x) (g y) instance Foldable Pair where foldMap f (Pair x y) = f x `mappend` f y instance Traversable Pair where traverse f (Pair x y) = Pair <$> f x <*> f y instance Semi.Semigroup a => Semi.Semigroup (Pair a) where Pair a1 b1 <> Pair a2 b2 = Pair (a1 Semi.<> a2) (b1 Semi.<> b2) instance (Semi.Semigroup a, Monoid a) => Monoid (Pair a) where mempty = Pair mempty mempty mappend = (Semi.<>) instance Outputable a => Outputable (Pair a) where ppr (Pair a b) = ppr a <+> char '~' <+> ppr b unPair :: Pair a -> (a,a) unPair (Pair x y) = (x,y) toPair :: (a,a) -> Pair a toPair (x,y) = Pair x y swap :: Pair a -> Pair a swap (Pair x y) = Pair y x pLiftFst :: (a -> a) -> Pair a -> Pair a pLiftFst f (Pair a b) = Pair (f a) b pLiftSnd :: (a -> a) -> Pair a -> Pair a pLiftSnd f (Pair a b) = Pair a (f b) ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Data/ShortText.hs0000644000000000000000000001326314472375231022672 0ustar0000000000000000{-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples, GeneralizedNewtypeDeriving, DerivingStrategies, CPP #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} -- gross hack: we manuvered ourselves into a position where we can't boot GHC with a LLVM based GHC anymore. -- LLVM based GHC's fail to compile memcmp ffi calls. 
These end up as memcmp$def in the llvm ir, however we -- don't have any prototypes and subsequently the llvm toolchain chokes on them. Since 7fdcce6d, we use -- ShortText for the package database. This however introduces this very module; which through inlining ends -- up bringing memcmp_ByteArray from bytestring:Data.ByteString.Short.Internal into scope, which results in -- the memcmp call we choke on. -- -- The solution thusly is to force late binding via the linker instead of inlining when comping with the -- bootstrap compiler. This will produce a slower (slightly less optimised) stage1 compiler only. -- -- See issue 18857. hsyl20 deserves credit for coming up with the idea for the soltuion. -- -- This can be removed when we exit the boot compiler window. Thus once we drop GHC-9.2 as boot compiler, -- we can drop this code as well. #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) {-# OPTIONS_GHC -fignore-interface-pragmas #-} #endif -- | -- An Unicode string for internal GHC use. Meant to replace String -- in places where being a lazy linked is not very useful and a more -- memory efficient data structure is desirable. -- Very similar to FastString, but not hash-consed and with some extra instances and -- functions for serialisation and I/O. Should be imported qualified. module GHC.Data.ShortText ( -- * ShortText ShortText(..), -- ** Conversion to and from String pack, unpack, -- ** Operations codepointLength, byteLength, GHC.Data.ShortText.null, splitFilePath, GHC.Data.ShortText.head, stripPrefix ) where import Prelude import Control.Monad (guard) import Control.DeepSeq as DeepSeq import Data.Binary import qualified Data.ByteString.Char8 as B8 import qualified Data.ByteString.Short.Internal as SBS import GHC.Exts import GHC.IO import GHC.Utils.Encoding import System.FilePath (isPathSeparator) {-| A 'ShortText' is a modified UTF-8 encoded string meant for short strings like file paths, module descriptions, etc. 
-} newtype ShortText = ShortText { contents :: SBS.ShortByteString } deriving stock (Show) deriving newtype (Eq, Ord, Binary, Semigroup, Monoid, NFData) -- We don't want to derive this one from ShortByteString since that one won't handle -- UTF-8 characters correctly. instance IsString ShortText where fromString = pack -- | /O(n)/ Returns the length of the 'ShortText' in characters. codepointLength :: ShortText -> Int codepointLength st = unsafeDupablePerformIO $ countUTF8Chars (contents st) -- | /O(1)/ Returns the length of the 'ShortText' in bytes. byteLength :: ShortText -> Int byteLength st = SBS.length $ contents st -- | /O(n)/ Convert a 'String' into a 'ShortText'. pack :: String -> ShortText pack s = unsafeDupablePerformIO $ ShortText <$> utf8EncodeShortByteString s -- | /O(n)/ Convert a 'ShortText' into a 'String'. unpack :: ShortText -> String unpack st = utf8DecodeShortByteString $ contents st -- | /O(1)/ Test whether the 'ShortText' is the empty string. null :: ShortText -> Bool null st = SBS.null $ contents st -- | /O(n)/ Split a 'ShortText' representing a file path into its components by separating -- on the file separator characters for this platform. splitFilePath :: ShortText -> [ShortText] -- This seems dangerous, but since the path separators are in the ASCII set they map down -- to a single byte when encoded in UTF-8 and so this should work even when casting to ByteString. -- We DeepSeq.force the resulting list so that we can be sure that no references to the -- bytestring in `st'` remain in unevaluated thunks, which might prevent `st'` from being -- collected by the GC. splitFilePath st = DeepSeq.force $ map (ShortText . SBS.toShort) $ B8.splitWith isPathSeparator st' where st' = SBS.fromShort $ contents st -- | /O(1)/ Returns the first UTF-8 codepoint in the 'ShortText'. Depending on the string in -- question, this may or may not be the actual first character in the string due to Unicode -- non-printable characters. 
head :: ShortText -> Char head st | SBS.null $ contents st = error "head: Empty ShortText" | otherwise = Prelude.head $ unpack st -- | /O(n)/ The 'stripPrefix' function takes two 'ShortText's and returns 'Just' the remainder of -- the second iff the first is its prefix, and otherwise Nothing. stripPrefix :: ShortText -> ShortText -> Maybe ShortText stripPrefix prefix st = do let !(SBS.SBS prefixBA) = contents prefix let !(SBS.SBS stBA) = contents st let prefixLength = sizeofByteArray# prefixBA let stLength = sizeofByteArray# stBA -- If the length of 'st' is not >= than the length of 'prefix', it is impossible for 'prefix' -- to be the prefix of `st`. guard $ (I# stLength) >= (I# prefixLength) -- 'prefix' is a prefix of 'st' if the first bytes of 'st' -- are equal to 'prefix' guard $ I# (compareByteArrays# prefixBA 0# stBA 0# prefixLength) == 0 -- Allocate a new ByteArray# and copy the remainder of the 'st' into it unsafeDupablePerformIO $ do let newBAsize = (stLength -# prefixLength) newSBS <- IO $ \s0 -> let !(# s1, ba #) = newByteArray# newBAsize s0 s2 = copyByteArray# stBA prefixLength ba 0# newBAsize s1 !(# s3, fba #) = unsafeFreezeByteArray# ba s2 in (# s3, SBS.SBS fba #) return . Just . ShortText $ newSBS ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Data/SizedSeq.hs0000644000000000000000000000226114470055371022447 0ustar0000000000000000{-# LANGUAGE StandaloneDeriving, DeriveGeneric #-} module GHC.Data.SizedSeq ( SizedSeq(..) , emptySS , addToSS , addListToSS , ssElts , sizeSS ) where import Prelude -- See note [Why do we import Prelude here?] import Control.DeepSeq import Data.Binary import Data.List (genericLength) import GHC.Generics data SizedSeq a = SizedSeq {-# UNPACK #-} !Word [a] deriving (Generic, Show) instance Functor SizedSeq where fmap f (SizedSeq sz l) = SizedSeq sz (fmap f l) instance Foldable SizedSeq where foldr f c ss = foldr f c (ssElts ss) instance Traversable SizedSeq where traverse f (SizedSeq sz l) = SizedSeq sz . 
reverse <$> traverse f (reverse l) instance Binary a => Binary (SizedSeq a) instance NFData a => NFData (SizedSeq a) where rnf (SizedSeq _ xs) = rnf xs emptySS :: SizedSeq a emptySS = SizedSeq 0 [] addToSS :: SizedSeq a -> a -> SizedSeq a addToSS (SizedSeq n r_xs) x = SizedSeq (n+1) (x:r_xs) addListToSS :: SizedSeq a -> [a] -> SizedSeq a addListToSS (SizedSeq n r_xs) xs = SizedSeq (n + genericLength xs) (reverse xs ++ r_xs) ssElts :: SizedSeq a -> [a] ssElts (SizedSeq _ r_xs) = reverse r_xs sizeSS :: SizedSeq a -> Word sizeSS (SizedSeq n _) = n ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/SmallArray.hs0000644000000000000000000000504714472400112021115 0ustar0000000000000000{-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE BlockArguments #-} -- | Small-array module GHC.Data.SmallArray ( SmallMutableArray (..) , SmallArray (..) , newSmallArray , writeSmallArray , freezeSmallArray , unsafeFreezeSmallArray , indexSmallArray , listToArray ) where import GHC.Exts import GHC.Prelude import GHC.ST data SmallArray a = SmallArray (SmallArray# a) data SmallMutableArray s a = SmallMutableArray (SmallMutableArray# s a) newSmallArray :: Int -- ^ size -> a -- ^ initial contents -> State# s -> (# State# s, SmallMutableArray s a #) {-# INLINE newSmallArray #-} newSmallArray (I# sz) x s = case newSmallArray# sz x s of (# s', a #) -> (# s', SmallMutableArray a #) writeSmallArray :: SmallMutableArray s a -- ^ array -> Int -- ^ index -> a -- ^ new element -> State# s -> State# s {-# INLINE writeSmallArray #-} writeSmallArray (SmallMutableArray a) (I# i) x = writeSmallArray# a i x -- | Copy and freeze a slice of a mutable array. 
freezeSmallArray :: SmallMutableArray s a -- ^ source -> Int -- ^ offset -> Int -- ^ length -> State# s -> (# State# s, SmallArray a #) {-# INLINE freezeSmallArray #-} freezeSmallArray (SmallMutableArray ma) (I# offset) (I# len) s = case freezeSmallArray# ma offset len s of (# s', a #) -> (# s', SmallArray a #) -- | Freeze a mutable array (no copy!) unsafeFreezeSmallArray :: SmallMutableArray s a -> State# s -> (# State# s, SmallArray a #) {-# INLINE unsafeFreezeSmallArray #-} unsafeFreezeSmallArray (SmallMutableArray ma) s = case unsafeFreezeSmallArray# ma s of (# s', a #) -> (# s', SmallArray a #) -- | Index a small-array (no bounds checking!) indexSmallArray :: SmallArray a -- ^ array -> Int -- ^ index -> a {-# INLINE indexSmallArray #-} indexSmallArray (SmallArray sa#) (I# i) = case indexSmallArray# sa# i of (# v #) -> v -- | Convert a list into an array. listToArray :: Int -> (e -> Int) -> (e -> a) -> [e] -> SmallArray a {-# INLINE listToArray #-} listToArray (I# size) index_of value_of xs = runST $ ST \s -> let index_of' e = case index_of e of I# i -> i write_elems ma es s = case es of [] -> s e:es' -> case writeSmallArray# ma (index_of' e) (value_of e) s of s' -> write_elems ma es' s' in case newSmallArray# size undefined s of (# s', ma #) -> case write_elems ma xs s' of s'' -> case unsafeFreezeSmallArray# ma s'' of (# s''', a #) -> (# s''', SmallArray a #) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Stream.hs0000644000000000000000000001252314472400112020276 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} -- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow 2012 -- -- ----------------------------------------------------------------------------- -- | Monadic streams module GHC.Data.Stream ( Stream(..), StreamS(..), runStream, yield, liftIO, collect, consume, fromList, map, mapM, mapAccumL_ ) where import GHC.Prelude hiding (map,mapM) import Control.Monad 
hiding (mapM) import Control.Monad.IO.Class -- | -- @Stream m a b@ is a computation in some Monad @m@ that delivers a sequence -- of elements of type @a@ followed by a result of type @b@. -- -- More concretely, a value of type @Stream m a b@ can be run using @runStreamInternal@ -- in the Monad @m@, and it delivers either -- -- * the final result: @Done b@, or -- * @Yield a str@ where @a@ is the next element in the stream, and @str@ -- is the rest of the stream -- * @Effect mstr@ where @mstr@ is some action running in @m@ which -- generates the rest of the stream. -- -- Stream is itself a Monad, and provides an operation 'yield' that -- produces a new element of the stream. This makes it convenient to turn -- existing monadic computations into streams. -- -- The idea is that Stream is useful for making a monadic computation -- that produces values from time to time. This can be used for -- knitting together two complex monadic operations, so that the -- producer does not have to produce all its values before the -- consumer starts consuming them. We make the producer into a -- Stream, and the consumer pulls on the stream each time it wants a -- new value. -- -- 'Stream' is implemented in the "yoneda" style for efficiency. By -- representing a stream in this manner 'fmap' and '>>=' operations are -- accumulated in the function parameters before being applied once when -- the stream is destroyed. In the old implementation each usage of 'mapM' -- and '>>=' would traverse the entire stream in order to apply the -- substitution at the leaves. -- -- The >>= operation for 'Stream' was a hot-spot in the ticky profile for -- the "ManyConstructors" test which called the 'cg' function many times in -- @StgToCmm.hs@ -- newtype Stream m a b = Stream { runStreamInternal :: forall r' r . 
(a -> m r') -- For fusing calls to `map` and `mapM` -> (b -> StreamS m r' r) -- For fusing `>>=` -> StreamS m r' r } runStream :: Applicative m => Stream m r' r -> StreamS m r' r runStream st = runStreamInternal st pure Done data StreamS m a b = Yield a (StreamS m a b) | Done b | Effect (m (StreamS m a b)) instance Monad m => Functor (StreamS m a) where fmap = liftM instance Monad m => Applicative (StreamS m a) where pure = Done (<*>) = ap instance Monad m => Monad (StreamS m a) where a >>= k = case a of Done r -> k r Yield a s -> Yield a (s >>= k) Effect m -> Effect (fmap (>>= k) m) instance Functor (Stream f a) where fmap = liftM instance Applicative (Stream m a) where pure a = Stream $ \_f g -> g a (<*>) = ap instance Monad (Stream m a) where Stream m >>= k = Stream $ \f h -> m f (\a -> runStreamInternal (k a) f h) instance MonadIO m => MonadIO (Stream m b) where liftIO io = Stream $ \_f g -> Effect (g <$> liftIO io) yield :: Monad m => a -> Stream m a () yield a = Stream $ \f rest -> Effect (flip Yield (rest ()) <$> f a) -- | Turn a Stream into an ordinary list, by demanding all the elements. collect :: Monad m => Stream m a () -> m [a] collect str = go [] (runStream str) where go acc (Done ()) = return (reverse acc) go acc (Effect m) = m >>= go acc go acc (Yield a k) = go (a:acc) k consume :: (Monad m, Monad n) => Stream m a b -> (forall a . m a -> n a) -> (a -> n ()) -> n b consume str l f = go (runStream str) where go (Done r) = return r go (Yield a p) = f a >> go p go (Effect m) = l m >>= go -- | Turn a list into a 'Stream', by yielding each element in turn. fromList :: Monad m => [a] -> Stream m a () fromList = mapM_ yield -- | Apply a function to each element of a 'Stream', lazily map :: Monad m => (a -> b) -> Stream m a x -> Stream m b x map f str = Stream $ \g h -> runStreamInternal str (g . 
f) h -- | Apply a monadic operation to each element of a 'Stream', lazily mapM :: Monad m => (a -> m b) -> Stream m a x -> Stream m b x mapM f str = Stream $ \g h -> runStreamInternal str (g <=< f) h -- | Note this is not very efficient because it traverses the whole stream -- before rebuilding it, avoid using it if you can. mapAccumL used to -- implemented but it wasn't used anywhere in the compiler and has similar -- effiency problems. mapAccumL_ :: forall m a b c r . Monad m => (c -> a -> m (c,b)) -> c -> Stream m a r -> Stream m b (c, r) mapAccumL_ f c str = Stream $ \f h -> go c f h (runStream str) where go :: c -> (b -> m r') -> ((c, r) -> StreamS m r' r1) -> StreamS m a r -> StreamS m r' r1 go c _f1 h1 (Done r) = h1 (c, r) go c f1 h1 (Yield a p) = Effect (f c a >>= (\(c', b) -> f1 b >>= \r' -> return $ Yield r' (go c' f1 h1 p))) go c f1 h1 (Effect m) = Effect (go c f1 h1 <$> m) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/Strict.hs0000644000000000000000000000327314472400112020315 0ustar0000000000000000-- Strict counterparts to common data structures, -- e.g. tuples, lists, maybes, etc. -- -- Import this module qualified as Strict. 
{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} module GHC.Data.Strict ( Maybe(Nothing, Just), fromMaybe, Pair(And), -- Not used at the moment: -- -- Either(Left, Right), -- List(Nil, Cons), ) where import GHC.Prelude hiding (Maybe(..), Either(..)) import Control.Applicative import Data.Semigroup import Data.Data data Maybe a = Nothing | Just !a deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) fromMaybe :: a -> Maybe a -> a fromMaybe d Nothing = d fromMaybe _ (Just x) = x apMaybe :: Maybe (a -> b) -> Maybe a -> Maybe b apMaybe (Just f) (Just x) = Just (f x) apMaybe _ _ = Nothing altMaybe :: Maybe a -> Maybe a -> Maybe a altMaybe Nothing r = r altMaybe l _ = l instance Semigroup a => Semigroup (Maybe a) where Nothing <> b = b a <> Nothing = a Just a <> Just b = Just (a <> b) instance Semigroup a => Monoid (Maybe a) where mempty = Nothing instance Applicative Maybe where pure = Just (<*>) = apMaybe instance Alternative Maybe where empty = Nothing (<|>) = altMaybe data Pair a b = !a `And` !b deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) -- The definitions below are commented out because they are -- not used anywhere in the compiler, but are useful to showcase -- the intent behind this module (i.e. how it may evolve). -- -- data Either a b = Left !a | Right !b -- deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) -- -- data List a = Nil | !a `Cons` !(List a) -- deriving (Eq, Ord, Show, Functor, Foldable, Traversable, Data) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/StringBuffer.hs0000644000000000000000000003705314472400112021450 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The University of Glasgow, 1997-2006 Buffers for scanning string input stored in external arrays. 
-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# OPTIONS_GHC -O2 #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected module GHC.Data.StringBuffer ( StringBuffer(..), -- non-abstract for vs\/HaskellService -- * Creation\/destruction hGetStringBuffer, hGetStringBufferBlock, hPutStringBuffer, appendStringBuffers, stringToStringBuffer, stringBufferFromByteString, -- * Inspection nextChar, currentChar, prevChar, atEnd, fingerprintStringBuffer, -- * Moving and comparison stepOn, offsetBytes, byteDiff, atLine, -- * Conversion lexemeToString, lexemeToFastString, decodePrevNChars, -- * Parsing integers parseUnsignedInteger, -- * Checking for bi-directional format characters containsBidirectionalFormatChar, bidirectionalFormatChars ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Encoding import GHC.Utils.IO.Unsafe import GHC.Utils.Panic.Plain import GHC.Utils.Exception ( bracket_ ) import GHC.Fingerprint import Data.Maybe import System.IO import System.IO.Unsafe ( unsafePerformIO ) import GHC.IO.Encoding.UTF8 ( mkUTF8 ) import GHC.IO.Encoding.Failure ( CodingFailureMode(IgnoreCodingFailure) ) import qualified Data.ByteString.Internal as BS import qualified Data.ByteString as BS import Data.ByteString ( ByteString ) import GHC.Exts import Foreign #if MIN_VERSION_base(4,15,0) import GHC.ForeignPtr (unsafeWithForeignPtr) #else unsafeWithForeignPtr :: ForeignPtr a -> (Ptr a -> IO b) -> IO b unsafeWithForeignPtr = withForeignPtr #endif -- ----------------------------------------------------------------------------- -- The StringBuffer type -- |A StringBuffer is an internal pointer to a sized chunk of bytes. -- The bytes are intended to be *immutable*. There are pure -- operations to read the contents of a StringBuffer. -- -- A StringBuffer may have a finalizer, depending on how it was -- obtained. 
-- data StringBuffer = StringBuffer { buf :: {-# UNPACK #-} !(ForeignPtr Word8), len :: {-# UNPACK #-} !Int, -- length cur :: {-# UNPACK #-} !Int -- current pos } -- The buffer is assumed to be UTF-8 encoded, and furthermore -- we add three @\'\\0\'@ bytes to the end as sentinels so that the -- decoder doesn't have to check for overflow at every single byte -- of a multibyte sequence. instance Show StringBuffer where showsPrec _ s = showString "" -- ----------------------------------------------------------------------------- -- Creation / Destruction -- | Read a file into a 'StringBuffer'. The resulting buffer is automatically -- managed by the garbage collector. hGetStringBuffer :: FilePath -> IO StringBuffer hGetStringBuffer fname = do h <- openBinaryFile fname ReadMode size_i <- hFileSize h offset_i <- skipBOM h size_i 0 -- offset is 0 initially let size = fromIntegral $ size_i - offset_i buf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr buf $ \ptr -> do r <- if size == 0 then return 0 else hGetBuf h ptr size hClose h if (r /= size) then ioError (userError "short read of file") else newUTF8StringBuffer buf ptr size hGetStringBufferBlock :: Handle -> Int -> IO StringBuffer hGetStringBufferBlock handle wanted = do size_i <- hFileSize handle offset_i <- hTell handle >>= skipBOM handle size_i let size = min wanted (fromIntegral $ size_i-offset_i) buf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr buf $ \ptr -> do r <- if size == 0 then return 0 else hGetBuf handle ptr size if r /= size then ioError (userError $ "short read of file: "++show(r,size,size_i,handle)) else newUTF8StringBuffer buf ptr size hPutStringBuffer :: Handle -> StringBuffer -> IO () hPutStringBuffer hdl (StringBuffer buf len cur) = unsafeWithForeignPtr (plusForeignPtr buf cur) $ \ptr -> hPutBuf hdl ptr len -- | Skip the byte-order mark if there is one (see #1744 and #6016), -- and return the new position of the handle in bytes. 
-- -- This is better than treating #FEFF as whitespace, -- because that would mess up layout. We don't have a concept -- of zero-width whitespace in Haskell: all whitespace codepoints -- have a width of one column. skipBOM :: Handle -> Integer -> Integer -> IO Integer skipBOM h size offset = -- Only skip BOM at the beginning of a file. if size > 0 && offset == 0 then do -- Validate assumption that handle is in binary mode. assertM (hGetEncoding h >>= return . isNothing) -- Temporarily select utf8 encoding with error ignoring, -- to make `hLookAhead` and `hGetChar` return full Unicode characters. bracket_ (hSetEncoding h safeEncoding) (hSetBinaryMode h True) $ do c <- hLookAhead h if c == '\xfeff' then hGetChar h >> hTell h else return offset else return offset where safeEncoding = mkUTF8 IgnoreCodingFailure newUTF8StringBuffer :: ForeignPtr Word8 -> Ptr Word8 -> Int -> IO StringBuffer newUTF8StringBuffer buf ptr size = do pokeArray (ptr `plusPtr` size :: Ptr Word8) [0,0,0] -- sentinels for UTF-8 decoding return $ StringBuffer buf size 0 appendStringBuffers :: StringBuffer -> StringBuffer -> IO StringBuffer appendStringBuffers sb1 sb2 = do newBuf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr newBuf $ \ptr -> unsafeWithForeignPtr (buf sb1) $ \sb1Ptr -> unsafeWithForeignPtr (buf sb2) $ \sb2Ptr -> do copyArray ptr (sb1Ptr `advancePtr` cur sb1) sb1_len copyArray (ptr `advancePtr` sb1_len) (sb2Ptr `advancePtr` cur sb2) sb2_len pokeArray (ptr `advancePtr` size) [0,0,0] return (StringBuffer newBuf size 0) where sb1_len = calcLen sb1 sb2_len = calcLen sb2 calcLen sb = len sb - cur sb size = sb1_len + sb2_len -- | Encode a 'String' into a 'StringBuffer' as UTF-8. The resulting buffer -- is automatically managed by the garbage collector. 
stringToStringBuffer :: String -> StringBuffer stringToStringBuffer str = unsafePerformIO $ do let size = utf8EncodedLength str buf <- mallocForeignPtrArray (size+3) unsafeWithForeignPtr buf $ \ptr -> do utf8EncodeStringPtr ptr str pokeArray (ptr `plusPtr` size :: Ptr Word8) [0,0,0] -- sentinels for UTF-8 decoding return (StringBuffer buf size 0) -- | Convert a UTF-8 encoded 'ByteString' into a 'StringBuffer. This really -- relies on the internals of both 'ByteString' and 'StringBuffer'. -- -- /O(n)/ (but optimized into a @memcpy@ by @bytestring@ under the hood) stringBufferFromByteString :: ByteString -> StringBuffer stringBufferFromByteString bs = let BS.PS fp off len = BS.append bs (BS.pack [0,0,0]) in StringBuffer { buf = fp, len = len - 3, cur = off } -- ----------------------------------------------------------------------------- -- Grab a character -- | Return the first UTF-8 character of a nonempty 'StringBuffer' and as well -- the remaining portion (analogous to 'Data.List.uncons'). __Warning:__ The -- behavior is undefined if the 'StringBuffer' is empty. The result shares -- the same buffer as the original. Similar to 'utf8DecodeChar', if the -- character cannot be decoded as UTF-8, @\'\\0\'@ is returned. 
{-# INLINE nextChar #-} nextChar :: StringBuffer -> (Char,StringBuffer) nextChar (StringBuffer buf len (I# cur#)) = -- Getting our fingers dirty a little here, but this is performance-critical inlinePerformIO $ unsafeWithForeignPtr buf $ \(Ptr a#) -> case utf8DecodeCharAddr# (a# `plusAddr#` cur#) 0# of (# c#, nBytes# #) -> let cur' = I# (cur# +# nBytes#) in return (C# c#, StringBuffer buf len cur') bidirectionalFormatChars :: [(Char,String)] bidirectionalFormatChars = [ ('\x202a' , "U+202A LEFT-TO-RIGHT EMBEDDING (LRE)") , ('\x202b' , "U+202B RIGHT-TO-LEFT EMBEDDING (RLE)") , ('\x202c' , "U+202C POP DIRECTIONAL FORMATTING (PDF)") , ('\x202d' , "U+202D LEFT-TO-RIGHT OVERRIDE (LRO)") , ('\x202e' , "U+202E RIGHT-TO-LEFT OVERRIDE (RLO)") , ('\x2066' , "U+2066 LEFT-TO-RIGHT ISOLATE (LRI)") , ('\x2067' , "U+2067 RIGHT-TO-LEFT ISOLATE (RLI)") , ('\x2068' , "U+2068 FIRST STRONG ISOLATE (FSI)") , ('\x2069' , "U+2069 POP DIRECTIONAL ISOLATE (PDI)") ] {-| Returns true if the buffer contains Unicode bi-directional formatting characters. 
https://www.unicode.org/reports/tr9/#Bidirectional_Character_Types Bidirectional format characters are one of '\x202a' : "U+202A LEFT-TO-RIGHT EMBEDDING (LRE)" '\x202b' : "U+202B RIGHT-TO-LEFT EMBEDDING (RLE)" '\x202c' : "U+202C POP DIRECTIONAL FORMATTING (PDF)" '\x202d' : "U+202D LEFT-TO-RIGHT OVERRIDE (LRO)" '\x202e' : "U+202E RIGHT-TO-LEFT OVERRIDE (RLO)" '\x2066' : "U+2066 LEFT-TO-RIGHT ISOLATE (LRI)" '\x2067' : "U+2067 RIGHT-TO-LEFT ISOLATE (RLI)" '\x2068' : "U+2068 FIRST STRONG ISOLATE (FSI)" '\x2069' : "U+2069 POP DIRECTIONAL ISOLATE (PDI)" This list is encoded in 'bidirectionalFormatChars' -} {-# INLINE containsBidirectionalFormatChar #-} containsBidirectionalFormatChar :: StringBuffer -> Bool containsBidirectionalFormatChar (StringBuffer buf (I# len#) (I# cur#)) = inlinePerformIO $ unsafeWithForeignPtr buf $ \(Ptr a#) -> do let go :: Int# -> Bool go i | isTrue# (i >=# len#) = False | otherwise = case utf8DecodeCharAddr# a# i of (# '\x202a'# , _ #) -> True (# '\x202b'# , _ #) -> True (# '\x202c'# , _ #) -> True (# '\x202d'# , _ #) -> True (# '\x202e'# , _ #) -> True (# '\x2066'# , _ #) -> True (# '\x2067'# , _ #) -> True (# '\x2068'# , _ #) -> True (# '\x2069'# , _ #) -> True (# _, bytes #) -> go (i +# bytes) pure $! go cur# -- | Return the first UTF-8 character of a nonempty 'StringBuffer' (analogous -- to 'Data.List.head'). __Warning:__ The behavior is undefined if the -- 'StringBuffer' is empty. Similar to 'utf8DecodeChar', if the character -- cannot be decoded as UTF-8, @\'\\0\'@ is returned. currentChar :: StringBuffer -> Char currentChar = fst . 
nextChar prevChar :: StringBuffer -> Char -> Char prevChar (StringBuffer _ _ 0) deflt = deflt prevChar (StringBuffer buf _ cur) _ = inlinePerformIO $ unsafeWithForeignPtr buf $ \p -> do p' <- utf8PrevChar (p `plusPtr` cur) return (fst (utf8DecodeChar p')) -- ----------------------------------------------------------------------------- -- Moving -- | Return a 'StringBuffer' with the first UTF-8 character removed (analogous -- to 'Data.List.tail'). __Warning:__ The behavior is undefined if the -- 'StringBuffer' is empty. The result shares the same buffer as the -- original. stepOn :: StringBuffer -> StringBuffer stepOn s = snd (nextChar s) -- | Return a 'StringBuffer' with the first @n@ bytes removed. __Warning:__ -- If there aren't enough characters, the returned 'StringBuffer' will be -- invalid and any use of it may lead to undefined behavior. The result -- shares the same buffer as the original. offsetBytes :: Int -- ^ @n@, the number of bytes -> StringBuffer -> StringBuffer offsetBytes i s = s { cur = cur s + i } -- | Compute the difference in offset between two 'StringBuffer's that share -- the same buffer. __Warning:__ The behavior is undefined if the -- 'StringBuffer's use separate buffers. byteDiff :: StringBuffer -> StringBuffer -> Int byteDiff s1 s2 = cur s2 - cur s1 -- | Check whether a 'StringBuffer' is empty (analogous to 'Data.List.null'). atEnd :: StringBuffer -> Bool atEnd (StringBuffer _ l c) = l == c -- | Computes a hash of the contents of a 'StringBuffer'. fingerprintStringBuffer :: StringBuffer -> Fingerprint fingerprintStringBuffer (StringBuffer buf len cur) = unsafePerformIO $ withForeignPtr buf $ \ptr -> fingerprintData (ptr `plusPtr` cur) len -- | Computes a 'StringBuffer' which points to the first character of the -- wanted line. Lines begin at 1. 
atLine :: Int -> StringBuffer -> Maybe StringBuffer atLine line sb@(StringBuffer buf len _) = inlinePerformIO $ unsafeWithForeignPtr buf $ \p -> do p' <- skipToLine line len p if p' == nullPtr then return Nothing else let delta = p' `minusPtr` p in return $ Just (sb { cur = delta , len = len - delta }) skipToLine :: Int -> Int -> Ptr Word8 -> IO (Ptr Word8) skipToLine !line !len !op0 = go 1 op0 where !opend = op0 `plusPtr` len go !i_line !op | op >= opend = pure nullPtr | i_line == line = pure op | otherwise = do w <- peek op :: IO Word8 case w of 10 -> go (i_line + 1) (plusPtr op 1) 13 -> do -- this is safe because a 'StringBuffer' is -- guaranteed to have 3 bytes sentinel values. w' <- peek (plusPtr op 1) :: IO Word8 case w' of 10 -> go (i_line + 1) (plusPtr op 2) _ -> go (i_line + 1) (plusPtr op 1) _ -> go i_line (plusPtr op 1) -- ----------------------------------------------------------------------------- -- Conversion -- | Decode the first @n@ bytes of a 'StringBuffer' as UTF-8 into a 'String'. -- Similar to 'utf8DecodeChar', if the character cannot be decoded as UTF-8, -- they will be replaced with @\'\\0\'@. lexemeToString :: StringBuffer -> Int -- ^ @n@, the number of bytes -> String lexemeToString _ 0 = "" lexemeToString (StringBuffer buf _ cur) bytes = utf8DecodeStringLazy buf cur bytes lexemeToFastString :: StringBuffer -> Int -- ^ @n@, the number of bytes -> FastString lexemeToFastString _ 0 = nilFS lexemeToFastString (StringBuffer buf _ cur) len = inlinePerformIO $ unsafeWithForeignPtr buf $ \ptr -> return $! mkFastStringBytes (ptr `plusPtr` cur) len -- | Return the previous @n@ characters (or fewer if we are less than @n@ -- characters into the buffer. 
decodePrevNChars :: Int -> StringBuffer -> String decodePrevNChars n (StringBuffer buf _ cur) = inlinePerformIO $ unsafeWithForeignPtr buf $ \p0 -> go p0 n "" (p0 `plusPtr` (cur - 1)) where go :: Ptr Word8 -> Int -> String -> Ptr Word8 -> IO String go buf0 n acc p | n == 0 || buf0 >= p = return acc go buf0 n acc p = do p' <- utf8PrevChar p let (c,_) = utf8DecodeChar p' go buf0 (n - 1) (c:acc) p' -- ----------------------------------------------------------------------------- -- Parsing integer strings in various bases parseUnsignedInteger :: StringBuffer -> Int -> Integer -> (Char->Int) -> Integer parseUnsignedInteger (StringBuffer buf _ cur) len radix char_to_int = inlinePerformIO $ withForeignPtr buf $ \ptr -> return $! let go i x | i == len = x | otherwise = case fst (utf8DecodeChar (ptr `plusPtr` (cur + i))) of '_' -> go (i + 1) x -- skip "_" (#14473) char -> go (i + 1) (x * radix + toInteger (char_to_int char)) in go 0 0 ghc-lib-parser-9.4.7.20230826/compiler/GHC/Data/TrieMap.hs0000644000000000000000000003565514472400112020417 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Data.TrieMap( -- * Maps over 'Maybe' values MaybeMap, -- * Maps over 'List' values ListMap, -- * Maps over 'Literal's LiteralMap, -- * 'TrieMap' class TrieMap(..), insertTM, deleteTM, foldMapTM, isEmptyTM, -- * Things helpful for adding additional Instances. 
(>.>), (|>), (|>>), XT, foldMaybe, filterMaybe, -- * Map for leaf compression GenMap, lkG, xtG, mapG, fdG, xtList, lkList ) where import GHC.Prelude import GHC.Types.Literal import GHC.Types.Unique.DFM import GHC.Types.Unique( Uniquable ) import qualified Data.Map as Map import qualified Data.IntMap as IntMap import GHC.Utils.Outputable import Control.Monad( (>=>) ) import Data.Kind( Type ) import qualified Data.Semigroup as S {- This module implements TrieMaps, which are finite mappings whose key is a structured value like a CoreExpr or Type. This file implements tries over general data structures. Implementation for tries over Core Expressions/Types are available in GHC.Core.Map.Expr. The regular pattern for handling TrieMaps on data structures was first described (to my knowledge) in Connelly and Morris's 1995 paper "A generalization of the Trie Data Structure"; there is also an accessible description of the idea in Okasaki's book "Purely Functional Data Structures", Section 10.3.2 ************************************************************************ * * The TrieMap class * * ************************************************************************ -} type XT a = Maybe a -> Maybe a -- How to alter a non-existent elt (Nothing) -- or an existing elt (Just) class TrieMap m where type Key m :: Type emptyTM :: m a lookupTM :: forall b. Key m -> m b -> Maybe b alterTM :: forall b. Key m -> XT b -> m b -> m b mapTM :: (a->b) -> m a -> m b filterTM :: (a -> Bool) -> m a -> m a foldTM :: (a -> b -> b) -> m a -> b -> b -- The unusual argument order here makes -- it easy to compose calls to foldTM; -- see for example fdE below insertTM :: TrieMap m => Key m -> a -> m a -> m a insertTM k v m = alterTM k (\_ -> Just v) m deleteTM :: TrieMap m => Key m -> m a -> m a deleteTM k m = alterTM k (\_ -> Nothing) m foldMapTM :: (TrieMap m, Monoid r) => (a -> r) -> m a -> r foldMapTM f m = foldTM (\ x r -> f x S.<> r) m mempty -- This looks inefficient. 
isEmptyTM :: TrieMap m => m a -> Bool isEmptyTM m = foldTM (\ _ _ -> False) m True ---------------------- -- Recall that -- Control.Monad.(>=>) :: (a -> Maybe b) -> (b -> Maybe c) -> a -> Maybe c (>.>) :: (a -> b) -> (b -> c) -> a -> c -- Reverse function composition (do f first, then g) infixr 1 >.> (f >.> g) x = g (f x) infixr 1 |>, |>> (|>) :: a -> (a->b) -> b -- Reverse application x |> f = f x ---------------------- (|>>) :: TrieMap m2 => (XT (m2 a) -> m1 (m2 a) -> m1 (m2 a)) -> (m2 a -> m2 a) -> m1 (m2 a) -> m1 (m2 a) (|>>) f g = f (Just . g . deMaybe) deMaybe :: TrieMap m => Maybe (m a) -> m a deMaybe Nothing = emptyTM deMaybe (Just m) = m {- ************************************************************************ * * IntMaps * * ************************************************************************ -} instance TrieMap IntMap.IntMap where type Key IntMap.IntMap = Int emptyTM = IntMap.empty lookupTM k m = IntMap.lookup k m alterTM = xtInt foldTM k m z = IntMap.foldr k z m mapTM f m = IntMap.map f m filterTM f m = IntMap.filter f m xtInt :: Int -> XT a -> IntMap.IntMap a -> IntMap.IntMap a xtInt k f m = IntMap.alter f k m instance Ord k => TrieMap (Map.Map k) where type Key (Map.Map k) = k emptyTM = Map.empty lookupTM = Map.lookup alterTM k f m = Map.alter f k m foldTM k m z = Map.foldr k z m mapTM f m = Map.map f m filterTM f m = Map.filter f m {- Note [foldTM determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~ We want foldTM to be deterministic, which is why we have an instance of TrieMap for UniqDFM, but not for UniqFM. Here's an example of some things that go wrong if foldTM is nondeterministic. Consider: f a b = return (a <> b) Depending on the order that the typechecker generates constraints you get either: f :: (Monad m, Monoid a) => a -> a -> m a or: f :: (Monoid a, Monad m) => a -> a -> m a The generated code will be different after desugaring as the dictionaries will be bound in different orders, leading to potential ABI incompatibility. 
One way to solve this would be to notice that the typeclasses could be sorted alphabetically. Unfortunately that doesn't quite work with this example: f a b = let x = a <> a; y = b <> b in x where you infer: f :: (Monoid m, Monoid m1) => m1 -> m -> m1 or: f :: (Monoid m1, Monoid m) => m1 -> m -> m1 Here you could decide to take the order of the type variables in the type according to depth first traversal and use it to order the constraints. The real trouble starts when the user enables incoherent instances and the compiler has to make an arbitrary choice. Consider: class T a b where go :: a -> b -> String instance (Show b) => T Int b where go a b = show a ++ show b instance (Show a) => T a Bool where go a b = show a ++ show b f = go 10 True GHC is free to choose either dictionary to implement f, but for the sake of determinism we'd like it to be consistent when compiling the same sources with the same flags. inert_dicts :: DictMap is implemented with a TrieMap. In getUnsolvedInerts it gets converted to a bag of (Wanted) Cts using a fold. Then in solve_simple_wanteds it's merged with other WantedConstraints. We want the conversion to a bag to be deterministic. For that purpose we use UniqDFM instead of UniqFM to implement the TrieMap. See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for more details on how it's made deterministic. -} instance forall key. 
Uniquable key => TrieMap (UniqDFM key) where type Key (UniqDFM key) = key emptyTM = emptyUDFM lookupTM k m = lookupUDFM m k alterTM k f m = alterUDFM f m k foldTM k m z = foldUDFM k z m mapTM f m = mapUDFM f m filterTM f m = filterUDFM f m {- ************************************************************************ * * Maybes * * ************************************************************************ If m is a map from k -> val then (MaybeMap m) is a map from (Maybe k) -> val -} data MaybeMap m a = MM { mm_nothing :: Maybe a, mm_just :: m a } instance TrieMap m => TrieMap (MaybeMap m) where type Key (MaybeMap m) = Maybe (Key m) emptyTM = MM { mm_nothing = Nothing, mm_just = emptyTM } lookupTM = lkMaybe lookupTM alterTM = xtMaybe alterTM foldTM = fdMaybe mapTM = mapMb filterTM = ftMaybe instance TrieMap m => Foldable (MaybeMap m) where foldMap = foldMapTM mapMb :: TrieMap m => (a->b) -> MaybeMap m a -> MaybeMap m b mapMb f (MM { mm_nothing = mn, mm_just = mj }) = MM { mm_nothing = fmap f mn, mm_just = mapTM f mj } lkMaybe :: (forall b. k -> m b -> Maybe b) -> Maybe k -> MaybeMap m a -> Maybe a lkMaybe _ Nothing = mm_nothing lkMaybe lk (Just x) = mm_just >.> lk x xtMaybe :: (forall b. k -> XT b -> m b -> m b) -> Maybe k -> XT a -> MaybeMap m a -> MaybeMap m a xtMaybe _ Nothing f m = m { mm_nothing = f (mm_nothing m) } xtMaybe tr (Just x) f m = m { mm_just = mm_just m |> tr x f } fdMaybe :: TrieMap m => (a -> b -> b) -> MaybeMap m a -> b -> b fdMaybe k m = foldMaybe k (mm_nothing m) . 
foldTM k (mm_just m) ftMaybe :: TrieMap m => (a -> Bool) -> MaybeMap m a -> MaybeMap m a ftMaybe f (MM { mm_nothing = mn, mm_just = mj }) = MM { mm_nothing = filterMaybe f mn, mm_just = filterTM f mj } foldMaybe :: (a -> b -> b) -> Maybe a -> b -> b foldMaybe _ Nothing b = b foldMaybe k (Just a) b = k a b filterMaybe :: (a -> Bool) -> Maybe a -> Maybe a filterMaybe _ Nothing = Nothing filterMaybe f input@(Just x) | f x = input | otherwise = Nothing {- ************************************************************************ * * Lists * * ************************************************************************ -} data ListMap m a = LM { lm_nil :: Maybe a , lm_cons :: m (ListMap m a) } instance TrieMap m => TrieMap (ListMap m) where type Key (ListMap m) = [Key m] emptyTM = LM { lm_nil = Nothing, lm_cons = emptyTM } lookupTM = lkList lookupTM alterTM = xtList alterTM foldTM = fdList mapTM = mapList filterTM = ftList instance TrieMap m => Foldable (ListMap m) where foldMap = foldMapTM instance (TrieMap m, Outputable a) => Outputable (ListMap m a) where ppr m = text "List elts" <+> ppr (foldTM (:) m []) mapList :: TrieMap m => (a->b) -> ListMap m a -> ListMap m b mapList f (LM { lm_nil = mnil, lm_cons = mcons }) = LM { lm_nil = fmap f mnil, lm_cons = mapTM (mapTM f) mcons } lkList :: TrieMap m => (forall b. k -> m b -> Maybe b) -> [k] -> ListMap m a -> Maybe a lkList _ [] = lm_nil lkList lk (x:xs) = lm_cons >.> lk x >=> lkList lk xs xtList :: TrieMap m => (forall b. k -> XT b -> m b -> m b) -> [k] -> XT a -> ListMap m a -> ListMap m a xtList _ [] f m = m { lm_nil = f (lm_nil m) } xtList tr (x:xs) f m = m { lm_cons = lm_cons m |> tr x |>> xtList tr xs f } fdList :: forall m a b. TrieMap m => (a -> b -> b) -> ListMap m a -> b -> b fdList k m = foldMaybe k (lm_nil m) . 
foldTM (fdList k) (lm_cons m) ftList :: TrieMap m => (a -> Bool) -> ListMap m a -> ListMap m a ftList f (LM { lm_nil = mnil, lm_cons = mcons }) = LM { lm_nil = filterMaybe f mnil, lm_cons = mapTM (filterTM f) mcons } {- ************************************************************************ * * Basic maps * * ************************************************************************ -} type LiteralMap a = Map.Map Literal a {- ************************************************************************ * * GenMap * * ************************************************************************ Note [Compressed TrieMap] ~~~~~~~~~~~~~~~~~~~~~~~~~ The GenMap constructor augments TrieMaps with leaf compression. This helps solve the performance problem detailed in #9960: suppose we have a handful H of entries in a TrieMap, each with a very large key, size K. If you fold over such a TrieMap you'd expect time O(H). That would certainly be true of an association list! But with TrieMap we actually have to navigate down a long singleton structure to get to the elements, so it takes time O(K*H). This can really hurt on many type-level computation benchmarks: see for example T9872d. The point of a TrieMap is that you need to navigate to the point where only one key remains, and then things should be fast. So the point of a SingletonMap is that, once we are down to a single (key,value) pair, we stop and just use SingletonMap. 'EmptyMap' provides an even more basic (but essential) optimization: if there is nothing in the map, don't bother building out the (possibly infinite) recursive TrieMap structure! Compressed triemaps are heavily used by GHC.Core.Map.Expr. So we have to mark some things as INLINEABLE to permit specialization. 
-} data GenMap m a = EmptyMap | SingletonMap (Key m) a | MultiMap (m a) instance (Outputable a, Outputable (m a)) => Outputable (GenMap m a) where ppr EmptyMap = text "Empty map" ppr (SingletonMap _ v) = text "Singleton map" <+> ppr v ppr (MultiMap m) = ppr m -- TODO undecidable instance instance (Eq (Key m), TrieMap m) => TrieMap (GenMap m) where type Key (GenMap m) = Key m emptyTM = EmptyMap lookupTM = lkG alterTM = xtG foldTM = fdG mapTM = mapG filterTM = ftG instance (Eq (Key m), TrieMap m) => Foldable (GenMap m) where foldMap = foldMapTM --We want to be able to specialize these functions when defining eg --tries over (GenMap CoreExpr) which requires INLINEABLE {-# INLINEABLE lkG #-} lkG :: (Eq (Key m), TrieMap m) => Key m -> GenMap m a -> Maybe a lkG _ EmptyMap = Nothing lkG k (SingletonMap k' v') | k == k' = Just v' | otherwise = Nothing lkG k (MultiMap m) = lookupTM k m {-# INLINEABLE xtG #-} xtG :: (Eq (Key m), TrieMap m) => Key m -> XT a -> GenMap m a -> GenMap m a xtG k f EmptyMap = case f Nothing of Just v -> SingletonMap k v Nothing -> EmptyMap xtG k f m@(SingletonMap k' v') | k' == k -- The new key matches the (single) key already in the tree. Hence, -- apply @f@ to @Just v'@ and build a singleton or empty map depending -- on the 'Just'/'Nothing' response respectively. = case f (Just v') of Just v'' -> SingletonMap k' v'' Nothing -> EmptyMap | otherwise -- We've hit a singleton tree for a different key than the one we are -- searching for. Hence apply @f@ to @Nothing@. If result is @Nothing@ then -- we can just return the old map. If not, we need a map with *two* -- entries. The easiest way to do that is to insert two items into an empty -- map of type @m a@. 
= case f Nothing of Nothing -> m Just v -> emptyTM |> alterTM k' (const (Just v')) >.> alterTM k (const (Just v)) >.> MultiMap xtG k f (MultiMap m) = MultiMap (alterTM k f m) {-# INLINEABLE mapG #-} mapG :: TrieMap m => (a -> b) -> GenMap m a -> GenMap m b mapG _ EmptyMap = EmptyMap mapG f (SingletonMap k v) = SingletonMap k (f v) mapG f (MultiMap m) = MultiMap (mapTM f m) {-# INLINEABLE fdG #-} fdG :: TrieMap m => (a -> b -> b) -> GenMap m a -> b -> b fdG _ EmptyMap = \z -> z fdG k (SingletonMap _ v) = \z -> k v z fdG k (MultiMap m) = foldTM k m {-# INLINEABLE ftG #-} ftG :: TrieMap m => (a -> Bool) -> GenMap m a -> GenMap m a ftG _ EmptyMap = EmptyMap ftG f input@(SingletonMap _ v) | f v = input | otherwise = EmptyMap ftG f (MultiMap m) = MultiMap (filterTM f m) -- we don't have enough information to reconstruct the key to make -- a SingletonMap ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Backend.hs0000644000000000000000000001170514472400112020755 0ustar0000000000000000{-# LANGUAGE MultiWayIf #-} -- | Code generation backends module GHC.Driver.Backend ( Backend (..) , platformDefaultBackend , platformNcgSupported , backendProducesObject , backendRetainsAllBindings ) where import GHC.Prelude import GHC.Platform -- | Code generation backends. -- -- GHC supports several code generation backends serving different purposes -- (producing machine code, producing ByteCode for the interpreter) and -- supporting different platforms. -- data Backend = NCG -- ^ Native code generator backend. -- -- Compiles Cmm code into textual assembler, then relies on -- an external assembler toolchain to produce machine code. -- -- Only supports a few platforms (X86, PowerPC, SPARC). -- -- See "GHC.CmmToAsm". | LLVM -- ^ LLVM backend. -- -- Compiles Cmm code into LLVM textual IR, then relies on -- LLVM toolchain to produce machine code. 
-- -- It relies on LLVM support for the calling convention used -- by the NCG backend to produce code objects ABI compatible -- with it (see "cc 10" or "ghccc" calling convention in -- https://llvm.org/docs/LangRef.html#calling-conventions). -- -- Support a few platforms (X86, AArch64, s390x, ARM). -- -- See "GHC.CmmToLlvm" | ViaC -- ^ Via-C backend. -- -- Compiles Cmm code into C code, then relies on a C compiler -- to produce machine code. -- -- It produces code objects that are *not* ABI compatible -- with those produced by NCG and LLVM backends. -- -- Produced code is expected to be less efficient than the -- one produced by NCG and LLVM backends because STG -- registers are not pinned into real registers. On the -- other hand, it supports more target platforms (those -- having a valid C toolchain). -- -- See "GHC.CmmToC" | Interpreter -- ^ ByteCode interpreter. -- -- Produce ByteCode objects (BCO, see "GHC.ByteCode") that -- can be interpreted. It is used by GHCi. -- -- Currently some extensions are not supported -- (foreign primops). -- -- See "GHC.StgToByteCode" | NoBackend -- ^ No code generated. -- -- Use this to disable code generation. It is particularly -- useful when GHC is used as a library for other purpose -- than generating code (e.g. to generate documentation with -- Haddock) or when the user requested it (via -fno-code) for -- some reason. deriving (Eq,Ord,Show,Read) -- | Default backend to use for the given platform. platformDefaultBackend :: Platform -> Backend platformDefaultBackend platform = if | platformUnregisterised platform -> ViaC | platformNcgSupported platform -> NCG | otherwise -> LLVM -- | Is the platform supported by the Native Code Generator? 
platformNcgSupported :: Platform -> Bool platformNcgSupported platform = if | platformUnregisterised platform -> False -- NCG doesn't support unregisterised ABI | ncgValidArch -> True | otherwise -> False where ncgValidArch = case platformArch platform of ArchX86 -> True ArchX86_64 -> True ArchPPC -> True ArchPPC_64 {} -> True ArchAArch64 -> True _ -> False -- | Will this backend produce an object file on the disk? backendProducesObject :: Backend -> Bool backendProducesObject ViaC = True backendProducesObject NCG = True backendProducesObject LLVM = True backendProducesObject Interpreter = False backendProducesObject NoBackend = False -- | Does this backend retain *all* top-level bindings for a module, -- rather than just the exported bindings, in the TypeEnv and compiled -- code (if any)? -- -- Interpreter backend does this, so that GHCi can call functions inside a -- module. -- -- When no backend is used we also do it, so that Haddock can get access to the -- GlobalRdrEnv for a module after typechecking it. backendRetainsAllBindings :: Backend -> Bool backendRetainsAllBindings Interpreter = True backendRetainsAllBindings NoBackend = True backendRetainsAllBindings ViaC = False backendRetainsAllBindings NCG = False backendRetainsAllBindings LLVM = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Backpack/Syntax.hs0000644000000000000000000000521114472400112022406 0ustar0000000000000000-- | This is the syntax for bkp files which are parsed in 'ghc --backpack' -- mode. This syntax is used purely for testing purposes. 
module GHC.Driver.Backpack.Syntax ( -- * Backpack abstract syntax HsUnitId(..), LHsUnitId, HsModuleSubst, LHsModuleSubst, HsModuleId(..), LHsModuleId, HsComponentId(..), LHsUnit, HsUnit(..), LHsUnitDecl, HsUnitDecl(..), IncludeDecl(..), LRenaming, Renaming(..), ) where import GHC.Prelude import GHC.Hs import GHC.Types.SrcLoc import GHC.Types.SourceFile import GHC.Unit.Module.Name import GHC.Unit.Types import GHC.Unit.Info import GHC.Utils.Outputable {- ************************************************************************ * * User syntax * * ************************************************************************ -} data HsComponentId = HsComponentId { hsPackageName :: PackageName, hsComponentId :: UnitId } instance Outputable HsComponentId where ppr (HsComponentId _pn cid) = ppr cid -- todo debug with pn data HsUnitId n = HsUnitId (Located n) [LHsModuleSubst n] type LHsUnitId n = Located (HsUnitId n) type HsModuleSubst n = (Located ModuleName, LHsModuleId n) type LHsModuleSubst n = Located (HsModuleSubst n) data HsModuleId n = HsModuleVar (Located ModuleName) | HsModuleId (LHsUnitId n) (Located ModuleName) type LHsModuleId n = Located (HsModuleId n) -- | Top level @unit@ declaration in a Backpack file. data HsUnit n = HsUnit { hsunitName :: Located n, hsunitBody :: [LHsUnitDecl n] } type LHsUnit n = Located (HsUnit n) -- | A declaration in a package, e.g. a module or signature definition, -- or an include. data HsUnitDecl n = DeclD HscSource (Located ModuleName) (Located HsModule) | IncludeD (IncludeDecl n) type LHsUnitDecl n = Located (HsUnitDecl n) -- | An include of another unit data IncludeDecl n = IncludeDecl { idUnitId :: LHsUnitId n, idModRenaming :: Maybe [ LRenaming ], -- | Is this a @dependency signature@ include? If so, -- we don't compile this include when we instantiate this -- unit (as there should not be any modules brought into -- scope.) idSignatureInclude :: Bool } -- | Rename a module from one name to another. 
The identity renaming -- means that the module should be brought into scope. data Renaming = Renaming { renameFrom :: Located ModuleName , renameTo :: Maybe (Located ModuleName) } type LRenaming = Located Renaming ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/CmdLine.hs0000644000000000000000000003170014472400112020736 0ustar0000000000000000{-# LANGUAGE RankNTypes #-} ------------------------------------------------------------------------------- -- -- | Command-line parser -- -- This is an abstract command-line parser used by DynFlags. -- -- (c) The University of Glasgow 2005 -- ------------------------------------------------------------------------------- module GHC.Driver.CmdLine ( processArgs, parseResponseFile, OptKind(..), GhcFlagMode(..), Flag(..), defFlag, defGhcFlag, defGhciFlag, defHiddenFlag, hoistFlag, errorsToGhcException, Err(..), Warn(..), WarnReason(..), EwM, runEwM, addErr, addWarn, addFlagWarn, getArg, getCurLoc, liftEwM ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.Bag import GHC.Types.SrcLoc import GHC.Utils.Json import GHC.Types.Error ( DiagnosticReason(..) 
) import Data.Function import Data.List (sortBy, intercalate, stripPrefix) import GHC.ResponseFile import Control.Exception (IOException, catch) import Control.Monad (liftM, ap) import Control.Monad.IO.Class -------------------------------------------------------- -- The Flag and OptKind types -------------------------------------------------------- data Flag m = Flag { flagName :: String, -- Flag, without the leading "-" flagOptKind :: OptKind m, -- What to do if we see it flagGhcMode :: GhcFlagMode -- Which modes this flag affects } defFlag :: String -> OptKind m -> Flag m defFlag name optKind = Flag name optKind AllModes defGhcFlag :: String -> OptKind m -> Flag m defGhcFlag name optKind = Flag name optKind OnlyGhc defGhciFlag :: String -> OptKind m -> Flag m defGhciFlag name optKind = Flag name optKind OnlyGhci defHiddenFlag :: String -> OptKind m -> Flag m defHiddenFlag name optKind = Flag name optKind HiddenFlag hoistFlag :: forall m n. (forall a. m a -> n a) -> Flag m -> Flag n hoistFlag f (Flag a b c) = Flag a (go b) c where go (NoArg k) = NoArg (go2 k) go (HasArg k) = HasArg (\s -> go2 (k s)) go (SepArg k) = SepArg (\s -> go2 (k s)) go (Prefix k) = Prefix (\s -> go2 (k s)) go (OptPrefix k) = OptPrefix (\s -> go2 (k s)) go (OptIntSuffix k) = OptIntSuffix (\n -> go2 (k n)) go (IntSuffix k) = IntSuffix (\n -> go2 (k n)) go (WordSuffix k) = WordSuffix (\s -> go2 (k s)) go (FloatSuffix k) = FloatSuffix (\s -> go2 (k s)) go (PassFlag k) = PassFlag (\s -> go2 (k s)) go (AnySuffix k) = AnySuffix (\s -> go2 (k s)) go2 :: EwM m a -> EwM n a go2 (EwM g) = EwM $ \loc es ws -> f (g loc es ws) -- | GHC flag modes describing when a flag has an effect. 
data GhcFlagMode = OnlyGhc -- ^ The flag only affects the non-interactive GHC | OnlyGhci -- ^ The flag only affects the interactive GHC | AllModes -- ^ The flag affects multiple ghc modes | HiddenFlag -- ^ This flag should not be seen in cli completion data OptKind m -- Suppose the flag is -f = NoArg (EwM m ()) -- -f all by itself | HasArg (String -> EwM m ()) -- -farg or -f arg | SepArg (String -> EwM m ()) -- -f arg | Prefix (String -> EwM m ()) -- -farg | OptPrefix (String -> EwM m ()) -- -f or -farg (i.e. the arg is optional) | OptIntSuffix (Maybe Int -> EwM m ()) -- -f or -f=n; pass n to fn | IntSuffix (Int -> EwM m ()) -- -f or -f=n; pass n to fn | WordSuffix (Word -> EwM m ()) -- -f or -f=n; pass n to fn | FloatSuffix (Float -> EwM m ()) -- -f or -f=n; pass n to fn | PassFlag (String -> EwM m ()) -- -f; pass "-f" fn | AnySuffix (String -> EwM m ()) -- -f or -farg; pass entire "-farg" to fn -------------------------------------------------------- -- The EwM monad -------------------------------------------------------- -- | Used when filtering warnings: if a reason is given -- it can be filtered out when displaying. data WarnReason = NoReason | ReasonDeprecatedFlag | ReasonUnrecognisedFlag deriving (Eq, Show) instance Outputable WarnReason where ppr = text . 
show instance ToJson WarnReason where json NoReason = JSNull json reason = JSString $ show reason -- | A command-line error message newtype Err = Err { errMsg :: Located String } -- | A command-line warning message and the reason it arose data Warn = Warn { warnReason :: DiagnosticReason, warnMsg :: Located String } type Errs = Bag Err type Warns = Bag Warn -- EwM ("errors and warnings monad") is a monad -- transformer for m that adds an (err, warn) state newtype EwM m a = EwM { unEwM :: Located String -- Current parse arg -> Errs -> Warns -> m (Errs, Warns, a) } instance Monad m => Functor (EwM m) where fmap = liftM instance Monad m => Applicative (EwM m) where pure v = EwM (\_ e w -> return (e, w, v)) (<*>) = ap instance Monad m => Monad (EwM m) where (EwM f) >>= k = EwM (\l e w -> do (e', w', r) <- f l e w unEwM (k r) l e' w') instance MonadIO m => MonadIO (EwM m) where liftIO = liftEwM . liftIO runEwM :: EwM m a -> m (Errs, Warns, a) runEwM action = unEwM action (panic "processArgs: no arg yet") emptyBag emptyBag setArg :: Located String -> EwM m () -> EwM m () setArg l (EwM f) = EwM (\_ es ws -> f l es ws) addErr :: Monad m => String -> EwM m () addErr e = EwM (\(L loc _) es ws -> return (es `snocBag` Err (L loc e), ws, ())) addWarn :: Monad m => String -> EwM m () addWarn = addFlagWarn WarningWithoutFlag addFlagWarn :: Monad m => DiagnosticReason -> String -> EwM m () addFlagWarn reason msg = EwM $ (\(L loc _) es ws -> return (es, ws `snocBag` Warn reason (L loc msg), ())) getArg :: Monad m => EwM m String getArg = EwM (\(L _ arg) es ws -> return (es, ws, arg)) getCurLoc :: Monad m => EwM m SrcSpan getCurLoc = EwM (\(L loc _) es ws -> return (es, ws, loc)) liftEwM :: Monad m => m a -> EwM m a liftEwM action = EwM (\_ es ws -> do { r <- action; return (es, ws, r) }) -------------------------------------------------------- -- Processing arguments -------------------------------------------------------- processArgs :: Monad m => [Flag m] -- ^ cmdline parser spec 
-> [Located String] -- ^ args -> (FilePath -> EwM m [Located String]) -- ^ response file handler -> m ( [Located String], -- spare args [Err], -- errors [Warn] ) -- warnings processArgs spec args handleRespFile = do (errs, warns, spare) <- runEwM action return (spare, bagToList errs, bagToList warns) where action = process args [] -- process :: [Located String] -> [Located String] -> EwM m [Located String] process [] spare = return (reverse spare) process (L _ ('@' : resp_file) : args) spare = do resp_args <- handleRespFile resp_file process (resp_args ++ args) spare process (locArg@(L _ ('-' : arg)) : args) spare = case findArg spec arg of Just (rest, opt_kind) -> case processOneArg opt_kind rest arg args of Left err -> let b = process args spare in (setArg locArg $ addErr err) >> b Right (action,rest) -> let b = process rest spare in (setArg locArg $ action) >> b Nothing -> process args (locArg : spare) process (arg : args) spare = process args (arg : spare) processOneArg :: OptKind m -> String -> String -> [Located String] -> Either String (EwM m (), [Located String]) processOneArg opt_kind rest arg args = let dash_arg = '-' : arg rest_no_eq = dropEq rest in case opt_kind of NoArg a -> assert (null rest) Right (a, args) HasArg f | notNull rest_no_eq -> Right (f rest_no_eq, args) | otherwise -> case args of [] -> missingArgErr dash_arg (L _ arg1:args1) -> Right (f arg1, args1) -- See #9776 SepArg f -> case args of [] -> missingArgErr dash_arg (L _ arg1:args1) -> Right (f arg1, args1) -- See #12625 Prefix f | notNull rest_no_eq -> Right (f rest_no_eq, args) | otherwise -> missingArgErr dash_arg PassFlag f | notNull rest -> unknownFlagErr dash_arg | otherwise -> Right (f dash_arg, args) OptIntSuffix f | null rest -> Right (f Nothing, args) | Just n <- parseInt rest_no_eq -> Right (f (Just n), args) | otherwise -> Left ("malformed integer argument in " ++ dash_arg) IntSuffix f | Just n <- parseInt rest_no_eq -> Right (f n, args) | otherwise -> Left ("malformed 
integer argument in " ++ dash_arg) WordSuffix f | Just n <- parseWord rest_no_eq -> Right (f n, args) | otherwise -> Left ("malformed natural argument in " ++ dash_arg) FloatSuffix f | Just n <- parseFloat rest_no_eq -> Right (f n, args) | otherwise -> Left ("malformed float argument in " ++ dash_arg) OptPrefix f -> Right (f rest_no_eq, args) AnySuffix f -> Right (f dash_arg, args) findArg :: [Flag m] -> String -> Maybe (String, OptKind m) findArg spec arg = case sortBy (compare `on` (length . fst)) -- prefer longest matching flag [ (removeSpaces rest, optKind) | flag <- spec, let optKind = flagOptKind flag, Just rest <- [stripPrefix (flagName flag) arg], arg_ok optKind rest arg ] of [] -> Nothing (one:_) -> Just one arg_ok :: OptKind t -> [Char] -> String -> Bool arg_ok (NoArg _) rest _ = null rest arg_ok (HasArg _) _ _ = True arg_ok (SepArg _) rest _ = null rest arg_ok (Prefix _) _ _ = True -- Missing argument checked for in processOneArg t -- to improve error message (#12625) arg_ok (OptIntSuffix _) _ _ = True arg_ok (IntSuffix _) _ _ = True arg_ok (WordSuffix _) _ _ = True arg_ok (FloatSuffix _) _ _ = True arg_ok (OptPrefix _) _ _ = True arg_ok (PassFlag _) rest _ = null rest arg_ok (AnySuffix _) _ _ = True -- | Parse an Int -- -- Looks for "433" or "=342", with no trailing gubbins -- * n or =n => Just n -- * gibberish => Nothing parseInt :: String -> Maybe Int parseInt s = case reads s of ((n,""):_) -> Just n _ -> Nothing parseWord :: String -> Maybe Word parseWord s = case reads s of ((n,""):_) -> Just n _ -> Nothing parseFloat :: String -> Maybe Float parseFloat s = case reads s of ((n,""):_) -> Just n _ -> Nothing -- | Discards a leading equals sign dropEq :: String -> String dropEq ('=' : s) = s dropEq s = s unknownFlagErr :: String -> Either String a unknownFlagErr f = Left ("unrecognised flag: " ++ f) missingArgErr :: String -> Either String a missingArgErr f = Left ("missing argument for flag: " ++ f) 
-------------------------------------------------------- -- Utils -------------------------------------------------------- -- | Parse a response file into arguments. parseResponseFile :: MonadIO m => FilePath -> EwM m [Located String] parseResponseFile path = do res <- liftIO $ fmap Right (readFile path) `catch` \(e :: IOException) -> pure (Left e) case res of Left _err -> addErr "Could not open response file" >> return [] Right resp_file -> return $ map (mkGeneralLocated path) (unescapeArgs resp_file) -- See Note [Handling errors when parsing command-line flags] errorsToGhcException :: [(String, -- Location String)] -- Error -> GhcException errorsToGhcException errs = UsageError $ intercalate "\n" $ [ l ++ ": " ++ e | (l, e) <- errs ] {- Note [Handling errors when parsing command-line flags] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Parsing of static and mode flags happens before any session is started, i.e., before the first call to 'GHC.withGhc'. Therefore, to report errors for invalid usage of these two types of flags, we can not call any function that needs DynFlags, as there are no DynFlags available yet (unsafeGlobalDynFlags is not set either). So we always print "on the commandline" as the location, which is true except for Api users, which is probably ok. When reporting errors for invalid usage of dynamic flags we /can/ make use of DynFlags, and we do so explicitly in DynFlags.parseDynamicFlagsFull. Before, we called unsafeGlobalDynFlags when an invalid (combination of) flag(s) was given on the commandline, resulting in panics (#9963). 
-} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Config.hs0000644000000000000000000000300714472400112020627 0ustar0000000000000000-- | Subsystem configuration module GHC.Driver.Config ( initOptCoercionOpts , initSimpleOpts , initBCOOpts , initEvalOpts ) where import GHC.Prelude import GHC.Driver.Session import GHC.Core.SimpleOpt import GHC.Core.Coercion.Opt import GHC.Runtime.Interpreter (BCOOpts(..)) import GHCi.Message (EvalOpts(..)) import GHC.Conc (getNumProcessors) import Control.Monad.IO.Class -- | Initialise coercion optimiser configuration from DynFlags initOptCoercionOpts :: DynFlags -> OptCoercionOpts initOptCoercionOpts dflags = OptCoercionOpts { optCoercionEnabled = not (hasNoOptCoercion dflags) } -- | Initialise Simple optimiser configuration from DynFlags initSimpleOpts :: DynFlags -> SimpleOpts initSimpleOpts dflags = SimpleOpts { so_uf_opts = unfoldingOpts dflags , so_co_opts = initOptCoercionOpts dflags } -- | Extract BCO options from DynFlags initBCOOpts :: DynFlags -> IO BCOOpts initBCOOpts dflags = do -- Serializing ResolvedBCO is expensive, so if we're in parallel mode -- (-j) parallelise the serialization. 
n_jobs <- case parMakeCount dflags of Nothing -> liftIO getNumProcessors Just n -> return n return $ BCOOpts n_jobs -- | Extract GHCi options from DynFlags and step initEvalOpts :: DynFlags -> Bool -> EvalOpts initEvalOpts dflags step = EvalOpts { useSandboxThread = gopt Opt_GhciSandbox dflags , singleStep = step , breakOnException = gopt Opt_BreakOnException dflags , breakOnError = gopt Opt_BreakOnError dflags } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Config/Diagnostic.hs0000644000000000000000000000112014472400112022705 0ustar0000000000000000module GHC.Driver.Config.Diagnostic ( initDiagOpts ) where import GHC.Driver.Flags import GHC.Driver.Session import GHC.Utils.Outputable import GHC.Utils.Error (DiagOpts (..)) initDiagOpts :: DynFlags -> DiagOpts initDiagOpts dflags = DiagOpts { diag_warning_flags = warningFlags dflags , diag_fatal_warning_flags = fatalWarningFlags dflags , diag_warn_is_error = gopt Opt_WarnIsError dflags , diag_reverse_errors = reverseErrors dflags , diag_max_errors = maxErrors dflags , diag_ppr_ctx = initSDocContext dflags defaultErrStyle } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Config/Logger.hs0000644000000000000000000000174414472400112022054 0ustar0000000000000000module GHC.Driver.Config.Logger ( initLogFlags ) where import GHC.Prelude import GHC.Driver.Session import GHC.Utils.Logger (LogFlags (..)) import GHC.Utils.Outputable -- | Initialize LogFlags from DynFlags initLogFlags :: DynFlags -> LogFlags initLogFlags dflags = LogFlags { log_default_user_context = initSDocContext dflags defaultUserStyle , log_default_dump_context = initSDocContext dflags defaultDumpStyle , log_dump_flags = dumpFlags dflags , log_show_caret = gopt Opt_DiagnosticsShowCaret dflags , log_show_warn_groups = gopt Opt_ShowWarnGroups dflags , log_enable_timestamps = not (gopt Opt_SuppressTimestamps dflags) , log_dump_to_file = gopt Opt_DumpToFile dflags , log_dump_dir = dumpDir dflags , log_dump_prefix = dumpPrefix dflags , 
log_dump_prefix_override = dumpPrefixForce dflags , log_enable_debug = not (hasNoDebugOutput dflags) , log_verbosity = verbosity dflags } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Config/Parser.hs0000644000000000000000000000110414472400112022057 0ustar0000000000000000module GHC.Driver.Config.Parser ( initParserOpts ) where import GHC.Prelude import GHC.Platform import GHC.Driver.Session import GHC.Driver.Config.Diagnostic import GHC.Parser.Lexer -- | Extracts the flags needed for parsing initParserOpts :: DynFlags -> ParserOpts initParserOpts = mkParserOpts <$> extensionFlags <*> initDiagOpts <*> (supportedLanguagesAndExtensions . platformArchOS . targetPlatform) <*> safeImportsOn <*> gopt Opt_Haddock <*> gopt Opt_KeepRawTokenStream <*> const True -- use LINE/COLUMN to update the internal location ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Env.hs0000644000000000000000000004247214472400112020163 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} module GHC.Driver.Env ( Hsc(..) , HscEnv (..) , hscUpdateFlags , hscSetFlags , hsc_home_unit , hsc_home_unit_maybe , hsc_units , hsc_HPT , hsc_HUE , hsc_HUG , hsc_all_home_unit_ids , hscUpdateLoggerFlags , hscUpdateHUG , hscUpdateHPT , hscSetActiveHomeUnit , hscSetActiveUnitId , hscActiveUnitId , runHsc , runHsc' , mkInteractiveHscEnv , runInteractiveHsc , hscEPS , hscInterp , hptCompleteSigs , hptAllInstances , hptInstancesBelow , hptAnns , hptAllThings , hptSomeThingsBelowUs , hptRules , prepareAnnotations , discardIC , lookupType , lookupIfaceByModule , mainModIs ) where import GHC.Prelude import GHC.Driver.Session import GHC.Driver.Errors ( printOrThrowDiagnostics ) import GHC.Driver.Errors.Types ( GhcMessage ) import GHC.Driver.Config.Logger (initLogFlags) import GHC.Driver.Config.Diagnostic (initDiagOpts) import GHC.Driver.Env.Types ( Hsc(..), HscEnv(..) 
) import GHC.Runtime.Context import GHC.Runtime.Interpreter.Types (Interp) import GHC.Unit import GHC.Unit.Module.ModGuts import GHC.Unit.Module.ModIface import GHC.Unit.Module.ModDetails import GHC.Unit.Home.ModInfo import GHC.Unit.Env import GHC.Unit.External import GHC.Core ( CoreRule ) import GHC.Core.FamInstEnv import GHC.Core.InstEnv import GHC.Types.Annotations ( Annotation, AnnEnv, mkAnnEnv, plusAnnEnv ) import GHC.Types.CompleteMatch import GHC.Types.Error ( emptyMessages, Messages ) import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.TyThing import GHC.Builtin.Names ( gHC_PRIM ) import GHC.Data.Maybe import GHC.Utils.Exception as Ex import GHC.Utils.Outputable import GHC.Utils.Monad import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Utils.Logger import GHC.Utils.Trace import Data.IORef import qualified Data.Set as Set import Data.Set (Set) import GHC.Unit.Module.Graph import Data.List (sort) import qualified Data.Map as Map runHsc :: HscEnv -> Hsc a -> IO a runHsc hsc_env (Hsc hsc) = do (a, w) <- hsc hsc_env emptyMessages let dflags = hsc_dflags hsc_env let !diag_opts = initDiagOpts dflags printOrThrowDiagnostics (hsc_logger hsc_env) diag_opts w return a runHsc' :: HscEnv -> Hsc a -> IO (a, Messages GhcMessage) runHsc' hsc_env (Hsc hsc) = hsc hsc_env emptyMessages -- | Switches in the DynFlags and Plugins from the InteractiveContext mkInteractiveHscEnv :: HscEnv -> HscEnv mkInteractiveHscEnv hsc_env = let ic = hsc_IC hsc_env in hscSetFlags (ic_dflags ic) $ hsc_env { hsc_plugins = ic_plugins ic } -- | A variant of runHsc that switches in the DynFlags and Plugins from the -- InteractiveContext before running the Hsc computation. runInteractiveHsc :: HscEnv -> Hsc a -> IO a runInteractiveHsc hsc_env = runHsc (mkInteractiveHscEnv hsc_env) hsc_home_unit :: HscEnv -> HomeUnit hsc_home_unit = unsafeGetHomeUnit . hsc_unit_env hsc_home_unit_maybe :: HscEnv -> Maybe HomeUnit hsc_home_unit_maybe = ue_homeUnit . 
hsc_unit_env hsc_units :: HasDebugCallStack => HscEnv -> UnitState hsc_units = ue_units . hsc_unit_env hsc_HPT :: HscEnv -> HomePackageTable hsc_HPT = ue_hpt . hsc_unit_env hsc_HUE :: HscEnv -> HomeUnitEnv hsc_HUE = ue_currentHomeUnitEnv . hsc_unit_env hsc_HUG :: HscEnv -> HomeUnitGraph hsc_HUG = ue_home_unit_graph . hsc_unit_env hsc_all_home_unit_ids :: HscEnv -> Set.Set UnitId hsc_all_home_unit_ids = unitEnv_keys . hsc_HUG hscUpdateHPT :: (HomePackageTable -> HomePackageTable) -> HscEnv -> HscEnv hscUpdateHPT f hsc_env = hsc_env { hsc_unit_env = updateHpt f (hsc_unit_env hsc_env) } hscUpdateHUG :: (HomeUnitGraph -> HomeUnitGraph) -> HscEnv -> HscEnv hscUpdateHUG f hsc_env = hsc_env { hsc_unit_env = updateHug f (hsc_unit_env hsc_env) } {- Note [Target code interpreter] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Template Haskell and GHCi use an interpreter to execute code that is built for the compiler target platform (= code host platform) on the compiler host platform (= code build platform). The internal interpreter can be used when both platforms are the same and when the built code is compatible with the compiler itself (same way, etc.). This interpreter is not always available: for instance stage1 compiler doesn't have it because there might be an ABI mismatch between the code objects (built by stage1 compiler) and the stage1 compiler itself (built by stage0 compiler). In most cases, an external interpreter can be used instead: it runs in a separate process and it communicates with the compiler via a two-way message passing channel. The process is lazily spawned to avoid overhead when it is not used. The target code interpreter to use can be selected per session via the `hsc_interp` field of `HscEnv`. There may be no interpreter available at all, in which case Template Haskell and GHCi will fail to run. The interpreter to use is configured via command-line flags (in `GHC.setSessionDynFlags`). 
-} -- Note [hsc_type_env_var hack] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- hsc_type_env_var is used to initialize tcg_type_env_var, and -- eventually it is the mutable variable that is queried from -- if_rec_types to get a TypeEnv. So, clearly, it's something -- related to knot-tying (see Note [Tying the knot]). -- hsc_type_env_var is used in two places: initTcRn (where -- it initializes tcg_type_env_var) and initIfaceCheck -- (where it initializes if_rec_types). -- -- But why do we need a way to feed a mutable variable in? Why -- can't we just initialize tcg_type_env_var when we start -- typechecking? The problem is we need to knot-tie the -- EPS, and we may start adding things to the EPS before type -- checking starts. -- -- Here is a concrete example. Suppose we are running -- "ghc -c A.hs", and we have this file system state: -- -- A.hs-boot A.hi-boot **up to date** -- B.hs B.hi **up to date** -- A.hs A.hi **stale** -- -- The first thing we do is run checkOldIface on A.hi. -- checkOldIface will call loadInterface on B.hi so it can -- get its hands on the fingerprints, to find out if A.hi -- needs recompilation. But loadInterface also populates -- the EPS! And so if compilation turns out to be necessary, -- as it is in this case, the thunks we put into the EPS for -- B.hi need to have the correct if_rec_types mutable variable -- to query. -- -- If the mutable variable is only allocated WHEN we start -- typechecking, then that's too late: we can't get the -- information to the thunks. So we need to pre-commit -- to a type variable in 'hscIncrementalCompile' BEFORE we -- check the old interface. -- -- This is all a massive hack because arguably checkOldIface -- should not populate the EPS. But that's a refactor for -- another day. -- | Retrieve the ExternalPackageState cache. 
hscEPS :: HscEnv -> IO ExternalPackageState hscEPS hsc_env = readIORef (euc_eps (ue_eps (hsc_unit_env hsc_env))) hptCompleteSigs :: HscEnv -> [CompleteMatch] hptCompleteSigs = hptAllThings (md_complete_matches . hm_details) -- | Find all the instance declarations (of classes and families) from -- the Home Package Table filtered by the provided predicate function. -- Used in @tcRnImports@, to select the instances that are in the -- transitive closure of imports from the currently compiled module. hptAllInstances :: HscEnv -> (InstEnv, [FamInst]) hptAllInstances hsc_env = let (insts, famInsts) = unzip $ flip hptAllThings hsc_env $ \mod_info -> do let details = hm_details mod_info return (md_insts details, md_fam_insts details) in (foldl' unionInstEnv emptyInstEnv insts, concat famInsts) -- | Find instances visible from the given set of imports hptInstancesBelow :: HscEnv -> UnitId -> ModuleNameWithIsBoot -> (InstEnv, [FamInst]) hptInstancesBelow hsc_env uid mnwib = let mn = gwib_mod mnwib (insts, famInsts) = unzip $ hptSomeThingsBelowUs (\mod_info -> let details = hm_details mod_info -- Don't include instances for the current module in if moduleName (mi_module (hm_iface mod_info)) == mn then [] else [(md_insts details, md_fam_insts details)]) True -- Include -hi-boot hsc_env uid mnwib in (foldl' unionInstEnv emptyInstEnv insts, concat famInsts) -- | Get rules from modules "below" this one (in the dependency sense) hptRules :: HscEnv -> UnitId -> ModuleNameWithIsBoot -> [CoreRule] hptRules = hptSomeThingsBelowUs (md_rules . hm_details) False -- | Get annotations from modules "below" this one (in the dependency sense) hptAnns :: HscEnv -> Maybe (UnitId, ModuleNameWithIsBoot) -> [Annotation] hptAnns hsc_env (Just (uid, mn)) = hptSomeThingsBelowUs (md_anns . hm_details) False hsc_env uid mn hptAnns hsc_env Nothing = hptAllThings (md_anns . 
hm_details) hsc_env hptAllThings :: (HomeModInfo -> [a]) -> HscEnv -> [a] hptAllThings extract hsc_env = concatMap (concatMap extract . eltsHpt . homeUnitEnv_hpt . snd) (hugElts (hsc_HUG hsc_env)) -- | This function returns all the modules belonging to the home-unit that can -- be reached by following the given dependencies. Additionally, if both the -- boot module and the non-boot module can be reached, it only returns the -- non-boot one. hptModulesBelow :: HscEnv -> UnitId -> ModuleNameWithIsBoot -> Set ModNodeKeyWithUid hptModulesBelow hsc_env uid mn = filtered_mods $ [ mn | NodeKey_Module mn <- modules_below] where td_map = mgTransDeps (hsc_mod_graph hsc_env) modules_below = maybe [] Set.toList $ Map.lookup (NodeKey_Module (ModNodeKeyWithUid mn uid)) td_map filtered_mods = Set.fromDistinctAscList . filter_mods . sort -- IsBoot and NotBoot modules are necessarily consecutive in the sorted list -- (cf Ord instance of GenWithIsBoot). Hence we only have to perform a -- linear sweep with a window of size 2 to remove boot modules for which we -- have the corresponding non-boot. 
filter_mods = \case (r1@(ModNodeKeyWithUid (GWIB m1 b1) uid1) : r2@(ModNodeKeyWithUid (GWIB m2 _) uid2): rs) | m1 == m2 && uid1 == uid2 -> let !r' = case b1 of NotBoot -> r1 IsBoot -> r2 in r' : filter_mods rs | otherwise -> r1 : filter_mods (r2:rs) rs -> rs -- | Get things from modules "below" this one (in the dependency sense) -- C.f Inst.hptInstances hptSomeThingsBelowUs :: (HomeModInfo -> [a]) -> Bool -> HscEnv -> UnitId -> ModuleNameWithIsBoot -> [a] hptSomeThingsBelowUs extract include_hi_boot hsc_env uid mn | isOneShot (ghcMode (hsc_dflags hsc_env)) = [] | otherwise = let hug = hsc_HUG hsc_env in [ thing | -- Find each non-hi-boot module below me (ModNodeKeyWithUid (GWIB { gwib_mod = mod, gwib_isBoot = is_boot }) mod_uid) <- Set.toList (hptModulesBelow hsc_env uid mn) , include_hi_boot || (is_boot == NotBoot) -- unsavoury: when compiling the base package with --make, we -- sometimes try to look up RULES etc for GHC.Prim. GHC.Prim won't -- be in the HPT, because we never compile it; it's in the EPT -- instead. ToDo: clean up, and remove this slightly bogus filter: , mod /= moduleName gHC_PRIM , not (mod == gwib_mod mn && uid == mod_uid) -- Look it up in the HPT , let things = case lookupHug hug mod_uid mod of Just info -> extract info Nothing -> pprTrace "WARNING in hptSomeThingsBelowUs" msg mempty msg = vcat [text "missing module" <+> ppr mod, text "When starting from" <+> ppr mn, text "below:" <+> ppr (hptModulesBelow hsc_env uid mn), text "Probable cause: out-of-date interface files"] -- This really shouldn't happen, but see #962 , thing <- things ] -- | Deal with gathering annotations in from all possible places -- and combining them into a single 'AnnEnv' prepareAnnotations :: HscEnv -> Maybe ModGuts -> IO AnnEnv prepareAnnotations hsc_env mb_guts = do eps <- hscEPS hsc_env let -- Extract annotations from the module being compiled if supplied one mb_this_module_anns = fmap (mkAnnEnv . 
mg_anns) mb_guts -- Extract dependencies of the module if we are supplied one, -- otherwise load annotations from all home package table -- entries regardless of dependency ordering. get_mod mg = (moduleUnitId (mg_module mg), GWIB (moduleName (mg_module mg)) NotBoot) home_pkg_anns = (mkAnnEnv . hptAnns hsc_env) $ fmap get_mod mb_guts other_pkg_anns = eps_ann_env eps ann_env = foldl1' plusAnnEnv $ catMaybes [mb_this_module_anns, Just home_pkg_anns, Just other_pkg_anns] return ann_env -- | Find the 'TyThing' for the given 'Name' by using all the resources -- at our disposal: the compiled modules in the 'HomePackageTable' and the -- compiled modules in other packages that live in 'PackageTypeEnv'. Note -- that this does NOT look up the 'TyThing' in the module being compiled: you -- have to do that yourself, if desired lookupType :: HscEnv -> Name -> IO (Maybe TyThing) lookupType hsc_env name = do eps <- liftIO $ hscEPS hsc_env let pte = eps_PTE eps hpt = hsc_HUG hsc_env mod = assertPpr (isExternalName name) (ppr name) $ if isHoleName name then mkHomeModule (hsc_home_unit hsc_env) (moduleName (nameModule name)) else nameModule name !ty = if isOneShot (ghcMode (hsc_dflags hsc_env)) -- in one-shot, we don't use the HPT then lookupNameEnv pte name else case lookupHugByModule mod hpt of Just hm -> lookupNameEnv (md_types (hm_details hm)) name Nothing -> lookupNameEnv pte name pure ty -- | Find the 'ModIface' for a 'Module', searching in both the loaded home -- and external package module information lookupIfaceByModule :: HomeUnitGraph -> PackageIfaceTable -> Module -> Maybe ModIface lookupIfaceByModule hug pit mod = case lookupHugByModule mod hug of Just hm -> Just (hm_iface hm) Nothing -> lookupModuleEnv pit mod -- If the module does come from the home package, why do we look in the PIT as well? 
-- (a) In OneShot mode, even home-package modules accumulate in the PIT -- (b) Even in Batch (--make) mode, there is *one* case where a home-package -- module is in the PIT, namely GHC.Prim when compiling the base package. -- We could eliminate (b) if we wanted, by making GHC.Prim belong to a package -- of its own, but it doesn't seem worth the bother. mainModIs :: HomeUnitEnv -> Module mainModIs hue = mkHomeModule (expectJust "mainModIs" $ homeUnitEnv_home_unit hue) (mainModuleNameIs (homeUnitEnv_dflags hue)) -- | Retrieve the target code interpreter -- -- Fails if no target code interpreter is available hscInterp :: HscEnv -> Interp hscInterp hsc_env = case hsc_interp hsc_env of Nothing -> throw (InstallationError "Couldn't find a target code interpreter. Try with -fexternal-interpreter") Just i -> i -- | Update the LogFlags of the Log in hsc_logger from the DynFlags in -- hsc_dflags. You need to call this when DynFlags are modified. hscUpdateLoggerFlags :: HscEnv -> HscEnv hscUpdateLoggerFlags h = h { hsc_logger = setLogFlags (hsc_logger h) (initLogFlags (hsc_dflags h)) } -- | Update Flags hscUpdateFlags :: (DynFlags -> DynFlags) -> HscEnv -> HscEnv hscUpdateFlags f h = hscSetFlags (f (hsc_dflags h)) h -- | Set Flags hscSetFlags :: HasDebugCallStack => DynFlags -> HscEnv -> HscEnv hscSetFlags dflags h = hscUpdateLoggerFlags $ h { hsc_dflags = dflags , hsc_unit_env = ue_setFlags dflags (hsc_unit_env h) } -- See Note [Multiple Home Units] hscSetActiveHomeUnit :: HasDebugCallStack => HomeUnit -> HscEnv -> HscEnv hscSetActiveHomeUnit home_unit = hscSetActiveUnitId (homeUnitId home_unit) hscSetActiveUnitId :: HasDebugCallStack => UnitId -> HscEnv -> HscEnv hscSetActiveUnitId uid e = e { hsc_unit_env = ue_setActiveUnit uid (hsc_unit_env e) , hsc_dflags = ue_unitFlags uid (hsc_unit_env e) } hscActiveUnitId :: HscEnv -> UnitId hscActiveUnitId e = ue_currentUnit (hsc_unit_env e) -- | Discard the contents of the InteractiveContext, but keep the DynFlags and -- the loaded 
plugins. It will also keep ic_int_print and ic_monad if their -- names are from external packages. discardIC :: HscEnv -> HscEnv discardIC hsc_env = hsc_env { hsc_IC = empty_ic { ic_int_print = new_ic_int_print , ic_monad = new_ic_monad , ic_plugins = old_plugins } } where -- Force the new values for ic_int_print and ic_monad to avoid leaking old_ic !new_ic_int_print = keep_external_name ic_int_print !new_ic_monad = keep_external_name ic_monad !old_plugins = ic_plugins old_ic dflags = ic_dflags old_ic old_ic = hsc_IC hsc_env empty_ic = emptyInteractiveContext dflags keep_external_name ic_name | nameIsFromExternalPackage home_unit old_name = old_name | otherwise = ic_name empty_ic where home_unit = hsc_home_unit hsc_env old_name = ic_name old_ic ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Env/KnotVars.hs0000644000000000000000000001056614472400112021731 0ustar0000000000000000{-# LANGUAGE DeriveFunctor #-} -- | This data structure holds an updateable environment which is used -- when compiling module loops. module GHC.Driver.Env.KnotVars( KnotVars(..) , emptyKnotVars , knotVarsFromModuleEnv , knotVarElems , lookupKnotVars , knotVarsWithout ) where import GHC.Prelude import GHC.Unit.Types ( Module ) import GHC.Unit.Module.Env import Data.Maybe import GHC.Utils.Outputable -- See Note [Why is KnotVars not a ModuleEnv] -- See Note [KnotVars invariants] data KnotVars a = KnotVars { kv_domain :: [Module] -- Domain of the function , Note [KnotVars: Why store the domain?] 
-- Invariant: kv_lookup is surjective relative to kv_domain , kv_lookup :: Module -> Maybe a -- Lookup function } | NoKnotVars deriving Functor instance Outputable (KnotVars a) where ppr NoKnotVars = text "NoKnot" ppr (KnotVars dom _lookup) = text "Knotty:" <+> ppr dom emptyKnotVars :: KnotVars a emptyKnotVars = NoKnotVars knotVarsFromModuleEnv :: ModuleEnv a -> KnotVars a knotVarsFromModuleEnv me | isEmptyModuleEnv me = NoKnotVars knotVarsFromModuleEnv me = KnotVars (moduleEnvKeys me) (lookupModuleEnv me) knotVarElems :: KnotVars a -> [a] knotVarElems (KnotVars keys lookup) = mapMaybe lookup keys knotVarElems NoKnotVars = [] lookupKnotVars :: KnotVars a -> Module -> Maybe a lookupKnotVars (KnotVars _ lookup) x = lookup x lookupKnotVars NoKnotVars _ = Nothing knotVarsWithout :: Module -> KnotVars a -> KnotVars a knotVarsWithout this_mod (KnotVars loop_mods lkup) = KnotVars (filter (/= this_mod) loop_mods) (\that_mod -> if that_mod == this_mod then Nothing else lkup that_mod) knotVarsWithout _ NoKnotVars = NoKnotVars {- Note [Why is KnotVars not a ModuleEnv] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Initially 'KnotVars' was just a 'ModuleEnv a' but there is one tricky use of the data structure in 'mkDsEnvs' which required this generalised structure. In interactive mode the TypeEnvs from all the previous statements are merged togethed into one big TypeEnv. 'dsLookupVar' relies on `tcIfaceVar'. The normal lookup functions either look in the HPT or EPS but there is no entry for the `Ghci` modules in either, so the whole merged TypeEnv for all previous Ghci* is stored in the `if_rec_types` variable and then lookup checks there in the case of any interactive module. This is a misuse of the `if_rec_types` variable which might be fixed in future if the Ghci modules are just placed into the HPT like normal modules with implicit imports between them. Note [KnotVars: Why store the domain?] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Normally there's a 'Module' at hand to tell us which 'TypeEnv' we want to interrogate at a particular time, apart from one case, when constructing the in-scope set when linting an unfolding. In this case the whole environemnt is needed to tell us everything that's in-scope at top-level in the loop because whilst we are linting unfoldings the top-level identifiers from modules in the cycle might not be globalised properly yet. This could be refactored so that the lint functions knew about 'KnotVars' and delayed this check until deciding whether a variable was local or not. Note [KnotVars invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~ There is a simple invariant which should hold for the KnotVars constructor: * At the end of upsweep, there should be no live KnotVars This invariant is difficult to test but easy to check using ghc-debug. The usage of NoKnotVars is intended to make this invariant easier to check. The most common situation where a KnotVars is retained accidently is if a HscEnv which contains reference to a KnotVars is used during interface file loading. The thunks created during this process will retain a reference to the KnotVars. In theory, all these references should be removed by 'maybeRehydrateAfter' as that rehydrates all interface files in the loop without using KnotVars. At the time of writing (MP: Oct 21) the invariant doesn't actually hold but also doesn't seem to have too much of a negative consequence on compiler residency. In theory it could be quite bad as each KnotVars may retain a stale reference to an entire TypeEnv. See #20491 -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Env/Types.hs0000644000000000000000000000751114472400112021262 0ustar0000000000000000{-# LANGUAGE DerivingVia #-} module GHC.Driver.Env.Types ( Hsc(..) , HscEnv(..) 
) where import GHC.Driver.Errors.Types ( GhcMessage ) import {-# SOURCE #-} GHC.Driver.Hooks import GHC.Driver.Session ( ContainsDynFlags(..), HasDynFlags(..), DynFlags ) import GHC.Prelude import GHC.Runtime.Context import GHC.Runtime.Interpreter.Types ( Interp ) import GHC.Types.Error ( Messages ) import GHC.Types.Name.Cache import GHC.Types.Target import GHC.Types.TypeEnv import GHC.Unit.Finder.Types import GHC.Unit.Module.Graph import GHC.Unit.Env import GHC.Utils.Logger import GHC.Utils.TmpFs import {-# SOURCE #-} GHC.Driver.Plugins import Control.Monad.IO.Class import Control.Monad.Trans.Reader import Control.Monad.Trans.State import Data.IORef import GHC.Driver.Env.KnotVars -- | The Hsc monad: Passing an environment and diagnostic state newtype Hsc a = Hsc (HscEnv -> Messages GhcMessage -> IO (a, Messages GhcMessage)) deriving (Functor, Applicative, Monad, MonadIO) via ReaderT HscEnv (StateT (Messages GhcMessage) IO) instance HasDynFlags Hsc where getDynFlags = Hsc $ \e w -> return (hsc_dflags e, w) instance ContainsDynFlags HscEnv where extractDynFlags h = hsc_dflags h instance HasLogger Hsc where getLogger = Hsc $ \e w -> return (hsc_logger e, w) -- | HscEnv is like 'GHC.Driver.Monad.Session', except that some of the fields are immutable. -- An HscEnv is used to compile a single module from plain Haskell source -- code (after preprocessing) to either C, assembly or C--. It's also used -- to store the dynamic linker state to allow for multiple linkers in the -- same address space. -- Things like the module graph don't change during a single compilation. -- -- Historical note: \"hsc\" used to be the name of the compiler binary, -- when there was a separate driver and compiler. To compile a single -- module, the driver would invoke hsc on the source code... so nowadays -- we think of hsc as the layer of the compiler that deals with compiling -- a single module. 
data HscEnv = HscEnv { hsc_dflags :: DynFlags, -- ^ The dynamic flag settings hsc_targets :: [Target], -- ^ The targets (or roots) of the current session hsc_mod_graph :: ModuleGraph, -- ^ The module graph of the current session hsc_IC :: InteractiveContext, -- ^ The context for evaluating interactive statements hsc_NC :: {-# UNPACK #-} !NameCache, -- ^ Global Name cache so that each Name gets a single Unique. -- Also track the origin of the Names. hsc_FC :: {-# UNPACK #-} !FinderCache, -- ^ The cached result of performing finding in the file system hsc_type_env_vars :: KnotVars (IORef TypeEnv) -- ^ Used for one-shot compilation only, to initialise -- the 'IfGblEnv'. See 'GHC.Tc.Utils.tcg_type_env_var' for -- 'GHC.Tc.Utils.TcGblEnv'. See also Note [hsc_type_env_var hack] , hsc_interp :: Maybe Interp -- ^ target code interpreter (if any) to use for TH and GHCi. -- See Note [Target code interpreter] , hsc_plugins :: !Plugins -- ^ Plugins , hsc_unit_env :: UnitEnv -- ^ Unit environment (unit state, home unit, etc.). -- -- Initialized from the databases cached in 'hsc_unit_dbs' and -- from the DynFlags. , hsc_logger :: !Logger -- ^ Logger with its flags. -- -- Don't forget to update the logger flags if the logging -- related DynFlags change. Or better, use hscSetFlags setter -- which does it. 
, hsc_hooks :: !Hooks -- ^ Hooks , hsc_tmpfs :: !TmpFs -- ^ Temporary files } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Errors.hs0000644000000000000000000000553414472400112020705 0ustar0000000000000000module GHC.Driver.Errors ( printOrThrowDiagnostics , printMessages , handleFlagWarnings , mkDriverPsHeaderMessage ) where import GHC.Driver.Errors.Types import GHC.Data.Bag import GHC.Prelude import GHC.Types.SrcLoc import GHC.Types.SourceError import GHC.Types.Error import GHC.Utils.Error import GHC.Utils.Outputable (hang, ppr, ($$), SDocContext, text, withPprStyle, mkErrStyle, sdocStyle ) import GHC.Utils.Logger import qualified GHC.Driver.CmdLine as CmdLine printMessages :: Diagnostic a => Logger -> DiagOpts -> Messages a -> IO () printMessages logger opts msgs = sequence_ [ let style = mkErrStyle unqual ctx = (diag_ppr_ctx opts) { sdocStyle = style } in logMsg logger (MCDiagnostic sev . diagnosticReason $ dia) s $ withPprStyle style (messageWithHints ctx dia) | MsgEnvelope { errMsgSpan = s, errMsgDiagnostic = dia, errMsgSeverity = sev, errMsgContext = unqual } <- sortMsgBag (Just opts) (getMessages msgs) ] where messageWithHints :: Diagnostic a => SDocContext -> a -> SDoc messageWithHints ctx e = let main_msg = formatBulleted ctx $ diagnosticMessage e in case diagnosticHints e of [] -> main_msg [h] -> main_msg $$ hang (text "Suggested fix:") 2 (ppr h) hs -> main_msg $$ hang (text "Suggested fixes:") 2 (formatBulleted ctx . mkDecorated . map ppr $ hs) handleFlagWarnings :: Logger -> DiagOpts -> [CmdLine.Warn] -> IO () handleFlagWarnings logger opts warns = do let -- It would be nicer if warns :: [Located SDoc], but that -- has circular import problems. 
bag = listToBag [ mkPlainMsgEnvelope opts loc $ GhcDriverMessage $ DriverUnknownMessage $ mkPlainDiagnostic reason noHints $ text warn | CmdLine.Warn reason (L loc warn) <- warns ] printOrThrowDiagnostics logger opts (mkMessages bag) -- | Given a bag of diagnostics, turn them into an exception if -- any has 'SevError', or print them out otherwise. printOrThrowDiagnostics :: Logger -> DiagOpts -> Messages GhcMessage -> IO () printOrThrowDiagnostics logger opts msgs | errorsOrFatalWarningsFound msgs = throwErrors msgs | otherwise = printMessages logger opts msgs -- | Convert a 'PsError' into a wrapped 'DriverMessage'; use it -- for dealing with parse errors when the driver is doing dependency analysis. -- Defined here to avoid module loops between GHC.Driver.Error.Types and -- GHC.Driver.Error.Ppr mkDriverPsHeaderMessage :: MsgEnvelope PsMessage -> MsgEnvelope DriverMessage mkDriverPsHeaderMessage = fmap DriverPsHeaderMessage ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Errors/Ppr.hs0000644000000000000000000002644414472400112021451 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic {DriverMessage, GhcMessage} module GHC.Driver.Errors.Ppr where import GHC.Prelude import GHC.Driver.Errors.Types import GHC.Driver.Flags import GHC.Driver.Session import GHC.HsToCore.Errors.Ppr () import GHC.Parser.Errors.Ppr () import GHC.Tc.Errors.Ppr () import GHC.Types.Error import GHC.Unit.Types import GHC.Utils.Outputable import GHC.Unit.Module import GHC.Unit.State import GHC.Types.Hint import GHC.Types.SrcLoc import Data.Version import Language.Haskell.Syntax.Decls (RuleDecl(..)) -- -- Suggestions -- -- | Suggests a list of 'InstantiationSuggestion' for the '.hsig' file to the user. 
suggestInstantiatedWith :: ModuleName -> GenInstantiations UnitId -> [InstantiationSuggestion] suggestInstantiatedWith pi_mod_name insts = [ InstantiationSuggestion k v | (k,v) <- ((pi_mod_name, mkHoleModule pi_mod_name) : insts) ] instance Diagnostic GhcMessage where diagnosticMessage = \case GhcPsMessage m -> diagnosticMessage m GhcTcRnMessage m -> diagnosticMessage m GhcDsMessage m -> diagnosticMessage m GhcDriverMessage m -> diagnosticMessage m GhcUnknownMessage m -> diagnosticMessage m diagnosticReason = \case GhcPsMessage m -> diagnosticReason m GhcTcRnMessage m -> diagnosticReason m GhcDsMessage m -> diagnosticReason m GhcDriverMessage m -> diagnosticReason m GhcUnknownMessage m -> diagnosticReason m diagnosticHints = \case GhcPsMessage m -> diagnosticHints m GhcTcRnMessage m -> diagnosticHints m GhcDsMessage m -> diagnosticHints m GhcDriverMessage m -> diagnosticHints m GhcUnknownMessage m -> diagnosticHints m instance Diagnostic DriverMessage where diagnosticMessage = \case DriverUnknownMessage m -> diagnosticMessage m DriverPsHeaderMessage m -> diagnosticMessage m DriverMissingHomeModules uid missing buildingCabalPackage -> let msg | buildingCabalPackage == YesBuildingCabalPackage = hang (text "These modules are needed for compilation but not listed in your .cabal file's other-modules for" <+> quotes (ppr uid) <+> text ":") 4 (sep (map ppr missing)) | otherwise = hang (text "Modules are not listed in options for" <+> quotes (ppr uid) <+> text "but needed for compilation:") 4 (sep (map ppr missing)) in mkSimpleDecorated msg DriverUnknownHiddenModules uid missing -> let msg = hang (text "Modules are listed as hidden in options for" <+> quotes (ppr uid) <+> text "but not part of the unit:") 4 (sep (map ppr missing)) in mkSimpleDecorated msg DriverUnknownReexportedModules uid missing -> let msg = hang (text "Modules are listed as reexported in options for" <+> quotes (ppr uid) <+> text "but can't be found in any dependency:") 4 (sep (map ppr missing)) in 
mkSimpleDecorated msg DriverUnusedPackages unusedArgs -> let msg = vcat [ text "The following packages were specified" <+> text "via -package or -package-id flags," , text "but were not needed for compilation:" , nest 2 (vcat (map (withDash . displayOneUnused) unusedArgs)) ] in mkSimpleDecorated msg where withDash :: SDoc -> SDoc withDash = (<+>) (text "-") displayOneUnused (_uid, pn , v, f) = ppr pn <> text "-" <> text (showVersion v) <+> parens (suffix f) suffix f = text "exposed by flag" <+> pprUnusedArg f pprUnusedArg :: PackageArg -> SDoc pprUnusedArg (PackageArg str) = text "-package" <+> text str pprUnusedArg (UnitIdArg uid) = text "-package-id" <+> ppr uid DriverUnnecessarySourceImports mod -> mkSimpleDecorated (text "{-# SOURCE #-} unnecessary in import of " <+> quotes (ppr mod)) DriverDuplicatedModuleDeclaration mod files -> mkSimpleDecorated $ text "module" <+> quotes (ppr mod) <+> text "is defined in multiple files:" <+> sep (map text files) DriverModuleNotFound mod -> mkSimpleDecorated (text "module" <+> quotes (ppr mod) <+> text "cannot be found locally") DriverFileModuleNameMismatch actual expected -> mkSimpleDecorated $ text "File name does not match module name:" $$ text "Saw :" <+> quotes (ppr actual) $$ text "Expected:" <+> quotes (ppr expected) DriverUnexpectedSignature pi_mod_name _buildingCabalPackage _instantiations -> mkSimpleDecorated $ text "Unexpected signature:" <+> quotes (ppr pi_mod_name) DriverFileNotFound hsFilePath -> mkSimpleDecorated (text "Can't find" <+> text hsFilePath) DriverStaticPointersNotSupported -> mkSimpleDecorated (text "StaticPointers is not supported in GHCi interactive expressions.") DriverBackpackModuleNotFound modname -> mkSimpleDecorated (text "module" <+> ppr modname <+> text "was not found") DriverUserDefinedRuleIgnored (HsRule { rd_name = n }) -> mkSimpleDecorated $ text "Rule \"" <> ftext (snd $ unLoc n) <> text "\" ignored" $+$ text "User defined rules are disabled under Safe Haskell" DriverMixedSafetyImport 
modName -> mkSimpleDecorated $ text "Module" <+> ppr modName <+> text ("is imported both as a safe and unsafe import!") DriverCannotLoadInterfaceFile m -> mkSimpleDecorated $ text "Can't load the interface file for" <+> ppr m <> text ", to check that it can be safely imported" DriverInferredSafeModule m -> mkSimpleDecorated $ quotes (ppr $ moduleName m) <+> text "has been inferred as safe!" DriverInferredSafeImport m -> mkSimpleDecorated $ sep [ text "Importing Safe-Inferred module " <> ppr (moduleName m) <> text " from explicitly Safe module" ] DriverMarkedTrustworthyButInferredSafe m -> mkSimpleDecorated $ quotes (ppr $ moduleName m) <+> text "is marked as Trustworthy but has been inferred as safe!" DriverCannotImportUnsafeModule m -> mkSimpleDecorated $ sep [ ppr (moduleName m) <> text ": Can't be safely imported!" , text "The module itself isn't safe." ] DriverMissingSafeHaskellMode modName -> mkSimpleDecorated $ ppr modName <+> text "is missing Safe Haskell mode" DriverPackageNotTrusted state pkg -> mkSimpleDecorated $ pprWithUnitState state $ text "The package (" <> ppr pkg <> text ") is required to be trusted but it isn't!" DriverCannotImportFromUntrustedPackage state m -> mkSimpleDecorated $ sep [ ppr (moduleName m) <> text ": Can't be safely imported!" , text "The package (" <> (pprWithUnitState state $ ppr (moduleUnit m)) <> text ") the module resides in isn't trusted." ] DriverRedirectedNoMain mod_name -> mkSimpleDecorated $ (text ("Output was redirected with -o, " ++ "but no output will be generated.") $$ (text "There is no module named" <+> quotes (ppr mod_name) <> text ".")) DriverHomePackagesNotClosed needed_unit_ids -> mkSimpleDecorated $ vcat ([text "Home units are not closed." 
, text "It is necessary to also load the following units:" ] ++ map (\uid -> text "-" <+> ppr uid) needed_unit_ids) diagnosticReason = \case DriverUnknownMessage m -> diagnosticReason m DriverPsHeaderMessage {} -> ErrorWithoutFlag DriverMissingHomeModules{} -> WarningWithFlag Opt_WarnMissingHomeModules DriverUnknownHiddenModules {} -> ErrorWithoutFlag DriverUnknownReexportedModules {} -> ErrorWithoutFlag DriverUnusedPackages{} -> WarningWithFlag Opt_WarnUnusedPackages DriverUnnecessarySourceImports{} -> WarningWithFlag Opt_WarnUnusedImports DriverDuplicatedModuleDeclaration{} -> ErrorWithoutFlag DriverModuleNotFound{} -> ErrorWithoutFlag DriverFileModuleNameMismatch{} -> ErrorWithoutFlag DriverUnexpectedSignature{} -> ErrorWithoutFlag DriverFileNotFound{} -> ErrorWithoutFlag DriverStaticPointersNotSupported -> WarningWithoutFlag DriverBackpackModuleNotFound{} -> ErrorWithoutFlag DriverUserDefinedRuleIgnored{} -> WarningWithoutFlag DriverMixedSafetyImport{} -> ErrorWithoutFlag DriverCannotLoadInterfaceFile{} -> ErrorWithoutFlag DriverInferredSafeModule{} -> WarningWithFlag Opt_WarnSafe DriverMarkedTrustworthyButInferredSafe{} ->WarningWithFlag Opt_WarnTrustworthySafe DriverInferredSafeImport{} -> WarningWithFlag Opt_WarnInferredSafeImports DriverCannotImportUnsafeModule{} -> ErrorWithoutFlag DriverMissingSafeHaskellMode{} -> WarningWithFlag Opt_WarnMissingSafeHaskellMode DriverPackageNotTrusted{} -> ErrorWithoutFlag DriverCannotImportFromUntrustedPackage{} -> ErrorWithoutFlag DriverRedirectedNoMain {} -> ErrorWithoutFlag DriverHomePackagesNotClosed {} -> ErrorWithoutFlag diagnosticHints = \case DriverUnknownMessage m -> diagnosticHints m DriverPsHeaderMessage psMsg -> diagnosticHints psMsg DriverMissingHomeModules{} -> noHints DriverUnknownHiddenModules {} -> noHints DriverUnknownReexportedModules {} -> noHints DriverUnusedPackages{} -> noHints DriverUnnecessarySourceImports{} -> noHints DriverDuplicatedModuleDeclaration{} -> noHints DriverModuleNotFound{} -> 
noHints DriverFileModuleNameMismatch{} -> noHints DriverUnexpectedSignature pi_mod_name buildingCabalPackage instantiations -> if buildingCabalPackage == YesBuildingCabalPackage then [SuggestAddSignatureCabalFile pi_mod_name] else [SuggestSignatureInstantiations pi_mod_name (suggestInstantiatedWith pi_mod_name instantiations)] DriverFileNotFound{} -> noHints DriverStaticPointersNotSupported -> noHints DriverBackpackModuleNotFound{} -> noHints DriverUserDefinedRuleIgnored{} -> noHints DriverMixedSafetyImport{} -> noHints DriverCannotLoadInterfaceFile{} -> noHints DriverInferredSafeModule{} -> noHints DriverInferredSafeImport{} -> noHints DriverCannotImportUnsafeModule{} -> noHints DriverMissingSafeHaskellMode{} -> noHints DriverPackageNotTrusted{} -> noHints DriverMarkedTrustworthyButInferredSafe{} -> noHints DriverCannotImportFromUntrustedPackage{} -> noHints DriverRedirectedNoMain {} -> noHints DriverHomePackagesNotClosed {} -> noHints ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Errors/Types.hs0000644000000000000000000003303314472400112022004 0ustar0000000000000000{-# LANGUAGE GADTs #-} module GHC.Driver.Errors.Types ( GhcMessage(..) , DriverMessage(..), DriverMessages, PsMessage(PsHeaderMessage) , BuildingCabalPackage(..) , WarningMessages , ErrorMessages , WarnMsg -- * Constructors , ghcUnknownMessage -- * Utility functions , hoistTcRnMessage , hoistDsMessage , checkBuildingCabalPackage ) where import GHC.Prelude import Data.Bifunctor import Data.Typeable import GHC.Driver.Session import GHC.Types.Error import GHC.Unit.Module import GHC.Unit.State import GHC.Parser.Errors.Types ( PsMessage(PsHeaderMessage) ) import GHC.Tc.Errors.Types ( TcRnMessage ) import GHC.HsToCore.Errors.Types ( DsMessage ) import GHC.Hs.Extension (GhcTc) import Language.Haskell.Syntax.Decls (RuleDecl) -- | A collection of warning messages. -- /INVARIANT/: Each 'GhcMessage' in the collection should have 'SevWarning' severity. 
type WarningMessages = Messages GhcMessage -- | A collection of error messages. -- /INVARIANT/: Each 'GhcMessage' in the collection should have 'SevError' severity. type ErrorMessages = Messages GhcMessage -- | A single warning message. -- /INVARIANT/: It must have 'SevWarning' severity. type WarnMsg = MsgEnvelope GhcMessage {- Note [GhcMessage] ~~~~~~~~~~~~~~~~~~~~ We might need to report diagnostics (error and/or warnings) to the users. The 'GhcMessage' type is the root of the diagnostic hierarchy. It's useful to have a separate type constructor for the different stages of the compilation pipeline. This is not just helpful for tools, as it gives a clear indication on where the error occurred exactly. Furthermore it increases the modularity amongst the different components of GHC (i.e. to avoid having "everything depend on everything else") and allows us to write separate functions that renders the different kind of messages. -} -- | The umbrella type that encompasses all the different messages that GHC -- might output during the different compilation stages. See -- Note [GhcMessage]. data GhcMessage where -- | A message from the parsing phase. GhcPsMessage :: PsMessage -> GhcMessage -- | A message from typecheck/renaming phase. GhcTcRnMessage :: TcRnMessage -> GhcMessage -- | A message from the desugaring (HsToCore) phase. GhcDsMessage :: DsMessage -> GhcMessage -- | A message from the driver. GhcDriverMessage :: DriverMessage -> GhcMessage -- | An \"escape\" hatch which can be used when we don't know the source of -- the message or if the message is not one of the typed ones. The -- 'Diagnostic' and 'Typeable' constraints ensure that if we /know/, at -- pattern-matching time, the originating type, we can attempt a cast and -- access the fully-structured error. This would be the case for a GHC -- plugin that offers a domain-specific error type but that doesn't want to -- place the burden on IDEs/application code to \"know\" it. 
The -- 'Diagnostic' constraint ensures that worst case scenario we can still -- render this into something which can be eventually converted into a -- 'DecoratedSDoc'. GhcUnknownMessage :: forall a. (Diagnostic a, Typeable a) => a -> GhcMessage -- | Creates a new 'GhcMessage' out of any diagnostic. This function is also -- provided to ease the integration of #18516 by allowing diagnostics to be -- wrapped into the general (but structured) 'GhcMessage' type, so that the -- conversion can happen gradually. This function should not be needed within -- GHC, as it would typically be used by plugin or library authors (see -- comment for the 'GhcUnknownMessage' type constructor) ghcUnknownMessage :: (Diagnostic a, Typeable a) => a -> GhcMessage ghcUnknownMessage = GhcUnknownMessage -- | Abstracts away the frequent pattern where we are calling 'ioMsgMaybe' on -- the result of 'IO (Messages TcRnMessage, a)'. hoistTcRnMessage :: Monad m => m (Messages TcRnMessage, a) -> m (Messages GhcMessage, a) hoistTcRnMessage = fmap (first (fmap GhcTcRnMessage)) -- | Abstracts away the frequent pattern where we are calling 'ioMsgMaybe' on -- the result of 'IO (Messages DsMessage, a)'. hoistDsMessage :: Monad m => m (Messages DsMessage, a) -> m (Messages GhcMessage, a) hoistDsMessage = fmap (first (fmap GhcDsMessage)) -- | A collection of driver messages type DriverMessages = Messages DriverMessage -- | A message from the driver. data DriverMessage where -- | Simply wraps a generic 'Diagnostic' message @a@. DriverUnknownMessage :: (Diagnostic a, Typeable a) => a -> DriverMessage -- | A parse error in parsing a Haskell file header during dependency -- analysis DriverPsHeaderMessage :: !PsMessage -> DriverMessage {-| DriverMissingHomeModules is a warning (controlled with -Wmissing-home-modules) that arises when running GHC in --make mode when some modules needed for compilation are not included on the command line. 
For example, if A imports B, `ghc --make A.hs` will cause this warning, while `ghc --make A.hs B.hs` will not. Useful for cabal to ensure GHC won't pick up modules listed neither in 'exposed-modules' nor in 'other-modules'. Test case: warnings/should_compile/MissingMod -} DriverMissingHomeModules :: UnitId -> [ModuleName] -> !BuildingCabalPackage -> DriverMessage {-| DriverUnknown is a warning that arises when a user tries to reexport a module which isn't part of that unit. -} DriverUnknownReexportedModules :: UnitId -> [ModuleName] -> DriverMessage {-| DriverUnknownHiddenModules is a warning that arises when a user tries to hide a module which isn't part of that unit. -} DriverUnknownHiddenModules :: UnitId -> [ModuleName] -> DriverMessage {-| DriverUnusedPackages occurs when when package is requested on command line, but was never needed during compilation. Activated by -Wunused-packages. Test cases: warnings/should_compile/UnusedPackages -} DriverUnusedPackages :: [(UnitId, PackageName, Version, PackageArg)] -> DriverMessage {-| DriverUnnecessarySourceImports (controlled with -Wunused-imports) occurs if there are {-# SOURCE #-} imports which are not necessary. See 'warnUnnecessarySourceImports' in 'GHC.Driver.Make'. Test cases: warnings/should_compile/T10637 -} DriverUnnecessarySourceImports :: !ModuleName -> DriverMessage {-| DriverDuplicatedModuleDeclaration occurs if a module 'A' is declared in multiple files. Test cases: None. -} DriverDuplicatedModuleDeclaration :: !Module -> [FilePath] -> DriverMessage {-| DriverModuleNotFound occurs if a module 'A' can't be found. Test cases: None. -} DriverModuleNotFound :: !ModuleName -> DriverMessage {-| DriverFileModuleNameMismatch occurs if a module 'A' is defined in a file with a different name. The first field is the name written in the source code; the second argument is the name extracted from the filename. 
Test cases: module/mod178, /driver/bug1677 -} DriverFileModuleNameMismatch :: !ModuleName -> !ModuleName -> DriverMessage {-| DriverUnexpectedSignature occurs when GHC encounters a module 'A' that imports a signature file which is neither in the 'signatures' section of a '.cabal' file nor in any package in the home modules. Example: -- MyStr.hsig is defined, but not added to 'signatures' in the '.cabal' file. signature MyStr where data Str -- A.hs, which tries to import the signature. module A where import MyStr Test cases: driver/T12955 -} DriverUnexpectedSignature :: !ModuleName -> !BuildingCabalPackage -> GenInstantiations UnitId -> DriverMessage {-| DriverFileNotFound occurs when the input file (e.g. given on the command line) can't be found. Test cases: None. -} DriverFileNotFound :: !FilePath -> DriverMessage {-| DriverStaticPointersNotSupported occurs when the 'StaticPointers' extension is used in an interactive GHCi context. Test cases: ghci/scripts/StaticPtr -} DriverStaticPointersNotSupported :: DriverMessage {-| DriverBackpackModuleNotFound occurs when Backpack can't find a particular module during its dependency analysis. Test cases: - -} DriverBackpackModuleNotFound :: !ModuleName -> DriverMessage {-| DriverUserDefinedRuleIgnored is a warning that occurs when user-defined rules are ignored. This typically happens when Safe Haskell. Test cases: tests/safeHaskell/safeInfered/UnsafeWarn05 tests/safeHaskell/safeInfered/UnsafeWarn06 tests/safeHaskell/safeInfered/UnsafeWarn07 tests/safeHaskell/safeInfered/UnsafeInfered11 tests/safeHaskell/safeLanguage/SafeLang03 -} DriverUserDefinedRuleIgnored :: !(RuleDecl GhcTc) -> DriverMessage {-| DriverMixedSafetyImport is an error that occurs when a module is imported both as safe and unsafe. 
Test cases: tests/safeHaskell/safeInfered/Mixed03 tests/safeHaskell/safeInfered/Mixed02 -} DriverMixedSafetyImport :: !ModuleName -> DriverMessage {-| DriverCannotLoadInterfaceFile is an error that occurs when we cannot load the interface file for a particular module. This can happen for example in the context of Safe Haskell, when we have to load a module to check if it can be safely imported. Test cases: None. -} DriverCannotLoadInterfaceFile :: !Module -> DriverMessage {-| DriverInferredSafeImport is a warning (controlled by the Opt_WarnSafe flag) that occurs when a module is inferred safe. Test cases: None. -} DriverInferredSafeModule :: !Module -> DriverMessage {-| DriverMarkedTrustworthyButInferredSafe is a warning (controlled by the Opt_WarnTrustworthySafe flag) that occurs when a module is marked trustworthy in SafeHaskell but it has been inferred safe. Test cases: tests/safeHaskell/safeInfered/TrustworthySafe02 tests/safeHaskell/safeInfered/TrustworthySafe03 -} DriverMarkedTrustworthyButInferredSafe :: !Module -> DriverMessage {-| DriverInferredSafeImport is a warning (controlled by the Opt_WarnInferredSafeImports flag) that occurs when a safe-inferred module is imported from a safe module. Test cases: None. -} DriverInferredSafeImport :: !Module -> DriverMessage {-| DriverCannotImportUnsafeModule is an error that occurs when an usafe module is being imported from a safe one. Test cases: None. -} DriverCannotImportUnsafeModule :: !Module -> DriverMessage {-| DriverMissingSafeHaskellMode is a warning (controlled by the Opt_WarnMissingSafeHaskellMode flag) that occurs when a module is using SafeHaskell features but SafeHaskell mode is not enabled. Test cases: None. -} DriverMissingSafeHaskellMode :: !Module -> DriverMessage {-| DriverPackageNotTrusted is an error that occurs when a package is required to be trusted but it isn't. 
Test cases: tests/safeHaskell/check/Check01 tests/safeHaskell/check/Check08 tests/safeHaskell/check/Check06 tests/safeHaskell/check/pkg01/ImpSafeOnly09 tests/safeHaskell/check/pkg01/ImpSafe03 tests/safeHaskell/check/pkg01/ImpSafeOnly07 tests/safeHaskell/check/pkg01/ImpSafeOnly08 -} DriverPackageNotTrusted :: !UnitState -> !UnitId -> DriverMessage {-| DriverCannotImportFromUntrustedPackage is an error that occurs in the context of Safe Haskell when trying to import a module coming from an untrusted package. Test cases: tests/safeHaskell/check/Check09 tests/safeHaskell/check/pkg01/ImpSafe01 tests/safeHaskell/check/pkg01/ImpSafe04 tests/safeHaskell/check/pkg01/ImpSafeOnly03 tests/safeHaskell/check/pkg01/ImpSafeOnly05 tests/safeHaskell/flags/SafeFlags17 tests/safeHaskell/flags/SafeFlags22 tests/safeHaskell/flags/SafeFlags23 tests/safeHaskell/ghci/p11 tests/safeHaskell/ghci/p12 tests/safeHaskell/ghci/p17 tests/safeHaskell/ghci/p3 tests/safeHaskell/safeInfered/UnsafeInfered01 tests/safeHaskell/safeInfered/UnsafeInfered02 tests/safeHaskell/safeInfered/UnsafeInfered02 tests/safeHaskell/safeInfered/UnsafeInfered03 tests/safeHaskell/safeInfered/UnsafeInfered05 tests/safeHaskell/safeInfered/UnsafeInfered06 tests/safeHaskell/safeInfered/UnsafeInfered09 tests/safeHaskell/safeInfered/UnsafeInfered10 tests/safeHaskell/safeInfered/UnsafeInfered11 tests/safeHaskell/safeInfered/UnsafeWarn01 tests/safeHaskell/safeInfered/UnsafeWarn03 tests/safeHaskell/safeInfered/UnsafeWarn04 tests/safeHaskell/safeInfered/UnsafeWarn05 tests/safeHaskell/unsafeLibs/BadImport01 tests/safeHaskell/unsafeLibs/BadImport06 tests/safeHaskell/unsafeLibs/BadImport07 tests/safeHaskell/unsafeLibs/BadImport08 tests/safeHaskell/unsafeLibs/BadImport09 tests/safeHaskell/unsafeLibs/Dep05 tests/safeHaskell/unsafeLibs/Dep06 tests/safeHaskell/unsafeLibs/Dep07 tests/safeHaskell/unsafeLibs/Dep08 tests/safeHaskell/unsafeLibs/Dep09 tests/safeHaskell/unsafeLibs/Dep10 -} DriverCannotImportFromUntrustedPackage :: !UnitState -> 
!Module -> DriverMessage DriverRedirectedNoMain :: !ModuleName -> DriverMessage DriverHomePackagesNotClosed :: ![UnitId] -> DriverMessage -- | Pass to a 'DriverMessage' the information whether or not the -- '-fbuilding-cabal-package' flag is set. data BuildingCabalPackage = YesBuildingCabalPackage | NoBuildingCabalPackage deriving Eq -- | Checks if we are building a cabal package by consulting the 'DynFlags'. checkBuildingCabalPackage :: DynFlags -> BuildingCabalPackage checkBuildingCabalPackage dflags = if gopt Opt_BuildingCabalPackage dflags then YesBuildingCabalPackage else NoBuildingCabalPackage ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Flags.hs0000644000000000000000000007744414472400112020476 0ustar0000000000000000module GHC.Driver.Flags ( DumpFlag(..) , GeneralFlag(..) , Language(..) , optimisationFlags -- * Warnings , WarningFlag(..) , warnFlagNames , warningGroups , warningHierarchies , smallestWarningGroups , standardWarnings , minusWOpts , minusWallOpts , minusWeverythingOpts , minusWcompatOpts , unusedBindsFlags ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Data.EnumSet as EnumSet import Control.Monad (guard) import Data.List.NonEmpty (NonEmpty(..)) import Data.Maybe (fromMaybe,mapMaybe) data Language = Haskell98 | Haskell2010 | GHC2021 deriving (Eq, Enum, Show, Bounded) instance Outputable Language where ppr = text . show instance Binary Language where put_ bh = put_ bh . fromEnum get bh = toEnum <$> get bh -- | Debugging flags data DumpFlag -- See Note [Updating flag description in the User's Guide] -- debugging flags = Opt_D_dump_cmm | Opt_D_dump_cmm_from_stg | Opt_D_dump_cmm_raw | Opt_D_dump_cmm_verbose_by_proc -- All of the cmm subflags (there are a lot!) automatically -- enabled if you run -ddump-cmm-verbose-by-proc -- Each flag corresponds to exact stage of Cmm pipeline. 
| Opt_D_dump_cmm_verbose -- same as -ddump-cmm-verbose-by-proc but writes each stage -- to a separate file (if used with -ddump-to-file) | Opt_D_dump_cmm_cfg | Opt_D_dump_cmm_cbe | Opt_D_dump_cmm_switch | Opt_D_dump_cmm_proc | Opt_D_dump_cmm_sp | Opt_D_dump_cmm_sink | Opt_D_dump_cmm_caf | Opt_D_dump_cmm_procmap | Opt_D_dump_cmm_split | Opt_D_dump_cmm_info | Opt_D_dump_cmm_cps -- end cmm subflags | Opt_D_dump_cfg_weights -- ^ Dump the cfg used for block layout. | Opt_D_dump_asm | Opt_D_dump_asm_native | Opt_D_dump_asm_liveness | Opt_D_dump_asm_regalloc | Opt_D_dump_asm_regalloc_stages | Opt_D_dump_asm_conflicts | Opt_D_dump_asm_stats | Opt_D_dump_c_backend | Opt_D_dump_llvm | Opt_D_dump_core_stats | Opt_D_dump_deriv | Opt_D_dump_ds | Opt_D_dump_ds_preopt | Opt_D_dump_foreign | Opt_D_dump_inlinings | Opt_D_dump_verbose_inlinings | Opt_D_dump_rule_firings | Opt_D_dump_rule_rewrites | Opt_D_dump_simpl_trace | Opt_D_dump_occur_anal | Opt_D_dump_parsed | Opt_D_dump_parsed_ast | Opt_D_dump_rn | Opt_D_dump_rn_ast | Opt_D_dump_simpl | Opt_D_dump_simpl_iterations | Opt_D_dump_spec | Opt_D_dump_prep | Opt_D_dump_late_cc | Opt_D_dump_stg_from_core -- ^ Initial STG (CoreToStg output) | Opt_D_dump_stg_unarised -- ^ STG after unarise | Opt_D_dump_stg_cg -- ^ STG (after stg2stg) | Opt_D_dump_stg_tags -- ^ Result of tag inference analysis. 
| Opt_D_dump_stg_final -- ^ Final STG (before cmm gen) | Opt_D_dump_call_arity | Opt_D_dump_exitify | Opt_D_dump_stranal | Opt_D_dump_str_signatures | Opt_D_dump_cpranal | Opt_D_dump_cpr_signatures | Opt_D_dump_tc | Opt_D_dump_tc_ast | Opt_D_dump_hie | Opt_D_dump_types | Opt_D_dump_rules | Opt_D_dump_cse | Opt_D_dump_worker_wrapper | Opt_D_dump_rn_trace | Opt_D_dump_rn_stats | Opt_D_dump_opt_cmm | Opt_D_dump_simpl_stats | Opt_D_dump_cs_trace -- Constraint solver in type checker | Opt_D_dump_tc_trace | Opt_D_dump_ec_trace -- Pattern match exhaustiveness checker | Opt_D_dump_if_trace | Opt_D_dump_splices | Opt_D_th_dec_file | Opt_D_dump_BCOs | Opt_D_dump_ticked | Opt_D_dump_rtti | Opt_D_source_stats | Opt_D_verbose_stg2stg | Opt_D_dump_hi | Opt_D_dump_hi_diffs | Opt_D_dump_mod_cycles | Opt_D_dump_mod_map | Opt_D_dump_timings | Opt_D_dump_view_pattern_commoning | Opt_D_verbose_core2core | Opt_D_dump_debug | Opt_D_dump_json | Opt_D_ppr_debug | Opt_D_no_debug_output | Opt_D_dump_faststrings | Opt_D_faststring_stats deriving (Eq, Show, Enum) -- | Enumerates the simple on-or-off dynamic flags data GeneralFlag -- See Note [Updating flag description in the User's Guide] = Opt_DumpToFile -- ^ Append dump output to files instead of stdout. 
| Opt_D_dump_minimal_imports | Opt_DoCoreLinting | Opt_DoLinearCoreLinting | Opt_DoStgLinting | Opt_DoCmmLinting | Opt_DoAsmLinting | Opt_DoAnnotationLinting | Opt_DoBoundsChecking | Opt_NoLlvmMangler -- hidden flag | Opt_FastLlvm -- hidden flag | Opt_NoTypeableBinds | Opt_DistinctConstructorTables | Opt_InfoTableMap | Opt_WarnIsError -- -Werror; makes warnings fatal | Opt_ShowWarnGroups -- Show the group a warning belongs to | Opt_HideSourcePaths -- Hide module source/object paths | Opt_PrintExplicitForalls | Opt_PrintExplicitKinds | Opt_PrintExplicitCoercions | Opt_PrintExplicitRuntimeReps | Opt_PrintEqualityRelations | Opt_PrintAxiomIncomps | Opt_PrintUnicodeSyntax | Opt_PrintExpandedSynonyms | Opt_PrintPotentialInstances | Opt_PrintTypecheckerElaboration -- optimisation opts | Opt_CallArity | Opt_Exitification | Opt_Strictness | Opt_LateDmdAnal -- #6087 | Opt_KillAbsence | Opt_KillOneShot | Opt_FullLaziness | Opt_FloatIn | Opt_LateSpecialise | Opt_Specialise | Opt_SpecialiseAggressively | Opt_CrossModuleSpecialise | Opt_InlineGenerics | Opt_InlineGenericsAggressively | Opt_StaticArgumentTransformation | Opt_CSE | Opt_StgCSE | Opt_StgLiftLams | Opt_LiberateCase | Opt_SpecConstr | Opt_SpecConstrKeen | Opt_DoLambdaEtaExpansion | Opt_IgnoreAsserts | Opt_DoEtaReduction | Opt_CaseMerge | Opt_CaseFolding -- Constant folding through case-expressions | Opt_UnboxStrictFields | Opt_UnboxSmallStrictFields | Opt_DictsCheap | Opt_EnableRewriteRules -- Apply rewrite rules during simplification | Opt_EnableThSpliceWarnings -- Enable warnings for TH splices | Opt_RegsGraph -- do graph coloring register allocation | Opt_RegsIterative -- do iterative coalescing graph coloring register allocation | Opt_PedanticBottoms -- Be picky about how we treat bottom | Opt_LlvmTBAA -- Use LLVM TBAA infrastructure for improving AA (hidden flag) | Opt_LlvmFillUndefWithGarbage -- Testing for undef bugs (hidden flag) | Opt_IrrefutableTuples | Opt_CmmSink | Opt_CmmStaticPred | 
Opt_CmmElimCommonBlocks | Opt_CmmControlFlow | Opt_AsmShortcutting | Opt_OmitYields | Opt_FunToThunk -- deprecated | Opt_DictsStrict -- be strict in argument dictionaries | Opt_DmdTxDictSel -- ^ deprecated, no effect and behaviour is now default. -- Allowed switching of a special demand transformer for dictionary selectors | Opt_Loopification -- See Note [Self-recursive tail calls] | Opt_CfgBlocklayout -- ^ Use the cfg based block layout algorithm. | Opt_WeightlessBlocklayout -- ^ Layout based on last instruction per block. | Opt_CprAnal | Opt_WorkerWrapper | Opt_WorkerWrapperUnlift -- ^ Do W/W split for unlifting even if we won't unbox anything. | Opt_SolveConstantDicts | Opt_AlignmentSanitisation | Opt_CatchNonexhaustiveCases | Opt_NumConstantFolding | Opt_CoreConstantFolding | Opt_FastPAPCalls -- #6084 -- Inference flags | Opt_DoTagInferenceChecks -- PreInlining is on by default. The option is there just to see how -- bad things get if you turn it off! | Opt_SimplPreInlining -- Interface files | Opt_IgnoreInterfacePragmas | Opt_OmitInterfacePragmas | Opt_ExposeAllUnfoldings | Opt_WriteInterface -- forces .hi files to be written even with -fno-code | Opt_WriteHie -- generate .hie files -- profiling opts | Opt_AutoSccsOnIndividualCafs | Opt_ProfCountEntries | Opt_ProfLateInlineCcs | Opt_ProfLateCcs | Opt_ProfManualCcs -- ^ Ignore manual SCC annotations -- misc opts | Opt_Pp | Opt_ForceRecomp | Opt_IgnoreOptimChanges | Opt_IgnoreHpcChanges | Opt_ExcessPrecision | Opt_EagerBlackHoling | Opt_NoHsMain | Opt_SplitSections | Opt_StgStats | Opt_HideAllPackages | Opt_HideAllPluginPackages | Opt_PrintBindResult | Opt_Haddock | Opt_HaddockOptions | Opt_BreakOnException | Opt_BreakOnError | Opt_PrintEvldWithShow | Opt_PrintBindContents | Opt_GenManifest | Opt_EmbedManifest | Opt_SharedImplib | Opt_BuildingCabalPackage | Opt_IgnoreDotGhci | Opt_GhciSandbox | Opt_GhciHistory | Opt_GhciLeakCheck | Opt_ValidateHie | Opt_LocalGhciHistory | Opt_NoIt | Opt_HelpfulErrors | 
Opt_DeferTypeErrors -- Since 7.6 | Opt_DeferTypedHoles -- Since 7.10 | Opt_DeferOutOfScopeVariables | Opt_PIC -- ^ @-fPIC@ | Opt_PIE -- ^ @-fPIE@ | Opt_PICExecutable -- ^ @-pie@ | Opt_ExternalDynamicRefs | Opt_Ticky | Opt_Ticky_Allocd | Opt_Ticky_LNE | Opt_Ticky_Dyn_Thunk | Opt_Ticky_Tag | Opt_Ticky_AP -- ^ Use regular thunks even when we could use std ap thunks in order to get entry counts | Opt_RPath | Opt_RelativeDynlibPaths | Opt_CompactUnwind -- ^ @-fcompact-unwind@ | Opt_Hpc | Opt_FamAppCache | Opt_ExternalInterpreter | Opt_OptimalApplicativeDo | Opt_VersionMacros | Opt_WholeArchiveHsLibs -- copy all libs into a single folder prior to linking binaries -- this should elivate the excessive command line limit restrictions -- on windows, by only requiring a single -L argument instead of -- one for each dependency. At the time of this writing, gcc -- forwards all -L flags to the collect2 command without using a -- response file and as such breaking apart. | Opt_SingleLibFolder | Opt_ExposeInternalSymbols | Opt_KeepCAFs | Opt_KeepGoing | Opt_ByteCode | Opt_LinkRts -- output style opts | Opt_ErrorSpans -- Include full span info in error messages, -- instead of just the start position. | Opt_DeferDiagnostics | Opt_DiagnosticsShowCaret -- Show snippets of offending code | Opt_PprCaseAsLet | Opt_PprShowTicks | Opt_ShowHoleConstraints -- Options relating to the display of valid hole fits -- when generating an error message for a typed hole -- See Note [Valid hole fits include ...] in GHC.Tc.Errors.Hole | Opt_ShowValidHoleFits | Opt_SortValidHoleFits | Opt_SortBySizeHoleFits | Opt_SortBySubsumHoleFits | Opt_AbstractRefHoleFits | Opt_UnclutterValidHoleFits | Opt_ShowTypeAppOfHoleFits | Opt_ShowTypeAppVarsOfHoleFits | Opt_ShowDocsOfHoleFits | Opt_ShowTypeOfHoleFits | Opt_ShowProvOfHoleFits | Opt_ShowMatchesOfHoleFits | Opt_ShowLoadedModules | Opt_HexWordLiterals -- See Note [Print Hexadecimal Literals] -- Suppress a coercions inner structure, replacing it with '...' 
| Opt_SuppressCoercions -- Suppress the type of a coercion as well | Opt_SuppressCoercionTypes | Opt_SuppressVarKinds -- Suppress module id prefixes on variables. | Opt_SuppressModulePrefixes -- Suppress type applications. | Opt_SuppressTypeApplications -- Suppress info such as arity and unfoldings on identifiers. | Opt_SuppressIdInfo -- Suppress separate type signatures in core, but leave types on -- lambda bound vars | Opt_SuppressUnfoldings -- Suppress the details of even stable unfoldings | Opt_SuppressTypeSignatures -- Suppress unique ids on variables. -- Except for uniques, as some simplifier phases introduce new -- variables that have otherwise identical names. | Opt_SuppressUniques | Opt_SuppressStgExts | Opt_SuppressStgReps | Opt_SuppressTicks -- Replaces Opt_PprShowTicks | Opt_SuppressTimestamps -- ^ Suppress timestamps in dumps | Opt_SuppressCoreSizes -- ^ Suppress per binding Core size stats in dumps -- temporary flags | Opt_AutoLinkPackages | Opt_ImplicitImportQualified -- keeping stuff | Opt_KeepHscppFiles | Opt_KeepHiDiffs | Opt_KeepHcFiles | Opt_KeepSFiles | Opt_KeepTmpFiles | Opt_KeepRawTokenStream | Opt_KeepLlvmFiles | Opt_KeepHiFiles | Opt_KeepOFiles | Opt_BuildDynamicToo -- safe haskell flags | Opt_DistrustAllPackages | Opt_PackageTrust | Opt_PluginTrustworthy | Opt_G_NoStateHack | Opt_G_NoOptCoercion deriving (Eq, Show, Enum) -- Check whether a flag should be considered an "optimisation flag" -- for purposes of recompilation avoidance (see -- Note [Ignoring some flag changes] in GHC.Iface.Recomp.Flags). Being listed here is -- not a guarantee that the flag has no other effect. We could, and -- perhaps should, separate out the flags that have some minor impact on -- program semantics and/or error behavior (e.g., assertions), but -- then we'd need to go to extra trouble (and an additional flag) -- to allow users to ignore the optimisation level even though that -- means ignoring some change. 
optimisationFlags :: EnumSet GeneralFlag optimisationFlags = EnumSet.fromList [ Opt_CallArity , Opt_Strictness , Opt_LateDmdAnal , Opt_KillAbsence , Opt_KillOneShot , Opt_FullLaziness , Opt_FloatIn , Opt_LateSpecialise , Opt_Specialise , Opt_SpecialiseAggressively , Opt_CrossModuleSpecialise , Opt_StaticArgumentTransformation , Opt_CSE , Opt_StgCSE , Opt_StgLiftLams , Opt_LiberateCase , Opt_SpecConstr , Opt_SpecConstrKeen , Opt_DoLambdaEtaExpansion , Opt_IgnoreAsserts , Opt_DoEtaReduction , Opt_CaseMerge , Opt_CaseFolding , Opt_UnboxStrictFields , Opt_UnboxSmallStrictFields , Opt_DictsCheap , Opt_EnableRewriteRules , Opt_RegsGraph , Opt_RegsIterative , Opt_PedanticBottoms , Opt_LlvmTBAA , Opt_LlvmFillUndefWithGarbage , Opt_IrrefutableTuples , Opt_CmmSink , Opt_CmmElimCommonBlocks , Opt_AsmShortcutting , Opt_OmitYields , Opt_FunToThunk , Opt_DictsStrict , Opt_DmdTxDictSel , Opt_Loopification , Opt_CfgBlocklayout , Opt_WeightlessBlocklayout , Opt_CprAnal , Opt_WorkerWrapper , Opt_WorkerWrapperUnlift , Opt_SolveConstantDicts , Opt_CatchNonexhaustiveCases , Opt_IgnoreAsserts ] data WarningFlag = -- See Note [Updating flag description in the User's Guide] Opt_WarnDuplicateExports | Opt_WarnDuplicateConstraints | Opt_WarnRedundantConstraints | Opt_WarnHiShadows | Opt_WarnImplicitPrelude | Opt_WarnIncompletePatterns | Opt_WarnIncompleteUniPatterns | Opt_WarnIncompletePatternsRecUpd | Opt_WarnOverflowedLiterals | Opt_WarnEmptyEnumerations | Opt_WarnMissingFields | Opt_WarnMissingImportList | Opt_WarnMissingMethods | Opt_WarnMissingSignatures | Opt_WarnMissingLocalSignatures | Opt_WarnNameShadowing | Opt_WarnOverlappingPatterns | Opt_WarnTypeDefaults | Opt_WarnMonomorphism | Opt_WarnUnusedTopBinds | Opt_WarnUnusedLocalBinds | Opt_WarnUnusedPatternBinds | Opt_WarnUnusedImports | Opt_WarnUnusedMatches | Opt_WarnUnusedTypePatterns | Opt_WarnUnusedForalls | Opt_WarnUnusedRecordWildcards | Opt_WarnRedundantBangPatterns | Opt_WarnRedundantRecordWildcards | 
Opt_WarnWarningsDeprecations | Opt_WarnDeprecatedFlags | Opt_WarnMissingMonadFailInstances -- since 8.0, has no effect since 8.8 | Opt_WarnSemigroup -- since 8.0 | Opt_WarnDodgyExports | Opt_WarnDodgyImports | Opt_WarnOrphans | Opt_WarnAutoOrphans | Opt_WarnIdentities | Opt_WarnTabs | Opt_WarnUnrecognisedPragmas | Opt_WarnMisplacedPragmas | Opt_WarnDodgyForeignImports | Opt_WarnUnusedDoBind | Opt_WarnWrongDoBind | Opt_WarnAlternativeLayoutRuleTransitional | Opt_WarnUnsafe | Opt_WarnSafe | Opt_WarnTrustworthySafe | Opt_WarnMissedSpecs | Opt_WarnAllMissedSpecs | Opt_WarnUnsupportedCallingConventions | Opt_WarnUnsupportedLlvmVersion | Opt_WarnMissedExtraSharedLib | Opt_WarnInlineRuleShadowing | Opt_WarnTypedHoles | Opt_WarnPartialTypeSignatures | Opt_WarnMissingExportedSignatures | Opt_WarnUntickedPromotedConstructors | Opt_WarnDerivingTypeable | Opt_WarnDeferredTypeErrors | Opt_WarnDeferredOutOfScopeVariables | Opt_WarnNonCanonicalMonadInstances -- since 8.0 | Opt_WarnNonCanonicalMonadFailInstances -- since 8.0, removed 8.8 | Opt_WarnNonCanonicalMonoidInstances -- since 8.0 | Opt_WarnMissingPatternSynonymSignatures -- since 8.0 | Opt_WarnUnrecognisedWarningFlags -- since 8.0 | Opt_WarnSimplifiableClassConstraints -- Since 8.2 | Opt_WarnCPPUndef -- Since 8.2 | Opt_WarnUnbangedStrictPatterns -- Since 8.2 | Opt_WarnMissingHomeModules -- Since 8.2 | Opt_WarnPartialFields -- Since 8.4 | Opt_WarnMissingExportList | Opt_WarnInaccessibleCode | Opt_WarnStarIsType -- Since 8.6 | Opt_WarnStarBinder -- Since 8.6 | Opt_WarnImplicitKindVars -- Since 8.6 | Opt_WarnSpaceAfterBang | Opt_WarnMissingDerivingStrategies -- Since 8.8 | Opt_WarnPrepositiveQualifiedModule -- Since 8.10 | Opt_WarnUnusedPackages -- Since 8.10 | Opt_WarnInferredSafeImports -- Since 8.10 | Opt_WarnMissingSafeHaskellMode -- Since 8.10 | Opt_WarnCompatUnqualifiedImports -- Since 8.10 | Opt_WarnDerivingDefaults | Opt_WarnInvalidHaddock -- Since 9.0 | Opt_WarnOperatorWhitespaceExtConflict -- Since 9.2 | 
Opt_WarnOperatorWhitespace -- Since 9.2 | Opt_WarnAmbiguousFields -- Since 9.2 | Opt_WarnImplicitLift -- Since 9.2 | Opt_WarnMissingKindSignatures -- Since 9.2 | Opt_WarnMissingExportedPatternSynonymSignatures -- since 9.2 | Opt_WarnRedundantStrictnessFlags -- Since 9.4 | Opt_WarnForallIdentifier -- Since 9.4 | Opt_WarnUnicodeBidirectionalFormatCharacters -- Since 9.0.2 | Opt_WarnGADTMonoLocalBinds -- Since 9.4 | Opt_WarnTypeEqualityOutOfScope -- Since 9.4 | Opt_WarnTypeEqualityRequiresOperators -- Since 9.4 deriving (Eq, Ord, Show, Enum) -- | Return the names of a WarningFlag -- -- One flag may have several names because of US/UK spelling. The first one is -- the "preferred one" that will be displayed in warning messages. warnFlagNames :: WarningFlag -> NonEmpty String warnFlagNames wflag = case wflag of Opt_WarnAlternativeLayoutRuleTransitional -> "alternative-layout-rule-transitional" :| [] Opt_WarnAmbiguousFields -> "ambiguous-fields" :| [] Opt_WarnAutoOrphans -> "auto-orphans" :| [] Opt_WarnCPPUndef -> "cpp-undef" :| [] Opt_WarnUnbangedStrictPatterns -> "unbanged-strict-patterns" :| [] Opt_WarnDeferredTypeErrors -> "deferred-type-errors" :| [] Opt_WarnDeferredOutOfScopeVariables -> "deferred-out-of-scope-variables" :| [] Opt_WarnWarningsDeprecations -> "deprecations" :| ["warnings-deprecations"] Opt_WarnDeprecatedFlags -> "deprecated-flags" :| [] Opt_WarnDerivingDefaults -> "deriving-defaults" :| [] Opt_WarnDerivingTypeable -> "deriving-typeable" :| [] Opt_WarnDodgyExports -> "dodgy-exports" :| [] Opt_WarnDodgyForeignImports -> "dodgy-foreign-imports" :| [] Opt_WarnDodgyImports -> "dodgy-imports" :| [] Opt_WarnEmptyEnumerations -> "empty-enumerations" :| [] Opt_WarnDuplicateConstraints -> "duplicate-constraints" :| [] Opt_WarnRedundantConstraints -> "redundant-constraints" :| [] Opt_WarnDuplicateExports -> "duplicate-exports" :| [] Opt_WarnHiShadows -> "hi-shadowing" :| [] Opt_WarnInaccessibleCode -> "inaccessible-code" :| [] Opt_WarnImplicitPrelude -> 
"implicit-prelude" :| [] Opt_WarnImplicitKindVars -> "implicit-kind-vars" :| [] Opt_WarnIncompletePatterns -> "incomplete-patterns" :| [] Opt_WarnIncompletePatternsRecUpd -> "incomplete-record-updates" :| [] Opt_WarnIncompleteUniPatterns -> "incomplete-uni-patterns" :| [] Opt_WarnInlineRuleShadowing -> "inline-rule-shadowing" :| [] Opt_WarnIdentities -> "identities" :| [] Opt_WarnMissingFields -> "missing-fields" :| [] Opt_WarnMissingImportList -> "missing-import-lists" :| [] Opt_WarnMissingExportList -> "missing-export-lists" :| [] Opt_WarnMissingLocalSignatures -> "missing-local-signatures" :| [] Opt_WarnMissingMethods -> "missing-methods" :| [] Opt_WarnMissingMonadFailInstances -> "missing-monadfail-instances" :| [] Opt_WarnSemigroup -> "semigroup" :| [] Opt_WarnMissingSignatures -> "missing-signatures" :| [] Opt_WarnMissingKindSignatures -> "missing-kind-signatures" :| [] Opt_WarnMissingExportedSignatures -> "missing-exported-signatures" :| [] Opt_WarnMonomorphism -> "monomorphism-restriction" :| [] Opt_WarnNameShadowing -> "name-shadowing" :| [] Opt_WarnNonCanonicalMonadInstances -> "noncanonical-monad-instances" :| [] Opt_WarnNonCanonicalMonadFailInstances -> "noncanonical-monadfail-instances" :| [] Opt_WarnNonCanonicalMonoidInstances -> "noncanonical-monoid-instances" :| [] Opt_WarnOrphans -> "orphans" :| [] Opt_WarnOverflowedLiterals -> "overflowed-literals" :| [] Opt_WarnOverlappingPatterns -> "overlapping-patterns" :| [] Opt_WarnMissedSpecs -> "missed-specialisations" :| ["missed-specializations"] Opt_WarnAllMissedSpecs -> "all-missed-specialisations" :| ["all-missed-specializations"] Opt_WarnSafe -> "safe" :| [] Opt_WarnTrustworthySafe -> "trustworthy-safe" :| [] Opt_WarnInferredSafeImports -> "inferred-safe-imports" :| [] Opt_WarnMissingSafeHaskellMode -> "missing-safe-haskell-mode" :| [] Opt_WarnTabs -> "tabs" :| [] Opt_WarnTypeDefaults -> "type-defaults" :| [] Opt_WarnTypedHoles -> "typed-holes" :| [] Opt_WarnPartialTypeSignatures -> 
"partial-type-signatures" :| [] Opt_WarnUnrecognisedPragmas -> "unrecognised-pragmas" :| [] Opt_WarnMisplacedPragmas -> "misplaced-pragmas" :| [] Opt_WarnUnsafe -> "unsafe" :| [] Opt_WarnUnsupportedCallingConventions -> "unsupported-calling-conventions" :| [] Opt_WarnUnsupportedLlvmVersion -> "unsupported-llvm-version" :| [] Opt_WarnMissedExtraSharedLib -> "missed-extra-shared-lib" :| [] Opt_WarnUntickedPromotedConstructors -> "unticked-promoted-constructors" :| [] Opt_WarnUnusedDoBind -> "unused-do-bind" :| [] Opt_WarnUnusedForalls -> "unused-foralls" :| [] Opt_WarnUnusedImports -> "unused-imports" :| [] Opt_WarnUnusedLocalBinds -> "unused-local-binds" :| [] Opt_WarnUnusedMatches -> "unused-matches" :| [] Opt_WarnUnusedPatternBinds -> "unused-pattern-binds" :| [] Opt_WarnUnusedTopBinds -> "unused-top-binds" :| [] Opt_WarnUnusedTypePatterns -> "unused-type-patterns" :| [] Opt_WarnUnusedRecordWildcards -> "unused-record-wildcards" :| [] Opt_WarnRedundantBangPatterns -> "redundant-bang-patterns" :| [] Opt_WarnRedundantRecordWildcards -> "redundant-record-wildcards" :| [] Opt_WarnRedundantStrictnessFlags -> "redundant-strictness-flags" :| [] Opt_WarnWrongDoBind -> "wrong-do-bind" :| [] Opt_WarnMissingPatternSynonymSignatures -> "missing-pattern-synonym-signatures" :| [] Opt_WarnMissingDerivingStrategies -> "missing-deriving-strategies" :| [] Opt_WarnSimplifiableClassConstraints -> "simplifiable-class-constraints" :| [] Opt_WarnMissingHomeModules -> "missing-home-modules" :| [] Opt_WarnUnrecognisedWarningFlags -> "unrecognised-warning-flags" :| [] Opt_WarnStarBinder -> "star-binder" :| [] Opt_WarnStarIsType -> "star-is-type" :| [] Opt_WarnSpaceAfterBang -> "missing-space-after-bang" :| [] Opt_WarnPartialFields -> "partial-fields" :| [] Opt_WarnPrepositiveQualifiedModule -> "prepositive-qualified-module" :| [] Opt_WarnUnusedPackages -> "unused-packages" :| [] Opt_WarnCompatUnqualifiedImports -> "compat-unqualified-imports" :| [] Opt_WarnInvalidHaddock -> 
"invalid-haddock" :| [] Opt_WarnOperatorWhitespaceExtConflict -> "operator-whitespace-ext-conflict" :| [] Opt_WarnOperatorWhitespace -> "operator-whitespace" :| [] Opt_WarnImplicitLift -> "implicit-lift" :| [] Opt_WarnMissingExportedPatternSynonymSignatures -> "missing-exported-pattern-synonym-signatures" :| [] Opt_WarnForallIdentifier -> "forall-identifier" :| [] Opt_WarnUnicodeBidirectionalFormatCharacters -> "unicode-bidirectional-format-characters" :| [] Opt_WarnGADTMonoLocalBinds -> "gadt-mono-local-binds" :| [] Opt_WarnTypeEqualityOutOfScope -> "type-equality-out-of-scope" :| [] Opt_WarnTypeEqualityRequiresOperators -> "type-equality-requires-operators" :| [] -- ----------------------------------------------------------------------------- -- Standard sets of warning options -- Note [Documenting warning flags] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- If you change the list of warning enabled by default -- please remember to update the User's Guide. The relevant file is: -- -- docs/users_guide/using-warnings.rst -- | Warning groups. -- -- As all warnings are in the Weverything set, it is ignored when -- displaying to the user which group a warning is in. warningGroups :: [(String, [WarningFlag])] warningGroups = [ ("compat", minusWcompatOpts) , ("unused-binds", unusedBindsFlags) , ("default", standardWarnings) , ("extra", minusWOpts) , ("all", minusWallOpts) , ("everything", minusWeverythingOpts) ] -- | Warning group hierarchies, where there is an explicit inclusion -- relation. -- -- Each inner list is a hierarchy of warning groups, ordered from -- smallest to largest, where each group is a superset of the one -- before it. -- -- Separating this from 'warningGroups' allows for multiple -- hierarchies with no inherent relation to be defined. -- -- The special-case Weverything group is not included. 
warningHierarchies :: [[String]] warningHierarchies = hierarchies ++ map (:[]) rest where hierarchies = [["default", "extra", "all"]] rest = filter (`notElem` "everything" : concat hierarchies) $ map fst warningGroups -- | Find the smallest group in every hierarchy which a warning -- belongs to, excluding Weverything. smallestWarningGroups :: WarningFlag -> [String] smallestWarningGroups flag = mapMaybe go warningHierarchies where -- Because each hierarchy is arranged from smallest to largest, -- the first group we find in a hierarchy which contains the flag -- is the smallest. go (group:rest) = fromMaybe (go rest) $ do flags <- lookup group warningGroups guard (flag `elem` flags) pure (Just group) go [] = Nothing -- | Warnings enabled unless specified otherwise standardWarnings :: [WarningFlag] standardWarnings -- see Note [Documenting warning flags] = [ Opt_WarnOverlappingPatterns, Opt_WarnWarningsDeprecations, Opt_WarnDeprecatedFlags, Opt_WarnDeferredTypeErrors, Opt_WarnTypedHoles, Opt_WarnDeferredOutOfScopeVariables, Opt_WarnPartialTypeSignatures, Opt_WarnUnrecognisedPragmas, Opt_WarnMisplacedPragmas, Opt_WarnDuplicateExports, Opt_WarnDerivingDefaults, Opt_WarnOverflowedLiterals, Opt_WarnEmptyEnumerations, Opt_WarnAmbiguousFields, Opt_WarnMissingFields, Opt_WarnMissingMethods, Opt_WarnWrongDoBind, Opt_WarnUnsupportedCallingConventions, Opt_WarnDodgyForeignImports, Opt_WarnInlineRuleShadowing, Opt_WarnAlternativeLayoutRuleTransitional, Opt_WarnUnsupportedLlvmVersion, Opt_WarnMissedExtraSharedLib, Opt_WarnTabs, Opt_WarnUnrecognisedWarningFlags, Opt_WarnSimplifiableClassConstraints, Opt_WarnStarBinder, Opt_WarnInaccessibleCode, Opt_WarnSpaceAfterBang, Opt_WarnNonCanonicalMonadInstances, Opt_WarnNonCanonicalMonoidInstances, Opt_WarnOperatorWhitespaceExtConflict, Opt_WarnForallIdentifier, Opt_WarnUnicodeBidirectionalFormatCharacters, Opt_WarnGADTMonoLocalBinds, Opt_WarnTypeEqualityRequiresOperators ] -- | Things you get with -W minusWOpts :: [WarningFlag] minusWOpts 
= standardWarnings ++ [ Opt_WarnUnusedTopBinds, Opt_WarnUnusedLocalBinds, Opt_WarnUnusedPatternBinds, Opt_WarnUnusedMatches, Opt_WarnUnusedForalls, Opt_WarnUnusedImports, Opt_WarnIncompletePatterns, Opt_WarnDodgyExports, Opt_WarnDodgyImports, Opt_WarnUnbangedStrictPatterns ] -- | Things you get with -Wall minusWallOpts :: [WarningFlag] minusWallOpts = minusWOpts ++ [ Opt_WarnTypeDefaults, Opt_WarnNameShadowing, Opt_WarnMissingSignatures, Opt_WarnHiShadows, Opt_WarnOrphans, Opt_WarnUnusedDoBind, Opt_WarnTrustworthySafe, Opt_WarnMissingPatternSynonymSignatures, Opt_WarnUnusedRecordWildcards, Opt_WarnRedundantRecordWildcards, Opt_WarnStarIsType, Opt_WarnIncompleteUniPatterns, Opt_WarnIncompletePatternsRecUpd ] -- | Things you get with -Weverything, i.e. *all* known warnings flags minusWeverythingOpts :: [WarningFlag] minusWeverythingOpts = [ toEnum 0 .. ] -- | Things you get with -Wcompat. -- -- This is intended to group together warnings that will be enabled by default -- at some point in the future, so that library authors eager to make their -- code future compatible to fix issues before they even generate warnings. minusWcompatOpts :: [WarningFlag] minusWcompatOpts = [ Opt_WarnSemigroup , Opt_WarnNonCanonicalMonoidInstances , Opt_WarnStarIsType , Opt_WarnCompatUnqualifiedImports , Opt_WarnTypeEqualityOutOfScope ] -- | Things you get with -Wunused-binds unusedBindsFlags :: [WarningFlag] unusedBindsFlags = [ Opt_WarnUnusedTopBinds , Opt_WarnUnusedLocalBinds , Opt_WarnUnusedPatternBinds ] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Hooks.hs0000644000000000000000000001224614472400112020512 0ustar0000000000000000-- \section[Hooks]{Low level API hooks} -- NB: this module is SOURCE-imported by DynFlags, and should primarily -- refer to *types*, rather than *code* {-# LANGUAGE RankNTypes, TypeFamilies #-} module GHC.Driver.Hooks ( Hooks , HasHooks (..) , ContainsHooks (..) 
, emptyHooks -- the hooks: , DsForeignsHook , dsForeignsHook , tcForeignImportsHook , tcForeignExportsHook , hscFrontendHook , hscCompileCoreExprHook , ghcPrimIfaceHook , runPhaseHook , runMetaHook , linkHook , runRnSpliceHook , getValueSafelyHook , createIservProcessHook , stgToCmmHook , cmmToRawCmmHook ) where import GHC.Prelude import GHC.Driver.Env import GHC.Driver.Session import GHC.Driver.Pipeline.Phases import GHC.Hs.Decls import GHC.Hs.Binds import GHC.Hs.Expr import GHC.Hs.Extension import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Types.Id import GHC.Types.SrcLoc import GHC.Types.Basic import GHC.Types.CostCentre import GHC.Types.IPE import GHC.Types.Meta import GHC.Types.HpcInfo import GHC.Unit.Module import GHC.Unit.Module.ModSummary import GHC.Unit.Module.ModIface import GHC.Unit.Home.ModInfo import GHC.Core import GHC.Core.TyCon import GHC.Core.Type import GHC.Tc.Types import GHC.Stg.Syntax import GHC.StgToCmm.Types (ModuleLFInfos) import GHC.StgToCmm.Config import GHC.Cmm import GHCi.RemoteTypes import GHC.Data.Stream import GHC.Data.Bag import qualified Data.Kind import System.Process import GHC.Linker.Types {- ************************************************************************ * * \subsection{Hooks} * * ************************************************************************ -} -- | Hooks can be used by GHC API clients to replace parts of -- the compiler pipeline. 
If a hook is not installed, GHC -- uses the default built-in behaviour emptyHooks :: Hooks emptyHooks = Hooks { dsForeignsHook = Nothing , tcForeignImportsHook = Nothing , tcForeignExportsHook = Nothing , hscFrontendHook = Nothing , hscCompileCoreExprHook = Nothing , ghcPrimIfaceHook = Nothing , runPhaseHook = Nothing , runMetaHook = Nothing , linkHook = Nothing , runRnSpliceHook = Nothing , getValueSafelyHook = Nothing , createIservProcessHook = Nothing , stgToCmmHook = Nothing , cmmToRawCmmHook = Nothing } {- Note [The Decoupling Abstract Data Hack] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The "Abstract Data" idea is due to Richard Eisenberg in https://gitlab.haskell.org/ghc/ghc/-/merge_requests/1957, where the pattern is described in more detail. Here we use it as a temporary measure to break the dependency from the Parser on the Desugarer until the parser is free of DynFlags. We introduced a nullary type family @DsForeignsook@, whose single definition is in GHC.HsToCore.Types, where we instantiate it to [LForeignDecl GhcTc] -> DsM (ForeignStubs, OrdList (Id, CoreExpr)) In doing so, the Hooks module (which is an hs-boot dependency of DynFlags) can be decoupled from its use of the DsM definition in GHC.HsToCore.Types. Since both DsM and the definition of @ForeignsHook@ live in the same module, there is virtually no difference for plugin authors that want to write a foreign hook. 
-} -- See Note [The Decoupling Abstract Data Hack] type family DsForeignsHook :: Data.Kind.Type data Hooks = Hooks { dsForeignsHook :: !(Maybe DsForeignsHook) -- ^ Actual type: -- @Maybe ([LForeignDecl GhcTc] -> DsM (ForeignStubs, OrdList (Id, CoreExpr)))@ , tcForeignImportsHook :: !(Maybe ([LForeignDecl GhcRn] -> TcM ([Id], [LForeignDecl GhcTc], Bag GlobalRdrElt))) , tcForeignExportsHook :: !(Maybe ([LForeignDecl GhcRn] -> TcM (LHsBinds GhcTc, [LForeignDecl GhcTc], Bag GlobalRdrElt))) , hscFrontendHook :: !(Maybe (ModSummary -> Hsc FrontendResult)) , hscCompileCoreExprHook :: !(Maybe (HscEnv -> SrcSpan -> CoreExpr -> IO (ForeignHValue, [Linkable], PkgsLoaded))) , ghcPrimIfaceHook :: !(Maybe ModIface) , runPhaseHook :: !(Maybe PhaseHook) , runMetaHook :: !(Maybe (MetaHook TcM)) , linkHook :: !(Maybe (GhcLink -> DynFlags -> Bool -> HomePackageTable -> IO SuccessFlag)) , runRnSpliceHook :: !(Maybe (HsSplice GhcRn -> RnM (HsSplice GhcRn))) , getValueSafelyHook :: !(Maybe (HscEnv -> Name -> Type -> IO (Either Type (HValue, [Linkable], PkgsLoaded)))) , createIservProcessHook :: !(Maybe (CreateProcess -> IO ProcessHandle)) , stgToCmmHook :: !(Maybe (StgToCmmConfig -> InfoTableProvMap -> [TyCon] -> CollectedCCs -> [CgStgTopBinding] -> HpcInfo -> Stream IO CmmGroup ModuleLFInfos)) , cmmToRawCmmHook :: !(forall a . 
Maybe (DynFlags -> Maybe Module -> Stream IO CmmGroupSRTs a -> IO (Stream IO RawCmmGroup a))) } class HasHooks m where getHooks :: m Hooks class ContainsHooks a where extractHooks :: a -> Hooks ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Monad.hs0000644000000000000000000002014114472400112020456 0ustar0000000000000000{-# LANGUAGE DeriveFunctor, DerivingVia, RankNTypes #-} {-# OPTIONS_GHC -funbox-strict-fields #-} -- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow, 2010 -- -- The Session type and related functionality -- -- ----------------------------------------------------------------------------- module GHC.Driver.Monad ( -- * 'Ghc' monad stuff GhcMonad(..), Ghc(..), GhcT(..), liftGhcT, reflectGhc, reifyGhc, getSessionDynFlags, liftIO, Session(..), withSession, modifySession, modifySessionM, withTempSession, -- * Logger modifyLogger, pushLogHookM, popLogHookM, putLogMsgM, putMsgM, withTimingM, -- ** Diagnostics logDiagnostics, printException, WarnErrLogger, defaultWarnErrLogger ) where import GHC.Prelude import GHC.Driver.Session import GHC.Driver.Env import GHC.Driver.Errors ( printOrThrowDiagnostics, printMessages ) import GHC.Driver.Errors.Types import GHC.Driver.Config.Diagnostic import GHC.Utils.Monad import GHC.Utils.Exception import GHC.Utils.Error import GHC.Utils.Logger import GHC.Types.SrcLoc import GHC.Types.SourceError import Control.Monad import Control.Monad.Catch as MC import Control.Monad.Trans.Reader import Data.IORef -- ----------------------------------------------------------------------------- -- | A monad that has all the features needed by GHC API calls. -- -- In short, a GHC monad -- -- - allows embedding of IO actions, -- -- - can log warnings, -- -- - allows handling of (extensible) exceptions, and -- -- - maintains a current session. -- -- If you do not use 'Ghc' or 'GhcT', make sure to call 'GHC.initGhcMonad' -- before any call to the GHC API functions can occur. 
-- class (Functor m, ExceptionMonad m, HasDynFlags m, HasLogger m ) => GhcMonad m where getSession :: m HscEnv setSession :: HscEnv -> m () -- | Call the argument with the current session. withSession :: GhcMonad m => (HscEnv -> m a) -> m a withSession f = getSession >>= f -- | Grabs the DynFlags from the Session getSessionDynFlags :: GhcMonad m => m DynFlags getSessionDynFlags = withSession (return . hsc_dflags) -- | Set the current session to the result of applying the current session to -- the argument. modifySession :: GhcMonad m => (HscEnv -> HscEnv) -> m () modifySession f = do h <- getSession setSession $! f h -- | Set the current session to the result of applying the current session to -- the argument. modifySessionM :: GhcMonad m => (HscEnv -> m HscEnv) -> m () modifySessionM f = do h <- getSession h' <- f h setSession $! h' withSavedSession :: GhcMonad m => m a -> m a withSavedSession m = do saved_session <- getSession m `MC.finally` setSession saved_session -- | Call an action with a temporarily modified Session. withTempSession :: GhcMonad m => (HscEnv -> HscEnv) -> m a -> m a withTempSession f m = withSavedSession $ modifySession f >> m ---------------------------------------- -- Logging ---------------------------------------- -- | Modify the logger modifyLogger :: GhcMonad m => (Logger -> Logger) -> m () modifyLogger f = modifySession $ \hsc_env -> hsc_env { hsc_logger = f (hsc_logger hsc_env) } -- | Push a log hook on the stack pushLogHookM :: GhcMonad m => (LogAction -> LogAction) -> m () pushLogHookM = modifyLogger . 
pushLogHook -- | Pop a log hook from the stack popLogHookM :: GhcMonad m => m () popLogHookM = modifyLogger popLogHook -- | Put a log message putMsgM :: GhcMonad m => SDoc -> m () putMsgM doc = do logger <- getLogger liftIO $ putMsg logger doc -- | Put a log message putLogMsgM :: GhcMonad m => MessageClass -> SrcSpan -> SDoc -> m () putLogMsgM msg_class loc doc = do logger <- getLogger liftIO $ logMsg logger msg_class loc doc -- | Time an action withTimingM :: GhcMonad m => SDoc -> (b -> ()) -> m b -> m b withTimingM doc force action = do logger <- getLogger withTiming logger doc force action -- ----------------------------------------------------------------------------- -- | A monad that allows logging of diagnostics. logDiagnostics :: GhcMonad m => Messages GhcMessage -> m () logDiagnostics warns = do dflags <- getSessionDynFlags logger <- getLogger let !diag_opts = initDiagOpts dflags liftIO $ printOrThrowDiagnostics logger diag_opts warns -- ----------------------------------------------------------------------------- -- | A minimal implementation of a 'GhcMonad'. If you need a custom monad, -- e.g., to maintain additional state consider wrapping this monad or using -- 'GhcT'. newtype Ghc a = Ghc { unGhc :: Session -> IO a } deriving (Functor) deriving (MonadThrow, MonadCatch, MonadMask) via (ReaderT Session IO) -- | The Session is a handle to the complete state of a compilation -- session. A compilation session consists of a set of modules -- constituting the current program or library, the context for -- interactive evaluation, and various caches. 
data Session = Session !(IORef HscEnv) instance Applicative Ghc where pure a = Ghc $ \_ -> return a g <*> m = do f <- g; a <- m; return (f a) instance Monad Ghc where m >>= g = Ghc $ \s -> do a <- unGhc m s; unGhc (g a) s instance MonadIO Ghc where liftIO ioA = Ghc $ \_ -> ioA instance MonadFix Ghc where mfix f = Ghc $ \s -> mfix (\x -> unGhc (f x) s) instance HasDynFlags Ghc where getDynFlags = getSessionDynFlags instance HasLogger Ghc where getLogger = hsc_logger <$> getSession instance GhcMonad Ghc where getSession = Ghc $ \(Session r) -> readIORef r setSession s' = Ghc $ \(Session r) -> writeIORef r s' -- | Reflect a computation in the 'Ghc' monad into the 'IO' monad. -- -- You can use this to call functions returning an action in the 'Ghc' monad -- inside an 'IO' action. This is needed for some (too restrictive) callback -- arguments of some library functions: -- -- > libFunc :: String -> (Int -> IO a) -> IO a -- > ghcFunc :: Int -> Ghc a -- > -- > ghcFuncUsingLibFunc :: String -> Ghc a -> Ghc a -- > ghcFuncUsingLibFunc str = -- > reifyGhc $ \s -> -- > libFunc $ \i -> do -- > reflectGhc (ghcFunc i) s -- reflectGhc :: Ghc a -> Session -> IO a reflectGhc m = unGhc m -- > Dual to 'reflectGhc'. See its documentation. reifyGhc :: (Session -> IO a) -> Ghc a reifyGhc act = Ghc $ act -- ----------------------------------------------------------------------------- -- | A monad transformer to add GHC specific features to another monad. -- -- Note that the wrapped monad must support IO and handling of exceptions. 
newtype GhcT m a = GhcT { unGhcT :: Session -> m a } deriving (Functor) deriving (MonadThrow, MonadCatch, MonadMask) via (ReaderT Session m) liftGhcT :: m a -> GhcT m a liftGhcT m = GhcT $ \_ -> m instance Applicative m => Applicative (GhcT m) where pure x = GhcT $ \_ -> pure x g <*> m = GhcT $ \s -> unGhcT g s <*> unGhcT m s instance Monad m => Monad (GhcT m) where m >>= k = GhcT $ \s -> do a <- unGhcT m s; unGhcT (k a) s instance MonadIO m => MonadIO (GhcT m) where liftIO ioA = GhcT $ \_ -> liftIO ioA instance MonadIO m => HasDynFlags (GhcT m) where getDynFlags = GhcT $ \(Session r) -> liftM hsc_dflags (liftIO $ readIORef r) instance MonadIO m => HasLogger (GhcT m) where getLogger = GhcT $ \(Session r) -> liftM hsc_logger (liftIO $ readIORef r) instance ExceptionMonad m => GhcMonad (GhcT m) where getSession = GhcT $ \(Session r) -> liftIO $ readIORef r setSession s' = GhcT $ \(Session r) -> liftIO $ writeIORef r s' -- | Print the all diagnostics in a 'SourceError'. Useful inside exception -- handlers. printException :: (HasLogger m, MonadIO m, HasDynFlags m) => SourceError -> m () printException err = do dflags <- getDynFlags logger <- getLogger let !diag_opts = initDiagOpts dflags liftIO $ printMessages logger diag_opts (srcErrorMessages err) -- | A function called to log warnings and errors. type WarnErrLogger = forall m. 
(HasDynFlags m , MonadIO m, HasLogger m) => Maybe SourceError -> m () defaultWarnErrLogger :: WarnErrLogger defaultWarnErrLogger Nothing = return () defaultWarnErrLogger (Just e) = printException e ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Phases.hs0000644000000000000000000002725614472400112020661 0ustar0000000000000000----------------------------------------------------------------------------- -- -- GHC Driver -- -- (c) The University of Glasgow 2002 -- ----------------------------------------------------------------------------- module GHC.Driver.Phases ( Phase(..), happensBefore, eqPhase, isStopLn, startPhase, phaseInputExt, StopPhase(..), stopPhaseToPhase, isHaskellishSuffix, isHaskellSrcSuffix, isBackpackishSuffix, isObjectSuffix, isCishSuffix, isDynLibSuffix, isHaskellUserSrcSuffix, isHaskellSigSuffix, isSourceSuffix, isHaskellishTarget, isHaskellishFilename, isHaskellSrcFilename, isHaskellSigFilename, isObjectFilename, isCishFilename, isDynLibFilename, isHaskellUserSrcFilename, isSourceFilename, phaseForeignLanguage ) where import GHC.Prelude import GHC.Platform import GHC.ForeignSrcLang import GHC.Types.SourceFile import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import System.FilePath ----------------------------------------------------------------------------- -- Phases {- Phase of the | Suffix saying | Flag saying | (suffix of) compilation system | ``start here''| ``stop after''| output file literate pre-processor | .lhs | - | - C pre-processor (opt.) | - | -E | - Haskell compiler | .hs | -C, -S | .hc, .s C compiler (opt.) 
| .hc or .c | -S | .s assembler | .s or .S | -c | .o linker | other | - | a.out linker (merge objects) | other | - | .o -} -- Phases we can actually stop after data StopPhase = StopPreprocess -- ^ @-E@ | StopC -- ^ @-C@ | StopAs -- ^ @-S@ | NoStop -- ^ @-c@ stopPhaseToPhase :: StopPhase -> Phase stopPhaseToPhase StopPreprocess = anyHsc stopPhaseToPhase StopC = HCc stopPhaseToPhase StopAs = As False stopPhaseToPhase NoStop = StopLn -- | Untyped Phase description data Phase = Unlit HscSource | Cpp HscSource | HsPp HscSource | Hsc HscSource | Ccxx -- Compile C++ | Cc -- Compile C | Cobjc -- Compile Objective-C | Cobjcxx -- Compile Objective-C++ | HCc -- Haskellised C (as opposed to vanilla C) compilation | As Bool -- Assembler for regular assembly files (Bool: with-cpp) | LlvmOpt -- Run LLVM opt tool over llvm assembly | LlvmLlc -- LLVM bitcode to native assembly | LlvmMangle -- Fix up TNTC by processing assembly produced by LLVM | CmmCpp -- pre-process Cmm source | Cmm -- parse & compile Cmm code | MergeForeign -- merge in the foreign object files -- The final phase is a pseudo-phase that tells the pipeline to stop. 
| StopLn -- Stop, but linking will follow, so generate .o file deriving (Eq, Show) instance Outputable Phase where ppr p = text (show p) anyHsc :: Phase anyHsc = Hsc (panic "anyHsc") isStopLn :: Phase -> Bool isStopLn StopLn = True isStopLn _ = False eqPhase :: Phase -> Phase -> Bool -- Equality of constructors, ignoring the HscSource field -- NB: the HscSource field can be 'bot'; see anyHsc above eqPhase (Unlit _) (Unlit _) = True eqPhase (Cpp _) (Cpp _) = True eqPhase (HsPp _) (HsPp _) = True eqPhase (Hsc _) (Hsc _) = True eqPhase Cc Cc = True eqPhase Cobjc Cobjc = True eqPhase HCc HCc = True eqPhase (As x) (As y) = x == y eqPhase LlvmOpt LlvmOpt = True eqPhase LlvmLlc LlvmLlc = True eqPhase LlvmMangle LlvmMangle = True eqPhase CmmCpp CmmCpp = True eqPhase Cmm Cmm = True eqPhase MergeForeign MergeForeign = True eqPhase StopLn StopLn = True eqPhase Ccxx Ccxx = True eqPhase Cobjcxx Cobjcxx = True eqPhase _ _ = False -- MP: happensBefore is only used in preprocessPipeline, that usage should -- be refactored and this usage removed. happensBefore :: Platform -> Phase -> Phase -> Bool happensBefore platform p1 p2 = p1 `happensBefore'` p2 where StopLn `happensBefore'` _ = False x `happensBefore'` y = after_x `eqPhase` y || after_x `happensBefore'` y where after_x = nextPhase platform x nextPhase :: Platform -> Phase -> Phase nextPhase platform p -- A conservative approximation to the next phase, used in happensBefore = case p of Unlit sf -> Cpp sf Cpp sf -> HsPp sf HsPp sf -> Hsc sf Hsc _ -> maybeHCc LlvmOpt -> LlvmLlc LlvmLlc -> LlvmMangle LlvmMangle -> As False As _ -> MergeForeign Ccxx -> As False Cc -> As False Cobjc -> As False Cobjcxx -> As False CmmCpp -> Cmm Cmm -> maybeHCc HCc -> As False MergeForeign -> StopLn StopLn -> panic "nextPhase: nothing after StopLn" where maybeHCc = if platformUnregisterised platform then HCc else As False -- the first compilation phase for a given file is determined -- by its suffix. 
startPhase :: String -> Phase startPhase "lhs" = Unlit HsSrcFile startPhase "lhs-boot" = Unlit HsBootFile startPhase "lhsig" = Unlit HsigFile startPhase "hs" = Cpp HsSrcFile startPhase "hs-boot" = Cpp HsBootFile startPhase "hsig" = Cpp HsigFile startPhase "hscpp" = HsPp HsSrcFile startPhase "hspp" = Hsc HsSrcFile startPhase "hc" = HCc startPhase "c" = Cc startPhase "cpp" = Ccxx startPhase "C" = Cc startPhase "m" = Cobjc startPhase "M" = Cobjcxx startPhase "mm" = Cobjcxx startPhase "cc" = Ccxx startPhase "cxx" = Ccxx startPhase "s" = As False startPhase "S" = As True startPhase "ll" = LlvmOpt startPhase "bc" = LlvmLlc startPhase "lm_s" = LlvmMangle startPhase "o" = StopLn startPhase "cmm" = CmmCpp startPhase "cmmcpp" = Cmm startPhase _ = StopLn -- all unknown file types -- This is used to determine the extension for the output from the -- current phase (if it generates a new file). The extension depends -- on the next phase in the pipeline. phaseInputExt :: Phase -> String phaseInputExt (Unlit HsSrcFile) = "lhs" phaseInputExt (Unlit HsBootFile) = "lhs-boot" phaseInputExt (Unlit HsigFile) = "lhsig" phaseInputExt (Cpp _) = "lpp" -- intermediate only phaseInputExt (HsPp _) = "hscpp" -- intermediate only phaseInputExt (Hsc _) = "hspp" -- intermediate only -- NB: as things stand, phaseInputExt (Hsc x) must not evaluate x -- because runPhase uses the StopBefore phase to pick the -- output filename. That could be fixed, but watch out. 
phaseInputExt HCc = "hc" phaseInputExt Ccxx = "cpp" phaseInputExt Cobjc = "m" phaseInputExt Cobjcxx = "mm" phaseInputExt Cc = "c" phaseInputExt (As True) = "S" phaseInputExt (As False) = "s" phaseInputExt LlvmOpt = "ll" phaseInputExt LlvmLlc = "bc" phaseInputExt LlvmMangle = "lm_s" phaseInputExt CmmCpp = "cmmcpp" phaseInputExt Cmm = "cmm" phaseInputExt MergeForeign = "o" phaseInputExt StopLn = "o" haskellish_src_suffixes, backpackish_suffixes, haskellish_suffixes, cish_suffixes, haskellish_user_src_suffixes, haskellish_sig_suffixes :: [String] -- When a file with an extension in the haskellish_src_suffixes group is -- loaded in --make mode, its imports will be loaded too. haskellish_src_suffixes = haskellish_user_src_suffixes ++ [ "hspp", "hscpp" ] haskellish_suffixes = haskellish_src_suffixes ++ [ "hc", "cmm", "cmmcpp" ] cish_suffixes = [ "c", "cpp", "C", "cc", "cxx", "s", "S", "ll", "bc", "lm_s", "m", "M", "mm" ] -- Will not be deleted as temp files: haskellish_user_src_suffixes = haskellish_sig_suffixes ++ [ "hs", "lhs", "hs-boot", "lhs-boot" ] haskellish_sig_suffixes = [ "hsig", "lhsig" ] backpackish_suffixes = [ "bkp" ] objish_suffixes :: Platform -> [String] -- Use the appropriate suffix for the system on which -- the GHC-compiled code will run objish_suffixes platform = case platformOS platform of OSMinGW32 -> [ "o", "O", "obj", "OBJ" ] _ -> [ "o" ] dynlib_suffixes :: Platform -> [String] dynlib_suffixes platform = case platformOS platform of OSMinGW32 -> ["dll", "DLL"] OSDarwin -> ["dylib", "so"] _ -> ["so"] isHaskellishSuffix, isBackpackishSuffix, isHaskellSrcSuffix, isCishSuffix, isHaskellUserSrcSuffix, isHaskellSigSuffix :: String -> Bool isHaskellishSuffix s = s `elem` haskellish_suffixes isBackpackishSuffix s = s `elem` backpackish_suffixes isHaskellSigSuffix s = s `elem` haskellish_sig_suffixes isHaskellSrcSuffix s = s `elem` haskellish_src_suffixes isCishSuffix s = s `elem` cish_suffixes isHaskellUserSrcSuffix s = s `elem` 
haskellish_user_src_suffixes isObjectSuffix, isDynLibSuffix :: Platform -> String -> Bool isObjectSuffix platform s = s `elem` objish_suffixes platform isDynLibSuffix platform s = s `elem` dynlib_suffixes platform isSourceSuffix :: String -> Bool isSourceSuffix suff = isHaskellishSuffix suff || isCishSuffix suff || isBackpackishSuffix suff -- | When we are given files (modified by -x arguments) we need -- to determine if they are Haskellish or not to figure out -- how we should try to compile it. The rules are: -- -- 1. If no -x flag was specified, we check to see if -- the file looks like a module name, has no extension, -- or has a Haskell source extension. -- -- 2. If an -x flag was specified, we just make sure the -- specified suffix is a Haskell one. isHaskellishTarget :: (String, Maybe Phase) -> Bool isHaskellishTarget (f,Nothing) = looksLikeModuleName f || isHaskellSrcFilename f || not (hasExtension f) isHaskellishTarget (_,Just phase) = phase `notElem` [ As True, As False, Cc, Cobjc, Cobjcxx, CmmCpp, Cmm , StopLn] isHaskellishFilename, isHaskellSrcFilename, isCishFilename, isHaskellUserSrcFilename, isSourceFilename, isHaskellSigFilename :: FilePath -> Bool -- takeExtension return .foo, so we drop 1 to get rid of the . 
isHaskellishFilename f = isHaskellishSuffix (drop 1 $ takeExtension f) isHaskellSrcFilename f = isHaskellSrcSuffix (drop 1 $ takeExtension f) isCishFilename f = isCishSuffix (drop 1 $ takeExtension f) isHaskellUserSrcFilename f = isHaskellUserSrcSuffix (drop 1 $ takeExtension f) isSourceFilename f = isSourceSuffix (drop 1 $ takeExtension f) isHaskellSigFilename f = isHaskellSigSuffix (drop 1 $ takeExtension f) isObjectFilename, isDynLibFilename :: Platform -> FilePath -> Bool isObjectFilename platform f = isObjectSuffix platform (drop 1 $ takeExtension f) isDynLibFilename platform f = isDynLibSuffix platform (drop 1 $ takeExtension f) -- | Foreign language of the phase if the phase deals with a foreign code phaseForeignLanguage :: Phase -> Maybe ForeignSrcLang phaseForeignLanguage phase = case phase of Cc -> Just LangC Ccxx -> Just LangCxx Cobjc -> Just LangObjc Cobjcxx -> Just LangObjcxx HCc -> Just LangC As _ -> Just LangAsm MergeForeign -> Just RawObject _ -> Nothing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Pipeline/Monad.hs0000644000000000000000000000356214472400112022233 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE MultiParamTypeClasses #-} -- | The 'TPipelineClass' and 'MonadUse' classes and associated types module GHC.Driver.Pipeline.Monad ( TPipelineClass, MonadUse(..) , PipeEnv(..) , PipelineOutput(..) ) where import GHC.Prelude import Control.Monad.IO.Class import qualified Data.Kind as K import GHC.Driver.Phases import GHC.Utils.TmpFs -- The interface that the pipeline monad must implement. type TPipelineClass (f :: K.Type -> K.Type) (m :: K.Type -> K.Type) = (Functor m, MonadIO m, Applicative m, Monad m, MonadUse f m) -- | Lift a `f` action into an `m` action. 
class MonadUse f m where use :: f a -> m a -- PipeEnv: invariant information passed down through the pipeline data PipeEnv = PipeEnv { stop_phase :: StopPhase, -- ^ Stop just after this phase src_filename :: String, -- ^ basename of original input source src_basename :: String, -- ^ basename of original input source src_suffix :: String, -- ^ its extension start_phase :: Phase, output_spec :: PipelineOutput -- ^ says where to put the pipeline output } data PipelineOutput = Temporary TempFileLifetime -- ^ Output should be to a temporary file: we're going to -- run more compilation steps on this output later. | Persistent -- ^ We want a persistent file, i.e. a file in the current directory -- derived from the input filename, but with the appropriate extension. -- eg. in "ghc -c Foo.hs" the output goes into ./Foo.o. | SpecificFile -- ^ The output must go into the specific outputFile in DynFlags. -- We don't store the filename in the constructor as it changes -- when doing -dynamic-too. | NoOutputFile -- ^ No output should be created, like in Interpreter or NoBackend. 
deriving Show ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Pipeline/Phases.hs0000644000000000000000000000470014472400112022413 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE RankNTypes #-} module GHC.Driver.Pipeline.Phases (TPhase(..), PhaseHook(..)) where import GHC.Prelude import GHC.Driver.Pipeline.Monad import GHC.Driver.Env.Types import GHC.Driver.Session import GHC.Driver.CmdLine import GHC.Types.SourceFile import GHC.Unit.Module.ModSummary import GHC.Unit.Module.Status import GHC.Tc.Types ( FrontendResult ) import GHC.Types.Error import GHC.Driver.Errors.Types import GHC.Fingerprint.Type import GHC.Unit.Module.Location ( ModLocation ) import GHC.Unit.Module.Name ( ModuleName ) import GHC.Unit.Module.ModIface import GHC.Linker.Types import GHC.Driver.Phases -- Typed Pipeline Phases -- MP: TODO: We need to refine the arguments to each of these phases so recompilation -- can be smarter. For example, rather than passing a whole HscEnv, just pass the options -- which each phase depends on, then recompilation checking can decide to only rerun each -- phase if the inputs have been modified. 
data TPhase res where T_Unlit :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_FileArgs :: HscEnv -> FilePath -> TPhase (DynFlags, Messages PsMessage, [Warn]) T_Cpp :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_HsPp :: PipeEnv -> HscEnv -> FilePath -> FilePath -> TPhase FilePath T_HscRecomp :: PipeEnv -> HscEnv -> FilePath -> HscSource -> TPhase (HscEnv, ModSummary, HscRecompStatus) T_Hsc :: HscEnv -> ModSummary -> TPhase (FrontendResult, Messages GhcMessage) T_HscPostTc :: HscEnv -> ModSummary -> FrontendResult -> Messages GhcMessage -> Maybe Fingerprint -> TPhase HscBackendAction T_HscBackend :: PipeEnv -> HscEnv -> ModuleName -> HscSource -> ModLocation -> HscBackendAction -> TPhase ([FilePath], ModIface, Maybe Linkable, FilePath) T_CmmCpp :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_Cmm :: PipeEnv -> HscEnv -> FilePath -> TPhase ([FilePath], FilePath) T_Cc :: Phase -> PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_As :: Bool -> PipeEnv -> HscEnv -> Maybe ModLocation -> FilePath -> TPhase FilePath T_LlvmOpt :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_LlvmLlc :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_LlvmMangle :: PipeEnv -> HscEnv -> FilePath -> TPhase FilePath T_MergeForeign :: PipeEnv -> HscEnv -> FilePath -> [FilePath] -> TPhase FilePath -- | A wrapper around the interpretation function for phases. data PhaseHook = PhaseHook (forall a . TPhase a -> IO a) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Plugins.hs0000644000000000000000000003153014472400112021045 0ustar0000000000000000{-# LANGUAGE RankNTypes #-} -- | Definitions for writing /plugins/ for GHC. Plugins can hook into -- several areas of the compiler. See the 'Plugin' type. These plugins -- include type-checker plugins, source plugins, and core-to-core plugins. module GHC.Driver.Plugins ( -- * Plugins Plugins (..) , emptyPlugins , Plugin(..) , defaultPlugin , CommandLineOption , PsMessages(..) , ParsedResult(..) 
-- ** Recompilation checking , purePlugin, impurePlugin, flagRecompile , PluginRecompile(..) -- * Plugin types -- ** Frontend plugins , FrontendPlugin(..), defaultFrontendPlugin, FrontendPluginAction -- ** Core plugins -- | Core plugins allow plugins to register as a Core-to-Core pass. , CorePlugin -- ** Typechecker plugins -- | Typechecker plugins allow plugins to provide evidence to the -- typechecker. , TcPlugin -- ** Source plugins -- | GHC offers a number of points where plugins can access and modify its -- front-end (\"source\") representation. These include: -- -- - access to the parser result with 'parsedResultAction' -- - access to the renamed AST with 'renamedResultAction' -- - access to the typechecked AST with 'typeCheckResultAction' -- - access to the Template Haskell splices with 'spliceRunAction' -- - access to loaded interface files with 'interfaceLoadAction' -- , keepRenamedSource -- ** Defaulting plugins -- | Defaulting plugins can add candidate types to the defaulting -- mechanism. , DefaultingPlugin -- ** Hole fit plugins -- | hole fit plugins allow plugins to change the behavior of valid hole -- fit suggestions , HoleFitPluginR -- * Internal , PluginWithArgs(..), pluginsWithArgs, pluginRecompile' , LoadedPlugin(..), lpModuleName , StaticPlugin(..) 
, mapPlugins, withPlugins, withPlugins_ ) where import GHC.Prelude import GHC.Driver.Env import GHC.Driver.Monad import GHC.Driver.Phases import GHC.Unit.Module import GHC.Unit.Module.ModIface import GHC.Unit.Module.ModSummary import GHC.Parser.Errors.Types (PsWarning, PsError) import qualified GHC.Tc.Types import GHC.Tc.Types ( TcGblEnv, IfM, TcM, tcg_rn_decls, tcg_rn_exports ) import GHC.Tc.Errors.Hole.FitTypes ( HoleFitPluginR ) import GHC.Core.Opt.Monad ( CoreToDo, CoreM ) import GHC.Hs import GHC.Types.Error (Messages) import GHC.Utils.Fingerprint import GHC.Utils.Outputable (Outputable(..), text, (<+>)) import Data.List (sort) --Qualified import so we can define a Semigroup instance -- but it doesn't clash with Outputable.<> import qualified Data.Semigroup import Control.Monad import GHC.Linker.Types import GHC.Types.Unique.DFM -- | Command line options gathered from the -PModule.Name:stuff syntax -- are given to you as this type type CommandLineOption = String -- | Errors and warnings produced by the parser data PsMessages = PsMessages { psWarnings :: Messages PsWarning , psErrors :: Messages PsError } -- | Result of running the parser and the parser plugin data ParsedResult = ParsedResult { -- | Parsed module, potentially modified by a plugin parsedResultModule :: HsParsedModule , -- | Warnings and errors from parser, potentially modified by a plugin parsedResultMessages :: PsMessages } -- | 'Plugin' is the compiler plugin data type. Try to avoid -- constructing one of these directly, and just modify some fields of -- 'defaultPlugin' instead: this is to try and preserve source-code -- compatibility when we add fields to this. -- -- Nonetheless, this API is preliminary and highly likely to change in -- the future. data Plugin = Plugin { installCoreToDos :: CorePlugin -- ^ Modify the Core pipeline that will be used for compilation. 
-- This is called as the Core pipeline is built for every module -- being compiled, and plugins get the opportunity to modify the -- pipeline in a nondeterministic order. , tcPlugin :: TcPlugin -- ^ An optional typechecker plugin, which may modify the -- behaviour of the constraint solver. , defaultingPlugin :: DefaultingPlugin -- ^ An optional defaulting plugin, which may specify the -- additional type-defaulting rules. , holeFitPlugin :: HoleFitPlugin -- ^ An optional plugin to handle hole fits, which may re-order -- or change the list of valid hole fits and refinement hole fits. , driverPlugin :: [CommandLineOption] -> HscEnv -> IO HscEnv -- ^ An optional plugin to update 'HscEnv', right after plugin loading. This -- can be used to register hooks or tweak any field of 'DynFlags' before -- doing actual work on a module. -- -- @since 8.10.1 , pluginRecompile :: [CommandLineOption] -> IO PluginRecompile -- ^ Specify how the plugin should affect recompilation. , parsedResultAction :: [CommandLineOption] -> ModSummary -> ParsedResult -> Hsc ParsedResult -- ^ Modify the module when it is parsed. This is called by -- "GHC.Driver.Main" when the parser has produced no or only non-fatal -- errors. -- Compilation will fail if the messages produced by this function contain -- any errors. , renamedResultAction :: [CommandLineOption] -> TcGblEnv -> HsGroup GhcRn -> TcM (TcGblEnv, HsGroup GhcRn) -- ^ Modify each group after it is renamed. This is called after each -- `HsGroup` has been renamed. , typeCheckResultAction :: [CommandLineOption] -> ModSummary -> TcGblEnv -> TcM TcGblEnv -- ^ Modify the module when it is type checked. This is called at the -- very end of typechecking. , spliceRunAction :: [CommandLineOption] -> LHsExpr GhcTc -> TcM (LHsExpr GhcTc) -- ^ Modify the TH splice or quasiqoute before it is run. , interfaceLoadAction :: forall lcl . [CommandLineOption] -> ModIface -> IfM lcl ModIface -- ^ Modify an interface that have been loaded. 
This is called by -- "GHC.Iface.Load" when an interface is successfully loaded. Not applied to -- the loading of the plugin interface. Tools that rely on information from -- modules other than the currently compiled one should implement this -- function. } -- Note [Source plugins] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The `Plugin` datatype have been extended by fields that allow access to the -- different inner representations that are generated during the compilation -- process. These fields are `parsedResultAction`, `renamedResultAction`, -- `typeCheckResultAction`, `spliceRunAction` and `interfaceLoadAction`. -- -- The main purpose of these plugins is to help tool developers. They allow -- development tools to extract the information about the source code of a big -- Haskell project during the normal build procedure. In this case the plugin -- acts as the tools access point to the compiler that can be controlled by -- compiler flags. This is important because the manipulation of compiler flags -- is supported by most build environment. -- -- For the full discussion, check the full proposal at: -- https://gitlab.haskell.org/ghc/ghc/wikis/extended-plugins-proposal data PluginWithArgs = PluginWithArgs { paPlugin :: Plugin -- ^ the actual callable plugin , paArguments :: [CommandLineOption] -- ^ command line arguments for the plugin } -- | A plugin with its arguments. The result of loading the plugin. data LoadedPlugin = LoadedPlugin { lpPlugin :: PluginWithArgs -- ^ the actual plugin together with its commandline arguments , lpModule :: ModIface -- ^ the module containing the plugin } -- | A static plugin with its arguments. For registering compiled-in plugins -- through the GHC API. data StaticPlugin = StaticPlugin { spPlugin :: PluginWithArgs -- ^ the actual plugin together with its commandline arguments } lpModuleName :: LoadedPlugin -> ModuleName lpModuleName = moduleName . mi_module . 
lpModule pluginRecompile' :: PluginWithArgs -> IO PluginRecompile pluginRecompile' (PluginWithArgs plugin args) = pluginRecompile plugin args data PluginRecompile = ForceRecompile | NoForceRecompile | MaybeRecompile Fingerprint instance Outputable PluginRecompile where ppr ForceRecompile = text "ForceRecompile" ppr NoForceRecompile = text "NoForceRecompile" ppr (MaybeRecompile fp) = text "MaybeRecompile" <+> ppr fp instance Semigroup PluginRecompile where ForceRecompile <> _ = ForceRecompile NoForceRecompile <> r = r MaybeRecompile fp <> NoForceRecompile = MaybeRecompile fp MaybeRecompile fp <> MaybeRecompile fp' = MaybeRecompile (fingerprintFingerprints [fp, fp']) MaybeRecompile _fp <> ForceRecompile = ForceRecompile instance Monoid PluginRecompile where mempty = NoForceRecompile type CorePlugin = [CommandLineOption] -> [CoreToDo] -> CoreM [CoreToDo] type TcPlugin = [CommandLineOption] -> Maybe GHC.Tc.Types.TcPlugin type DefaultingPlugin = [CommandLineOption] -> Maybe GHC.Tc.Types.DefaultingPlugin type HoleFitPlugin = [CommandLineOption] -> Maybe HoleFitPluginR purePlugin, impurePlugin, flagRecompile :: [CommandLineOption] -> IO PluginRecompile purePlugin _args = return NoForceRecompile impurePlugin _args = return ForceRecompile flagRecompile = return . MaybeRecompile . fingerprintFingerprints . map fingerprintString . sort -- | Default plugin: does nothing at all, except for marking that safe -- inference has failed unless @-fplugin-trustworthy@ is passed. For -- compatibility reason you should base all your plugin definitions on this -- default value. 
defaultPlugin :: Plugin defaultPlugin = Plugin { installCoreToDos = const return , tcPlugin = const Nothing , defaultingPlugin = const Nothing , holeFitPlugin = const Nothing , driverPlugin = const return , pluginRecompile = impurePlugin , renamedResultAction = \_ env grp -> return (env, grp) , parsedResultAction = \_ _ -> return , typeCheckResultAction = \_ _ -> return , spliceRunAction = \_ -> return , interfaceLoadAction = \_ -> return } -- | A renamer plugin which mades the renamed source available in -- a typechecker plugin. keepRenamedSource :: [CommandLineOption] -> TcGblEnv -> HsGroup GhcRn -> TcM (TcGblEnv, HsGroup GhcRn) keepRenamedSource _ gbl_env group = return (gbl_env { tcg_rn_decls = update (tcg_rn_decls gbl_env) , tcg_rn_exports = update_exports (tcg_rn_exports gbl_env) }, group) where update_exports Nothing = Just [] update_exports m = m update Nothing = Just emptyRnGroup update m = m type PluginOperation m a = Plugin -> [CommandLineOption] -> a -> m a type ConstPluginOperation m a = Plugin -> [CommandLineOption] -> a -> m () data Plugins = Plugins { staticPlugins :: ![StaticPlugin] -- ^ Static plugins which do not need dynamic loading. These plugins are -- intended to be added by GHC API users directly to this list. -- -- To add dynamically loaded plugins through the GHC API see -- 'addPluginModuleName' instead. , loadedPlugins :: ![LoadedPlugin] -- ^ Plugins dynamically loaded after processing arguments. What -- will be loaded here is directed by DynFlags.pluginModNames. -- Arguments are loaded from DynFlags.pluginModNameOpts. -- -- The purpose of this field is to cache the plugins so they -- don't have to be loaded each time they are needed. See -- 'GHC.Runtime.Loader.initializePlugins'. 
, loadedPluginDeps :: !([Linkable], PkgsLoaded) -- ^ The object files required by the loaded plugins -- See Note [Plugin dependencies] } emptyPlugins :: Plugins emptyPlugins = Plugins [] [] ([], emptyUDFM) pluginsWithArgs :: Plugins -> [PluginWithArgs] pluginsWithArgs plugins = map lpPlugin (loadedPlugins plugins) ++ map spPlugin (staticPlugins plugins) -- | Perform an operation by using all of the plugins in turn. withPlugins :: Monad m => Plugins -> PluginOperation m a -> a -> m a withPlugins plugins transformation input = foldM go input (pluginsWithArgs plugins) where go arg (PluginWithArgs p opts) = transformation p opts arg mapPlugins :: Plugins -> (Plugin -> [CommandLineOption] -> a) -> [a] mapPlugins plugins f = map (\(PluginWithArgs p opts) -> f p opts) (pluginsWithArgs plugins) -- | Perform a constant operation by using all of the plugins in turn. withPlugins_ :: Monad m => Plugins -> ConstPluginOperation m a -> a -> m () withPlugins_ plugins transformation input = mapM_ (\(PluginWithArgs p opts) -> transformation p opts input) (pluginsWithArgs plugins) type FrontendPluginAction = [String] -> [(String, Maybe Phase)] -> Ghc () data FrontendPlugin = FrontendPlugin { frontend :: FrontendPluginAction } defaultFrontendPlugin :: FrontendPlugin defaultFrontendPlugin = FrontendPlugin { frontend = \_ _ -> return () } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Ppr.hs0000644000000000000000000000232614472400112020166 0ustar0000000000000000-- | Printing related functions that depend on session state (DynFlags) module GHC.Driver.Ppr ( showSDoc , showSDocUnsafe , showSDocForUser , showPpr , showPprUnsafe , printForUser ) where import GHC.Prelude import GHC.Driver.Session import GHC.Unit.State import GHC.Utils.Outputable import GHC.Utils.Ppr ( Mode(..) 
) import System.IO ( Handle ) -- | Show a SDoc as a String with the default user style showSDoc :: DynFlags -> SDoc -> String showSDoc dflags sdoc = renderWithContext (initSDocContext dflags defaultUserStyle) sdoc showPpr :: Outputable a => DynFlags -> a -> String showPpr dflags thing = showSDoc dflags (ppr thing) -- | Allows caller to specify the PrintUnqualified to use showSDocForUser :: DynFlags -> UnitState -> PrintUnqualified -> SDoc -> String showSDocForUser dflags unit_state unqual doc = renderWithContext (initSDocContext dflags sty) doc' where sty = mkUserStyle unqual AllTheWay doc' = pprWithUnitState unit_state doc printForUser :: DynFlags -> Handle -> PrintUnqualified -> Depth -> SDoc -> IO () printForUser dflags handle unqual depth doc = printSDocLn ctx (PageMode False) handle doc where ctx = initSDocContext dflags (mkUserStyle unqual depth) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Session.hs0000644000000000000000000066736614472400113021076 0ustar0000000000000000{-# OPTIONS_GHC -O0 #-} {-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE LambdaCase #-} ------------------------------------------------------------------------------- -- -- | Dynamic flags -- -- Most flags are dynamic flags, which means they can change from compilation -- to compilation using @OPTIONS_GHC@ pragmas, and in a multi-session GHC each -- session can be using different dynamic flags. Dynamic flags can also be set -- at the prompt in GHCi. 
-- -- (c) The University of Glasgow 2005 -- ------------------------------------------------------------------------------- {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Driver.Session ( -- * Dynamic flags and associated configuration types DumpFlag(..), GeneralFlag(..), WarningFlag(..), DiagnosticReason(..), Language(..), FatalMessager, FlushOut(..), ProfAuto(..), glasgowExtsFlags, hasPprDebug, hasNoDebugOutput, hasNoStateHack, hasNoOptCoercion, dopt, dopt_set, dopt_unset, gopt, gopt_set, gopt_unset, setGeneralFlag', unSetGeneralFlag', wopt, wopt_set, wopt_unset, wopt_fatal, wopt_set_fatal, wopt_unset_fatal, xopt, xopt_set, xopt_unset, xopt_set_unlessExplSpec, xopt_DuplicateRecordFields, xopt_FieldSelectors, lang_set, DynamicTooState(..), dynamicTooState, setDynamicNow, sccProfilingEnabled, needSourceNotes, DynFlags(..), outputFile, objectSuf, ways, FlagSpec(..), HasDynFlags(..), ContainsDynFlags(..), RtsOptsEnabled(..), GhcMode(..), isOneShot, GhcLink(..), isNoLink, PackageFlag(..), PackageArg(..), ModRenaming(..), packageFlagsChanged, IgnorePackageFlag(..), TrustFlag(..), PackageDBFlag(..), PkgDbRef(..), Option(..), showOpt, DynLibLoader(..), fFlags, fLangFlags, xFlags, wWarningFlags, makeDynFlagsConsistent, positionIndependent, optimisationFlags, setFlagsFromEnvFile, pprDynFlagsDiff, flagSpecOf, targetProfile, -- ** Safe Haskell safeHaskellOn, safeHaskellModeEnabled, safeImportsOn, safeLanguageOn, safeInferOn, packageTrustOn, safeDirectImpsReq, safeImplicitImpsReq, unsafeFlags, unsafeFlagsForInfer, -- ** LLVM Targets LlvmTarget(..), LlvmConfig(..), -- ** System tool settings and locations Settings(..), sProgramName, sProjectVersion, sGhcUsagePath, sGhciUsagePath, sToolDir, sTopDir, sGlobalPackageDatabasePath, sLdSupportsCompactUnwind, sLdSupportsBuildId, sLdSupportsFilelist, sLdIsGnuLd, sGccSupportsNoPie, sPgm_L, sPgm_P, sPgm_F, sPgm_c, sPgm_cxx, sPgm_a, sPgm_l, sPgm_lm, sPgm_dll, sPgm_T, sPgm_windres, sPgm_libtool, sPgm_ar, sPgm_ranlib, sPgm_lo, 
sPgm_lc, sPgm_lcc, sPgm_i, sOpt_L, sOpt_P, sOpt_P_fingerprint, sOpt_F, sOpt_c, sOpt_cxx, sOpt_a, sOpt_l, sOpt_lm, sOpt_windres, sOpt_lo, sOpt_lc, sOpt_lcc, sOpt_i, sExtraGccViaCFlags, sTargetPlatformString, sGhcWithInterpreter, sLibFFI, GhcNameVersion(..), FileSettings(..), PlatformMisc(..), settings, programName, projectVersion, ghcUsagePath, ghciUsagePath, topDir, versionedAppDir, versionedFilePath, extraGccViaCFlags, globalPackageDatabasePath, pgm_L, pgm_P, pgm_F, pgm_c, pgm_cxx, pgm_a, pgm_l, pgm_lm, pgm_dll, pgm_T, pgm_windres, pgm_libtool, pgm_ar, pgm_otool, pgm_install_name_tool, pgm_ranlib, pgm_lo, pgm_lc, pgm_lcc, pgm_i, opt_L, opt_P, opt_F, opt_c, opt_cxx, opt_a, opt_l, opt_lm, opt_i, opt_P_signature, opt_windres, opt_lo, opt_lc, opt_lcc, updatePlatformConstants, -- ** Manipulating DynFlags addPluginModuleName, defaultDynFlags, -- Settings -> DynFlags initDynFlags, -- DynFlags -> IO DynFlags defaultFatalMessager, defaultFlushOut, setOutputFile, setDynOutputFile, setOutputHi, setDynOutputHi, augmentByWorkingDirectory, getOpts, -- DynFlags -> (DynFlags -> [a]) -> [a] getVerbFlags, updOptLevel, setTmpDir, setUnitId, TurnOnFlag, turnOn, turnOff, impliedGFlags, impliedOffGFlags, impliedXFlags, -- ** State CmdLineP(..), runCmdLineP, getCmdLineState, putCmdLineState, processCmdLineP, -- ** Parsing DynFlags parseDynamicFlagsCmdLine, parseDynamicFilePragma, parseDynamicFlagsFull, -- ** Available DynFlags allNonDeprecatedFlags, flagsAll, flagsDynamic, flagsPackage, flagsForCompletion, supportedLanguagesAndExtensions, languageExtensions, -- ** DynFlags C compiler options picCCOpts, picPOpts, -- ** DynFlags C linker options pieCCLDOpts, -- * Compiler configuration suitable for display to the user compilerInfo, wordAlignment, setUnsafeGlobalDynFlags, -- * SSE and AVX isSse4_2Enabled, isBmiEnabled, isBmi2Enabled, isAvxEnabled, isAvx2Enabled, isAvx512cdEnabled, isAvx512erEnabled, isAvx512fEnabled, isAvx512pfEnabled, -- * Linker/compiler information LinkerInfo(..), 
CompilerInfo(..), useXLinkerRPath, -- * Include specifications IncludeSpecs(..), addGlobalInclude, addQuoteInclude, flattenIncludes, addImplicitQuoteInclude, -- * SDoc initSDocContext, initDefaultSDocContext, ) where import GHC.Prelude import GHC.Platform import GHC.Platform.Ways import GHC.Platform.Profile import GHC.UniqueSubdir (uniqueSubdir) import GHC.Unit.Types import GHC.Unit.Parser import GHC.Unit.Module import GHC.Builtin.Names ( mAIN_NAME ) import GHC.Driver.Phases ( Phase(..), phaseInputExt ) import GHC.Driver.Flags import GHC.Driver.Backend import GHC.Settings.Config import GHC.Utils.CliOption import GHC.Core.Unfold import GHC.Driver.CmdLine import GHC.Settings.Constants import GHC.Utils.Panic import qualified GHC.Utils.Ppr.Colour as Col import GHC.Utils.Misc import GHC.Utils.Constants (debugIsOn) import GHC.Utils.GlobalVars import GHC.Data.Maybe import GHC.Data.Bool import GHC.Utils.Monad import GHC.Types.Error (DiagnosticReason(..)) import GHC.Types.SrcLoc import GHC.Types.SafeHaskell import GHC.Types.Basic ( IntWithInf, treatZeroAsInf ) import qualified GHC.Types.FieldLabel as FieldLabel import GHC.Data.FastString import GHC.Utils.TmpFs import GHC.Utils.Fingerprint import GHC.Utils.Outputable import GHC.Settings import GHC.CmmToAsm.CFG.Weight import {-# SOURCE #-} GHC.Core.Opt.CallerCC import GHC.SysTools.Terminal ( stderrSupportsAnsiColors ) import GHC.SysTools.BaseDir ( expandToolDir, expandTopDir ) import Data.IORef import Control.Arrow ((&&&)) import Control.Monad import Control.Monad.Trans.Class import Control.Monad.Trans.Writer import Control.Monad.Trans.Reader import Control.Monad.Trans.Except import Control.Monad.Trans.State as State import Data.Functor.Identity import Data.Ord import Data.Char import Data.List (intercalate, sortBy) import qualified Data.List.NonEmpty as NE import qualified Data.Map as Map import qualified Data.Set as Set import System.FilePath import System.Directory import System.Environment (lookupEnv) import System.IO 
import System.IO.Error import Text.ParserCombinators.ReadP hiding (char) import Text.ParserCombinators.ReadP as R import GHC.Data.EnumSet (EnumSet) import qualified GHC.Data.EnumSet as EnumSet import GHC.Foreign (withCString, peekCString) import qualified GHC.LanguageExtensions as LangExt -- Note [Updating flag description in the User's Guide] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- If you modify anything in this file please make sure that your changes are -- described in the User's Guide. Please update the flag description in the -- users guide (docs/users_guide) whenever you add or change a flag. -- Please make sure you add ":since:" information to new flags. -- Note [Supporting CLI completion] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- The command line interface completion (in for example bash) is an easy way -- for the developer to learn what flags are available from GHC. -- GHC helps by separating which flags are available when compiling with GHC, -- and which flags are available when using GHCi. -- A flag is assumed to either work in both these modes, or only in one of them. -- When adding or changing a flag, please consider for which mode the flag will -- have effect, and annotate it accordingly. For Flags use defFlag, defGhcFlag, -- defGhciFlag, and for FlagSpec use flagSpec or flagGhciSpec. -- Note [Adding a language extension] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- There are a few steps to adding (or removing) a language extension, -- -- * Adding the extension to GHC.LanguageExtensions -- -- The Extension type in libraries/ghc-boot-th/GHC/LanguageExtensions/Type.hs -- is the canonical list of language extensions known by GHC. -- -- * Adding a flag to DynFlags.xFlags -- -- This is fairly self-explanatory. The name should be concise, memorable, -- and consistent with any previous implementations of the similar idea in -- other Haskell compilers. -- -- * Adding the flag to the documentation -- -- This is the same as any other flag. 
See -- Note [Updating flag description in the User's Guide] -- -- * Adding the flag to Cabal -- -- The Cabal library has its own list of all language extensions supported -- by all major compilers. This is the list that user code being uploaded -- to Hackage is checked against to ensure language extension validity. -- Consequently, it is very important that this list remains up-to-date. -- -- To this end, there is a testsuite test (testsuite/tests/driver/T4437.hs) -- whose job it is to ensure these GHC's extensions are consistent with -- Cabal. -- -- The recommended workflow is, -- -- 1. Temporarily add your new language extension to the -- expectedGhcOnlyExtensions list in T4437 to ensure the test doesn't -- break while Cabal is updated. -- -- 2. After your GHC change is accepted, submit a Cabal pull request adding -- your new extension to Cabal's list (found in -- Cabal/Language/Haskell/Extension.hs). -- -- 3. After your Cabal change is accepted, let the GHC developers know so -- they can update the Cabal submodule and remove the extensions from -- expectedGhcOnlyExtensions. -- -- * Adding the flag to the GHC Wiki -- -- There is a change log tracking language extension additions and removals -- on the GHC wiki: https://gitlab.haskell.org/ghc/ghc/wikis/language-pragma-history -- -- See #4437 and #8176. -- ----------------------------------------------------------------------------- -- DynFlags -- | Used to differentiate the scope an include needs to apply to. -- We have to split the include paths to avoid accidentally forcing recursive -- includes since -I overrides the system search paths. See #14312. data IncludeSpecs = IncludeSpecs { includePathsQuote :: [String] , includePathsGlobal :: [String] -- | See Note [Implicit include paths] , includePathsQuoteImplicit :: [String] } deriving Show -- | Append to the list of includes a path that shall be included using `-I` -- when the C compiler is called. These paths override system search paths. 
addGlobalInclude :: IncludeSpecs -> [String] -> IncludeSpecs addGlobalInclude spec paths = let f = includePathsGlobal spec in spec { includePathsGlobal = f ++ paths } -- | Append to the list of includes a path that shall be included using -- `-iquote` when the C compiler is called. These paths only apply when quoted -- includes are used. e.g. #include "foo.h" addQuoteInclude :: IncludeSpecs -> [String] -> IncludeSpecs addQuoteInclude spec paths = let f = includePathsQuote spec in spec { includePathsQuote = f ++ paths } -- | These includes are not considered while fingerprinting the flags for iface -- | See Note [Implicit include paths] addImplicitQuoteInclude :: IncludeSpecs -> [String] -> IncludeSpecs addImplicitQuoteInclude spec paths = let f = includePathsQuoteImplicit spec in spec { includePathsQuoteImplicit = f ++ paths } -- | Concatenate and flatten the list of global and quoted includes returning -- just a flat list of paths. flattenIncludes :: IncludeSpecs -> [String] flattenIncludes specs = includePathsQuote specs ++ includePathsQuoteImplicit specs ++ includePathsGlobal specs {- Note [Implicit include paths] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The compile driver adds the path to the folder containing the source file being compiled to the 'IncludeSpecs', and this change gets recorded in the 'DynFlags' that are used later to compute the interface file. Because of this, the flags fingerprint derived from these 'DynFlags' and recorded in the interface file will end up containing the absolute path to the source folder. Build systems with a remote cache like Bazel or Buck (or Shake, see #16956) store the build artifacts produced by a build BA for reuse in subsequent builds. Embedding source paths in interface fingerprints will thwart these attemps and lead to unnecessary recompilations when the source paths in BA differ from the source paths in subsequent builds. 
-}

-- | Contains not only a collection of 'GeneralFlag's but also a plethora of
-- information relating to the compilation of a single file or GHC session
data DynFlags = DynFlags {
  ghcMode           :: GhcMode,
  ghcLink           :: GhcLink,
  backend           :: !Backend,
   -- ^ The backend to use (if any).
   --
   -- Whenever you change the backend, also make sure to set 'ghcLink' to
   -- something sensible.
   --
   -- 'NoBackend' can be used to avoid generating any output, however, note that:
   --
   --  * If a program uses Template Haskell the typechecker may need to run code
   --    from an imported module. To facilitate this, code generation is enabled
   --    for modules imported by modules that use template haskell, using the
   --    default backend for the platform.
   --    See Note [-fno-code mode].

  -- formerly Settings
  ghcNameVersion    :: {-# UNPACK #-} !GhcNameVersion,
  fileSettings      :: {-# UNPACK #-} !FileSettings,
  targetPlatform    :: Platform,       -- Filled in by SysTools
  toolSettings      :: {-# UNPACK #-} !ToolSettings,
  platformMisc      :: {-# UNPACK #-} !PlatformMisc,
  rawSettings       :: [(String, String)],
  tmpDir            :: TempDir,

  llvmConfig        :: LlvmConfig,
    -- ^ N.B. It's important that this field is lazy since we load the LLVM
    -- configuration lazily. See Note [LLVM configuration] in "GHC.SysTools".
  llvmOptLevel      :: Int,            -- ^ LLVM optimisation level
  verbosity         :: Int,            -- ^ Verbosity level: see Note [Verbosity levels]
  debugLevel        :: Int,            -- ^ How much debug information to produce
  simplPhases       :: Int,            -- ^ Number of simplifier phases
  maxSimplIterations :: Int,           -- ^ Max simplifier iterations
  ruleCheck         :: Maybe String,
  strictnessBefore  :: [Int],          -- ^ Additional demand analysis

  parMakeCount      :: Maybe Int,
    -- ^ The number of modules to compile in parallel
    -- in --make mode, where Nothing ==> compile as
    -- many in parallel as there are CPUs.

  enableTimeStats   :: Bool,           -- ^ Enable RTS timing statistics?
  ghcHeapSize       :: Maybe Int,      -- ^ The heap size to set.

  maxRelevantBinds      :: Maybe Int,
    -- ^ Maximum number of bindings from the type envt
    -- to show in type error messages
  maxValidHoleFits      :: Maybe Int,
    -- ^ Maximum number of hole fits to show
    -- in typed hole error messages
  maxRefHoleFits        :: Maybe Int,
    -- ^ Maximum number of refinement hole
    -- fits to show in typed hole error
    -- messages
  refLevelHoleFits      :: Maybe Int,
    -- ^ Maximum level of refinement for
    -- refinement hole fits in typed hole
    -- error messages
  maxUncoveredPatterns  :: Int,
    -- ^ Maximum number of unmatched patterns to show
    -- in non-exhaustiveness warnings
  maxPmCheckModels      :: Int,
    -- ^ Soft limit on the number of models
    -- the pattern match checker checks
    -- a pattern against. A safe guard
    -- against exponential blow-up.
  simplTickFactor       :: Int,        -- ^ Multiplier for simplifier ticks
  dmdUnboxWidth         :: !Int,
    -- ^ Whether DmdAnal should optimistically put an
    -- Unboxed demand on returned products with at most
    -- this number of fields
  specConstrThreshold   :: Maybe Int,  -- ^ Threshold for SpecConstr
  specConstrCount       :: Maybe Int,  -- ^ Max number of specialisations for any one function
  specConstrRecursive   :: Int,
    -- ^ Max number of specialisations for recursive types
    -- Not optional; otherwise ForceSpecConstr can diverge.
  binBlobThreshold      :: Maybe Word,
    -- ^ Binary literals (e.g. strings) whose size is above
    -- this threshold will be dumped in a binary file
    -- by the assembler code generator. 0 and Nothing disables
    -- this feature. See 'GHC.StgToCmm.Config'.
  liberateCaseThreshold :: Maybe Int,  -- ^ Threshold for LiberateCase
  floatLamArgs          :: Maybe Int,
    -- ^ Arg count for lambda floating
    -- See 'GHC.Core.Opt.Monad.FloatOutSwitches'
  liftLamsRecArgs       :: Maybe Int,
    -- ^ Maximum number of arguments after lambda lifting a
    -- recursive function.
  liftLamsNonRecArgs    :: Maybe Int,
    -- ^ Maximum number of arguments after lambda lifting a
    -- non-recursive function.
  liftLamsKnown         :: Bool,
    -- ^ Lambda lift even when this turns a known call
    -- into an unknown call.
  cmmProcAlignment      :: Maybe Int,  -- ^ Align Cmm functions at this boundary or use default.

  historySize           :: Int,        -- ^ Simplification history size

  importPaths           :: [FilePath],
  mainModuleNameIs      :: ModuleName,
  mainFunIs             :: Maybe String,
  reductionDepth        :: IntWithInf, -- ^ Typechecker maximum stack depth
  solverIterations      :: IntWithInf,
    -- ^ Number of iterations in the constraints solver
    -- Typically only 1 is needed

  homeUnitId_             :: UnitId,                 -- ^ Target home unit-id
  homeUnitInstanceOf_     :: Maybe UnitId,           -- ^ Id of the unit to instantiate
  homeUnitInstantiations_ :: [(ModuleName, Module)], -- ^ Module instantiations

  -- Note [Filepaths and Multiple Home Units]
  workingDirectory      :: Maybe FilePath,
  thisPackageName       :: Maybe String, -- ^ What the package is called, use with multiple home units
  hiddenModules         :: Set.Set ModuleName,
  reexportedModules     :: Set.Set ModuleName,

  -- ways
  targetWays_           :: Ways,       -- ^ Target way flags from the command line

  -- For object splitting
  splitInfo             :: Maybe (String,Int),

  -- paths etc.
  objectDir             :: Maybe String,
  dylibInstallName      :: Maybe String,
  hiDir                 :: Maybe String,
  hieDir                :: Maybe String,
  stubDir               :: Maybe String,
  dumpDir               :: Maybe String,

  objectSuf_            :: String,
  hcSuf                 :: String,
  hiSuf_                :: String,
  hieSuf                :: String,
  dynObjectSuf_         :: String,
  dynHiSuf_             :: String,

  outputFile_           :: Maybe String,
  dynOutputFile_        :: Maybe String,
  outputHi              :: Maybe String,
  dynOutputHi           :: Maybe String,
  dynLibLoader          :: DynLibLoader,

  dynamicNow            :: !Bool,
    -- ^ Indicate if we are now generating dynamic output
    -- because of -dynamic-too. This predicate is
    -- used to query the appropriate fields
    -- (outputFile/dynOutputFile, ways, etc.)

  -- | This defaults to 'non-module'. It can be set by
  -- 'GHC.Driver.Pipeline.setDumpPrefix' or 'ghc.GHCi.UI.runStmt' based on
  -- where its output is going.
  dumpPrefix            :: FilePath,

  -- | Override the 'dumpPrefix' set by 'GHC.Driver.Pipeline.setDumpPrefix'
  -- or 'ghc.GHCi.UI.runStmt'.
  -- Set by @-ddump-file-prefix@
  dumpPrefixForce       :: Maybe FilePath,

  ldInputs              :: [Option],

  includePaths          :: IncludeSpecs,
  libraryPaths          :: [String],
  frameworkPaths        :: [String],   -- used on darwin only
  cmdlineFrameworks     :: [String],   -- ditto

  rtsOpts               :: Maybe String,
  rtsOptsEnabled        :: RtsOptsEnabled,
  rtsOptsSuggestions    :: Bool,

  hpcDir                :: String,     -- ^ Path to store the .mix files

  -- Plugins
  pluginModNames        :: [ModuleName],
    -- ^ the @-fplugin@ flags given on the command line, in *reverse*
    -- order that they're specified on the command line.
  pluginModNameOpts     :: [(ModuleName,String)],
  frontendPluginOpts    :: [String],
    -- ^ the @-ffrontend-opt@ flags given on the command line, in *reverse*
    -- order that they're specified on the command line.

  -- For ghc -M
  depMakefile           :: FilePath,
  depIncludePkgDeps     :: Bool,
  depIncludeCppDeps     :: Bool,
  depExcludeMods        :: [ModuleName],
  depSuffixes           :: [String],

  --  Package flags
  packageDBFlags        :: [PackageDBFlag],
    -- ^ The @-package-db@ flags given on the command line, In
    -- *reverse* order that they're specified on the command line.
    -- This is intended to be applied with the list of "initial"
    -- package databases derived from @GHC_PACKAGE_PATH@; see
    -- 'getUnitDbRefs'.

  ignorePackageFlags    :: [IgnorePackageFlag],
    -- ^ The @-ignore-package@ flags from the command line.
    -- In *reverse* order that they're specified on the command line.
  packageFlags          :: [PackageFlag],
    -- ^ The @-package@ and @-hide-package@ flags from the command-line.
    -- In *reverse* order that they're specified on the command line.
  pluginPackageFlags    :: [PackageFlag],
    -- ^ The @-plugin-package-id@ flags from command line.
    -- In *reverse* order that they're specified on the command line.
  trustFlags            :: [TrustFlag],
    -- ^ The @-trust@ and @-distrust@ flags.
    -- In *reverse* order that they're specified on the command line.
  packageEnv            :: Maybe FilePath,
    -- ^ Filepath to the package environment file (if overriding default)

  -- hsc dynamic flags
  dumpFlags             :: EnumSet DumpFlag,
  generalFlags          :: EnumSet GeneralFlag,
  warningFlags          :: EnumSet WarningFlag,
  fatalWarningFlags     :: EnumSet WarningFlag,

  -- Don't change this without updating extensionFlags:
  language              :: Maybe Language,

  -- | Safe Haskell mode
  safeHaskell           :: SafeHaskellMode,
  safeInfer             :: Bool,
  safeInferred          :: Bool,
  -- We store the location of where some extension and flags were turned on so
  -- we can produce accurate error messages when Safe Haskell fails due to
  -- them.
  thOnLoc               :: SrcSpan,
  newDerivOnLoc         :: SrcSpan,
  deriveViaOnLoc        :: SrcSpan,
  overlapInstLoc        :: SrcSpan,
  incoherentOnLoc       :: SrcSpan,
  pkgTrustOnLoc         :: SrcSpan,
  warnSafeOnLoc         :: SrcSpan,
  warnUnsafeOnLoc       :: SrcSpan,
  trustworthyOnLoc      :: SrcSpan,

  -- Don't change this without updating extensionFlags:
  -- Here we collect the settings of the language extensions
  -- from the command line, the ghci config file and
  -- from interactive :set / :seti commands.
  extensions            :: [OnOff LangExt.Extension],

  -- extensionFlags should always be equal to
  --     flattenExtensionFlags language extensions
  -- LangExt.Extension is defined in libraries/ghc-boot so that it can be used
  -- by template-haskell
  extensionFlags        :: EnumSet LangExt.Extension,

  -- | Unfolding control
  -- See Note [Discounts and thresholds] in GHC.Core.Unfold
  unfoldingOpts         :: !UnfoldingOpts,

  maxWorkerArgs         :: Int,

  ghciHistSize          :: Int,

  flushOut              :: FlushOut,

  ghcVersionFile        :: Maybe FilePath,
  haddockOptions        :: Maybe String,

  -- | GHCi scripts specified by -ghci-script, in reverse order
  ghciScripts           :: [String],

  -- Output style options
  pprUserLength         :: Int,
  pprCols               :: Int,

  useUnicode            :: Bool,
  useColor              :: OverridingBool,
  canUseColor           :: Bool,
  colScheme             :: Col.Scheme,

  -- | what kind of {-# SCC #-} to add automatically
  profAuto              :: ProfAuto,
  callerCcFilters       :: [CallerCcFilter],

  interactivePrint      :: Maybe String,

  -- | Machine dependent flags (-m\<blah> stuff)
  sseVersion            :: Maybe SseVersion,
  bmiVersion            :: Maybe BmiVersion,
  avx                   :: Bool,
  avx2                  :: Bool,
  avx512cd              :: Bool, -- Enable AVX-512 Conflict Detection Instructions.
  avx512er              :: Bool, -- Enable AVX-512 Exponential and Reciprocal Instructions.
  avx512f               :: Bool, -- Enable AVX-512 instructions.
  avx512pf              :: Bool, -- Enable AVX-512 PreFetch Instructions.

  -- | Run-time linker information (what options we need, etc.)
  rtldInfo              :: IORef (Maybe LinkerInfo),

  -- | Run-time C compiler information
  rtccInfo              :: IORef (Maybe CompilerInfo),

  -- | Run-time assembler information
  rtasmInfo             :: IORef (Maybe CompilerInfo),

  -- Constants used to control the amount of optimization done.

  -- | Max size, in bytes, of inline array allocations.
  maxInlineAllocSize    :: Int,

  -- | Only inline memcpy if it generates no more than this many
  -- pseudo (roughly: Cmm) instructions.
  maxInlineMemcpyInsns  :: Int,

  -- | Only inline memset if it generates no more than this many
  -- pseudo (roughly: Cmm) instructions.
  maxInlineMemsetInsns  :: Int,

  -- | Reverse the order of error messages in GHC/GHCi
  reverseErrors         :: Bool,

  -- | Limit the maximum number of errors to show
  maxErrors             :: Maybe Int,

  -- | Unique supply configuration for testing build determinism
  initialUnique         :: Word,
  uniqueIncrement       :: Int,
    -- 'Int' because it can be used to test uniques in decreasing order.

  -- | Temporary: CFG Edge weights for fast iterations
  cfgWeights            :: Weights
}

-- | Monads from which a 'DynFlags' can be retrieved.
class HasDynFlags m where
    getDynFlags :: m DynFlags

{- It would be desirable to have the more generalised

  instance (MonadTrans t, Monad m, HasDynFlags m) => HasDynFlags (t m) where
      getDynFlags = lift getDynFlags

instance definition. However, that definition would overlap with the
`HasDynFlags (GhcT m)` instance. Instead we define instances for a
couple of common Monad transformers explicitly. -}

instance (Monoid a, Monad m, HasDynFlags m) => HasDynFlags (WriterT a m) where
    getDynFlags = lift getDynFlags

instance (Monad m, HasDynFlags m) => HasDynFlags (ReaderT a m) where
    getDynFlags = lift getDynFlags

instance (Monad m, HasDynFlags m) => HasDynFlags (MaybeT m) where
    getDynFlags = lift getDynFlags

instance (Monad m, HasDynFlags m) => HasDynFlags (ExceptT e m) where
    getDynFlags = lift getDynFlags

-- | Environments (e.g. 'HscEnv') from which a 'DynFlags' can be projected.
class ContainsDynFlags t where
    extractDynFlags :: t -> DynFlags

-- | Policy for automatic insertion of cost-centre ({-# SCC #-}) annotations.
data ProfAuto
  = NoProfAuto         -- ^ no SCC annotations added
  | ProfAutoAll        -- ^ top-level and nested functions are annotated
  | ProfAutoTop        -- ^ top-level functions annotated only
  | ProfAutoExports    -- ^ exported functions annotated only
  | ProfAutoCalls      -- ^ annotate call-sites
  deriving (Eq,Enum)

-- | Description of a single LLVM target (data layout, CPU, attributes).
data LlvmTarget = LlvmTarget
  { lDataLayout :: String
  , lCPU        :: String
  , lAttributes :: [String]
  }

-- | See Note [LLVM configuration] in "GHC.SysTools".
data LlvmConfig = LlvmConfig { llvmTargets :: [(String, LlvmTarget)]
                             , llvmPasses  :: [(Int, String)]
                             }

-----------------------------------------------------------------------------
-- Accessors from 'DynFlags'

-- | "unbuild" a 'Settings' from a 'DynFlags'. This shouldn't be needed in the
-- vast majority of code. But GHCi questionably uses this to produce a default
-- 'DynFlags' from which to compute a flags diff for printing.
settings :: DynFlags -> Settings
settings dflags =
  Settings { sGhcNameVersion = ghcNameVersion dflags
           , sFileSettings   = fileSettings dflags
           , sTargetPlatform = targetPlatform dflags
           , sToolSettings   = toolSettings dflags
           , sPlatformMisc   = platformMisc dflags
           , sRawSettings    = rawSettings dflags
           }

-- Simple projections out of the 'GhcNameVersion' / 'FileSettings' /
-- 'ToolSettings' components, written point-free.

programName :: DynFlags -> String
programName = ghcNameVersion_programName . ghcNameVersion

projectVersion :: DynFlags -> String
projectVersion = ghcNameVersion_projectVersion . ghcNameVersion

ghcUsagePath :: DynFlags -> FilePath
ghcUsagePath = fileSettings_ghcUsagePath . fileSettings

ghciUsagePath :: DynFlags -> FilePath
ghciUsagePath = fileSettings_ghciUsagePath . fileSettings

toolDir :: DynFlags -> Maybe FilePath
toolDir = fileSettings_toolDir . fileSettings

topDir :: DynFlags -> FilePath
topDir = fileSettings_topDir . fileSettings

extraGccViaCFlags :: DynFlags -> [String]
extraGccViaCFlags = toolSettings_extraGccViaCFlags . toolSettings

globalPackageDatabasePath :: DynFlags -> FilePath
globalPackageDatabasePath = fileSettings_globalPackageDatabase . fileSettings

-- Names of the external programs GHC invokes (pgm_*), some paired with
-- their baked-in options.

pgm_L :: DynFlags -> String
pgm_L = toolSettings_pgm_L . toolSettings

pgm_P :: DynFlags -> (String,[Option])
pgm_P = toolSettings_pgm_P . toolSettings

pgm_F :: DynFlags -> String
pgm_F = toolSettings_pgm_F . toolSettings

pgm_c :: DynFlags -> String
pgm_c = toolSettings_pgm_c . toolSettings

pgm_cxx :: DynFlags -> String
pgm_cxx = toolSettings_pgm_cxx . toolSettings

pgm_a :: DynFlags -> (String,[Option])
pgm_a = toolSettings_pgm_a . toolSettings

pgm_l :: DynFlags -> (String,[Option])
pgm_l = toolSettings_pgm_l . toolSettings

pgm_lm :: DynFlags -> Maybe (String,[Option])
pgm_lm = toolSettings_pgm_lm . toolSettings

pgm_dll :: DynFlags -> (String,[Option])
pgm_dll = toolSettings_pgm_dll . toolSettings

pgm_T :: DynFlags -> String
pgm_T = toolSettings_pgm_T . toolSettings

pgm_windres :: DynFlags -> String
pgm_windres = toolSettings_pgm_windres . toolSettings

pgm_libtool :: DynFlags -> String
pgm_libtool = toolSettings_pgm_libtool . toolSettings

pgm_lcc :: DynFlags -> (String,[Option])
pgm_lcc = toolSettings_pgm_lcc . toolSettings

pgm_ar :: DynFlags -> String
pgm_ar = toolSettings_pgm_ar . toolSettings

pgm_otool :: DynFlags -> String
pgm_otool = toolSettings_pgm_otool . toolSettings

pgm_install_name_tool :: DynFlags -> String
pgm_install_name_tool = toolSettings_pgm_install_name_tool . toolSettings

pgm_ranlib :: DynFlags -> String
pgm_ranlib = toolSettings_pgm_ranlib . toolSettings

pgm_lo :: DynFlags -> (String,[Option])
pgm_lo = toolSettings_pgm_lo . toolSettings

pgm_lc :: DynFlags -> (String,[Option])
pgm_lc = toolSettings_pgm_lc . toolSettings

pgm_i :: DynFlags -> String
pgm_i = toolSettings_pgm_i . toolSettings

opt_L :: DynFlags -> [String]
opt_L = toolSettings_opt_L . toolSettings

-- | Preprocessor options: way-specific options first, then the -optP flags.
opt_P :: DynFlags -> [String]
opt_P dflags =
  let wayOpts  = concatMap (wayOptP (targetPlatform dflags)) (ways dflags)
      userOpts = toolSettings_opt_P (toolSettings dflags)
  in wayOpts ++ userOpts

-- This function packages everything that's needed to fingerprint opt_P
-- flags. See Note [Repeated -optP hashing].
-- | Everything needed to fingerprint the opt_P flags.
-- See Note [Repeated -optP hashing].
opt_P_signature :: DynFlags -> ([String], Fingerprint)
opt_P_signature dflags =
  ( concatMap (wayOptP (targetPlatform dflags)) (ways dflags)
  , toolSettings_opt_P_fingerprint $ toolSettings dflags
  )

opt_F :: DynFlags -> [String]
opt_F dflags = toolSettings_opt_F $ toolSettings dflags

opt_c :: DynFlags -> [String]
opt_c dflags = concatMap (wayOptc (targetPlatform dflags)) (ways dflags)
            ++ toolSettings_opt_c (toolSettings dflags)

opt_cxx :: DynFlags -> [String]
opt_cxx dflags = toolSettings_opt_cxx $ toolSettings dflags

opt_a :: DynFlags -> [String]
opt_a dflags = toolSettings_opt_a $ toolSettings dflags

opt_l :: DynFlags -> [String]
opt_l dflags = concatMap (wayOptl (targetPlatform dflags)) (ways dflags)
            ++ toolSettings_opt_l (toolSettings dflags)

opt_lm :: DynFlags -> [String]
opt_lm dflags = toolSettings_opt_lm $ toolSettings dflags

opt_windres :: DynFlags -> [String]
opt_windres dflags = toolSettings_opt_windres $ toolSettings dflags

opt_lcc :: DynFlags -> [String]
opt_lcc dflags = toolSettings_opt_lcc $ toolSettings dflags

opt_lo :: DynFlags -> [String]
opt_lo dflags = toolSettings_opt_lo $ toolSettings dflags

opt_lc :: DynFlags -> [String]
opt_lc dflags = toolSettings_opt_lc $ toolSettings dflags

opt_i :: DynFlags -> [String]
opt_i dflags = toolSettings_opt_i $ toolSettings dflags

-- | The directory for this version of ghc in the user's app directory
-- The appdir used to be in ~/.ghc but to respect the XDG specification
-- we want to move it under $XDG_DATA_HOME/
-- However, old tooling (like cabal) might still write package environments
-- to the old directory, so we prefer that if a subdirectory of ~/.ghc
-- with the correct target and GHC version suffix exists.
--
-- i.e. if ~/.ghc/$UNIQUE_SUBDIR exists we use that
-- otherwise we use $XDG_DATA_HOME/$UNIQUE_SUBDIR
--
-- UNIQUE_SUBDIR is typically a combination of the target platform and GHC version
versionedAppDir :: String -> ArchOS -> MaybeT IO FilePath
versionedAppDir appname platform = do
  -- Make sure we handle the case the HOME isn't set (see #11678)
  -- We need to fallback to the old scheme if the subdirectory exists.
  --
  -- FIX: restored the (</>) path-join operator below; it had been lost
  -- (rendered as a bare space), which made the section a type error.
  msum $ map (checkIfExists <=< fmap (</> versionedFilePath platform))
    [ tryMaybeT $ getAppUserDataDirectory appname  -- this is ~/.ghc/
    , tryMaybeT $ getXdgDirectory XdgData appname -- this is $XDG_DATA_HOME/
    ]
  where
    checkIfExists dir = tryMaybeT (doesDirectoryExist dir) >>= \case
      True -> pure dir
      False -> MaybeT (pure Nothing)

-- | The platform/version-specific subdirectory name used by 'versionedAppDir'.
versionedFilePath :: ArchOS -> FilePath
versionedFilePath platform = uniqueSubdir platform

-- | The 'GhcMode' tells us whether we're doing multi-module
-- compilation (controlled via the "GHC" API) or one-shot
-- (single-module) compilation.  This makes a difference primarily to
-- the "GHC.Unit.Finder": in one-shot mode we look for interface files for
-- imported modules, but in multi-module mode we look for source files
-- in order to check whether they need to be recompiled.
data GhcMode
  = CompManager         -- ^ @\-\-make@, GHCi, etc.
  | OneShot             -- ^ @ghc -c Foo.hs@
  | MkDepend            -- ^ @ghc -M@, see "GHC.Unit.Finder" for why we need this
  deriving Eq

instance Outputable GhcMode where
  ppr CompManager = text "CompManager"
  ppr OneShot     = text "OneShot"
  ppr MkDepend    = text "MkDepend"

-- | True iff we are compiling a single module without linking.
isOneShot :: GhcMode -> Bool
isOneShot OneShot = True
isOneShot _other  = False

-- | What to do in the link step, if there is one.
data GhcLink
  = NoLink              -- ^ Don't link at all
  | LinkBinary          -- ^ Link object code into a binary
  | LinkInMemory        -- ^ Use the in-memory dynamic linker (works for both
                        --   bytecode and object code).
  | LinkDynLib          -- ^ Link objects into a dynamic lib (DLL on Windows, DSO on ELF platforms)
  | LinkStaticLib       -- ^ Link objects into a static lib
  | LinkMergedObj       -- ^ Link objects into a merged "GHCi object"
  deriving (Eq, Show)

-- | True iff no linking is to be performed.
isNoLink :: GhcLink -> Bool
isNoLink NoLink = True
isNoLink _      = False

-- | We accept flags which make packages visible, but how they select
-- the package varies; this data type reflects what selection criterion
-- is used.
data PackageArg =
      PackageArg String    -- ^ @-package@, by 'PackageName'
    | UnitIdArg Unit       -- ^ @-package-id@, by 'Unit'
  deriving (Eq, Show)

instance Outputable PackageArg where
    ppr (PackageArg pn) = text "package" <+> text pn
    ppr (UnitIdArg uid) = text "unit" <+> ppr uid

-- | Represents the renaming that may be associated with an exposed
-- package, e.g. the @rns@ part of @-package "foo (rns)"@.
--
-- Here are some example parsings of the package flags (where
-- a string literal is punned to be a 'ModuleName':
--
--      * @-package foo@ is @ModRenaming True []@
--      * @-package foo ()@ is @ModRenaming False []@
--      * @-package foo (A)@ is @ModRenaming False [("A", "A")]@
--      * @-package foo (A as B)@ is @ModRenaming False [("A", "B")]@
--      * @-package foo with (A as B)@ is @ModRenaming True [("A", "B")]@
data ModRenaming = ModRenaming {
    modRenamingWithImplicit :: Bool, -- ^ Bring all exposed modules into scope?
    modRenamings :: [(ModuleName, ModuleName)] -- ^ Bring module @m@ into scope
                                               --   under name @n@.
  } deriving (Eq)

instance Outputable ModRenaming where
    ppr (ModRenaming b rns) = ppr b <+> parens (ppr rns)

-- | Flags for manipulating the set of non-broken packages.
newtype IgnorePackageFlag = IgnorePackage String -- ^ @-ignore-package@
  deriving (Eq)

-- | Flags for manipulating package trust.
data TrustFlag
  = TrustPackage    String -- ^ @-trust@
  | DistrustPackage String -- ^ @-distrust@
  deriving (Eq)

-- | Flags for manipulating packages visibility.
data PackageFlag
  = ExposePackage   String PackageArg ModRenaming -- ^ @-package@, @-package-id@
  | HidePackage     String -- ^ @-hide-package@
  deriving (Eq) -- NB: equality instance is used by packageFlagsChanged

data PackageDBFlag
  = PackageDB PkgDbRef
  | NoUserPackageDB
  | NoGlobalPackageDB
  | ClearPackageDBs
  deriving (Eq)

-- | True iff any package-visibility-affecting input (package flags, DB
-- flags, trust flags, or the relevant 'GeneralFlag's) differs between
-- the two 'DynFlags'.
packageFlagsChanged :: DynFlags -> DynFlags -> Bool
packageFlagsChanged idflags1 idflags0 =
  packageFlags idflags1 /= packageFlags idflags0 ||
  ignorePackageFlags idflags1 /= ignorePackageFlags idflags0 ||
  pluginPackageFlags idflags1 /= pluginPackageFlags idflags0 ||
  trustFlags idflags1 /= trustFlags idflags0 ||
  packageDBFlags idflags1 /= packageDBFlags idflags0 ||
  packageGFlags idflags1 /= packageGFlags idflags0
 where
   -- The general flags that also influence package visibility.
   packageGFlags dflags = map (`gopt` dflags)
     [ Opt_HideAllPackages
     , Opt_HideAllPluginPackages
     , Opt_AutoLinkPackages ]

instance Outputable PackageFlag where
    ppr (ExposePackage n arg rn) = text n <> braces (ppr arg <+> ppr rn)
    ppr (HidePackage str) = text "-hide-package" <+> text str

data DynLibLoader
  = Deployable
  | SystemDependent
  deriving Eq

data RtsOptsEnabled
  = RtsOptsNone | RtsOptsIgnore | RtsOptsIgnoreAll | RtsOptsSafeOnly
  | RtsOptsAll
  deriving (Show)

-- | Are we building with @-fPIE@ or @-fPIC@ enabled?
positionIndependent :: DynFlags -> Bool
positionIndependent dflags = gopt Opt_PIC dflags || gopt Opt_PIE dflags

-- Note [-dynamic-too business]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- With -dynamic-too flag, we try to build both the non-dynamic and dynamic
-- objects in a single run of the compiler: the pipeline is the same down to
-- Core optimisation, then the backend (from Core to object code) is executed
-- twice.
--
-- The implementation is currently rather hacky, for example, we don't clearly separate non-dynamic
-- and dynamic loaded interfaces (#9176).
--
-- To make matters worse, we automatically enable -dynamic-too when some modules
-- need Template-Haskell and GHC is dynamically linked (cf
-- GHC.Driver.Pipeline.compileOne').
--
-- We used to try and fall back from a dynamic-too failure but this feature
-- didn't work as expected (#20446) so it was removed to simplify the
-- implementation and not obscure latent bugs.

data DynamicTooState
   = DT_Dont    -- ^ Don't try to build dynamic objects too
   | DT_OK      -- ^ Will still try to generate dynamic objects
   | DT_Dyn     -- ^ Currently generating dynamic objects (in the backend)
   deriving (Eq,Show,Ord)

-- | Where are we in the -dynamic-too lifecycle?  Derived from
-- 'Opt_BuildDynamicToo' and the 'dynamicNow' field.
dynamicTooState :: DynFlags -> DynamicTooState
dynamicTooState dflags
   | not (gopt Opt_BuildDynamicToo dflags) = DT_Dont
   | dynamicNow dflags = DT_Dyn
   | otherwise = DT_OK

-- | Flip a 'DynFlags' into "now generating dynamic output" mode.
setDynamicNow :: DynFlags -> DynFlags
setDynamicNow dflags0 =
   dflags0 { dynamicNow = True }

-----------------------------------------------------------------------------

-- | Used by 'GHC.runGhc' to partially initialize a new 'DynFlags' value
initDynFlags :: DynFlags -> IO DynFlags
initDynFlags dflags = do
 -- NOTE(review): the empty 'let' below appears to be a leftover from a
 -- removed binding; GHC accepts an empty binding group, so it is harmless.
 let
 refRtldInfo <- newIORef Nothing
 refRtccInfo <- newIORef Nothing
 refRtasmInfo <- newIORef Nothing
 -- Probe whether the current locale can round-trip the Unicode quote
 -- characters; any encoding failure means "no unicode".
 canUseUnicode <- do let enc = localeEncoding
                         str = "‘’"
                     (withCString enc str $ \cstr ->
                          do str' <- peekCString enc cstr
                             return (str == str'))
                          `catchIOError` \_ -> return False
 ghcNoUnicodeEnv <- lookupEnv "GHC_NO_UNICODE"
 let useUnicode' = isNothing ghcNoUnicodeEnv && canUseUnicode
 -- Both spellings of the environment variable are honoured; the
 -- British spelling is applied second, so it wins if both are set.
 maybeGhcColorsEnv  <- lookupEnv "GHC_COLORS"
 maybeGhcColoursEnv <- lookupEnv "GHC_COLOURS"
 let adjustCols (Just env) = Col.parseScheme env
     adjustCols Nothing    = id
 let (useColor', colScheme') =
       (adjustCols maybeGhcColoursEnv . adjustCols maybeGhcColorsEnv)
       (useColor dflags, colScheme dflags)
 tmp_dir <- normalise <$> getTemporaryDirectory
 return dflags{
        useUnicode    = useUnicode',
        useColor      = useColor',
        canUseColor   = stderrSupportsAnsiColors,
        colScheme     = colScheme',
        rtldInfo      = refRtldInfo,
        rtccInfo      = refRtccInfo,
        rtasmInfo     = refRtasmInfo,
        tmpDir        = TempDir tmp_dir
        }

-- | The normal 'DynFlags'. Note that they are not suitable for use in this form
-- and must be fully initialized by 'GHC.runGhc' first.
defaultDynFlags :: Settings -> LlvmConfig -> DynFlags
defaultDynFlags mySettings llvmConfig =
-- See Note [Updating flag description in the User's Guide]
     DynFlags {
        ghcMode                 = CompManager,
        ghcLink                 = LinkBinary,
        backend                 = platformDefaultBackend (sTargetPlatform mySettings),
        verbosity               = 0,
        debugLevel              = 0,
        simplPhases             = 2,
        maxSimplIterations      = 4,
        ruleCheck               = Nothing,
        binBlobThreshold        = Just 500000, -- 500K is a good default (see #16190)
        maxRelevantBinds        = Just 6,
        maxValidHoleFits        = Just 6,
        maxRefHoleFits          = Just 6,
        refLevelHoleFits        = Nothing,
        maxUncoveredPatterns    = 4,
        maxPmCheckModels        = 30,
        simplTickFactor         = 100,
        dmdUnboxWidth           = 3,      -- Default: Assume an unboxed demand on function bodies returning a triple
        specConstrThreshold     = Just 2000,
        specConstrCount         = Just 3,
        specConstrRecursive     = 3,
        liberateCaseThreshold   = Just 2000,
        floatLamArgs            = Just 0, -- Default: float only if no fvs
        liftLamsRecArgs         = Just 5, -- Default: the number of available argument hardware registers on x86_64
        liftLamsNonRecArgs      = Just 5, -- Default: the number of available argument hardware registers on x86_64
        liftLamsKnown           = False,  -- Default: don't turn known calls into unknown ones
        cmmProcAlignment        = Nothing,

        historySize             = 20,
        strictnessBefore        = [],

        parMakeCount            = Just 1,

        enableTimeStats         = False,
        ghcHeapSize             = Nothing,

        importPaths             = ["."],
        mainModuleNameIs        = mAIN_NAME,
        mainFunIs               = Nothing,
        reductionDepth          = treatZeroAsInf mAX_REDUCTION_DEPTH,
        solverIterations        = treatZeroAsInf mAX_SOLVER_ITERATIONS,

        homeUnitId_             = mainUnitId,
        homeUnitInstanceOf_     = Nothing,
        homeUnitInstantiations_ = [],

        workingDirectory        = Nothing,
        thisPackageName         = Nothing,
        hiddenModules           = Set.empty,
        reexportedModules       = Set.empty,

        objectDir               = Nothing,
        dylibInstallName        = Nothing,
        hiDir                   = Nothing,
        hieDir                  = Nothing,
        stubDir                 = Nothing,
        dumpDir                 = Nothing,

        objectSuf_              = phaseInputExt StopLn,
        hcSuf                   = phaseInputExt HCc,
        hiSuf_                  = "hi",
        hieSuf                  = "hie",
        dynObjectSuf_           = "dyn_" ++ phaseInputExt StopLn,
        dynHiSuf_               = "dyn_hi",
        dynamicNow              = False,

        pluginModNames          = [],
        pluginModNameOpts       = [],
        frontendPluginOpts      = [],

        outputFile_             = Nothing,
        dynOutputFile_          = Nothing,
        outputHi                = Nothing,
        dynOutputHi             = Nothing,
        dynLibLoader            = SystemDependent,
        dumpPrefix              = "non-module.",
        dumpPrefixForce         = Nothing,
        ldInputs                = [],
        includePaths            = IncludeSpecs [] [] [],
        libraryPaths            = [],
        frameworkPaths          = [],
        cmdlineFrameworks       = [],
        rtsOpts                 = Nothing,
        rtsOptsEnabled          = RtsOptsSafeOnly,
        rtsOptsSuggestions      = True,

        hpcDir                  = ".hpc",

        packageDBFlags          = [],
        packageFlags            = [],
        pluginPackageFlags      = [],
        ignorePackageFlags      = [],
        trustFlags              = [],
        packageEnv              = Nothing,

        targetWays_             = Set.empty,
        splitInfo               = Nothing,

        ghcNameVersion          = sGhcNameVersion mySettings,
        fileSettings            = sFileSettings mySettings,
        toolSettings            = sToolSettings mySettings,
        targetPlatform          = sTargetPlatform mySettings,
        platformMisc            = sPlatformMisc mySettings,
        rawSettings             = sRawSettings mySettings,

        tmpDir                  = panic "defaultDynFlags: uninitialized tmpDir",

        -- See Note [LLVM configuration].
        llvmConfig              = llvmConfig,
        llvmOptLevel            = 0,

        -- ghc -M values
        depMakefile             = "Makefile",
        depIncludePkgDeps       = False,
        depIncludeCppDeps       = False,
        depExcludeMods          = [],
        depSuffixes             = [],
        -- end of ghc -M values

        ghcVersionFile          = Nothing,
        haddockOptions          = Nothing,
        dumpFlags               = EnumSet.empty,
        generalFlags            = EnumSet.fromList (defaultFlags mySettings),
        warningFlags            = EnumSet.fromList standardWarnings,
        fatalWarningFlags       = EnumSet.empty,
        ghciScripts             = [],
        language                = Nothing,
        safeHaskell             = Sf_None,
        safeInfer               = True,
        safeInferred            = True,
        thOnLoc                 = noSrcSpan,
        newDerivOnLoc           = noSrcSpan,
        deriveViaOnLoc          = noSrcSpan,
        overlapInstLoc          = noSrcSpan,
        incoherentOnLoc         = noSrcSpan,
        pkgTrustOnLoc           = noSrcSpan,
        warnSafeOnLoc           = noSrcSpan,
        warnUnsafeOnLoc         = noSrcSpan,
        trustworthyOnLoc        = noSrcSpan,
        extensions              = [],
        extensionFlags          = flattenExtensionFlags Nothing [],
        unfoldingOpts           = defaultUnfoldingOpts,
        maxWorkerArgs           = 10,
        ghciHistSize            = 50, -- keep a log of length 50 by default
        flushOut                = defaultFlushOut,
        pprUserLength           = 5,
        pprCols                 = 100,
        useUnicode              = False,
        useColor                = Auto,
        canUseColor             = False,
        colScheme               = Col.defaultScheme,
        profAuto                = NoProfAuto,
        callerCcFilters         = [],
        interactivePrint        = Nothing,
        sseVersion              = Nothing,
        bmiVersion              = Nothing,
        avx                     = False,
        avx2                    = False,
        avx512cd                = False,
        avx512er                = False,
        avx512f                 = False,
        avx512pf                = False,
        rtldInfo                = panic "defaultDynFlags: no rtldInfo",
        rtccInfo                = panic "defaultDynFlags: no rtccInfo",
        rtasmInfo               = panic "defaultDynFlags: no rtasmInfo",
        maxInlineAllocSize      = 128,
        maxInlineMemcpyInsns    = 32,
        maxInlineMemsetInsns    = 32,
        initialUnique           = 0,
        uniqueIncrement         = 1,
        reverseErrors           = False,
        maxErrors               = Nothing,
        cfgWeights              = defaultWeights
      }

-- | Action invoked to report a fatal message.
type FatalMessager = String -> IO ()

defaultFatalMessager :: FatalMessager
defaultFatalMessager = hPutStrLn stderr

-- | Action used to flush compiler output.
newtype FlushOut = FlushOut (IO ())

defaultFlushOut :: FlushOut
defaultFlushOut = FlushOut $ hFlush stdout

{-
Note [Verbosity levels]
~~~~~~~~~~~~~~~~~~~~~~~
    0   |   print errors & warnings only
    1   |   minimal verbosity: print "compiling M ... done." for each module.
    2   |   equivalent to -dshow-passes
    3   |   equivalent to existing "ghc -v"
    4   |   "ghc -v -ddump-most"
    5   |   "ghc -v -ddump-all"
-}

data OnOff a = On a
             | Off a
  deriving (Eq, Show)

instance Outputable a => Outputable (OnOff a) where
  ppr (On x)  = text "On" <+> ppr x
  ppr (Off x) = text "Off" <+> ppr x

-- OnOffs accumulate in reverse order, so we use foldr in order to
-- process them in the right order
flattenExtensionFlags :: Maybe Language -> [OnOff LangExt.Extension] -> EnumSet LangExt.Extension
flattenExtensionFlags ml = foldr f defaultExtensionFlags
  where
    f (On f)  flags = EnumSet.insert f flags
    f (Off f) flags = EnumSet.delete f flags
    defaultExtensionFlags = EnumSet.fromList (languageExtensions ml)

-- | The language extensions implied by the various language variants.
-- When updating this be sure to update the flag documentation in
-- @docs/users_guide/exts@.
languageExtensions :: Maybe Language -> [LangExt.Extension]

-- Nothing: the default case
languageExtensions Nothing = languageExtensions (Just GHC2021)

languageExtensions (Just Haskell98)
    = [LangExt.ImplicitPrelude,
       -- See Note [When is StarIsType enabled]
       LangExt.StarIsType,
       LangExt.CUSKs,
       LangExt.MonomorphismRestriction,
       LangExt.NPlusKPatterns,
       LangExt.DatatypeContexts,
       LangExt.TraditionalRecordSyntax,
       LangExt.FieldSelectors,
       LangExt.NondecreasingIndentation
           -- strictly speaking non-standard, but we always had this
           -- on implicitly before the option was added in 7.1, and
           -- turning it off breaks code, so we're keeping it on for
           -- backwards compatibility.  Cabal uses -XHaskell98 by
           -- default unless you specify another language.
      ,
       LangExt.DeepSubsumption
           -- Non-standard but enabled for backwards compatability (see GHC proposal #511)
      ]

languageExtensions (Just Haskell2010)
    = [LangExt.ImplicitPrelude,
       -- See Note [When is StarIsType enabled]
       LangExt.StarIsType,
       LangExt.CUSKs,
       LangExt.MonomorphismRestriction,
       LangExt.DatatypeContexts,
       LangExt.TraditionalRecordSyntax,
       LangExt.EmptyDataDecls,
       LangExt.ForeignFunctionInterface,
       LangExt.PatternGuards,
       LangExt.DoAndIfThenElse,
       LangExt.FieldSelectors,
       LangExt.RelaxedPolyRec,
       LangExt.DeepSubsumption ]

languageExtensions (Just GHC2021)
    = [LangExt.ImplicitPrelude,
       -- See Note [When is StarIsType enabled]
       LangExt.StarIsType,
       LangExt.MonomorphismRestriction,
       LangExt.TraditionalRecordSyntax,
       LangExt.EmptyDataDecls,
       LangExt.ForeignFunctionInterface,
       LangExt.PatternGuards,
       LangExt.DoAndIfThenElse,
       LangExt.FieldSelectors,
       LangExt.RelaxedPolyRec,
       -- Now the new extensions (not in Haskell2010)
       LangExt.BangPatterns,
       LangExt.BinaryLiterals,
       LangExt.ConstrainedClassMethods,
       LangExt.ConstraintKinds,
       LangExt.DeriveDataTypeable,
       LangExt.DeriveFoldable,
       LangExt.DeriveFunctor,
       LangExt.DeriveGeneric,
       LangExt.DeriveLift,
       LangExt.DeriveTraversable,
       LangExt.EmptyCase,
       LangExt.EmptyDataDeriving,
       LangExt.ExistentialQuantification,
       LangExt.ExplicitForAll,
       LangExt.FlexibleContexts,
       LangExt.FlexibleInstances,
       LangExt.GADTSyntax,
       LangExt.GeneralizedNewtypeDeriving,
       LangExt.HexFloatLiterals,
       LangExt.ImportQualifiedPost,
       LangExt.InstanceSigs,
       LangExt.KindSignatures,
       LangExt.MultiParamTypeClasses,
       LangExt.NamedFieldPuns,
       LangExt.NamedWildCards,
       LangExt.NumericUnderscores,
       LangExt.PolyKinds,
       LangExt.PostfixOperators,
       LangExt.RankNTypes,
       LangExt.ScopedTypeVariables,
       LangExt.StandaloneDeriving,
       LangExt.StandaloneKindSignatures,
       LangExt.TupleSections,
       LangExt.TypeApplications,
       LangExt.TypeOperators,
       LangExt.TypeSynonymInstances]

-- | Is @-dppr-debug@ enabled?
hasPprDebug :: DynFlags -> Bool
hasPprDebug = dopt Opt_D_ppr_debug

-- | Is @-dno-debug-output@ enabled?
hasNoDebugOutput :: DynFlags -> Bool
hasNoDebugOutput = dopt Opt_D_no_debug_output

hasNoStateHack :: DynFlags -> Bool
hasNoStateHack = gopt Opt_G_NoStateHack

hasNoOptCoercion :: DynFlags -> Bool
hasNoOptCoercion = gopt Opt_G_NoOptCoercion

-- | Test whether a 'DumpFlag' is set
dopt :: DumpFlag -> DynFlags -> Bool
dopt f dflags = (f `EnumSet.member` dumpFlags dflags)
             || (verbosity dflags >= 4 && enableIfVerbose f)
    where -- Certain dump flags are too noisy to be implied by -v4;
          -- those are listed here with False.
          enableIfVerbose Opt_D_dump_tc_trace               = False
          enableIfVerbose Opt_D_dump_rn_trace               = False
          enableIfVerbose Opt_D_dump_cs_trace               = False
          enableIfVerbose Opt_D_dump_if_trace               = False
          enableIfVerbose Opt_D_dump_tc                     = False
          enableIfVerbose Opt_D_dump_rn                     = False
          enableIfVerbose Opt_D_dump_rn_stats               = False
          enableIfVerbose Opt_D_dump_hi_diffs               = False
          enableIfVerbose Opt_D_verbose_core2core           = False
          enableIfVerbose Opt_D_verbose_stg2stg             = False
          enableIfVerbose Opt_D_dump_splices                = False
          enableIfVerbose Opt_D_th_dec_file                 = False
          enableIfVerbose Opt_D_dump_rule_firings           = False
          enableIfVerbose Opt_D_dump_rule_rewrites          = False
          enableIfVerbose Opt_D_dump_simpl_trace            = False
          enableIfVerbose Opt_D_dump_rtti                   = False
          enableIfVerbose Opt_D_dump_inlinings              = False
          enableIfVerbose Opt_D_dump_verbose_inlinings      = False
          enableIfVerbose Opt_D_dump_core_stats             = False
          enableIfVerbose Opt_D_dump_asm_stats              = False
          enableIfVerbose Opt_D_dump_types                  = False
          enableIfVerbose Opt_D_dump_simpl_iterations       = False
          enableIfVerbose Opt_D_dump_ticked                 = False
          enableIfVerbose Opt_D_dump_view_pattern_commoning = False
          enableIfVerbose Opt_D_dump_mod_cycles             = False
          enableIfVerbose Opt_D_dump_mod_map                = False
          enableIfVerbose Opt_D_dump_ec_trace               = False
          enableIfVerbose _                                 = True

-- | Set a 'DumpFlag'
dopt_set :: DynFlags -> DumpFlag -> DynFlags
dopt_set dfs f = dfs{ dumpFlags = EnumSet.insert f (dumpFlags dfs) }

-- | Unset a 'DumpFlag'
dopt_unset :: DynFlags -> DumpFlag -> DynFlags
dopt_unset dfs f = dfs{ dumpFlags = EnumSet.delete f (dumpFlags dfs) }

-- | Test whether a 'GeneralFlag' is set
--
-- Note that `dynamicNow` (i.e., dynamic objects built with `-dynamic-too`)
-- always implicitly enables Opt_PIC, Opt_ExternalDynamicRefs, and disables
-- Opt_SplitSections.
--
gopt :: GeneralFlag -> DynFlags -> Bool
gopt Opt_PIC dflags
   | dynamicNow dflags = True
gopt Opt_ExternalDynamicRefs dflags
   | dynamicNow dflags = True
gopt Opt_SplitSections dflags
   | dynamicNow dflags = False
gopt f dflags = f `EnumSet.member` generalFlags dflags

-- | Set a 'GeneralFlag'
gopt_set :: DynFlags -> GeneralFlag -> DynFlags
gopt_set dfs f = dfs{ generalFlags = EnumSet.insert f (generalFlags dfs) }

-- | Unset a 'GeneralFlag'
gopt_unset :: DynFlags -> GeneralFlag -> DynFlags
gopt_unset dfs f = dfs{ generalFlags = EnumSet.delete f (generalFlags dfs) }

-- | Test whether a 'WarningFlag' is set
wopt :: WarningFlag -> DynFlags -> Bool
wopt f dflags  = f `EnumSet.member` warningFlags dflags

-- | Set a 'WarningFlag'
wopt_set :: DynFlags -> WarningFlag -> DynFlags
wopt_set dfs f = dfs{ warningFlags = EnumSet.insert f (warningFlags dfs) }

-- | Unset a 'WarningFlag'
wopt_unset :: DynFlags -> WarningFlag -> DynFlags
wopt_unset dfs f = dfs{ warningFlags = EnumSet.delete f (warningFlags dfs) }

-- | Test whether a 'WarningFlag' is set as fatal
wopt_fatal :: WarningFlag -> DynFlags -> Bool
wopt_fatal f dflags = f `EnumSet.member` fatalWarningFlags dflags
-- | Mark a 'WarningFlag' as fatal (do not set the flag)
wopt_set_fatal :: DynFlags -> WarningFlag -> DynFlags
wopt_set_fatal dfs f
    = dfs { fatalWarningFlags = EnumSet.insert f (fatalWarningFlags dfs) }

-- | Mark a 'WarningFlag' as not fatal
wopt_unset_fatal :: DynFlags -> WarningFlag -> DynFlags
wopt_unset_fatal dfs f
    = dfs { fatalWarningFlags = EnumSet.delete f (fatalWarningFlags dfs) }

-- | Test whether a 'LangExt.Extension' is set
xopt :: LangExt.Extension -> DynFlags -> Bool
xopt f dflags = f `EnumSet.member` extensionFlags dflags

-- | Set a 'LangExt.Extension'
xopt_set :: DynFlags -> LangExt.Extension -> DynFlags
xopt_set dfs f
    = let onoffs = On f : extensions dfs
      -- record the explicit On so later passes can see the user's choice,
      -- then recompute the effective extension set from scratch
      in dfs { extensions = onoffs,
               extensionFlags = flattenExtensionFlags (language dfs) onoffs }

-- | Unset a 'LangExt.Extension'
xopt_unset :: DynFlags -> LangExt.Extension -> DynFlags
xopt_unset dfs f
    = let onoffs = Off f : extensions dfs
      in dfs { extensions = onoffs,
               extensionFlags = flattenExtensionFlags (language dfs) onoffs }

-- | Set or unset a 'LangExt.Extension', unless it has been explicitly
-- set or unset before.
xopt_set_unlessExplSpec
        :: LangExt.Extension
        -> (DynFlags -> LangExt.Extension -> DynFlags)
        -> DynFlags -> DynFlags
xopt_set_unlessExplSpec ext setUnset dflags =
    let referedExts = stripOnOff <$> extensions dflags
        stripOnOff (On x)  = x
        stripOnOff (Off x) = x
    in
        -- an extension mentioned at all (On or Off) counts as explicitly
        -- specified, so we leave it alone
        if ext `elem` referedExts then dflags else setUnset dflags ext

xopt_DuplicateRecordFields :: DynFlags -> FieldLabel.DuplicateRecordFields
xopt_DuplicateRecordFields dfs
  | xopt LangExt.DuplicateRecordFields dfs = FieldLabel.DuplicateRecordFields
  | otherwise                              = FieldLabel.NoDuplicateRecordFields

xopt_FieldSelectors :: DynFlags -> FieldLabel.FieldSelectors
xopt_FieldSelectors dfs
  | xopt LangExt.FieldSelectors dfs = FieldLabel.FieldSelectors
  | otherwise                       = FieldLabel.NoFieldSelectors

lang_set :: DynFlags -> Maybe Language -> DynFlags
lang_set dflags lang =
   dflags { language = lang,
            extensionFlags = flattenExtensionFlags lang (extensions dflags)
          }

-- | Set the Haskell language standard to use
setLanguage :: Language -> DynP ()
setLanguage l = upd (`lang_set` Just l)

-- | Is the -fpackage-trust mode on
packageTrustOn :: DynFlags -> Bool
packageTrustOn = gopt Opt_PackageTrust

-- | Is Safe Haskell on in some way (including inference mode)
safeHaskellOn :: DynFlags -> Bool
safeHaskellOn dflags = safeHaskellModeEnabled dflags || safeInferOn dflags

safeHaskellModeEnabled :: DynFlags -> Bool
safeHaskellModeEnabled dflags = safeHaskell dflags `elem` [Sf_Unsafe, Sf_Trustworthy
                                                          , Sf_Safe ]

-- | Is the Safe Haskell safe language in use
safeLanguageOn :: DynFlags -> Bool
safeLanguageOn dflags = safeHaskell dflags == Sf_Safe

-- | Is the Safe Haskell safe inference mode active
safeInferOn :: DynFlags -> Bool
safeInferOn = safeInfer

-- | Test if Safe Imports are on in some form
safeImportsOn :: DynFlags -> Bool
safeImportsOn dflags = safeHaskell dflags == Sf_Unsafe ||
                       safeHaskell dflags == Sf_Trustworthy ||
                       safeHaskell dflags == Sf_Safe

-- | Set a 'Safe Haskell' flag
setSafeHaskell :: SafeHaskellMode -> DynP ()
setSafeHaskell s = updM f
    where f dfs = do
              let sf = safeHaskell dfs
              safeM <- combineSafeFlags sf s
              case s of
                Sf_Safe -> return $ dfs { safeHaskell = safeM, safeInfer = False }
                -- leave safe inference on in Trustworthy mode so we can warn
                -- if it could have been inferred safe.
                Sf_Trustworthy -> do
                  l <- getCurLoc
                  return $ dfs { safeHaskell = safeM, trustworthyOnLoc = l }
                -- leave safe inference on in Unsafe mode as well.
                _ -> return $ dfs { safeHaskell = safeM }

-- | Are all direct imports required to be safe for this Safe Haskell mode?
-- Direct imports are when the code explicitly imports a module
safeDirectImpsReq :: DynFlags -> Bool
safeDirectImpsReq d = safeLanguageOn d

-- | Are all implicit imports required to be safe for this Safe Haskell mode?
-- Implicit imports are things in the prelude. e.g System.IO when print is used.
safeImplicitImpsReq :: DynFlags -> Bool
safeImplicitImpsReq d = safeLanguageOn d

-- | Combine two Safe Haskell modes correctly. Used for dealing with multiple flags.
-- This makes Safe Haskell very much a monoid but for now I prefer this as I don't
-- want to export this functionality from the module but do want to export the
-- type constructors.
combineSafeFlags :: SafeHaskellMode -> SafeHaskellMode -> DynP SafeHaskellMode
combineSafeFlags a b | a == Sf_None         = return b
                     | b == Sf_None         = return a
                     | a == Sf_Ignore || b == Sf_Ignore = return Sf_Ignore
                     | a == b               = return a
                     | otherwise            = addErr errm >> pure a
    where errm = "Incompatible Safe Haskell flags! ("
                    ++ show a ++ ", " ++ show b ++ ")"

-- | A list of unsafe flags under Safe Haskell.
-- Tuple elements are:
--  * name of the flag
--  * function to get srcspan that enabled the flag
--  * function to test if the flag is on
--  * function to turn the flag off
unsafeFlags, unsafeFlagsForInfer
  :: [(String, DynFlags -> SrcSpan, DynFlags -> Bool, DynFlags -> DynFlags)]
unsafeFlags = [ ("-XGeneralizedNewtypeDeriving", newDerivOnLoc,
                    xopt LangExt.GeneralizedNewtypeDeriving,
                    flip xopt_unset LangExt.GeneralizedNewtypeDeriving)
              , ("-XDerivingVia", deriveViaOnLoc,
                    xopt LangExt.DerivingVia,
                    flip xopt_unset LangExt.DerivingVia)
              , ("-XTemplateHaskell", thOnLoc,
                    xopt LangExt.TemplateHaskell,
                    flip xopt_unset LangExt.TemplateHaskell)
              ]
unsafeFlagsForInfer = unsafeFlags

-- | Retrieve the options corresponding to a particular @opt_*@ field in the correct order
getOpts :: DynFlags             -- ^ 'DynFlags' to retrieve the options from
        -> (DynFlags -> [a])    -- ^ Relevant record accessor: one of the @opt_*@ accessors
        -> [a]                  -- ^ Correctly ordered extracted options
getOpts dflags opts = reverse (opts dflags)
        -- We add to the options from the front, so we need to reverse the list

-- | Gets the verbosity flag for the current verbosity level.
-- This is fed to
-- other tools, so GHC-specific verbosity flags like @-ddump-most@ are not included
getVerbFlags :: DynFlags -> [String]
getVerbFlags dflags
  | verbosity dflags >= 4 = ["-v"]
  | otherwise             = []

setObjectDir, setHiDir, setHieDir, setStubDir, setDumpDir, setOutputDir,
         setDynObjectSuf, setDynHiSuf,
         setDylibInstallName,
         setObjectSuf, setHiSuf, setHieSuf, setHcSuf, parseDynLibLoaderMode,
         setPgmP, addOptl, addOptc, addOptcxx, addOptP,
         addCmdlineFramework, addHaddockOpts, addGhciScript,
         setInteractivePrint
   :: String -> DynFlags -> DynFlags
setOutputFile, setDynOutputFile, setOutputHi, setDynOutputHi, setDumpPrefixForce
   :: Maybe String -> DynFlags -> DynFlags

setObjectDir  f d = d { objectDir  = Just f}
setHiDir      f d = d { hiDir      = Just f}
setHieDir     f d = d { hieDir     = Just f}
setStubDir    f d = d { stubDir = Just f
                      , includePaths = addGlobalInclude (includePaths d) [f] }
  -- -stubdir D adds an implicit -I D, so that gcc can find the _stub.h file
  -- \#included from the .hc file when compiling via C (i.e. unregisterised
  -- builds).
setDumpDir    f d = d { dumpDir    = Just f}
setOutputDir  f = setObjectDir f
                . setHieDir f
                . setHiDir f
                . setStubDir f
                . setDumpDir f
setDylibInstallName  f d = d { dylibInstallName = Just f}
setObjectSuf    f d = d { objectSuf_    = f}
setDynObjectSuf f d = d { dynObjectSuf_ = f}
setHiSuf        f d = d { hiSuf_        = f}
setHieSuf       f d = d { hieSuf        = f}
setDynHiSuf     f d = d { dynHiSuf_     = f}
setHcSuf        f d = d { hcSuf         = f}

setOutputFile    f d = d { outputFile_    = f}
setDynOutputFile f d = d { dynOutputFile_ = f}
setOutputHi      f d = d { outputHi       = f}
setDynOutputHi   f d = d { dynOutputHi    = f}

parseUnitInsts :: String -> Instantiations
parseUnitInsts str = case filter ((=="").snd) (readP_to_S parse str) of
    [(r, "")] -> r
    _ -> throwGhcException $ CmdLineError ("Can't parse -instantiated-with: " ++ str)
  where parse = sepBy parseEntry (R.char ',')
        parseEntry = do
          n <- parseModuleName
          _ <- R.char '='
          m <- parseHoleyModule
          return (n, m)

setUnitInstantiations :: String -> DynFlags -> DynFlags
setUnitInstantiations s d =
    d { homeUnitInstantiations_ = parseUnitInsts s }

setUnitInstanceOf :: String -> DynFlags -> DynFlags
setUnitInstanceOf s d =
    d { homeUnitInstanceOf_ = Just (UnitId (fsLit s)) }

addPluginModuleName :: String -> DynFlags -> DynFlags
addPluginModuleName name d = d { pluginModNames = (mkModuleName name) : (pluginModNames d) }

clearPluginModuleNames :: DynFlags -> DynFlags
clearPluginModuleNames d =
    d { pluginModNames = []
      , pluginModNameOpts = [] }

addPluginModuleNameOption :: String -> DynFlags -> DynFlags
addPluginModuleNameOption optflag d = d { pluginModNameOpts = (mkModuleName m, option) : (pluginModNameOpts d) }
  where (m, rest) = break (== ':') optflag
        option = case rest of
          [] -> "" -- should probably signal an error
          (_:plug_opt) -> plug_opt -- ignore the ':' from break

addFrontendPluginOption :: String -> DynFlags -> DynFlags
addFrontendPluginOption s d = d { frontendPluginOpts = s : frontendPluginOpts d }

parseDynLibLoaderMode f d =
 case splitAt 8 f of
   ("deploy", "") -> d { dynLibLoader = Deployable }
   ("sysdep", "") -> d { dynLibLoader = SystemDependent }
   _ -> throwGhcException (CmdLineError ("Unknown dynlib loader: " ++ f))

setDumpPrefixForce f d = d { dumpPrefixForce = f}

-- XXX HACK: Prelude> words "'does not' work" ===> ["'does","not'","work"]
-- Config.hs should really use Option.
setPgmP   f = alterToolSettings (\s -> s { toolSettings_pgm_P   = (pgm, map Option args)})
  where (pgm:args) = words f
addOptl   f = alterToolSettings (\s -> s { toolSettings_opt_l   = f : toolSettings_opt_l s})
addOptc   f = alterToolSettings (\s -> s { toolSettings_opt_c   = f : toolSettings_opt_c s})
addOptcxx f = alterToolSettings (\s -> s { toolSettings_opt_cxx = f : toolSettings_opt_cxx s})
addOptP   f = alterToolSettings $ \s -> s
          { toolSettings_opt_P   = f : toolSettings_opt_P s
          , toolSettings_opt_P_fingerprint = fingerprintStrings (f : toolSettings_opt_P s)
          }
          -- See Note [Repeated -optP hashing]
  where
  fingerprintStrings ss = fingerprintFingerprints $ map fingerprintString ss

setDepMakefile :: FilePath -> DynFlags -> DynFlags
setDepMakefile f d = d { depMakefile = f }

setDepIncludeCppDeps :: Bool -> DynFlags -> DynFlags
setDepIncludeCppDeps b d = d { depIncludeCppDeps = b }

setDepIncludePkgDeps :: Bool -> DynFlags -> DynFlags
setDepIncludePkgDeps b d = d { depIncludePkgDeps = b }

addDepExcludeMod :: String -> DynFlags -> DynFlags
addDepExcludeMod m d
    = d { depExcludeMods = mkModuleName m : depExcludeMods d }

addDepSuffix :: FilePath -> DynFlags -> DynFlags
addDepSuffix s d = d { depSuffixes = s : depSuffixes d }

addCmdlineFramework f d = d { cmdlineFrameworks = f : cmdlineFrameworks d}

addGhcVersionFile :: FilePath -> DynFlags -> DynFlags
addGhcVersionFile f d = d { ghcVersionFile = Just f }

addHaddockOpts f d = d { haddockOptions = Just f}

addGhciScript f d = d { ghciScripts = f : ghciScripts d}

setInteractivePrint f d = d { interactivePrint = Just f}

-----------------------------------------------------------------------------
-- Setting the optimisation level

updOptLevelChanged :: Int -> DynFlags -> (DynFlags, Bool)
-- ^ Sets the 'DynFlags' to be appropriate to the optimisation level and
-- signals if any changes took place
updOptLevelChanged n dfs
  = (dfs3, changed1 || changed2 || changed3)
  where
   final_n = max 0 (min 2 n)    -- Clamp to 0 <= n <= 2
   (dfs1, changed1) = foldr unset (dfs , False) remove_gopts
   (dfs2, changed2) = foldr set   (dfs1, False) extra_gopts
   (dfs3, changed3) = setLlvmOptLevel dfs2

   extra_gopts  = [ f | (ns,f) <- optLevelFlags, final_n `elem` ns ]
   remove_gopts = [ f | (ns,f) <- optLevelFlags, final_n `notElem` ns ]

   set f (dfs, changed)
     | gopt f dfs = (dfs, changed)
     | otherwise  = (gopt_set dfs f, True)

   unset f (dfs, changed)
     | not (gopt f dfs) = (dfs, changed)
     | otherwise        = (gopt_unset dfs f, True)

   setLlvmOptLevel dfs
     | llvmOptLevel dfs /= final_n = (dfs{ llvmOptLevel = final_n }, True)
     | otherwise                   = (dfs, False)

updOptLevel :: Int -> DynFlags -> DynFlags
-- ^ Sets the 'DynFlags' to be appropriate to the optimisation level
updOptLevel n = fst . updOptLevelChanged n

{- **********************************************************************
%*                                                                      *
                DynFlags parser
%*                                                                      *
%********************************************************************* -}

-- -----------------------------------------------------------------------------
-- Parsing the dynamic flags.

-- | Parse dynamic flags from a list of command line arguments.  Returns
-- the parsed 'DynFlags', the left-over arguments, and a list of warnings.
-- Throws a 'UsageError' if errors occurred during parsing (such as unknown
-- flags or missing arguments).
parseDynamicFlagsCmdLine :: MonadIO m => DynFlags -> [Located String]
                         -> m (DynFlags, [Located String], [Warn])
                            -- ^ Updated 'DynFlags', left-over arguments, and
                            -- list of warnings.
parseDynamicFlagsCmdLine = parseDynamicFlagsFull flagsAll True

-- | Like 'parseDynamicFlagsCmdLine' but does not allow the package flags
-- (-package, -hide-package, -ignore-package, -hide-all-packages, -package-db).
-- Used to parse flags set in a modules pragma.
parseDynamicFilePragma :: MonadIO m => DynFlags -> [Located String]
                       -> m (DynFlags, [Located String], [Warn])
                          -- ^ Updated 'DynFlags', left-over arguments, and
                          -- list of warnings.
parseDynamicFilePragma = parseDynamicFlagsFull flagsDynamic False

-- | A state monad over an arbitrary base monad; used to thread the flag
-- state through the command-line parser.
newtype CmdLineP s a = CmdLineP (forall m. (Monad m) => StateT s m a)

instance Monad (CmdLineP s) where
    CmdLineP k >>= f = CmdLineP (k >>= \x -> case f x of CmdLineP g -> g)
    return = pure

instance Applicative (CmdLineP s) where
    pure x = CmdLineP (pure x)
    (<*>) = ap

instance Functor (CmdLineP s) where
    fmap f (CmdLineP k) = CmdLineP (fmap f k)

getCmdLineState :: CmdLineP s s
getCmdLineState = CmdLineP State.get

putCmdLineState :: s -> CmdLineP s ()
putCmdLineState x = CmdLineP (State.put x)

runCmdLineP :: CmdLineP s a -> s -> (a, s)
runCmdLineP (CmdLineP k) s0 = runIdentity $ runStateT k s0

-- | A helper to parse a set of flags from a list of command-line arguments, handling
-- response files.
processCmdLineP
    :: forall s m. MonadIO m
    => [Flag (CmdLineP s)]  -- ^ valid flags to match against
    -> s                    -- ^ current state
    -> [Located String]     -- ^ arguments to parse
    -> m (([Located String], [Err], [Warn]), s)
                            -- ^ (leftovers, errors, warnings)
processCmdLineP activeFlags s0 args =
    runStateT (processArgs (map (hoistFlag getCmdLineP) activeFlags) args parseResponseFile) s0
  where
    getCmdLineP :: CmdLineP s a -> StateT s m a
    getCmdLineP (CmdLineP k) = k

-- | Parses the dynamically set flags for GHC. This is the most general form of
-- the dynamic flag parser that the other methods simply wrap. It allows
-- saying which flags are valid flags and indicating if we are parsing
-- arguments from the command line or from a file pragma.
parseDynamicFlagsFull
    :: forall m. MonadIO m
    => [Flag (CmdLineP DynFlags)]    -- ^ valid flags to match against
    -> Bool                          -- ^ are the arguments from the command line?
    -> DynFlags                      -- ^ current dynamic flags
    -> [Located String]              -- ^ arguments to parse
    -> m (DynFlags, [Located String], [Warn])
parseDynamicFlagsFull activeFlags cmdline dflags0 args = do
  ((leftover, errs, warns), dflags1) <- processCmdLineP activeFlags dflags0 args

  -- See Note [Handling errors when parsing command-line flags]
  let rdr = renderWithContext (initSDocContext dflags0 defaultUserStyle)
  unless (null errs) $ liftIO $ throwGhcExceptionIO $ errorsToGhcException $
    map ((rdr . ppr . getLoc &&& unLoc) . errMsg) $ errs

  -- check for disabled flags in safe haskell
  let (dflags2, sh_warns) = safeFlagCheck cmdline dflags1
      theWays = ways dflags2

  unless (allowed_combination theWays) $ liftIO $
      throwGhcExceptionIO (CmdLineError ("combination not supported: " ++
                               intercalate "/" (map wayDesc (Set.toAscList theWays))))

  let (dflags3, consistency_warnings) = makeDynFlagsConsistent dflags2

  -- Set timer stats & heap size
  when (enableTimeStats dflags3) $ liftIO enableTimingStats
  case (ghcHeapSize dflags3) of
    Just x -> liftIO (setHeapSize x)
    _      -> return ()

  liftIO $ setUnsafeGlobalDynFlags dflags3

  let warns' = map (Warn WarningWithoutFlag) (consistency_warnings ++ sh_warns)

  return (dflags3, leftover, warns' ++ warns)

-- | Check (and potentially disable) any extensions that aren't allowed
-- in safe mode.
--
-- The bool is to indicate if we are parsing command line flags (false means
-- file pragma). This allows us to generate better warnings.
safeFlagCheck :: Bool -> DynFlags -> (DynFlags, [Located String])
safeFlagCheck _ dflags | safeLanguageOn dflags = (dflagsUnset, warns)
  where
    -- Handle illegal flags under safe language.
    -- NB: check_method's 'warns' parameter deliberately shadows the outer
    -- binding; each step appends its own failure message.
    (dflagsUnset, warns) = foldl' check_method (dflags, []) unsafeFlags

    check_method (df, warns) (str,loc,test,fix)
        | test df   = (fix df, warns ++ safeFailure (loc df) str)
        | otherwise = (df, warns)

    safeFailure loc str
       = [L loc $ str ++ " is not allowed in Safe Haskell; ignoring " ++ str]

safeFlagCheck cmdl dflags =
  case safeInferOn dflags of
    True  -> (dflags' { safeInferred = safeFlags }, warn)
    False -> (dflags', warn)

  where
    -- dynflags and warn for when -fpackage-trust by itself with no safe
    -- haskell flag
    (dflags', warn)
      | not (safeHaskellModeEnabled dflags) && not cmdl && packageTrustOn dflags
      = (gopt_unset dflags Opt_PackageTrust, pkgWarnMsg)
      | otherwise = (dflags, [])

    pkgWarnMsg = [L (pkgTrustOnLoc dflags') $
                    "-fpackage-trust ignored;" ++
                    " must be specified with a Safe Haskell flag"]

    -- Have we inferred Unsafe? See Note [GHC.Driver.Main . Safe Haskell Inference]
    safeFlags = all (\(_,_,t,_) -> not $ t dflags) unsafeFlagsForInfer

{- **********************************************************************
%*                                                                      *
                DynFlags specifications
%*                                                                      *
%********************************************************************* -}

-- | All dynamic flags option strings without the deprecated ones.
-- These are the user facing strings for enabling and disabling options.
allNonDeprecatedFlags :: [String]
allNonDeprecatedFlags = allFlagsDeps False

-- | All flags with possibility to filter deprecated ones
allFlagsDeps :: Bool -> [String]
allFlagsDeps keepDeprecated = [ '-':flagName flag
                              | (deprecated, flag) <- flagsAllDeps
                              , keepDeprecated || not (isDeprecated deprecated)]
  where isDeprecated Deprecated = True
        isDeprecated _ = False

{-
 - Below we export user facing symbols for GHC dynamic flags for use with the
 - GHC API.
 -}

-- All dynamic flags present in GHC.
flagsAll :: [Flag (CmdLineP DynFlags)]
flagsAll = map snd flagsAllDeps

-- All dynamic flags present in GHC with deprecation information.
flagsAllDeps :: [(Deprecation, Flag (CmdLineP DynFlags))]
flagsAllDeps =  package_flags_deps ++ dynamic_flags_deps

-- All dynamic flags, minus package flags, present in GHC.
flagsDynamic :: [Flag (CmdLineP DynFlags)]
flagsDynamic = map snd dynamic_flags_deps

-- ALl package flags present in GHC.
flagsPackage :: [Flag (CmdLineP DynFlags)]
flagsPackage = map snd package_flags_deps

----------------Helpers to make flags and keep deprecation information----------

type FlagMaker m = String -> OptKind m -> Flag m
type DynFlagMaker = FlagMaker (CmdLineP DynFlags)
data Deprecation = NotDeprecated | Deprecated deriving (Eq, Ord)

-- Make a non-deprecated flag
make_ord_flag :: DynFlagMaker -> String -> OptKind (CmdLineP DynFlags)
              -> (Deprecation, Flag (CmdLineP DynFlags))
make_ord_flag fm name kind = (NotDeprecated, fm name kind)

-- Make a deprecated flag
make_dep_flag :: DynFlagMaker -> String -> OptKind (CmdLineP DynFlags)
              -> String -> (Deprecation, Flag (CmdLineP DynFlags))
make_dep_flag fm name kind message = (Deprecated,
                                      fm name $ add_dep_message kind message)

-- Wrap every 'OptKind' handler so that it also emits the given
-- deprecation message after running the original action.
add_dep_message :: OptKind (CmdLineP DynFlags) -> String
                -> OptKind (CmdLineP DynFlags)
add_dep_message (NoArg f) message = NoArg $ f >> deprecate message
add_dep_message (HasArg f) message = HasArg $ \s -> f s >> deprecate message
add_dep_message (SepArg f) message = SepArg $ \s -> f s >> deprecate message
add_dep_message (Prefix f) message = Prefix $ \s -> f s >> deprecate message
add_dep_message (OptPrefix f) message =
                                  OptPrefix $ \s -> f s >> deprecate message
add_dep_message (OptIntSuffix f) message =
                               OptIntSuffix $ \oi -> f oi >> deprecate message
add_dep_message (IntSuffix f) message =
                                  IntSuffix $ \i -> f i >> deprecate message
add_dep_message (WordSuffix f) message =
                                  WordSuffix $ \i -> f i >> deprecate message
add_dep_message (FloatSuffix f) message =
                                FloatSuffix $ \fl -> f fl >> deprecate message
add_dep_message (PassFlag f) message =
                                   PassFlag $ \s -> f s >> deprecate message
add_dep_message (AnySuffix f)
message = AnySuffix $ \s -> f s >> deprecate message ----------------------- The main flags themselves ------------------------------ -- See Note [Updating flag description in the User's Guide] -- See Note [Supporting CLI completion] dynamic_flags_deps :: [(Deprecation, Flag (CmdLineP DynFlags))] dynamic_flags_deps = [ make_dep_flag defFlag "n" (NoArg $ return ()) "The -n flag is deprecated and no longer has any effect" , make_ord_flag defFlag "cpp" (NoArg (setExtensionFlag LangExt.Cpp)) , make_ord_flag defFlag "F" (NoArg (setGeneralFlag Opt_Pp)) , (Deprecated, defFlag "#include" (HasArg (\_s -> deprecate ("-#include and INCLUDE pragmas are " ++ "deprecated: They no longer have any effect")))) , make_ord_flag defFlag "v" (OptIntSuffix setVerbosity) , make_ord_flag defGhcFlag "j" (OptIntSuffix (\n -> case n of Just n | n > 0 -> upd (\d -> d { parMakeCount = Just n }) | otherwise -> addErr "Syntax: -j[n] where n > 0" Nothing -> upd (\d -> d { parMakeCount = Nothing }))) -- When the number of parallel builds -- is omitted, it is the same -- as specifying that the number of -- parallel builds is equal to the -- result of getNumProcessors , make_ord_flag defFlag "instantiated-with" (sepArg setUnitInstantiations) , make_ord_flag defFlag "this-component-id" (sepArg setUnitInstanceOf) -- RTS options ------------------------------------------------------------- , make_ord_flag defFlag "H" (HasArg (\s -> upd (\d -> d { ghcHeapSize = Just $ fromIntegral (decodeSize s)}))) , make_ord_flag defFlag "Rghc-timing" (NoArg (upd (\d -> d { enableTimeStats = True }))) ------- ways --------------------------------------------------------------- , make_ord_flag defGhcFlag "prof" (NoArg (addWayDynP WayProf)) , (Deprecated, defFlag "eventlog" $ noArgM $ \d -> do deprecate "the eventlog is now enabled in all runtime system ways" return d) , make_ord_flag defGhcFlag "debug" (NoArg (addWayDynP WayDebug)) , make_ord_flag defGhcFlag "threaded" (NoArg (addWayDynP WayThreaded)) , make_ord_flag 
defGhcFlag "ticky" (NoArg (setGeneralFlag Opt_Ticky >> addWayDynP WayDebug)) -- -ticky enables ticky-ticky code generation, and also implies -debug which -- is required to get the RTS ticky support. ----- Linker -------------------------------------------------------- , make_ord_flag defGhcFlag "static" (NoArg removeWayDyn) , make_ord_flag defGhcFlag "dynamic" (NoArg (addWayDynP WayDyn)) , make_ord_flag defGhcFlag "rdynamic" $ noArg $ #if defined(linux_HOST_OS) addOptl "-rdynamic" #elif defined(mingw32_HOST_OS) addOptl "-Wl,--export-all-symbols" #else -- ignored for compat w/ gcc: id #endif , make_ord_flag defGhcFlag "relative-dynlib-paths" (NoArg (setGeneralFlag Opt_RelativeDynlibPaths)) , make_ord_flag defGhcFlag "copy-libs-when-linking" (NoArg (setGeneralFlag Opt_SingleLibFolder)) , make_ord_flag defGhcFlag "pie" (NoArg (setGeneralFlag Opt_PICExecutable)) , make_ord_flag defGhcFlag "no-pie" (NoArg (unSetGeneralFlag Opt_PICExecutable)) ------- Specific phases -------------------------------------------- -- need to appear before -pgmL to be parsed as LLVM flags. 
, make_ord_flag defFlag "pgmlo" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_lo = (f,[]) } , make_ord_flag defFlag "pgmlc" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_lc = (f,[]) } , make_ord_flag defFlag "pgmlm" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_lm = if null f then Nothing else Just (f,[]) } , make_ord_flag defFlag "pgmi" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_i = f } , make_ord_flag defFlag "pgmL" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_L = f } , make_ord_flag defFlag "pgmP" (hasArg setPgmP) , make_ord_flag defFlag "pgmF" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_F = f } , make_ord_flag defFlag "pgmc" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_c = f } , make_ord_flag defFlag "pgmcxx" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_cxx = f } , (Deprecated, defFlag "pgmc-supports-no-pie" $ noArgM $ \d -> do deprecate $ "use -pgml-supports-no-pie instead" pure $ alterToolSettings (\s -> s { toolSettings_ccSupportsNoPie = True }) d) , make_ord_flag defFlag "pgms" (HasArg (\_ -> addWarn "Object splitting was removed in GHC 8.8")) , make_ord_flag defFlag "pgma" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_a = (f,[]) } , make_ord_flag defFlag "pgml" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_l = (f,[]) , -- Don't pass -no-pie with custom -pgml (see #15319). Note -- that this could break when -no-pie is actually needed. -- But the CC_SUPPORTS_NO_PIE check only happens at -- buildtime, and -pgml is a runtime option. A better -- solution would be running this check for each custom -- -pgml. 
toolSettings_ccSupportsNoPie = False } , make_ord_flag defFlag "pgml-supports-no-pie" $ noArg $ alterToolSettings $ \s -> s { toolSettings_ccSupportsNoPie = True } , make_ord_flag defFlag "pgmdll" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_dll = (f,[]) } , make_ord_flag defFlag "pgmwindres" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_windres = f } , make_ord_flag defFlag "pgmlibtool" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_libtool = f } , make_ord_flag defFlag "pgmar" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_ar = f } , make_ord_flag defFlag "pgmotool" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_otool = f} , make_ord_flag defFlag "pgminstall_name_tool" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_install_name_tool = f} , make_ord_flag defFlag "pgmranlib" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_pgm_ranlib = f } -- need to appear before -optl/-opta to be parsed as LLVM flags. 
, make_ord_flag defFlag "optlm" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_lm = f : toolSettings_opt_lm s } , make_ord_flag defFlag "optlo" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_lo = f : toolSettings_opt_lo s } , make_ord_flag defFlag "optlc" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_lc = f : toolSettings_opt_lc s } , make_ord_flag defFlag "opti" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_i = f : toolSettings_opt_i s } , make_ord_flag defFlag "optL" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_L = f : toolSettings_opt_L s } , make_ord_flag defFlag "optP" (hasArg addOptP) , make_ord_flag defFlag "optF" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_F = f : toolSettings_opt_F s } , make_ord_flag defFlag "optc" (hasArg addOptc) , make_ord_flag defFlag "optcxx" (hasArg addOptcxx) , make_ord_flag defFlag "opta" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_a = f : toolSettings_opt_a s } , make_ord_flag defFlag "optl" (hasArg addOptl) , make_ord_flag defFlag "optwindres" $ hasArg $ \f -> alterToolSettings $ \s -> s { toolSettings_opt_windres = f : toolSettings_opt_windres s } , make_ord_flag defGhcFlag "split-objs" (NoArg $ addWarn "ignoring -split-objs") , make_ord_flag defGhcFlag "split-sections" (noArgM (\dflags -> do if platformHasSubsectionsViaSymbols (targetPlatform dflags) then do addWarn $ "-split-sections is not useful on this platform " ++ "since it always uses subsections via symbols. Ignoring." 
return dflags else return (gopt_set dflags Opt_SplitSections))) -------- ghc -M ----------------------------------------------------- , make_ord_flag defGhcFlag "dep-suffix" (hasArg addDepSuffix) , make_ord_flag defGhcFlag "dep-makefile" (hasArg setDepMakefile) , make_ord_flag defGhcFlag "include-cpp-deps" (noArg (setDepIncludeCppDeps True)) , make_ord_flag defGhcFlag "include-pkg-deps" (noArg (setDepIncludePkgDeps True)) , make_ord_flag defGhcFlag "exclude-module" (hasArg addDepExcludeMod) -------- Linking ---------------------------------------------------- , make_ord_flag defGhcFlag "no-link" (noArg (\d -> d { ghcLink=NoLink })) , make_ord_flag defGhcFlag "shared" (noArg (\d -> d { ghcLink=LinkDynLib })) , make_ord_flag defGhcFlag "staticlib" (noArg (\d -> setGeneralFlag' Opt_LinkRts (d { ghcLink=LinkStaticLib }))) , make_ord_flag defGhcFlag "-merge-objs" (noArg (\d -> d { ghcLink=LinkMergedObj })) , make_ord_flag defGhcFlag "dynload" (hasArg parseDynLibLoaderMode) , make_ord_flag defGhcFlag "dylib-install-name" (hasArg setDylibInstallName) ------- Libraries --------------------------------------------------- , make_ord_flag defFlag "L" (Prefix addLibraryPath) , make_ord_flag defFlag "l" (hasArg (addLdInputs . Option . ("-l" ++))) ------- Frameworks -------------------------------------------------- -- -framework-path should really be -F ... , make_ord_flag defFlag "framework-path" (HasArg addFrameworkPath) , make_ord_flag defFlag "framework" (hasArg addCmdlineFramework) ------- Output Redirection ------------------------------------------ , make_ord_flag defGhcFlag "odir" (hasArg setObjectDir) , make_ord_flag defGhcFlag "o" (sepArg (setOutputFile . Just)) , make_ord_flag defGhcFlag "dyno" (sepArg (setDynOutputFile . Just)) , make_ord_flag defGhcFlag "ohi" (hasArg (setOutputHi . Just )) , make_ord_flag defGhcFlag "dynohi" (hasArg (setDynOutputHi . 
Just )) , make_ord_flag defGhcFlag "osuf" (hasArg setObjectSuf) , make_ord_flag defGhcFlag "dynosuf" (hasArg setDynObjectSuf) , make_ord_flag defGhcFlag "hcsuf" (hasArg setHcSuf) , make_ord_flag defGhcFlag "hisuf" (hasArg setHiSuf) , make_ord_flag defGhcFlag "hiesuf" (hasArg setHieSuf) , make_ord_flag defGhcFlag "dynhisuf" (hasArg setDynHiSuf) , make_ord_flag defGhcFlag "hidir" (hasArg setHiDir) , make_ord_flag defGhcFlag "hiedir" (hasArg setHieDir) , make_ord_flag defGhcFlag "tmpdir" (hasArg setTmpDir) , make_ord_flag defGhcFlag "stubdir" (hasArg setStubDir) , make_ord_flag defGhcFlag "dumpdir" (hasArg setDumpDir) , make_ord_flag defGhcFlag "outputdir" (hasArg setOutputDir) , make_ord_flag defGhcFlag "ddump-file-prefix" (hasArg (setDumpPrefixForce . Just . flip (++) ".")) , make_ord_flag defGhcFlag "dynamic-too" (NoArg (setGeneralFlag Opt_BuildDynamicToo)) ------- Keeping temporary files ------------------------------------- -- These can be singular (think ghc -c) or plural (think ghc --make) , make_ord_flag defGhcFlag "keep-hc-file" (NoArg (setGeneralFlag Opt_KeepHcFiles)) , make_ord_flag defGhcFlag "keep-hc-files" (NoArg (setGeneralFlag Opt_KeepHcFiles)) , make_ord_flag defGhcFlag "keep-hscpp-file" (NoArg (setGeneralFlag Opt_KeepHscppFiles)) , make_ord_flag defGhcFlag "keep-hscpp-files" (NoArg (setGeneralFlag Opt_KeepHscppFiles)) , make_ord_flag defGhcFlag "keep-s-file" (NoArg (setGeneralFlag Opt_KeepSFiles)) , make_ord_flag defGhcFlag "keep-s-files" (NoArg (setGeneralFlag Opt_KeepSFiles)) , make_ord_flag defGhcFlag "keep-llvm-file" (NoArg $ setObjBackend LLVM >> setGeneralFlag Opt_KeepLlvmFiles) , make_ord_flag defGhcFlag "keep-llvm-files" (NoArg $ setObjBackend LLVM >> setGeneralFlag Opt_KeepLlvmFiles) -- This only makes sense as plural , make_ord_flag defGhcFlag "keep-tmp-files" (NoArg (setGeneralFlag Opt_KeepTmpFiles)) , make_ord_flag defGhcFlag "keep-hi-file" (NoArg (setGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "no-keep-hi-file" (NoArg 
(unSetGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "keep-hi-files" (NoArg (setGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "no-keep-hi-files" (NoArg (unSetGeneralFlag Opt_KeepHiFiles)) , make_ord_flag defGhcFlag "keep-o-file" (NoArg (setGeneralFlag Opt_KeepOFiles)) , make_ord_flag defGhcFlag "no-keep-o-file" (NoArg (unSetGeneralFlag Opt_KeepOFiles)) , make_ord_flag defGhcFlag "keep-o-files" (NoArg (setGeneralFlag Opt_KeepOFiles)) , make_ord_flag defGhcFlag "no-keep-o-files" (NoArg (unSetGeneralFlag Opt_KeepOFiles)) ------- Miscellaneous ---------------------------------------------- , make_ord_flag defGhcFlag "no-auto-link-packages" (NoArg (unSetGeneralFlag Opt_AutoLinkPackages)) , make_ord_flag defGhcFlag "no-hs-main" (NoArg (setGeneralFlag Opt_NoHsMain)) , make_ord_flag defGhcFlag "fno-state-hack" (NoArg (setGeneralFlag Opt_G_NoStateHack)) , make_ord_flag defGhcFlag "fno-opt-coercion" (NoArg (setGeneralFlag Opt_G_NoOptCoercion)) , make_ord_flag defGhcFlag "with-rtsopts" (HasArg setRtsOpts) , make_ord_flag defGhcFlag "rtsopts" (NoArg (setRtsOptsEnabled RtsOptsAll)) , make_ord_flag defGhcFlag "rtsopts=all" (NoArg (setRtsOptsEnabled RtsOptsAll)) , make_ord_flag defGhcFlag "rtsopts=some" (NoArg (setRtsOptsEnabled RtsOptsSafeOnly)) , make_ord_flag defGhcFlag "rtsopts=none" (NoArg (setRtsOptsEnabled RtsOptsNone)) , make_ord_flag defGhcFlag "rtsopts=ignore" (NoArg (setRtsOptsEnabled RtsOptsIgnore)) , make_ord_flag defGhcFlag "rtsopts=ignoreAll" (NoArg (setRtsOptsEnabled RtsOptsIgnoreAll)) , make_ord_flag defGhcFlag "no-rtsopts" (NoArg (setRtsOptsEnabled RtsOptsNone)) , make_ord_flag defGhcFlag "no-rtsopts-suggestions" (noArg (\d -> d {rtsOptsSuggestions = False})) , make_ord_flag defGhcFlag "dhex-word-literals" (NoArg (setGeneralFlag Opt_HexWordLiterals)) , make_ord_flag defGhcFlag "ghcversion-file" (hasArg addGhcVersionFile) , make_ord_flag defGhcFlag "main-is" (SepArg setMainIs) , make_ord_flag defGhcFlag "haddock" (NoArg (setGeneralFlag 
Opt_Haddock)) , make_ord_flag defGhcFlag "no-haddock" (NoArg (unSetGeneralFlag Opt_Haddock)) , make_ord_flag defGhcFlag "haddock-opts" (hasArg addHaddockOpts) , make_ord_flag defGhcFlag "hpcdir" (SepArg setOptHpcDir) , make_ord_flag defGhciFlag "ghci-script" (hasArg addGhciScript) , make_ord_flag defGhciFlag "interactive-print" (hasArg setInteractivePrint) , make_ord_flag defGhcFlag "ticky-allocd" (NoArg (setGeneralFlag Opt_Ticky_Allocd)) , make_ord_flag defGhcFlag "ticky-LNE" (NoArg (setGeneralFlag Opt_Ticky_LNE)) , make_ord_flag defGhcFlag "ticky-ap-thunk" (NoArg (setGeneralFlag Opt_Ticky_AP)) , make_ord_flag defGhcFlag "ticky-dyn-thunk" (NoArg (setGeneralFlag Opt_Ticky_Dyn_Thunk)) , make_ord_flag defGhcFlag "ticky-tag-checks" (NoArg (setGeneralFlag Opt_Ticky_Tag)) ------- recompilation checker -------------------------------------- , make_dep_flag defGhcFlag "recomp" (NoArg $ unSetGeneralFlag Opt_ForceRecomp) "Use -fno-force-recomp instead" , make_dep_flag defGhcFlag "no-recomp" (NoArg $ setGeneralFlag Opt_ForceRecomp) "Use -fforce-recomp instead" , make_ord_flag defFlag "fmax-errors" (intSuffix (\n d -> d { maxErrors = Just (max 1 n) })) , make_ord_flag defFlag "fno-max-errors" (noArg (\d -> d { maxErrors = Nothing })) , make_ord_flag defFlag "freverse-errors" (noArg (\d -> d {reverseErrors = True} )) , make_ord_flag defFlag "fno-reverse-errors" (noArg (\d -> d {reverseErrors = False} )) ------ HsCpp opts --------------------------------------------------- , make_ord_flag defFlag "D" (AnySuffix (upd . addOptP)) , make_ord_flag defFlag "U" (AnySuffix (upd . 
addOptP)) ------- Include/Import Paths ---------------------------------------- , make_ord_flag defFlag "I" (Prefix addIncludePath) , make_ord_flag defFlag "i" (OptPrefix addImportPath) ------ Output style options ----------------------------------------- , make_ord_flag defFlag "dppr-user-length" (intSuffix (\n d -> d { pprUserLength = n })) , make_ord_flag defFlag "dppr-cols" (intSuffix (\n d -> d { pprCols = n })) , make_ord_flag defFlag "fdiagnostics-color=auto" (NoArg (upd (\d -> d { useColor = Auto }))) , make_ord_flag defFlag "fdiagnostics-color=always" (NoArg (upd (\d -> d { useColor = Always }))) , make_ord_flag defFlag "fdiagnostics-color=never" (NoArg (upd (\d -> d { useColor = Never }))) -- Suppress all that is suppressable in core dumps. -- Except for uniques, as some simplifier phases introduce new variables that -- have otherwise identical names. , make_ord_flag defGhcFlag "dsuppress-all" (NoArg $ do setGeneralFlag Opt_SuppressCoercions setGeneralFlag Opt_SuppressCoercionTypes setGeneralFlag Opt_SuppressVarKinds setGeneralFlag Opt_SuppressModulePrefixes setGeneralFlag Opt_SuppressTypeApplications setGeneralFlag Opt_SuppressIdInfo setGeneralFlag Opt_SuppressTicks setGeneralFlag Opt_SuppressStgExts setGeneralFlag Opt_SuppressStgReps setGeneralFlag Opt_SuppressTypeSignatures setGeneralFlag Opt_SuppressCoreSizes setGeneralFlag Opt_SuppressTimestamps) ------ Debugging ---------------------------------------------------- , make_ord_flag defGhcFlag "dstg-stats" (NoArg (setGeneralFlag Opt_StgStats)) , make_ord_flag defGhcFlag "ddump-cmm" (setDumpFlag Opt_D_dump_cmm) , make_ord_flag defGhcFlag "ddump-cmm-from-stg" (setDumpFlag Opt_D_dump_cmm_from_stg) , make_ord_flag defGhcFlag "ddump-cmm-raw" (setDumpFlag Opt_D_dump_cmm_raw) , make_ord_flag defGhcFlag "ddump-cmm-verbose" (setDumpFlag Opt_D_dump_cmm_verbose) , make_ord_flag defGhcFlag "ddump-cmm-verbose-by-proc" (setDumpFlag Opt_D_dump_cmm_verbose_by_proc) , make_ord_flag defGhcFlag "ddump-cmm-cfg" 
(setDumpFlag Opt_D_dump_cmm_cfg) , make_ord_flag defGhcFlag "ddump-cmm-cbe" (setDumpFlag Opt_D_dump_cmm_cbe) , make_ord_flag defGhcFlag "ddump-cmm-switch" (setDumpFlag Opt_D_dump_cmm_switch) , make_ord_flag defGhcFlag "ddump-cmm-proc" (setDumpFlag Opt_D_dump_cmm_proc) , make_ord_flag defGhcFlag "ddump-cmm-sp" (setDumpFlag Opt_D_dump_cmm_sp) , make_ord_flag defGhcFlag "ddump-cmm-sink" (setDumpFlag Opt_D_dump_cmm_sink) , make_ord_flag defGhcFlag "ddump-cmm-caf" (setDumpFlag Opt_D_dump_cmm_caf) , make_ord_flag defGhcFlag "ddump-cmm-procmap" (setDumpFlag Opt_D_dump_cmm_procmap) , make_ord_flag defGhcFlag "ddump-cmm-split" (setDumpFlag Opt_D_dump_cmm_split) , make_ord_flag defGhcFlag "ddump-cmm-info" (setDumpFlag Opt_D_dump_cmm_info) , make_ord_flag defGhcFlag "ddump-cmm-cps" (setDumpFlag Opt_D_dump_cmm_cps) , make_ord_flag defGhcFlag "ddump-cmm-opt" (setDumpFlag Opt_D_dump_opt_cmm) , make_ord_flag defGhcFlag "ddump-cfg-weights" (setDumpFlag Opt_D_dump_cfg_weights) , make_ord_flag defGhcFlag "ddump-core-stats" (setDumpFlag Opt_D_dump_core_stats) , make_ord_flag defGhcFlag "ddump-asm" (setDumpFlag Opt_D_dump_asm) , make_ord_flag defGhcFlag "ddump-asm-native" (setDumpFlag Opt_D_dump_asm_native) , make_ord_flag defGhcFlag "ddump-asm-liveness" (setDumpFlag Opt_D_dump_asm_liveness) , make_ord_flag defGhcFlag "ddump-asm-regalloc" (setDumpFlag Opt_D_dump_asm_regalloc) , make_ord_flag defGhcFlag "ddump-asm-conflicts" (setDumpFlag Opt_D_dump_asm_conflicts) , make_ord_flag defGhcFlag "ddump-asm-regalloc-stages" (setDumpFlag Opt_D_dump_asm_regalloc_stages) , make_ord_flag defGhcFlag "ddump-asm-stats" (setDumpFlag Opt_D_dump_asm_stats) , make_ord_flag defGhcFlag "ddump-llvm" (NoArg $ setDumpFlag' Opt_D_dump_llvm) , make_ord_flag defGhcFlag "ddump-c-backend" (NoArg $ setDumpFlag' Opt_D_dump_c_backend) , make_ord_flag defGhcFlag "ddump-deriv" (setDumpFlag Opt_D_dump_deriv) , make_ord_flag defGhcFlag "ddump-ds" (setDumpFlag Opt_D_dump_ds) , make_ord_flag defGhcFlag "ddump-ds-preopt" 
(setDumpFlag Opt_D_dump_ds_preopt) , make_ord_flag defGhcFlag "ddump-foreign" (setDumpFlag Opt_D_dump_foreign) , make_ord_flag defGhcFlag "ddump-inlinings" (setDumpFlag Opt_D_dump_inlinings) , make_ord_flag defGhcFlag "ddump-verbose-inlinings" (setDumpFlag Opt_D_dump_verbose_inlinings) , make_ord_flag defGhcFlag "ddump-rule-firings" (setDumpFlag Opt_D_dump_rule_firings) , make_ord_flag defGhcFlag "ddump-rule-rewrites" (setDumpFlag Opt_D_dump_rule_rewrites) , make_ord_flag defGhcFlag "ddump-simpl-trace" (setDumpFlag Opt_D_dump_simpl_trace) , make_ord_flag defGhcFlag "ddump-occur-anal" (setDumpFlag Opt_D_dump_occur_anal) , make_ord_flag defGhcFlag "ddump-parsed" (setDumpFlag Opt_D_dump_parsed) , make_ord_flag defGhcFlag "ddump-parsed-ast" (setDumpFlag Opt_D_dump_parsed_ast) , make_ord_flag defGhcFlag "dkeep-comments" (NoArg (setGeneralFlag Opt_KeepRawTokenStream)) , make_ord_flag defGhcFlag "ddump-rn" (setDumpFlag Opt_D_dump_rn) , make_ord_flag defGhcFlag "ddump-rn-ast" (setDumpFlag Opt_D_dump_rn_ast) , make_ord_flag defGhcFlag "ddump-simpl" (setDumpFlag Opt_D_dump_simpl) , make_ord_flag defGhcFlag "ddump-simpl-iterations" (setDumpFlag Opt_D_dump_simpl_iterations) , make_ord_flag defGhcFlag "ddump-spec" (setDumpFlag Opt_D_dump_spec) , make_ord_flag defGhcFlag "ddump-prep" (setDumpFlag Opt_D_dump_prep) , make_ord_flag defGhcFlag "ddump-late-cc" (setDumpFlag Opt_D_dump_late_cc) , make_ord_flag defGhcFlag "ddump-stg-from-core" (setDumpFlag Opt_D_dump_stg_from_core) , make_ord_flag defGhcFlag "ddump-stg-unarised" (setDumpFlag Opt_D_dump_stg_unarised) , make_ord_flag defGhcFlag "ddump-stg-final" (setDumpFlag Opt_D_dump_stg_final) , make_ord_flag defGhcFlag "ddump-stg-cg" (setDumpFlag Opt_D_dump_stg_cg) , make_dep_flag defGhcFlag "ddump-stg" (setDumpFlag Opt_D_dump_stg_from_core) "Use `-ddump-stg-from-core` or `-ddump-stg-final` instead" , make_ord_flag defGhcFlag "ddump-stg-tags" (setDumpFlag Opt_D_dump_stg_tags) , make_ord_flag defGhcFlag "ddump-call-arity" (setDumpFlag 
Opt_D_dump_call_arity) , make_ord_flag defGhcFlag "ddump-exitify" (setDumpFlag Opt_D_dump_exitify) , make_ord_flag defGhcFlag "ddump-stranal" (setDumpFlag Opt_D_dump_stranal) , make_ord_flag defGhcFlag "ddump-str-signatures" (setDumpFlag Opt_D_dump_str_signatures) , make_ord_flag defGhcFlag "ddump-cpranal" (setDumpFlag Opt_D_dump_cpranal) , make_ord_flag defGhcFlag "ddump-cpr-signatures" (setDumpFlag Opt_D_dump_cpr_signatures) , make_ord_flag defGhcFlag "ddump-tc" (setDumpFlag Opt_D_dump_tc) , make_ord_flag defGhcFlag "ddump-tc-ast" (setDumpFlag Opt_D_dump_tc_ast) , make_ord_flag defGhcFlag "ddump-hie" (setDumpFlag Opt_D_dump_hie) , make_ord_flag defGhcFlag "ddump-types" (setDumpFlag Opt_D_dump_types) , make_ord_flag defGhcFlag "ddump-rules" (setDumpFlag Opt_D_dump_rules) , make_ord_flag defGhcFlag "ddump-cse" (setDumpFlag Opt_D_dump_cse) , make_ord_flag defGhcFlag "ddump-worker-wrapper" (setDumpFlag Opt_D_dump_worker_wrapper) , make_ord_flag defGhcFlag "ddump-rn-trace" (setDumpFlag Opt_D_dump_rn_trace) , make_ord_flag defGhcFlag "ddump-if-trace" (setDumpFlag Opt_D_dump_if_trace) , make_ord_flag defGhcFlag "ddump-cs-trace" (setDumpFlag Opt_D_dump_cs_trace) , make_ord_flag defGhcFlag "ddump-tc-trace" (NoArg (do setDumpFlag' Opt_D_dump_tc_trace setDumpFlag' Opt_D_dump_cs_trace)) , make_ord_flag defGhcFlag "ddump-ec-trace" (setDumpFlag Opt_D_dump_ec_trace) , make_ord_flag defGhcFlag "ddump-splices" (setDumpFlag Opt_D_dump_splices) , make_ord_flag defGhcFlag "dth-dec-file" (setDumpFlag Opt_D_th_dec_file) , make_ord_flag defGhcFlag "ddump-rn-stats" (setDumpFlag Opt_D_dump_rn_stats) , make_ord_flag defGhcFlag "ddump-opt-cmm" --old alias for cmm-opt (setDumpFlag Opt_D_dump_opt_cmm) , make_ord_flag defGhcFlag "ddump-simpl-stats" (setDumpFlag Opt_D_dump_simpl_stats) , make_ord_flag defGhcFlag "ddump-bcos" (setDumpFlag Opt_D_dump_BCOs) , make_ord_flag defGhcFlag "dsource-stats" (setDumpFlag Opt_D_source_stats) , make_ord_flag defGhcFlag "dverbose-core2core" (NoArg $ 
setVerbosity (Just 2) >> setDumpFlag' Opt_D_verbose_core2core) , make_ord_flag defGhcFlag "dverbose-stg2stg" (setDumpFlag Opt_D_verbose_stg2stg) , make_ord_flag defGhcFlag "ddump-hi" (setDumpFlag Opt_D_dump_hi) , make_ord_flag defGhcFlag "ddump-minimal-imports" (NoArg (setGeneralFlag Opt_D_dump_minimal_imports)) , make_ord_flag defGhcFlag "ddump-hpc" (setDumpFlag Opt_D_dump_ticked) -- back compat , make_ord_flag defGhcFlag "ddump-ticked" (setDumpFlag Opt_D_dump_ticked) , make_ord_flag defGhcFlag "ddump-mod-cycles" (setDumpFlag Opt_D_dump_mod_cycles) , make_ord_flag defGhcFlag "ddump-mod-map" (setDumpFlag Opt_D_dump_mod_map) , make_ord_flag defGhcFlag "ddump-timings" (setDumpFlag Opt_D_dump_timings) , make_ord_flag defGhcFlag "ddump-view-pattern-commoning" (setDumpFlag Opt_D_dump_view_pattern_commoning) , make_ord_flag defGhcFlag "ddump-to-file" (NoArg (setGeneralFlag Opt_DumpToFile)) , make_ord_flag defGhcFlag "ddump-hi-diffs" (setDumpFlag Opt_D_dump_hi_diffs) , make_ord_flag defGhcFlag "ddump-rtti" (setDumpFlag Opt_D_dump_rtti) , make_ord_flag defGhcFlag "dlint" (NoArg enableDLint) , make_ord_flag defGhcFlag "dcore-lint" (NoArg (setGeneralFlag Opt_DoCoreLinting)) , make_ord_flag defGhcFlag "dlinear-core-lint" (NoArg (setGeneralFlag Opt_DoLinearCoreLinting)) , make_ord_flag defGhcFlag "dstg-lint" (NoArg (setGeneralFlag Opt_DoStgLinting)) , make_ord_flag defGhcFlag "dcmm-lint" (NoArg (setGeneralFlag Opt_DoCmmLinting)) , make_ord_flag defGhcFlag "dasm-lint" (NoArg (setGeneralFlag Opt_DoAsmLinting)) , make_ord_flag defGhcFlag "dannot-lint" (NoArg (setGeneralFlag Opt_DoAnnotationLinting)) , make_ord_flag defGhcFlag "dtag-inference-checks" (NoArg (setGeneralFlag Opt_DoTagInferenceChecks)) , make_ord_flag defGhcFlag "dshow-passes" (NoArg $ forceRecompile >> (setVerbosity $ Just 2)) , make_ord_flag defGhcFlag "dfaststring-stats" (setDumpFlag Opt_D_faststring_stats) , make_ord_flag defGhcFlag "dno-llvm-mangler" (NoArg (setGeneralFlag Opt_NoLlvmMangler)) -- hidden flag , 
make_ord_flag defGhcFlag "dno-typeable-binds" (NoArg (setGeneralFlag Opt_NoTypeableBinds)) , make_ord_flag defGhcFlag "ddump-debug" (setDumpFlag Opt_D_dump_debug) , make_ord_flag defGhcFlag "ddump-json" (setDumpFlag Opt_D_dump_json ) , make_ord_flag defGhcFlag "dppr-debug" (setDumpFlag Opt_D_ppr_debug) , make_ord_flag defGhcFlag "ddebug-output" (noArg (flip dopt_unset Opt_D_no_debug_output)) , make_ord_flag defGhcFlag "dno-debug-output" (setDumpFlag Opt_D_no_debug_output) , make_ord_flag defGhcFlag "ddump-faststrings" (setDumpFlag Opt_D_dump_faststrings) ------ Machine dependent (-m) stuff --------------------------- , make_ord_flag defGhcFlag "msse" (noArg (\d -> d { sseVersion = Just SSE1 })) , make_ord_flag defGhcFlag "msse2" (noArg (\d -> d { sseVersion = Just SSE2 })) , make_ord_flag defGhcFlag "msse3" (noArg (\d -> d { sseVersion = Just SSE3 })) , make_ord_flag defGhcFlag "msse4" (noArg (\d -> d { sseVersion = Just SSE4 })) , make_ord_flag defGhcFlag "msse4.2" (noArg (\d -> d { sseVersion = Just SSE42 })) , make_ord_flag defGhcFlag "mbmi" (noArg (\d -> d { bmiVersion = Just BMI1 })) , make_ord_flag defGhcFlag "mbmi2" (noArg (\d -> d { bmiVersion = Just BMI2 })) , make_ord_flag defGhcFlag "mavx" (noArg (\d -> d { avx = True })) , make_ord_flag defGhcFlag "mavx2" (noArg (\d -> d { avx2 = True })) , make_ord_flag defGhcFlag "mavx512cd" (noArg (\d -> d { avx512cd = True })) , make_ord_flag defGhcFlag "mavx512er" (noArg (\d -> d { avx512er = True })) , make_ord_flag defGhcFlag "mavx512f" (noArg (\d -> d { avx512f = True })) , make_ord_flag defGhcFlag "mavx512pf" (noArg (\d -> d { avx512pf = True })) ------ Warning opts ------------------------------------------------- , make_ord_flag defFlag "W" (NoArg (mapM_ setWarningFlag minusWOpts)) , make_ord_flag defFlag "Werror" (NoArg (do { setGeneralFlag Opt_WarnIsError ; mapM_ setFatalWarningFlag minusWeverythingOpts })) , make_ord_flag defFlag "Wwarn" (NoArg (do { unSetGeneralFlag Opt_WarnIsError ; mapM_ 
unSetFatalWarningFlag minusWeverythingOpts })) -- Opt_WarnIsError is still needed to pass -Werror -- to CPP; see runCpp in SysTools , make_dep_flag defFlag "Wnot" (NoArg (upd (\d -> d {warningFlags = EnumSet.empty}))) "Use -w or -Wno-everything instead" , make_ord_flag defFlag "w" (NoArg (upd (\d -> d {warningFlags = EnumSet.empty}))) -- New-style uniform warning sets -- -- Note that -Weverything > -Wall > -Wextra > -Wdefault > -Wno-everything , make_ord_flag defFlag "Weverything" (NoArg (mapM_ setWarningFlag minusWeverythingOpts)) , make_ord_flag defFlag "Wno-everything" (NoArg (upd (\d -> d {warningFlags = EnumSet.empty}))) , make_ord_flag defFlag "Wall" (NoArg (mapM_ setWarningFlag minusWallOpts)) , make_ord_flag defFlag "Wno-all" (NoArg (mapM_ unSetWarningFlag minusWallOpts)) , make_ord_flag defFlag "Wextra" (NoArg (mapM_ setWarningFlag minusWOpts)) , make_ord_flag defFlag "Wno-extra" (NoArg (mapM_ unSetWarningFlag minusWOpts)) , make_ord_flag defFlag "Wdefault" (NoArg (mapM_ setWarningFlag standardWarnings)) , make_ord_flag defFlag "Wno-default" (NoArg (mapM_ unSetWarningFlag standardWarnings)) , make_ord_flag defFlag "Wcompat" (NoArg (mapM_ setWarningFlag minusWcompatOpts)) , make_ord_flag defFlag "Wno-compat" (NoArg (mapM_ unSetWarningFlag minusWcompatOpts)) ------ Plugin flags ------------------------------------------------ , make_ord_flag defGhcFlag "fplugin-opt" (hasArg addPluginModuleNameOption) , make_ord_flag defGhcFlag "fplugin-trustworthy" (NoArg (setGeneralFlag Opt_PluginTrustworthy)) , make_ord_flag defGhcFlag "fplugin" (hasArg addPluginModuleName) , make_ord_flag defGhcFlag "fclear-plugins" (noArg clearPluginModuleNames) , make_ord_flag defGhcFlag "ffrontend-opt" (hasArg addFrontendPluginOption) ------ Optimisation flags ------------------------------------------ , make_dep_flag defGhcFlag "Onot" (noArgM $ setOptLevel 0 ) "Use -O0 instead" , make_ord_flag defGhcFlag "O" (optIntSuffixM (\mb_n -> setOptLevel (mb_n `orElse` 1))) -- If the number is 
missing, use 1 , make_ord_flag defFlag "fbinary-blob-threshold" (intSuffix (\n d -> d { binBlobThreshold = case fromIntegral n of 0 -> Nothing x -> Just x})) , make_ord_flag defFlag "fmax-relevant-binds" (intSuffix (\n d -> d { maxRelevantBinds = Just n })) , make_ord_flag defFlag "fno-max-relevant-binds" (noArg (\d -> d { maxRelevantBinds = Nothing })) , make_ord_flag defFlag "fmax-valid-hole-fits" (intSuffix (\n d -> d { maxValidHoleFits = Just n })) , make_ord_flag defFlag "fno-max-valid-hole-fits" (noArg (\d -> d { maxValidHoleFits = Nothing })) , make_ord_flag defFlag "fmax-refinement-hole-fits" (intSuffix (\n d -> d { maxRefHoleFits = Just n })) , make_ord_flag defFlag "fno-max-refinement-hole-fits" (noArg (\d -> d { maxRefHoleFits = Nothing })) , make_ord_flag defFlag "frefinement-level-hole-fits" (intSuffix (\n d -> d { refLevelHoleFits = Just n })) , make_ord_flag defFlag "fno-refinement-level-hole-fits" (noArg (\d -> d { refLevelHoleFits = Nothing })) , make_dep_flag defGhcFlag "fllvm-pass-vectors-in-regs" (noArg id) "vectors registers are now passed in registers by default." 
, make_ord_flag defFlag "fmax-uncovered-patterns" (intSuffix (\n d -> d { maxUncoveredPatterns = n })) , make_ord_flag defFlag "fmax-pmcheck-models" (intSuffix (\n d -> d { maxPmCheckModels = n })) , make_ord_flag defFlag "fsimplifier-phases" (intSuffix (\n d -> d { simplPhases = n })) , make_ord_flag defFlag "fmax-simplifier-iterations" (intSuffix (\n d -> d { maxSimplIterations = n })) , (Deprecated, defFlag "fmax-pmcheck-iterations" (intSuffixM (\_ d -> do { deprecate $ "use -fmax-pmcheck-models instead" ; return d }))) , make_ord_flag defFlag "fsimpl-tick-factor" (intSuffix (\n d -> d { simplTickFactor = n })) , make_ord_flag defFlag "fdmd-unbox-width" (intSuffix (\n d -> d { dmdUnboxWidth = n })) , make_ord_flag defFlag "fspec-constr-threshold" (intSuffix (\n d -> d { specConstrThreshold = Just n })) , make_ord_flag defFlag "fno-spec-constr-threshold" (noArg (\d -> d { specConstrThreshold = Nothing })) , make_ord_flag defFlag "fspec-constr-count" (intSuffix (\n d -> d { specConstrCount = Just n })) , make_ord_flag defFlag "fno-spec-constr-count" (noArg (\d -> d { specConstrCount = Nothing })) , make_ord_flag defFlag "fspec-constr-recursive" (intSuffix (\n d -> d { specConstrRecursive = n })) , make_ord_flag defFlag "fliberate-case-threshold" (intSuffix (\n d -> d { liberateCaseThreshold = Just n })) , make_ord_flag defFlag "fno-liberate-case-threshold" (noArg (\d -> d { liberateCaseThreshold = Nothing })) , make_ord_flag defFlag "drule-check" (sepArg (\s d -> d { ruleCheck = Just s })) , make_ord_flag defFlag "dinline-check" (sepArg (\s d -> d { unfoldingOpts = updateReportPrefix (Just s) (unfoldingOpts d)})) , make_ord_flag defFlag "freduction-depth" (intSuffix (\n d -> d { reductionDepth = treatZeroAsInf n })) , make_ord_flag defFlag "fconstraint-solver-iterations" (intSuffix (\n d -> d { solverIterations = treatZeroAsInf n })) , (Deprecated, defFlag "fcontext-stack" (intSuffixM (\n d -> do { deprecate $ "use -freduction-depth=" ++ show n ++ " instead" ; 
return $ d { reductionDepth = treatZeroAsInf n } }))) , (Deprecated, defFlag "ftype-function-depth" (intSuffixM (\n d -> do { deprecate $ "use -freduction-depth=" ++ show n ++ " instead" ; return $ d { reductionDepth = treatZeroAsInf n } }))) , make_ord_flag defFlag "fstrictness-before" (intSuffix (\n d -> d { strictnessBefore = n : strictnessBefore d })) , make_ord_flag defFlag "ffloat-lam-args" (intSuffix (\n d -> d { floatLamArgs = Just n })) , make_ord_flag defFlag "ffloat-all-lams" (noArg (\d -> d { floatLamArgs = Nothing })) , make_ord_flag defFlag "fstg-lift-lams-rec-args" (intSuffix (\n d -> d { liftLamsRecArgs = Just n })) , make_ord_flag defFlag "fstg-lift-lams-rec-args-any" (noArg (\d -> d { liftLamsRecArgs = Nothing })) , make_ord_flag defFlag "fstg-lift-lams-non-rec-args" (intSuffix (\n d -> d { liftLamsNonRecArgs = Just n })) , make_ord_flag defFlag "fstg-lift-lams-non-rec-args-any" (noArg (\d -> d { liftLamsNonRecArgs = Nothing })) , make_ord_flag defFlag "fstg-lift-lams-known" (noArg (\d -> d { liftLamsKnown = True })) , make_ord_flag defFlag "fno-stg-lift-lams-known" (noArg (\d -> d { liftLamsKnown = False })) , make_ord_flag defFlag "fproc-alignment" (intSuffix (\n d -> d { cmmProcAlignment = Just n })) , make_ord_flag defFlag "fblock-layout-weights" (HasArg (\s -> upd (\d -> d { cfgWeights = parseWeights s (cfgWeights d)}))) , make_ord_flag defFlag "fhistory-size" (intSuffix (\n d -> d { historySize = n })) , make_ord_flag defFlag "funfolding-creation-threshold" (intSuffix (\n d -> d { unfoldingOpts = updateCreationThreshold n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-use-threshold" (intSuffix (\n d -> d { unfoldingOpts = updateUseThreshold n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-fun-discount" (intSuffix (\n d -> d { unfoldingOpts = updateFunAppDiscount n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-dict-discount" (intSuffix (\n d -> d { unfoldingOpts = updateDictDiscount n (unfoldingOpts d)})) , 
make_ord_flag defFlag "funfolding-case-threshold" (intSuffix (\n d -> d { unfoldingOpts = updateCaseThreshold n (unfoldingOpts d)})) , make_ord_flag defFlag "funfolding-case-scaling" (intSuffix (\n d -> d { unfoldingOpts = updateCaseScaling n (unfoldingOpts d)})) , make_dep_flag defFlag "funfolding-keeness-factor" (floatSuffix (\_ d -> d)) "-funfolding-keeness-factor is no longer respected as of GHC 9.0" , make_ord_flag defFlag "fmax-worker-args" (intSuffix (\n d -> d {maxWorkerArgs = n})) , make_ord_flag defGhciFlag "fghci-hist-size" (intSuffix (\n d -> d {ghciHistSize = n})) , make_ord_flag defGhcFlag "fmax-inline-alloc-size" (intSuffix (\n d -> d { maxInlineAllocSize = n })) , make_ord_flag defGhcFlag "fmax-inline-memcpy-insns" (intSuffix (\n d -> d { maxInlineMemcpyInsns = n })) , make_ord_flag defGhcFlag "fmax-inline-memset-insns" (intSuffix (\n d -> d { maxInlineMemsetInsns = n })) , make_ord_flag defGhcFlag "dinitial-unique" (wordSuffix (\n d -> d { initialUnique = n })) , make_ord_flag defGhcFlag "dunique-increment" (intSuffix (\n d -> d { uniqueIncrement = n })) ------ Profiling ---------------------------------------------------- -- OLD profiling flags , make_dep_flag defGhcFlag "auto-all" (noArg (\d -> d { profAuto = ProfAutoAll } )) "Use -fprof-auto instead" , make_dep_flag defGhcFlag "no-auto-all" (noArg (\d -> d { profAuto = NoProfAuto } )) "Use -fno-prof-auto instead" , make_dep_flag defGhcFlag "auto" (noArg (\d -> d { profAuto = ProfAutoExports } )) "Use -fprof-auto-exported instead" , make_dep_flag defGhcFlag "no-auto" (noArg (\d -> d { profAuto = NoProfAuto } )) "Use -fno-prof-auto instead" , make_dep_flag defGhcFlag "caf-all" (NoArg (setGeneralFlag Opt_AutoSccsOnIndividualCafs)) "Use -fprof-cafs instead" , make_dep_flag defGhcFlag "no-caf-all" (NoArg (unSetGeneralFlag Opt_AutoSccsOnIndividualCafs)) "Use -fno-prof-cafs instead" -- NEW profiling flags , make_ord_flag defGhcFlag "fprof-auto" (noArg (\d -> d { profAuto = ProfAutoAll } )) , 
make_ord_flag defGhcFlag "fprof-auto-top" (noArg (\d -> d { profAuto = ProfAutoTop } )) , make_ord_flag defGhcFlag "fprof-auto-exported" (noArg (\d -> d { profAuto = ProfAutoExports } )) , make_ord_flag defGhcFlag "fprof-auto-calls" (noArg (\d -> d { profAuto = ProfAutoCalls } )) , make_ord_flag defGhcFlag "fno-prof-auto" (noArg (\d -> d { profAuto = NoProfAuto } )) -- Caller-CC , make_ord_flag defGhcFlag "fprof-callers" (HasArg setCallerCcFilters) , make_ord_flag defGhcFlag "fdistinct-constructor-tables" (NoArg (setGeneralFlag Opt_DistinctConstructorTables)) , make_ord_flag defGhcFlag "finfo-table-map" (NoArg (setGeneralFlag Opt_InfoTableMap)) ------ Compiler flags ----------------------------------------------- , make_ord_flag defGhcFlag "fasm" (NoArg (setObjBackend NCG)) , make_ord_flag defGhcFlag "fvia-c" (NoArg (deprecate $ "The -fvia-c flag does nothing; " ++ "it will be removed in a future GHC release")) , make_ord_flag defGhcFlag "fvia-C" (NoArg (deprecate $ "The -fvia-C flag does nothing; " ++ "it will be removed in a future GHC release")) , make_ord_flag defGhcFlag "fllvm" (NoArg (setObjBackend LLVM)) , make_ord_flag defFlag "fno-code" (NoArg ((upd $ \d -> d { ghcLink=NoLink }) >> setBackend NoBackend)) , make_ord_flag defFlag "fbyte-code" (noArgM $ \dflags -> do setBackend Interpreter pure $ gopt_set dflags Opt_ByteCode) , make_ord_flag defFlag "fobject-code" $ NoArg $ do dflags <- liftEwM getCmdLineState setBackend $ platformDefaultBackend (targetPlatform dflags) , make_dep_flag defFlag "fglasgow-exts" (NoArg enableGlasgowExts) "Use individual extensions instead" , make_dep_flag defFlag "fno-glasgow-exts" (NoArg disableGlasgowExts) "Use individual extensions instead" , make_ord_flag defFlag "Wunused-binds" (NoArg enableUnusedBinds) , make_ord_flag defFlag "Wno-unused-binds" (NoArg disableUnusedBinds) , make_ord_flag defHiddenFlag "fwarn-unused-binds" (NoArg enableUnusedBinds) , make_ord_flag defHiddenFlag "fno-warn-unused-binds" (NoArg 
disableUnusedBinds) ------ Safe Haskell flags ------------------------------------------- , make_ord_flag defFlag "fpackage-trust" (NoArg setPackageTrust) , make_ord_flag defFlag "fno-safe-infer" (noArg (\d -> d { safeInfer = False })) , make_ord_flag defFlag "fno-safe-haskell" (NoArg (setSafeHaskell Sf_Ignore)) ------ position independent flags ---------------------------------- , make_ord_flag defGhcFlag "fPIC" (NoArg (setGeneralFlag Opt_PIC)) , make_ord_flag defGhcFlag "fno-PIC" (NoArg (unSetGeneralFlag Opt_PIC)) , make_ord_flag defGhcFlag "fPIE" (NoArg (setGeneralFlag Opt_PIE)) , make_ord_flag defGhcFlag "fno-PIE" (NoArg (unSetGeneralFlag Opt_PIE)) ------ Debugging flags ---------------------------------------------- , make_ord_flag defGhcFlag "g" (OptIntSuffix setDebugLevel) ] ++ map (mkFlag turnOn "" setGeneralFlag ) negatableFlagsDeps ++ map (mkFlag turnOff "no-" unSetGeneralFlag ) negatableFlagsDeps ++ map (mkFlag turnOn "d" setGeneralFlag ) dFlagsDeps ++ map (mkFlag turnOff "dno-" unSetGeneralFlag ) dFlagsDeps ++ map (mkFlag turnOn "f" setGeneralFlag ) fFlagsDeps ++ map (mkFlag turnOff "fno-" unSetGeneralFlag ) fFlagsDeps ++ map (mkFlag turnOn "W" setWarningFlag ) wWarningFlagsDeps ++ map (mkFlag turnOff "Wno-" unSetWarningFlag ) wWarningFlagsDeps ++ map (mkFlag turnOn "Werror=" setWErrorFlag ) wWarningFlagsDeps ++ map (mkFlag turnOn "Wwarn=" unSetFatalWarningFlag ) wWarningFlagsDeps ++ map (mkFlag turnOn "Wno-error=" unSetFatalWarningFlag ) wWarningFlagsDeps ++ map (mkFlag turnOn "fwarn-" setWarningFlag . hideFlag) wWarningFlagsDeps ++ map (mkFlag turnOff "fno-warn-" unSetWarningFlag . 
hideFlag) wWarningFlagsDeps ++ [ (NotDeprecated, unrecognisedWarning "W"), (Deprecated, unrecognisedWarning "fwarn-"), (Deprecated, unrecognisedWarning "fno-warn-") ] ++ [ make_ord_flag defFlag "Werror=compat" (NoArg (mapM_ setWErrorFlag minusWcompatOpts)) , make_ord_flag defFlag "Wno-error=compat" (NoArg (mapM_ unSetFatalWarningFlag minusWcompatOpts)) , make_ord_flag defFlag "Wwarn=compat" (NoArg (mapM_ unSetFatalWarningFlag minusWcompatOpts)) ] ++ map (mkFlag turnOn "f" setExtensionFlag ) fLangFlagsDeps ++ map (mkFlag turnOff "fno-" unSetExtensionFlag) fLangFlagsDeps ++ map (mkFlag turnOn "X" setExtensionFlag ) xFlagsDeps ++ map (mkFlag turnOff "XNo" unSetExtensionFlag) xFlagsDeps ++ map (mkFlag turnOn "X" setLanguage ) languageFlagsDeps ++ map (mkFlag turnOn "X" setSafeHaskell ) safeHaskellFlagsDeps -- | This is where we handle unrecognised warning flags. We only issue a warning -- if -Wunrecognised-warning-flags is set. See #11429 for context. unrecognisedWarning :: String -> Flag (CmdLineP DynFlags) unrecognisedWarning prefix = defHiddenFlag prefix (Prefix action) where action :: String -> EwM (CmdLineP DynFlags) () action flag = do f <- wopt Opt_WarnUnrecognisedWarningFlags <$> liftEwM getCmdLineState when f $ addFlagWarn (WarningWithFlag Opt_WarnUnrecognisedWarningFlags) $ "unrecognised warning flag: -" ++ prefix ++ flag -- See Note [Supporting CLI completion] package_flags_deps :: [(Deprecation, Flag (CmdLineP DynFlags))] package_flags_deps = [ ------- Packages ---------------------------------------------------- make_ord_flag defFlag "package-db" (HasArg (addPkgDbRef . 
PkgDbPath)) , make_ord_flag defFlag "clear-package-db" (NoArg clearPkgDb) , make_ord_flag defFlag "no-global-package-db" (NoArg removeGlobalPkgDb) , make_ord_flag defFlag "no-user-package-db" (NoArg removeUserPkgDb) , make_ord_flag defFlag "global-package-db" (NoArg (addPkgDbRef GlobalPkgDb)) , make_ord_flag defFlag "user-package-db" (NoArg (addPkgDbRef UserPkgDb)) -- backwards compat with GHC<=7.4 : , make_dep_flag defFlag "package-conf" (HasArg $ addPkgDbRef . PkgDbPath) "Use -package-db instead" , make_dep_flag defFlag "no-user-package-conf" (NoArg removeUserPkgDb) "Use -no-user-package-db instead" , make_ord_flag defGhcFlag "package-name" (HasArg $ \name -> upd (setUnitId name)) , make_ord_flag defGhcFlag "this-unit-id" (hasArg setUnitId) , make_ord_flag defGhcFlag "working-dir" (hasArg setWorkingDirectory) , make_ord_flag defGhcFlag "this-package-name" (hasArg setPackageName) , make_ord_flag defGhcFlag "hidden-module" (HasArg addHiddenModule) , make_ord_flag defGhcFlag "reexported-module" (HasArg addReexportedModule) , make_ord_flag defFlag "package" (HasArg exposePackage) , make_ord_flag defFlag "plugin-package-id" (HasArg exposePluginPackageId) , make_ord_flag defFlag "plugin-package" (HasArg exposePluginPackage) , make_ord_flag defFlag "package-id" (HasArg exposePackageId) , make_ord_flag defFlag "hide-package" (HasArg hidePackage) , make_ord_flag defFlag "hide-all-packages" (NoArg (setGeneralFlag Opt_HideAllPackages)) , make_ord_flag defFlag "hide-all-plugin-packages" (NoArg (setGeneralFlag Opt_HideAllPluginPackages)) , make_ord_flag defFlag "package-env" (HasArg setPackageEnv) , make_ord_flag defFlag "ignore-package" (HasArg ignorePackage) , make_dep_flag defFlag "syslib" (HasArg exposePackage) "Use -package instead" , make_ord_flag defFlag "distrust-all-packages" (NoArg (setGeneralFlag Opt_DistrustAllPackages)) , make_ord_flag defFlag "trust" (HasArg trustPackage) , make_ord_flag defFlag "distrust" (HasArg distrustPackage) ] where setPackageEnv env = upd 
$ \s -> s { packageEnv = Just env } -- | Make a list of flags for shell completion. -- Filter all available flags into two groups, for interactive GHC vs all other. flagsForCompletion :: Bool -> [String] flagsForCompletion isInteractive = [ '-':flagName flag | flag <- flagsAll , modeFilter (flagGhcMode flag) ] where modeFilter AllModes = True modeFilter OnlyGhci = isInteractive modeFilter OnlyGhc = not isInteractive modeFilter HiddenFlag = False type TurnOnFlag = Bool -- True <=> we are turning the flag on -- False <=> we are turning the flag off turnOn :: TurnOnFlag; turnOn = True turnOff :: TurnOnFlag; turnOff = False data FlagSpec flag = FlagSpec { flagSpecName :: String -- ^ Flag in string form , flagSpecFlag :: flag -- ^ Flag in internal form , flagSpecAction :: (TurnOnFlag -> DynP ()) -- ^ Extra action to run when the flag is found -- Typically, emit a warning or error , flagSpecGhcMode :: GhcFlagMode -- ^ In which ghc mode the flag has effect } -- | Define a new flag. flagSpec :: String -> flag -> (Deprecation, FlagSpec flag) flagSpec name flag = flagSpec' name flag nop -- | Define a new flag with an effect. flagSpec' :: String -> flag -> (TurnOnFlag -> DynP ()) -> (Deprecation, FlagSpec flag) flagSpec' name flag act = (NotDeprecated, FlagSpec name flag act AllModes) -- | Define a warning flag. warnSpec :: WarningFlag -> [(Deprecation, FlagSpec WarningFlag)] warnSpec flag = warnSpec' flag nop -- | Define a warning flag with an effect. warnSpec' :: WarningFlag -> (TurnOnFlag -> DynP ()) -> [(Deprecation, FlagSpec WarningFlag)] warnSpec' flag act = [ (NotDeprecated, FlagSpec name flag act AllModes) | name <- NE.toList (warnFlagNames flag) ] -- | Define a new deprecated flag with an effect. depFlagSpecOp :: String -> flag -> (TurnOnFlag -> DynP ()) -> String -> (Deprecation, FlagSpec flag) depFlagSpecOp name flag act dep = (Deprecated, snd (flagSpec' name flag (\f -> act f >> deprecate dep))) -- | Define a new deprecated flag. 
depFlagSpec :: String -> flag -> String -> (Deprecation, FlagSpec flag)
depFlagSpec fname fl msg = depFlagSpecOp fname fl nop msg

-- | Define a deprecated warning flag.
depWarnSpec :: WarningFlag -> String -> [(Deprecation, FlagSpec WarningFlag)]
depWarnSpec fl msg =
  map (\n -> depFlagSpecOp n fl nop msg) (NE.toList (warnFlagNames fl))

-- | Define a deprecated warning name substituted by another.
subWarnSpec :: String -> WarningFlag -> String
            -> [(Deprecation, FlagSpec WarningFlag)]
subWarnSpec oldname fl msg = [depFlagSpecOp oldname fl nop msg]

-- | Define a new deprecated flag with an effect where the deprecation message
-- depends on the flag value
depFlagSpecOp' :: String -> flag -> (TurnOnFlag -> DynP ())
               -> (TurnOnFlag -> String) -> (Deprecation, FlagSpec flag)
depFlagSpecOp' fname fl act msgOf =
  (Deprecated, FlagSpec fname fl run AllModes)
  where
    -- Run the flag's own action, then emit the polarity-dependent warning.
    run f = act f >> deprecate (msgOf f)

-- | Define a new deprecated flag where the deprecation message
-- depends on the flag value
depFlagSpec' :: String -> flag -> (TurnOnFlag -> String)
             -> (Deprecation, FlagSpec flag)
depFlagSpec' fname fl msgOf = depFlagSpecOp' fname fl nop msgOf

-- | Define a new deprecated flag where the deprecation message
-- is shown depending on the flag value
depFlagSpecCond :: String -> flag -> (TurnOnFlag -> Bool) -> String
                -> (Deprecation, FlagSpec flag)
depFlagSpecCond fname fl cond msg =
  (Deprecated, FlagSpec fname fl warnIf AllModes)
  where
    -- Only warn when the condition holds for the on/off polarity used.
    warnIf f = when (cond f) $ deprecate msg

-- | Define a new flag for GHCi.
flagGhciSpec :: String -> flag -> (Deprecation, FlagSpec flag)
flagGhciSpec fname fl = flagGhciSpec' fname fl nop

-- | Define a new flag for GHCi with an effect.
flagGhciSpec' :: String -> flag -> (TurnOnFlag -> DynP ())
              -> (Deprecation, FlagSpec flag)
flagGhciSpec' fname fl act = (NotDeprecated, FlagSpec fname fl act OnlyGhci)

-- | Define a new flag invisible to CLI completion.
flagHiddenSpec :: String -> flag -> (Deprecation, FlagSpec flag)
flagHiddenSpec n f = flagHiddenSpec' n f nop

-- | Define a new flag invisible to CLI completion with an effect.
flagHiddenSpec' :: String -> flag -> (TurnOnFlag -> DynP ()) -> (Deprecation, FlagSpec flag)
flagHiddenSpec' n f run = (NotDeprecated, FlagSpec n f run HiddenFlag)

-- | Hide a 'FlagSpec' from being displayed in @--show-options@.
--
-- This is for example useful for flags that are obsolete, but should not
-- (yet) be deprecated for compatibility reasons.
hideFlag :: (Deprecation, FlagSpec a) -> (Deprecation, FlagSpec a)
hideFlag (d, spec) = (d, spec { flagSpecGhcMode = HiddenFlag })

-- | Turn a 'FlagSpec' into a command-line 'Flag' by attaching the given
-- prefix and the handler to run once the flag has been recognised.
mkFlag :: TurnOnFlag            -- ^ True <=> it should be turned on
       -> String                -- ^ The flag prefix
       -> (flag -> DynP ())     -- ^ What to do when the flag is found
       -> (Deprecation, FlagSpec flag)
                                -- ^ Specification of this particular flag
       -> (Deprecation, Flag (CmdLineP DynFlags))
mkFlag turn_on prefix handle (d, FlagSpec n f extra mode) =
    (d, Flag (prefix ++ n) (NoArg (handle f >> extra turn_on)) mode)

-- here to avoid module cycle with GHC.Driver.CmdLine
deprecate :: Monad m => String -> EwM m ()
deprecate msg =
    getArg >>= \arg ->
      addFlagWarn (WarningWithFlag Opt_WarnDeprecatedFlags)
                  (arg ++ " is deprecated: " ++ msg)

-- | Deprecation message pointing at the -X / LANGUAGE-pragma spelling
-- of an extension flag.
deprecatedForExtension :: String -> TurnOnFlag -> String
deprecatedForExtension lang turn_on =
    let ext = if turn_on then lang else "No" ++ lang
    in  "use -X" ++ ext ++ " or pragma {-# LANGUAGE " ++ ext ++ " #-} instead"

-- | Deprecation message recommending a replacement flag.
useInstead :: String -> String -> TurnOnFlag -> String
useInstead prefix f turn_on = "Use " ++ prefix ++ neg ++ f ++ " instead"
  where
    neg | turn_on   = ""
        | otherwise = "no-"

-- | The do-nothing flag action.
nop :: TurnOnFlag -> DynP ()
nop = const (return ())

-- | Find the 'FlagSpec' for a 'WarningFlag'.
flagSpecOf :: WarningFlag -> Maybe (FlagSpec WarningFlag)
-- Returns Nothing if the warning has no entry in 'wWarningFlagMap'.
flagSpecOf = flip Map.lookup wWarningFlagMap

wWarningFlagMap :: Map.Map WarningFlag (FlagSpec WarningFlag)
-- A warning can appear under several names (aliases); the combining
-- function discards the new value, so the entry inserted first wins.
wWarningFlagMap = Map.fromListWith (\_ x -> x) $ map (flagSpecFlag &&& id) wWarningFlags

-- | These @-W\@ flags can all be reversed with @-Wno-\@
wWarningFlags :: [FlagSpec WarningFlag]
wWarningFlags = map snd (sortBy (comparing fst) wWarningFlagsDeps)

wWarningFlagsDeps :: [(Deprecation, FlagSpec WarningFlag)]
-- Each warnSpec/depWarnSpec/subWarnSpec produces a *list* (one entry per
-- alias of the warning); mconcat flattens them.
wWarningFlagsDeps = mconcat [
-- See Note [Updating flag description in the User's Guide]
-- See Note [Supporting CLI completion]
-- Please keep the list of flags below sorted alphabetically
  warnSpec Opt_WarnAlternativeLayoutRuleTransitional,
  warnSpec Opt_WarnAmbiguousFields,
  depWarnSpec Opt_WarnAutoOrphans "it has no effect",
  warnSpec Opt_WarnCPPUndef,
  warnSpec Opt_WarnUnbangedStrictPatterns,
  warnSpec Opt_WarnDeferredTypeErrors,
  warnSpec Opt_WarnDeferredOutOfScopeVariables,
  warnSpec Opt_WarnWarningsDeprecations,
  warnSpec Opt_WarnDeprecatedFlags,
  warnSpec Opt_WarnDerivingDefaults,
  warnSpec Opt_WarnDerivingTypeable,
  warnSpec Opt_WarnDodgyExports,
  warnSpec Opt_WarnDodgyForeignImports,
  warnSpec Opt_WarnDodgyImports,
  warnSpec Opt_WarnEmptyEnumerations,
  subWarnSpec "duplicate-constraints"
              Opt_WarnDuplicateConstraints
              "it is subsumed by -Wredundant-constraints",
  warnSpec Opt_WarnRedundantConstraints,
  warnSpec Opt_WarnDuplicateExports,
  depWarnSpec Opt_WarnHiShadows
              "it is not used, and was never implemented",
  warnSpec Opt_WarnInaccessibleCode,
  warnSpec Opt_WarnImplicitPrelude,
  depWarnSpec Opt_WarnImplicitKindVars "it is now an error",
  warnSpec Opt_WarnIncompletePatterns,
  warnSpec Opt_WarnIncompletePatternsRecUpd,
  warnSpec Opt_WarnIncompleteUniPatterns,
  warnSpec Opt_WarnInlineRuleShadowing,
  warnSpec Opt_WarnIdentities,
  warnSpec Opt_WarnMissingFields,
  warnSpec Opt_WarnMissingImportList,
  warnSpec Opt_WarnMissingExportList,
  subWarnSpec "missing-local-sigs"
              Opt_WarnMissingLocalSignatures
              "it is replaced by -Wmissing-local-signatures",
  warnSpec Opt_WarnMissingLocalSignatures,
  warnSpec Opt_WarnMissingMethods,
  depWarnSpec Opt_WarnMissingMonadFailInstances
              "fail is no longer a method of Monad",
  warnSpec Opt_WarnSemigroup,
  warnSpec Opt_WarnMissingSignatures,
  warnSpec Opt_WarnMissingKindSignatures,
  subWarnSpec "missing-exported-sigs"
              Opt_WarnMissingExportedSignatures
              "it is replaced by -Wmissing-exported-signatures",
  warnSpec Opt_WarnMissingExportedSignatures,
  warnSpec Opt_WarnMonomorphism,
  warnSpec Opt_WarnNameShadowing,
  warnSpec Opt_WarnNonCanonicalMonadInstances,
  depWarnSpec Opt_WarnNonCanonicalMonadFailInstances
              "fail is no longer a method of Monad",
  warnSpec Opt_WarnNonCanonicalMonoidInstances,
  warnSpec Opt_WarnOrphans,
  warnSpec Opt_WarnOverflowedLiterals,
  warnSpec Opt_WarnOverlappingPatterns,
  warnSpec Opt_WarnMissedSpecs,
  warnSpec Opt_WarnAllMissedSpecs,
  -- Safe Haskell warnings carry an extra action (set*), hence warnSpec'
  warnSpec' Opt_WarnSafe setWarnSafe,
  warnSpec Opt_WarnTrustworthySafe,
  warnSpec Opt_WarnInferredSafeImports,
  warnSpec Opt_WarnMissingSafeHaskellMode,
  warnSpec Opt_WarnTabs,
  warnSpec Opt_WarnTypeDefaults,
  warnSpec Opt_WarnTypedHoles,
  warnSpec Opt_WarnPartialTypeSignatures,
  warnSpec Opt_WarnUnrecognisedPragmas,
  warnSpec Opt_WarnMisplacedPragmas,
  warnSpec' Opt_WarnUnsafe setWarnUnsafe,
  warnSpec Opt_WarnUnsupportedCallingConventions,
  warnSpec Opt_WarnUnsupportedLlvmVersion,
  warnSpec Opt_WarnMissedExtraSharedLib,
  warnSpec Opt_WarnUntickedPromotedConstructors,
  warnSpec Opt_WarnUnusedDoBind,
  warnSpec Opt_WarnUnusedForalls,
  warnSpec Opt_WarnUnusedImports,
  warnSpec Opt_WarnUnusedLocalBinds,
  warnSpec Opt_WarnUnusedMatches,
  warnSpec Opt_WarnUnusedPatternBinds,
  warnSpec Opt_WarnUnusedTopBinds,
  warnSpec Opt_WarnUnusedTypePatterns,
  warnSpec Opt_WarnUnusedRecordWildcards,
  warnSpec Opt_WarnRedundantBangPatterns,
  warnSpec Opt_WarnRedundantRecordWildcards,
  warnSpec Opt_WarnRedundantStrictnessFlags,
  warnSpec Opt_WarnWrongDoBind,
  warnSpec Opt_WarnMissingPatternSynonymSignatures,
  warnSpec Opt_WarnMissingDerivingStrategies,
  warnSpec Opt_WarnSimplifiableClassConstraints,
  warnSpec Opt_WarnMissingHomeModules,
  warnSpec Opt_WarnUnrecognisedWarningFlags,
  warnSpec Opt_WarnStarBinder,
  warnSpec Opt_WarnStarIsType,
  depWarnSpec Opt_WarnSpaceAfterBang
              "bang patterns can no longer be written with a space",
  warnSpec Opt_WarnPartialFields,
  warnSpec Opt_WarnPrepositiveQualifiedModule,
  warnSpec Opt_WarnUnusedPackages,
  warnSpec Opt_WarnCompatUnqualifiedImports,
  warnSpec Opt_WarnInvalidHaddock,
  warnSpec Opt_WarnOperatorWhitespaceExtConflict,
  warnSpec Opt_WarnOperatorWhitespace,
  warnSpec Opt_WarnImplicitLift,
  warnSpec Opt_WarnMissingExportedPatternSynonymSignatures,
  warnSpec Opt_WarnForallIdentifier,
  warnSpec Opt_WarnUnicodeBidirectionalFormatCharacters,
  warnSpec Opt_WarnGADTMonoLocalBinds,
  warnSpec Opt_WarnTypeEqualityOutOfScope,
  warnSpec Opt_WarnTypeEqualityRequiresOperators
 ]

-- | These @-\@ flags can all be reversed with @-no-\@
negatableFlagsDeps :: [(Deprecation, FlagSpec GeneralFlag)]
negatableFlagsDeps = [
  flagGhciSpec "ignore-dot-ghci"         Opt_IgnoreDotGhci ]

-- | These @-d\@ flags can all be reversed with @-dno-\@
dFlagsDeps :: [(Deprecation, FlagSpec GeneralFlag)]
dFlagsDeps = [
-- See Note [Updating flag description in the User's Guide]
-- See Note [Supporting CLI completion]
-- Please keep the list of flags below sorted alphabetically
  flagSpec "ppr-case-as-let"            Opt_PprCaseAsLet,
  depFlagSpec' "ppr-ticks"              Opt_PprShowTicks
     (\turn_on -> useInstead "-d" "suppress-ticks" (not turn_on)),
  flagSpec "suppress-ticks"             Opt_SuppressTicks,
  depFlagSpec' "suppress-stg-free-vars" Opt_SuppressStgExts
     (useInstead "-d" "suppress-stg-exts"),
  flagSpec "suppress-stg-exts"          Opt_SuppressStgExts,
  flagSpec "suppress-stg-reps"          Opt_SuppressStgReps,
  flagSpec "suppress-coercions"         Opt_SuppressCoercions,
  flagSpec "suppress-coercion-types"    Opt_SuppressCoercionTypes,
  flagSpec "suppress-idinfo"            Opt_SuppressIdInfo,
  flagSpec "suppress-unfoldings"        Opt_SuppressUnfoldings,
  flagSpec "suppress-module-prefixes"   Opt_SuppressModulePrefixes,
  flagSpec "suppress-timestamps"        Opt_SuppressTimestamps,
  flagSpec "suppress-type-applications" Opt_SuppressTypeApplications,
  flagSpec "suppress-type-signatures"   Opt_SuppressTypeSignatures,
  flagSpec "suppress-uniques"           Opt_SuppressUniques,
  flagSpec "suppress-var-kinds"         Opt_SuppressVarKinds,
  flagSpec "suppress-core-sizes"        Opt_SuppressCoreSizes
  ]

-- | These @-f\@ flags can all be reversed with @-fno-\@
fFlags :: [FlagSpec GeneralFlag]
fFlags = map snd fFlagsDeps

fFlagsDeps :: [(Deprecation, FlagSpec GeneralFlag)]
-- Note: the typed-hole flags live in 'fHoleFlags' and are appended below.
fFlagsDeps = [
-- See Note [Updating flag description in the User's Guide]
-- See Note [Supporting CLI completion]
-- Please keep the list of flags below sorted alphabetically
  flagSpec "asm-shortcutting"                 Opt_AsmShortcutting,
  flagGhciSpec "break-on-error"               Opt_BreakOnError,
  flagGhciSpec "break-on-exception"           Opt_BreakOnException,
  flagSpec "building-cabal-package"           Opt_BuildingCabalPackage,
  flagSpec "call-arity"                       Opt_CallArity,
  flagSpec "exitification"                    Opt_Exitification,
  flagSpec "case-merge"                       Opt_CaseMerge,
  flagSpec "case-folding"                     Opt_CaseFolding,
  flagSpec "cmm-elim-common-blocks"           Opt_CmmElimCommonBlocks,
  flagSpec "cmm-sink"                         Opt_CmmSink,
  flagSpec "cmm-static-pred"                  Opt_CmmStaticPred,
  flagSpec "cse"                              Opt_CSE,
  flagSpec "stg-cse"                          Opt_StgCSE,
  flagSpec "stg-lift-lams"                    Opt_StgLiftLams,
  flagSpec "cpr-anal"                         Opt_CprAnal,
  flagSpec "defer-diagnostics"                Opt_DeferDiagnostics,
  flagSpec "defer-type-errors"                Opt_DeferTypeErrors,
  flagSpec "defer-typed-holes"                Opt_DeferTypedHoles,
  flagSpec "defer-out-of-scope-variables"     Opt_DeferOutOfScopeVariables,
  flagSpec "diagnostics-show-caret"           Opt_DiagnosticsShowCaret,
  flagSpec "dicts-cheap"                      Opt_DictsCheap,
  flagSpec "dicts-strict"                     Opt_DictsStrict,
  depFlagSpec "dmd-tx-dict-sel"               Opt_DmdTxDictSel
     "effect is now unconditionally enabled",
  flagSpec "do-eta-reduction"                 Opt_DoEtaReduction,
  flagSpec "do-lambda-eta-expansion"          Opt_DoLambdaEtaExpansion,
  flagSpec "eager-blackholing"                Opt_EagerBlackHoling,
  flagSpec "embed-manifest"                   Opt_EmbedManifest,
  flagSpec "enable-rewrite-rules"             Opt_EnableRewriteRules,
  flagSpec "enable-th-splice-warnings"        Opt_EnableThSpliceWarnings,
  flagSpec "error-spans"                      Opt_ErrorSpans,
  flagSpec "excess-precision"                 Opt_ExcessPrecision,
  flagSpec "expose-all-unfoldings"            Opt_ExposeAllUnfoldings,
  flagSpec "expose-internal-symbols"          Opt_ExposeInternalSymbols,
  flagSpec "external-dynamic-refs"            Opt_ExternalDynamicRefs,
  flagSpec "external-interpreter"             Opt_ExternalInterpreter,
  flagSpec "family-application-cache"         Opt_FamAppCache,
  flagSpec "float-in"                         Opt_FloatIn,
  flagSpec "force-recomp"                     Opt_ForceRecomp,
  flagSpec "ignore-optim-changes"             Opt_IgnoreOptimChanges,
  flagSpec "ignore-hpc-changes"               Opt_IgnoreHpcChanges,
  flagSpec "full-laziness"                    Opt_FullLaziness,
  depFlagSpec' "fun-to-thunk"                 Opt_FunToThunk
     (useInstead "-f" "full-laziness"),
  flagSpec "gen-manifest"                     Opt_GenManifest,
  flagSpec "ghci-history"                     Opt_GhciHistory,
  flagSpec "ghci-leak-check"                  Opt_GhciLeakCheck,
  flagSpec "validate-ide-info"                Opt_ValidateHie,
  flagGhciSpec "local-ghci-history"           Opt_LocalGhciHistory,
  flagGhciSpec "no-it"                        Opt_NoIt,
  flagSpec "ghci-sandbox"                     Opt_GhciSandbox,
  flagSpec "helpful-errors"                   Opt_HelpfulErrors,
  flagSpec "hpc"                              Opt_Hpc,
  flagSpec "ignore-asserts"                   Opt_IgnoreAsserts,
  flagSpec "ignore-interface-pragmas"         Opt_IgnoreInterfacePragmas,
  flagGhciSpec "implicit-import-qualified"    Opt_ImplicitImportQualified,
  flagSpec "irrefutable-tuples"               Opt_IrrefutableTuples,
  flagSpec "keep-going"                       Opt_KeepGoing,
  flagSpec "late-dmd-anal"                    Opt_LateDmdAnal,
  flagSpec "late-specialise"                  Opt_LateSpecialise,
  flagSpec "liberate-case"                    Opt_LiberateCase,
  flagHiddenSpec "llvm-tbaa"                  Opt_LlvmTBAA,
  flagHiddenSpec "llvm-fill-undef-with-garbage" Opt_LlvmFillUndefWithGarbage,
  flagSpec "loopification"                    Opt_Loopification,
  flagSpec "block-layout-cfg"                 Opt_CfgBlocklayout,
  flagSpec "block-layout-weightless"          Opt_WeightlessBlocklayout,
  flagSpec "omit-interface-pragmas"           Opt_OmitInterfacePragmas,
  flagSpec "omit-yields"                      Opt_OmitYields,
  flagSpec "optimal-applicative-do"           Opt_OptimalApplicativeDo,
  flagSpec "pedantic-bottoms"                 Opt_PedanticBottoms,
  flagSpec "pre-inlining"                     Opt_SimplPreInlining,
  flagGhciSpec "print-bind-contents"          Opt_PrintBindContents,
  flagGhciSpec "print-bind-result"            Opt_PrintBindResult,
  flagGhciSpec "print-evld-with-show"         Opt_PrintEvldWithShow,
  flagSpec "print-explicit-foralls"           Opt_PrintExplicitForalls,
  flagSpec "print-explicit-kinds"             Opt_PrintExplicitKinds,
  flagSpec "print-explicit-coercions"         Opt_PrintExplicitCoercions,
  flagSpec "print-explicit-runtime-reps"      Opt_PrintExplicitRuntimeReps,
  flagSpec "print-equality-relations"         Opt_PrintEqualityRelations,
  flagSpec "print-axiom-incomps"              Opt_PrintAxiomIncomps,
  flagSpec "print-unicode-syntax"             Opt_PrintUnicodeSyntax,
  flagSpec "print-expanded-synonyms"          Opt_PrintExpandedSynonyms,
  flagSpec "print-potential-instances"        Opt_PrintPotentialInstances,
  flagSpec "print-typechecker-elaboration"    Opt_PrintTypecheckerElaboration,
  flagSpec "prof-cafs"                        Opt_AutoSccsOnIndividualCafs,
  flagSpec "prof-count-entries"               Opt_ProfCountEntries,
  flagSpec "prof-late"                        Opt_ProfLateCcs,
  flagSpec "prof-manual"                      Opt_ProfManualCcs,
  flagSpec "prof-late-inline"                 Opt_ProfLateInlineCcs,
  flagSpec "regs-graph"                       Opt_RegsGraph,
  flagSpec "regs-iterative"                   Opt_RegsIterative,
  depFlagSpec' "rewrite-rules"                Opt_EnableRewriteRules
     (useInstead "-f" "enable-rewrite-rules"),
  flagSpec "shared-implib"                    Opt_SharedImplib,
  flagSpec "spec-constr"                      Opt_SpecConstr,
  flagSpec "spec-constr-keen"                 Opt_SpecConstrKeen,
  -- British and American spellings are both accepted for these:
  flagSpec "specialise"                       Opt_Specialise,
  flagSpec "specialize"                       Opt_Specialise,
  flagSpec "specialise-aggressively"          Opt_SpecialiseAggressively,
  flagSpec "specialize-aggressively"          Opt_SpecialiseAggressively,
  flagSpec "cross-module-specialise"          Opt_CrossModuleSpecialise,
  flagSpec "cross-module-specialize"          Opt_CrossModuleSpecialise,
  flagSpec "inline-generics"                  Opt_InlineGenerics,
  flagSpec "inline-generics-aggressively"     Opt_InlineGenericsAggressively,
  flagSpec "static-argument-transformation"   Opt_StaticArgumentTransformation,
  flagSpec "strictness"                       Opt_Strictness,
  flagSpec "use-rpaths"                       Opt_RPath,
  flagSpec "write-interface"                  Opt_WriteInterface,
  flagSpec "write-ide-info"                   Opt_WriteHie,
  flagSpec "unbox-small-strict-fields"        Opt_UnboxSmallStrictFields,
  flagSpec "unbox-strict-fields"              Opt_UnboxStrictFields,
  flagSpec "version-macros"                   Opt_VersionMacros,
  flagSpec "worker-wrapper"                   Opt_WorkerWrapper,
  flagSpec "worker-wrapper-cbv"               Opt_WorkerWrapperUnlift,
  flagSpec "solve-constant-dicts"             Opt_SolveConstantDicts,
  flagSpec "catch-nonexhaustive-cases"        Opt_CatchNonexhaustiveCases,
  flagSpec "alignment-sanitisation"           Opt_AlignmentSanitisation,
  flagSpec "check-prim-bounds"                Opt_DoBoundsChecking,
  flagSpec "num-constant-folding"             Opt_NumConstantFolding,
  flagSpec "core-constant-folding"            Opt_CoreConstantFolding,
  flagSpec "fast-pap-calls"                   Opt_FastPAPCalls,
  flagSpec "cmm-control-flow"                 Opt_CmmControlFlow,
  flagSpec "show-warning-groups"              Opt_ShowWarnGroups,
  flagSpec "hide-source-paths"                Opt_HideSourcePaths,
  flagSpec "show-loaded-modules"              Opt_ShowLoadedModules,
  flagSpec "whole-archive-hs-libs"            Opt_WholeArchiveHsLibs,
  flagSpec "keep-cafs"                        Opt_KeepCAFs,
  flagSpec "link-rts"                         Opt_LinkRts,
  -- -fcompact-unwind is only meaningful on Darwin; on any other platform
  -- (or when turned off) a warning is emitted and the flags are unchanged.
  flagSpec' "compact-unwind"                  Opt_CompactUnwind
      (\turn_on -> updM (\dflags -> do
        unless (platformOS (targetPlatform dflags) == OSDarwin && turn_on)
          (addWarn "-compact-unwind is only implemented by the darwin platform. Ignoring.")
        return dflags))
  ]
  ++ fHoleFlags

-- | These @-f\@ flags have to do with the typed-hole error message or
-- the valid hole fits in that message. See Note [Valid hole fits include ...]
-- in the "GHC.Tc.Errors.Hole" module. These flags can all be reversed
-- with @-fno-\@
fHoleFlags :: [(Deprecation, FlagSpec GeneralFlag)]
fHoleFlags = [
  flagSpec "show-hole-constraints"            Opt_ShowHoleConstraints,
  depFlagSpec' "show-valid-substitutions"     Opt_ShowValidHoleFits
   (useInstead "-f" "show-valid-hole-fits"),
  flagSpec "show-valid-hole-fits"             Opt_ShowValidHoleFits,
  -- Sorting settings
  flagSpec "sort-valid-hole-fits"             Opt_SortValidHoleFits,
  flagSpec "sort-by-size-hole-fits"           Opt_SortBySizeHoleFits,
  flagSpec "sort-by-subsumption-hole-fits"    Opt_SortBySubsumHoleFits,
  flagSpec "abstract-refinement-hole-fits"    Opt_AbstractRefHoleFits,
  -- Output format settings
  flagSpec "show-hole-matches-of-hole-fits"   Opt_ShowMatchesOfHoleFits,
  flagSpec "show-provenance-of-hole-fits"     Opt_ShowProvOfHoleFits,
  flagSpec "show-type-of-hole-fits"           Opt_ShowTypeOfHoleFits,
  flagSpec "show-type-app-of-hole-fits"       Opt_ShowTypeAppOfHoleFits,
  flagSpec "show-type-app-vars-of-hole-fits"  Opt_ShowTypeAppVarsOfHoleFits,
  flagSpec "show-docs-of-hole-fits"           Opt_ShowDocsOfHoleFits,
  flagSpec "unclutter-valid-hole-fits"        Opt_UnclutterValidHoleFits
  ]

-- | These @-f\@ flags can all be reversed with @-fno-\@
fLangFlags :: [FlagSpec LangExt.Extension]
fLangFlags = map snd fLangFlagsDeps

-- Legacy -f spellings of language extensions; all deprecated in favour of -X.
fLangFlagsDeps :: [(Deprecation, FlagSpec LangExt.Extension)]
fLangFlagsDeps = [
-- See Note [Updating flag description in the User's Guide]
-- See Note [Supporting CLI completion]
  depFlagSpecOp' "th"                           LangExt.TemplateHaskell
    checkTemplateHaskellOk
    (deprecatedForExtension "TemplateHaskell"),
  depFlagSpec' "fi"                             LangExt.ForeignFunctionInterface
    (deprecatedForExtension "ForeignFunctionInterface"),
  depFlagSpec' "ffi"                            LangExt.ForeignFunctionInterface
    (deprecatedForExtension "ForeignFunctionInterface"),
  depFlagSpec' "arrows"                         LangExt.Arrows
    (deprecatedForExtension "Arrows"),
  depFlagSpec' "implicit-prelude"               LangExt.ImplicitPrelude
    (deprecatedForExtension "ImplicitPrelude"),
  depFlagSpec' "bang-patterns"                  LangExt.BangPatterns
    (deprecatedForExtension "BangPatterns"),
  depFlagSpec' "monomorphism-restriction"       LangExt.MonomorphismRestriction
    (deprecatedForExtension "MonomorphismRestriction"),
  depFlagSpec' "extended-default-rules"         LangExt.ExtendedDefaultRules
    (deprecatedForExtension "ExtendedDefaultRules"),
  depFlagSpec' "implicit-params"                LangExt.ImplicitParams
    (deprecatedForExtension "ImplicitParams"),
  depFlagSpec' "scoped-type-variables"          LangExt.ScopedTypeVariables
    (deprecatedForExtension "ScopedTypeVariables"),
  depFlagSpec' "allow-overlapping-instances"    LangExt.OverlappingInstances
    (deprecatedForExtension "OverlappingInstances"),
  depFlagSpec' "allow-undecidable-instances"    LangExt.UndecidableInstances
    (deprecatedForExtension "UndecidableInstances"),
  depFlagSpec' "allow-incoherent-instances"     LangExt.IncoherentInstances
    (deprecatedForExtension "IncoherentInstances")
  ]

supportedLanguages :: [String]
supportedLanguages = map (flagSpecName . snd) languageFlagsDeps

supportedLanguageOverlays :: [String]
supportedLanguageOverlays = map (flagSpecName . snd) safeHaskellFlagsDeps

supportedExtensions :: ArchOS -> [String]
supportedExtensions (ArchOS _ os) = concatMap toFlagSpecNamePair xFlags
  where
    toFlagSpecNamePair flg
      -- IMPORTANT! Make sure that `ghc --supported-extensions` omits
      -- "TemplateHaskell"/"QuasiQuotes" when it's known not to work out of the
      -- box. See also GHC #11102 and #16331 for more details about
      -- the rationale
      | isAIX, flagSpecFlag flg == LangExt.TemplateHaskell = [noName]
      | isAIX, flagSpecFlag flg == LangExt.QuasiQuotes     = [noName]
      | otherwise = [name, noName]
      where
        isAIX = os == OSAIX
        noName = "No" ++ name
        name = flagSpecName flg

supportedLanguagesAndExtensions :: ArchOS -> [String]
supportedLanguagesAndExtensions arch_os =
    supportedLanguages ++ supportedLanguageOverlays ++ supportedExtensions arch_os

-- | These -X flags cannot be reversed with -XNo
languageFlagsDeps :: [(Deprecation, FlagSpec Language)]
languageFlagsDeps = [
  flagSpec "Haskell98"   Haskell98,
  flagSpec "Haskell2010" Haskell2010,
  flagSpec "GHC2021"     GHC2021
  ]

-- | These -X flags cannot be reversed with -XNo
-- They are used to place hard requirements on what GHC Haskell language
-- features can be used.
safeHaskellFlagsDeps :: [(Deprecation, FlagSpec SafeHaskellMode)]
safeHaskellFlagsDeps = [mkF Sf_Unsafe, mkF Sf_Trustworthy, mkF Sf_Safe]
    where mkF flag = flagSpec (show flag) flag

-- | These -X flags can all be reversed with -XNo
xFlags :: [FlagSpec LangExt.Extension]
xFlags = map snd xFlagsDeps

xFlagsDeps :: [(Deprecation, FlagSpec LangExt.Extension)]
xFlagsDeps = [
-- See Note [Updating flag description in the User's Guide]
-- See Note [Supporting CLI completion]
-- See Note [Adding a language extension]
-- Please keep the list of flags below sorted alphabetically
  flagSpec "AllowAmbiguousTypes"              LangExt.AllowAmbiguousTypes,
  flagSpec "AlternativeLayoutRule"            LangExt.AlternativeLayoutRule,
  flagSpec "AlternativeLayoutRuleTransitional"
                                              LangExt.AlternativeLayoutRuleTransitional,
  flagSpec "Arrows"                           LangExt.Arrows,
  depFlagSpecCond "AutoDeriveTypeable"        LangExt.AutoDeriveTypeable
    id
         ("Typeable instances are created automatically " ++
                     "for all types since GHC 8.2."),
  flagSpec "BangPatterns"                     LangExt.BangPatterns,
  flagSpec "BinaryLiterals"                   LangExt.BinaryLiterals,
  flagSpec "CApiFFI"                          LangExt.CApiFFI,
  flagSpec "CPP"                              LangExt.Cpp,
  flagSpec "CUSKs"                            LangExt.CUSKs,
  flagSpec "ConstrainedClassMethods"          LangExt.ConstrainedClassMethods,
  flagSpec "ConstraintKinds"                  LangExt.ConstraintKinds,
  flagSpec "DataKinds"                        LangExt.DataKinds,
  depFlagSpecCond "DatatypeContexts"          LangExt.DatatypeContexts
    id
         ("It was widely considered a misfeature, " ++
                     "and has been removed from the Haskell language."),
  flagSpec "DefaultSignatures"                LangExt.DefaultSignatures,
  flagSpec "DeriveAnyClass"                   LangExt.DeriveAnyClass,
  flagSpec "DeriveDataTypeable"               LangExt.DeriveDataTypeable,
  flagSpec "DeriveFoldable"                   LangExt.DeriveFoldable,
  flagSpec "DeriveFunctor"                    LangExt.DeriveFunctor,
  flagSpec "DeriveGeneric"                    LangExt.DeriveGeneric,
  flagSpec "DeriveLift"                       LangExt.DeriveLift,
  flagSpec "DeriveTraversable"                LangExt.DeriveTraversable,
  flagSpec "DerivingStrategies"               LangExt.DerivingStrategies,
  flagSpec' "DerivingVia"                     LangExt.DerivingVia
                                              setDeriveVia,
  flagSpec "DisambiguateRecordFields"         LangExt.DisambiguateRecordFields,
  flagSpec "DoAndIfThenElse"                  LangExt.DoAndIfThenElse,
  flagSpec "BlockArguments"                   LangExt.BlockArguments,
  depFlagSpec' "DoRec"                        LangExt.RecursiveDo
     (deprecatedForExtension "RecursiveDo"),
  flagSpec "DuplicateRecordFields"            LangExt.DuplicateRecordFields,
  flagSpec "FieldSelectors"                   LangExt.FieldSelectors,
  flagSpec "EmptyCase"                        LangExt.EmptyCase,
  flagSpec "EmptyDataDecls"                   LangExt.EmptyDataDecls,
  flagSpec "EmptyDataDeriving"                LangExt.EmptyDataDeriving,
  flagSpec "ExistentialQuantification"        LangExt.ExistentialQuantification,
  flagSpec "ExplicitForAll"                   LangExt.ExplicitForAll,
  flagSpec "ExplicitNamespaces"               LangExt.ExplicitNamespaces,
  flagSpec "ExtendedDefaultRules"             LangExt.ExtendedDefaultRules,
  flagSpec "FlexibleContexts"                 LangExt.FlexibleContexts,
  flagSpec "FlexibleInstances"                LangExt.FlexibleInstances,
  flagSpec "ForeignFunctionInterface"         LangExt.ForeignFunctionInterface,
  flagSpec "FunctionalDependencies"           LangExt.FunctionalDependencies,
  flagSpec "GADTSyntax"                       LangExt.GADTSyntax,
  flagSpec "GADTs"                            LangExt.GADTs,
  flagSpec "GHCForeignImportPrim"             LangExt.GHCForeignImportPrim,
  -- Both spellings map to the same extension:
  flagSpec' "GeneralizedNewtypeDeriving"      LangExt.GeneralizedNewtypeDeriving
                                              setGenDeriving,
  flagSpec' "GeneralisedNewtypeDeriving"      LangExt.GeneralizedNewtypeDeriving
                                              setGenDeriving,
  flagSpec "ImplicitParams"                   LangExt.ImplicitParams,
  flagSpec "ImplicitPrelude"                  LangExt.ImplicitPrelude,
  flagSpec "ImportQualifiedPost"              LangExt.ImportQualifiedPost,
  flagSpec "ImpredicativeTypes"               LangExt.ImpredicativeTypes,
  flagSpec' "IncoherentInstances"             LangExt.IncoherentInstances
                                              setIncoherentInsts,
  flagSpec "TypeFamilyDependencies"           LangExt.TypeFamilyDependencies,
  flagSpec "InstanceSigs"                     LangExt.InstanceSigs,
  flagSpec "ApplicativeDo"                    LangExt.ApplicativeDo,
  flagSpec "InterruptibleFFI"                 LangExt.InterruptibleFFI,
  flagSpec "JavaScriptFFI"                    LangExt.JavaScriptFFI,
  flagSpec "KindSignatures"                   LangExt.KindSignatures,
  flagSpec "LambdaCase"                       LangExt.LambdaCase,
  flagSpec "LexicalNegation"                  LangExt.LexicalNegation,
  flagSpec "LiberalTypeSynonyms"              LangExt.LiberalTypeSynonyms,
  flagSpec "LinearTypes"                      LangExt.LinearTypes,
  flagSpec "MagicHash"                        LangExt.MagicHash,
  flagSpec "MonadComprehensions"              LangExt.MonadComprehensions,
  flagSpec "MonoLocalBinds"                   LangExt.MonoLocalBinds,
  flagSpec "DeepSubsumption"                  LangExt.DeepSubsumption,
  flagSpec "MonomorphismRestriction"          LangExt.MonomorphismRestriction,
  flagSpec "MultiParamTypeClasses"            LangExt.MultiParamTypeClasses,
  flagSpec "MultiWayIf"                       LangExt.MultiWayIf,
  flagSpec "NumericUnderscores"               LangExt.NumericUnderscores,
  flagSpec "NPlusKPatterns"                   LangExt.NPlusKPatterns,
  flagSpec "NamedFieldPuns"                   LangExt.NamedFieldPuns,
  flagSpec "NamedWildCards"                   LangExt.NamedWildCards,
  flagSpec "NegativeLiterals"                 LangExt.NegativeLiterals,
  flagSpec "HexFloatLiterals"                 LangExt.HexFloatLiterals,
  flagSpec "NondecreasingIndentation"         LangExt.NondecreasingIndentation,
  depFlagSpec' "NullaryTypeClasses"           LangExt.NullaryTypeClasses
    (deprecatedForExtension "MultiParamTypeClasses"),
  flagSpec "NumDecimals"                      LangExt.NumDecimals,
  depFlagSpecOp "OverlappingInstances"        LangExt.OverlappingInstances
    setOverlappingInsts
    "instead use per-instance pragmas OVERLAPPING/OVERLAPPABLE/OVERLAPS",
  flagSpec "OverloadedLabels"                 LangExt.OverloadedLabels,
  flagSpec "OverloadedLists"                  LangExt.OverloadedLists,
  flagSpec "OverloadedStrings"                LangExt.OverloadedStrings,
  flagSpec "PackageImports"                   LangExt.PackageImports,
  flagSpec "ParallelArrays"                   LangExt.ParallelArrays,
  flagSpec "ParallelListComp"                 LangExt.ParallelListComp,
  flagSpec "PartialTypeSignatures"            LangExt.PartialTypeSignatures,
  flagSpec "PatternGuards"                    LangExt.PatternGuards,
  depFlagSpec' "PatternSignatures"            LangExt.ScopedTypeVariables
    (deprecatedForExtension "ScopedTypeVariables"),
  flagSpec "PatternSynonyms"                  LangExt.PatternSynonyms,
  flagSpec "PolyKinds"                        LangExt.PolyKinds,
  flagSpec "PolymorphicComponents"            LangExt.RankNTypes,
  flagSpec "QuantifiedConstraints"            LangExt.QuantifiedConstraints,
  flagSpec "PostfixOperators"                 LangExt.PostfixOperators,
  flagSpec "QuasiQuotes"                      LangExt.QuasiQuotes,
  flagSpec "QualifiedDo"                      LangExt.QualifiedDo,
  flagSpec "Rank2Types"                       LangExt.RankNTypes,
  flagSpec "RankNTypes"                       LangExt.RankNTypes,
  flagSpec "RebindableSyntax"                 LangExt.RebindableSyntax,
  flagSpec "OverloadedRecordDot"              LangExt.OverloadedRecordDot,
  flagSpec "OverloadedRecordUpdate"           LangExt.OverloadedRecordUpdate,
  depFlagSpec' "RecordPuns"                   LangExt.NamedFieldPuns
    (deprecatedForExtension "NamedFieldPuns"),
  flagSpec "RecordWildCards"                  LangExt.RecordWildCards,
  flagSpec "RecursiveDo"                      LangExt.RecursiveDo,
  flagSpec "RelaxedLayout"                    LangExt.RelaxedLayout,
  -- Note the 'not' condition: only the *off* form is warned about.
  depFlagSpecCond "RelaxedPolyRec"            LangExt.RelaxedPolyRec
    not
         "You can't turn off RelaxedPolyRec any more",
  flagSpec "RoleAnnotations"                  LangExt.RoleAnnotations,
  flagSpec "ScopedTypeVariables"              LangExt.ScopedTypeVariables,
  flagSpec "StandaloneDeriving"               LangExt.StandaloneDeriving,
  flagSpec "StarIsType"                       LangExt.StarIsType,
  flagSpec "StaticPointers"                   LangExt.StaticPointers,
  flagSpec "Strict"                           LangExt.Strict,
  flagSpec "StrictData"                       LangExt.StrictData,
  flagSpec' "TemplateHaskell"                 LangExt.TemplateHaskell
                                              checkTemplateHaskellOk,
  flagSpec "TemplateHaskellQuotes"            LangExt.TemplateHaskellQuotes,
  flagSpec "StandaloneKindSignatures"         LangExt.StandaloneKindSignatures,
  flagSpec "TraditionalRecordSyntax"          LangExt.TraditionalRecordSyntax,
  flagSpec "TransformListComp"                LangExt.TransformListComp,
  flagSpec "TupleSections"                    LangExt.TupleSections,
  flagSpec "TypeApplications"                 LangExt.TypeApplications,
  flagSpec "TypeInType"                       LangExt.TypeInType,
  flagSpec "TypeFamilies"                     LangExt.TypeFamilies,
  flagSpec "TypeOperators"                    LangExt.TypeOperators,
  flagSpec "TypeSynonymInstances"             LangExt.TypeSynonymInstances,
  flagSpec "UnboxedTuples"                    LangExt.UnboxedTuples,
  flagSpec "UnboxedSums"                      LangExt.UnboxedSums,
  flagSpec "UndecidableInstances"             LangExt.UndecidableInstances,
  flagSpec "UndecidableSuperClasses"          LangExt.UndecidableSuperClasses,
  flagSpec "UnicodeSyntax"                    LangExt.UnicodeSyntax,
  flagSpec "UnliftedDatatypes"                LangExt.UnliftedDatatypes,
  flagSpec "UnliftedFFITypes"                 LangExt.UnliftedFFITypes,
  flagSpec "UnliftedNewtypes"                 LangExt.UnliftedNewtypes,
  flagSpec "ViewPatterns"                     LangExt.ViewPatterns
  ]

defaultFlags :: Settings -> [GeneralFlag]
defaultFlags settings
-- See Note [Updating flag description in the User's Guide]
  = [ Opt_AutoLinkPackages,
      Opt_DiagnosticsShowCaret,
      Opt_EmbedManifest,
      Opt_FamAppCache,
      Opt_GenManifest,
      Opt_GhciHistory,
      Opt_GhciSandbox,
      Opt_HelpfulErrors,
      Opt_KeepHiFiles,
      Opt_KeepOFiles,
      Opt_OmitYields,
      Opt_PrintBindContents,
      Opt_ProfCountEntries,
      Opt_SharedImplib,
      Opt_SimplPreInlining,
      Opt_VersionMacros,
      Opt_RPath,
      Opt_CompactUnwind,
      Opt_SuppressStgReps
    ]

    ++ [f | (ns,f) <- optLevelFlags, 0 `elem` ns]
             -- The default -O0 options

    ++ default_PIC platform

    ++ validHoleFitDefaults

    where platform = sTargetPlatform settings

-- | These are the default settings for the display and sorting of valid hole
--  fits in typed-hole error messages. See Note [Valid hole fits include ...]
 -- in the "GHC.Tc.Errors.Hole" module.
validHoleFitDefaults :: [GeneralFlag]
validHoleFitDefaults
  =  [ Opt_ShowTypeAppOfHoleFits
     , Opt_ShowTypeOfHoleFits
     , Opt_ShowProvOfHoleFits
     , Opt_ShowMatchesOfHoleFits
     , Opt_ShowValidHoleFits
     , Opt_SortValidHoleFits
     , Opt_SortBySizeHoleFits
     , Opt_ShowHoleConstraints ]

-- Flag implications restricted to the valid-hole-fit flags; appended to
-- 'impliedGFlags' below.  Triples read: (flag, direction, implied flag).
validHoleFitsImpliedGFlags :: [(GeneralFlag, TurnOnFlag, GeneralFlag)]
validHoleFitsImpliedGFlags
  = [ (Opt_UnclutterValidHoleFits, turnOff, Opt_ShowTypeAppOfHoleFits)
    , (Opt_UnclutterValidHoleFits, turnOff, Opt_ShowTypeAppVarsOfHoleFits)
    , (Opt_UnclutterValidHoleFits, turnOff, Opt_ShowDocsOfHoleFits)
    , (Opt_ShowTypeAppVarsOfHoleFits, turnOff, Opt_ShowTypeAppOfHoleFits)
    , (Opt_UnclutterValidHoleFits, turnOff, Opt_ShowProvOfHoleFits) ]

-- | Platform-dependent flags enabled by default (PIC-related).
default_PIC :: Platform -> [GeneralFlag]
default_PIC platform =
  case (platformOS platform, platformArch platform) of
    -- Darwin always requires PIC.  Especially on more recent macOS releases
    -- there will be a 4GB __ZEROPAGE that prevents us from using 32bit addresses
    -- while we could work around this on x86_64 (like WINE does), we won't be
    -- able on aarch64, where this is enforced.
    (OSDarwin,  ArchX86_64)  -> [Opt_PIC]
    -- For AArch64, we need to always have PIC enabled.  The relocation model
    -- on AArch64 does not permit arbitrary relocations.  Under ASLR, we can't
    -- control much how far apart symbols are in memory for our in-memory static
    -- linker;  and thus need to ensure we get sufficiently capable relocations.
    -- This requires PIC on AArch64, and ExternalDynamicRefs on Linux as on top
    -- of that.  Subsequently we expect all code on aarch64/linux (and macOS) to
    -- be built with -fPIC.
    (OSDarwin,  ArchAArch64) -> [Opt_PIC]
    (OSLinux,   ArchAArch64) -> [Opt_PIC, Opt_ExternalDynamicRefs]
    (OSLinux,   ArchARM {})  -> [Opt_PIC, Opt_ExternalDynamicRefs]
    (OSOpenBSD, ArchX86_64)  -> [Opt_PIC] -- Due to PIE support in
                                          -- OpenBSD since 5.3 release
                                          -- (1 May 2013) we need to
                                          -- always generate PIC. See
                                          -- #10597 for more
                                          -- information.
    _                        -> []

-- General flags that are switched on/off when other general flags are switched
-- on
impliedGFlags :: [(GeneralFlag, TurnOnFlag, GeneralFlag)]
impliedGFlags = [(Opt_DeferTypeErrors, turnOn, Opt_DeferTypedHoles)
                ,(Opt_DeferTypeErrors, turnOn, Opt_DeferOutOfScopeVariables)
                ,(Opt_DoLinearCoreLinting, turnOn, Opt_DoCoreLinting)
                ,(Opt_Strictness, turnOn, Opt_WorkerWrapper)
                ] ++ validHoleFitsImpliedGFlags

-- General flags that are switched on/off when other general flags are switched
-- off
impliedOffGFlags :: [(GeneralFlag, TurnOnFlag, GeneralFlag)]
impliedOffGFlags = [(Opt_Strictness, turnOff, Opt_WorkerWrapper)]

-- Extension implications: enabling (or, where the middle field is 'turnOff',
-- disabling) the first extension also applies the third.
impliedXFlags :: [(LangExt.Extension, TurnOnFlag, LangExt.Extension)]
impliedXFlags
-- See Note [Updating flag description in the User's Guide]
  = [ (LangExt.RankNTypes,                turnOn, LangExt.ExplicitForAll)
    , (LangExt.QuantifiedConstraints,     turnOn, LangExt.ExplicitForAll)
    , (LangExt.ScopedTypeVariables,       turnOn, LangExt.ExplicitForAll)
    , (LangExt.LiberalTypeSynonyms,       turnOn, LangExt.ExplicitForAll)
    , (LangExt.ExistentialQuantification, turnOn, LangExt.ExplicitForAll)
    , (LangExt.FlexibleInstances,         turnOn, LangExt.TypeSynonymInstances)
    , (LangExt.FunctionalDependencies,    turnOn, LangExt.MultiParamTypeClasses)
    , (LangExt.MultiParamTypeClasses,     turnOn, LangExt.ConstrainedClassMethods)  -- c.f. #7854
    , (LangExt.TypeFamilyDependencies,    turnOn, LangExt.TypeFamilies)

    , (LangExt.RebindableSyntax, turnOff, LangExt.ImplicitPrelude)      -- NB: turn off!

    , (LangExt.DerivingVia, turnOn, LangExt.DerivingStrategies)

    , (LangExt.GADTs,            turnOn, LangExt.GADTSyntax)
    , (LangExt.GADTs,            turnOn, LangExt.MonoLocalBinds)
    , (LangExt.TypeFamilies,     turnOn, LangExt.MonoLocalBinds)

    , (LangExt.TypeFamilies,     turnOn, LangExt.KindSignatures)  -- Type families use kind signatures
    , (LangExt.PolyKinds,        turnOn, LangExt.KindSignatures)  -- Ditto polymorphic kinds

    -- TypeInType is now just a synonym for a couple of other extensions.
    , (LangExt.TypeInType,       turnOn, LangExt.DataKinds)
    , (LangExt.TypeInType,       turnOn, LangExt.PolyKinds)
    , (LangExt.TypeInType,       turnOn, LangExt.KindSignatures)

    -- Standalone kind signatures are a replacement for CUSKs.
    , (LangExt.StandaloneKindSignatures, turnOff, LangExt.CUSKs)

    -- AutoDeriveTypeable is not very useful without DeriveDataTypeable
    , (LangExt.AutoDeriveTypeable, turnOn, LangExt.DeriveDataTypeable)

    -- We turn this on so that we can export associated type
    -- type synonyms in subordinates (e.g. MyClass(type AssocType))
    , (LangExt.TypeFamilies,     turnOn, LangExt.ExplicitNamespaces)
    , (LangExt.TypeOperators,    turnOn, LangExt.ExplicitNamespaces)

    , (LangExt.ImpredicativeTypes,  turnOn, LangExt.RankNTypes)

    -- Record wild-cards implies field disambiguation
    -- Otherwise if you write (C {..}) you may well get
    -- stuff like " 'a' not in scope ", which is a bit silly
    -- if the compiler has just filled in field 'a' of constructor 'C'
    , (LangExt.RecordWildCards,     turnOn, LangExt.DisambiguateRecordFields)

    , (LangExt.ParallelArrays, turnOn, LangExt.ParallelListComp)

    , (LangExt.JavaScriptFFI, turnOn, LangExt.InterruptibleFFI)

    , (LangExt.DeriveTraversable, turnOn, LangExt.DeriveFunctor)
    , (LangExt.DeriveTraversable, turnOn, LangExt.DeriveFoldable)

    -- Duplicate record fields require field disambiguation
    , (LangExt.DuplicateRecordFields, turnOn, LangExt.DisambiguateRecordFields)

    , (LangExt.TemplateHaskell, turnOn, LangExt.TemplateHaskellQuotes)
    , (LangExt.Strict, turnOn, LangExt.StrictData)

    -- Historically only UnboxedTuples was required for unboxed sums to work.
    -- To avoid breaking code, we make UnboxedTuples imply UnboxedSums.
    , (LangExt.UnboxedTuples, turnOn, LangExt.UnboxedSums)

    -- The extensions needed to declare an H98 unlifted data type
    , (LangExt.UnliftedDatatypes, turnOn, LangExt.DataKinds)
    , (LangExt.UnliftedDatatypes, turnOn, LangExt.StandaloneKindSignatures)
  ]

-- Note [When is StarIsType enabled]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- The StarIsType extension determines whether to treat '*' as a regular type
-- operator or as a synonym for 'Data.Kind.Type'. Many existing pre-TypeInType
-- programs expect '*' to be synonymous with 'Type', so by default StarIsType is
-- enabled.
--
-- Programs that use TypeOperators might expect to repurpose '*' for
-- multiplication or another binary operation, but making TypeOperators imply
-- NoStarIsType caused too much breakage on Hackage.
--

-- Note [Documenting optimisation flags]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
-- If you change the list of flags enabled for particular optimisation levels
-- please remember to update the User's Guide. The relevant file is:
--
--   docs/users_guide/using-optimisation.rst
--
-- Make sure to note whether a flag is implied by -O0, -O or -O2.

optLevelFlags :: [([Int], GeneralFlag)]
-- Default settings of flags, before any command-line overrides
optLevelFlags -- see Note [Documenting optimisation flags]
  = [ ([0,1,2], Opt_DoLambdaEtaExpansion)
    , ([0,1,2], Opt_DoEtaReduction)       -- See Note [Eta-reduction in -O0]
    , ([0,1,2], Opt_LlvmTBAA)
    , ([0,1,2], Opt_ProfManualCcs )
    , ([2], Opt_DictsStrict)

    , ([0],     Opt_IgnoreInterfacePragmas)
    , ([0],     Opt_OmitInterfacePragmas)

    , ([1,2],   Opt_CoreConstantFolding)

    , ([1,2],   Opt_CallArity)
    , ([1,2],   Opt_Exitification)
    , ([1,2],   Opt_CaseMerge)
    , ([1,2],   Opt_CaseFolding)
    , ([1,2],   Opt_CmmElimCommonBlocks)
    , ([2],     Opt_AsmShortcutting)
    , ([1,2],   Opt_CmmSink)
    , ([1,2],   Opt_CmmStaticPred)
    , ([1,2],   Opt_CSE)
    , ([1,2],   Opt_StgCSE)
    , ([2],     Opt_StgLiftLams)
    , ([1,2],   Opt_CmmControlFlow)

    , ([1,2],   Opt_EnableRewriteRules)
          -- Off for -O0.
Otherwise we desugar list literals -- to 'build' but don't run the simplifier passes that -- would rewrite them back to cons cells! This seems -- silly, and matters for the GHCi debugger. , ([1,2], Opt_FloatIn) , ([1,2], Opt_FullLaziness) , ([1,2], Opt_IgnoreAsserts) , ([1,2], Opt_Loopification) , ([1,2], Opt_CfgBlocklayout) -- Experimental , ([1,2], Opt_Specialise) , ([1,2], Opt_CrossModuleSpecialise) , ([1,2], Opt_InlineGenerics) , ([1,2], Opt_Strictness) , ([1,2], Opt_UnboxSmallStrictFields) , ([1,2], Opt_CprAnal) , ([1,2], Opt_WorkerWrapper) , ([1,2], Opt_SolveConstantDicts) , ([1,2], Opt_NumConstantFolding) , ([2], Opt_LiberateCase) , ([2], Opt_SpecConstr) , ([2], Opt_FastPAPCalls) -- , ([2], Opt_RegsGraph) -- RegsGraph suffers performance regression. See #7679 -- , ([2], Opt_StaticArgumentTransformation) -- Static Argument Transformation needs investigation. See #9374 ] enableUnusedBinds :: DynP () enableUnusedBinds = mapM_ setWarningFlag unusedBindsFlags disableUnusedBinds :: DynP () disableUnusedBinds = mapM_ unSetWarningFlag unusedBindsFlags -- | Things you get with `-dlint`. 
enableDLint :: DynP ()
enableDLint = do
    mapM_ setGeneralFlag dLintFlags
    addWayDynP WayDebug
  where
    dLintFlags :: [GeneralFlag]
    dLintFlags = [ Opt_DoCoreLinting
                 , Opt_DoStgLinting
                 , Opt_DoCmmLinting
                 , Opt_DoAsmLinting
                 , Opt_CatchNonexhaustiveCases
                 , Opt_LlvmFillUndefWithGarbage
                 ]

-- | Things you get with `-fglasgow-exts`: explicit foralls in output plus
-- the extension list in 'glasgowExtsFlags'.
enableGlasgowExts :: DynP ()
enableGlasgowExts = do setGeneralFlag Opt_PrintExplicitForalls
                       mapM_ setExtensionFlag glasgowExtsFlags

-- | Inverse of 'enableGlasgowExts'.
disableGlasgowExts :: DynP ()
disableGlasgowExts = do unSetGeneralFlag Opt_PrintExplicitForalls
                        mapM_ unSetExtensionFlag glasgowExtsFlags

-- Please keep what_glasgow_exts_does.rst up to date with this list
glasgowExtsFlags :: [LangExt.Extension]
glasgowExtsFlags = [
             LangExt.ConstrainedClassMethods
           , LangExt.DeriveDataTypeable
           , LangExt.DeriveFoldable
           , LangExt.DeriveFunctor
           , LangExt.DeriveGeneric
           , LangExt.DeriveTraversable
           , LangExt.EmptyDataDecls
           , LangExt.ExistentialQuantification
           , LangExt.ExplicitNamespaces
           , LangExt.FlexibleContexts
           , LangExt.FlexibleInstances
           , LangExt.ForeignFunctionInterface
           , LangExt.FunctionalDependencies
           , LangExt.GeneralizedNewtypeDeriving
           , LangExt.ImplicitParams
           , LangExt.KindSignatures
           , LangExt.LiberalTypeSynonyms
           , LangExt.MagicHash
           , LangExt.MultiParamTypeClasses
           , LangExt.ParallelListComp
           , LangExt.PatternGuards
           , LangExt.PostfixOperators
           , LangExt.RankNTypes
           , LangExt.RecursiveDo
           , LangExt.ScopedTypeVariables
           , LangExt.StandaloneDeriving
           , LangExt.TypeOperators
           , LangExt.TypeSynonymInstances
           , LangExt.UnboxedTuples
           , LangExt.UnicodeSyntax
           , LangExt.UnliftedFFITypes ]

-- | Record the source location of @-Wsafe@ so Safe Haskell diagnostics can
-- point at it.
setWarnSafe :: Bool -> DynP ()
setWarnSafe True  = getCurLoc >>= \l -> upd (\d -> d { warnSafeOnLoc = l })
setWarnSafe False = return ()

-- | Record the source location of @-Wunsafe@.
setWarnUnsafe :: Bool -> DynP ()
setWarnUnsafe True  = getCurLoc >>= \l -> upd (\d -> d { warnUnsafeOnLoc = l })
setWarnUnsafe False = return ()

-- | Enable @-fpackage-trust@ and remember where it was set.
setPackageTrust :: DynP ()
setPackageTrust = do
    setGeneralFlag Opt_PackageTrust
    l <- getCurLoc
    upd $ \d -> d { pkgTrustOnLoc = l }

-- | Remember where generalized newtype deriving was switched on.
setGenDeriving :: TurnOnFlag -> DynP ()
setGenDeriving True  = getCurLoc >>= \l -> upd (\d -> d { newDerivOnLoc = l })
setGenDeriving False = return ()

-- | Remember where DerivingVia was switched on.
setDeriveVia :: TurnOnFlag -> DynP ()
setDeriveVia True  = getCurLoc >>= \l -> upd (\d -> d { deriveViaOnLoc = l })
setDeriveVia False = return ()

-- | Remember where overlapping instances were switched on.
setOverlappingInsts :: TurnOnFlag -> DynP ()
setOverlappingInsts False = return ()
setOverlappingInsts True = do
  l <- getCurLoc
  upd (\d -> d { overlapInstLoc = l })

-- | Remember where incoherent instances were switched on.
setIncoherentInsts :: TurnOnFlag -> DynP ()
setIncoherentInsts False = return ()
setIncoherentInsts True = do
  l <- getCurLoc
  upd (\d -> d { incoherentOnLoc = l })

-- | Record the location at which Template Haskell was (un)set, for use in
-- Safe Haskell error messages.
checkTemplateHaskellOk :: TurnOnFlag -> DynP ()
checkTemplateHaskellOk _turn_on
  = getCurLoc >>= \l -> upd (\d -> d { thOnLoc = l })

{- **********************************************************************
%*                                                                      *
                DynFlags constructors
%*                                                                      *
%********************************************************************* -}

type DynP = EwM (CmdLineP DynFlags)

-- | Apply a pure update to the 'DynFlags' carried in the command-line state.
upd :: (DynFlags -> DynFlags) -> DynP ()
upd f = liftEwM (do dflags <- getCmdLineState
                    putCmdLineState $! f dflags)

-- | Monadic variant of 'upd', for updates that may emit warnings/errors.
updM :: (DynFlags -> DynP DynFlags) -> DynP ()
updM f = do dflags <- liftEwM getCmdLineState
            dflags' <- f dflags
            liftEwM $ putCmdLineState $! dflags'

--------------- Constructor functions for OptKind -----------------
noArg :: (DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
noArg fn = NoArg (upd fn)

noArgM :: (DynFlags -> DynP DynFlags) -> OptKind (CmdLineP DynFlags)
noArgM fn = NoArg (updM fn)

hasArg :: (String -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
hasArg fn = HasArg (upd . fn)

sepArg :: (String -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
sepArg fn = SepArg (upd . fn)

intSuffix :: (Int -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
intSuffix fn = IntSuffix (\n -> upd (fn n))

intSuffixM :: (Int -> DynFlags -> DynP DynFlags) -> OptKind (CmdLineP DynFlags)
intSuffixM fn = IntSuffix (\n -> updM (fn n))

wordSuffix :: (Word -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
wordSuffix fn = WordSuffix (\n -> upd (fn n))

floatSuffix :: (Float -> DynFlags -> DynFlags) -> OptKind (CmdLineP DynFlags)
floatSuffix fn = FloatSuffix (\n -> upd (fn n))

optIntSuffixM :: (Maybe Int -> DynFlags -> DynP DynFlags)
              -> OptKind (CmdLineP DynFlags)
optIntSuffixM fn = OptIntSuffix (\mi -> updM (fn mi))

setDumpFlag :: DumpFlag -> OptKind (CmdLineP DynFlags)
setDumpFlag dump_flag = NoArg (setDumpFlag' dump_flag)

--------------------------
addWayDynP :: Way -> DynP ()
addWayDynP = upd . addWay'

-- | Add a way and apply the general flags that the way implies (and remove
-- those it forbids) for the target platform.
addWay' :: Way -> DynFlags -> DynFlags
addWay' w dflags0 =
   let platform = targetPlatform dflags0
       dflags1 = dflags0 { targetWays_ = addWay w (targetWays_ dflags0) }
       dflags2 = foldr setGeneralFlag' dflags1
                       (wayGeneralFlags platform w)
       dflags3 = foldr unSetGeneralFlag' dflags2
                       (wayUnsetGeneralFlags platform w)
   in dflags3

removeWayDyn :: DynP ()
removeWayDyn = upd (\dfs -> dfs { targetWays_ = removeWay WayDyn (targetWays_ dfs) })

--------------------------
setGeneralFlag, unSetGeneralFlag :: GeneralFlag -> DynP ()
setGeneralFlag   f = upd (setGeneralFlag' f)
unSetGeneralFlag f = upd (unSetGeneralFlag' f)

setGeneralFlag' :: GeneralFlag -> DynFlags -> DynFlags
setGeneralFlag' f dflags = foldr ($) (gopt_set dflags f) deps
  where
    deps = [ if turn_on then setGeneralFlag'   d
                        else unSetGeneralFlag' d
           | (f', turn_on, d) <- impliedGFlags, f' == f ]
        -- When you set f, set the ones it implies
        -- NB: use setGeneralFlag recursively, in case the implied flags
        --     implies further flags

unSetGeneralFlag' :: GeneralFlag -> DynFlags -> DynFlags
unSetGeneralFlag' f dflags = foldr ($) (gopt_unset dflags f) deps
  where
    deps = [ if turn_on then setGeneralFlag' d
                        else unSetGeneralFlag' d
           | (f', turn_on, d) <- impliedOffGFlags, f' == f ]
   -- In general, when you un-set f, we don't un-set the things it implies.
   -- There are however some exceptions, e.g., -fno-strictness implies
   -- -fno-worker-wrapper.
   --
   -- NB: use unSetGeneralFlag' recursively, in case the implied off flags
   --     imply further flags.

--------------------------
setWarningFlag, unSetWarningFlag :: WarningFlag -> DynP ()
setWarningFlag   f = upd (\dfs -> wopt_set dfs f)
unSetWarningFlag f = upd (\dfs -> wopt_unset dfs f)

setFatalWarningFlag, unSetFatalWarningFlag :: WarningFlag -> DynP ()
setFatalWarningFlag   f = upd (\dfs -> wopt_set_fatal dfs f)
unSetFatalWarningFlag f = upd (\dfs -> wopt_unset_fatal dfs f)

setWErrorFlag :: WarningFlag -> DynP ()
setWErrorFlag flag =
  do { setWarningFlag flag
     ; setFatalWarningFlag flag }

--------------------------
setExtensionFlag, unSetExtensionFlag :: LangExt.Extension -> DynP ()
setExtensionFlag f = upd (setExtensionFlag' f)
unSetExtensionFlag f = upd (unSetExtensionFlag' f)

setExtensionFlag', unSetExtensionFlag' :: LangExt.Extension -> DynFlags -> DynFlags
setExtensionFlag' f dflags = foldr ($) (xopt_set dflags f) deps
  where
    deps = [ if turn_on then setExtensionFlag'   d
                        else unSetExtensionFlag' d
           | (f', turn_on, d) <- impliedXFlags, f' == f ]
        -- When you set f, set the ones it implies
        -- NB: use setExtensionFlag recursively, in case the implied flags
        --     implies further flags

unSetExtensionFlag' f dflags = xopt_unset dflags f
   -- When you un-set f, however, we don't un-set the things it implies
   --      (except for -fno-glasgow-exts, which is treated specially)

--------------------------
alterToolSettings :: (ToolSettings -> ToolSettings) -> DynFlags -> DynFlags
alterToolSettings f dynFlags = dynFlags { toolSettings = f (toolSettings dynFlags) }

--------------------------
setDumpFlag' :: DumpFlag -> DynP ()
setDumpFlag' dump_flag
  = do upd (\dfs -> dopt_set dfs dump_flag)
       when want_recomp forceRecompile
    where -- Certain dumpy-things are really interested in what's going
          -- on during recompilation checking, so in those cases we
          -- don't want to turn it off.
          want_recomp = dump_flag `notElem` [Opt_D_dump_if_trace,
                                             Opt_D_dump_hi_diffs,
                                             Opt_D_no_debug_output]

forceRecompile :: DynP ()
-- Whenever we -ddump, force recompilation (by switching off the
-- recompilation checker), else you don't see the dump! However,
-- don't switch it off in --make mode, else *everything* gets
-- recompiled which probably isn't what you want
forceRecompile = do dfs <- liftEwM getCmdLineState
                    when (force_recomp dfs) (setGeneralFlag Opt_ForceRecomp)
        where
          force_recomp dfs = isOneShot (ghcMode dfs)

setVerbosity :: Maybe Int -> DynP ()
setVerbosity mb_n = upd (\dfs -> dfs{ verbosity = mb_n `orElse` 3 })

-- | Set the debug level; levels above 2 also expose internal symbols.
setDebugLevel :: Maybe Int -> DynP ()
setDebugLevel mb_n =
  upd (\dfs -> exposeSyms $ dfs{ debugLevel = n })
  where
    n = mb_n `orElse` 2
    exposeSyms
      | n > 2     = setGeneralFlag' Opt_ExposeInternalSymbols
      | otherwise = id

data PkgDbRef
  = GlobalPkgDb
  | UserPkgDb
  | PkgDbPath FilePath
  deriving Eq

addPkgDbRef :: PkgDbRef -> DynP ()
addPkgDbRef p = upd $ \s ->
  s { packageDBFlags = PackageDB p : packageDBFlags s }

removeUserPkgDb :: DynP ()
removeUserPkgDb = upd $ \s ->
  s { packageDBFlags = NoUserPackageDB : packageDBFlags s }

removeGlobalPkgDb :: DynP ()
removeGlobalPkgDb = upd $ \s ->
 s { packageDBFlags = NoGlobalPackageDB : packageDBFlags s }

clearPkgDb :: DynP ()
clearPkgDb = upd $ \s ->
  s { packageDBFlags = ClearPackageDBs : packageDBFlags s }

-- | Parse a @-package@-style flag, including the optional
-- @with (A as B, ...)@ module-renaming syntax.  Throws 'CmdLineError' on a
-- parse failure.
parsePackageFlag :: String                 -- the flag
                 -> ReadP PackageArg       -- type of argument
                 -> String                 -- string to parse
                 -> PackageFlag
parsePackageFlag flag arg_parse str
 = case filter ((=="").snd) (readP_to_S parse str) of
    [(r, "")] -> r
    _ -> throwGhcException $ CmdLineError ("Can't parse package flag: " ++ str)
  where doc = flag ++ " " ++ str
        parse = do
            pkg_arg <- tok arg_parse
            let mk_expose = ExposePackage doc pkg_arg
            ( do _ <- tok $ string "with"
                 fmap (mk_expose . ModRenaming True) parseRns
             <++ fmap (mk_expose . ModRenaming False) parseRns
             <++ return (mk_expose (ModRenaming True [])))
        parseRns = do _ <- tok $ R.char '('
                      rns <- tok $ sepBy parseItem (tok $ R.char ',')
                      _ <- tok $ R.char ')'
                      return rns
        parseItem = do
            orig <- tok $ parseModuleName
            (do _ <- tok $ string "as"
                new <- tok $ parseModuleName
                return (orig, new)
              +++
             return (orig, orig))
        tok m = m >>= \x -> skipSpaces >> return x

exposePackage, exposePackageId, hidePackage,
        exposePluginPackage, exposePluginPackageId,
        ignorePackage,
        trustPackage, distrustPackage :: String -> DynP ()
exposePackage p = upd (exposePackage' p)
exposePackageId p =
  upd (\s -> s{ packageFlags =
    parsePackageFlag "-package-id" parseUnitArg p : packageFlags s })
exposePluginPackage p =
  upd (\s -> s{ pluginPackageFlags =
    parsePackageFlag "-plugin-package" parsePackageArg p : pluginPackageFlags s })
exposePluginPackageId p =
  upd (\s -> s{ pluginPackageFlags =
    parsePackageFlag "-plugin-package-id" parseUnitArg p : pluginPackageFlags s })
hidePackage p =
  upd (\s -> s{ packageFlags = HidePackage p : packageFlags s })
ignorePackage p =
  upd (\s -> s{ ignorePackageFlags = IgnorePackage p : ignorePackageFlags s })

trustPackage p = exposePackage p >> -- both trust and distrust also expose a package
  upd (\s -> s{ trustFlags = TrustPackage p : trustFlags s })
distrustPackage p = exposePackage p >>
  upd (\s -> s{ trustFlags = DistrustPackage p : trustFlags s })

exposePackage' :: String -> DynFlags -> DynFlags
exposePackage' p dflags
    = dflags { packageFlags =
            parsePackageFlag "-package" parsePackageArg p : packageFlags dflags }

parsePackageArg :: ReadP PackageArg
parsePackageArg =
    fmap PackageArg (munch1 (\c -> isAlphaNum c || c `elem` ":-_."))

parseUnitArg :: ReadP PackageArg
parseUnitArg =
    fmap UnitIdArg parseUnit

setUnitId :: String -> DynFlags -> DynFlags
setUnitId p d = d { homeUnitId_ = stringToUnitId p }

setWorkingDirectory :: String -> DynFlags -> DynFlags
setWorkingDirectory p d = d { workingDirectory =  Just p }

{- Note [Filepaths and Multiple Home Units]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is common to assume that a package is compiled in the directory where its
cabal file resides. Thus, all paths used in the compiler are assumed to be relative
to this directory. When there are multiple home units the compiler is often
not operating in the standard directory and instead where the cabal.project
file is located. In this case the `-working-dir` option can be passed which specifies
the path from the current directory to the directory the unit assumes to be it's root,
normally the directory which contains the cabal file.

When the flag is passed, any relative paths used by the compiler are offset
by the working directory. Notably this includes `-i`, `-I⟨dir⟩`, `-hidir`, `-odir` etc
and the location of input files.
-}

-- | Prefix a relative path with the unit's working directory, if one was set
-- with @-working-dir@.  Absolute paths pass through unchanged.
--
-- FIX: the extraction of this file had dropped the '</>' path-join operator
-- (the text read "offset fp", applying a FilePath as a function); restored.
augmentByWorkingDirectory :: DynFlags -> FilePath -> FilePath
augmentByWorkingDirectory dflags fp | isRelative fp, Just offset <- workingDirectory dflags = offset </> fp
augmentByWorkingDirectory _ fp = fp

setPackageName :: String -> DynFlags -> DynFlags
setPackageName p d = d { thisPackageName =  Just p }

addHiddenModule :: String -> DynP ()
addHiddenModule p =
  upd (\s -> s{ hiddenModules  = Set.insert (mkModuleName p) (hiddenModules s) })

addReexportedModule :: String -> DynP ()
addReexportedModule p =
  upd (\s -> s{ reexportedModules  = Set.insert (mkModuleName p) (reexportedModules s) })

-- If we're linking a binary, then only backends that produce object
-- code are allowed (requests for other target types are ignored).
setBackend :: Backend -> DynP ()
setBackend l = upd $ \ dfs ->
  if ghcLink dfs /= LinkBinary || backendProducesObject l
  then dfs{ backend = l }
  else dfs

-- Changes the target only if we're compiling object code.  This is
-- used by -fasm and -fllvm, which switch from one to the other, but
-- not from bytecode to object-code.  The idea is that -fasm/-fllvm
-- can be safely used in an OPTIONS_GHC pragma.
setObjBackend :: Backend -> DynP ()
setObjBackend l = updM set
  where
   set dflags
     | backendProducesObject (backend dflags)
     = return $ dflags { backend = l }
     | otherwise = return dflags

setOptLevel :: Int -> DynFlags -> DynP DynFlags
setOptLevel n dflags = return (updOptLevel n dflags)

-- | Parse and record one @-fprof-callers@ filter; reports a parse error via
-- the command-line error mechanism.
setCallerCcFilters :: String -> DynP ()
setCallerCcFilters arg =
  case parseCallerCcFilter arg of
    Right filt -> upd $ \d -> d { callerCcFilters = filt : callerCcFilters d }
    Left err -> addErr err

-- | Interpret the argument of @-main-is@, which may name a function, a
-- module, or a qualified function.
setMainIs :: String -> DynP ()
setMainIs arg
  | not (null main_fn) && isLower (head main_fn)
     -- The arg looked like "Foo.Bar.baz"
  = upd $ \d -> d { mainFunIs = Just main_fn,
                    mainModuleNameIs = mkModuleName main_mod }

  | isUpper (head arg)  -- The arg looked like "Foo" or "Foo.Bar"
  = upd $ \d -> d { mainModuleNameIs = mkModuleName arg }

  | otherwise                   -- The arg looked like "baz"
  = upd $ \d -> d { mainFunIs = Just arg }
  where
    (main_mod, main_fn) = splitLongestPrefix arg (== '.')

addLdInputs :: Option -> DynFlags -> DynFlags
addLdInputs p dflags = dflags{ldInputs = ldInputs dflags ++ [p]}

-- -----------------------------------------------------------------------------
-- Load dynflags from environment files.

setFlagsFromEnvFile :: FilePath -> String -> DynP ()
setFlagsFromEnvFile envfile content = do
  setGeneralFlag Opt_HideAllPackages
  parseEnvFile envfile content

-- | Interpret each line of a GHC package-environment file.
--
-- FIX: the extraction of this file had dropped the '</>' path-join operator
-- in the @package-db@ case (the text read "envdir db", applying a FilePath
-- as a function); restored.
parseEnvFile :: FilePath -> String -> DynP ()
parseEnvFile envfile = mapM_ parseEntry . lines
  where
    parseEntry str = case words str of
      ("package-db": _)     -> addPkgDbRef (PkgDbPath (envdir </> db))
        -- relative package dbs are interpreted relative to the env file
        where envdir = takeDirectory envfile
              db     = drop 11 str
      ["clear-package-db"]  -> clearPkgDb
      ["hide-package", pkg] -> hidePackage pkg
      ["global-package-db"] -> addPkgDbRef GlobalPkgDb
      ["user-package-db"]   -> addPkgDbRef UserPkgDb
      ["package-id", pkgid] -> exposePackageId pkgid
      (('-':'-':_):_)       -> return () -- comments
      -- and the original syntax introduced in 7.10:
      [pkgid]               -> exposePackageId pkgid
      []                    -> return ()
      _                     -> throwGhcException $ CmdLineError $
                                    "Can't parse environment file entry: " ++
                                    envfile ++ ": " ++ str

-----------------------------------------------------------------------------
-- Paths & Libraries

addImportPath, addLibraryPath, addIncludePath, addFrameworkPath :: FilePath -> DynP ()

-- -i on its own deletes the import paths
addImportPath "" = upd (\s -> s{importPaths = []})
addImportPath p  = upd (\s -> s{importPaths = importPaths s ++ splitPathList p})

addLibraryPath p =
  upd (\s -> s{libraryPaths = libraryPaths s ++ splitPathList p})

addIncludePath p =
  upd (\s -> s{includePaths =
                  addGlobalInclude (includePaths s) (splitPathList p)})

addFrameworkPath p =
  upd (\s -> s{frameworkPaths = frameworkPaths s ++ splitPathList p})

#if !defined(mingw32_HOST_OS)
split_marker :: Char
split_marker = ':'   -- not configurable (ToDo)
#endif

splitPathList :: String -> [String]
splitPathList s = filter notNull (splitUp s)
                -- empty paths are ignored: there might be a trailing
                -- ':' in the initial list, for example.  Empty paths can
                -- cause confusion when they are translated into -I options
                -- for passing to gcc.
  where
#if !defined(mingw32_HOST_OS)
    splitUp xs = split split_marker xs
#else
     -- Windows: 'hybrid' support for DOS-style paths in directory lists.
     --
     -- That is, if "foo:bar:baz" is used, this interpreted as
     -- consisting of three entries, 'foo', 'bar', 'baz'.
     -- However, with "c:/foo:c:\\foo;x:/bar", this is interpreted
     -- as 3 elts, "c:/foo", "c:\\foo", "x:/bar"
     --
     -- Notice that no attempt is made to fully replace the 'standard'
     -- split marker ':' with the Windows / DOS one, ';'. The reason being
     -- that this will cause too much breakage for users & ':' will
     -- work fine even with DOS paths, if you're not insisting on being silly.
     -- So, use either.
    splitUp []             = []
    splitUp (x:':':div:xs) | div `elem` dir_markers
                           = ((x:':':div:p): splitUp rs)
                           where
                              (p,rs) = findNextPath xs
          -- we used to check for existence of the path here, but that
          -- required the IO monad to be threaded through the command-line
          -- parser which is quite inconvenient.
    splitUp xs = cons p (splitUp rs)
               where
                 (p,rs) = findNextPath xs

                 cons "" xs = xs
                 cons x  xs = x:xs

    -- will be called either when we've consumed nought or the
    -- ":/" part of a DOS path, so splitting is just a Q of
    -- finding the next split marker.
    findNextPath xs =
        case break (`elem` split_markers) xs of
           (p, _:ds) -> (p, ds)
           (p, xs)   -> (p, xs)

    split_markers :: [Char]
    split_markers = [':', ';']

    dir_markers :: [Char]
    dir_markers = ['/', '\\']
#endif

-- -----------------------------------------------------------------------------
-- tmpDir, where we store temporary files.

setTmpDir :: FilePath -> DynFlags -> DynFlags
setTmpDir dir d = d { tmpDir = TempDir (normalise dir) }
  -- we used to fix /cygdrive/c/.. on Windows, but this doesn't
  -- seem necessary now --SDM 7/2/2008

-----------------------------------------------------------------------------
-- RTS opts

setRtsOpts :: String -> DynP ()
setRtsOpts arg  = upd $ \ d -> d {rtsOpts = Just arg}

setRtsOptsEnabled :: RtsOptsEnabled -> DynP ()
setRtsOptsEnabled arg  = upd $ \ d -> d {rtsOptsEnabled = arg}

-----------------------------------------------------------------------------
-- Hpc stuff

setOptHpcDir :: String -> DynP ()
setOptHpcDir arg  = upd $ \ d -> d {hpcDir = arg}

-----------------------------------------------------------------------------
-- Via-C compilation stuff

-- There are some options that we need to pass to gcc when compiling
-- Haskell code via C, but are only supported by recent versions of
-- gcc.  The configure script decides which of these options we need,
-- and puts them in the "settings" file in $topdir. The advantage of
-- having these in a separate file is that the file can be created at
-- install-time depending on the available gcc version, and even
-- re-generated later if gcc is upgraded.
--
-- The options below are not dependent on the version of gcc, only the
-- platform.

picCCOpts :: DynFlags -> [String]
picCCOpts dflags = case platformOS (targetPlatform dflags) of
      OSDarwin
          -- Apple prefers to do things the other way round.
          -- PIC is on by default.
          -- -mdynamic-no-pic:
          --     Turn off PIC code generation.
          -- -fno-common:
          --     Don't generate "common" symbols - these are unwanted
          --     in dynamic libraries.

       | gopt Opt_PIC dflags -> ["-fno-common", "-U__PIC__", "-D__PIC__"]
       | otherwise           -> ["-mdynamic-no-pic"]
      OSMinGW32 -- no -fPIC for Windows
       | gopt Opt_PIC dflags -> ["-U__PIC__", "-D__PIC__"]
       | otherwise           -> []
      _
      -- we need -fPIC for C files when we are compiling with -dynamic,
      -- otherwise things like stub.c files don't get compiled
      -- correctly.  They need to reference data in the Haskell
      -- objects, but can't without -fPIC.  See
      -- https://gitlab.haskell.org/ghc/ghc/wikis/commentary/position-independent-code
       | gopt Opt_PIC dflags || ways dflags `hasWay` WayDyn ->
          ["-fPIC", "-U__PIC__", "-D__PIC__"]
      -- gcc may be configured to have PIC on by default, let's be
      -- explicit here, see #15847
       | otherwise -> ["-fno-PIC"]

pieCCLDOpts :: DynFlags -> [String]
pieCCLDOpts dflags
      | gopt Opt_PICExecutable dflags       = ["-pie"]
        -- See Note [No PIE when linking]
      | toolSettings_ccSupportsNoPie (toolSettings dflags) = ["-no-pie"]
      | otherwise                           = []

{-
Note [No PIE when linking]
~~~~~~~~~~~~~~~~~~~~~~~~~~
As of 2016 some Linux distributions (e.g. Debian) have started enabling -pie by
default in their gcc builds. This is incompatible with -r as it implies that we
are producing an executable. Consequently, we must manually pass -no-pie to gcc
when joining object files or linking dynamic libraries. Unless, of course, the
user has explicitly requested a PIE executable with -pie. See #12759.
-}

picPOpts :: DynFlags -> [String]
picPOpts dflags
 | gopt Opt_PIC dflags = ["-U__PIC__", "-D__PIC__"]
 | otherwise           = []

-- -----------------------------------------------------------------------------
-- Compiler Info

compilerInfo :: DynFlags -> [(String, String)]
compilerInfo dflags
    = -- We always make "Project name" be first to keep parsing in
      -- other languages simple, i.e. when looking for other fields,
      -- you don't have to worry whether there is a leading '[' or not
      ("Project name",                 cProjectName)
      -- Next come the settings, so anything else can be overridden
      -- in the settings file (as "lookup" uses the first match for the
      -- key)
    : map (fmap $ expandDirectories (topDir dflags) (toolDir dflags))
          (rawSettings dflags)
   ++ [("Project version",             projectVersion dflags),
       ("Project Git commit id",       cProjectGitCommitId),
       ("Project Version Int",         cProjectVersionInt),
       ("Project Patch Level",         cProjectPatchLevel),
       ("Project Patch Level1",        cProjectPatchLevel1),
       ("Project Patch Level2",        cProjectPatchLevel2),
       ("Booter version",              cBooterVersion),
       ("Stage",                       cStage),
       ("Build platform",              cBuildPlatformString),
       ("Host platform",               cHostPlatformString),
       ("Target platform",             platformMisc_targetPlatformString $ platformMisc dflags),
       ("Have interpreter",            showBool $ platformMisc_ghcWithInterpreter $ platformMisc dflags),
       ("Object splitting supported",  showBool False),
       ("Have native code generator",  showBool $ platformNcgSupported (targetPlatform dflags)),
       ("Target default backend",      show $ platformDefaultBackend (targetPlatform dflags)),
       -- Whether or not we support @-dynamic-too@
       ("Support dynamic-too",         showBool $ not isWindows),
       -- Whether or not we support the @-j@ flag with @--make@.
       ("Support parallel --make",     "YES"),
       -- Whether or not we support "Foo from foo-0.1-XXX:Foo" syntax in
       -- installed package info.
       ("Support reexported-modules",  "YES"),
       -- Whether or not we support extended @-package foo (Foo)@ syntax.
       ("Support thinning and renaming package flags", "YES"),
       -- Whether or not we support Backpack.
       ("Support Backpack", "YES"),
       -- If true, we require that the 'id' field in installed package info
       -- match what is passed to the @-this-unit-id@ flag for modules
       -- built in it
       ("Requires unified installed package IDs", "YES"),
       -- Whether or not we support the @-this-package-key@ flag.  Prefer
       -- "Uses unit IDs" over it.  We still say yes even if @-this-package-key@
       -- flag has been removed, otherwise it breaks Cabal...
       ("Uses package keys",           "YES"),
       -- Whether or not we support the @-this-unit-id@ flag
       ("Uses unit IDs",               "YES"),
       -- Whether or not GHC was compiled using -dynamic
       ("GHC Dynamic",                 showBool hostIsDynamic),
       -- Whether or not GHC was compiled using -prof
       ("GHC Profiled",                showBool hostIsProfiled),
       ("Debug on",                    showBool debugIsOn),
       ("LibDir",                      topDir dflags),
       -- The path of the global package database used by GHC
       ("Global Package DB",           globalPackageDatabasePath dflags)
      ]
  where
    showBool True  = "YES"
    showBool False = "NO"
    platform  = targetPlatform dflags
    isWindows = platformOS platform == OSMinGW32
    useInplaceMinGW = toolSettings_useInplaceMinGW $ toolSettings dflags
    expandDirectories :: FilePath -> Maybe FilePath -> String -> String
    expandDirectories topd mtoold = expandToolDir useInplaceMinGW mtoold . expandTopDir topd

-- | Get target profile
targetProfile :: DynFlags -> Profile
targetProfile dflags = Profile (targetPlatform dflags) (ways dflags)

{- -----------------------------------------------------------------------------
Note [DynFlags consistency]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There are a number of DynFlags configurations which either do not make
sense or lead to unimplemented or buggy codepaths in the compiler.
makeDynFlagsConsistent is responsible for verifying the validity of a set
of DynFlags, fixing any issues, and reporting them back to the caller.

GHCi and -O
---------------

When using optimization, the compiler can introduce several things
(such as unboxed tuples) into the intermediate code, which GHCi later
chokes on since the bytecode interpreter can't handle this (and while
this is arguably a bug these aren't handled, there are no plans to fix
it.)

While the driver pipeline always checks for this particular erroneous
combination when parsing flags, we also need to check when we update
the flags; this is because API clients may parse flags but update the
DynFlags afterwords, before finally running code inside a session (see
T10052 and #10052).
-}

-- | Resolve any internal inconsistencies in a set of 'DynFlags'.
-- Returns the consistent 'DynFlags' as well as a list of warnings
-- to report to the user.
makeDynFlagsConsistent :: DynFlags -> (DynFlags, [Located String])
-- Whenever makeDynFlagsConsistent does anything, it starts over, to
-- ensure that a later change doesn't invalidate an earlier check.
-- Be careful not to introduce potential loops!
makeDynFlagsConsistent dflags
 -- Disable -dynamic-too on Windows (#8228, #7134, #5987)
 | os == OSMinGW32 && gopt Opt_BuildDynamicToo dflags
    = let dflags' = gopt_unset dflags Opt_BuildDynamicToo
          warn    = "-dynamic-too is not supported on Windows"
      in loop dflags' warn

 -- Disable -dynamic-too if we are compiling with -dynamic already, otherwise
 -- you get two dynamic object files (.o and .dyn_o). (#20436)
 | ways dflags `hasWay` WayDyn && gopt Opt_BuildDynamicToo dflags
    = let dflags' = gopt_unset dflags Opt_BuildDynamicToo
          warn    = "-dynamic-too is ignored when using -dynamic"
      in loop dflags' warn

 -- Via-C backend only supports unregisterised ABI. Switch to a backend
 -- supporting it if possible.
 | backend dflags == ViaC &&
   not (platformUnregisterised (targetPlatform dflags))
    = case platformDefaultBackend (targetPlatform dflags) of
         NCG ->  let dflags' = dflags { backend = NCG }
                     warn = "Target platform doesn't use unregisterised ABI, so using native code generator rather than compiling via C"
                 in loop dflags' warn
         LLVM -> let dflags' = dflags { backend = LLVM }
                     warn = "Target platform doesn't use unregisterised ABI, so using LLVM rather than compiling via C"
                 in loop dflags' warn
         _    -> pgmError "Compiling via C only supports unregisterised ABI but target platform doesn't use it."

 | gopt Opt_Hpc dflags && backend dflags == Interpreter
    = let dflags' = gopt_unset dflags Opt_Hpc
          warn = "Hpc can't be used with byte-code interpreter. Ignoring -fhpc."
      in loop dflags' warn

 | backend dflags `elem` [NCG, LLVM] &&
   platformUnregisterised (targetPlatform dflags)
    = loop (dflags { backend = ViaC })
           "Target platform uses unregisterised ABI, so compiling via C"

 | backend dflags == NCG &&
   not (platformNcgSupported $ targetPlatform dflags)
      = let dflags' = dflags { backend = LLVM }
            warn = "Native code generator doesn't support target platform, so using LLVM"
        in loop dflags' warn

 | not (osElfTarget os) && gopt Opt_PIE dflags
    = loop (gopt_unset dflags Opt_PIE)
           "Position-independent only supported on ELF platforms"

 | os == OSDarwin &&
   arch == ArchX86_64 &&
   not (gopt Opt_PIC dflags)
    = loop (gopt_set dflags Opt_PIC)
           "Enabling -fPIC as it is always on for this platform"

 | backend dflags == Interpreter
 , let (dflags', changed) = updOptLevelChanged 0 dflags
 , changed
    = loop dflags' "Optimization flags conflict with --interactive; optimization flags ignored."

 | LinkInMemory <- ghcLink dflags
 , not (gopt Opt_ExternalInterpreter dflags)
 , hostIsProfiled
 , backendProducesObject (backend dflags)
 , ways dflags `hasNotWay` WayProf
    = loop dflags{targetWays_ = addWay WayProf (targetWays_ dflags)}
         "Enabling -prof, because -fobject-code is enabled and GHCi is profiled"

 | LinkMergedObj <- ghcLink dflags
 , Nothing <- outputFile dflags
 = pgmError "--output must be specified when using --merge-objs"

 | otherwise = (dflags, [])
    where loc = mkGeneralSrcSpan (fsLit "when making flags consistent")
          loop updated_dflags warning
              = case makeDynFlagsConsistent updated_dflags of
                (dflags', ws) -> (dflags', L loc warning : ws)
          platform = targetPlatform dflags
          arch = platformArch platform
          os   = platformOS   platform

-- | Publish a few global (IORef-backed) settings read by pretty-printing and
-- debug-output code that has no access to DynFlags.
setUnsafeGlobalDynFlags :: DynFlags -> IO ()
setUnsafeGlobalDynFlags dflags = do
   writeIORef v_unsafeHasPprDebug (hasPprDebug dflags)
   writeIORef v_unsafeHasNoDebugOutput (hasNoDebugOutput dflags)
   writeIORef v_unsafeHasNoStateHack (hasNoStateHack dflags)

-- -----------------------------------------------------------------------------
-- SSE and AVX

isSse4_2Enabled :: DynFlags -> Bool
isSse4_2Enabled dflags = sseVersion dflags >= Just SSE42

isAvxEnabled :: DynFlags -> Bool
isAvxEnabled dflags = avx dflags || avx2 dflags || avx512f dflags

isAvx2Enabled :: DynFlags -> Bool
isAvx2Enabled dflags = avx2 dflags || avx512f dflags

isAvx512cdEnabled :: DynFlags -> Bool
isAvx512cdEnabled dflags = avx512cd dflags

isAvx512erEnabled :: DynFlags -> Bool
isAvx512erEnabled dflags = avx512er dflags

isAvx512fEnabled :: DynFlags -> Bool
isAvx512fEnabled dflags = avx512f dflags

isAvx512pfEnabled :: DynFlags -> Bool
isAvx512pfEnabled dflags = avx512pf dflags

-- -----------------------------------------------------------------------------
-- BMI2

isBmiEnabled :: DynFlags -> Bool
isBmiEnabled dflags = case platformArch (targetPlatform dflags) of
    ArchX86_64 -> bmiVersion dflags >= Just BMI1
    ArchX86    -> bmiVersion dflags >= Just BMI1
    _          -> False

isBmi2Enabled :: DynFlags -> Bool
isBmi2Enabled dflags = case platformArch (targetPlatform dflags) of
    ArchX86_64 -> bmiVersion dflags >= Just BMI2
    ArchX86    -> bmiVersion dflags >= Just BMI2
    _          -> False

-- | Indicate if cost-centre profiling is enabled
sccProfilingEnabled :: DynFlags -> Bool
sccProfilingEnabled dflags = profileIsProfiling (targetProfile dflags)

-- | Indicate whether we need to generate source notes
needSourceNotes :: DynFlags -> Bool
needSourceNotes dflags = debugLevel dflags > 0
                       || gopt Opt_InfoTableMap dflags

-- -----------------------------------------------------------------------------
-- Linker/compiler information

-- LinkerInfo contains any extra options needed by the system linker.
data LinkerInfo
  = GnuLD    [Option]
  | GnuGold  [Option]
  | LlvmLLD  [Option]
  | DarwinLD [Option]
  | SolarisLD [Option]
  | AixLD    [Option]
  | UnknownLD
  deriving Eq

-- CompilerInfo tells us which C compiler we're using
data CompilerInfo
   = GCC
   | Clang
   | AppleClang
   | AppleClang51
   | UnknownCC
   deriving Eq

-- | Should we use `-XLinker -rpath` when linking or not?
-- See Note [-fno-use-rpaths]
useXLinkerRPath :: DynFlags -> OS -> Bool
useXLinkerRPath _ OSDarwin = False -- See Note [Dynamic linking on macOS]
useXLinkerRPath dflags _ = gopt Opt_RPath dflags

{-
Note [-fno-use-rpaths]
~~~~~~~~~~~~~~~~~~~~~~

First read, Note [Dynamic linking on macOS] to understand why on darwin we never
use `-XLinker -rpath`.

The specification of `Opt_RPath` is as follows:

The default case `-fuse-rpaths`:
* On darwin, never use `-Xlinker -rpath -Xlinker`, always inject the rpath
  afterwards, see `runInjectRPaths`. There is no way to use `-Xlinker` on darwin
  as things stand but it wasn't documented in the user guide before this patch how
  `-fuse-rpaths` should behave and the fact it was always disabled on darwin.
* Otherwise, use `-Xlinker -rpath -Xlinker` to set the rpath of the executable,
  this is the normal way you should set the rpath.

The case of `-fno-use-rpaths`
* Never inject anything into the rpath.
When this was first implemented, `Opt_RPath` was disabled on darwin, but the rpath was still always augmented by `runInjectRPaths`, and there was no way to stop this. This was problematic because you couldn't build an executable in CI with a clean rpath. -} -- ----------------------------------------------------------------------------- -- RTS hooks -- Convert sizes like "3.5M" into integers decodeSize :: String -> Integer decodeSize str | c == "" = truncate n | c == "K" || c == "k" = truncate (n * 1000) | c == "M" || c == "m" = truncate (n * 1000 * 1000) | c == "G" || c == "g" = truncate (n * 1000 * 1000 * 1000) | otherwise = throwGhcException (CmdLineError ("can't decode size: " ++ str)) where (m, c) = span pred str n = readRational m pred c = isDigit c || c == '.' foreign import ccall unsafe "ghc_lib_parser_setHeapSize" setHeapSize :: Int -> IO () foreign import ccall unsafe "ghc_lib_parser_enableTimingStats" enableTimingStats :: IO () -- | Initialize the pretty-printing options initSDocContext :: DynFlags -> PprStyle -> SDocContext initSDocContext dflags style = SDC { sdocStyle = style , sdocColScheme = colScheme dflags , sdocLastColour = Col.colReset , sdocShouldUseColor = overrideWith (canUseColor dflags) (useColor dflags) , sdocDefaultDepth = pprUserLength dflags , sdocLineLength = pprCols dflags , sdocCanUseUnicode = useUnicode dflags , sdocHexWordLiterals = gopt Opt_HexWordLiterals dflags , sdocPprDebug = dopt Opt_D_ppr_debug dflags , sdocPrintUnicodeSyntax = gopt Opt_PrintUnicodeSyntax dflags , sdocPrintCaseAsLet = gopt Opt_PprCaseAsLet dflags , sdocPrintTypecheckerElaboration = gopt Opt_PrintTypecheckerElaboration dflags , sdocPrintAxiomIncomps = gopt Opt_PrintAxiomIncomps dflags , sdocPrintExplicitKinds = gopt Opt_PrintExplicitKinds dflags , sdocPrintExplicitCoercions = gopt Opt_PrintExplicitCoercions dflags , sdocPrintExplicitRuntimeReps = gopt Opt_PrintExplicitRuntimeReps dflags , sdocPrintExplicitForalls = gopt Opt_PrintExplicitForalls dflags , 
sdocPrintPotentialInstances = gopt Opt_PrintPotentialInstances dflags , sdocPrintEqualityRelations = gopt Opt_PrintEqualityRelations dflags , sdocSuppressTicks = gopt Opt_SuppressTicks dflags , sdocSuppressTypeSignatures = gopt Opt_SuppressTypeSignatures dflags , sdocSuppressTypeApplications = gopt Opt_SuppressTypeApplications dflags , sdocSuppressIdInfo = gopt Opt_SuppressIdInfo dflags , sdocSuppressCoercions = gopt Opt_SuppressCoercions dflags , sdocSuppressCoercionTypes = gopt Opt_SuppressCoercionTypes dflags , sdocSuppressUnfoldings = gopt Opt_SuppressUnfoldings dflags , sdocSuppressVarKinds = gopt Opt_SuppressVarKinds dflags , sdocSuppressUniques = gopt Opt_SuppressUniques dflags , sdocSuppressModulePrefixes = gopt Opt_SuppressModulePrefixes dflags , sdocSuppressStgExts = gopt Opt_SuppressStgExts dflags , sdocSuppressStgReps = gopt Opt_SuppressStgReps dflags , sdocErrorSpans = gopt Opt_ErrorSpans dflags , sdocStarIsType = xopt LangExt.StarIsType dflags , sdocImpredicativeTypes = xopt LangExt.ImpredicativeTypes dflags , sdocLinearTypes = xopt LangExt.LinearTypes dflags , sdocPrintTypeAbbreviations = True , sdocUnitIdForUser = ftext } -- | Initialize the pretty-printing options using the default user style initDefaultSDocContext :: DynFlags -> SDocContext initDefaultSDocContext dflags = initSDocContext dflags defaultUserStyle outputFile :: DynFlags -> Maybe String outputFile dflags | dynamicNow dflags = dynOutputFile_ dflags | otherwise = outputFile_ dflags objectSuf :: DynFlags -> String objectSuf dflags | dynamicNow dflags = dynObjectSuf_ dflags | otherwise = objectSuf_ dflags ways :: DynFlags -> Ways ways dflags | dynamicNow dflags = addWay WayDyn (targetWays_ dflags) | otherwise = targetWays_ dflags -- | Pretty-print the difference between 2 DynFlags. -- -- For now only their general flags but it could be extended. -- Useful mostly for debugging. 
pprDynFlagsDiff :: DynFlags -> DynFlags -> SDoc pprDynFlagsDiff d1 d2 = let gf_removed = EnumSet.difference (generalFlags d1) (generalFlags d2) gf_added = EnumSet.difference (generalFlags d2) (generalFlags d1) ext_removed = EnumSet.difference (extensionFlags d1) (extensionFlags d2) ext_added = EnumSet.difference (extensionFlags d2) (extensionFlags d1) in vcat [ text "Added general flags:" , text $ show $ EnumSet.toList $ gf_added , text "Removed general flags:" , text $ show $ EnumSet.toList $ gf_removed , text "Added extension flags:" , text $ show $ EnumSet.toList $ ext_added , text "Removed extension flags:" , text $ show $ EnumSet.toList $ ext_removed ] updatePlatformConstants :: DynFlags -> Maybe PlatformConstants -> IO DynFlags updatePlatformConstants dflags mconstants = do let platform1 = (targetPlatform dflags) { platform_constants = mconstants } let dflags1 = dflags { targetPlatform = platform1 } return dflags1 ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap.hs0000644000000000000000000003635114472375231021632 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeInType #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE UnliftedFFITypes #-} {-| Module : GHC.Exts.Heap Copyright : (c) 2012 Joachim Breitner License : BSD3 Maintainer : Joachim Breitner With this module, you can investigate the heap representation of Haskell values, i.e. to investigate sharing and lazy evaluation. -} module GHC.Exts.Heap ( -- * Closure types Closure , GenClosure(..) , ClosureType(..) , PrimType(..) , WhatNext(..) , WhyBlocked(..) , TsoFlags(..) , HasHeapRep(getClosureData) , getClosureDataFromHeapRep , getClosureDataFromHeapRepPrim -- * Info Table types , StgInfoTable(..) , EntryFunPtr , HalfWord , ItblCodes , itblSize , peekItbl , pokeItbl -- * Cost Centre (profiling) types , StgTSOProfInfo(..) , IndexTable(..) 
, CostCentre(..) , CostCentreStack(..) -- * Closure inspection , getBoxedClosureData , allClosures -- * Boxes , Box(..) , asBox , areBoxesEqual ) where import Prelude import GHC.Exts.Heap.Closures import GHC.Exts.Heap.ClosureTypes import GHC.Exts.Heap.Constants import GHC.Exts.Heap.ProfInfo.Types #if defined(PROFILING) import GHC.Exts.Heap.InfoTableProf #else import GHC.Exts.Heap.InfoTable #endif import GHC.Exts.Heap.Utils import qualified GHC.Exts.Heap.FFIClosures as FFIClosures import qualified GHC.Exts.Heap.ProfInfo.PeekProfInfo as PPI import Control.Monad import Data.Bits import Foreign import GHC.Exts import GHC.Int import GHC.Word #include "ghcconfig.h" class HasHeapRep (a :: TYPE rep) where -- | Decode a closure to it's heap representation ('GenClosure'). getClosureData :: a -- ^ Closure to decode. -> IO Closure -- ^ Heap representation of the closure. #if __GLASGOW_HASKELL__ >= 901 instance HasHeapRep (a :: TYPE ('BoxedRep 'Lifted)) where #else instance HasHeapRep (a :: TYPE 'LiftedRep) where #endif getClosureData = getClosureDataFromHeapObject #if __GLASGOW_HASKELL__ >= 901 instance HasHeapRep (a :: TYPE ('BoxedRep 'Unlifted)) where #else instance HasHeapRep (a :: TYPE 'UnliftedRep) where #endif getClosureData x = getClosureDataFromHeapObject (unsafeCoerce# x) instance Int# ~ a => HasHeapRep (a :: TYPE 'IntRep) where getClosureData x = return $ IntClosure { ptipe = PInt, intVal = I# x } instance Word# ~ a => HasHeapRep (a :: TYPE 'WordRep) where getClosureData x = return $ WordClosure { ptipe = PWord, wordVal = W# x } instance Int64# ~ a => HasHeapRep (a :: TYPE 'Int64Rep) where getClosureData x = return $ Int64Closure { ptipe = PInt64, int64Val = I64# (unsafeCoerce# x) } instance Word64# ~ a => HasHeapRep (a :: TYPE 'Word64Rep) where getClosureData x = return $ Word64Closure { ptipe = PWord64, word64Val = W64# (unsafeCoerce# x) } instance Addr# ~ a => HasHeapRep (a :: TYPE 'AddrRep) where getClosureData x = return $ AddrClosure { ptipe = PAddr, addrVal = 
I# (unsafeCoerce# x) } instance Float# ~ a => HasHeapRep (a :: TYPE 'FloatRep) where getClosureData x = return $ FloatClosure { ptipe = PFloat, floatVal = F# x } instance Double# ~ a => HasHeapRep (a :: TYPE 'DoubleRep) where getClosureData x = return $ DoubleClosure { ptipe = PDouble, doubleVal = D# x } -- | Get the heap representation of a closure _at this moment_, even if it is -- unevaluated or an indirection or other exotic stuff. Beware when passing -- something to this function, the same caveats as for -- 'GHC.Exts.Heap.Closures.asBox' apply. -- -- For most use cases 'getClosureData' is an easier to use alternative. -- -- Currently TSO and STACK objects will return `UnsupportedClosure`. This is -- because it is not memory safe to extract TSO and STACK objects (done via -- `unpackClosure#`). Other threads may be mutating those objects and interleave -- with reads in `unpackClosure#`. This is particularly problematic with STACKs -- where pointer values may be overwritten by non-pointer values as the -- corresponding haskell thread runs. getClosureDataFromHeapObject :: a -- ^ Heap object to decode. -> IO Closure -- ^ Heap representation of the closure. getClosureDataFromHeapObject x = do case unpackClosure# x of (# infoTableAddr, heapRep, pointersArray #) -> do let infoTablePtr = Ptr infoTableAddr ptrList = [case indexArray# pointersArray i of (# ptr #) -> Box ptr | I# i <- [0..I# (sizeofArray# pointersArray) - 1] ] infoTable <- peekItbl infoTablePtr case tipe infoTable of TSO -> pure $ UnsupportedClosure infoTable STACK -> pure $ UnsupportedClosure infoTable _ -> getClosureDataFromHeapRep heapRep infoTablePtr ptrList -- | Convert an unpacked heap object, to a `GenClosure b`. The inputs to this -- function can be generated from a heap object using `unpackClosure#`. 
getClosureDataFromHeapRep :: ByteArray# -> Ptr StgInfoTable -> [b] -> IO (GenClosure b) getClosureDataFromHeapRep heapRep infoTablePtr pts = do itbl <- peekItbl infoTablePtr getClosureDataFromHeapRepPrim (dataConNames infoTablePtr) PPI.peekTopCCS itbl heapRep pts getClosureDataFromHeapRepPrim :: IO (String, String, String) -- ^ A continuation used to decode the constructor description field, -- in ghc-debug this code can lead to segfaults because dataConNames -- will dereference a random part of memory. -> (Ptr a -> IO (Maybe CostCentreStack)) -- ^ A continuation which is used to decode a cost centre stack -- In ghc-debug, this code will need to call back into the debuggee to -- fetch the representation of the CCS before decoding it. Using -- `peekTopCCS` for this argument can lead to segfaults in ghc-debug as -- the CCS argument will point outside the copied closure. -> StgInfoTable -- ^ The `StgInfoTable` of the closure, extracted from the heap -- representation. -> ByteArray# -- ^ Heap representation of the closure as returned by `unpackClosure#`. -- This includes all of the object including the header, info table -- pointer, pointer data, and non-pointer data. The ByteArray# may be -- pinned or unpinned. -> [b] -- ^ Pointers in the payload of the closure, extracted from the heap -- representation as returned by `collect_pointers()` in `Heap.c`. The type -- `b` is some representation of a pointer e.g. `Any` or `Ptr Any`. -> IO (GenClosure b) -- ^ Heap representation of the closure. getClosureDataFromHeapRepPrim getConDesc decodeCCS itbl heapRep pts = do let -- heapRep as a list of words. rawHeapWords :: [Word] rawHeapWords = [W# (indexWordArray# heapRep i) | I# i <- [0.. end] ] where nelems = I# (sizeofByteArray# heapRep) `div` wORD_SIZE end = fromIntegral nelems - 1 -- Just the payload of rawHeapWords (no header). payloadWords :: [Word] payloadWords = drop (closureTypeHeaderSize (tipe itbl)) rawHeapWords -- The non-pointer words in the payload. 
Only valid for closures with a -- "pointers first" layout. Not valid for bit field layout. npts :: [Word] npts = drop (closureTypeHeaderSize (tipe itbl) + length pts) rawHeapWords case tipe itbl of t | t >= CONSTR && t <= CONSTR_NOCAF -> do (p, m, n) <- getConDesc pure $ ConstrClosure itbl pts npts p m n t | t >= THUNK && t <= THUNK_STATIC -> do pure $ ThunkClosure itbl pts npts THUNK_SELECTOR -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to THUNK_SELECTOR" pure $ SelectorClosure itbl (head pts) t | t >= FUN && t <= FUN_STATIC -> do pure $ FunClosure itbl pts npts AP -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to AP" -- We expect at least the arity, n_args, and fun fields unless (length payloadWords >= 2) $ fail "Expected at least 2 raw words to AP" let splitWord = payloadWords !! 0 pure $ APClosure itbl #if defined(WORDS_BIGENDIAN) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) (fromIntegral splitWord) #else (fromIntegral splitWord) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) #endif (head pts) (tail pts) PAP -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to PAP" -- We expect at least the arity, n_args, and fun fields unless (length payloadWords >= 2) $ fail "Expected at least 2 raw words to PAP" let splitWord = payloadWords !! 
0 pure $ PAPClosure itbl #if defined(WORDS_BIGENDIAN) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) (fromIntegral splitWord) #else (fromIntegral splitWord) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) #endif (head pts) (tail pts) AP_STACK -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to AP_STACK" pure $ APStackClosure itbl (head pts) (tail pts) IND -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to IND" pure $ IndClosure itbl (head pts) IND_STATIC -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to IND_STATIC" pure $ IndClosure itbl (head pts) BLACKHOLE -> do unless (length pts >= 1) $ fail "Expected at least 1 ptr argument to BLACKHOLE" pure $ BlackholeClosure itbl (head pts) BCO -> do unless (length pts >= 3) $ fail $ "Expected at least 3 ptr argument to BCO, found " ++ show (length pts) unless (length payloadWords >= 4) $ fail $ "Expected at least 4 words to BCO, found " ++ show (length payloadWords) let splitWord = payloadWords !! 3 pure $ BCOClosure itbl (pts !! 0) (pts !! 1) (pts !! 2) #if defined(WORDS_BIGENDIAN) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) (fromIntegral splitWord) #else (fromIntegral splitWord) (fromIntegral $ shiftR splitWord (wORD_SIZE_IN_BITS `div` 2)) #endif (drop 4 payloadWords) ARR_WORDS -> do unless (length payloadWords >= 1) $ fail $ "Expected at least 1 words to ARR_WORDS, found " ++ show (length payloadWords) pure $ ArrWordsClosure itbl (head payloadWords) (tail payloadWords) t | t >= MUT_ARR_PTRS_CLEAN && t <= MUT_ARR_PTRS_FROZEN_CLEAN -> do unless (length payloadWords >= 2) $ fail $ "Expected at least 2 words to MUT_ARR_PTRS_* " ++ "found " ++ show (length payloadWords) pure $ MutArrClosure itbl (payloadWords !! 0) (payloadWords !! 
1) pts t | t >= SMALL_MUT_ARR_PTRS_CLEAN && t <= SMALL_MUT_ARR_PTRS_FROZEN_CLEAN -> do unless (length payloadWords >= 1) $ fail $ "Expected at least 1 word to SMALL_MUT_ARR_PTRS_* " ++ "found " ++ show (length payloadWords) pure $ SmallMutArrClosure itbl (payloadWords !! 0) pts t | t == MUT_VAR_CLEAN || t == MUT_VAR_DIRTY -> do unless (length pts >= 1) $ fail $ "Expected at least 1 words to MUT_VAR, found " ++ show (length pts) pure $ MutVarClosure itbl (head pts) t | t == MVAR_CLEAN || t == MVAR_DIRTY -> do unless (length pts >= 3) $ fail $ "Expected at least 3 ptrs to MVAR, found " ++ show (length pts) pure $ MVarClosure itbl (pts !! 0) (pts !! 1) (pts !! 2) BLOCKING_QUEUE -> pure $ OtherClosure itbl pts rawHeapWords -- pure $ BlockingQueueClosure itbl -- (pts !! 0) (pts !! 1) (pts !! 2) (pts !! 3) -- pure $ OtherClosure itbl pts rawHeapWords -- WEAK -> do pure $ WeakClosure { info = itbl , cfinalizers = pts !! 0 , key = pts !! 1 , value = pts !! 2 , finalizer = pts !! 3 , weakLink = case drop 4 pts of [] -> Nothing [p] -> Just p _ -> error $ "Expected 4 or 5 words in WEAK, found " ++ show (length pts) } TSO | [ u_lnk, u_gbl_lnk, tso_stack, u_trec, u_blk_ex, u_bq] <- pts -> withArray rawHeapWords (\ptr -> do fields <- FFIClosures.peekTSOFields decodeCCS ptr pure $ TSOClosure { info = itbl , link = u_lnk , global_link = u_gbl_lnk , tsoStack = tso_stack , trec = u_trec , blocked_exceptions = u_blk_ex , bq = u_bq , what_next = FFIClosures.tso_what_next fields , why_blocked = FFIClosures.tso_why_blocked fields , flags = FFIClosures.tso_flags fields , threadId = FFIClosures.tso_threadId fields , saved_errno = FFIClosures.tso_saved_errno fields , tso_dirty = FFIClosures.tso_dirty fields , alloc_limit = FFIClosures.tso_alloc_limit fields , tot_stack_size = FFIClosures.tso_tot_stack_size fields , prof = FFIClosures.tso_prof fields }) | otherwise -> fail $ "Expected 6 ptr arguments to TSO, found " ++ show (length pts) STACK | [] <- pts -> withArray rawHeapWords (\ptr -> 
do fields <- FFIClosures.peekStackFields ptr pure $ StackClosure { info = itbl , stack_size = FFIClosures.stack_size fields , stack_dirty = FFIClosures.stack_dirty fields #if __GLASGOW_HASKELL__ >= 811 , stack_marking = FFIClosures.stack_marking fields #endif }) | otherwise -> fail $ "Expected 0 ptr argument to STACK, found " ++ show (length pts) _ -> pure $ UnsupportedClosure itbl -- | Like 'getClosureData', but taking a 'Box', so it is easier to work with. getBoxedClosureData :: Box -> IO Closure getBoxedClosureData (Box a) = getClosureData a ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/ClosureTypes.hs0000644000000000000000000000412714472375231024267 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DeriveGeneric #-} module GHC.Exts.Heap.ClosureTypes ( ClosureType(..) , closureTypeHeaderSize ) where import Prelude -- See note [Why do we import Prelude here?] import GHC.Generics {- --------------------------------------------- -- Enum representing closure types -- This is a mirror of: -- rts/include/rts/storage/ClosureTypes.h -- ---------------------------------------------} data ClosureType = INVALID_OBJECT | CONSTR | CONSTR_1_0 | CONSTR_0_1 | CONSTR_2_0 | CONSTR_1_1 | CONSTR_0_2 | CONSTR_NOCAF | FUN | FUN_1_0 | FUN_0_1 | FUN_2_0 | FUN_1_1 | FUN_0_2 | FUN_STATIC | THUNK | THUNK_1_0 | THUNK_0_1 | THUNK_2_0 | THUNK_1_1 | THUNK_0_2 | THUNK_STATIC | THUNK_SELECTOR | BCO | AP | PAP | AP_STACK | IND | IND_STATIC | RET_BCO | RET_SMALL | RET_BIG | RET_FUN | UPDATE_FRAME | CATCH_FRAME | UNDERFLOW_FRAME | STOP_FRAME | BLOCKING_QUEUE | BLACKHOLE | MVAR_CLEAN | MVAR_DIRTY | TVAR | ARR_WORDS | MUT_ARR_PTRS_CLEAN | MUT_ARR_PTRS_DIRTY | MUT_ARR_PTRS_FROZEN_DIRTY | MUT_ARR_PTRS_FROZEN_CLEAN | MUT_VAR_CLEAN | MUT_VAR_DIRTY | WEAK | PRIM | MUT_PRIM | TSO | STACK | TREC_CHUNK | ATOMICALLY_FRAME | CATCH_RETRY_FRAME | CATCH_STM_FRAME | WHITEHOLE | SMALL_MUT_ARR_PTRS_CLEAN | SMALL_MUT_ARR_PTRS_DIRTY | SMALL_MUT_ARR_PTRS_FROZEN_DIRTY | 
SMALL_MUT_ARR_PTRS_FROZEN_CLEAN | COMPACT_NFDATA | N_CLOSURE_TYPES deriving (Enum, Eq, Ord, Show, Generic) -- | Return the size of the closures header in words closureTypeHeaderSize :: ClosureType -> Int closureTypeHeaderSize closType = case closType of ct | THUNK <= ct && ct <= THUNK_0_2 -> thunkHeader ct | ct == THUNK_SELECTOR -> thunkHeader ct | ct == AP -> thunkHeader ct | ct == AP_STACK -> thunkHeader _ -> header where header = 1 + prof thunkHeader = 2 + prof #if defined(PROFILING) prof = 2 #else prof = 0 #endif ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/Closures.hs0000644000000000000000000003374714472400004023422 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ForeignFunctionInterface #-} {-# LANGUAGE GHCForeignImportPrim #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE UnliftedFFITypes #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DeriveTraversable #-} module GHC.Exts.Heap.Closures ( -- * Closures Closure , GenClosure(..) , PrimType(..) , WhatNext(..) , WhyBlocked(..) , TsoFlags(..) , allClosures , closureSize -- * Boxes , Box(..) , areBoxesEqual , asBox ) where import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.Constants #if defined(PROFILING) import GHC.Exts.Heap.InfoTableProf #else import GHC.Exts.Heap.InfoTable -- `ghc -M` currently doesn't properly account for ways when generating -- dependencies (#15197). This import ensures correct build-ordering between -- this module and GHC.Exts.Heap.InfoTableProf. It should be removed when #15197 -- is fixed. 
import GHC.Exts.Heap.InfoTableProf () #endif import GHC.Exts.Heap.ProfInfo.Types import Data.Bits import Data.Foldable (toList) import Data.Int import Data.Word import GHC.Exts import GHC.Generics import Numeric ------------------------------------------------------------------------ -- Boxes foreign import prim "Ghclib_aToWordzh" aToWord# :: Any -> Word# foreign import prim "Ghclib_reallyUnsafePtrEqualityUpToTag" reallyUnsafePtrEqualityUpToTag# :: Any -> Any -> Int# -- | An arbitrary Haskell value in a safe Box. The point is that even -- unevaluated thunks can safely be moved around inside the Box, and when -- required, e.g. in 'getBoxedClosureData', the function knows how far it has -- to evaluate the argument. data Box = Box Any instance Show Box where -- From libraries/base/GHC/Ptr.lhs showsPrec _ (Box a) rs = -- unsafePerformIO (print "↓" >> pClosure a) `seq` pad_out (showHex addr "") ++ (if tag>0 then "/" ++ show tag else "") ++ rs where ptr = W# (aToWord# a) tag = ptr .&. fromIntegral tAG_MASK -- ((1 `shiftL` TAG_BITS) -1) addr = ptr - tag pad_out ls = '0':'x':ls -- |This takes an arbitrary value and puts it into a box. -- Note that calls like -- -- > asBox (head list) -- -- will put the thunk \"head list\" into the box, /not/ the element at the head -- of the list. For that, use careful case expressions: -- -- > case list of x:_ -> asBox x asBox :: a -> Box asBox x = Box (unsafeCoerce# x) -- | Boxes can be compared, but this is not pure, as different heap objects can, -- after garbage collection, become the same object. areBoxesEqual :: Box -> Box -> IO Bool areBoxesEqual (Box a) (Box b) = case reallyUnsafePtrEqualityUpToTag# a b of 0# -> pure False _ -> pure True ------------------------------------------------------------------------ -- Closures type Closure = GenClosure Box -- | This is the representation of a Haskell value on the heap. It reflects -- -- -- The data type is parametrized by `b`: the type to store references in. 
-- Usually this is a 'Box' with the type synonym 'Closure'. -- -- All Heap objects have the same basic layout. A header containing a pointer to -- the info table and a payload with various fields. The @info@ field below -- always refers to the info table pointed to by the header. The remaining -- fields are the payload. -- -- See -- -- for more information. data GenClosure b = -- | A data constructor ConstrClosure { info :: !StgInfoTable , ptrArgs :: ![b] -- ^ Pointer arguments , dataArgs :: ![Word] -- ^ Non-pointer arguments , pkg :: !String -- ^ Package name , modl :: !String -- ^ Module name , name :: !String -- ^ Constructor name } -- | A function | FunClosure { info :: !StgInfoTable , ptrArgs :: ![b] -- ^ Pointer arguments , dataArgs :: ![Word] -- ^ Non-pointer arguments } -- | A thunk, an expression not obviously in head normal form | ThunkClosure { info :: !StgInfoTable , ptrArgs :: ![b] -- ^ Pointer arguments , dataArgs :: ![Word] -- ^ Non-pointer arguments } -- | A thunk which performs a simple selection operation | SelectorClosure { info :: !StgInfoTable , selectee :: !b -- ^ Pointer to the object being -- selected from } -- | An unsaturated function application | PAPClosure { info :: !StgInfoTable , arity :: !HalfWord -- ^ Arity of the partial application , n_args :: !HalfWord -- ^ Size of the payload in words , fun :: !b -- ^ Pointer to a 'FunClosure' , payload :: ![b] -- ^ Sequence of already applied -- arguments } -- In GHCi, if Linker.h would allow a reverse lookup, we could for exported -- functions fun actually find the name here. 
-- At least the other direction works via "lookupSymbol -- base_GHCziBase_zpzp_closure" and yields the same address (up to tags) -- | A function application | APClosure { info :: !StgInfoTable , arity :: !HalfWord -- ^ Always 0 , n_args :: !HalfWord -- ^ Size of payload in words , fun :: !b -- ^ Pointer to a 'FunClosure' , payload :: ![b] -- ^ Sequence of already applied -- arguments } -- | A suspended thunk evaluation | APStackClosure { info :: !StgInfoTable , fun :: !b -- ^ Function closure , payload :: ![b] -- ^ Stack right before suspension } -- | A pointer to another closure, introduced when a thunk is updated -- to point at its value | IndClosure { info :: !StgInfoTable , indirectee :: !b -- ^ Target closure } -- | A byte-code object (BCO) which can be interpreted by GHC's byte-code -- interpreter (e.g. as used by GHCi) | BCOClosure { info :: !StgInfoTable , instrs :: !b -- ^ A pointer to an ArrWords -- of instructions , literals :: !b -- ^ A pointer to an ArrWords -- of literals , bcoptrs :: !b -- ^ A pointer to an ArrWords -- of byte code objects , arity :: !HalfWord -- ^ The arity of this BCO , size :: !HalfWord -- ^ The size of this BCO in words , bitmap :: ![Word] -- ^ An StgLargeBitmap describing the -- pointerhood of its args/free vars } -- | A thunk under evaluation by another thread | BlackholeClosure { info :: !StgInfoTable , indirectee :: !b -- ^ The target closure } -- | A @ByteArray#@ | ArrWordsClosure { info :: !StgInfoTable , bytes :: !Word -- ^ Size of array in bytes , arrWords :: ![Word] -- ^ Array payload } -- | A @MutableByteArray#@ | MutArrClosure { info :: !StgInfoTable , mccPtrs :: !Word -- ^ Number of pointers , mccSize :: !Word -- ^ ?? 
Closures.h vs ClosureMacros.h , mccPayload :: ![b] -- ^ Array payload -- Card table ignored } -- | A @SmallMutableArray#@ -- -- @since 8.10.1 | SmallMutArrClosure { info :: !StgInfoTable , mccPtrs :: !Word -- ^ Number of pointers , mccPayload :: ![b] -- ^ Array payload } -- | An @MVar#@, with a queue of thread state objects blocking on them | MVarClosure { info :: !StgInfoTable , queueHead :: !b -- ^ Pointer to head of queue , queueTail :: !b -- ^ Pointer to tail of queue , value :: !b -- ^ Pointer to closure } -- | An @IOPort#@, with a queue of thread state objects blocking on them | IOPortClosure { info :: !StgInfoTable , queueHead :: !b -- ^ Pointer to head of queue , queueTail :: !b -- ^ Pointer to tail of queue , value :: !b -- ^ Pointer to closure } -- | A @MutVar#@ | MutVarClosure { info :: !StgInfoTable , var :: !b -- ^ Pointer to contents } -- | An STM blocking queue. | BlockingQueueClosure { info :: !StgInfoTable , link :: !b -- ^ ?? Here so it looks like an IND , blackHole :: !b -- ^ The blackhole closure , owner :: !b -- ^ The owning thread state object , queue :: !b -- ^ ?? } | WeakClosure { info :: !StgInfoTable , cfinalizers :: !b , key :: !b , value :: !b , finalizer :: !b , weakLink :: !(Maybe b) -- ^ next weak pointer for the capability } -- | Representation of StgTSO: A Thread State Object. The values for -- 'what_next', 'why_blocked' and 'flags' are defined in @Constants.h@. | TSOClosure { info :: !StgInfoTable -- pointers , link :: !b , global_link :: !b , tsoStack :: !b -- ^ stackobj from StgTSO , trec :: !b , blocked_exceptions :: !b , bq :: !b -- values , what_next :: !WhatNext , why_blocked :: !WhyBlocked , flags :: ![TsoFlags] , threadId :: !Word64 , saved_errno :: !Word32 , tso_dirty :: !Word32 -- ^ non-zero => dirty , alloc_limit :: !Int64 , tot_stack_size :: !Word32 , prof :: !(Maybe StgTSOProfInfo) } -- | Representation of StgStack: The 'tsoStack ' of a 'TSOClosure'. 
| StackClosure { info :: !StgInfoTable , stack_size :: !Word32 -- ^ stack size in *words* , stack_dirty :: !Word8 -- ^ non-zero => dirty #if __GLASGOW_HASKELL__ >= 811 , stack_marking :: !Word8 #endif } ------------------------------------------------------------ -- Unboxed unlifted closures -- | Primitive Int | IntClosure { ptipe :: PrimType , intVal :: !Int } -- | Primitive Word | WordClosure { ptipe :: PrimType , wordVal :: !Word } -- | Primitive Int64 | Int64Closure { ptipe :: PrimType , int64Val :: !Int64 } -- | Primitive Word64 | Word64Closure { ptipe :: PrimType , word64Val :: !Word64 } -- | Primitive Addr | AddrClosure { ptipe :: PrimType , addrVal :: !Int } -- | Primitive Float | FloatClosure { ptipe :: PrimType , floatVal :: !Float } -- | Primitive Double | DoubleClosure { ptipe :: PrimType , doubleVal :: !Double } ----------------------------------------------------------- -- Anything else -- | Another kind of closure | OtherClosure { info :: !StgInfoTable , hvalues :: ![b] , rawWords :: ![Word] } | UnsupportedClosure { info :: !StgInfoTable } deriving (Show, Generic, Functor, Foldable, Traversable) data PrimType = PInt | PWord | PInt64 | PWord64 | PAddr | PFloat | PDouble deriving (Eq, Show, Generic, Ord) data WhatNext = ThreadRunGHC | ThreadInterpret | ThreadKilled | ThreadComplete | WhatNextUnknownValue Word16 -- ^ Please report this as a bug deriving (Eq, Show, Generic, Ord) data WhyBlocked = NotBlocked | BlockedOnMVar | BlockedOnMVarRead | BlockedOnBlackHole | BlockedOnRead | BlockedOnWrite | BlockedOnDelay | BlockedOnSTM | BlockedOnDoProc | BlockedOnCCall | BlockedOnCCall_Interruptible | BlockedOnMsgThrowTo | ThreadMigrating | WhyBlockedUnknownValue Word16 -- ^ Please report this as a bug deriving (Eq, Show, Generic, Ord) data TsoFlags = TsoLocked | TsoBlockx | TsoInterruptible | TsoStoppedOnBreakpoint | TsoMarked | TsoSqueezed | TsoAllocLimit | TsoFlagsUnknownValue Word32 -- ^ Please report this as a bug deriving (Eq, Show, Generic, Ord) -- | For 
generic code, this function returns all referenced closures. allClosures :: GenClosure b -> [b] allClosures (ConstrClosure {..}) = ptrArgs allClosures (ThunkClosure {..}) = ptrArgs allClosures (SelectorClosure {..}) = [selectee] allClosures (IndClosure {..}) = [indirectee] allClosures (BlackholeClosure {..}) = [indirectee] allClosures (APClosure {..}) = fun:payload allClosures (PAPClosure {..}) = fun:payload allClosures (APStackClosure {..}) = fun:payload allClosures (BCOClosure {..}) = [instrs,literals,bcoptrs] allClosures (ArrWordsClosure {}) = [] allClosures (MutArrClosure {..}) = mccPayload allClosures (SmallMutArrClosure {..}) = mccPayload allClosures (MutVarClosure {..}) = [var] allClosures (MVarClosure {..}) = [queueHead,queueTail,value] allClosures (IOPortClosure {..}) = [queueHead,queueTail,value] allClosures (FunClosure {..}) = ptrArgs allClosures (BlockingQueueClosure {..}) = [link, blackHole, owner, queue] allClosures (WeakClosure {..}) = [cfinalizers, key, value, finalizer] ++ Data.Foldable.toList weakLink allClosures (OtherClosure {..}) = hvalues allClosures _ = [] -- | Get the size of the top-level closure in words. -- Includes header and payload. Does not follow pointers. -- -- @since 8.10.1 closureSize :: Box -> Int closureSize (Box x) = I# (closureSize# x) ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/Constants.hsc0000644000000000000000000000060514470055371023740 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Exts.Heap.Constants ( wORD_SIZE , tAG_MASK , wORD_SIZE_IN_BITS ) where #include "MachDeps.h" import Prelude -- See note [Why do we import Prelude here?] 
import Data.Bits wORD_SIZE, tAG_MASK, wORD_SIZE_IN_BITS :: Int wORD_SIZE = #const SIZEOF_HSWORD wORD_SIZE_IN_BITS = #const WORD_SIZE_IN_BITS tAG_MASK = (1 `shift` #const TAG_BITS) - 1 ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/FFIClosures.hs0000644000000000000000000000377414470055371023757 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Exts.Heap.FFIClosures (module Reexport) where -- NOTE [hsc and CPP workaround] -- -- # Problem -- -- Often, .hsc files are used to get the correct offsets of C struct fields. -- Those structs may be affected by CPP directives e.g. profiled vs not profiled -- closure headers is affected by the PROFILED cpp define. Since we are building -- multiple variants of the RTS, we must support all possible offsets e.g. by -- running hsc2hs with cpp defines corresponding to each RTS flavour. The -- problem is that GHC's build system runs hsc2hs *only once* per .hsc file -- without properly setting cpp defines. This results in the same (probably -- incorrect) offsets into our C structs. -- -- -- # Workaround -- -- To work around this issue, we create multiple .hsc files each manually -- defining thir cpp defines (see e.g. FFIClosures_ProfilingDisabled.hsc and -- FFIClosures_ProfilingEnabled.hsc). Then we rely on cpp defines working -- correctly in .hs files and use CPP to switch on which .hsc module to -- re-export (see below). In each case we import the desired .hsc module as -- `Reexport` and we import `()` (i.e. nothing) from all other .hsc variants -- just so that the build system sees all .hsc file as dependencies. -- -- -- # Future Work -- -- - Duplication of the code in the .hsc files could be reduced simply by -- placing the code in a single .hsc.in file and `#include`ing it from each -- .hsc file. The .hsc files would only be responsible for setting the correct -- cpp defines. This currently doesn't work as hadrian doesn't know to copy -- the .hsc.in file to the build directory. 
-- - The correct solution would be for the build system to run `hsc2hs` with the -- correct cpp defines once per RTS flavour. -- #if defined(PROFILING) import GHC.Exts.Heap.FFIClosures_ProfilingEnabled as Reexport import GHC.Exts.Heap.FFIClosures_ProfilingDisabled () #else import GHC.Exts.Heap.FFIClosures_ProfilingDisabled as Reexport import GHC.Exts.Heap.FFIClosures_ProfilingEnabled () #endif ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/FFIClosures_ProfilingDisabled.hsc0000644000000000000000000001171114470055371027551 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} module GHC.Exts.Heap.FFIClosures_ProfilingDisabled where -- See [hsc and CPP workaround] #undef PROFILING #include "Rts.h" import Prelude import Foreign import GHC.Exts import GHC.Exts.Heap.ProfInfo.PeekProfInfo import GHC.Exts.Heap.ProfInfo.Types import GHC.Exts.Heap.Closures(WhatNext(..), WhyBlocked(..), TsoFlags(..)) data TSOFields = TSOFields { tso_what_next :: WhatNext, tso_why_blocked :: WhyBlocked, tso_flags :: [TsoFlags], -- Unfortunately block_info is a union without clear discriminator. 
-- block_info :: TDB, tso_threadId :: Word64, tso_saved_errno :: Word32, tso_dirty:: Word32, tso_alloc_limit :: Int64, tso_tot_stack_size :: Word32, tso_prof :: Maybe StgTSOProfInfo } -- | Get non-pointer fields from @StgTSO_@ (@TSO.h@) peekTSOFields :: (Ptr a -> IO (Maybe CostCentreStack)) -> Ptr tsoPtr -> IO TSOFields peekTSOFields decodeCCS ptr = do what_next' <- (#peek struct StgTSO_, what_next) ptr why_blocked' <- (#peek struct StgTSO_, why_blocked) ptr flags' <- (#peek struct StgTSO_, flags) ptr threadId' <- (#peek struct StgTSO_, id) ptr saved_errno' <- (#peek struct StgTSO_, saved_errno) ptr dirty' <- (#peek struct StgTSO_, dirty) ptr alloc_limit' <- (#peek struct StgTSO_, alloc_limit) ptr tot_stack_size' <- (#peek struct StgTSO_, tot_stack_size) ptr tso_prof' <- peekStgTSOProfInfo decodeCCS ptr return TSOFields { tso_what_next = parseWhatNext what_next', tso_why_blocked = parseWhyBlocked why_blocked', tso_flags = parseTsoFlags flags', tso_threadId = threadId', tso_saved_errno = saved_errno', tso_dirty = dirty', tso_alloc_limit = alloc_limit', tso_tot_stack_size = tot_stack_size', tso_prof = tso_prof' } parseWhatNext :: Word16 -> WhatNext parseWhatNext w = case w of (#const ThreadRunGHC) -> ThreadRunGHC (#const ThreadInterpret) -> ThreadInterpret (#const ThreadKilled) -> ThreadKilled (#const ThreadComplete) -> ThreadComplete _ -> WhatNextUnknownValue w parseWhyBlocked :: Word16 -> WhyBlocked parseWhyBlocked w = case w of (#const NotBlocked) -> NotBlocked (#const BlockedOnMVar) -> BlockedOnMVar (#const BlockedOnMVarRead) -> BlockedOnMVarRead (#const BlockedOnBlackHole) -> BlockedOnBlackHole (#const BlockedOnRead) -> BlockedOnRead (#const BlockedOnWrite) -> BlockedOnWrite (#const BlockedOnDelay) -> BlockedOnDelay (#const BlockedOnSTM) -> BlockedOnSTM (#const BlockedOnDoProc) -> BlockedOnDoProc (#const BlockedOnCCall) -> BlockedOnCCall (#const BlockedOnCCall_Interruptible) -> BlockedOnCCall_Interruptible (#const BlockedOnMsgThrowTo) -> BlockedOnMsgThrowTo 
(#const ThreadMigrating) -> ThreadMigrating _ -> WhyBlockedUnknownValue w parseTsoFlags :: Word32 -> [TsoFlags] parseTsoFlags w | isSet (#const TSO_LOCKED) w = TsoLocked : parseTsoFlags (unset (#const TSO_LOCKED) w) | isSet (#const TSO_BLOCKEX) w = TsoBlockx : parseTsoFlags (unset (#const TSO_BLOCKEX) w) | isSet (#const TSO_INTERRUPTIBLE) w = TsoInterruptible : parseTsoFlags (unset (#const TSO_INTERRUPTIBLE) w) | isSet (#const TSO_STOPPED_ON_BREAKPOINT) w = TsoStoppedOnBreakpoint : parseTsoFlags (unset (#const TSO_STOPPED_ON_BREAKPOINT) w) | isSet (#const TSO_MARKED) w = TsoMarked : parseTsoFlags (unset (#const TSO_MARKED) w) | isSet (#const TSO_SQUEEZED) w = TsoSqueezed : parseTsoFlags (unset (#const TSO_SQUEEZED) w) | isSet (#const TSO_ALLOC_LIMIT) w = TsoAllocLimit : parseTsoFlags (unset (#const TSO_ALLOC_LIMIT) w) parseTsoFlags 0 = [] parseTsoFlags w = [TsoFlagsUnknownValue w] isSet :: Word32 -> Word32 -> Bool isSet bitMask w = w .&. bitMask /= 0 unset :: Word32 -> Word32 -> Word32 unset bitMask w = w `xor` bitMask data StackFields = StackFields { stack_size :: Word32, stack_dirty :: Word8, #if __GLASGOW_HASKELL__ >= 811 stack_marking :: Word8, #endif stack_sp :: Addr## } -- | Get non-closure fields from @StgStack_@ (@TSO.h@) peekStackFields :: Ptr a -> IO StackFields peekStackFields ptr = do stack_size' <- (#peek struct StgStack_, stack_size) ptr ::IO Word32 dirty' <- (#peek struct StgStack_, dirty) ptr #if __GLASGOW_HASKELL__ >= 811 marking' <- (#peek struct StgStack_, marking) ptr #endif Ptr sp' <- (#peek struct StgStack_, sp) ptr -- TODO decode the stack. 
return StackFields { stack_size = stack_size', stack_dirty = dirty', #if __GLASGOW_HASKELL__ >= 811 stack_marking = marking', #endif stack_sp = sp' } ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/FFIClosures_ProfilingEnabled.hsc0000644000000000000000000001171014470055371027373 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} module GHC.Exts.Heap.FFIClosures_ProfilingEnabled where -- See [hsc and CPP workaround] #define PROFILING #include "Rts.h" import Prelude import Foreign import GHC.Exts import GHC.Exts.Heap.ProfInfo.PeekProfInfo import GHC.Exts.Heap.ProfInfo.Types import GHC.Exts.Heap.Closures(WhatNext(..), WhyBlocked(..), TsoFlags(..)) data TSOFields = TSOFields { tso_what_next :: WhatNext, tso_why_blocked :: WhyBlocked, tso_flags :: [TsoFlags], -- Unfortunately block_info is a union without clear discriminator. -- block_info :: TDB, tso_threadId :: Word64, tso_saved_errno :: Word32, tso_dirty:: Word32, tso_alloc_limit :: Int64, tso_tot_stack_size :: Word32, tso_prof :: Maybe StgTSOProfInfo } -- | Get non-pointer fields from @StgTSO_@ (@TSO.h@) peekTSOFields :: (Ptr a -> IO (Maybe CostCentreStack)) -> Ptr tsoPtr -> IO TSOFields peekTSOFields decodeCCS ptr = do what_next' <- (#peek struct StgTSO_, what_next) ptr why_blocked' <- (#peek struct StgTSO_, why_blocked) ptr flags' <- (#peek struct StgTSO_, flags) ptr threadId' <- (#peek struct StgTSO_, id) ptr saved_errno' <- (#peek struct StgTSO_, saved_errno) ptr dirty' <- (#peek struct StgTSO_, dirty) ptr alloc_limit' <- (#peek struct StgTSO_, alloc_limit) ptr tot_stack_size' <- (#peek struct StgTSO_, tot_stack_size) ptr tso_prof' <- peekStgTSOProfInfo decodeCCS ptr return TSOFields { tso_what_next = parseWhatNext what_next', tso_why_blocked = parseWhyBlocked why_blocked', tso_flags = parseTsoFlags flags', tso_threadId = threadId', tso_saved_errno = saved_errno', tso_dirty = dirty', tso_alloc_limit = alloc_limit', tso_tot_stack_size = tot_stack_size', tso_prof = tso_prof' } 
parseWhatNext :: Word16 -> WhatNext parseWhatNext w = case w of (#const ThreadRunGHC) -> ThreadRunGHC (#const ThreadInterpret) -> ThreadInterpret (#const ThreadKilled) -> ThreadKilled (#const ThreadComplete) -> ThreadComplete _ -> WhatNextUnknownValue w parseWhyBlocked :: Word16 -> WhyBlocked parseWhyBlocked w = case w of (#const NotBlocked) -> NotBlocked (#const BlockedOnMVar) -> BlockedOnMVar (#const BlockedOnMVarRead) -> BlockedOnMVarRead (#const BlockedOnBlackHole) -> BlockedOnBlackHole (#const BlockedOnRead) -> BlockedOnRead (#const BlockedOnWrite) -> BlockedOnWrite (#const BlockedOnDelay) -> BlockedOnDelay (#const BlockedOnSTM) -> BlockedOnSTM (#const BlockedOnDoProc) -> BlockedOnDoProc (#const BlockedOnCCall) -> BlockedOnCCall (#const BlockedOnCCall_Interruptible) -> BlockedOnCCall_Interruptible (#const BlockedOnMsgThrowTo) -> BlockedOnMsgThrowTo (#const ThreadMigrating) -> ThreadMigrating _ -> WhyBlockedUnknownValue w parseTsoFlags :: Word32 -> [TsoFlags] parseTsoFlags w | isSet (#const TSO_LOCKED) w = TsoLocked : parseTsoFlags (unset (#const TSO_LOCKED) w) | isSet (#const TSO_BLOCKEX) w = TsoBlockx : parseTsoFlags (unset (#const TSO_BLOCKEX) w) | isSet (#const TSO_INTERRUPTIBLE) w = TsoInterruptible : parseTsoFlags (unset (#const TSO_INTERRUPTIBLE) w) | isSet (#const TSO_STOPPED_ON_BREAKPOINT) w = TsoStoppedOnBreakpoint : parseTsoFlags (unset (#const TSO_STOPPED_ON_BREAKPOINT) w) | isSet (#const TSO_MARKED) w = TsoMarked : parseTsoFlags (unset (#const TSO_MARKED) w) | isSet (#const TSO_SQUEEZED) w = TsoSqueezed : parseTsoFlags (unset (#const TSO_SQUEEZED) w) | isSet (#const TSO_ALLOC_LIMIT) w = TsoAllocLimit : parseTsoFlags (unset (#const TSO_ALLOC_LIMIT) w) parseTsoFlags 0 = [] parseTsoFlags w = [TsoFlagsUnknownValue w] isSet :: Word32 -> Word32 -> Bool isSet bitMask w = w .&. 
bitMask /= 0 unset :: Word32 -> Word32 -> Word32 unset bitMask w = w `xor` bitMask data StackFields = StackFields { stack_size :: Word32, stack_dirty :: Word8, #if __GLASGOW_HASKELL__ >= 811 stack_marking :: Word8, #endif stack_sp :: Addr## } -- | Get non-closure fields from @StgStack_@ (@TSO.h@) peekStackFields :: Ptr a -> IO StackFields peekStackFields ptr = do stack_size' <- (#peek struct StgStack_, stack_size) ptr ::IO Word32 dirty' <- (#peek struct StgStack_, dirty) ptr #if __GLASGOW_HASKELL__ >= 811 marking' <- (#peek struct StgStack_, marking) ptr #endif Ptr sp' <- (#peek struct StgStack_, sp) ptr -- TODO decode the stack. return StackFields { stack_size = stack_size', stack_dirty = dirty', #if __GLASGOW_HASKELL__ >= 811 stack_marking = marking', #endif stack_sp = sp' } ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/InfoTable.hsc0000644000000000000000000000465514472400113023626 0ustar0000000000000000{-# LANGUAGE NoMonoLocalBinds #-} module GHC.Exts.Heap.InfoTable ( module GHC.Exts.Heap.InfoTable.Types , itblSize , peekItbl , pokeItbl ) where #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.InfoTable.Types #if !defined(TABLES_NEXT_TO_CODE) import GHC.Exts.Heap.Constants import Data.Maybe #endif import Foreign ------------------------------------------------------------------------- -- Profiling specific code -- -- The functions that follow all rely on PROFILING. They are duplicated in -- ghc-heap/GHC/Exts/Heap/InfoTableProf.hsc where PROFILING is defined. This -- allows hsc2hs to generate values for both profiling and non-profiling builds. -- | Read an InfoTable from the heap into a haskell type. -- WARNING: This code assumes it is passed a pointer to a "standard" info -- table. If tables_next_to_code is enabled, it will look 1 byte before the -- start for the entry field. 
peekItbl :: Ptr StgInfoTable -> IO StgInfoTable peekItbl a0 = do #if !defined(TABLES_NEXT_TO_CODE) let ptr = a0 `plusPtr` (negate wORD_SIZE) entry' <- Just <$> (#peek struct StgInfoTable_, entry) ptr #else let ptr = a0 entry' = Nothing #endif ptrs' <- (#peek struct StgInfoTable_, layout.payload.ptrs) ptr nptrs' <- (#peek struct StgInfoTable_, layout.payload.nptrs) ptr tipe' <- (#peek struct StgInfoTable_, type) ptr srtlen' <- (#peek struct StgInfoTable_, srt) a0 return StgInfoTable { entry = entry' , ptrs = ptrs' , nptrs = nptrs' , tipe = toEnum (fromIntegral (tipe' :: HalfWord)) , srtlen = srtlen' , code = Nothing } pokeItbl :: Ptr StgInfoTable -> StgInfoTable -> IO () pokeItbl a0 itbl = do #if !defined(TABLES_NEXT_TO_CODE) (#poke StgInfoTable, entry) a0 (fromJust (entry itbl)) #endif (#poke StgInfoTable, layout.payload.ptrs) a0 (ptrs itbl) (#poke StgInfoTable, layout.payload.nptrs) a0 (nptrs itbl) (#poke StgInfoTable, type) a0 (toHalfWord (fromEnum (tipe itbl))) (#poke StgInfoTable, srt) a0 (srtlen itbl) #if defined(TABLES_NEXT_TO_CODE) let code_offset = a0 `plusPtr` (#offset StgInfoTable, code) case code itbl of Nothing -> return () Just (Left xs) -> pokeArray code_offset xs Just (Right xs) -> pokeArray code_offset xs #endif where toHalfWord :: Int -> HalfWord toHalfWord i = fromIntegral i -- | Size in bytes of a standard InfoTable itblSize :: Int itblSize = (#size struct StgInfoTable_) ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/InfoTable/Types.hsc0000644000000000000000000000211614472367564024746 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} module GHC.Exts.Heap.InfoTable.Types ( StgInfoTable(..) , EntryFunPtr , HalfWord , ItblCodes ) where #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] 
import GHC.Generics import GHC.Exts.Heap.ClosureTypes import Foreign type ItblCodes = Either [Word8] [Word32] #include "ghcautoconf.h" -- Ultra-minimalist version specially for constructors #if SIZEOF_VOID_P == 8 type HalfWord = Word32 #elif SIZEOF_VOID_P == 4 type HalfWord = Word16 #else #error Unknown SIZEOF_VOID_P #endif type EntryFunPtr = FunPtr (Ptr () -> IO (Ptr ())) -- | This is a somewhat faithful representation of an info table. See -- -- for more details on this data structure. data StgInfoTable = StgInfoTable { entry :: Maybe EntryFunPtr, -- Just <=> not TABLES_NEXT_TO_CODE ptrs :: HalfWord, nptrs :: HalfWord, tipe :: ClosureType, srtlen :: HalfWord, code :: Maybe ItblCodes -- Just <=> TABLES_NEXT_TO_CODE } deriving (Show, Generic) ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/InfoTableProf.hsc0000644000000000000000000000427614472400113024454 0ustar0000000000000000{-# LANGUAGE NoMonoLocalBinds #-} module GHC.Exts.Heap.InfoTableProf ( module GHC.Exts.Heap.InfoTable.Types , itblSize , peekItbl , pokeItbl ) where -- This file overrides InfoTable.hsc's implementation of peekItbl and pokeItbl. -- Manually defining PROFILING gives the #peek and #poke macros an accurate -- representation of StgInfoTable_ when hsc2hs runs. #define PROFILING #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.InfoTable.Types #if !defined(TABLES_NEXT_TO_CODE) import GHC.Exts.Heap.Constants import Data.Maybe #endif import Foreign -- | Read an InfoTable from the heap into a haskell type. -- WARNING: This code assumes it is passed a pointer to a "standard" info -- table. If tables_next_to_code is enabled, it will look 1 byte before the -- start for the entry field. 
peekItbl :: Ptr StgInfoTable -> IO StgInfoTable peekItbl a0 = do #if !defined(TABLES_NEXT_TO_CODE) let ptr = a0 `plusPtr` (negate wORD_SIZE) entry' <- Just <$> (#peek struct StgInfoTable_, entry) ptr #else let ptr = a0 entry' = Nothing #endif ptrs' <- (#peek struct StgInfoTable_, layout.payload.ptrs) ptr nptrs' <- (#peek struct StgInfoTable_, layout.payload.nptrs) ptr tipe' <- (#peek struct StgInfoTable_, type) ptr srtlen' <- (#peek struct StgInfoTable_, srt) a0 return StgInfoTable { entry = entry' , ptrs = ptrs' , nptrs = nptrs' , tipe = toEnum (fromIntegral (tipe' :: HalfWord)) , srtlen = srtlen' , code = Nothing } pokeItbl :: Ptr StgInfoTable -> StgInfoTable -> IO () pokeItbl a0 itbl = do #if !defined(TABLES_NEXT_TO_CODE) (#poke StgInfoTable, entry) a0 (fromJust (entry itbl)) #endif (#poke StgInfoTable, layout.payload.ptrs) a0 (ptrs itbl) (#poke StgInfoTable, layout.payload.nptrs) a0 (nptrs itbl) (#poke StgInfoTable, type) a0 (fromEnum (tipe itbl)) (#poke StgInfoTable, srt) a0 (srtlen itbl) #if defined(TABLES_NEXT_TO_CODE) let code_offset = a0 `plusPtr` (#offset StgInfoTable, code) case code itbl of Nothing -> return () Just (Left xs) -> pokeArray code_offset xs Just (Right xs) -> pokeArray code_offset xs #endif itblSize :: Int itblSize = (#size struct StgInfoTable_) ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/PeekProfInfo.hs0000644000000000000000000000065714470055371025701 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Exts.Heap.ProfInfo.PeekProfInfo (module Reexport) where -- See [hsc and CPP workaround] #if defined(PROFILING) import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled as Reexport import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled () #else import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled as Reexport import GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled () #endif 
libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/PeekProfInfo_ProfilingDisabled.hsc0000644000000000000000000000103414470055371031414 0ustar0000000000000000ghc-lib-parser-9.4.7.20230826module GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled( peekStgTSOProfInfo , peekTopCCS ) where import Prelude import Foreign import GHC.Exts.Heap.ProfInfo.Types -- | This implementation is used when PROFILING is undefined. -- It always returns 'Nothing', because there is no profiling info available. peekStgTSOProfInfo :: (Ptr a -> IO (Maybe CostCentreStack)) -> Ptr tsoPtr -> IO (Maybe StgTSOProfInfo) peekStgTSOProfInfo _ _ = return Nothing peekTopCCS :: Ptr a -> IO (Maybe CostCentreStack) peekTopCCS _ = return Nothing libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/PeekProfInfo_ProfilingEnabled.hsc0000644000000000000000000001560214470055371031245 0ustar0000000000000000ghc-lib-parser-9.4.7.20230826{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE MagicHash #-} module GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled( peekStgTSOProfInfo , peekTopCCS ) where #if __GLASGOW_HASKELL__ >= 811 -- See [hsc and CPP workaround] #define PROFILING #include "Rts.h" #undef BLOCK_SIZE #undef MBLOCK_SIZE #undef BLOCKS_PER_MBLOCK #include "DerivedConstants.h" import Data.IntSet (IntSet) import qualified Data.IntSet as IntSet import Data.IntMap.Strict (IntMap) import qualified Data.IntMap.Strict as IntMap import Data.IORef (IORef, newIORef, readIORef, writeIORef) import Foreign import Foreign.C.String import GHC.Exts import GHC.Exts.Heap.ProfInfo.Types import Prelude -- Use Int based containers for pointers (addresses) for better performance. -- These will be queried a lot! 
type AddressSet = IntSet type AddressMap = IntMap peekStgTSOProfInfo :: (Ptr b -> IO (Maybe CostCentreStack)) -> Ptr a -> IO (Maybe StgTSOProfInfo) peekStgTSOProfInfo decodeCCS tsoPtr = do cccs_ptr <- peekByteOff tsoPtr cccsOffset cccs' <- decodeCCS cccs_ptr return $ Just StgTSOProfInfo { cccs = cccs' } peekTopCCS :: Ptr b -> IO (Maybe CostCentreStack) peekTopCCS cccs_ptr = do costCenterCacheRef <- newIORef IntMap.empty peekCostCentreStack IntSet.empty costCenterCacheRef cccs_ptr cccsOffset :: Int cccsOffset = (#const OFFSET_StgTSO_cccs) + (#size StgHeader) peekCostCentreStack :: AddressSet -> IORef (AddressMap CostCentre) -> Ptr costCentreStack -> IO (Maybe CostCentreStack) peekCostCentreStack _ _ ptr | ptr == nullPtr = return Nothing peekCostCentreStack loopBreakers _ ptr | IntSet.member (ptrToInt ptr) loopBreakers = return Nothing peekCostCentreStack loopBreakers costCenterCacheRef ptr = do ccs_ccsID' <- (#peek struct CostCentreStack_, ccsID) ptr ccs_cc_ptr <- (#peek struct CostCentreStack_, cc) ptr ccs_cc' <- peekCostCentre costCenterCacheRef ccs_cc_ptr ccs_prevStack_ptr <- (#peek struct CostCentreStack_, prevStack) ptr let loopBreakers' = (IntSet.insert ptrAsInt loopBreakers) ccs_prevStack' <- peekCostCentreStack loopBreakers' costCenterCacheRef ccs_prevStack_ptr ccs_indexTable_ptr <- (#peek struct CostCentreStack_, indexTable) ptr ccs_indexTable' <- peekIndexTable loopBreakers' costCenterCacheRef ccs_indexTable_ptr ccs_root_ptr <- (#peek struct CostCentreStack_, root) ptr ccs_root' <- peekCostCentreStack loopBreakers' costCenterCacheRef ccs_root_ptr ccs_depth' <- (#peek struct CostCentreStack_, depth) ptr ccs_scc_count' <- (#peek struct CostCentreStack_, scc_count) ptr ccs_selected' <- (#peek struct CostCentreStack_, selected) ptr ccs_time_ticks' <- (#peek struct CostCentreStack_, time_ticks) ptr ccs_mem_alloc' <- (#peek struct CostCentreStack_, mem_alloc) ptr ccs_inherited_alloc' <- (#peek struct CostCentreStack_, inherited_alloc) ptr ccs_inherited_ticks' <- 
(#peek struct CostCentreStack_, inherited_ticks) ptr return $ Just CostCentreStack { ccs_ccsID = ccs_ccsID', ccs_cc = ccs_cc', ccs_prevStack = ccs_prevStack', ccs_indexTable = ccs_indexTable', ccs_root = ccs_root', ccs_depth = ccs_depth', ccs_scc_count = ccs_scc_count', ccs_selected = ccs_selected', ccs_time_ticks = ccs_time_ticks', ccs_mem_alloc = ccs_mem_alloc', ccs_inherited_alloc = ccs_inherited_alloc', ccs_inherited_ticks = ccs_inherited_ticks' } where ptrAsInt = ptrToInt ptr peekCostCentre :: IORef (AddressMap CostCentre) -> Ptr costCentre -> IO CostCentre peekCostCentre costCenterCacheRef ptr = do costCenterCache <- readIORef costCenterCacheRef case IntMap.lookup ptrAsInt costCenterCache of (Just a) -> return a Nothing -> do cc_ccID' <- (#peek struct CostCentre_, ccID) ptr cc_label_ptr <- (#peek struct CostCentre_, label) ptr cc_label' <- peekCString cc_label_ptr cc_module_ptr <- (#peek struct CostCentre_, module) ptr cc_module' <- peekCString cc_module_ptr cc_srcloc_ptr <- (#peek struct CostCentre_, srcloc) ptr cc_srcloc' <- do if cc_srcloc_ptr == nullPtr then return Nothing else fmap Just (peekCString cc_srcloc_ptr) cc_mem_alloc' <- (#peek struct CostCentre_, mem_alloc) ptr cc_time_ticks' <- (#peek struct CostCentre_, time_ticks) ptr cc_is_caf' <- (#peek struct CostCentre_, is_caf) ptr cc_link_ptr <- (#peek struct CostCentre_, link) ptr cc_link' <- if cc_link_ptr == nullPtr then return Nothing else fmap Just (peekCostCentre costCenterCacheRef cc_link_ptr) let result = CostCentre { cc_ccID = cc_ccID', cc_label = cc_label', cc_module = cc_module', cc_srcloc = cc_srcloc', cc_mem_alloc = cc_mem_alloc', cc_time_ticks = cc_time_ticks', cc_is_caf = cc_is_caf', cc_link = cc_link' } writeIORef costCenterCacheRef (IntMap.insert ptrAsInt result costCenterCache) return result where ptrAsInt = ptrToInt ptr peekIndexTable :: AddressSet -> IORef (AddressMap CostCentre) -> Ptr indexTable -> IO (Maybe IndexTable) peekIndexTable _ _ ptr | ptr == nullPtr = return Nothing 
peekIndexTable loopBreakers costCenterCacheRef ptr = do it_cc_ptr <- (#peek struct IndexTable_, cc) ptr it_cc' <- peekCostCentre costCenterCacheRef it_cc_ptr it_ccs_ptr <- (#peek struct IndexTable_, ccs) ptr it_ccs' <- peekCostCentreStack loopBreakers costCenterCacheRef it_ccs_ptr it_next_ptr <- (#peek struct IndexTable_, next) ptr it_next' <- peekIndexTable loopBreakers costCenterCacheRef it_next_ptr it_back_edge' <- (#peek struct IndexTable_, back_edge) ptr return $ Just IndexTable { it_cc = it_cc', it_ccs = it_ccs', it_next = it_next', it_back_edge = it_back_edge' } -- | casts a @Ptr@ to an @Int@ ptrToInt :: Ptr a -> Int ptrToInt (Ptr a##) = I## (addr2Int## a##) #else import Prelude import Foreign import GHC.Exts.Heap.ProfInfo.Types peekStgTSOProfInfo :: (Ptr b -> IO (Maybe CostCentreStack)) -> Ptr a -> IO (Maybe StgTSOProfInfo) peekStgTSOProfInfo _ _ = return Nothing peekTopCCS :: Ptr a -> IO (Maybe CostCentreStack) peekTopCCS _ = return Nothing #endif ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/ProfInfo/Types.hs0000644000000000000000000000361114470055371024447 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} module GHC.Exts.Heap.ProfInfo.Types where import Prelude import Data.Word import GHC.Generics -- | This is a somewhat faithful representation of StgTSOProfInfo. See -- -- for more details on this data structure. newtype StgTSOProfInfo = StgTSOProfInfo { cccs :: Maybe CostCentreStack } deriving (Show, Generic, Eq, Ord) -- | This is a somewhat faithful representation of CostCentreStack. See -- -- for more details on this data structure. 
data CostCentreStack = CostCentreStack { ccs_ccsID :: Int, ccs_cc :: CostCentre, ccs_prevStack :: Maybe CostCentreStack, ccs_indexTable :: Maybe IndexTable, ccs_root :: Maybe CostCentreStack, ccs_depth :: Word, ccs_scc_count :: Word64, ccs_selected :: Word, ccs_time_ticks :: Word, ccs_mem_alloc :: Word64, ccs_inherited_alloc :: Word64, ccs_inherited_ticks :: Word } deriving (Show, Generic, Eq, Ord) -- | This is a somewhat faithful representation of CostCentre. See -- -- for more details on this data structure. data CostCentre = CostCentre { cc_ccID :: Int, cc_label :: String, cc_module :: String, cc_srcloc :: Maybe String, cc_mem_alloc :: Word64, cc_time_ticks :: Word, cc_is_caf :: Bool, cc_link :: Maybe CostCentre } deriving (Show, Generic, Eq, Ord) -- | This is a somewhat faithful representation of IndexTable. See -- -- for more details on this data structure. data IndexTable = IndexTable { it_cc :: CostCentre, it_ccs :: Maybe CostCentreStack, it_next :: Maybe IndexTable, it_back_edge :: Bool } deriving (Show, Generic, Eq, Ord) ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/GHC/Exts/Heap/Utils.hsc0000644000000000000000000001133614472375231023071 0ustar0000000000000000{-# LANGUAGE CPP, MagicHash #-} module GHC.Exts.Heap.Utils ( dataConNames ) where #include "Rts.h" import Prelude -- See note [Why do we import Prelude here?] import GHC.Exts.Heap.Constants import GHC.Exts.Heap.InfoTable import Data.Char import Data.List (intercalate) import Foreign import GHC.CString import GHC.Exts {- To find the string in the constructor's info table we need to consider the layout of info tables relative to the entry code for a closure. An info table can be next to the entry code for the closure, or it can be separate. The former (faster) is used in registerised versions of ghc, and the latter (portable) is for non-registerised versions. The diagrams below show where the string is to be found relative to the normal info table of the closure. 
1) Tables next to code: -------------- | | <- pointer to the start of the string -------------- | | <- the (start of the) info table structure | | | | -------------- | entry code | | .... | In this case the pointer to the start of the string can be found in the memory location _one word before_ the first entry in the normal info table. 2) Tables NOT next to code: -------------- info table structure -> | *------------------> -------------- | | | entry code | | | | .... | -------------- ptr to start of str -> | | -------------- In this case the pointer to the start of the string can be found in the memory location: info_table_ptr + info_table_size -} -- Given a ptr to an 'StgInfoTable' for a data constructor -- return (Package, Module, Name) dataConNames :: Ptr StgInfoTable -> IO (String, String, String) dataConNames ptr = do conDescAddress <- getConDescAddress pure $ parse conDescAddress where -- Retrieve the con_desc field address pointing to -- 'Package:Module.Name' string getConDescAddress :: IO (Ptr Word8) getConDescAddress #if defined(TABLES_NEXT_TO_CODE) = do offsetToString <- peek (ptr `plusPtr` negate wORD_SIZE) pure $ (ptr `plusPtr` stdInfoTableSizeB) `plusPtr` fromIntegral (offsetToString :: Int32) #else = peek $ intPtrToPtr $ ptrToIntPtr ptr + fromIntegral stdInfoTableSizeB #endif stdInfoTableSizeW :: Int -- The size of a standard info table varies with profiling/ticky etc, -- so we can't get it from Constants -- It must vary in sync with mkStdInfoTable stdInfoTableSizeW = size_fixed + size_prof where size_fixed = 2 -- layout, type ##if defined(PROFILING) size_prof = 2 ##else size_prof = 0 ##endif stdInfoTableSizeB :: Int stdInfoTableSizeB = stdInfoTableSizeW * wORD_SIZE -- parsing names is a little bit fiddly because we have a string in the form: -- pkg:A.B.C.foo, and we want to split it into three parts: ("pkg", "A.B.C", "foo"). -- Thus we split at the leftmost colon and the rightmost occurrence of the dot. 
-- It would be easier if the string was in the form pkg:A.B.C:foo, but alas -- this is not the conventional way of writing Haskell names. We stick with -- convention, even though it makes the parsing code more troublesome. -- Warning: this code assumes that the string is well formed. parse :: Ptr Word8 -> (String, String, String) parse (Ptr addr) = if not . all (>0) . fmap length $ [p,m,occ] then ([], [], input) else (p, m, occ) where input = unpackCStringUtf8## addr (p, rest1) = break (== ':') input (m, occ) = (intercalate "." $ reverse modWords, occWord) where (modWords, occWord) = if length rest1 < 1 -- XXXXXXXXx YUKX --then error "getConDescAddress:parse:length rest1 < 1" then parseModOcc [] [] else parseModOcc [] (tail rest1) -- We only look for dots if str could start with a module name, -- i.e. if it starts with an upper case character. -- Otherwise we might think that "X.:->" is the module name in -- "X.:->.+", whereas actually "X" is the module name and -- ":->.+" is a constructor name. parseModOcc :: [String] -> String -> ([String], String) parseModOcc acc str@(c : _) | isUpper c = case break (== '.') str of (top, []) -> (acc, top) (top, _:bot) -> parseModOcc (top : acc) bot parseModOcc acc str = (acc, str) ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/ForeignSrcLang.hs0000644000000000000000000000041714470055371022713 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} -- | See @GHC.LanguageExtensions@ for an explanation -- on why this is needed module GHC.ForeignSrcLang ( module GHC.ForeignSrcLang.Type ) where import Data.Binary import GHC.ForeignSrcLang.Type instance Binary ForeignSrcLang ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/GHC/ForeignSrcLang/Type.hs0000644000000000000000000000072614472375231024252 0ustar0000000000000000{-# LANGUAGE DeriveGeneric #-} module GHC.ForeignSrcLang.Type ( ForeignSrcLang(..) ) where import Prelude -- See note [Why do we import Prelude here?] 
import GHC.Generics (Generic) -- | Foreign formats supported by GHC via TH data ForeignSrcLang = LangC -- ^ C | LangCxx -- ^ C++ | LangObjc -- ^ Objective C | LangObjcxx -- ^ Objective C++ | LangAsm -- ^ Assembly language (.s) | RawObject -- ^ Object (.o) deriving (Eq, Show, Generic) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs.hs0000644000000000000000000001354314472400112016547 0ustar0000000000000000{-# OPTIONS_GHC -O0 #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section{Haskell abstract syntax definition} This module glues together the pieces of the Haskell abstract syntax, which is declared in the various \tr{Hs*} modules. This module, therefore, is almost nothing but re-exporting. -} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE FlexibleInstances #-} -- For deriving instance Data module GHC.Hs ( module Language.Haskell.Syntax, module GHC.Hs.Binds, module GHC.Hs.Decls, module GHC.Hs.Expr, module GHC.Hs.ImpExp, module GHC.Hs.Lit, module GHC.Hs.Pat, module GHC.Hs.Type, module GHC.Hs.Utils, module GHC.Hs.Doc, module GHC.Hs.Extension, module GHC.Parser.Annotation, Fixity, HsModule(..), AnnsModule(..), HsParsedModule(..) 
) where -- friends: import GHC.Prelude import GHC.Hs.Decls import GHC.Hs.Binds import GHC.Hs.Expr import GHC.Hs.ImpExp import GHC.Hs.Lit import Language.Haskell.Syntax import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Hs.Pat import GHC.Hs.Type import GHC.Hs.Utils import GHC.Hs.Doc import GHC.Hs.Instances () -- For Data instances -- others: import GHC.Utils.Outputable import GHC.Types.Fixity ( Fixity ) import GHC.Types.SrcLoc import GHC.Unit.Module ( ModuleName ) import GHC.Unit.Module.Warnings ( WarningTxt ) -- libraries: import Data.Data hiding ( Fixity ) -- | Haskell Module -- -- All we actually declare here is the top-level structure for a module. data HsModule = -- | 'GHC.Parser.Annotation.AnnKeywordId's -- -- - 'GHC.Parser.Annotation.AnnModule','GHC.Parser.Annotation.AnnWhere' -- -- - 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnClose' for explicit braces and semi around -- hsmodImports,hsmodDecls if this style is used. -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation HsModule { hsmodAnn :: EpAnn AnnsModule, hsmodLayout :: LayoutInfo, -- ^ Layout info for the module. -- For incomplete modules (e.g. the output of parseHeader), it is NoLayoutInfo. hsmodName :: Maybe (LocatedA ModuleName), -- ^ @Nothing@: \"module X where\" is omitted (in which case the next -- field is Nothing too) hsmodExports :: Maybe (LocatedL [LIE GhcPs]), -- ^ Export list -- -- - @Nothing@: export list omitted, so export everything -- -- - @Just []@: export /nothing/ -- -- - @Just [...]@: as you would expect... 
-- -- -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnOpen' -- ,'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation hsmodImports :: [LImportDecl GhcPs], -- ^ We snaffle interesting stuff out of the imported interfaces early -- on, adding that info to TyDecls/etc; so this list is often empty, -- downstream. hsmodDecls :: [LHsDecl GhcPs], -- ^ Type, class, value, and interface signature decls hsmodDeprecMessage :: Maybe (LocatedP (WarningTxt GhcPs)), -- ^ reason\/explanation for warning/deprecation of this module -- -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnOpen' -- ,'GHC.Parser.Annotation.AnnClose' -- -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation hsmodHaddockModHeader :: Maybe (LHsDoc GhcPs) -- ^ Haddock module info and description, unparsed -- -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnOpen' -- ,'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation } deriving instance Data HsModule data AnnsModule = AnnsModule { am_main :: [AddEpAnn], am_decls :: AnnList } deriving (Data, Eq) instance Outputable HsModule where ppr (HsModule _ _ Nothing _ imports decls _ mbDoc) = pprMaybeWithDoc mbDoc $ pp_nonnull imports $$ pp_nonnull decls ppr (HsModule _ _ (Just name) exports imports decls deprec mbDoc) = pprMaybeWithDoc mbDoc $ vcat [ case exports of Nothing -> pp_header (text "where") Just es -> vcat [ pp_header lparen, nest 8 (fsep (punctuate comma (map ppr (unLoc es)))), nest 4 (text ") where") ], pp_nonnull imports, pp_nonnull decls ] where pp_header rest = case deprec of Nothing -> pp_modname <+> rest Just d -> vcat [ pp_modname, ppr d, rest ] pp_modname = text "module" <+> ppr name pp_nonnull :: Outputable t => [t] -> SDoc pp_nonnull [] = empty pp_nonnull xs = vcat (map ppr xs) data HsParsedModule = HsParsedModule { hpm_module :: 
Located HsModule, hpm_src_files :: [FilePath] -- ^ extra source files (e.g. from #includes). The lexer collects -- these from '# ' pragmas, which the C preprocessor -- leaves behind. These files and their timestamps are stored in -- the .hi file, so that we can force recompilation if any of -- them change (#3589) } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Binds.hs0000644000000000000000000007324114472400112017607 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[HsBinds]{Abstract syntax: top-level bindings and signatures} Datatype for: @BindGroup@, @Bind@, @Sig@, @Bind@. 
-} module GHC.Hs.Binds ( module Language.Haskell.Syntax.Binds , module GHC.Hs.Binds ) where import GHC.Prelude import Language.Haskell.Syntax.Binds import {-# SOURCE #-} GHC.Hs.Expr ( pprExpr, pprFunBind, pprPatBind ) import {-# SOURCE #-} GHC.Hs.Pat (pprLPat ) import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Hs.Type import GHC.Tc.Types.Evidence import GHC.Core.Type import GHC.Types.Name.Set import GHC.Types.Basic import GHC.Types.SourceText import GHC.Types.SrcLoc as SrcLoc import GHC.Types.Var import GHC.Data.Bag import GHC.Data.BooleanFormula (LBooleanFormula) import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Function import Data.List (sortBy) import Data.Data (Data) {- ************************************************************************ * * \subsection{Bindings: @BindGroup@} * * ************************************************************************ Global bindings (where clauses) -} -- the ...LR datatypes are parametrized by two id types, -- one for the left and one for the right. 
type instance XHsValBinds (GhcPass pL) (GhcPass pR) = EpAnn AnnList type instance XHsIPBinds (GhcPass pL) (GhcPass pR) = EpAnn AnnList type instance XEmptyLocalBinds (GhcPass pL) (GhcPass pR) = NoExtField type instance XXHsLocalBindsLR (GhcPass pL) (GhcPass pR) = DataConCantHappen -- --------------------------------------------------------------------- -- Deal with ValBindsOut -- TODO: make this the only type for ValBinds data NHsValBindsLR idL = NValBinds [(RecFlag, LHsBinds idL)] [LSig GhcRn] type instance XValBinds (GhcPass pL) (GhcPass pR) = AnnSortKey type instance XXValBindsLR (GhcPass pL) pR = NHsValBindsLR (GhcPass pL) -- --------------------------------------------------------------------- type instance XFunBind (GhcPass pL) GhcPs = NoExtField type instance XFunBind (GhcPass pL) GhcRn = NameSet -- ^ After the renamer (but before the type-checker), the FunBind -- extension field contains the locally-bound free variables of this -- defn. See Note [Bind free vars] type instance XFunBind (GhcPass pL) GhcTc = HsWrapper -- ^ After the type-checker, the FunBind extension field contains a -- coercion from the type of the MatchGroup to the type of the Id. -- Example: -- -- @ -- f :: Int -> forall a. a -> a -- f x y = y -- @ -- -- Then the MatchGroup will have type (Int -> a' -> a') -- (with a free type variable a'). The coercion will take -- a CoreExpr of this type and convert it to a CoreExpr of -- type Int -> forall a'. a' -> a' -- Notice that the coercion captures the free a'. 
type instance XPatBind GhcPs (GhcPass pR) = EpAnn [AddEpAnn] type instance XPatBind GhcRn (GhcPass pR) = NameSet -- See Note [Bind free vars] type instance XPatBind GhcTc (GhcPass pR) = Type -- Type of the GRHSs type instance XVarBind (GhcPass pL) (GhcPass pR) = NoExtField type instance XPatSynBind (GhcPass pL) (GhcPass pR) = NoExtField type instance XXHsBindsLR GhcPs pR = DataConCantHappen type instance XXHsBindsLR GhcRn pR = DataConCantHappen type instance XXHsBindsLR GhcTc pR = AbsBinds type instance XPSB (GhcPass idL) GhcPs = EpAnn [AddEpAnn] type instance XPSB (GhcPass idL) GhcRn = NameSet -- Post renaming, FVs. See Note [Bind free vars] type instance XPSB (GhcPass idL) GhcTc = NameSet type instance XXPatSynBind (GhcPass idL) (GhcPass idR) = DataConCantHappen -- --------------------------------------------------------------------- -- | Typechecked, generalised bindings, used in the output to the type checker. -- See Note [AbsBinds]. data AbsBinds = AbsBinds { abs_tvs :: [TyVar], abs_ev_vars :: [EvVar], -- ^ Includes equality constraints -- | AbsBinds only gets used when idL = idR after renaming, -- but these need to be idL's for the collect... code in HsUtil -- to have the right type abs_exports :: [ABExport], -- | Evidence bindings -- Why a list? See "GHC.Tc.TyCl.Instance" -- Note [Typechecking plan for instance declarations] abs_ev_binds :: [TcEvBinds], -- | Typechecked user bindings abs_binds :: LHsBinds GhcTc, abs_sig :: Bool -- See Note [The abs_sig field of AbsBinds] } -- Consider (AbsBinds tvs ds [(ftvs, poly_f, mono_f) binds] -- -- Creates bindings for (polymorphic, overloaded) poly_f -- in terms of monomorphic, non-overloaded mono_f -- -- Invariants: -- 1. 'binds' binds mono_f -- 2. ftvs is a subset of tvs -- 3. 
ftvs includes all tyvars free in ds -- -- See Note [AbsBinds] -- | Abstraction Bindings Export data ABExport = ABE { abe_poly :: Id -- ^ Any INLINE pragma is attached to this Id , abe_mono :: Id , abe_wrap :: HsWrapper -- ^ See Note [ABExport wrapper] -- Shape: (forall abs_tvs. abs_ev_vars => abe_mono) ~ abe_poly , abe_prags :: TcSpecPrags -- ^ SPECIALISE pragmas } {- Note [AbsBinds] ~~~~~~~~~~~~~~~ The AbsBinds constructor is used in the output of the type checker, to record *typechecked* and *generalised* bindings. Specifically AbsBinds { abs_tvs = tvs , abs_ev_vars = [d1,d2] , abs_exports = [ABE { abe_poly = fp, abe_mono = fm , abe_wrap = fwrap } ABE { slly for g } ] , abs_ev_binds = DBINDS , abs_binds = BIND[fm,gm] } where 'BIND' binds the monomorphic Ids 'fm' and 'gm', means fp = fwrap [/\ tvs. \d1 d2. letrec { DBINDS ] [ ; BIND[fm,gm] } ] [ in fm ] gp = ...same again, with gm instead of fm The 'fwrap' is an impedance-matcher that typically does nothing; see Note [ABExport wrapper]. This is a pretty bad translation, because it duplicates all the bindings. So the desugarer tries to do a better job: fp = /\ [a,b] -> \ [d1,d2] -> case tp [a,b] [d1,d2] of (fm,gm) -> fm ..ditto for gp.. tp = /\ [a,b] -> \ [d1,d2] -> letrec { DBINDS; BIND } in (fm,gm) In general: * abs_tvs are the type variables over which the binding group is generalised * abs_ev_var are the evidence variables (usually dictionaries) over which the binding group is generalised * abs_binds are the monomorphic bindings * abs_ex_binds are the evidence bindings that wrap the abs_binds * abs_exports connects the monomorphic Ids bound by abs_binds with the polymorphic Ids bound by the AbsBinds itself. For example, consider a module M, with this top-level binding, where there is no type signature for M.reverse, M.reverse [] = [] M.reverse (x:xs) = M.reverse xs ++ [x] In Hindley-Milner, a recursive binding is typechecked with the *recursive* uses being *monomorphic*. 
So after typechecking *and* desugaring we will get something like this M.reverse :: forall a. [a] -> [a] = /\a. letrec reverse :: [a] -> [a] = \xs -> case xs of [] -> [] (x:xs) -> reverse xs ++ [x] in reverse Notice that 'M.reverse' is polymorphic as expected, but there is a local definition for plain 'reverse' which is *monomorphic*. The type variable 'a' scopes over the entire letrec. That's after desugaring. What about after type checking but before desugaring? That's where AbsBinds comes in. It looks like this: AbsBinds { abs_tvs = [a] , abs_ev_vars = [] , abs_exports = [ABE { abe_poly = M.reverse :: forall a. [a] -> [a], , abe_mono = reverse :: [a] -> [a]}] , abs_ev_binds = {} , abs_binds = { reverse :: [a] -> [a] = \xs -> case xs of [] -> [] (x:xs) -> reverse xs ++ [x] } } Here, * abs_tvs says what type variables are abstracted over the binding group, just 'a' in this case. * abs_binds is the *monomorphic* bindings of the group * abs_exports describes how to get the polymorphic Id 'M.reverse' from the monomorphic one 'reverse' Notice that the *original* function (the polymorphic one you thought you were defining) appears in the abe_poly field of the abs_exports. The bindings in abs_binds are for fresh, local, Ids with a *monomorphic* Id. If there is a group of mutually recursive (see Note [Polymorphic recursion]) functions without type signatures, we get one AbsBinds with the monomorphic versions of the bindings in abs_binds, and one element of abe_exports for each variable bound in the mutually recursive group. This is true even for pattern bindings. Example: (f,g) = (\x -> x, f) After type checking we get AbsBinds { abs_tvs = [a] , abs_exports = [ ABE { abe_poly = M.f :: forall a. a -> a , abe_mono = f :: a -> a } , ABE { abe_poly = M.g :: forall a. a -> a , abe_mono = g :: a -> a }] , abs_binds = { (f,g) = (\x -> x, f) } Note [Polymorphic recursion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider Rec { f x = ...(g ef)... ; g :: forall a. 
[a] -> [a] ; g y = ...(f eg)... } These bindings /are/ mutually recursive (f calls g, and g calls f). But we can use the type signature for g to break the recursion, like this: 1. Add g :: forall a. [a] -> [a] to the type environment 2. Typecheck the definition of f, all by itself, including generalising it to find its most general type, say f :: forall b. b -> b -> [b] 3. Extend the type environment with that type for f 4. Typecheck the definition of g, all by itself, checking that it has the type claimed by its signature Steps 2 and 4 each generate a separate AbsBinds, so we end up with Rec { AbsBinds { ...for f ... } ; AbsBinds { ...for g ... } } This approach allows both f and to call each other polymorphically, even though only g has a signature. We get an AbsBinds that encompasses multiple source-program bindings only when * Each binding in the group has at least one binder that lacks a user type signature * The group forms a strongly connected component Note [The abs_sig field of AbsBinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The abs_sig field supports a couple of special cases for bindings. Consider x :: Num a => (# a, a #) x = (# 3, 4 #) The general desugaring for AbsBinds would give x = /\a. \ ($dNum :: Num a) -> letrec xm = (# fromInteger $dNum 3, fromInteger $dNum 4 #) in xm But that has an illegal let-binding for an unboxed tuple. In this case we'd prefer to generate the (more direct) x = /\ a. \ ($dNum :: Num a) -> (# fromInteger $dNum 3, fromInteger $dNum 4 #) A similar thing happens with representation-polymorphic defns (#11405): undef :: forall (r :: RuntimeRep) (a :: TYPE r). HasCallStack => a undef = error "undef" Again, the vanilla desugaring gives a local let-binding for a representation-polymorphic (undefm :: a), which is illegal. But again we can desugar without a let: undef = /\ a. \ (d:HasCallStack) -> error a d "undef" The abs_sig field supports this direct desugaring, with no local let-binding. 
When abs_sig = True * the abs_binds is single FunBind * the abs_exports is a singleton * we have a complete type sig for binder and hence the abs_binds is non-recursive (it binds the mono_id but refers to the poly_id These properties are exploited in GHC.HsToCore.Binds.dsAbsBinds to generate code without a let-binding. Note [ABExport wrapper] ~~~~~~~~~~~~~~~~~~~~~~~ Consider (f,g) = (\x.x, \y.y) This ultimately desugars to something like this: tup :: forall a b. (a->a, b->b) tup = /\a b. (\x:a.x, \y:b.y) f :: forall a. a -> a f = /\a. case tup a Any of (fm::a->a,gm:Any->Any) -> fm ...similarly for g... The abe_wrap field deals with impedance-matching between (/\a b. case tup a b of { (f,g) -> f }) and the thing we really want, which may have fewer type variables. The action happens in GHC.Tc.Gen.Bind.mkExport. Note [Bind free vars] ~~~~~~~~~~~~~~~~~~~~~ The extension fields of FunBind, PatBind and PatSynBind at GhcRn records the free variables of the definition. It is used for the following purposes: a) Dependency analysis prior to type checking (see GHC.Tc.Gen.Bind.tc_group) b) Deciding whether we can do generalisation of the binding (see GHC.Tc.Gen.Bind.decideGeneralisationPlan) c) Deciding whether the binding can be used in static forms (see GHC.Tc.Gen.Expr.checkClosedInStaticForm for the HsStatic case and GHC.Tc.Gen.Bind.isClosedBndrGroup). 
Specifically, * it includes all free vars that are defined in this module (including top-level things and lexically scoped type variables) * it excludes imported vars; this is just to keep the set smaller * Before renaming, and after typechecking, the field is unused; it's just an error thunk -} instance (OutputableBndrId pl, OutputableBndrId pr) => Outputable (HsLocalBindsLR (GhcPass pl) (GhcPass pr)) where ppr (HsValBinds _ bs) = ppr bs ppr (HsIPBinds _ bs) = ppr bs ppr (EmptyLocalBinds _) = empty instance (OutputableBndrId pl, OutputableBndrId pr) => Outputable (HsValBindsLR (GhcPass pl) (GhcPass pr)) where ppr (ValBinds _ binds sigs) = pprDeclList (pprLHsBindsForUser binds sigs) ppr (XValBindsLR (NValBinds sccs sigs)) = getPprDebug $ \case -- Print with sccs showing True -> vcat (map ppr sigs) $$ vcat (map ppr_scc sccs) False -> pprDeclList (pprLHsBindsForUser (unionManyBags (map snd sccs)) sigs) where ppr_scc (rec_flag, binds) = pp_rec rec_flag <+> pprLHsBinds binds pp_rec Recursive = text "rec" pp_rec NonRecursive = text "nonrec" pprLHsBinds :: (OutputableBndrId idL, OutputableBndrId idR) => LHsBindsLR (GhcPass idL) (GhcPass idR) -> SDoc pprLHsBinds binds | isEmptyLHsBinds binds = empty | otherwise = pprDeclList (map ppr (bagToList binds)) pprLHsBindsForUser :: (OutputableBndrId idL, OutputableBndrId idR, OutputableBndrId id2) => LHsBindsLR (GhcPass idL) (GhcPass idR) -> [LSig (GhcPass id2)] -> [SDoc] -- pprLHsBindsForUser is different to pprLHsBinds because -- a) No braces: 'let' and 'where' include a list of HsBindGroups -- and we don't want several groups of bindings each -- with braces around -- b) Sort by location before printing -- c) Include signatures pprLHsBindsForUser binds sigs = map snd (sort_by_loc decls) where decls :: [(SrcSpan, SDoc)] decls = [(locA loc, ppr sig) | L loc sig <- sigs] ++ [(locA loc, ppr bind) | L loc bind <- bagToList binds] sort_by_loc decls = sortBy (SrcLoc.leftmost_smallest `on` fst) decls pprDeclList :: [SDoc] -> SDoc -- 
Braces with a space -- Print a bunch of declarations -- One could choose { d1; d2; ... }, using 'sep' -- or d1 -- d2 -- .. -- using vcat -- At the moment we chose the latter -- Also we do the 'pprDeeperList' thing. pprDeclList ds = pprDeeperList vcat ds ------------ emptyLocalBinds :: HsLocalBindsLR (GhcPass a) (GhcPass b) emptyLocalBinds = EmptyLocalBinds noExtField eqEmptyLocalBinds :: HsLocalBindsLR a b -> Bool eqEmptyLocalBinds (EmptyLocalBinds _) = True eqEmptyLocalBinds _ = False isEmptyValBinds :: HsValBindsLR (GhcPass a) (GhcPass b) -> Bool isEmptyValBinds (ValBinds _ ds sigs) = isEmptyLHsBinds ds && null sigs isEmptyValBinds (XValBindsLR (NValBinds ds sigs)) = null ds && null sigs emptyValBindsIn, emptyValBindsOut :: HsValBindsLR (GhcPass a) (GhcPass b) emptyValBindsIn = ValBinds NoAnnSortKey emptyBag [] emptyValBindsOut = XValBindsLR (NValBinds [] []) emptyLHsBinds :: LHsBindsLR (GhcPass idL) idR emptyLHsBinds = emptyBag isEmptyLHsBinds :: LHsBindsLR (GhcPass idL) idR -> Bool isEmptyLHsBinds = isEmptyBag ------------ plusHsValBinds :: HsValBinds (GhcPass a) -> HsValBinds (GhcPass a) -> HsValBinds(GhcPass a) plusHsValBinds (ValBinds _ ds1 sigs1) (ValBinds _ ds2 sigs2) = ValBinds NoAnnSortKey (ds1 `unionBags` ds2) (sigs1 ++ sigs2) plusHsValBinds (XValBindsLR (NValBinds ds1 sigs1)) (XValBindsLR (NValBinds ds2 sigs2)) = XValBindsLR (NValBinds (ds1 ++ ds2) (sigs1 ++ sigs2)) plusHsValBinds _ _ = panic "HsBinds.plusHsValBinds" instance (OutputableBndrId pl, OutputableBndrId pr) => Outputable (HsBindLR (GhcPass pl) (GhcPass pr)) where ppr mbind = ppr_monobind mbind ppr_monobind :: forall idL idR. 
(OutputableBndrId idL, OutputableBndrId idR) => HsBindLR (GhcPass idL) (GhcPass idR) -> SDoc ppr_monobind (PatBind { pat_lhs = pat, pat_rhs = grhss }) = pprPatBind pat grhss ppr_monobind (VarBind { var_id = var, var_rhs = rhs }) = sep [pprBndr CasePatBind var, nest 2 $ equals <+> pprExpr (unLoc rhs)] ppr_monobind (FunBind { fun_id = fun, fun_matches = matches, fun_tick = ticks, fun_ext = wrap }) = pprTicks empty (if null ticks then empty else text "-- ticks = " <> ppr ticks) $$ whenPprDebug (pprBndr LetBind (unLoc fun)) $$ pprFunBind matches $$ whenPprDebug (pprIfTc @idR $ ppr wrap) ppr_monobind (PatSynBind _ psb) = ppr psb ppr_monobind (XHsBindsLR b) = case ghcPass @idL of #if __GLASGOW_HASKELL__ <= 900 GhcPs -> dataConCantHappen b GhcRn -> dataConCantHappen b #endif GhcTc -> ppr_absbinds b where ppr_absbinds (AbsBinds { abs_tvs = tyvars, abs_ev_vars = dictvars , abs_exports = exports, abs_binds = val_binds , abs_ev_binds = ev_binds }) = sdocOption sdocPrintTypecheckerElaboration $ \case False -> pprLHsBinds val_binds True -> -- Show extra information (bug number: #10662) hang (text "AbsBinds" <+> sep [ brackets (interpp'SP tyvars) , brackets (interpp'SP dictvars) ]) 2 $ braces $ vcat [ text "Exports:" <+> brackets (sep (punctuate comma (map ppr exports))) , text "Exported types:" <+> vcat [pprBndr LetBind (abe_poly ex) | ex <- exports] , text "Binds:" <+> pprLHsBinds val_binds , pprIfTc @idR (text "Evidence:" <+> ppr ev_binds) ] instance Outputable ABExport where ppr (ABE { abe_wrap = wrap, abe_poly = gbl, abe_mono = lcl, abe_prags = prags }) = vcat [ sep [ ppr gbl, nest 2 (text "<=" <+> ppr lcl) ] , nest 2 (pprTcSpecPrags prags) , ppr $ nest 2 (text "wrap:" <+> ppr wrap) ] instance (OutputableBndrId l, OutputableBndrId r) => Outputable (PatSynBind (GhcPass l) (GhcPass r)) where ppr (PSB{ psb_id = (L _ psyn), psb_args = details, psb_def = pat, psb_dir = dir }) = ppr_lhs <+> ppr_rhs where ppr_lhs = text "pattern" <+> ppr_details ppr_simple syntax = syntax <+> 
pprLPat pat ppr_details = case details of InfixCon v1 v2 -> hsep [ppr_v v1, pprInfixOcc psyn, ppr_v v2] where ppr_v v = case ghcPass @r of GhcPs -> ppr v GhcRn -> ppr v GhcTc -> ppr v PrefixCon _ vs -> hsep (pprPrefixOcc psyn : map ppr_v vs) where ppr_v v = case ghcPass @r of GhcPs -> ppr v GhcRn -> ppr v GhcTc -> ppr v RecCon vs -> pprPrefixOcc psyn <> braces (sep (punctuate comma (map ppr_v vs))) where ppr_v v = case ghcPass @r of GhcPs -> ppr v GhcRn -> ppr v GhcTc -> ppr v ppr_rhs = case dir of Unidirectional -> ppr_simple (text "<-") ImplicitBidirectional -> ppr_simple equals ExplicitBidirectional mg -> ppr_simple (text "<-") <+> text "where" $$ (nest 2 $ pprFunBind mg) pprTicks :: SDoc -> SDoc -> SDoc -- Print stuff about ticks only when -dppr-debug is on, to avoid -- them appearing in error messages (from the desugarer); see # 3263 -- Also print ticks in dumpStyle, so that -ddump-hpc actually does -- something useful. pprTicks pp_no_debug pp_when_debug = getPprStyle $ \sty -> getPprDebug $ \debug -> if debug || dumpStyle sty then pp_when_debug else pp_no_debug {- ************************************************************************ * * Implicit parameter bindings * * ************************************************************************ -} type instance XIPBinds GhcPs = NoExtField type instance XIPBinds GhcRn = NoExtField type instance XIPBinds GhcTc = TcEvBinds -- binds uses of the -- implicit parameters type instance XXHsIPBinds (GhcPass p) = DataConCantHappen isEmptyIPBindsPR :: HsIPBinds (GhcPass p) -> Bool isEmptyIPBindsPR (IPBinds _ is) = null is isEmptyIPBindsTc :: HsIPBinds GhcTc -> Bool isEmptyIPBindsTc (IPBinds ds is) = null is && isEmptyTcEvBinds ds -- EPA annotations in GhcPs, dictionary Id in GhcTc type instance XCIPBind GhcPs = EpAnn [AddEpAnn] type instance XCIPBind GhcRn = NoExtField type instance XCIPBind GhcTc = Id type instance XXIPBind (GhcPass p) = DataConCantHappen instance OutputableBndrId p => Outputable (HsIPBinds (GhcPass p)) 
where ppr (IPBinds ds bs) = pprDeeperList vcat (map ppr bs) $$ whenPprDebug (pprIfTc @p $ ppr ds) instance OutputableBndrId p => Outputable (IPBind (GhcPass p)) where ppr (IPBind x (L _ ip) rhs) = name <+> equals <+> pprExpr (unLoc rhs) where name = case ghcPass @p of GhcPs -> pprBndr LetBind ip GhcRn -> pprBndr LetBind ip GhcTc -> pprBndr LetBind x {- ************************************************************************ * * \subsection{@Sig@: type signatures and value-modifying user pragmas} * * ************************************************************************ -} type instance XTypeSig (GhcPass p) = EpAnn AnnSig type instance XPatSynSig (GhcPass p) = EpAnn AnnSig type instance XClassOpSig (GhcPass p) = EpAnn AnnSig type instance XIdSig (GhcPass p) = NoExtField -- No anns, generated type instance XFixSig (GhcPass p) = EpAnn [AddEpAnn] type instance XInlineSig (GhcPass p) = EpAnn [AddEpAnn] type instance XSpecSig (GhcPass p) = EpAnn [AddEpAnn] type instance XSpecInstSig (GhcPass p) = EpAnn [AddEpAnn] type instance XMinimalSig (GhcPass p) = EpAnn [AddEpAnn] type instance XSCCFunSig (GhcPass p) = EpAnn [AddEpAnn] type instance XCompleteMatchSig (GhcPass p) = EpAnn [AddEpAnn] type instance XXSig (GhcPass p) = DataConCantHappen type instance XFixitySig (GhcPass p) = NoExtField type instance XXFixitySig (GhcPass p) = DataConCantHappen data AnnSig = AnnSig { asDcolon :: AddEpAnn, -- Not an EpaAnchor to capture unicode option asRest :: [AddEpAnn] } deriving Data -- | Type checker Specialisation Pragmas -- -- 'TcSpecPrags' conveys @SPECIALISE@ pragmas from the type checker to the desugarer data TcSpecPrags = IsDefaultMethod -- ^ Super-specialised: a default method should -- be macro-expanded at every call site | SpecPrags [LTcSpecPrag] deriving Data -- | Located Type checker Specification Pragmas type LTcSpecPrag = Located TcSpecPrag -- | Type checker Specification Pragma data TcSpecPrag = SpecPrag Id HsWrapper InlinePragma -- ^ The Id to be specialised, a wrapper 
that specialises the -- polymorphic function, and inlining spec for the specialised function deriving Data noSpecPrags :: TcSpecPrags noSpecPrags = SpecPrags [] hasSpecPrags :: TcSpecPrags -> Bool hasSpecPrags (SpecPrags ps) = not (null ps) hasSpecPrags IsDefaultMethod = False isDefaultMethod :: TcSpecPrags -> Bool isDefaultMethod IsDefaultMethod = True isDefaultMethod (SpecPrags {}) = False instance OutputableBndrId p => Outputable (Sig (GhcPass p)) where ppr sig = ppr_sig sig ppr_sig :: forall p. OutputableBndrId p => Sig (GhcPass p) -> SDoc ppr_sig (TypeSig _ vars ty) = pprVarSig (map unLoc vars) (ppr ty) ppr_sig (ClassOpSig _ is_deflt vars ty) | is_deflt = text "default" <+> pprVarSig (map unLoc vars) (ppr ty) | otherwise = pprVarSig (map unLoc vars) (ppr ty) ppr_sig (IdSig _ id) = pprVarSig [id] (ppr (varType id)) ppr_sig (FixSig _ fix_sig) = ppr fix_sig ppr_sig (SpecSig _ var ty inl@(InlinePragma { inl_inline = spec })) = pragSrcBrackets (inlinePragmaSource inl) pragmaSrc (pprSpec (unLoc var) (interpp'SP ty) inl) where pragmaSrc = case spec of NoUserInlinePrag -> "{-# " ++ extractSpecPragName (inl_src inl) _ -> "{-# " ++ extractSpecPragName (inl_src inl) ++ "_INLINE" ppr_sig (InlineSig _ var inl) = pragSrcBrackets (inlinePragmaSource inl) "{-# INLINE" (pprInline inl <+> pprPrefixOcc (unLoc var)) ppr_sig (SpecInstSig _ src ty) = pragSrcBrackets src "{-# pragma" (text "instance" <+> ppr ty) ppr_sig (MinimalSig _ src bf) = pragSrcBrackets src "{-# MINIMAL" (pprMinimalSig bf) ppr_sig (PatSynSig _ names sig_ty) = text "pattern" <+> pprVarSig (map unLoc names) (ppr sig_ty) ppr_sig (SCCFunSig _ src fn mlabel) = pragSrcBrackets src "{-# SCC" (ppr_fn <+> maybe empty ppr mlabel ) where ppr_fn = case ghcPass @p of GhcPs -> ppr fn GhcRn -> ppr fn GhcTc -> ppr fn ppr_sig (CompleteMatchSig _ src cs mty) = pragSrcBrackets src "{-# COMPLETE" ((hsep (punctuate comma (map ppr_n (unLoc cs)))) <+> opt_sig) where opt_sig = maybe empty ((\t -> dcolon <+> ppr t) . 
unLoc) mty ppr_n n = case ghcPass @p of GhcPs -> ppr n GhcRn -> ppr n GhcTc -> ppr n instance OutputableBndrId p => Outputable (FixitySig (GhcPass p)) where ppr (FixitySig _ names fixity) = sep [ppr fixity, pprops] where pprops = hsep $ punctuate comma (map (pprInfixOcc . unLoc) names) pragBrackets :: SDoc -> SDoc pragBrackets doc = text "{-#" <+> doc <+> text "#-}" -- | Using SourceText in case the pragma was spelled differently or used mixed -- case pragSrcBrackets :: SourceText -> String -> SDoc -> SDoc pragSrcBrackets (SourceText src) _ doc = text src <+> doc <+> text "#-}" pragSrcBrackets NoSourceText alt doc = text alt <+> doc <+> text "#-}" pprVarSig :: (OutputableBndr id) => [id] -> SDoc -> SDoc pprVarSig vars pp_ty = sep [pprvars <+> dcolon, nest 2 pp_ty] where pprvars = hsep $ punctuate comma (map pprPrefixOcc vars) pprSpec :: (OutputableBndr id) => id -> SDoc -> InlinePragma -> SDoc pprSpec var pp_ty inl = pp_inl <+> pprVarSig [var] pp_ty where pp_inl | isDefaultInlinePragma inl = empty | otherwise = pprInline inl pprTcSpecPrags :: TcSpecPrags -> SDoc pprTcSpecPrags IsDefaultMethod = text "" pprTcSpecPrags (SpecPrags ps) = vcat (map (ppr . 
unLoc) ps) instance Outputable TcSpecPrag where ppr (SpecPrag var _ inl) = ppr (extractSpecPragName $ inl_src inl) <+> pprSpec var (text "") inl pprMinimalSig :: (OutputableBndr name) => LBooleanFormula (GenLocated l name) -> SDoc pprMinimalSig (L _ bf) = ppr (fmap unLoc bf) {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (HsBindLR (GhcPass idL) (GhcPass idR)) = SrcSpanAnnA type instance Anno (IPBind (GhcPass p)) = SrcSpanAnnA type instance Anno (Sig (GhcPass p)) = SrcSpanAnnA -- For CompleteMatchSig type instance Anno [LocatedN RdrName] = SrcSpan type instance Anno [LocatedN Name] = SrcSpan type instance Anno [LocatedN Id] = SrcSpan type instance Anno (FixitySig (GhcPass p)) = SrcSpanAnnA type instance Anno StringLiteral = SrcAnn NoEpAnns type instance Anno (LocatedN RdrName) = SrcSpan type instance Anno (LocatedN Name) = SrcSpan type instance Anno (LocatedN Id) = SrcSpan ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Decls.hs0000644000000000000000000013501314472400112017576 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Abstract syntax of global declarations. -- -- Definitions for: @SynDecl@ and @ConDecl@, @ClassDecl@, -- @InstDecl@, @DefaultDecl@ and @ForeignDecl@. 
module GHC.Hs.Decls ( -- * Toplevel declarations HsDecl(..), LHsDecl, HsDataDefn(..), HsDeriving, LHsFunDep, HsDerivingClause(..), LHsDerivingClause, DerivClauseTys(..), LDerivClauseTys, NewOrData(..), newOrDataToFlavour, StandaloneKindSig(..), LStandaloneKindSig, standaloneKindSigName, -- ** Class or type declarations TyClDecl(..), LTyClDecl, DataDeclRn(..), TyClGroup(..), tyClGroupTyClDecls, tyClGroupInstDecls, tyClGroupRoleDecls, tyClGroupKindSigs, isClassDecl, isDataDecl, isSynDecl, tcdName, isFamilyDecl, isTypeFamilyDecl, isDataFamilyDecl, isOpenTypeFamilyInfo, isClosedTypeFamilyInfo, tyFamInstDeclName, tyFamInstDeclLName, countTyClDecls, pprTyClDeclFlavour, tyClDeclLName, tyClDeclTyVars, hsDeclHasCusk, famResultKindSignature, FamilyDecl(..), LFamilyDecl, FunDep(..), -- ** Instance declarations InstDecl(..), LInstDecl, FamilyInfo(..), TyFamInstDecl(..), LTyFamInstDecl, instDeclDataFamInsts, TyFamDefltDecl, LTyFamDefltDecl, DataFamInstDecl(..), LDataFamInstDecl, pprDataFamInstFlavour, pprTyFamInstDecl, pprHsFamInstLHS, FamEqn(..), TyFamInstEqn, LTyFamInstEqn, HsTyPats, LClsInstDecl, ClsInstDecl(..), -- ** Standalone deriving declarations DerivDecl(..), LDerivDecl, -- ** Deriving strategies DerivStrategy(..), LDerivStrategy, derivStrategyName, foldDerivStrategy, mapDerivStrategy, XViaStrategyPs(..), -- ** @RULE@ declarations LRuleDecls,RuleDecls(..),RuleDecl(..),LRuleDecl,HsRuleRn(..), HsRuleAnn(..), RuleBndr(..),LRuleBndr, collectRuleBndrSigTys, flattenRuleDecls, pprFullRuleName, -- ** @default@ declarations DefaultDecl(..), LDefaultDecl, -- ** Template haskell declaration splice SpliceExplicitFlag(..), SpliceDecl(..), LSpliceDecl, -- ** Foreign function interface declarations ForeignDecl(..), LForeignDecl, ForeignImport(..), ForeignExport(..), CImportSpec(..), -- ** Data-constructor declarations ConDecl(..), LConDecl, HsConDeclH98Details, HsConDeclGADTDetails(..), hsConDeclTheta, getConNames, getRecConArgs_maybe, -- ** Document comments DocDecl(..), LDocDecl, 
docDeclDoc, -- ** Deprecations WarnDecl(..), LWarnDecl, WarnDecls(..), LWarnDecls, -- ** Annotations AnnDecl(..), LAnnDecl, AnnProvenance(..), annProvenanceName_maybe, -- ** Role annotations RoleAnnotDecl(..), LRoleAnnotDecl, roleAnnotDeclName, -- ** Injective type families FamilyResultSig(..), LFamilyResultSig, InjectivityAnn(..), LInjectivityAnn, resultVariableName, familyDeclLName, familyDeclName, -- * Grouping HsGroup(..), emptyRdrGroup, emptyRnGroup, appendGroups, hsGroupInstDecls, hsGroupTopLevelFixitySigs, partitionBindsAndSigs, ) where -- friends: import GHC.Prelude import Language.Haskell.Syntax.Decls import {-# SOURCE #-} GHC.Hs.Expr ( pprExpr, pprSpliceDecl ) -- Because Expr imports Decls via HsBracket import GHC.Hs.Binds import GHC.Hs.Type import GHC.Hs.Doc import GHC.Types.Basic import GHC.Core.Coercion import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Types.Name import GHC.Types.Name.Set import GHC.Types.Fixity -- others: import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Core.Type import GHC.Types.ForeignCall import GHC.Data.Bag import GHC.Data.Maybe import Data.Data (Data) {- ************************************************************************ * * \subsection[HsDecl]{Declarations} * * ************************************************************************ -} type instance XTyClD (GhcPass _) = NoExtField type instance XInstD (GhcPass _) = NoExtField type instance XDerivD (GhcPass _) = NoExtField type instance XValD (GhcPass _) = NoExtField type instance XSigD (GhcPass _) = NoExtField type instance XKindSigD (GhcPass _) = NoExtField type instance XDefD (GhcPass _) = NoExtField type instance XForD (GhcPass _) = NoExtField type instance XWarningD (GhcPass _) = NoExtField type instance XAnnD (GhcPass _) = NoExtField type instance XRuleD (GhcPass _) = NoExtField type instance XSpliceD (GhcPass _) = NoExtField type instance XDocD 
(GhcPass _) = NoExtField type instance XRoleAnnotD (GhcPass _) = NoExtField type instance XXHsDecl (GhcPass _) = DataConCantHappen -- | Partition a list of HsDecls into function/pattern bindings, signatures, -- type family declarations, type family instances, and documentation comments. -- -- Panics when given a declaration that cannot be put into any of the output -- groups. -- -- The primary use of this function is to implement -- 'GHC.Parser.PostProcess.cvBindsAndSigs'. partitionBindsAndSigs :: [LHsDecl GhcPs] -> (LHsBinds GhcPs, [LSig GhcPs], [LFamilyDecl GhcPs], [LTyFamInstDecl GhcPs], [LDataFamInstDecl GhcPs], [LDocDecl GhcPs]) partitionBindsAndSigs = go where go [] = (emptyBag, [], [], [], [], []) go ((L l decl) : ds) = let (bs, ss, ts, tfis, dfis, docs) = go ds in case decl of ValD _ b -> (L l b `consBag` bs, ss, ts, tfis, dfis, docs) SigD _ s -> (bs, L l s : ss, ts, tfis, dfis, docs) TyClD _ (FamDecl _ t) -> (bs, ss, L l t : ts, tfis, dfis, docs) InstD _ (TyFamInstD { tfid_inst = tfi }) -> (bs, ss, ts, L l tfi : tfis, dfis, docs) InstD _ (DataFamInstD { dfid_inst = dfi }) -> (bs, ss, ts, tfis, L l dfi : dfis, docs) DocD _ d -> (bs, ss, ts, tfis, dfis, L l d : docs) _ -> pprPanic "partitionBindsAndSigs" (ppr decl) type instance XCHsGroup (GhcPass _) = NoExtField type instance XXHsGroup (GhcPass _) = DataConCantHappen emptyGroup, emptyRdrGroup, emptyRnGroup :: HsGroup (GhcPass p) emptyRdrGroup = emptyGroup { hs_valds = emptyValBindsIn } emptyRnGroup = emptyGroup { hs_valds = emptyValBindsOut } emptyGroup = HsGroup { hs_ext = noExtField, hs_tyclds = [], hs_derivds = [], hs_fixds = [], hs_defds = [], hs_annds = [], hs_fords = [], hs_warnds = [], hs_ruleds = [], hs_valds = error "emptyGroup hs_valds: Can't happen", hs_splcds = [], hs_docs = [] } -- | The fixity signatures for each top-level declaration and class method -- in an 'HsGroup'. 
-- See Note [Top-level fixity signatures in an HsGroup] hsGroupTopLevelFixitySigs :: HsGroup (GhcPass p) -> [LFixitySig (GhcPass p)] hsGroupTopLevelFixitySigs (HsGroup{ hs_fixds = fixds, hs_tyclds = tyclds }) = fixds ++ cls_fixds where cls_fixds = [ L loc sig | L _ ClassDecl{tcdSigs = sigs} <- tyClGroupTyClDecls tyclds , L loc (FixSig _ sig) <- sigs ] appendGroups :: HsGroup (GhcPass p) -> HsGroup (GhcPass p) -> HsGroup (GhcPass p) appendGroups HsGroup { hs_valds = val_groups1, hs_splcds = spliceds1, hs_tyclds = tyclds1, hs_derivds = derivds1, hs_fixds = fixds1, hs_defds = defds1, hs_annds = annds1, hs_fords = fords1, hs_warnds = warnds1, hs_ruleds = rulds1, hs_docs = docs1 } HsGroup { hs_valds = val_groups2, hs_splcds = spliceds2, hs_tyclds = tyclds2, hs_derivds = derivds2, hs_fixds = fixds2, hs_defds = defds2, hs_annds = annds2, hs_fords = fords2, hs_warnds = warnds2, hs_ruleds = rulds2, hs_docs = docs2 } = HsGroup { hs_ext = noExtField, hs_valds = val_groups1 `plusHsValBinds` val_groups2, hs_splcds = spliceds1 ++ spliceds2, hs_tyclds = tyclds1 ++ tyclds2, hs_derivds = derivds1 ++ derivds2, hs_fixds = fixds1 ++ fixds2, hs_annds = annds1 ++ annds2, hs_defds = defds1 ++ defds2, hs_fords = fords1 ++ fords2, hs_warnds = warnds1 ++ warnds2, hs_ruleds = rulds1 ++ rulds2, hs_docs = docs1 ++ docs2 } instance (OutputableBndrId p) => Outputable (HsDecl (GhcPass p)) where ppr (TyClD _ dcl) = ppr dcl ppr (ValD _ binds) = ppr binds ppr (DefD _ def) = ppr def ppr (InstD _ inst) = ppr inst ppr (DerivD _ deriv) = ppr deriv ppr (ForD _ fd) = ppr fd ppr (SigD _ sd) = ppr sd ppr (KindSigD _ ksd) = ppr ksd ppr (RuleD _ rd) = ppr rd ppr (WarningD _ wd) = ppr wd ppr (AnnD _ ad) = ppr ad ppr (SpliceD _ dd) = ppr dd ppr (DocD _ doc) = ppr doc ppr (RoleAnnotD _ ra) = ppr ra instance (OutputableBndrId p) => Outputable (HsGroup (GhcPass p)) where ppr (HsGroup { hs_valds = val_decls, hs_tyclds = tycl_decls, hs_derivds = deriv_decls, hs_fixds = fix_decls, hs_warnds = deprec_decls, hs_annds = 
ann_decls, hs_fords = foreign_decls, hs_defds = default_decls, hs_ruleds = rule_decls }) = vcat_mb empty [ppr_ds fix_decls, ppr_ds default_decls, ppr_ds deprec_decls, ppr_ds ann_decls, ppr_ds rule_decls, if isEmptyValBinds val_decls then Nothing else Just (ppr val_decls), ppr_ds (tyClGroupRoleDecls tycl_decls), ppr_ds (tyClGroupKindSigs tycl_decls), ppr_ds (tyClGroupTyClDecls tycl_decls), ppr_ds (tyClGroupInstDecls tycl_decls), ppr_ds deriv_decls, ppr_ds foreign_decls] where ppr_ds :: Outputable a => [a] -> Maybe SDoc ppr_ds [] = Nothing ppr_ds ds = Just (vcat (map ppr ds)) vcat_mb :: SDoc -> [Maybe SDoc] -> SDoc -- Concatenate vertically with white-space between non-blanks vcat_mb _ [] = empty vcat_mb gap (Nothing : ds) = vcat_mb gap ds vcat_mb gap (Just d : ds) = gap $$ d $$ vcat_mb blankLine ds type instance XSpliceDecl (GhcPass _) = NoExtField type instance XXSpliceDecl (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (SpliceDecl (GhcPass p)) where ppr (SpliceDecl _ (L _ e) f) = pprSpliceDecl e f {- ************************************************************************ * * Type and class declarations * * ************************************************************************ -} type instance XFamDecl (GhcPass _) = NoExtField type instance XSynDecl GhcPs = EpAnn [AddEpAnn] type instance XSynDecl GhcRn = NameSet -- FVs type instance XSynDecl GhcTc = NameSet -- FVs type instance XDataDecl GhcPs = EpAnn [AddEpAnn] type instance XDataDecl GhcRn = DataDeclRn type instance XDataDecl GhcTc = DataDeclRn type instance XClassDecl GhcPs = (EpAnn [AddEpAnn], AnnSortKey, LayoutInfo) -- See Note [Class LayoutInfo] -- TODO:AZ:tidy up AnnSortKey above type instance XClassDecl GhcRn = NameSet -- FVs type instance XClassDecl GhcTc = NameSet -- FVs type instance XXTyClDecl (GhcPass _) = DataConCantHappen type instance XCTyFamInstDecl (GhcPass _) = EpAnn [AddEpAnn] type instance XXTyFamInstDecl (GhcPass _) = DataConCantHappen -- Dealing with names 
tyFamInstDeclName :: Anno (IdGhcP p) ~ SrcSpanAnnN => TyFamInstDecl (GhcPass p) -> IdP (GhcPass p) tyFamInstDeclName = unLoc . tyFamInstDeclLName tyFamInstDeclLName :: Anno (IdGhcP p) ~ SrcSpanAnnN => TyFamInstDecl (GhcPass p) -> LocatedN (IdP (GhcPass p)) tyFamInstDeclLName (TyFamInstDecl { tfid_eqn = FamEqn { feqn_tycon = ln }}) = ln tyClDeclLName :: Anno (IdGhcP p) ~ SrcSpanAnnN => TyClDecl (GhcPass p) -> LocatedN (IdP (GhcPass p)) tyClDeclLName (FamDecl { tcdFam = fd }) = familyDeclLName fd tyClDeclLName (SynDecl { tcdLName = ln }) = ln tyClDeclLName (DataDecl { tcdLName = ln }) = ln tyClDeclLName (ClassDecl { tcdLName = ln }) = ln -- FIXME: tcdName is commonly used by both GHC and third-party tools, so it -- needs to be polymorphic in the pass tcdName :: Anno (IdGhcP p) ~ SrcSpanAnnN => TyClDecl (GhcPass p) -> IdP (GhcPass p) tcdName = unLoc . tyClDeclLName -- | Does this declaration have a complete, user-supplied kind signature? -- See Note [CUSKs: complete user-supplied kind signatures] hsDeclHasCusk :: TyClDecl GhcRn -> Bool hsDeclHasCusk (FamDecl { tcdFam = FamilyDecl { fdInfo = fam_info , fdTyVars = tyvars , fdResultSig = L _ resultSig } }) = case fam_info of ClosedTypeFamily {} -> hsTvbAllKinded tyvars && isJust (famResultKindSignature resultSig) _ -> True -- Un-associated open type/data families have CUSKs hsDeclHasCusk (SynDecl { tcdTyVars = tyvars, tcdRhs = rhs }) = hsTvbAllKinded tyvars && isJust (hsTyKindSig rhs) hsDeclHasCusk (DataDecl { tcdDExt = DataDeclRn { tcdDataCusk = cusk }}) = cusk hsDeclHasCusk (ClassDecl { tcdTyVars = tyvars }) = hsTvbAllKinded tyvars -- Pretty-printing TyClDecl -- ~~~~~~~~~~~~~~~~~~~~~~~~ instance (OutputableBndrId p) => Outputable (TyClDecl (GhcPass p)) where ppr (FamDecl { tcdFam = decl }) = ppr decl ppr (SynDecl { tcdLName = ltycon, tcdTyVars = tyvars, tcdFixity = fixity , tcdRhs = rhs }) = hang (text "type" <+> pp_vanilla_decl_head ltycon tyvars fixity Nothing <+> equals) 4 (ppr rhs) ppr (DataDecl { tcdLName = 
ltycon, tcdTyVars = tyvars, tcdFixity = fixity , tcdDataDefn = defn }) = pp_data_defn (pp_vanilla_decl_head ltycon tyvars fixity) defn ppr (ClassDecl {tcdCtxt = context, tcdLName = lclas, tcdTyVars = tyvars, tcdFixity = fixity, tcdFDs = fds, tcdSigs = sigs, tcdMeths = methods, tcdATs = ats, tcdATDefs = at_defs}) | null sigs && isEmptyBag methods && null ats && null at_defs -- No "where" part = top_matter | otherwise -- Laid out = vcat [ top_matter <+> text "where" , nest 2 $ pprDeclList (map (ppr . unLoc) ats ++ map (pprTyFamDefltDecl . unLoc) at_defs ++ pprLHsBindsForUser methods sigs) ] where top_matter = text "class" <+> pp_vanilla_decl_head lclas tyvars fixity context <+> pprFundeps (map unLoc fds) instance OutputableBndrId p => Outputable (TyClGroup (GhcPass p)) where ppr (TyClGroup { group_tyclds = tyclds , group_roles = roles , group_kisigs = kisigs , group_instds = instds } ) = hang (text "TyClGroup") 2 $ ppr kisigs $$ ppr tyclds $$ ppr roles $$ ppr instds pp_vanilla_decl_head :: (OutputableBndrId p) => XRec (GhcPass p) (IdP (GhcPass p)) -> LHsQTyVars (GhcPass p) -> LexicalFixity -> Maybe (LHsContext (GhcPass p)) -> SDoc pp_vanilla_decl_head thing (HsQTvs { hsq_explicit = tyvars }) fixity context = hsep [pprLHsContext context, pp_tyvars tyvars] where pp_tyvars (varl:varsr) | fixity == Infix && length varsr > 1 = hsep [char '(',ppr (unLoc varl), pprInfixOcc (unLoc thing) , (ppr.unLoc) (head varsr), char ')' , hsep (map (ppr.unLoc) (tail varsr))] | fixity == Infix = hsep [ppr (unLoc varl), pprInfixOcc (unLoc thing) , hsep (map (ppr.unLoc) varsr)] | otherwise = hsep [ pprPrefixOcc (unLoc thing) , hsep (map (ppr.unLoc) (varl:varsr))] pp_tyvars [] = pprPrefixOcc (unLoc thing) pprTyClDeclFlavour :: TyClDecl (GhcPass p) -> SDoc pprTyClDeclFlavour (ClassDecl {}) = text "class" pprTyClDeclFlavour (SynDecl {}) = text "type" pprTyClDeclFlavour (FamDecl { tcdFam = FamilyDecl { fdInfo = info }}) = pprFlavour info <+> text "family" pprTyClDeclFlavour (DataDecl { 
tcdDataDefn = HsDataDefn { dd_ND = nd } }) = ppr nd instance OutputableBndrId p => Outputable (FunDep (GhcPass p)) where ppr = pprFunDep type instance XCFunDep (GhcPass _) = EpAnn [AddEpAnn] type instance XXFunDep (GhcPass _) = DataConCantHappen pprFundeps :: OutputableBndrId p => [FunDep (GhcPass p)] -> SDoc pprFundeps [] = empty pprFundeps fds = hsep (vbar : punctuate comma (map pprFunDep fds)) pprFunDep :: OutputableBndrId p => FunDep (GhcPass p) -> SDoc pprFunDep (FunDep _ us vs) = hsep [interppSP us, arrow, interppSP vs] {- ********************************************************************* * * TyClGroup Strongly connected components of type, class, instance, and role declarations * * ********************************************************************* -} type instance XCTyClGroup (GhcPass _) = NoExtField type instance XXTyClGroup (GhcPass _) = DataConCantHappen {- ********************************************************************* * * Data and type family declarations * * ********************************************************************* -} type instance XNoSig (GhcPass _) = NoExtField type instance XCKindSig (GhcPass _) = NoExtField type instance XTyVarSig (GhcPass _) = NoExtField type instance XXFamilyResultSig (GhcPass _) = DataConCantHappen type instance XCFamilyDecl (GhcPass _) = EpAnn [AddEpAnn] type instance XXFamilyDecl (GhcPass _) = DataConCantHappen ------------- Functions over FamilyDecls ----------- familyDeclLName :: FamilyDecl (GhcPass p) -> XRec (GhcPass p) (IdP (GhcPass p)) familyDeclLName (FamilyDecl { fdLName = n }) = n familyDeclName :: FamilyDecl (GhcPass p) -> IdP (GhcPass p) familyDeclName = unLoc . 
familyDeclLName famResultKindSignature :: FamilyResultSig (GhcPass p) -> Maybe (LHsKind (GhcPass p)) famResultKindSignature (NoSig _) = Nothing famResultKindSignature (KindSig _ ki) = Just ki famResultKindSignature (TyVarSig _ bndr) = case unLoc bndr of UserTyVar _ _ _ -> Nothing KindedTyVar _ _ _ ki -> Just ki -- | Maybe return name of the result type variable resultVariableName :: FamilyResultSig (GhcPass a) -> Maybe (IdP (GhcPass a)) resultVariableName (TyVarSig _ sig) = Just $ hsLTyVarName sig resultVariableName _ = Nothing ------------- Pretty printing FamilyDecls ----------- type instance XCInjectivityAnn (GhcPass _) = EpAnn [AddEpAnn] type instance XXInjectivityAnn (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (FamilyDecl (GhcPass p)) where ppr (FamilyDecl { fdInfo = info, fdLName = ltycon , fdTopLevel = top_level , fdTyVars = tyvars , fdFixity = fixity , fdResultSig = L _ result , fdInjectivityAnn = mb_inj }) = vcat [ pprFlavour info <+> pp_top_level <+> pp_vanilla_decl_head ltycon tyvars fixity Nothing <+> pp_kind <+> pp_inj <+> pp_where , nest 2 $ pp_eqns ] where pp_top_level = case top_level of TopLevel -> text "family" NotTopLevel -> empty pp_kind = case result of NoSig _ -> empty KindSig _ kind -> dcolon <+> ppr kind TyVarSig _ tv_bndr -> text "=" <+> ppr tv_bndr pp_inj = case mb_inj of Just (L _ (InjectivityAnn _ lhs rhs)) -> hsep [ vbar, ppr lhs, text "->", hsep (map ppr rhs) ] Nothing -> empty (pp_where, pp_eqns) = case info of ClosedTypeFamily mb_eqns -> ( text "where" , case mb_eqns of Nothing -> text ".." Just eqns -> vcat $ map (ppr_fam_inst_eqn . 
unLoc) eqns ) _ -> (empty, empty) {- ********************************************************************* * * Data types and data constructors * * ********************************************************************* -} type instance XCHsDataDefn (GhcPass _) = NoExtField type instance XXHsDataDefn (GhcPass _) = DataConCantHappen type instance XCHsDerivingClause (GhcPass _) = EpAnn [AddEpAnn] type instance XXHsDerivingClause (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (HsDerivingClause (GhcPass p)) where ppr (HsDerivingClause { deriv_clause_strategy = dcs , deriv_clause_tys = L _ dct }) = hsep [ text "deriving" , pp_strat_before , ppr dct , pp_strat_after ] where -- @via@ is unique in that in comes /after/ the class being derived, -- so we must special-case it. (pp_strat_before, pp_strat_after) = case dcs of Just (L _ via@ViaStrategy{}) -> (empty, ppr via) _ -> (ppDerivStrategy dcs, empty) type instance XDctSingle (GhcPass _) = NoExtField type instance XDctMulti (GhcPass _) = NoExtField type instance XXDerivClauseTys (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (DerivClauseTys (GhcPass p)) where ppr (DctSingle _ ty) = ppr ty ppr (DctMulti _ tys) = parens (interpp'SP tys) type instance XStandaloneKindSig GhcPs = EpAnn [AddEpAnn] type instance XStandaloneKindSig GhcRn = NoExtField type instance XStandaloneKindSig GhcTc = NoExtField type instance XXStandaloneKindSig (GhcPass p) = DataConCantHappen standaloneKindSigName :: StandaloneKindSig (GhcPass p) -> IdP (GhcPass p) standaloneKindSigName (StandaloneKindSig _ lname _) = unLoc lname type instance XConDeclGADT (GhcPass _) = EpAnn [AddEpAnn] type instance XConDeclH98 (GhcPass _) = EpAnn [AddEpAnn] type instance XXConDecl (GhcPass _) = DataConCantHappen getConNames :: ConDecl GhcRn -> [LocatedN Name] getConNames ConDeclH98 {con_name = name} = [name] getConNames ConDeclGADT {con_names = names} = names -- | Return @'Just' fields@ if a data constructor declaration 
uses record -- syntax (i.e., 'RecCon'), where @fields@ are the field selectors. -- Otherwise, return 'Nothing'. getRecConArgs_maybe :: ConDecl GhcRn -> Maybe (LocatedL [LConDeclField GhcRn]) getRecConArgs_maybe (ConDeclH98{con_args = args}) = case args of PrefixCon{} -> Nothing RecCon flds -> Just flds InfixCon{} -> Nothing getRecConArgs_maybe (ConDeclGADT{con_g_args = args}) = case args of PrefixConGADT{} -> Nothing RecConGADT flds _ -> Just flds hsConDeclTheta :: Maybe (LHsContext (GhcPass p)) -> [LHsType (GhcPass p)] hsConDeclTheta Nothing = [] hsConDeclTheta (Just (L _ theta)) = theta pp_data_defn :: (OutputableBndrId p) => (Maybe (LHsContext (GhcPass p)) -> SDoc) -- Printing the header -> HsDataDefn (GhcPass p) -> SDoc pp_data_defn pp_hdr (HsDataDefn { dd_ND = new_or_data, dd_ctxt = context , dd_cType = mb_ct , dd_kindSig = mb_sig , dd_cons = condecls, dd_derivs = derivings }) | null condecls = ppr new_or_data <+> pp_ct <+> pp_hdr context <+> pp_sig <+> pp_derivings derivings | otherwise = hang (ppr new_or_data <+> pp_ct <+> pp_hdr context <+> pp_sig) 2 (pp_condecls condecls $$ pp_derivings derivings) where pp_ct = case mb_ct of Nothing -> empty Just ct -> ppr ct pp_sig = case mb_sig of Nothing -> empty Just kind -> dcolon <+> ppr kind pp_derivings ds = vcat (map ppr ds) instance OutputableBndrId p => Outputable (HsDataDefn (GhcPass p)) where ppr d = pp_data_defn (\_ -> text "Naked HsDataDefn") d instance OutputableBndrId p => Outputable (StandaloneKindSig (GhcPass p)) where ppr (StandaloneKindSig _ v ki) = text "type" <+> pprPrefixOcc (unLoc v) <+> text "::" <+> ppr ki pp_condecls :: forall p. 
OutputableBndrId p => [LConDecl (GhcPass p)] -> SDoc pp_condecls cs | gadt_syntax -- In GADT syntax = hang (text "where") 2 (vcat (map ppr cs)) | otherwise -- In H98 syntax = equals <+> sep (punctuate (text " |") (map ppr cs)) where gadt_syntax = case cs of [] -> False (L _ ConDeclH98{} : _) -> False (L _ ConDeclGADT{} : _) -> True instance (OutputableBndrId p) => Outputable (ConDecl (GhcPass p)) where ppr = pprConDecl pprConDecl :: forall p. OutputableBndrId p => ConDecl (GhcPass p) -> SDoc pprConDecl (ConDeclH98 { con_name = L _ con , con_ex_tvs = ex_tvs , con_mb_cxt = mcxt , con_args = args , con_doc = doc }) = pprMaybeWithDoc doc $ sep [ pprHsForAll (mkHsForAllInvisTele noAnn ex_tvs) mcxt , ppr_details args ] where -- In ppr_details: let's not print the multiplicities (they are always 1, by -- definition) as they do not appear in an actual declaration. ppr_details (InfixCon t1 t2) = hsep [ppr (hsScaledThing t1), pprInfixOcc con, ppr (hsScaledThing t2)] ppr_details (PrefixCon _ tys) = hsep (pprPrefixOcc con : map (pprHsType . unLoc . hsScaledThing) tys) ppr_details (RecCon fields) = pprPrefixOcc con <+> pprConDeclFields (unLoc fields) pprConDecl (ConDeclGADT { con_names = cons, con_bndrs = L _ outer_bndrs , con_mb_cxt = mcxt, con_g_args = args , con_res_ty = res_ty, con_doc = doc }) = pprMaybeWithDoc doc $ ppr_con_names cons <+> dcolon <+> (sep [pprHsOuterSigTyVarBndrs outer_bndrs <+> pprLHsContext mcxt, sep (ppr_args args ++ [ppr res_ty]) ]) where ppr_args (PrefixConGADT args) = map (\(HsScaled arr t) -> ppr t <+> ppr_arr arr) args ppr_args (RecConGADT fields _) = [pprConDeclFields (unLoc fields) <+> arrow] -- Display linear arrows as unrestricted with -XNoLinearTypes -- (cf. 
dataConDisplayType in Note [Displaying linear fields] in GHC.Core.DataCon) ppr_arr (HsLinearArrow _) = sdocOption sdocLinearTypes $ \show_linear_types -> if show_linear_types then lollipop else arrow ppr_arr arr = pprHsArrow arr ppr_con_names :: (OutputableBndr a) => [GenLocated l a] -> SDoc ppr_con_names = pprWithCommas (pprPrefixOcc . unLoc) {- ************************************************************************ * * Instance declarations * * ************************************************************************ -} type instance XCFamEqn (GhcPass _) r = EpAnn [AddEpAnn] type instance XXFamEqn (GhcPass _) r = DataConCantHappen type instance Anno (FamEqn (GhcPass p) _) = SrcSpanAnnA ----------------- Class instances ------------- type instance XCClsInstDecl GhcPs = (EpAnn [AddEpAnn], AnnSortKey) -- TODO:AZ:tidy up type instance XCClsInstDecl GhcRn = NoExtField type instance XCClsInstDecl GhcTc = NoExtField type instance XXClsInstDecl (GhcPass _) = DataConCantHappen ----------------- Instances of all kinds ------------- type instance XClsInstD (GhcPass _) = NoExtField type instance XDataFamInstD (GhcPass _) = NoExtField type instance XTyFamInstD GhcPs = NoExtField type instance XTyFamInstD GhcRn = NoExtField type instance XTyFamInstD GhcTc = NoExtField type instance XXInstDecl (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (TyFamInstDecl (GhcPass p)) where ppr = pprTyFamInstDecl TopLevel pprTyFamInstDecl :: (OutputableBndrId p) => TopLevelFlag -> TyFamInstDecl (GhcPass p) -> SDoc pprTyFamInstDecl top_lvl (TyFamInstDecl { tfid_eqn = eqn }) = text "type" <+> ppr_instance_keyword top_lvl <+> ppr_fam_inst_eqn eqn ppr_instance_keyword :: TopLevelFlag -> SDoc ppr_instance_keyword TopLevel = text "instance" ppr_instance_keyword NotTopLevel = empty pprTyFamDefltDecl :: (OutputableBndrId p) => TyFamDefltDecl (GhcPass p) -> SDoc pprTyFamDefltDecl = pprTyFamInstDecl NotTopLevel ppr_fam_inst_eqn :: (OutputableBndrId p) => TyFamInstEqn (GhcPass p) 
-> SDoc ppr_fam_inst_eqn (FamEqn { feqn_tycon = L _ tycon , feqn_bndrs = bndrs , feqn_pats = pats , feqn_fixity = fixity , feqn_rhs = rhs }) = pprHsFamInstLHS tycon bndrs pats fixity Nothing <+> equals <+> ppr rhs instance OutputableBndrId p => Outputable (DataFamInstDecl (GhcPass p)) where ppr = pprDataFamInstDecl TopLevel pprDataFamInstDecl :: (OutputableBndrId p) => TopLevelFlag -> DataFamInstDecl (GhcPass p) -> SDoc pprDataFamInstDecl top_lvl (DataFamInstDecl { dfid_eqn = (FamEqn { feqn_tycon = L _ tycon , feqn_bndrs = bndrs , feqn_pats = pats , feqn_fixity = fixity , feqn_rhs = defn })}) = pp_data_defn pp_hdr defn where pp_hdr mctxt = ppr_instance_keyword top_lvl <+> pprHsFamInstLHS tycon bndrs pats fixity mctxt -- pp_data_defn pretty-prints the kind sig. See #14817. pprDataFamInstFlavour :: DataFamInstDecl (GhcPass p) -> SDoc pprDataFamInstFlavour (DataFamInstDecl { dfid_eqn = (FamEqn { feqn_rhs = HsDataDefn { dd_ND = nd }})}) = ppr nd pprHsFamInstLHS :: (OutputableBndrId p) => IdP (GhcPass p) -> HsOuterFamEqnTyVarBndrs (GhcPass p) -> HsTyPats (GhcPass p) -> LexicalFixity -> Maybe (LHsContext (GhcPass p)) -> SDoc pprHsFamInstLHS thing bndrs typats fixity mb_ctxt = hsep [ pprHsOuterFamEqnTyVarBndrs bndrs , pprLHsContext mb_ctxt , pprHsArgsApp thing fixity typats ] instance OutputableBndrId p => Outputable (ClsInstDecl (GhcPass p)) where ppr (ClsInstDecl { cid_poly_ty = inst_ty, cid_binds = binds , cid_sigs = sigs, cid_tyfam_insts = ats , cid_overlap_mode = mbOverlap , cid_datafam_insts = adts }) | null sigs, null ats, null adts, isEmptyBag binds -- No "where" part = top_matter | otherwise -- Laid out = vcat [ top_matter <+> text "where" , nest 2 $ pprDeclList $ map (pprTyFamInstDecl NotTopLevel . unLoc) ats ++ map (pprDataFamInstDecl NotTopLevel . 
unLoc) adts ++ pprLHsBindsForUser binds sigs ] where top_matter = text "instance" <+> ppOverlapPragma mbOverlap <+> ppr inst_ty ppDerivStrategy :: OutputableBndrId p => Maybe (LDerivStrategy (GhcPass p)) -> SDoc ppDerivStrategy mb = case mb of Nothing -> empty Just (L _ ds) -> ppr ds ppOverlapPragma :: Maybe (LocatedP OverlapMode) -> SDoc ppOverlapPragma mb = case mb of Nothing -> empty Just (L _ (NoOverlap s)) -> maybe_stext s "{-# NO_OVERLAP #-}" Just (L _ (Overlappable s)) -> maybe_stext s "{-# OVERLAPPABLE #-}" Just (L _ (Overlapping s)) -> maybe_stext s "{-# OVERLAPPING #-}" Just (L _ (Overlaps s)) -> maybe_stext s "{-# OVERLAPS #-}" Just (L _ (Incoherent s)) -> maybe_stext s "{-# INCOHERENT #-}" where maybe_stext NoSourceText alt = text alt maybe_stext (SourceText src) _ = text src <+> text "#-}" instance (OutputableBndrId p) => Outputable (InstDecl (GhcPass p)) where ppr (ClsInstD { cid_inst = decl }) = ppr decl ppr (TyFamInstD { tfid_inst = decl }) = ppr decl ppr (DataFamInstD { dfid_inst = decl }) = ppr decl -- Extract the declarations of associated data types from an instance instDeclDataFamInsts :: [LInstDecl (GhcPass p)] -> [DataFamInstDecl (GhcPass p)] instDeclDataFamInsts inst_decls = concatMap do_one inst_decls where do_one :: LInstDecl (GhcPass p) -> [DataFamInstDecl (GhcPass p)] do_one (L _ (ClsInstD { cid_inst = ClsInstDecl { cid_datafam_insts = fam_insts } })) = map unLoc fam_insts do_one (L _ (DataFamInstD { dfid_inst = fam_inst })) = [fam_inst] do_one (L _ (TyFamInstD {})) = [] {- ************************************************************************ * * \subsection[DerivDecl]{A stand-alone instance deriving declaration} * * ************************************************************************ -} type instance XCDerivDecl (GhcPass _) = EpAnn [AddEpAnn] type instance XXDerivDecl (GhcPass _) = DataConCantHappen type instance Anno OverlapMode = SrcSpanAnnP instance OutputableBndrId p => Outputable (DerivDecl (GhcPass p)) where ppr (DerivDecl 
{ deriv_type = ty , deriv_strategy = ds , deriv_overlap_mode = o }) = hsep [ text "deriving" , ppDerivStrategy ds , text "instance" , ppOverlapPragma o , ppr ty ] {- ************************************************************************ * * Deriving strategies * * ************************************************************************ -} type instance XStockStrategy GhcPs = EpAnn [AddEpAnn] type instance XStockStrategy GhcRn = NoExtField type instance XStockStrategy GhcTc = NoExtField type instance XAnyClassStrategy GhcPs = EpAnn [AddEpAnn] type instance XAnyClassStrategy GhcRn = NoExtField type instance XAnyClassStrategy GhcTc = NoExtField type instance XNewtypeStrategy GhcPs = EpAnn [AddEpAnn] type instance XNewtypeStrategy GhcRn = NoExtField type instance XNewtypeStrategy GhcTc = NoExtField type instance XViaStrategy GhcPs = XViaStrategyPs type instance XViaStrategy GhcRn = LHsSigType GhcRn type instance XViaStrategy GhcTc = Type data XViaStrategyPs = XViaStrategyPs (EpAnn [AddEpAnn]) (LHsSigType GhcPs) instance OutputableBndrId p => Outputable (DerivStrategy (GhcPass p)) where ppr (StockStrategy _) = text "stock" ppr (AnyclassStrategy _) = text "anyclass" ppr (NewtypeStrategy _) = text "newtype" ppr (ViaStrategy ty) = text "via" <+> case ghcPass @p of GhcPs -> ppr ty GhcRn -> ppr ty GhcTc -> ppr ty instance Outputable XViaStrategyPs where ppr (XViaStrategyPs _ t) = ppr t -- | Eliminate a 'DerivStrategy'. foldDerivStrategy :: (p ~ GhcPass pass) => r -> (XViaStrategy p -> r) -> DerivStrategy p -> r foldDerivStrategy other _ (StockStrategy _) = other foldDerivStrategy other _ (AnyclassStrategy _) = other foldDerivStrategy other _ (NewtypeStrategy _) = other foldDerivStrategy _ via (ViaStrategy t) = via t -- | Map over the @via@ type if dealing with 'ViaStrategy'. Otherwise, -- return the 'DerivStrategy' unchanged. 
mapDerivStrategy :: (p ~ GhcPass pass) => (XViaStrategy p -> XViaStrategy p) -> DerivStrategy p -> DerivStrategy p mapDerivStrategy f ds = foldDerivStrategy ds (ViaStrategy . f) ds {- ************************************************************************ * * \subsection[DefaultDecl]{A @default@ declaration} * * ************************************************************************ -} type instance XCDefaultDecl GhcPs = EpAnn [AddEpAnn] type instance XCDefaultDecl GhcRn = NoExtField type instance XCDefaultDecl GhcTc = NoExtField type instance XXDefaultDecl (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (DefaultDecl (GhcPass p)) where ppr (DefaultDecl _ tys) = text "default" <+> parens (interpp'SP tys) {- ************************************************************************ * * \subsection{Foreign function interface declaration} * * ************************************************************************ -} type instance XForeignImport GhcPs = EpAnn [AddEpAnn] type instance XForeignImport GhcRn = NoExtField type instance XForeignImport GhcTc = Coercion type instance XForeignExport GhcPs = EpAnn [AddEpAnn] type instance XForeignExport GhcRn = NoExtField type instance XForeignExport GhcTc = Coercion type instance XXForeignDecl (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (ForeignDecl (GhcPass p)) where ppr (ForeignImport { fd_name = n, fd_sig_ty = ty, fd_fi = fimport }) = hang (text "foreign import" <+> ppr fimport <+> ppr n) 2 (dcolon <+> ppr ty) ppr (ForeignExport { fd_name = n, fd_sig_ty = ty, fd_fe = fexport }) = hang (text "foreign export" <+> ppr fexport <+> ppr n) 2 (dcolon <+> ppr ty) {- ************************************************************************ * * \subsection{Rewrite rules} * * ************************************************************************ -} type instance XCRuleDecls GhcPs = EpAnn [AddEpAnn] type instance XCRuleDecls GhcRn = NoExtField type instance XCRuleDecls GhcTc = 
NoExtField type instance XXRuleDecls (GhcPass _) = DataConCantHappen type instance XHsRule GhcPs = EpAnn HsRuleAnn type instance XHsRule GhcRn = HsRuleRn type instance XHsRule GhcTc = HsRuleRn type instance XXRuleDecl (GhcPass _) = DataConCantHappen type instance Anno (SourceText, RuleName) = SrcAnn NoEpAnns data HsRuleAnn = HsRuleAnn { ra_tyanns :: Maybe (AddEpAnn, AddEpAnn) -- ^ The locations of 'forall' and '.' for forall'd type vars -- Using AddEpAnn to capture possible unicode variants , ra_tmanns :: Maybe (AddEpAnn, AddEpAnn) -- ^ The locations of 'forall' and '.' for forall'd term vars -- Using AddEpAnn to capture possible unicode variants , ra_rest :: [AddEpAnn] } deriving (Data, Eq) flattenRuleDecls :: [LRuleDecls (GhcPass p)] -> [LRuleDecl (GhcPass p)] flattenRuleDecls decls = concatMap (rds_rules . unLoc) decls type instance XCRuleBndr (GhcPass _) = EpAnn [AddEpAnn] type instance XRuleBndrSig (GhcPass _) = EpAnn [AddEpAnn] type instance XXRuleBndr (GhcPass _) = DataConCantHappen instance (OutputableBndrId p) => Outputable (RuleDecls (GhcPass p)) where ppr (HsRules { rds_src = st , rds_rules = rules }) = pprWithSourceText st (text "{-# RULES") <+> vcat (punctuate semi (map ppr rules)) <+> text "#-}" instance (OutputableBndrId p) => Outputable (RuleDecl (GhcPass p)) where ppr (HsRule { rd_name = name , rd_act = act , rd_tyvs = tys , rd_tmvs = tms , rd_lhs = lhs , rd_rhs = rhs }) = sep [pprFullRuleName name <+> ppr act, nest 4 (pp_forall_ty tys <+> pp_forall_tm tys <+> pprExpr (unLoc lhs)), nest 6 (equals <+> pprExpr (unLoc rhs)) ] where pp_forall_ty Nothing = empty pp_forall_ty (Just qtvs) = forAllLit <+> fsep (map ppr qtvs) <> dot pp_forall_tm Nothing | null tms = empty pp_forall_tm _ = forAllLit <+> fsep (map ppr tms) <> dot instance (OutputableBndrId p) => Outputable (RuleBndr (GhcPass p)) where ppr (RuleBndr _ name) = ppr name ppr (RuleBndrSig _ name ty) = parens (ppr name <> dcolon <> ppr ty) {- 
************************************************************************ * * \subsection[DeprecDecl]{Deprecations} * * ************************************************************************ -} type instance XWarnings GhcPs = EpAnn [AddEpAnn] type instance XWarnings GhcRn = NoExtField type instance XWarnings GhcTc = NoExtField type instance XXWarnDecls (GhcPass _) = DataConCantHappen type instance XWarning (GhcPass _) = EpAnn [AddEpAnn] type instance XXWarnDecl (GhcPass _) = DataConCantHappen instance OutputableBndrId p => Outputable (WarnDecls (GhcPass p)) where ppr (Warnings _ (SourceText src) decls) = text src <+> vcat (punctuate comma (map ppr decls)) <+> text "#-}" ppr (Warnings _ NoSourceText _decls) = panic "WarnDecls" instance OutputableBndrId p => Outputable (WarnDecl (GhcPass p)) where ppr (Warning _ thing txt) = hsep ( punctuate comma (map ppr thing)) <+> ppr txt {- ************************************************************************ * * \subsection[AnnDecl]{Annotations} * * ************************************************************************ -} type instance XHsAnnotation (GhcPass _) = EpAnn AnnPragma type instance XXAnnDecl (GhcPass _) = DataConCantHappen instance (OutputableBndrId p) => Outputable (AnnDecl (GhcPass p)) where ppr (HsAnnotation _ _ provenance expr) = hsep [text "{-#", pprAnnProvenance provenance, pprExpr (unLoc expr), text "#-}"] pprAnnProvenance :: OutputableBndrId p => AnnProvenance (GhcPass p) -> SDoc pprAnnProvenance ModuleAnnProvenance = text "ANN module" pprAnnProvenance (ValueAnnProvenance (L _ name)) = text "ANN" <+> ppr name pprAnnProvenance (TypeAnnProvenance (L _ name)) = text "ANN type" <+> ppr name {- ************************************************************************ * * \subsection[RoleAnnot]{Role annotations} * * ************************************************************************ -} type instance XCRoleAnnotDecl GhcPs = EpAnn [AddEpAnn] type instance XCRoleAnnotDecl GhcRn = NoExtField type instance 
XCRoleAnnotDecl GhcTc = NoExtField type instance XXRoleAnnotDecl (GhcPass _) = DataConCantHappen type instance Anno (Maybe Role) = SrcAnn NoEpAnns instance OutputableBndr (IdP (GhcPass p)) => Outputable (RoleAnnotDecl (GhcPass p)) where ppr (RoleAnnotDecl _ ltycon roles) = text "type role" <+> pprPrefixOcc (unLoc ltycon) <+> hsep (map (pp_role . unLoc) roles) where pp_role Nothing = underscore pp_role (Just r) = ppr r roleAnnotDeclName :: RoleAnnotDecl (GhcPass p) -> IdP (GhcPass p) roleAnnotDeclName (RoleAnnotDecl _ (L _ name) _) = name {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (HsDecl (GhcPass _)) = SrcSpanAnnA type instance Anno (SpliceDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (TyClDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (FunDep (GhcPass p)) = SrcSpanAnnA type instance Anno (FamilyResultSig (GhcPass p)) = SrcAnn NoEpAnns type instance Anno (FamilyDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (InjectivityAnn (GhcPass p)) = SrcAnn NoEpAnns type instance Anno CType = SrcSpanAnnP type instance Anno (HsDerivingClause (GhcPass p)) = SrcAnn NoEpAnns type instance Anno (DerivClauseTys (GhcPass _)) = SrcSpanAnnC type instance Anno (StandaloneKindSig (GhcPass p)) = SrcSpanAnnA type instance Anno (ConDecl (GhcPass p)) = SrcSpanAnnA type instance Anno Bool = SrcAnn NoEpAnns type instance Anno [LocatedA (ConDeclField (GhcPass _))] = SrcSpanAnnL type instance Anno (FamEqn p (LocatedA (HsType p))) = SrcSpanAnnA type instance Anno (TyFamInstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (DataFamInstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (FamEqn (GhcPass p) _) = SrcSpanAnnA type instance Anno (ClsInstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (InstDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (DocDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (DerivDecl 
(GhcPass p)) = SrcSpanAnnA type instance Anno OverlapMode = SrcSpanAnnP type instance Anno (DerivStrategy (GhcPass p)) = SrcAnn NoEpAnns type instance Anno (DefaultDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (ForeignDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (RuleDecls (GhcPass p)) = SrcSpanAnnA type instance Anno (RuleDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (SourceText, RuleName) = SrcAnn NoEpAnns type instance Anno (RuleBndr (GhcPass p)) = SrcAnn NoEpAnns type instance Anno (WarnDecls (GhcPass p)) = SrcSpanAnnA type instance Anno (WarnDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (AnnDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (RoleAnnotDecl (GhcPass p)) = SrcSpanAnnA type instance Anno (Maybe Role) = SrcAnn NoEpAnns ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Doc.hs0000644000000000000000000002340514472400112017252 0ustar0000000000000000-- | Types and functions for raw and lexed docstrings. {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE StandaloneDeriving #-} module GHC.Hs.Doc ( HsDoc , WithHsDocIdentifiers(..) , hsDocIds , LHsDoc , pprHsDocDebug , pprWithDoc , pprMaybeWithDoc , module GHC.Hs.DocString , ExtractedTHDocs(..) , DocStructureItem(..) , DocStructure , Docs(..) 
, emptyDocs ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Types.Name import GHC.Utils.Outputable as Outputable hiding ((<>)) import GHC.Types.SrcLoc import qualified GHC.Data.EnumSet as EnumSet import GHC.Data.EnumSet (EnumSet) import GHC.Types.Avail import GHC.Types.Name.Set import GHC.Unit.Module.Name import GHC.Driver.Flags import Control.Applicative (liftA2) import Data.Data import Data.IntMap (IntMap) import qualified Data.IntMap as IntMap import Data.Map (Map) import qualified Data.Map as Map import Data.List.NonEmpty (NonEmpty(..)) import GHC.LanguageExtensions.Type import qualified GHC.Utils.Outputable as O import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Types.Unique.Map import Data.List (sortBy) import GHC.Hs.DocString -- | A docstring with the (probable) identifiers found in it. type HsDoc = WithHsDocIdentifiers HsDocString -- | Annotate a value with the probable identifiers found in it -- These will be used by haddock to generate links. -- -- The identifiers are bundled along with their location in the source file. -- This is useful for tooling to know exactly where they originate. -- -- This type is currently used in two places - for regular documentation comments, -- with 'a' set to 'HsDocString', and for adding identifier information to -- warnings, where 'a' is 'StringLiteral' data WithHsDocIdentifiers a pass = WithHsDocIdentifiers { hsDocString :: !a , hsDocIdentifiers :: ![Located (IdP pass)] } deriving instance (Data pass, Data (IdP pass), Data a) => Data (WithHsDocIdentifiers a pass) deriving instance (Eq (IdP pass), Eq a) => Eq (WithHsDocIdentifiers a pass) -- | For compatibility with the existing @-ddump-parsed' output, we only show -- the docstring. -- -- Use 'pprHsDoc' to show `HsDoc`'s internals. 
instance Outputable a => Outputable (WithHsDocIdentifiers a pass) where ppr (WithHsDocIdentifiers s _ids) = ppr s instance Binary a => Binary (WithHsDocIdentifiers a GhcRn) where put_ bh (WithHsDocIdentifiers s ids) = do put_ bh s put_ bh ids get bh = liftA2 WithHsDocIdentifiers (get bh) (get bh) -- | Extract a mapping from the lexed identifiers to the names they may -- correspond to. hsDocIds :: WithHsDocIdentifiers a GhcRn -> NameSet hsDocIds (WithHsDocIdentifiers _ ids) = mkNameSet $ map unLoc ids -- | Pretty print a thing with its doc -- The docstring will include the comment decorators '-- |', '{-|' etc -- and will come either before or after depending on how it was written -- i.e it will come after the thing if it is a '-- ^' or '{-^' and before -- otherwise. pprWithDoc :: LHsDoc name -> SDoc -> SDoc pprWithDoc doc = pprWithDocString (hsDocString $ unLoc doc) -- | See 'pprWithHsDoc' pprMaybeWithDoc :: Maybe (LHsDoc name) -> SDoc -> SDoc pprMaybeWithDoc Nothing = id pprMaybeWithDoc (Just doc) = pprWithDoc doc -- | Print a doc with its identifiers, useful for debugging pprHsDocDebug :: (Outputable (IdP name)) => HsDoc name -> SDoc pprHsDocDebug (WithHsDocIdentifiers s ids) = vcat [ text "text:" $$ nest 2 (pprHsDocString s) , text "identifiers:" $$ nest 2 (vcat (map pprLocatedAlways ids)) ] type LHsDoc pass = Located (HsDoc pass) -- | A simplified version of 'HsImpExp.IE'. data DocStructureItem = DsiSectionHeading Int (HsDoc GhcRn) | DsiDocChunk (HsDoc GhcRn) | DsiNamedChunkRef String | DsiExports Avails | DsiModExport (NonEmpty ModuleName) -- ^ We might re-export avails from multiple -- modules with a single export declaration. E.g. 
-- when we have -- -- > module M (module X) where -- > import R0 as X -- > import R1 as X Avails instance Binary DocStructureItem where put_ bh = \case DsiSectionHeading level doc -> do putByte bh 0 put_ bh level put_ bh doc DsiDocChunk doc -> do putByte bh 1 put_ bh doc DsiNamedChunkRef name -> do putByte bh 2 put_ bh name DsiExports avails -> do putByte bh 3 put_ bh avails DsiModExport mod_names avails -> do putByte bh 4 put_ bh mod_names put_ bh avails get bh = do tag <- getByte bh case tag of 0 -> DsiSectionHeading <$> get bh <*> get bh 1 -> DsiDocChunk <$> get bh 2 -> DsiNamedChunkRef <$> get bh 3 -> DsiExports <$> get bh 4 -> DsiModExport <$> get bh <*> get bh _ -> fail "instance Binary DocStructureItem: Invalid tag" instance Outputable DocStructureItem where ppr = \case DsiSectionHeading level doc -> vcat [ text "section heading, level" <+> ppr level O.<> colon , nest 2 (pprHsDocDebug doc) ] DsiDocChunk doc -> vcat [ text "documentation chunk:" , nest 2 (pprHsDocDebug doc) ] DsiNamedChunkRef name -> text "reference to named chunk:" <+> text name DsiExports avails -> text "avails:" $$ nest 2 (ppr avails) DsiModExport mod_names avails -> text "re-exported module(s):" <+> ppr mod_names $$ nest 2 (ppr avails) type DocStructure = [DocStructureItem] data Docs = Docs { docs_mod_hdr :: Maybe (HsDoc GhcRn) -- ^ Module header. , docs_decls :: UniqMap Name [HsDoc GhcRn] -- ^ Docs for declarations: functions, data types, instances, methods etc. -- A list because sometimes subsequent haddock comments can be combined into one , docs_args :: UniqMap Name (IntMap (HsDoc GhcRn)) -- ^ Docs for arguments. E.g. function arguments, method arguments. , docs_structure :: DocStructure , docs_named_chunks :: Map String (HsDoc GhcRn) -- ^ Map from chunk name to content. -- -- This map will be empty unless we have an explicit export list from which -- we can reference the chunks. , docs_haddock_opts :: Maybe String -- ^ Haddock options from @OPTIONS_HADDOCK@ or from @-haddock-opts@. 
, docs_language :: Maybe Language -- ^ The 'Language' used in the module, for example 'Haskell2010'. , docs_extensions :: EnumSet Extension -- ^ The full set of language extensions used in the module. } instance Binary Docs where put_ bh docs = do put_ bh (docs_mod_hdr docs) put_ bh (sortBy (\a b -> (fst a) `stableNameCmp` fst b) $ nonDetEltsUniqMap $ docs_decls docs) put_ bh (sortBy (\a b -> (fst a) `stableNameCmp` fst b) $ nonDetEltsUniqMap $ docs_args docs) put_ bh (docs_structure docs) put_ bh (Map.toList $ docs_named_chunks docs) put_ bh (docs_haddock_opts docs) put_ bh (docs_language docs) put_ bh (docs_extensions docs) get bh = do mod_hdr <- get bh decls <- listToUniqMap <$> get bh args <- listToUniqMap <$> get bh structure <- get bh named_chunks <- Map.fromList <$> get bh haddock_opts <- get bh language <- get bh exts <- get bh pure Docs { docs_mod_hdr = mod_hdr , docs_decls = decls , docs_args = args , docs_structure = structure , docs_named_chunks = named_chunks , docs_haddock_opts = haddock_opts , docs_language = language , docs_extensions = exts } instance Outputable Docs where ppr docs = vcat [ pprField (pprMaybe pprHsDocDebug) "module header" docs_mod_hdr , pprField (ppr . fmap (ppr . map pprHsDocDebug)) "declaration docs" docs_decls , pprField (ppr . fmap (pprIntMap ppr pprHsDocDebug)) "arg docs" docs_args , pprField (vcat . map ppr) "documentation structure" docs_structure , pprField (pprMap (doubleQuotes . text) pprHsDocDebug) "named chunks" docs_named_chunks , pprField pprMbString "haddock options" docs_haddock_opts , pprField ppr "language" docs_language , pprField (vcat . map ppr . 
EnumSet.toList) "language extensions" docs_extensions ] where pprField :: (a -> SDoc) -> String -> (Docs -> a) -> SDoc pprField ppr' heading lbl = text heading O.<> colon $$ nest 2 (ppr' (lbl docs)) pprMap pprKey pprVal m = vcat $ flip map (Map.toList m) $ \(k, v) -> pprKey k O.<> colon $$ nest 2 (pprVal v) pprIntMap pprKey pprVal m = vcat $ flip map (IntMap.toList m) $ \(k, v) -> pprKey k O.<> colon $$ nest 2 (pprVal v) pprMbString Nothing = empty pprMbString (Just s) = text s pprMaybe ppr' = \case Nothing -> text "Nothing" Just x -> text "Just" <+> ppr' x emptyDocs :: Docs emptyDocs = Docs { docs_mod_hdr = Nothing , docs_decls = emptyUniqMap , docs_args = emptyUniqMap , docs_structure = [] , docs_named_chunks = Map.empty , docs_haddock_opts = Nothing , docs_language = Nothing , docs_extensions = EnumSet.empty } -- | Maps of docs that were added via Template Haskell's @putDoc@. data ExtractedTHDocs = ExtractedTHDocs { ethd_mod_header :: Maybe (HsDoc GhcRn) -- ^ The added module header documentation, if it exists. , ethd_decl_docs :: UniqMap Name (HsDoc GhcRn) -- ^ The documentation added to declarations. , ethd_arg_docs :: UniqMap Name (IntMap (HsDoc GhcRn)) -- ^ The documentation added to function arguments. , ethd_inst_docs :: UniqMap Name (HsDoc GhcRn) -- ^ The documentation added to class and family instances. } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/DocString.hs0000644000000000000000000001605514472400112020444 0ustar0000000000000000-- | An exactprintable structure for docstrings {-# LANGUAGE DeriveDataTypeable #-} module GHC.Hs.DocString ( LHsDocString , HsDocString(..) , HsDocStringDecorator(..) , HsDocStringChunk(..) 
, LHsDocStringChunk , isEmptyDocString , unpackHDSC , mkHsDocStringChunk , mkHsDocStringChunkUtf8ByteString , pprHsDocString , pprHsDocStrings , mkGeneratedHsDocString , docStringChunks , renderHsDocString , renderHsDocStrings , exactPrintHsDocString , pprWithDocString ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Utils.Encoding import GHC.Utils.Outputable as Outputable hiding ((<>)) import GHC.Types.SrcLoc import Data.ByteString (ByteString) import qualified Data.ByteString as BS import Data.Data import Data.List.NonEmpty (NonEmpty(..)) import Data.List (intercalate) type LHsDocString = Located HsDocString -- | Haskell Documentation String -- -- Rich structure to support exact printing -- The location around each chunk doesn't include the decorators data HsDocString = MultiLineDocString !HsDocStringDecorator !(NonEmpty LHsDocStringChunk) -- ^ The first chunk is preceded by "-- " and each following chunk is preceded by "--" -- Example: -- | This is a docstring for 'foo'. It is the line with the decorator '|' and is always included -- -- This continues that docstring and is the second element in the NonEmpty list -- foo :: a -> a | NestedDocString !HsDocStringDecorator LHsDocStringChunk -- ^ The docstring is preceded by "{-" and followed by "-}" -- The chunk contains balanced pairs of '{-' and '-}' | GeneratedDocString HsDocStringChunk -- ^ A docstring generated either internally or via TH -- Pretty printed with the '-- |' decorator -- This is because it may contain unbalanced pairs of '{-' and '-}' and -- not form a valid 'NestedDocString' deriving (Eq, Data, Show) instance Outputable HsDocString where ppr = text . renderHsDocString -- | Annotate a pretty printed thing with its doc -- The docstring comes after if is 'HsDocStringPrevious' -- Otherwise it comes before. 
-- Note - we convert MultiLineDocString HsDocStringPrevious to HsDocStringNext -- because we can't control if something else will be pretty printed on the same line pprWithDocString :: HsDocString -> SDoc -> SDoc pprWithDocString (MultiLineDocString HsDocStringPrevious ds) sd = pprWithDocString (MultiLineDocString HsDocStringNext ds) sd pprWithDocString doc@(NestedDocString HsDocStringPrevious _) sd = sd <+> pprHsDocString doc pprWithDocString doc sd = pprHsDocString doc $+$ sd instance Binary HsDocString where put_ bh x = case x of MultiLineDocString dec xs -> do putByte bh 0 put_ bh dec put_ bh xs NestedDocString dec x -> do putByte bh 1 put_ bh dec put_ bh x GeneratedDocString x -> do putByte bh 2 put_ bh x get bh = do tag <- getByte bh case tag of 0 -> MultiLineDocString <$> get bh <*> get bh 1 -> NestedDocString <$> get bh <*> get bh 2 -> GeneratedDocString <$> get bh t -> fail $ "HsDocString: invalid tag " ++ show t data HsDocStringDecorator = HsDocStringNext -- ^ '|' is the decorator | HsDocStringPrevious -- ^ '^' is the decorator | HsDocStringNamed !String -- ^ '$' is the decorator | HsDocStringGroup !Int -- ^ The decorator is the given number of '*'s deriving (Eq, Ord, Show, Data) instance Outputable HsDocStringDecorator where ppr = text . 
printDecorator printDecorator :: HsDocStringDecorator -> String printDecorator HsDocStringNext = "|" printDecorator HsDocStringPrevious = "^" printDecorator (HsDocStringNamed n) = '$':n printDecorator (HsDocStringGroup n) = replicate n '*' instance Binary HsDocStringDecorator where put_ bh x = case x of HsDocStringNext -> putByte bh 0 HsDocStringPrevious -> putByte bh 1 HsDocStringNamed n -> putByte bh 2 >> put_ bh n HsDocStringGroup n -> putByte bh 3 >> put_ bh n get bh = do tag <- getByte bh case tag of 0 -> pure HsDocStringNext 1 -> pure HsDocStringPrevious 2 -> HsDocStringNamed <$> get bh 3 -> HsDocStringGroup <$> get bh t -> fail $ "HsDocStringDecorator: invalid tag " ++ show t type LHsDocStringChunk = Located HsDocStringChunk -- | A continguous chunk of documentation newtype HsDocStringChunk = HsDocStringChunk ByteString deriving (Eq,Ord,Data, Show) instance Binary HsDocStringChunk where put_ bh (HsDocStringChunk bs) = put_ bh bs get bh = HsDocStringChunk <$> get bh instance Outputable HsDocStringChunk where ppr = text . unpackHDSC mkHsDocStringChunk :: String -> HsDocStringChunk mkHsDocStringChunk s = HsDocStringChunk (utf8EncodeString s) -- | Create a 'HsDocString' from a UTF8-encoded 'ByteString'. mkHsDocStringChunkUtf8ByteString :: ByteString -> HsDocStringChunk mkHsDocStringChunkUtf8ByteString = HsDocStringChunk unpackHDSC :: HsDocStringChunk -> String unpackHDSC (HsDocStringChunk bs) = utf8DecodeByteString bs nullHDSC :: HsDocStringChunk -> Bool nullHDSC (HsDocStringChunk bs) = BS.null bs mkGeneratedHsDocString :: String -> HsDocString mkGeneratedHsDocString = GeneratedDocString . mkHsDocStringChunk isEmptyDocString :: HsDocString -> Bool isEmptyDocString (MultiLineDocString _ xs) = all (nullHDSC . 
unLoc) xs isEmptyDocString (NestedDocString _ s) = nullHDSC $ unLoc s isEmptyDocString (GeneratedDocString x) = nullHDSC x docStringChunks :: HsDocString -> [LHsDocStringChunk] docStringChunks (MultiLineDocString _ (x:|xs)) = x:xs docStringChunks (NestedDocString _ x) = [x] docStringChunks (GeneratedDocString x) = [L (UnhelpfulSpan UnhelpfulGenerated) x] -- | Pretty print with decorators, exactly as the user wrote it pprHsDocString :: HsDocString -> SDoc pprHsDocString = text . exactPrintHsDocString pprHsDocStrings :: [HsDocString] -> SDoc pprHsDocStrings = text . intercalate "\n\n" . map exactPrintHsDocString -- | Pretty print with decorators, exactly as the user wrote it exactPrintHsDocString :: HsDocString -> String exactPrintHsDocString (MultiLineDocString dec (x :| xs)) = unlines' $ ("-- " ++ printDecorator dec ++ unpackHDSC (unLoc x)) : map (\x -> "--" ++ unpackHDSC (unLoc x)) xs exactPrintHsDocString (NestedDocString dec (L _ s)) = "{-" ++ printDecorator dec ++ unpackHDSC s ++ "-}" exactPrintHsDocString (GeneratedDocString x) = case lines (unpackHDSC x) of [] -> "" (x:xs) -> unlines' $ ( "-- |" ++ x) : map (\y -> "--"++y) xs -- | Just get the docstring, without any decorators renderHsDocString :: HsDocString -> String renderHsDocString (MultiLineDocString _ (x :| xs)) = unlines' $ map (unpackHDSC . unLoc) (x:xs) renderHsDocString (NestedDocString _ ds) = unpackHDSC $ unLoc ds renderHsDocString (GeneratedDocString x) = unpackHDSC x -- | Don't add a newline to a single string unlines' :: [String] -> String unlines' = intercalate "\n" -- | Just get the docstring, without any decorators -- Seperates docstrings using "\n\n", which is how haddock likes to render them renderHsDocStrings :: [HsDocString] -> String renderHsDocStrings = intercalate "\n\n" . 
map renderHsDocString ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Dump.hs0000644000000000000000000003341714472400112017456 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} -- | Contains a debug function to dump parts of the GHC.Hs AST. It uses a syb -- traversal which falls back to displaying based on the constructor name, so -- can be used to dump anything having a @Data.Data@ instance. module GHC.Hs.Dump ( -- * Dumping ASTs showAstData, showAstDataFull, BlankSrcSpan(..), BlankEpAnnotations(..), ) where import GHC.Prelude import GHC.Hs import GHC.Core.DataCon import GHC.Data.Bag import GHC.Data.FastString import GHC.Types.Name.Set import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Types.Var import GHC.Types.SourceText import GHC.Unit.Module import GHC.Utils.Outputable import Data.Data hiding (Fixity) import qualified Data.ByteString as B -- | Should source spans be removed from output. data BlankSrcSpan = BlankSrcSpan | BlankSrcSpanFile | NoBlankSrcSpan deriving (Eq,Show) -- | Should EpAnnotations be removed from output. data BlankEpAnnotations = BlankEpAnnotations | NoBlankEpAnnotations deriving (Eq,Show) -- | Show the full AST as the compiler sees it. showAstDataFull :: Data a => a -> SDoc showAstDataFull = showAstData NoBlankSrcSpan NoBlankEpAnnotations -- | Show a GHC syntax tree. 
This parameterised because it is also used for -- comparing ASTs in ppr roundtripping tests, where the SrcSpan's are blanked -- out, to avoid comparing locations, only structure showAstData :: Data a => BlankSrcSpan -> BlankEpAnnotations -> a -> SDoc showAstData bs ba a0 = blankLine $$ showAstData' a0 where showAstData' :: Data a => a -> SDoc showAstData' = generic `ext1Q` list `extQ` string `extQ` fastString `extQ` srcSpan `extQ` realSrcSpan `extQ` annotation `extQ` annotationModule `extQ` annotationAddEpAnn `extQ` annotationGrhsAnn `extQ` annotationEpAnnHsCase `extQ` annotationAnnList `extQ` annotationEpAnnImportDecl `extQ` annotationAnnParen `extQ` annotationTrailingAnn `extQ` annotationEpaLocation `extQ` annotationNoEpAnns `extQ` addEpAnn `extQ` lit `extQ` litr `extQ` litt `extQ` sourceText `extQ` deltaPos `extQ` epaAnchor `extQ` bytestring `extQ` name `extQ` occName `extQ` moduleName `extQ` var `extQ` dataCon `extQ` bagName `extQ` bagRdrName `extQ` bagVar `extQ` nameSet `extQ` fixity `ext2Q` located `extQ` srcSpanAnnA `extQ` srcSpanAnnL `extQ` srcSpanAnnP `extQ` srcSpanAnnC `extQ` srcSpanAnnN where generic :: Data a => a -> SDoc generic t = parens $ text (showConstr (toConstr t)) $$ vcat (gmapQ showAstData' t) string :: String -> SDoc string = text . normalize_newlines . show fastString :: FastString -> SDoc fastString s = braces $ text "FastString:" <+> text (normalize_newlines . show $ s) bytestring :: B.ByteString -> SDoc bytestring = text . normalize_newlines . 
show list [] = brackets empty list [x] = brackets (showAstData' x) list (x1 : x2 : xs) = (text "[" <> showAstData' x1) $$ go x2 xs where go y [] = text "," <> showAstData' y <> text "]" go y1 (y2 : ys) = (text "," <> showAstData' y1) $$ go y2 ys -- Eliminate word-size dependence lit :: HsLit GhcPs -> SDoc lit (HsWordPrim s x) = numericLit "HsWord{64}Prim" x s lit (HsWord64Prim s x) = numericLit "HsWord{64}Prim" x s lit (HsIntPrim s x) = numericLit "HsInt{64}Prim" x s lit (HsInt64Prim s x) = numericLit "HsInt{64}Prim" x s lit l = generic l litr :: HsLit GhcRn -> SDoc litr (HsWordPrim s x) = numericLit "HsWord{64}Prim" x s litr (HsWord64Prim s x) = numericLit "HsWord{64}Prim" x s litr (HsIntPrim s x) = numericLit "HsInt{64}Prim" x s litr (HsInt64Prim s x) = numericLit "HsInt{64}Prim" x s litr l = generic l litt :: HsLit GhcTc -> SDoc litt (HsWordPrim s x) = numericLit "HsWord{64}Prim" x s litt (HsWord64Prim s x) = numericLit "HsWord{64}Prim" x s litt (HsIntPrim s x) = numericLit "HsInt{64}Prim" x s litt (HsInt64Prim s x) = numericLit "HsInt{64}Prim" x s litt l = generic l numericLit :: String -> Integer -> SourceText -> SDoc numericLit tag x s = braces $ hsep [ text tag , generic x , generic s ] sourceText :: SourceText -> SDoc sourceText NoSourceText = parens $ text "NoSourceText" sourceText (SourceText src) = case bs of NoBlankSrcSpan -> parens $ text "SourceText" <+> text src BlankSrcSpanFile -> parens $ text "SourceText" <+> text src _ -> parens $ text "SourceText" <+> text "blanked" epaAnchor :: EpaLocation -> SDoc epaAnchor (EpaSpan r) = parens $ text "EpaSpan" <+> realSrcSpan r epaAnchor (EpaDelta d cs) = case ba of NoBlankEpAnnotations -> parens $ text "EpaDelta" <+> deltaPos d <+> showAstData' cs BlankEpAnnotations -> parens $ text "EpaDelta" <+> deltaPos d <+> text "blanked" deltaPos :: DeltaPos -> SDoc deltaPos (SameLine c) = parens $ text "SameLine" <+> ppr c deltaPos (DifferentLine l c) = parens $ text "DifferentLine" <+> ppr l <+> ppr c name :: Name -> 
SDoc name nm = braces $ text "Name:" <+> ppr nm occName n = braces $ text "OccName:" <+> text (occNameString n) moduleName :: ModuleName -> SDoc moduleName m = braces $ text "ModuleName:" <+> ppr m srcSpan :: SrcSpan -> SDoc srcSpan ss = case bs of BlankSrcSpan -> text "{ ss }" NoBlankSrcSpan -> braces $ char ' ' <> (hang (ppr ss) 1 -- TODO: show annotations here (text "")) BlankSrcSpanFile -> braces $ char ' ' <> (hang (pprUserSpan False ss) 1 -- TODO: show annotations here (text "")) realSrcSpan :: RealSrcSpan -> SDoc realSrcSpan ss = case bs of BlankSrcSpan -> text "{ ss }" NoBlankSrcSpan -> braces $ char ' ' <> (hang (ppr ss) 1 -- TODO: show annotations here (text "")) BlankSrcSpanFile -> braces $ char ' ' <> (hang (pprUserRealSpan False ss) 1 -- TODO: show annotations here (text "")) addEpAnn :: AddEpAnn -> SDoc addEpAnn (AddEpAnn a s) = case ba of BlankEpAnnotations -> parens $ text "blanked:" <+> text "AddEpAnn" NoBlankEpAnnotations -> parens $ text "AddEpAnn" <+> ppr a <+> epaAnchor s var :: Var -> SDoc var v = braces $ text "Var:" <+> ppr v dataCon :: DataCon -> SDoc dataCon c = braces $ text "DataCon:" <+> ppr c bagRdrName:: Bag (LocatedA (HsBind GhcPs)) -> SDoc bagRdrName bg = braces $ text "Bag(LocatedA (HsBind GhcPs)):" $$ (list . bagToList $ bg) bagName :: Bag (LocatedA (HsBind GhcRn)) -> SDoc bagName bg = braces $ text "Bag(LocatedA (HsBind Name)):" $$ (list . bagToList $ bg) bagVar :: Bag (LocatedA (HsBind GhcTc)) -> SDoc bagVar bg = braces $ text "Bag(LocatedA (HsBind Var)):" $$ (list . bagToList $ bg) nameSet ns = braces $ text "NameSet:" $$ (list . 
nameSetElemsStable $ ns) fixity :: Fixity -> SDoc fixity fx = braces $ text "Fixity:" <+> ppr fx located :: (Data a, Data b) => GenLocated a b -> SDoc located (L ss a) = parens (text "L" $$ vcat [showAstData' ss, showAstData' a]) -- ------------------------- annotation :: EpAnn [AddEpAnn] -> SDoc annotation = annotation' (text "EpAnn [AddEpAnn]") annotationModule :: EpAnn AnnsModule -> SDoc annotationModule = annotation' (text "EpAnn AnnsModule") annotationAddEpAnn :: EpAnn AddEpAnn -> SDoc annotationAddEpAnn = annotation' (text "EpAnn AddEpAnn") annotationGrhsAnn :: EpAnn GrhsAnn -> SDoc annotationGrhsAnn = annotation' (text "EpAnn GrhsAnn") annotationEpAnnHsCase :: EpAnn EpAnnHsCase -> SDoc annotationEpAnnHsCase = annotation' (text "EpAnn EpAnnHsCase") annotationAnnList :: EpAnn AnnList -> SDoc annotationAnnList = annotation' (text "EpAnn AnnList") annotationEpAnnImportDecl :: EpAnn EpAnnImportDecl -> SDoc annotationEpAnnImportDecl = annotation' (text "EpAnn EpAnnImportDecl") annotationAnnParen :: EpAnn AnnParen -> SDoc annotationAnnParen = annotation' (text "EpAnn AnnParen") annotationTrailingAnn :: EpAnn TrailingAnn -> SDoc annotationTrailingAnn = annotation' (text "EpAnn TrailingAnn") annotationEpaLocation :: EpAnn EpaLocation -> SDoc annotationEpaLocation = annotation' (text "EpAnn EpaLocation") annotationNoEpAnns :: EpAnn NoEpAnns -> SDoc annotationNoEpAnns = annotation' (text "EpAnn NoEpAnns") annotation' :: forall a .(Data a, Typeable a) => SDoc -> EpAnn a -> SDoc annotation' tag anns = case ba of BlankEpAnnotations -> parens (text "blanked:" <+> tag) NoBlankEpAnnotations -> parens $ text (showConstr (toConstr anns)) $$ vcat (gmapQ showAstData' anns) -- ------------------------- srcSpanAnnA :: SrcSpanAnn' (EpAnn AnnListItem) -> SDoc srcSpanAnnA = locatedAnn'' (text "SrcSpanAnnA") srcSpanAnnL :: SrcSpanAnn' (EpAnn AnnList) -> SDoc srcSpanAnnL = locatedAnn'' (text "SrcSpanAnnL") srcSpanAnnP :: SrcSpanAnn' (EpAnn AnnPragma) -> SDoc srcSpanAnnP = locatedAnn'' 
(text "SrcSpanAnnP") srcSpanAnnC :: SrcSpanAnn' (EpAnn AnnContext) -> SDoc srcSpanAnnC = locatedAnn'' (text "SrcSpanAnnC") srcSpanAnnN :: SrcSpanAnn' (EpAnn NameAnn) -> SDoc srcSpanAnnN = locatedAnn'' (text "SrcSpanAnnN") locatedAnn'' :: forall a. (Typeable a, Data a) => SDoc -> SrcSpanAnn' a -> SDoc locatedAnn'' tag ss = parens $ case cast ss of Just ((SrcSpanAnn ann s) :: SrcSpanAnn' a) -> case ba of BlankEpAnnotations -> parens (text "blanked:" <+> tag) NoBlankEpAnnotations -> text "SrcSpanAnn" <+> showAstData' ann <+> srcSpan s Nothing -> text "locatedAnn:unmatched" <+> tag <+> (parens $ text (showConstr (toConstr ss))) normalize_newlines :: String -> String normalize_newlines ('\\':'r':'\\':'n':xs) = '\\':'n':normalize_newlines xs normalize_newlines (x:xs) = x:normalize_newlines xs normalize_newlines [] = [] {- ************************************************************************ * * * Copied from syb * * ************************************************************************ -} -- | The type constructor for queries newtype Q q x = Q { unQ :: x -> q } -- | Extend a generic query by a type-specific case extQ :: ( Typeable a , Typeable b ) => (a -> q) -> (b -> q) -> a -> q extQ f g a = maybe (f a) g (cast a) -- | Type extension of queries for type constructors ext1Q :: (Data d, Typeable t) => (d -> q) -> (forall e. Data e => t e -> q) -> d -> q ext1Q def ext = unQ ((Q def) `ext1` (Q ext)) -- | Type extension of queries for type constructors ext2Q :: (Data d, Typeable t) => (d -> q) -> (forall d1 d2. (Data d1, Data d2) => t d1 d2 -> q) -> d -> q ext2Q def ext = unQ ((Q def) `ext2` (Q ext)) -- | Flexible type extension ext1 :: (Data a, Typeable t) => c a -> (forall d. Data d => c (t d)) -> c a ext1 def ext = maybe def id (dataCast1 ext) -- | Flexible type extension ext2 :: (Data a, Typeable t) => c a -> (forall d1 d2. 
(Data d1, Data d2) => c (t d1 d2)) -> c a ext2 def ext = maybe def id (dataCast2 ext) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Expr.hs0000644000000000000000000024164614472400112017474 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Abstract Haskell syntax for expressions. 
module GHC.Hs.Expr ( module Language.Haskell.Syntax.Expr , module GHC.Hs.Expr ) where import Language.Haskell.Syntax.Expr -- friends: import GHC.Prelude import GHC.Hs.Decls import GHC.Hs.Pat import GHC.Hs.Lit import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Hs.Type import GHC.Hs.Binds import GHC.Parser.Annotation -- others: import GHC.Tc.Types.Evidence import GHC.Core.DataCon (FieldLabelString) import GHC.Types.Name import GHC.Types.Name.Set import GHC.Types.Basic import GHC.Types.Fixity import GHC.Types.SourceText import GHC.Types.SrcLoc import GHC.Types.Tickish (CoreTickish) import GHC.Core.ConLike import GHC.Unit.Module (ModuleName) import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.FastString import GHC.Core.Type import GHC.Builtin.Types (mkTupleStr) import GHC.Tc.Utils.TcType (TcType, TcTyVar) import {-# SOURCE #-} GHC.Tc.Types (TcLclEnv) -- libraries: import Data.Data hiding (Fixity(..)) import qualified Data.Data as Data (Fixity(..)) import qualified Data.Kind import Data.Maybe (isJust) import Data.Foldable ( toList ) import Data.List (uncons) import Data.Bifunctor (first) {- ********************************************************************* * * Expressions proper * * ********************************************************************* -} -- | Post-Type checking Expression -- -- PostTcExpr is an evidence expression attached to the syntax tree by the -- type checker (c.f. postTcType). type PostTcExpr = HsExpr GhcTc -- | Post-Type checking Table -- -- We use a PostTcTable where there are a bunch of pieces of evidence, more -- than is convenient to keep individually. type PostTcTable = [(Name, PostTcExpr)] ------------------------- -- Defining SyntaxExpr in two stages allows for better type inference, because -- we can declare SyntaxExprGhc to be injective (and closed). Without injectivity, -- noSyntaxExpr would be ambiguous. 
type instance SyntaxExpr (GhcPass p) = SyntaxExprGhc p type family SyntaxExprGhc (p :: Pass) = (r :: Data.Kind.Type) | r -> p where SyntaxExprGhc 'Parsed = NoExtField SyntaxExprGhc 'Renamed = SyntaxExprRn SyntaxExprGhc 'Typechecked = SyntaxExprTc -- | The function to use in rebindable syntax. See Note [NoSyntaxExpr]. data SyntaxExprRn = SyntaxExprRn (HsExpr GhcRn) -- Why is the payload not just a Name? -- See Note [Monad fail : Rebindable syntax, overloaded strings] in "GHC.Rename.Expr" | NoSyntaxExprRn -- | An expression with wrappers, used for rebindable syntax -- -- This should desugar to -- -- > syn_res_wrap $ syn_expr (syn_arg_wraps[0] arg0) -- > (syn_arg_wraps[1] arg1) ... -- -- where the actual arguments come from elsewhere in the AST. data SyntaxExprTc = SyntaxExprTc { syn_expr :: HsExpr GhcTc , syn_arg_wraps :: [HsWrapper] , syn_res_wrap :: HsWrapper } | NoSyntaxExprTc -- See Note [NoSyntaxExpr] -- | This is used for rebindable-syntax pieces that are too polymorphic -- for tcSyntaxOp (trS_fmap and the mzip in ParStmt) noExpr :: HsExpr (GhcPass p) noExpr = HsLit noComments (HsString (SourceText "noExpr") (fsLit "noExpr")) noSyntaxExpr :: forall p. IsPass p => SyntaxExpr (GhcPass p) -- Before renaming, and sometimes after -- See Note [NoSyntaxExpr] noSyntaxExpr = case ghcPass @p of GhcPs -> noExtField GhcRn -> NoSyntaxExprRn GhcTc -> NoSyntaxExprTc -- | Make a 'SyntaxExpr GhcRn' from an expression -- Used only in getMonadFailOp. -- See Note [Monad fail : Rebindable syntax, overloaded strings] in "GHC.Rename.Expr" mkSyntaxExpr :: HsExpr GhcRn -> SyntaxExprRn mkSyntaxExpr = SyntaxExprRn -- | Make a 'SyntaxExpr' from a 'Name' (the "rn" is because this is used in the -- renamer). 
mkRnSyntaxExpr :: Name -> SyntaxExprRn mkRnSyntaxExpr name = SyntaxExprRn $ HsVar noExtField $ noLocA name instance Outputable SyntaxExprRn where ppr (SyntaxExprRn expr) = ppr expr ppr NoSyntaxExprRn = text "" instance Outputable SyntaxExprTc where ppr (SyntaxExprTc { syn_expr = expr , syn_arg_wraps = arg_wraps , syn_res_wrap = res_wrap }) = sdocOption sdocPrintExplicitCoercions $ \print_co -> getPprDebug $ \debug -> if debug || print_co then ppr expr <> braces (pprWithCommas ppr arg_wraps) <> braces (ppr res_wrap) else ppr expr ppr NoSyntaxExprTc = text "" -- | Extra data fields for a 'RecordUpd', added by the type checker data RecordUpdTc = RecordUpdTc { rupd_cons :: [ConLike] -- Filled in by the type checker to the -- _non-empty_ list of DataCons that have -- all the upd'd fields , rupd_in_tys :: [Type] -- Argument types of *input* record type , rupd_out_tys :: [Type] -- and *output* record type -- For a data family, these are the type args of the -- /representation/ type constructor , rupd_wrap :: HsWrapper -- See Note [Record Update HsWrapper] } -- | HsWrap appears only in typechecker output data HsWrap hs_syn = HsWrap HsWrapper -- the wrapper (hs_syn GhcTc) -- the thing that is wrapped deriving instance (Data (hs_syn GhcTc), Typeable hs_syn) => Data (HsWrap hs_syn) -- --------------------------------------------------------------------- {- Note [The life cycle of a TH quotation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When desugaring a bracket (aka quotation), we want to produce Core code that, when run, will produce the TH syntax tree for the quotation. To that end, we want to desugar /renamed/ but not /typechecked/ code; the latter is cluttered with the typechecker's elaboration that should not appear in the TH syntax tree. So in (HsExpr GhcTc) tree, we must have a (HsExpr GhcRn) for the quotation itself. As such, when typechecking both typed and untyped brackets, we keep a /renamed/ bracket in the extension field. 
The HsBracketTc, the GhcTc ext field for both brackets, contains: - The renamed quote :: HsQuote GhcRn -- for the desugarer - [PendingTcSplice] - The type of the quote - Maybe QuoteWrapper Note that (HsBracketTc) stores the untyped (HsQuote GhcRn) for both typed and untyped brackets. They are treated uniformly by the desugarer, and we can easily construct untyped brackets from typed ones (with ExpBr). Typed quotes ~~~~~~~~~~~~ Here is the life cycle of a /typed/ quote [|| e ||], whose datacon is HsTypedBracket (XTypedBracket p) (LHsExpr p) In pass p (XTypedBracket p) (LHsExpr p) ------------------------------------------- GhcPs Annotations only LHsExpr GhcPs GhcRn Annotations only LHsExpr GhcRn GhcTc HsBracketTc LHsExpr GhcTc: unused! Note that in the GhcTc tree, the second field (HsExpr GhcTc) is entirely unused; the desugarer uses the (HsExpr GhcRn) from the first field. Untyped quotes ~~~~~~~~~~~~~~ Here is the life cycle of an /untyped/ quote, whose datacon is HsUntypedBracket (XUntypedBracket p) (HsQuote p) Here HsQuote is a sum-type of expressions [| e |], patterns [| p |], types [| t |] etc. In pass p (XUntypedBracket p) (HsQuote p) ------------------------------------------------------- GhcPs Annotations only HsQuote GhcPs GhcRn Annotations, [PendingRnSplice] HsQuote GhcRn GhcTc HsBracketTc HsQuote GhcTc: unused! The difficulty is: the typechecker does not typecheck the body of an untyped quote, so how do we make a (HsQuote GhcTc) to put in the second field? Answer: we use the extension constructor of HsQuote, XQuote, and make all the other constructors into DataConCantHappen. That is, the only non-bottom value of type (HsQuote GhcTc) is (XQuote noExtField). Hence the instances type instance XExpBr GhcTc = DataConCantHappen ...etc... 
See the related Note [How brackets and nested splices are handled] in GHC.Tc.Gen.Splice -} data HsBracketTc = HsBracketTc { brack_renamed_quote :: (HsQuote GhcRn) -- See Note [The life cycle of a TH quotation] , brack_ty :: Type , brack_quote_wrapper :: (Maybe QuoteWrapper) -- The wrapper to apply type and dictionary argument to the quote. , brack_pending_splices :: [PendingTcSplice] -- Output of the type checker is the *original* -- renamed expression, plus -- _typechecked_ splices to be -- pasted back in by the desugarer } type instance XTypedBracket GhcPs = EpAnn [AddEpAnn] type instance XTypedBracket GhcRn = NoExtField type instance XTypedBracket GhcTc = HsBracketTc type instance XUntypedBracket GhcPs = EpAnn [AddEpAnn] type instance XUntypedBracket GhcRn = [PendingRnSplice] -- See Note [Pending Splices] -- Output of the renamer is the *original* renamed expression, -- plus _renamed_ splices to be type checked type instance XUntypedBracket GhcTc = HsBracketTc -- --------------------------------------------------------------------- -- API Annotations types data EpAnnHsCase = EpAnnHsCase { hsCaseAnnCase :: EpaLocation , hsCaseAnnOf :: EpaLocation , hsCaseAnnsRest :: [AddEpAnn] } deriving Data data EpAnnUnboundVar = EpAnnUnboundVar { hsUnboundBackquotes :: (EpaLocation, EpaLocation) , hsUnboundHole :: EpaLocation } deriving Data type instance XVar (GhcPass _) = NoExtField -- Record selectors at parse time are HsVar; they convert to HsRecSel -- on renaming. 
type instance XRecSel GhcPs = DataConCantHappen type instance XRecSel GhcRn = NoExtField type instance XRecSel GhcTc = NoExtField type instance XLam (GhcPass _) = NoExtField -- OverLabel not present in GhcTc pass; see GHC.Rename.Expr -- Note [Handling overloaded and rebindable constructs] type instance XOverLabel GhcPs = EpAnnCO type instance XOverLabel GhcRn = EpAnnCO type instance XOverLabel GhcTc = DataConCantHappen -- --------------------------------------------------------------------- type instance XVar (GhcPass _) = NoExtField type instance XUnboundVar GhcPs = EpAnn EpAnnUnboundVar type instance XUnboundVar GhcRn = NoExtField type instance XUnboundVar GhcTc = HoleExprRef -- We really don't need the whole HoleExprRef; just the IORef EvTerm -- would be enough. But then deriving a Data instance becomes impossible. -- Much, much easier just to define HoleExprRef with a Data instance and -- store the whole structure. type instance XIPVar GhcPs = EpAnnCO type instance XIPVar GhcRn = EpAnnCO type instance XIPVar GhcTc = DataConCantHappen type instance XOverLitE (GhcPass _) = EpAnnCO type instance XLitE (GhcPass _) = EpAnnCO type instance XLam (GhcPass _) = NoExtField type instance XLamCase (GhcPass _) = EpAnn [AddEpAnn] type instance XApp (GhcPass _) = EpAnnCO type instance XAppTypeE GhcPs = SrcSpan -- Where the `@` lives type instance XAppTypeE GhcRn = NoExtField type instance XAppTypeE GhcTc = Type -- OpApp not present in GhcTc pass; see GHC.Rename.Expr -- Note [Handling overloaded and rebindable constructs] type instance XOpApp GhcPs = EpAnn [AddEpAnn] type instance XOpApp GhcRn = Fixity type instance XOpApp GhcTc = DataConCantHappen -- SectionL, SectionR not present in GhcTc pass; see GHC.Rename.Expr -- Note [Handling overloaded and rebindable constructs] type instance XSectionL GhcPs = EpAnnCO type instance XSectionR GhcPs = EpAnnCO type instance XSectionL GhcRn = EpAnnCO type instance XSectionR GhcRn = EpAnnCO type instance XSectionL GhcTc = DataConCantHappen 
type instance XSectionR GhcTc = DataConCantHappen type instance XNegApp GhcPs = EpAnn [AddEpAnn] type instance XNegApp GhcRn = NoExtField type instance XNegApp GhcTc = NoExtField type instance XPar (GhcPass _) = EpAnnCO type instance XExplicitTuple GhcPs = EpAnn [AddEpAnn] type instance XExplicitTuple GhcRn = NoExtField type instance XExplicitTuple GhcTc = NoExtField type instance XExplicitSum GhcPs = EpAnn AnnExplicitSum type instance XExplicitSum GhcRn = NoExtField type instance XExplicitSum GhcTc = [Type] type instance XCase GhcPs = EpAnn EpAnnHsCase type instance XCase GhcRn = NoExtField type instance XCase GhcTc = NoExtField type instance XIf GhcPs = EpAnn AnnsIf type instance XIf GhcRn = NoExtField type instance XIf GhcTc = NoExtField type instance XMultiIf GhcPs = EpAnn [AddEpAnn] type instance XMultiIf GhcRn = NoExtField type instance XMultiIf GhcTc = Type type instance XLet GhcPs = EpAnnCO type instance XLet GhcRn = NoExtField type instance XLet GhcTc = NoExtField type instance XDo GhcPs = EpAnn AnnList type instance XDo GhcRn = NoExtField type instance XDo GhcTc = Type type instance XExplicitList GhcPs = EpAnn AnnList type instance XExplicitList GhcRn = NoExtField type instance XExplicitList GhcTc = Type -- GhcPs: ExplicitList includes all source-level -- list literals, including overloaded ones -- GhcRn and GhcTc: ExplicitList used only for list literals -- that denote Haskell's built-in lists. 
Overloaded lists -- have been expanded away in the renamer -- See Note [Handling overloaded and rebindable constructs] -- in GHC.Rename.Expr type instance XRecordCon GhcPs = EpAnn [AddEpAnn] type instance XRecordCon GhcRn = NoExtField type instance XRecordCon GhcTc = PostTcExpr -- Instantiated constructor function type instance XRecordUpd GhcPs = EpAnn [AddEpAnn] type instance XRecordUpd GhcRn = NoExtField type instance XRecordUpd GhcTc = RecordUpdTc type instance XGetField GhcPs = EpAnnCO type instance XGetField GhcRn = NoExtField type instance XGetField GhcTc = DataConCantHappen -- HsGetField is eliminated by the renamer. See [Handling overloaded -- and rebindable constructs]. type instance XProjection GhcPs = EpAnn AnnProjection type instance XProjection GhcRn = NoExtField type instance XProjection GhcTc = DataConCantHappen -- HsProjection is eliminated by the renamer. See [Handling overloaded -- and rebindable constructs]. type instance XExprWithTySig GhcPs = EpAnn [AddEpAnn] type instance XExprWithTySig GhcRn = NoExtField type instance XExprWithTySig GhcTc = NoExtField type instance XArithSeq GhcPs = EpAnn [AddEpAnn] type instance XArithSeq GhcRn = NoExtField type instance XArithSeq GhcTc = PostTcExpr type instance XSpliceE (GhcPass _) = EpAnnCO type instance XProc (GhcPass _) = EpAnn [AddEpAnn] type instance XStatic GhcPs = EpAnn [AddEpAnn] type instance XStatic GhcRn = NameSet type instance XStatic GhcTc = (NameSet, Type) -- Free variables and type of expression, this is stored for convenience as wiring in -- StaticPtr is a bit tricky (see #20150) type instance XPragE (GhcPass _) = NoExtField type instance Anno [LocatedA ((StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (body (GhcPass pr)))))] = SrcSpanAnnL type instance Anno (StmtLR GhcRn GhcRn (LocatedA (body GhcRn))) = SrcSpanAnnA data AnnExplicitSum = AnnExplicitSum { aesOpen :: EpaLocation, aesBarsBefore :: [EpaLocation], aesBarsAfter :: [EpaLocation], aesClose :: EpaLocation } deriving Data data 
AnnFieldLabel = AnnFieldLabel { afDot :: Maybe EpaLocation } deriving Data data AnnProjection = AnnProjection { apOpen :: EpaLocation, -- ^ '(' apClose :: EpaLocation -- ^ ')' } deriving Data data AnnsIf = AnnsIf { aiIf :: EpaLocation, aiThen :: EpaLocation, aiElse :: EpaLocation, aiThenSemi :: Maybe EpaLocation, aiElseSemi :: Maybe EpaLocation } deriving Data -- --------------------------------------------------------------------- type instance XSCC (GhcPass _) = EpAnn AnnPragma type instance XXPragE (GhcPass _) = DataConCantHappen type instance XCDotFieldOcc (GhcPass _) = EpAnn AnnFieldLabel type instance XXDotFieldOcc (GhcPass _) = DataConCantHappen type instance XPresent (GhcPass _) = EpAnn [AddEpAnn] type instance XMissing GhcPs = EpAnn EpaLocation type instance XMissing GhcRn = NoExtField type instance XMissing GhcTc = Scaled Type type instance XXTupArg (GhcPass _) = DataConCantHappen tupArgPresent :: HsTupArg (GhcPass p) -> Bool tupArgPresent (Present {}) = True tupArgPresent (Missing {}) = False {- ********************************************************************* * * XXExpr: the extension constructor of HsExpr * * ********************************************************************* -} type instance XXExpr GhcPs = DataConCantHappen type instance XXExpr GhcRn = HsExpansion (HsExpr GhcRn) (HsExpr GhcRn) type instance XXExpr GhcTc = XXExprGhcTc -- HsExpansion: see Note [Rebindable syntax and HsExpansion] below data XXExprGhcTc = WrapExpr -- Type and evidence application and abstractions {-# UNPACK #-} !(HsWrap HsExpr) | ExpansionExpr -- See Note [Rebindable syntax and HsExpansion] below {-# UNPACK #-} !(HsExpansion (HsExpr GhcRn) (HsExpr GhcTc)) | ConLikeTc -- Result of typechecking a data-con -- See Note [Typechecking data constructors] in -- GHC.Tc.Gen.Head -- The two arguments describe how to eta-expand -- the data constructor when desugaring ConLike [TcTyVar] [Scaled TcType] --------------------------------------- -- Haskell program coverage (Hpc) 
Support | HsTick CoreTickish (LHsExpr GhcTc) -- sub-expression | HsBinTick Int -- module-local tick number for True Int -- module-local tick number for False (LHsExpr GhcTc) -- sub-expression {- ********************************************************************* * * Pretty-printing expressions * * ********************************************************************* -} instance (OutputableBndrId p) => Outputable (HsExpr (GhcPass p)) where ppr expr = pprExpr expr ----------------------- -- pprExpr, pprLExpr, pprBinds call pprDeeper; -- the underscore versions do not pprLExpr :: (OutputableBndrId p) => LHsExpr (GhcPass p) -> SDoc pprLExpr (L _ e) = pprExpr e pprExpr :: (OutputableBndrId p) => HsExpr (GhcPass p) -> SDoc pprExpr e | isAtomicHsExpr e || isQuietHsExpr e = ppr_expr e | otherwise = pprDeeper (ppr_expr e) isQuietHsExpr :: HsExpr id -> Bool -- Parentheses do display something, but it gives little info and -- if we go deeper when we go inside them then we get ugly things -- like (...) isQuietHsExpr (HsPar {}) = True -- applications don't display anything themselves isQuietHsExpr (HsApp {}) = True isQuietHsExpr (HsAppType {}) = True isQuietHsExpr (OpApp {}) = True isQuietHsExpr _ = False pprBinds :: (OutputableBndrId idL, OutputableBndrId idR) => HsLocalBindsLR (GhcPass idL) (GhcPass idR) -> SDoc pprBinds b = pprDeeper (ppr b) ----------------------- ppr_lexpr :: (OutputableBndrId p) => LHsExpr (GhcPass p) -> SDoc ppr_lexpr e = ppr_expr (unLoc e) ppr_expr :: forall p. 
(OutputableBndrId p) => HsExpr (GhcPass p) -> SDoc ppr_expr (HsVar _ (L _ v)) = pprPrefixOcc v ppr_expr (HsUnboundVar _ uv) = pprPrefixOcc uv ppr_expr (HsRecSel _ f) = pprPrefixOcc f ppr_expr (HsIPVar _ v) = ppr v ppr_expr (HsOverLabel _ l) = char '#' <> ppr l ppr_expr (HsLit _ lit) = ppr lit ppr_expr (HsOverLit _ lit) = ppr lit ppr_expr (HsPar _ _ e _) = parens (ppr_lexpr e) ppr_expr (HsPragE _ prag e) = sep [ppr prag, ppr_lexpr e] ppr_expr e@(HsApp {}) = ppr_apps e [] ppr_expr e@(HsAppType {}) = ppr_apps e [] ppr_expr (OpApp _ e1 op e2) | Just pp_op <- ppr_infix_expr (unLoc op) = pp_infixly pp_op | otherwise = pp_prefixly where pp_e1 = pprDebugParendExpr opPrec e1 -- In debug mode, add parens pp_e2 = pprDebugParendExpr opPrec e2 -- to make precedence clear pp_prefixly = hang (ppr op) 2 (sep [pp_e1, pp_e2]) pp_infixly pp_op = hang pp_e1 2 (sep [pp_op, nest 2 pp_e2]) ppr_expr (NegApp _ e _) = char '-' <+> pprDebugParendExpr appPrec e ppr_expr (SectionL _ expr op) | Just pp_op <- ppr_infix_expr (unLoc op) = pp_infixly pp_op | otherwise = pp_prefixly where pp_expr = pprDebugParendExpr opPrec expr pp_prefixly = hang (hsep [text " \\ x_ ->", ppr op]) 4 (hsep [pp_expr, text "x_ )"]) pp_infixly v = (sep [pp_expr, v]) ppr_expr (SectionR _ op expr) | Just pp_op <- ppr_infix_expr (unLoc op) = pp_infixly pp_op | otherwise = pp_prefixly where pp_expr = pprDebugParendExpr opPrec expr pp_prefixly = hang (hsep [text "( \\ x_ ->", ppr op, text "x_"]) 4 (pp_expr <> rparen) pp_infixly v = sep [v, pp_expr] ppr_expr (ExplicitTuple _ exprs boxity) -- Special-case unary boxed tuples so that they are pretty-printed as -- `Solo x`, not `(x)` | [Present _ expr] <- exprs , Boxed <- boxity = hsep [text (mkTupleStr Boxed 1), ppr expr] | otherwise = tupleParens (boxityTupleSort boxity) (fcat (ppr_tup_args exprs)) where ppr_tup_args [] = [] ppr_tup_args (Present _ e : es) = (ppr_lexpr e <> punc es) : ppr_tup_args es ppr_tup_args (Missing _ : es) = punc es : ppr_tup_args es punc (Present {} : 
_) = comma <> space punc (Missing {} : _) = comma punc (XTupArg {} : _) = comma <> space punc [] = empty ppr_expr (ExplicitSum _ alt arity expr) = text "(#" <+> ppr_bars (alt - 1) <+> ppr expr <+> ppr_bars (arity - alt) <+> text "#)" where ppr_bars n = hsep (replicate n (char '|')) ppr_expr (HsLam _ matches) = pprMatches matches ppr_expr (HsLamCase _ lc_variant matches) = sep [ sep [lamCaseKeyword lc_variant], nest 2 (pprMatches matches) ] ppr_expr (HsCase _ expr matches@(MG { mg_alts = L _ alts })) = sep [ sep [text "case", nest 4 (ppr expr), text "of"], pp_alts ] where pp_alts | null alts = text "{}" | otherwise = nest 2 (pprMatches matches) ppr_expr (HsIf _ e1 e2 e3) = sep [hsep [text "if", nest 2 (ppr e1), text "then"], nest 4 (ppr e2), text "else", nest 4 (ppr e3)] ppr_expr (HsMultiIf _ alts) = hang (text "if") 3 (vcat (map ppr_alt alts)) where ppr_alt (L _ (GRHS _ guards expr)) = hang vbar 2 (ppr_one one_alt) where ppr_one [] = panic "ppr_exp HsMultiIf" ppr_one (h:t) = hang h 2 (sep t) one_alt = [ interpp'SP guards , text "->" <+> pprDeeper (ppr expr) ] ppr_alt (L _ (XGRHS x)) = ppr x -- special case: let ... in let ... 
ppr_expr (HsLet _ _ binds _ expr@(L _ (HsLet _ _ _ _ _))) = sep [hang (text "let") 2 (hsep [pprBinds binds, text "in"]), ppr_lexpr expr] ppr_expr (HsLet _ _ binds _ expr) = sep [hang (text "let") 2 (pprBinds binds), hang (text "in") 2 (ppr expr)] ppr_expr (HsDo _ do_or_list_comp (L _ stmts)) = pprDo do_or_list_comp stmts ppr_expr (ExplicitList _ exprs) = brackets (pprDeeperList fsep (punctuate comma (map ppr_lexpr exprs))) ppr_expr (RecordCon { rcon_con = con, rcon_flds = rbinds }) = hang pp_con 2 (ppr rbinds) where -- con :: ConLikeP (GhcPass p) -- so we need case analysis to know to print it pp_con = case ghcPass @p of GhcPs -> ppr con GhcRn -> ppr con GhcTc -> ppr con ppr_expr (RecordUpd { rupd_expr = L _ aexp, rupd_flds = flds }) = case flds of Left rbinds -> hang (ppr aexp) 2 (braces (fsep (punctuate comma (map ppr rbinds)))) Right pbinds -> hang (ppr aexp) 2 (braces (fsep (punctuate comma (map ppr pbinds)))) ppr_expr (HsGetField { gf_expr = L _ fexp, gf_field = field }) = ppr fexp <> dot <> ppr field ppr_expr (HsProjection { proj_flds = flds }) = parens (hcat (dot : (punctuate dot (map ppr $ toList flds)))) ppr_expr (ExprWithTySig _ expr sig) = hang (nest 2 (ppr_lexpr expr) <+> dcolon) 4 (ppr sig) ppr_expr (ArithSeq _ _ info) = brackets (ppr info) ppr_expr (HsSpliceE _ s) = pprSplice s ppr_expr (HsTypedBracket b e) = case ghcPass @p of GhcPs -> thTyBrackets (ppr e) GhcRn -> thTyBrackets (ppr e) GhcTc | HsBracketTc _ _ty _wrap ps <- b -> thTyBrackets (ppr e) `ppr_with_pending_tc_splices` ps ppr_expr (HsUntypedBracket b q) = case ghcPass @p of GhcPs -> ppr q GhcRn -> case b of [] -> ppr q ps -> ppr q $$ text "pending(rn)" <+> ppr ps GhcTc | HsBracketTc rnq _ty _wrap ps <- b -> ppr rnq `ppr_with_pending_tc_splices` ps ppr_expr (HsProc _ pat (L _ (HsCmdTop _ cmd))) = hsep [text "proc", ppr pat, text "->", ppr cmd] ppr_expr (HsStatic _ e) = hsep [text "static", ppr e] ppr_expr (XExpr x) = case ghcPass @p of #if __GLASGOW_HASKELL__ < 811 GhcPs -> ppr x #endif GhcRn 
-> ppr x GhcTc -> ppr x instance Outputable XXExprGhcTc where ppr (WrapExpr (HsWrap co_fn e)) = pprHsWrapper co_fn (\_parens -> pprExpr e) ppr (ExpansionExpr e) = ppr e -- e is an HsExpansion, we print the original -- expression (LHsExpr GhcPs), not the -- desugared one (LHsExpr GhcTc). ppr (ConLikeTc con _ _) = pprPrefixOcc con -- Used in error messages generated by -- the pattern match overlap checker ppr (HsTick tickish exp) = pprTicks (ppr exp) $ ppr tickish <+> ppr_lexpr exp ppr (HsBinTick tickIdTrue tickIdFalse exp) = pprTicks (ppr exp) $ hcat [text "bintick<", ppr tickIdTrue, text ",", ppr tickIdFalse, text ">(", ppr exp, text ")"] ppr_infix_expr :: forall p. (OutputableBndrId p) => HsExpr (GhcPass p) -> Maybe SDoc ppr_infix_expr (HsVar _ (L _ v)) = Just (pprInfixOcc v) ppr_infix_expr (HsRecSel _ f) = Just (pprInfixOcc f) ppr_infix_expr (HsUnboundVar _ occ) = Just (pprInfixOcc occ) ppr_infix_expr (XExpr x) = case ghcPass @p of #if __GLASGOW_HASKELL__ < 901 GhcPs -> Nothing #endif GhcRn -> ppr_infix_expr_rn x GhcTc -> ppr_infix_expr_tc x ppr_infix_expr _ = Nothing ppr_infix_expr_rn :: HsExpansion (HsExpr GhcRn) (HsExpr GhcRn) -> Maybe SDoc ppr_infix_expr_rn (HsExpanded a _) = ppr_infix_expr a ppr_infix_expr_tc :: XXExprGhcTc -> Maybe SDoc ppr_infix_expr_tc (WrapExpr (HsWrap _ e)) = ppr_infix_expr e ppr_infix_expr_tc (ExpansionExpr (HsExpanded a _)) = ppr_infix_expr a ppr_infix_expr_tc (ConLikeTc {}) = Nothing ppr_infix_expr_tc (HsTick {}) = Nothing ppr_infix_expr_tc (HsBinTick {}) = Nothing ppr_apps :: (OutputableBndrId p) => HsExpr (GhcPass p) -> [Either (LHsExpr (GhcPass p)) (LHsWcType (NoGhcTc (GhcPass p)))] -> SDoc ppr_apps (HsApp _ (L _ fun) arg) args = ppr_apps fun (Left arg : args) ppr_apps (HsAppType _ (L _ fun) arg) args = ppr_apps fun (Right arg : args) ppr_apps fun args = hang (ppr_expr fun) 2 (fsep (map pp args)) where pp (Left arg) = ppr arg -- pp (Right (LHsWcTypeX (HsWC { hswc_body = L _ arg }))) -- = char '@' <> pprHsType arg pp (Right arg) = 
text "@" <> ppr arg pprDebugParendExpr :: (OutputableBndrId p) => PprPrec -> LHsExpr (GhcPass p) -> SDoc pprDebugParendExpr p expr = getPprDebug $ \case True -> pprParendLExpr p expr False -> pprLExpr expr pprParendLExpr :: (OutputableBndrId p) => PprPrec -> LHsExpr (GhcPass p) -> SDoc pprParendLExpr p (L _ e) = pprParendExpr p e pprParendExpr :: (OutputableBndrId p) => PprPrec -> HsExpr (GhcPass p) -> SDoc pprParendExpr p expr | hsExprNeedsParens p expr = parens (pprExpr expr) | otherwise = pprExpr expr -- Using pprLExpr makes sure that we go 'deeper' -- I think that is usually (always?) right -- | @'hsExprNeedsParens' p e@ returns 'True' if the expression @e@ needs -- parentheses under precedence @p@. hsExprNeedsParens :: forall p. IsPass p => PprPrec -> HsExpr (GhcPass p) -> Bool hsExprNeedsParens prec = go where go :: HsExpr (GhcPass p) -> Bool go (HsVar{}) = False go (HsUnboundVar{}) = False go (HsIPVar{}) = False go (HsOverLabel{}) = False go (HsLit _ l) = hsLitNeedsParens prec l go (HsOverLit _ ol) = hsOverLitNeedsParens prec ol go (HsPar{}) = False go (HsApp{}) = prec >= appPrec go (HsAppType {}) = prec >= appPrec go (OpApp{}) = prec >= opPrec go (NegApp{}) = prec > topPrec go (SectionL{}) = True go (SectionR{}) = True -- Special-case unary boxed tuple applications so that they are -- parenthesized as `Identity (Solo x)`, not `Identity Solo x` (#18612) -- See Note [One-tuples] in GHC.Builtin.Types go (ExplicitTuple _ [Present{}] Boxed) = prec >= appPrec go (ExplicitTuple{}) = False go (ExplicitSum{}) = False go (HsLam{}) = prec > topPrec go (HsLamCase{}) = prec > topPrec go (HsCase{}) = prec > topPrec go (HsIf{}) = prec > topPrec go (HsMultiIf{}) = prec > topPrec go (HsLet{}) = prec > topPrec go (HsDo _ sc _) | isDoComprehensionContext sc = False | otherwise = prec > topPrec go (ExplicitList{}) = False go (RecordUpd{}) = False go (ExprWithTySig{}) = prec >= sigPrec go (ArithSeq{}) = False go (HsPragE{}) = prec >= appPrec go (HsSpliceE{}) = False go 
(HsTypedBracket{}) = False go (HsUntypedBracket{}) = False go (HsProc{}) = prec > topPrec go (HsStatic{}) = prec >= appPrec go (RecordCon{}) = False go (HsRecSel{}) = False go (HsProjection{}) = True go (HsGetField{}) = False go (XExpr x) = case ghcPass @p of GhcTc -> go_x_tc x GhcRn -> go_x_rn x #if __GLASGOW_HASKELL__ <= 900 GhcPs -> True #endif go_x_tc :: XXExprGhcTc -> Bool go_x_tc (WrapExpr (HsWrap _ e)) = hsExprNeedsParens prec e go_x_tc (ExpansionExpr (HsExpanded a _)) = hsExprNeedsParens prec a go_x_tc (ConLikeTc {}) = False go_x_tc (HsTick _ (L _ e)) = hsExprNeedsParens prec e go_x_tc (HsBinTick _ _ (L _ e)) = hsExprNeedsParens prec e go_x_rn :: HsExpansion (HsExpr GhcRn) (HsExpr GhcRn) -> Bool go_x_rn (HsExpanded a _) = hsExprNeedsParens prec a -- | Parenthesize an expression without token information gHsPar :: LHsExpr (GhcPass id) -> HsExpr (GhcPass id) gHsPar e = HsPar noAnn noHsTok e noHsTok -- | @'parenthesizeHsExpr' p e@ checks if @'hsExprNeedsParens' p e@ is true, -- and if so, surrounds @e@ with an 'HsPar'. Otherwise, it simply returns @e@. parenthesizeHsExpr :: IsPass p => PprPrec -> LHsExpr (GhcPass p) -> LHsExpr (GhcPass p) parenthesizeHsExpr p le@(L loc e) | hsExprNeedsParens p e = L loc (gHsPar le) | otherwise = le stripParensLHsExpr :: LHsExpr (GhcPass p) -> LHsExpr (GhcPass p) stripParensLHsExpr (L _ (HsPar _ _ e _)) = stripParensLHsExpr e stripParensLHsExpr e = e stripParensHsExpr :: HsExpr (GhcPass p) -> HsExpr (GhcPass p) stripParensHsExpr (HsPar _ _ (L _ e) _) = stripParensHsExpr e stripParensHsExpr e = e isAtomicHsExpr :: forall p. 
IsPass p => HsExpr (GhcPass p) -> Bool -- True of a single token isAtomicHsExpr (HsVar {}) = True isAtomicHsExpr (HsLit {}) = True isAtomicHsExpr (HsOverLit {}) = True isAtomicHsExpr (HsIPVar {}) = True isAtomicHsExpr (HsOverLabel {}) = True isAtomicHsExpr (HsUnboundVar {}) = True isAtomicHsExpr (HsRecSel{}) = True isAtomicHsExpr (XExpr x) | GhcTc <- ghcPass @p = go_x_tc x | GhcRn <- ghcPass @p = go_x_rn x where go_x_tc (WrapExpr (HsWrap _ e)) = isAtomicHsExpr e go_x_tc (ExpansionExpr (HsExpanded a _)) = isAtomicHsExpr a go_x_tc (ConLikeTc {}) = True go_x_tc (HsTick {}) = False go_x_tc (HsBinTick {}) = False go_x_rn (HsExpanded a _) = isAtomicHsExpr a isAtomicHsExpr _ = False instance Outputable (HsPragE (GhcPass p)) where ppr (HsPragSCC _ st (StringLiteral stl lbl _)) = pprWithSourceText st (text "{-# SCC") -- no doublequotes if stl empty, for the case where the SCC was written -- without quotes. <+> pprWithSourceText stl (ftext lbl) <+> text "#-}" {- ********************************************************************* * * HsExpansion and rebindable syntax * * ********************************************************************* -} {- Note [Rebindable syntax and HsExpansion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We implement rebindable syntax (RS) support by performing a desugaring in the renamer. We transform GhcPs expressions and patterns affected by RS into the appropriate desugared form, but **annotated with the original expression/pattern**. Let us consider a piece of code like: {-# LANGUAGE RebindableSyntax #-} ifThenElse :: Char -> () -> () -> () ifThenElse _ _ _ = () x = if 'a' then () else True The parsed AST for the RHS of x would look something like (slightly simplified): L locif (HsIf (L loca 'a') (L loctrue ()) (L locfalse True)) Upon seeing such an AST with RS on, we could transform it into a mere function call, as per the RS rules, equivalent to the following function application: ifThenElse 'a' () True which doesn't typecheck. 
But GHC would report an error about not being able to match the third argument's type (Bool) with the expected type: (), in the expression _as desugared_, i.e in the aforementioned function application. But the user never wrote a function application! This would be pretty bad. To remedy this, instead of transforming the original HsIf node into mere applications of 'ifThenElse', we keep the original 'if' expression around too, using the TTG XExpr extension point to allow GHC to construct an 'HsExpansion' value that will keep track of the original expression in its first field, and the desugared one in the second field. The resulting renamed AST would look like: L locif (XExpr (HsExpanded (HsIf (L loca 'a') (L loctrue ()) (L locfalse True) ) (App (L generatedSrcSpan (App (L generatedSrcSpan (App (L generatedSrcSpan (Var ifThenElse)) (L loca 'a') ) ) (L loctrue ()) ) ) (L locfalse True) ) ) ) When comes the time to typecheck the program, we end up calling tcMonoExpr on the AST above. If this expression gives rise to a type error, then it will appear in a context line and GHC will pretty-print it using the 'Outputable (HsExpansion a b)' instance defined below, which *only prints the original expression*. This is the gist of the idea, but is not quite enough to recover the error messages that we had with the SyntaxExpr-based, typechecking/desugaring-to-core time implementation of rebindable syntax. The key idea is to decorate some elements of the desugared expression so as to be able to give them a special treatment when typechecking the desugared expression, to print a different context line or skip one altogether. Whenever we 'setSrcSpan' a 'generatedSrcSpan', we update a field in TcLclEnv called 'tcl_in_gen_code', setting it to True, which indicates that we entered generated code, i.e code fabricated by the compiler when rebinding some syntax. 
If someone tries to push some error context line while that field is set to True, the pushing won't actually happen and the context line is just dropped. Once we 'setSrcSpan' a real span (for an expression that was in the original source code), we set 'tcl_in_gen_code' back to False, indicating that we "emerged from the generated code tunnel", and that the expressions we will be processing are relevant to report in context lines again. You might wonder why TcLclEnv has both tcl_loc :: RealSrcSpan tcl_in_gen_code :: Bool Could we not store a Maybe RealSrcSpan? The problem is that we still generate constraints when processing generated code, and a CtLoc must contain a RealSrcSpan -- otherwise, error messages might appear without source locations. So tcl_loc keeps the RealSrcSpan of the last location spotted that wasn't generated; it's as good as we're going to get in generated code. Once we get to sub-trees that are not generated, then we update the RealSrcSpan appropriately, and set the tcl_in_gen_code Bool to False. --- An overview of the constructs that are desugared in this way is laid out in Note [Handling overloaded and rebindable constructs] in GHC.Rename.Expr. A general recipe to follow this approach for new constructs could go as follows: - Remove any GhcRn-time SyntaxExpr extensions to the relevant constructor for your construct, in HsExpr or related syntax data types. 
  - At renaming-time:
    - take your original node of interest (HsIf above)
    - rename its subexpressions/subpatterns (condition and true/false
      branches above)
    - construct the suitable "rebound"-and-renamed result (ifThenElse call
      above), where the 'SrcSpan' attached to any _fabricated node_ (the
      HsVar/HsApp nodes, above) is set to 'generatedSrcSpan'
    - take both the original node and that rebound-and-renamed result and
      wrap them into an expansion construct:
      for expressions, XExpr (HsExpanded <original node> <desugared>)
      for patterns, XPat (HsPatExpanded <original node> <desugared>)
  - At typechecking-time:
    - remove any logic that was previously dealing with your rebindable
      construct, typically involving [tc]SyntaxOp, SyntaxExpr and friends.
    - the XExpr (HsExpanded ... ...) case in tcExpr already makes sure that we
      typecheck the desugared expression while reporting the original one in
      errors
-}

{- Note [Overview of record dot syntax]
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the note that explains all the moving parts for record dot
syntax.

The language extensions @OverloadedRecordDot@ and
@OverloadedRecordUpdate@ (providing "record dot syntax") are
implemented using the techniques of Note [Rebindable syntax and
HsExpansion].

When OverloadedRecordDot is enabled:
- Field selection expressions
  - e.g. foo.bar.baz
  - Have abstract syntax HsGetField
  - After renaming are XExpr (HsExpanded (HsGetField ...) (getField @"..."...))
    expressions
- Field selector expressions e.g. (.x.y)
  - Have abstract syntax HsProjection
  - After renaming are XExpr (HsExpanded (HsProjection ...) ((getField @"...") . (getField @"...") . ...)
    expressions

When OverloadedRecordUpdate is enabled:
- Record update expressions
  - e.g. a{foo.bar=1, quux="corge", baz}
  - Have abstract syntax RecordUpd
    - With rupd_flds containing a Right
    - See Note [RecordDotSyntax field updates] (in Language.Haskell.Syntax.Expr)
  - After renaming are XExpr (HsExpanded (RecordUpd ...) (setField@"..." ...)
expressions - Note that this is true for all record updates even for those that do not involve '.' When OverloadedRecordDot is enabled and RebindableSyntax is not enabled the name 'getField' is resolved to GHC.Records.getField. When OverloadedRecordDot is enabled and RebindableSyntax is enabled the name 'getField' is whatever in-scope name that is. When OverloadedRecordUpd is enabled and RebindableSyntax is not enabled it is an error for now (temporary while we wait on native setField support; see https://gitlab.haskell.org/ghc/ghc/-/issues/16232). When OverloadedRecordUpd is enabled and RebindableSyntax is enabled the names 'getField' and 'setField' are whatever in-scope names they are. -} -- See Note [Rebindable syntax and HsExpansion] just above. data HsExpansion orig expanded = HsExpanded orig expanded deriving Data -- | Just print the original expression (the @a@). instance (Outputable a, Outputable b) => Outputable (HsExpansion a b) where ppr (HsExpanded orig expanded) = ifPprDebug (vcat [ppr orig, braces (text "Expansion:" <+> ppr expanded)]) (ppr orig) {- ************************************************************************ * * \subsection{Commands (in arrow abstractions)} * * ************************************************************************ -} type instance XCmdArrApp GhcPs = EpAnn AddEpAnn type instance XCmdArrApp GhcRn = NoExtField type instance XCmdArrApp GhcTc = Type type instance XCmdArrForm GhcPs = EpAnn AnnList type instance XCmdArrForm GhcRn = NoExtField type instance XCmdArrForm GhcTc = NoExtField type instance XCmdApp (GhcPass _) = EpAnnCO type instance XCmdLam (GhcPass _) = NoExtField type instance XCmdPar (GhcPass _) = EpAnnCO type instance XCmdCase GhcPs = EpAnn EpAnnHsCase type instance XCmdCase GhcRn = NoExtField type instance XCmdCase GhcTc = NoExtField type instance XCmdLamCase (GhcPass _) = EpAnn [AddEpAnn] type instance XCmdIf GhcPs = EpAnn AnnsIf type instance XCmdIf GhcRn = NoExtField type instance XCmdIf GhcTc = NoExtField 
type instance XCmdLet GhcPs = EpAnnCO type instance XCmdLet GhcRn = NoExtField type instance XCmdLet GhcTc = NoExtField type instance XCmdDo GhcPs = EpAnn AnnList type instance XCmdDo GhcRn = NoExtField type instance XCmdDo GhcTc = Type type instance XCmdWrap (GhcPass _) = NoExtField type instance XXCmd GhcPs = DataConCantHappen type instance XXCmd GhcRn = DataConCantHappen type instance XXCmd GhcTc = HsWrap HsCmd type instance Anno [LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsCmd (GhcPass pr))))] = SrcSpanAnnL -- If cmd :: arg1 --> res -- wrap :: arg1 "->" arg2 -- Then (XCmd (HsWrap wrap cmd)) :: arg2 --> res data CmdTopTc = CmdTopTc Type -- Nested tuple of inputs on the command's stack Type -- return type of the command (CmdSyntaxTable GhcTc) -- See Note [CmdSyntaxTable] type instance XCmdTop GhcPs = NoExtField type instance XCmdTop GhcRn = CmdSyntaxTable GhcRn -- See Note [CmdSyntaxTable] type instance XCmdTop GhcTc = CmdTopTc type instance XXCmdTop (GhcPass _) = DataConCantHappen instance (OutputableBndrId p) => Outputable (HsCmd (GhcPass p)) where ppr cmd = pprCmd cmd ----------------------- -- pprCmd and pprLCmd call pprDeeper; -- the underscore versions do not pprLCmd :: (OutputableBndrId p) => LHsCmd (GhcPass p) -> SDoc pprLCmd (L _ c) = pprCmd c pprCmd :: (OutputableBndrId p) => HsCmd (GhcPass p) -> SDoc pprCmd c | isQuietHsCmd c = ppr_cmd c | otherwise = pprDeeper (ppr_cmd c) isQuietHsCmd :: HsCmd id -> Bool -- Parentheses do display something, but it gives little info and -- if we go deeper when we go inside them then we get ugly things -- like (...) isQuietHsCmd (HsCmdPar {}) = True -- applications don't display anything themselves isQuietHsCmd (HsCmdApp {}) = True isQuietHsCmd _ = False ----------------------- ppr_lcmd :: (OutputableBndrId p) => LHsCmd (GhcPass p) -> SDoc ppr_lcmd c = ppr_cmd (unLoc c) ppr_cmd :: forall p. 
(OutputableBndrId p ) => HsCmd (GhcPass p) -> SDoc ppr_cmd (HsCmdPar _ _ c _) = parens (ppr_lcmd c) ppr_cmd (HsCmdApp _ c e) = let (fun, args) = collect_args c [e] in hang (ppr_lcmd fun) 2 (sep (map ppr args)) where collect_args (L _ (HsCmdApp _ fun arg)) args = collect_args fun (arg:args) collect_args fun args = (fun, args) ppr_cmd (HsCmdLam _ matches) = pprMatches matches ppr_cmd (HsCmdCase _ expr matches) = sep [ sep [text "case", nest 4 (ppr expr), text "of"], nest 2 (pprMatches matches) ] ppr_cmd (HsCmdLamCase _ lc_variant matches) = sep [ lamCaseKeyword lc_variant, nest 2 (pprMatches matches) ] ppr_cmd (HsCmdIf _ _ e ct ce) = sep [hsep [text "if", nest 2 (ppr e), text "then"], nest 4 (ppr ct), text "else", nest 4 (ppr ce)] -- special case: let ... in let ... ppr_cmd (HsCmdLet _ _ binds _ cmd@(L _ (HsCmdLet {}))) = sep [hang (text "let") 2 (hsep [pprBinds binds, text "in"]), ppr_lcmd cmd] ppr_cmd (HsCmdLet _ _ binds _ cmd) = sep [hang (text "let") 2 (pprBinds binds), hang (text "in") 2 (ppr cmd)] ppr_cmd (HsCmdDo _ (L _ stmts)) = pprArrowExpr stmts ppr_cmd (HsCmdArrApp _ arrow arg HsFirstOrderApp True) = hsep [ppr_lexpr arrow, larrowt, ppr_lexpr arg] ppr_cmd (HsCmdArrApp _ arrow arg HsFirstOrderApp False) = hsep [ppr_lexpr arg, arrowt, ppr_lexpr arrow] ppr_cmd (HsCmdArrApp _ arrow arg HsHigherOrderApp True) = hsep [ppr_lexpr arrow, larrowtt, ppr_lexpr arg] ppr_cmd (HsCmdArrApp _ arrow arg HsHigherOrderApp False) = hsep [ppr_lexpr arg, arrowtt, ppr_lexpr arrow] ppr_cmd (HsCmdArrForm _ (L _ op) ps_fix rn_fix args) | HsVar _ (L _ v) <- op = ppr_cmd_infix v | GhcTc <- ghcPass @p , XExpr (ConLikeTc c _ _) <- op = ppr_cmd_infix (conLikeName c) | otherwise = fall_through where fall_through = hang (text "(|" <+> ppr_expr op) 4 (sep (map (pprCmdArg.unLoc) args) <+> text "|)") ppr_cmd_infix :: OutputableBndr v => v -> SDoc ppr_cmd_infix v | [arg1, arg2] <- args , isJust rn_fix || ps_fix == Infix = hang (pprCmdArg (unLoc arg1)) 4 (sep [ pprInfixOcc v, pprCmdArg (unLoc 
arg2)]) | otherwise = fall_through ppr_cmd (XCmd x) = case ghcPass @p of #if __GLASGOW_HASKELL__ < 811 GhcPs -> ppr x GhcRn -> ppr x #endif GhcTc -> case x of HsWrap w cmd -> pprHsWrapper w (\_ -> parens (ppr_cmd cmd)) pprCmdArg :: (OutputableBndrId p) => HsCmdTop (GhcPass p) -> SDoc pprCmdArg (HsCmdTop _ cmd) = ppr_lcmd cmd instance (OutputableBndrId p) => Outputable (HsCmdTop (GhcPass p)) where ppr = pprCmdArg {- ************************************************************************ * * \subsection{@Match@, @GRHSs@, and @GRHS@ datatypes} * * ************************************************************************ -} type instance XMG GhcPs b = NoExtField type instance XMG GhcRn b = NoExtField type instance XMG GhcTc b = MatchGroupTc type instance XXMatchGroup (GhcPass _) b = DataConCantHappen type instance XCMatch (GhcPass _) b = EpAnn [AddEpAnn] type instance XXMatch (GhcPass _) b = DataConCantHappen instance (OutputableBndrId pr, Outputable body) => Outputable (Match (GhcPass pr) body) where ppr = pprMatch isEmptyMatchGroup :: MatchGroup (GhcPass p) body -> Bool isEmptyMatchGroup (MG { mg_alts = ms }) = null $ unLoc ms -- | Is there only one RHS in this list of matches? isSingletonMatchGroup :: [LMatch (GhcPass p) body] -> Bool isSingletonMatchGroup matches | [L _ match] <- matches , Match { m_grhss = GRHSs { grhssGRHSs = [_] } } <- match = True | otherwise = False matchGroupArity :: MatchGroup (GhcPass id) body -> Arity -- Precondition: MatchGroup is non-empty -- This is called before type checking, when mg_arg_tys is not set matchGroupArity (MG { mg_alts = alts }) | L _ (alt1:_) <- alts = length (hsLMatchPats alt1) | otherwise = panic "matchGroupArity" hsLMatchPats :: LMatch (GhcPass id) body -> [LPat (GhcPass id)] hsLMatchPats (L _ (Match { m_pats = pats })) = pats -- We keep the type checker happy by providing EpAnnComments. 
They -- can only be used if they follow a `where` keyword with no binds, -- but in that case the comment is attached to the following parsed -- item. So this can never be used in practice. type instance XCGRHSs (GhcPass _) _ = EpAnnComments type instance XXGRHSs (GhcPass _) _ = DataConCantHappen data GrhsAnn = GrhsAnn { ga_vbar :: Maybe EpaLocation, -- TODO:AZ do we need this? ga_sep :: AddEpAnn -- ^ Match separator location } deriving (Data) type instance XCGRHS (GhcPass _) _ = EpAnn GrhsAnn -- Location of matchSeparator -- TODO:AZ does this belong on the GRHS, or GRHSs? type instance XXGRHS (GhcPass _) b = DataConCantHappen pprMatches :: (OutputableBndrId idR, Outputable body) => MatchGroup (GhcPass idR) body -> SDoc pprMatches MG { mg_alts = matches } = vcat (map pprMatch (map unLoc (unLoc matches))) -- Don't print the type; it's only a place-holder before typechecking -- Exported to GHC.Hs.Binds, which can't see the defn of HsMatchContext pprFunBind :: (OutputableBndrId idR) => MatchGroup (GhcPass idR) (LHsExpr (GhcPass idR)) -> SDoc pprFunBind matches = pprMatches matches -- Exported to GHC.Hs.Binds, which can't see the defn of HsMatchContext pprPatBind :: forall bndr p . (OutputableBndrId bndr, OutputableBndrId p) => LPat (GhcPass bndr) -> GRHSs (GhcPass p) (LHsExpr (GhcPass p)) -> SDoc pprPatBind pat grhss = sep [ppr pat, nest 2 (pprGRHSs (PatBindRhs :: HsMatchContext (GhcPass p)) grhss)] pprMatch :: (OutputableBndrId idR, Outputable body) => Match (GhcPass idR) body -> SDoc pprMatch (Match { m_pats = pats, m_ctxt = ctxt, m_grhss = grhss }) = sep [ sep (herald : map (nest 2 . 
pprParendLPat appPrec) other_pats) , nest 2 (pprGRHSs ctxt grhss) ] where (herald, other_pats) = case ctxt of FunRhs {mc_fun=L _ fun, mc_fixity=fixity, mc_strictness=strictness} | SrcStrict <- strictness -> assert (null pats) -- A strict variable binding (char '!'<>pprPrefixOcc fun, pats) | Prefix <- fixity -> (pprPrefixOcc fun, pats) -- f x y z = e -- Not pprBndr; the AbsBinds will -- have printed the signature | otherwise -> case pats of (p1:p2:rest) | null rest -> (pp_infix, []) -- x &&& y = e | otherwise -> (parens pp_infix, rest) -- (x &&& y) z = e where pp_infix = pprParendLPat opPrec p1 <+> pprInfixOcc fun <+> pprParendLPat opPrec p2 _ -> pprPanic "pprMatch" (ppr ctxt $$ ppr pats) LambdaExpr -> (char '\\', pats) -- We don't simply return (empty, pats) to avoid introducing an -- additional `nest 2` via the empty herald LamCaseAlt LamCases -> maybe (empty, []) (first $ pprParendLPat appPrec) (uncons pats) ArrowMatchCtxt (ArrowLamCaseAlt LamCases) -> maybe (empty, []) (first $ pprParendLPat appPrec) (uncons pats) ArrowMatchCtxt KappaExpr -> (char '\\', pats) ArrowMatchCtxt ProcExpr -> (text "proc", pats) _ -> case pats of [] -> (empty, []) [pat] -> (ppr pat, []) -- No parens around the single pat in a case _ -> pprPanic "pprMatch" (ppr ctxt $$ ppr pats) pprGRHSs :: (OutputableBndrId idR, Outputable body) => HsMatchContext passL -> GRHSs (GhcPass idR) body -> SDoc pprGRHSs ctxt (GRHSs _ grhss binds) = vcat (map (pprGRHS ctxt . unLoc) grhss) -- Print the "where" even if the contents of the binds is empty. 
Only -- EmptyLocalBinds means no "where" keyword $$ ppUnless (eqEmptyLocalBinds binds) (text "where" $$ nest 4 (pprBinds binds)) pprGRHS :: (OutputableBndrId idR, Outputable body) => HsMatchContext passL -> GRHS (GhcPass idR) body -> SDoc pprGRHS ctxt (GRHS _ [] body) = pp_rhs ctxt body pprGRHS ctxt (GRHS _ guards body) = sep [vbar <+> interpp'SP guards, pp_rhs ctxt body] pp_rhs :: Outputable body => HsMatchContext passL -> body -> SDoc pp_rhs ctxt rhs = matchSeparator ctxt <+> pprDeeper (ppr rhs) instance Outputable GrhsAnn where ppr (GrhsAnn v s) = text "GrhsAnn" <+> ppr v <+> ppr s {- ************************************************************************ * * \subsection{Do stmts and list comprehensions} * * ************************************************************************ -} -- Extra fields available post typechecking for RecStmt. data RecStmtTc = RecStmtTc { recS_bind_ty :: Type -- S in (>>=) :: Q -> (R -> S) -> T , recS_later_rets :: [PostTcExpr] -- (only used in the arrow version) , recS_rec_rets :: [PostTcExpr] -- These expressions correspond 1-to-1 -- with recS_later_ids and recS_rec_ids, -- and are the expressions that should be -- returned by the recursion. -- They may not quite be the Ids themselves, -- because the Id may be *polymorphic*, but -- the returned thing has to be *monomorphic*, -- so they may be type applications , recS_ret_ty :: Type -- The type of -- do { stmts; return (a,b,c) } -- With rebindable syntax the type might not -- be quite as simple as (m (tya, tyb, tyc)). 
} type instance XLastStmt (GhcPass _) (GhcPass _) b = NoExtField type instance XBindStmt (GhcPass _) GhcPs b = EpAnn [AddEpAnn] type instance XBindStmt (GhcPass _) GhcRn b = XBindStmtRn type instance XBindStmt (GhcPass _) GhcTc b = XBindStmtTc data XBindStmtRn = XBindStmtRn { xbsrn_bindOp :: SyntaxExpr GhcRn , xbsrn_failOp :: FailOperator GhcRn } data XBindStmtTc = XBindStmtTc { xbstc_bindOp :: SyntaxExpr GhcTc , xbstc_boundResultType :: Type -- If (>>=) :: Q -> (R -> S) -> T, this is S , xbstc_boundResultMult :: Mult -- If (>>=) :: Q -> (R -> S) -> T, this is S , xbstc_failOp :: FailOperator GhcTc } type instance XApplicativeStmt (GhcPass _) GhcPs b = NoExtField type instance XApplicativeStmt (GhcPass _) GhcRn b = NoExtField type instance XApplicativeStmt (GhcPass _) GhcTc b = Type type instance XBodyStmt (GhcPass _) GhcPs b = NoExtField type instance XBodyStmt (GhcPass _) GhcRn b = NoExtField type instance XBodyStmt (GhcPass _) GhcTc b = Type type instance XLetStmt (GhcPass _) (GhcPass _) b = EpAnn [AddEpAnn] type instance XParStmt (GhcPass _) GhcPs b = NoExtField type instance XParStmt (GhcPass _) GhcRn b = NoExtField type instance XParStmt (GhcPass _) GhcTc b = Type type instance XTransStmt (GhcPass _) GhcPs b = EpAnn [AddEpAnn] type instance XTransStmt (GhcPass _) GhcRn b = NoExtField type instance XTransStmt (GhcPass _) GhcTc b = Type type instance XRecStmt (GhcPass _) GhcPs b = EpAnn AnnList type instance XRecStmt (GhcPass _) GhcRn b = NoExtField type instance XRecStmt (GhcPass _) GhcTc b = RecStmtTc type instance XXStmtLR (GhcPass _) (GhcPass _) b = DataConCantHappen type instance XParStmtBlock (GhcPass pL) (GhcPass pR) = NoExtField type instance XXParStmtBlock (GhcPass pL) (GhcPass pR) = DataConCantHappen type instance XApplicativeArgOne GhcPs = NoExtField type instance XApplicativeArgOne GhcRn = FailOperator GhcRn type instance XApplicativeArgOne GhcTc = FailOperator GhcTc type instance XApplicativeArgMany (GhcPass _) = NoExtField type instance 
XXApplicativeArg (GhcPass _) = DataConCantHappen instance (Outputable (StmtLR (GhcPass idL) (GhcPass idL) (LHsExpr (GhcPass idL))), Outputable (XXParStmtBlock (GhcPass idL) (GhcPass idR))) => Outputable (ParStmtBlock (GhcPass idL) (GhcPass idR)) where ppr (ParStmtBlock _ stmts _ _) = interpp'SP stmts instance (OutputableBndrId pl, OutputableBndrId pr, Anno (StmtLR (GhcPass pl) (GhcPass pr) body) ~ SrcSpanAnnA, Outputable body) => Outputable (StmtLR (GhcPass pl) (GhcPass pr) body) where ppr stmt = pprStmt stmt pprStmt :: forall idL idR body . (OutputableBndrId idL, OutputableBndrId idR, Anno (StmtLR (GhcPass idL) (GhcPass idR) body) ~ SrcSpanAnnA, Outputable body) => (StmtLR (GhcPass idL) (GhcPass idR) body) -> SDoc pprStmt (LastStmt _ expr m_dollar_stripped _) = whenPprDebug (text "[last]") <+> (case m_dollar_stripped of Just True -> text "return $" Just False -> text "return" Nothing -> empty) <+> ppr expr pprStmt (BindStmt _ pat expr) = pprBindStmt pat expr pprStmt (LetStmt _ binds) = hsep [text "let", pprBinds binds] pprStmt (BodyStmt _ expr _ _) = ppr expr pprStmt (ParStmt _ stmtss _ _) = sep (punctuate (text " | ") (map ppr stmtss)) pprStmt (TransStmt { trS_stmts = stmts, trS_by = by , trS_using = using, trS_form = form }) = sep $ punctuate comma (map ppr stmts ++ [pprTransStmt by using form]) pprStmt (RecStmt { recS_stmts = segment, recS_rec_ids = rec_ids , recS_later_ids = later_ids }) = text "rec" <+> vcat [ ppr_do_stmts (unLoc segment) , whenPprDebug (vcat [ text "rec_ids=" <> ppr rec_ids , text "later_ids=" <> ppr later_ids])] pprStmt (ApplicativeStmt _ args mb_join) = getPprStyle $ \style -> if userStyle style then pp_for_user else pp_debug where -- make all the Applicative stuff invisible in error messages by -- flattening the whole ApplicativeStmt nest back to a sequence -- of statements. 
pp_for_user = vcat $ concatMap flattenArg args -- ppr directly rather than transforming here, because we need to -- inject a "return" which is hard when we're polymorphic in the id -- type. flattenStmt :: ExprLStmt (GhcPass idL) -> [SDoc] flattenStmt (L _ (ApplicativeStmt _ args _)) = concatMap flattenArg args flattenStmt stmt = [ppr stmt] flattenArg :: forall a . (a, ApplicativeArg (GhcPass idL)) -> [SDoc] flattenArg (_, ApplicativeArgOne _ pat expr isBody) | isBody = [ppr expr] -- See Note [Applicative BodyStmt] | otherwise = [pprBindStmt pat expr] flattenArg (_, ApplicativeArgMany _ stmts _ _ _) = concatMap flattenStmt stmts pp_debug = let ap_expr = sep (punctuate (text " |") (map pp_arg args)) in whenPprDebug (if isJust mb_join then text "[join]" else empty) <+> (if lengthAtLeast args 2 then parens else id) ap_expr pp_arg :: (a, ApplicativeArg (GhcPass idL)) -> SDoc pp_arg (_, applicativeArg) = ppr applicativeArg pprBindStmt :: (Outputable pat, Outputable expr) => pat -> expr -> SDoc pprBindStmt pat expr = hsep [ppr pat, larrow, ppr expr] instance (OutputableBndrId idL) => Outputable (ApplicativeArg (GhcPass idL)) where ppr = pprArg pprArg :: forall idL . 
(OutputableBndrId idL) => ApplicativeArg (GhcPass idL) -> SDoc pprArg (ApplicativeArgOne _ pat expr isBody) | isBody = ppr expr -- See Note [Applicative BodyStmt] | otherwise = pprBindStmt pat expr pprArg (ApplicativeArgMany _ stmts return pat ctxt) = ppr pat <+> text "<-" <+> pprDo ctxt (stmts ++ [noLocA (LastStmt noExtField (noLocA return) Nothing noSyntaxExpr)]) pprTransformStmt :: (OutputableBndrId p) => [IdP (GhcPass p)] -> LHsExpr (GhcPass p) -> Maybe (LHsExpr (GhcPass p)) -> SDoc pprTransformStmt bndrs using by = sep [ text "then" <+> whenPprDebug (braces (ppr bndrs)) , nest 2 (ppr using) , nest 2 (pprBy by)] pprTransStmt :: Outputable body => Maybe body -> body -> TransForm -> SDoc pprTransStmt by using ThenForm = sep [ text "then", nest 2 (ppr using), nest 2 (pprBy by)] pprTransStmt by using GroupForm = sep [ text "then group", nest 2 (pprBy by), nest 2 (text "using" <+> ppr using)] pprBy :: Outputable body => Maybe body -> SDoc pprBy Nothing = empty pprBy (Just e) = text "by" <+> ppr e pprDo :: (OutputableBndrId p, Outputable body, Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA ) => HsDoFlavour -> [LStmt (GhcPass p) body] -> SDoc pprDo (DoExpr m) stmts = ppr_module_name_prefix m <> text "do" <+> ppr_do_stmts stmts pprDo GhciStmtCtxt stmts = text "do" <+> ppr_do_stmts stmts pprDo (MDoExpr m) stmts = ppr_module_name_prefix m <> text "mdo" <+> ppr_do_stmts stmts pprDo ListComp stmts = brackets $ pprComp stmts pprDo MonadComp stmts = brackets $ pprComp stmts pprArrowExpr :: (OutputableBndrId p, Outputable body, Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA ) => [LStmt (GhcPass p) body] -> SDoc pprArrowExpr stmts = text "do" <+> ppr_do_stmts stmts ppr_module_name_prefix :: Maybe ModuleName -> SDoc ppr_module_name_prefix = \case Nothing -> empty Just module_name -> ppr module_name <> char '.' 
ppr_do_stmts :: (OutputableBndrId idL, OutputableBndrId idR, Anno (StmtLR (GhcPass idL) (GhcPass idR) body) ~ SrcSpanAnnA, Outputable body) => [LStmtLR (GhcPass idL) (GhcPass idR) body] -> SDoc -- Print a bunch of do stmts ppr_do_stmts stmts = pprDeeperList vcat (map ppr stmts) pprComp :: (OutputableBndrId p, Outputable body, Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA) => [LStmt (GhcPass p) body] -> SDoc pprComp quals -- Prints: body | qual1, ..., qualn | Just (initStmts, L _ (LastStmt _ body _ _)) <- snocView quals = if null initStmts -- If there are no statements in a list comprehension besides the last -- one, we simply treat it like a normal list. This does arise -- occasionally in code that GHC generates, e.g., in implementations of -- 'range' for derived 'Ix' instances for product datatypes with exactly -- one constructor (e.g., see #12583). then ppr body else hang (ppr body <+> vbar) 2 (pprQuals initStmts) | otherwise = pprPanic "pprComp" (pprQuals quals) pprQuals :: (OutputableBndrId p, Outputable body, Anno (StmtLR (GhcPass p) (GhcPass p) body) ~ SrcSpanAnnA) => [LStmt (GhcPass p) body] -> SDoc -- Show list comprehension qualifiers separated by commas pprQuals quals = interpp'SP quals {- ************************************************************************ * * Template Haskell quotation brackets * * ************************************************************************ -} newtype HsSplicedT = HsSplicedT DelayedSplice deriving (Data) type instance XTypedSplice (GhcPass _) = EpAnn [AddEpAnn] type instance XUntypedSplice (GhcPass _) = EpAnn [AddEpAnn] type instance XQuasiQuote (GhcPass _) = NoExtField type instance XSpliced (GhcPass _) = NoExtField type instance XXSplice GhcPs = DataConCantHappen type instance XXSplice GhcRn = DataConCantHappen type instance XXSplice GhcTc = HsSplicedT -- See Note [Running typed splices in the zonker] -- These are the arguments that are passed to `GHC.Tc.Gen.Splice.runTopSplice` data DelayedSplice = 
DelayedSplice TcLclEnv -- The local environment to run the splice in (LHsExpr GhcRn) -- The original renamed expression TcType -- The result type of running the splice, unzonked (LHsExpr GhcTc) -- The typechecked expression to run and splice in the result -- A Data instance which ignores the argument of 'DelayedSplice'. instance Data DelayedSplice where gunfold _ _ _ = panic "DelayedSplice" toConstr a = mkConstr (dataTypeOf a) "DelayedSplice" [] Data.Prefix dataTypeOf a = mkDataType "HsExpr.DelayedSplice" [toConstr a] -- See Note [Pending Splices] type SplicePointName = Name -- | Pending Renamer Splice data PendingRnSplice = PendingRnSplice UntypedSpliceFlavour SplicePointName (LHsExpr GhcRn) -- | Pending Type-checker Splice data PendingTcSplice = PendingTcSplice SplicePointName (LHsExpr GhcTc) {- Note [Pending Splices] ~~~~~~~~~~~~~~~~~~~~~~ When we rename an untyped bracket, we name and lift out all the nested splices, so that when the typechecker hits the bracket, it can typecheck those nested splices without having to walk over the untyped bracket code. So for example [| f $(g x) |] looks like HsUntypedBracket _ (HsApp (HsVar "f") (HsSpliceE _ (HsUntypedSplice sn (g x))) which the renamer rewrites to HsUntypedBracket [PendingRnSplice UntypedExpSplice sn (g x)] (HsApp (HsVar f) (HsSpliceE _ (HsUntypedSplice sn (g x))) * The 'sn' is the Name of the splice point, the SplicePointName * The PendingRnExpSplice gives the splice that splice-point name maps to; and the typechecker can now conveniently find these sub-expressions * Note that a nested splice, such as the `$(g x)` now appears twice: - In the PendingRnSplice: this is the version that will later be typechecked - In the HsSpliceE in the body of the bracket. This copy is used only for pretty printing. 
There are four varieties of pending splices generated by the renamer, distinguished by their UntypedSpliceFlavour * Pending expression splices (UntypedExpSplice), e.g., [|$(f x) + 2|] UntypedExpSplice is also used for * quasi-quotes, where the pending expression expands to $(quoter "...blah...") (see GHC.Rename.Splice.makePending, HsQuasiQuote case) * cross-stage lifting, where the pending expression expands to $(lift x) (see GHC.Rename.Splice.checkCrossStageLifting) * Pending pattern splices (UntypedPatSplice), e.g., [| \$(f x) -> x |] * Pending type splices (UntypedTypeSplice), e.g., [| f :: $(g x) |] * Pending declaration (UntypedDeclSplice), e.g., [| let $(f x) in ... |] There is a fifth variety of pending splice, which is generated by the type checker: * Pending *typed* expression splices, (PendingTcSplice), e.g., [||1 + $$(f 2)||] -} instance OutputableBndrId p => Outputable (HsSplicedThing (GhcPass p)) where ppr (HsSplicedExpr e) = ppr_expr e ppr (HsSplicedTy t) = ppr t ppr (HsSplicedPat p) = ppr p instance (OutputableBndrId p) => Outputable (HsSplice (GhcPass p)) where ppr s = pprSplice s pprPendingSplice :: (OutputableBndrId p) => SplicePointName -> LHsExpr (GhcPass p) -> SDoc pprPendingSplice n e = angleBrackets (ppr n <> comma <+> ppr (stripParensLHsExpr e)) pprSpliceDecl :: (OutputableBndrId p) => HsSplice (GhcPass p) -> SpliceExplicitFlag -> SDoc pprSpliceDecl e@HsQuasiQuote{} _ = pprSplice e pprSpliceDecl e ExplicitSplice = text "$" <> ppr_splice_decl e pprSpliceDecl e ImplicitSplice = ppr_splice_decl e ppr_splice_decl :: (OutputableBndrId p) => HsSplice (GhcPass p) -> SDoc ppr_splice_decl (HsUntypedSplice _ _ n e) = ppr_splice empty n e empty ppr_splice_decl e = pprSplice e pprSplice :: forall p. 
(OutputableBndrId p) => HsSplice (GhcPass p) -> SDoc pprSplice (HsTypedSplice _ DollarSplice n e) = ppr_splice (text "$$") n e empty pprSplice (HsTypedSplice _ BareSplice _ _ ) = panic "Bare typed splice" -- impossible pprSplice (HsUntypedSplice _ DollarSplice n e) = ppr_splice (text "$") n e empty pprSplice (HsUntypedSplice _ BareSplice n e) = ppr_splice empty n e empty pprSplice (HsQuasiQuote _ n q _ s) = ppr_quasi n q s pprSplice (HsSpliced _ _ thing) = ppr thing pprSplice (XSplice x) = case ghcPass @p of #if __GLASGOW_HASKELL__ < 811 GhcPs -> dataConCantHappen x GhcRn -> dataConCantHappen x #endif GhcTc -> case x of HsSplicedT _ -> text "Unevaluated typed splice" ppr_quasi :: OutputableBndr p => p -> p -> FastString -> SDoc ppr_quasi n quoter quote = whenPprDebug (brackets (ppr n)) <> char '[' <> ppr quoter <> vbar <> ppr quote <> text "|]" ppr_splice :: (OutputableBndrId p) => SDoc -> (IdP (GhcPass p)) -> LHsExpr (GhcPass p) -> SDoc -> SDoc ppr_splice herald n e trail = herald <> whenPprDebug (brackets (ppr n)) <> ppr e <> trail type instance XExpBr GhcPs = NoExtField type instance XPatBr GhcPs = NoExtField type instance XDecBrL GhcPs = NoExtField type instance XDecBrG GhcPs = NoExtField type instance XTypBr GhcPs = NoExtField type instance XVarBr GhcPs = NoExtField type instance XXQuote GhcPs = DataConCantHappen type instance XExpBr GhcRn = NoExtField type instance XPatBr GhcRn = NoExtField type instance XDecBrL GhcRn = NoExtField type instance XDecBrG GhcRn = NoExtField type instance XTypBr GhcRn = NoExtField type instance XVarBr GhcRn = NoExtField type instance XXQuote GhcRn = DataConCantHappen -- See Note [The life cycle of a TH quotation] type instance XExpBr GhcTc = DataConCantHappen type instance XPatBr GhcTc = DataConCantHappen type instance XDecBrL GhcTc = DataConCantHappen type instance XDecBrG GhcTc = DataConCantHappen type instance XTypBr GhcTc = DataConCantHappen type instance XVarBr GhcTc = DataConCantHappen type instance XXQuote GhcTc = 
NoExtField instance OutputableBndrId p => Outputable (HsQuote (GhcPass p)) where ppr = pprHsQuote where pprHsQuote :: forall p. (OutputableBndrId p) => HsQuote (GhcPass p) -> SDoc pprHsQuote (ExpBr _ e) = thBrackets empty (ppr e) pprHsQuote (PatBr _ p) = thBrackets (char 'p') (ppr p) pprHsQuote (DecBrG _ gp) = thBrackets (char 'd') (ppr gp) pprHsQuote (DecBrL _ ds) = thBrackets (char 'd') (vcat (map ppr ds)) pprHsQuote (TypBr _ t) = thBrackets (char 't') (ppr t) pprHsQuote (VarBr _ True n) = char '\'' <> pprPrefixOcc (unLoc n) pprHsQuote (VarBr _ False n) = text "''" <> pprPrefixOcc (unLoc n) pprHsQuote (XQuote b) = case ghcPass @p of #if __GLASGOW_HASKELL__ <= 900 GhcPs -> dataConCantHappen b GhcRn -> dataConCantHappen b #endif GhcTc -> pprPanic "pprHsQuote: `HsQuote GhcTc` shouldn't exist" (ppr b) -- See Note [The life cycle of a TH quotation] thBrackets :: SDoc -> SDoc -> SDoc thBrackets pp_kind pp_body = char '[' <> pp_kind <> vbar <+> pp_body <+> text "|]" thTyBrackets :: SDoc -> SDoc thTyBrackets pp_body = text "[||" <+> pp_body <+> text "||]" instance Outputable PendingRnSplice where ppr (PendingRnSplice _ n e) = pprPendingSplice n e instance Outputable PendingTcSplice where ppr (PendingTcSplice n e) = pprPendingSplice n e ppr_with_pending_tc_splices :: SDoc -> [PendingTcSplice] -> SDoc ppr_with_pending_tc_splices x [] = x ppr_with_pending_tc_splices x ps = x $$ text "pending(tc)" <+> ppr ps {- ************************************************************************ * * \subsection{Enumerations and list comprehensions} * * ************************************************************************ -} instance OutputableBndrId p => Outputable (ArithSeqInfo (GhcPass p)) where ppr (From e1) = hcat [ppr e1, pp_dotdot] ppr (FromThen e1 e2) = hcat [ppr e1, comma, space, ppr e2, pp_dotdot] ppr (FromTo e1 e3) = hcat [ppr e1, pp_dotdot, ppr e3] ppr (FromThenTo e1 e2 e3) = hcat [ppr e1, comma, space, ppr e2, pp_dotdot, ppr e3] pp_dotdot :: SDoc pp_dotdot = text " .. 
" {- ************************************************************************ * * \subsection{HsMatchCtxt} * * ************************************************************************ -} instance OutputableBndrId p => Outputable (HsMatchContext (GhcPass p)) where ppr m@(FunRhs{}) = text "FunRhs" <+> ppr (mc_fun m) <+> ppr (mc_fixity m) ppr LambdaExpr = text "LambdaExpr" ppr CaseAlt = text "CaseAlt" ppr (LamCaseAlt lc_variant) = text "LamCaseAlt" <+> ppr lc_variant ppr IfAlt = text "IfAlt" ppr (ArrowMatchCtxt c) = text "ArrowMatchCtxt" <+> ppr c ppr PatBindRhs = text "PatBindRhs" ppr PatBindGuards = text "PatBindGuards" ppr RecUpd = text "RecUpd" ppr (StmtCtxt _) = text "StmtCtxt _" ppr ThPatSplice = text "ThPatSplice" ppr ThPatQuote = text "ThPatQuote" ppr PatSyn = text "PatSyn" instance Outputable LamCaseVariant where ppr = text . \case LamCase -> "LamCase" LamCases -> "LamCases" instance Outputable HsArrowMatchContext where ppr ProcExpr = text "ProcExpr" ppr ArrowCaseAlt = text "ArrowCaseAlt" ppr (ArrowLamCaseAlt lc_variant) = parens $ text "ArrowLamCaseAlt" <+> ppr lc_variant ppr KappaExpr = text "KappaExpr" ----------------- instance OutputableBndrId p => Outputable (HsStmtContext (GhcPass p)) where ppr = pprStmtContext -- Used to generate the string for a *runtime* error message matchContextErrString :: OutputableBndrId p => HsMatchContext (GhcPass p) -> SDoc matchContextErrString (FunRhs{mc_fun=L _ fun}) = text "function" <+> ppr fun matchContextErrString CaseAlt = text "case" matchContextErrString (LamCaseAlt lc_variant) = lamCaseKeyword lc_variant matchContextErrString IfAlt = text "multi-way if" matchContextErrString PatBindRhs = text "pattern binding" matchContextErrString PatBindGuards = text "pattern binding guards" matchContextErrString RecUpd = text "record update" matchContextErrString LambdaExpr = text "lambda" matchContextErrString (ArrowMatchCtxt c) = matchArrowContextErrString c matchContextErrString ThPatSplice = panic "matchContextErrString" -- 
Not used at runtime matchContextErrString ThPatQuote = panic "matchContextErrString" -- Not used at runtime matchContextErrString PatSyn = panic "matchContextErrString" -- Not used at runtime matchContextErrString (StmtCtxt (ParStmtCtxt c)) = matchContextErrString (StmtCtxt c) matchContextErrString (StmtCtxt (TransStmtCtxt c)) = matchContextErrString (StmtCtxt c) matchContextErrString (StmtCtxt (PatGuard _)) = text "pattern guard" matchContextErrString (StmtCtxt (ArrowExpr)) = text "'do' block" matchContextErrString (StmtCtxt (HsDoStmt flavour)) = matchDoContextErrString flavour matchArrowContextErrString :: HsArrowMatchContext -> SDoc matchArrowContextErrString ProcExpr = text "proc" matchArrowContextErrString ArrowCaseAlt = text "case" matchArrowContextErrString (ArrowLamCaseAlt lc_variant) = lamCaseKeyword lc_variant matchArrowContextErrString KappaExpr = text "kappa" matchDoContextErrString :: HsDoFlavour -> SDoc matchDoContextErrString GhciStmtCtxt = text "interactive GHCi command" matchDoContextErrString (DoExpr m) = prependQualified m (text "'do' block") matchDoContextErrString (MDoExpr m) = prependQualified m (text "'mdo' block") matchDoContextErrString ListComp = text "list comprehension" matchDoContextErrString MonadComp = text "monad comprehension" pprMatchInCtxt :: (OutputableBndrId idR, Outputable body) => Match (GhcPass idR) body -> SDoc pprMatchInCtxt match = hang (text "In" <+> pprMatchContext (m_ctxt match) <> colon) 4 (pprMatch match) pprStmtInCtxt :: (OutputableBndrId idL, OutputableBndrId idR, OutputableBndrId ctx, Outputable body, Anno (StmtLR (GhcPass idL) (GhcPass idR) body) ~ SrcSpanAnnA) => HsStmtContext (GhcPass ctx) -> StmtLR (GhcPass idL) (GhcPass idR) body -> SDoc pprStmtInCtxt ctxt (LastStmt _ e _ _) | isComprehensionContext ctxt -- For [ e | .. 
], do not mutter about "stmts" = hang (text "In the expression:") 2 (ppr e) pprStmtInCtxt ctxt stmt = hang (text "In a stmt of" <+> pprAStmtContext ctxt <> colon) 2 (ppr_stmt stmt) where -- For Group and Transform Stmts, don't print the nested stmts! ppr_stmt (TransStmt { trS_by = by, trS_using = using , trS_form = form }) = pprTransStmt by using form ppr_stmt stmt = pprStmt stmt {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (HsExpr (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA ((StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsExpr (GhcPass pr)))))] = SrcSpanAnnL type instance Anno [LocatedA ((StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsCmd (GhcPass pr)))))] = SrcSpanAnnL type instance Anno (HsCmd (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsCmd (GhcPass pr))))] = SrcSpanAnnL type instance Anno (HsCmdTop (GhcPass p)) = SrcAnn NoEpAnns type instance Anno [LocatedA (Match (GhcPass p) (LocatedA (HsExpr (GhcPass p))))] = SrcSpanAnnL type instance Anno [LocatedA (Match (GhcPass p) (LocatedA (HsCmd (GhcPass p))))] = SrcSpanAnnL type instance Anno (Match (GhcPass p) (LocatedA (HsExpr (GhcPass p)))) = SrcSpanAnnA type instance Anno (Match (GhcPass p) (LocatedA (HsCmd (GhcPass p)))) = SrcSpanAnnA type instance Anno (GRHS (GhcPass p) (LocatedA (HsExpr (GhcPass p)))) = SrcAnn NoEpAnns type instance Anno (GRHS (GhcPass p) (LocatedA (HsCmd (GhcPass p)))) = SrcAnn NoEpAnns type instance Anno (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsExpr (GhcPass pr)))) = SrcSpanAnnA type instance Anno (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsCmd (GhcPass pr)))) = SrcSpanAnnA type instance Anno (HsSplice (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsExpr (GhcPass pr))))] = SrcSpanAnnL type instance Anno 
[LocatedA (StmtLR (GhcPass pl) (GhcPass pr) (LocatedA (HsCmd (GhcPass pr))))] = SrcSpanAnnL type instance Anno (FieldLabelStrings (GhcPass p)) = SrcAnn NoEpAnns type instance Anno (FieldLabelString) = SrcSpanAnnN type instance Anno (DotFieldOcc (GhcPass p)) = SrcAnn NoEpAnns instance (Anno a ~ SrcSpanAnn' (EpAnn an)) => WrapXRec (GhcPass p) a where wrapXRec = noLocA ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Extension.hs0000644000000000000000000002223414472400112020520 0ustar0000000000000000{-# LANGUAGE AllowAmbiguousTypes #-} -- for pprIfTc, etc. {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE EmptyDataDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableSuperClasses #-} -- for IsPass; see Note [NoGhcTc] {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension module GHC.Hs.Extension where -- This module captures the type families to precisely identify the extension -- points for GHC.Hs syntax import GHC.Prelude import Data.Data hiding ( Fixity ) import Language.Haskell.Syntax.Extension import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Var import GHC.Utils.Outputable hiding ((<>)) import GHC.Types.SrcLoc (GenLocated(..), unLoc) import GHC.Utils.Panic import GHC.Parser.Annotation {- Note [IsPass] ~~~~~~~~~~~~~ One challenge with the Trees That Grow approach is that we sometimes have different information in different passes. 
For example, we have type instance XViaStrategy GhcPs = LHsSigType GhcPs type instance XViaStrategy GhcRn = LHsSigType GhcRn type instance XViaStrategy GhcTc = Type This means that printing a DerivStrategy (which contains an XViaStrategy) might need to print a LHsSigType, or it might need to print a type. Yet we want one Outputable instance for a DerivStrategy, instead of one per pass. We could have a large constraint, including e.g. (Outputable (XViaStrategy p), Outputable (XViaStrategy GhcTc)), and pass that around in every context where we might output a DerivStrategy. But a simpler alternative is to pass a witness to whichever pass we're in. When we pattern-match on that (GADT) witness, we learn the pass identity and can then print away. To wit, we get the definition of GhcPass and the functions isPass. These allow us to do away with big constraints, passing around all manner of dictionaries we might or might not use. It does mean that we have to manually use isPass when printing, but these places are few. See Note [NoGhcTc] about the superclass constraint to IsPass. Note [NoGhcTc] ~~~~~~~~~~~~~~ An expression is parsed into HsExpr GhcPs, renamed into HsExpr GhcRn, and then type-checked into HsExpr GhcTc. Not so for types! These get parsed into HsType GhcPs, renamed into HsType GhcRn, and then type-checked into Type. We never build an HsType GhcTc. Why do this? Because we need to be able to compare type-checked types for equality, and we don't want to do this with HsType. This causes wrinkles within the AST, where we normally think that the whole AST travels through the GhcPs --> GhcRn --> GhcTc pipeline as one. So we have the NoGhcTc type family, which just replaces GhcTc with GhcRn, so that user-written types can be preserved (as HsType GhcRn) even in e.g. HsExpr GhcTc. 
For example, this is used in ExprWithTySig: | ExprWithTySig (XExprWithTySig p) (LHsExpr p) (LHsSigWcType (NoGhcTc p)) If we have (e :: ty), we still want to be able to print that (with the :: ty) after type-checking. So we retain the LHsSigWcType GhcRn, even in an HsExpr GhcTc. That's what NoGhcTc does. When we're printing the type annotation, we need to know (Outputable (LHsSigWcType GhcRn)), even though we've assumed only that (OutputableBndrId GhcTc). We thus must be able to prove OutputableBndrId (NoGhcTc p) from OutputableBndrId p. The extra constraints in OutputableBndrId and the superclass constraints of IsPass allow this. Note that the superclass constraint of IsPass is *recursive*: it asserts that IsPass (NoGhcTcPass p) holds. For this to make sense, we need -XUndecidableSuperClasses and the other constraint, saying that NoGhcTcPass is idempotent. -} -- See Note [XRec and Anno in the AST] in GHC.Parser.Annotation type instance XRec (GhcPass p) a = GenLocated (Anno a) a type instance Anno RdrName = SrcSpanAnnN type instance Anno Name = SrcSpanAnnN type instance Anno Id = SrcSpanAnnN type IsSrcSpanAnn p a = ( Anno (IdGhcP p) ~ SrcSpanAnn' (EpAnn a), IsPass p) instance UnXRec (GhcPass p) where unXRec = unLoc instance MapXRec (GhcPass p) where mapXRec = fmap -- instance WrapXRec (GhcPass p) a where -- wrapXRec = noLocA {- Note [DataConCantHappen and strict fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, any unused TTG extension constructor will generally look like the following: type instance XXHsDecl (GhcPass _) = DataConCantHappen data HsDecl p = ... | XHsDecl !(XXHsDecl p) The field of type `XXHsDecl p` is strict for a good reason: it allows the pattern-match coverage checker to conclude that any matches against XHsDecl are unreachable whenever `p ~ GhcPass _`. To see why this is the case, consider the following function which consumes an HsDecl: ex :: HsDecl GhcPs -> HsDecl GhcRn ... 
ex (XHsDecl nec) = dataConCantHappen nec Because `p` equals GhcPs (i.e., GhcPass 'Parsed), XHsDecl's field has the type DataConCantHappen. But since (1) the field is strict and (2) DataConCantHappen is an empty data type, there is no possible way to reach the right-hand side of the XHsDecl case. As a result, the coverage checker concludes that the XHsDecl case is inaccessible, so it can be removed. (See Note [Strict argument type constraints] in GHC.HsToCore.Pmc.Solver for more on how this works.) Bottom line: if you add a TTG extension constructor that uses DataConCantHappen, make sure that any uses of it as a field are strict. -} -- | Used as a data type index for the hsSyn AST; also serves -- as a singleton type for Pass data GhcPass (c :: Pass) where GhcPs :: GhcPass 'Parsed GhcRn :: GhcPass 'Renamed GhcTc :: GhcPass 'Typechecked -- This really should never be entered, but the data-deriving machinery -- needs the instance to exist. instance Typeable p => Data (GhcPass p) where gunfold _ _ _ = panic "instance Data GhcPass" toConstr _ = panic "instance Data GhcPass" dataTypeOf _ = panic "instance Data GhcPass" data Pass = Parsed | Renamed | Typechecked deriving (Data) -- Type synonyms as a shorthand for tagging type GhcPs = GhcPass 'Parsed -- Output of parser type GhcRn = GhcPass 'Renamed -- Output of renamer type GhcTc = GhcPass 'Typechecked -- Output of typechecker -- | Allows us to check what phase we're in at GHC's runtime. -- For example, this class allows us to write -- > f :: forall p. IsPass p => HsExpr (GhcPass p) -> blah -- > f e = case ghcPass @p of -- > GhcPs -> ... in this RHS we have HsExpr GhcPs... -- > GhcRn -> ... in this RHS we have HsExpr GhcRn... -- > GhcTc -> ... in this RHS we have HsExpr GhcTc... -- which is very useful, for example, when pretty-printing. -- See Note [IsPass]. 
class ( NoGhcTcPass (NoGhcTcPass p) ~ NoGhcTcPass p , IsPass (NoGhcTcPass p) ) => IsPass p where ghcPass :: GhcPass p instance IsPass 'Parsed where ghcPass = GhcPs instance IsPass 'Renamed where ghcPass = GhcRn instance IsPass 'Typechecked where ghcPass = GhcTc type instance IdP (GhcPass p) = IdGhcP p -- | Maps the "normal" id type for a given GHC pass type family IdGhcP pass where IdGhcP 'Parsed = RdrName IdGhcP 'Renamed = Name IdGhcP 'Typechecked = Id -- | Marks that a field uses the GhcRn variant even when the pass -- parameter is GhcTc. Useful for storing HsTypes in GHC.Hs.Exprs, say, because -- HsType GhcTc should never occur. -- See Note [NoGhcTc] -- Breaking it up this way, GHC can figure out that the result is a GhcPass type instance NoGhcTc (GhcPass pass) = GhcPass (NoGhcTcPass pass) type family NoGhcTcPass (p :: Pass) :: Pass where NoGhcTcPass 'Typechecked = 'Renamed NoGhcTcPass other = other -- |Constraint type to bundle up the requirement for 'OutputableBndr' on both -- the @id@ and the 'NoGhcTc' of it. See Note [NoGhcTc]. type OutputableBndrId pass = ( OutputableBndr (IdGhcP pass) , OutputableBndr (IdGhcP (NoGhcTcPass pass)) , Outputable (GenLocated (Anno (IdGhcP pass)) (IdGhcP pass)) , Outputable (GenLocated (Anno (IdGhcP (NoGhcTcPass pass))) (IdGhcP (NoGhcTcPass pass))) , IsPass pass ) -- useful helper functions: pprIfPs :: forall p. IsPass p => (p ~ 'Parsed => SDoc) -> SDoc pprIfPs pp = case ghcPass @p of GhcPs -> pp _ -> empty pprIfRn :: forall p. IsPass p => (p ~ 'Renamed => SDoc) -> SDoc pprIfRn pp = case ghcPass @p of GhcRn -> pp _ -> empty pprIfTc :: forall p. 
IsPass p => (p ~ 'Typechecked => SDoc) -> SDoc pprIfTc pp = case ghcPass @p of GhcTc -> pp _ -> empty type instance Anno (HsToken tok) = TokenLocation noHsTok :: GenLocated TokenLocation (HsToken tok) noHsTok = L NoTokenLoc HsTok type instance Anno (HsUniToken tok utok) = TokenLocation noHsUniTok :: GenLocated TokenLocation (HsUniToken tok utok) noHsUniTok = L NoTokenLoc HsNormalTok ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/ImpExp.hs0000644000000000000000000004206014472400112017745 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 GHC.Hs.ImpExp: Abstract syntax: imports, exports, interfaces -} module GHC.Hs.ImpExp where import GHC.Prelude import GHC.Unit.Module ( ModuleName, IsBootInterface(..) ) import GHC.Hs.Doc import GHC.Types.SourceText ( SourceText(..) ) import GHC.Types.FieldLabel ( FieldLabel ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.SrcLoc import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Types.Name import GHC.Types.PkgQual import Data.Data import Data.Maybe {- ************************************************************************ * * \subsection{Import and export declaration lists} * * ************************************************************************ One per \tr{import} declaration in a module. 
-} -- | Located Import Declaration type LImportDecl pass = XRec pass (ImportDecl pass) -- ^ When in a list this may have -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnSemi' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation type instance Anno (ImportDecl (GhcPass p)) = SrcSpanAnnA -- | If/how an import is 'qualified'. data ImportDeclQualifiedStyle = QualifiedPre -- ^ 'qualified' appears in prepositive position. | QualifiedPost -- ^ 'qualified' appears in postpositive position. | NotQualified -- ^ Not qualified. deriving (Eq, Data) -- | Given two possible located 'qualified' tokens, compute a style -- (in a conforming Haskell program only one of the two can be not -- 'Nothing'). This is called from "GHC.Parser". importDeclQualifiedStyle :: Maybe EpaLocation -> Maybe EpaLocation -> (Maybe EpaLocation, ImportDeclQualifiedStyle) importDeclQualifiedStyle mPre mPost = if isJust mPre then (mPre, QualifiedPre) else if isJust mPost then (mPost,QualifiedPost) else (Nothing, NotQualified) -- | Convenience function to answer the question if an import decl. is -- qualified. isImportDeclQualified :: ImportDeclQualifiedStyle -> Bool isImportDeclQualified NotQualified = False isImportDeclQualified _ = True -- | Import Declaration -- -- A single Haskell @import@ declaration. data ImportDecl pass = ImportDecl { ideclExt :: XCImportDecl pass, ideclSourceSrc :: SourceText, -- Note [Pragma source text] in GHC.Types.SourceText ideclName :: XRec pass ModuleName, -- ^ Module name. ideclPkgQual :: ImportDeclPkgQual pass, -- ^ Package qualifier. ideclSource :: IsBootInterface, -- ^ IsBoot <=> {-\# SOURCE \#-} import ideclSafe :: Bool, -- ^ True => safe import ideclQualified :: ImportDeclQualifiedStyle, -- ^ If/how the import is qualified. 
ideclImplicit :: Bool, -- ^ True => implicit import (of Prelude) ideclAs :: Maybe (XRec pass ModuleName), -- ^ as Module ideclHiding :: Maybe (Bool, XRec pass [LIE pass]) -- ^ (True => hiding, names) } | XImportDecl !(XXImportDecl pass) -- ^ -- 'GHC.Parser.Annotation.AnnKeywordId's -- -- - 'GHC.Parser.Annotation.AnnImport' -- -- - 'GHC.Parser.Annotation.AnnOpen', 'GHC.Parser.Annotation.AnnClose' for ideclSource -- -- - 'GHC.Parser.Annotation.AnnSafe','GHC.Parser.Annotation.AnnQualified', -- 'GHC.Parser.Annotation.AnnPackageName','GHC.Parser.Annotation.AnnAs', -- 'GHC.Parser.Annotation.AnnVal' -- -- - 'GHC.Parser.Annotation.AnnHiding','GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose' attached -- to location in ideclHiding -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation type family ImportDeclPkgQual pass type instance ImportDeclPkgQual GhcPs = RawPkgQual type instance ImportDeclPkgQual GhcRn = PkgQual type instance ImportDeclPkgQual GhcTc = PkgQual type instance XCImportDecl GhcPs = EpAnn EpAnnImportDecl type instance XCImportDecl GhcRn = NoExtField type instance XCImportDecl GhcTc = NoExtField type instance XXImportDecl (GhcPass _) = DataConCantHappen type instance Anno ModuleName = SrcSpanAnnA type instance Anno [LocatedA (IE (GhcPass p))] = SrcSpanAnnL -- --------------------------------------------------------------------- -- API Annotations types data EpAnnImportDecl = EpAnnImportDecl { importDeclAnnImport :: EpaLocation , importDeclAnnPragma :: Maybe (EpaLocation, EpaLocation) , importDeclAnnSafe :: Maybe EpaLocation , importDeclAnnQualified :: Maybe EpaLocation , importDeclAnnPackage :: Maybe EpaLocation , importDeclAnnAs :: Maybe EpaLocation } deriving (Data) -- --------------------------------------------------------------------- simpleImportDecl :: ModuleName -> ImportDecl GhcPs simpleImportDecl mn = ImportDecl { ideclExt = noAnn, ideclSourceSrc = NoSourceText, ideclName = noLocA mn, ideclPkgQual = 
NoRawPkgQual, ideclSource = NotBoot, ideclSafe = False, ideclImplicit = False, ideclQualified = NotQualified, ideclAs = Nothing, ideclHiding = Nothing } instance (OutputableBndrId p , Outputable (Anno (IE (GhcPass p))) , Outputable (ImportDeclPkgQual (GhcPass p))) => Outputable (ImportDecl (GhcPass p)) where ppr (ImportDecl { ideclSourceSrc = mSrcText, ideclName = mod' , ideclPkgQual = pkg , ideclSource = from, ideclSafe = safe , ideclQualified = qual, ideclImplicit = implicit , ideclAs = as, ideclHiding = spec }) = hang (hsep [text "import", ppr_imp from, pp_implicit implicit, pp_safe safe, pp_qual qual False, ppr pkg, ppr mod', pp_qual qual True, pp_as as]) 4 (pp_spec spec) where pp_implicit False = empty pp_implicit True = text "(implicit)" pp_qual QualifiedPre False = text "qualified" -- Prepositive qualifier/prepositive position. pp_qual QualifiedPost True = text "qualified" -- Postpositive qualifier/postpositive position. pp_qual QualifiedPre True = empty -- Prepositive qualifier/postpositive position. pp_qual QualifiedPost False = empty -- Postpositive qualifier/prepositive position. pp_qual NotQualified _ = empty pp_safe False = empty pp_safe True = text "safe" pp_as Nothing = empty pp_as (Just a) = text "as" <+> ppr a ppr_imp IsBoot = case mSrcText of NoSourceText -> text "{-# SOURCE #-}" SourceText src -> text src <+> text "#-}" ppr_imp NotBoot = empty pp_spec Nothing = empty pp_spec (Just (False, (L _ ies))) = ppr_ies ies pp_spec (Just (True, (L _ ies))) = text "hiding" <+> ppr_ies ies ppr_ies [] = text "()" ppr_ies ies = char '(' <+> interpp'SP ies <+> char ')' {- ************************************************************************ * * \subsection{Imported and exported entities} * * ************************************************************************ -} -- | A name in an import or export specification which may have -- adornments. Used primarily for accurate pretty printing of -- ParsedSource, and API Annotation placement. 
The -- 'GHC.Parser.Annotation' is the location of the adornment in -- the original source. data IEWrappedName name = IEName (LocatedN name) -- ^ no extra | IEPattern EpaLocation (LocatedN name) -- ^ pattern X | IEType EpaLocation (LocatedN name) -- ^ type (:+:) deriving (Eq,Data) -- | Located name with possible adornment -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnType', -- 'GHC.Parser.Annotation.AnnPattern' type LIEWrappedName name = LocatedA (IEWrappedName name) -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Located Import or Export type LIE pass = XRec pass (IE pass) -- ^ When in a list this may have -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnComma' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation type instance Anno (IE (GhcPass p)) = SrcSpanAnnA -- | Imported or exported entity. data IE pass = IEVar (XIEVar pass) (LIEWrappedName (IdP pass)) -- ^ Imported or Exported Variable | IEThingAbs (XIEThingAbs pass) (LIEWrappedName (IdP pass)) -- ^ Imported or exported Thing with Absent list -- -- The thing is a Class/Type (can't tell) -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnPattern', -- 'GHC.Parser.Annotation.AnnType','GHC.Parser.Annotation.AnnVal' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- See Note [Located RdrNames] in GHC.Hs.Expr | IEThingAll (XIEThingAll pass) (LIEWrappedName (IdP pass)) -- ^ Imported or exported Thing with All imported or exported -- -- The thing is a Class/Type and the All refers to methods/constructors -- -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnDotdot','GHC.Parser.Annotation.AnnClose', -- 'GHC.Parser.Annotation.AnnType' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- See Note [Located RdrNames] in GHC.Hs.Expr | IEThingWith (XIEThingWith 
pass) (LIEWrappedName (IdP pass)) IEWildcard [LIEWrappedName (IdP pass)] -- ^ Imported or exported Thing With given imported or exported -- -- The thing is a Class/Type and the imported or exported things are -- methods/constructors and record fields; see Note [IEThingWith] -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose', -- 'GHC.Parser.Annotation.AnnComma', -- 'GHC.Parser.Annotation.AnnType' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | IEModuleContents (XIEModuleContents pass) (XRec pass ModuleName) -- ^ Imported or exported module contents -- -- (Export Only) -- -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnModule' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | IEGroup (XIEGroup pass) Int (LHsDoc pass) -- ^ Doc section heading | IEDoc (XIEDoc pass) (LHsDoc pass) -- ^ Some documentation | IEDocNamed (XIEDocNamed pass) String -- ^ Reference to named doc | XIE !(XXIE pass) type instance XIEVar GhcPs = NoExtField type instance XIEVar GhcRn = NoExtField type instance XIEVar GhcTc = NoExtField type instance XIEThingAbs (GhcPass _) = EpAnn [AddEpAnn] type instance XIEThingAll (GhcPass _) = EpAnn [AddEpAnn] -- See Note [IEThingWith] type instance XIEThingWith (GhcPass 'Parsed) = EpAnn [AddEpAnn] type instance XIEThingWith (GhcPass 'Renamed) = [Located FieldLabel] type instance XIEThingWith (GhcPass 'Typechecked) = NoExtField type instance XIEModuleContents GhcPs = EpAnn [AddEpAnn] type instance XIEModuleContents GhcRn = NoExtField type instance XIEModuleContents GhcTc = NoExtField type instance XIEGroup (GhcPass _) = NoExtField type instance XIEDoc (GhcPass _) = NoExtField type instance XIEDocNamed (GhcPass _) = NoExtField type instance XXIE (GhcPass _) = DataConCantHappen type instance Anno (LocatedA (IE (GhcPass p))) = SrcSpanAnnA -- | Imported or Exported Wildcard data IEWildcard = NoIEWildcard | 
IEWildcard Int deriving (Eq, Data) {- Note [IEThingWith] ~~~~~~~~~~~~~~~~~~ A definition like {-# LANGUAGE DuplicateRecordFields #-} module M ( T(MkT, x) ) where data T = MkT { x :: Int } gives rise to this in the output of the parser: IEThingWith NoExtField T [MkT, x] NoIEWildcard But in the renamer we need to attach the correct field label, because the selector Name is mangled (see Note [FieldLabel] in GHC.Types.FieldLabel). Hence we change this to: IEThingWith [FieldLabel "x" True $sel:x:MkT)] T [MkT] NoIEWildcard using the TTG extension field to store the list of fields in renamed syntax only. (Record fields always appear in this list, regardless of whether DuplicateRecordFields was in use at the definition site or not.) See Note [Representing fields in AvailInfo] in GHC.Types.Avail for more details. -} ieName :: IE (GhcPass p) -> IdP (GhcPass p) ieName (IEVar _ (L _ n)) = ieWrappedName n ieName (IEThingAbs _ (L _ n)) = ieWrappedName n ieName (IEThingWith _ (L _ n) _ _) = ieWrappedName n ieName (IEThingAll _ (L _ n)) = ieWrappedName n ieName _ = panic "ieName failed pattern match!" ieNames :: IE (GhcPass p) -> [IdP (GhcPass p)] ieNames (IEVar _ (L _ n) ) = [ieWrappedName n] ieNames (IEThingAbs _ (L _ n) ) = [ieWrappedName n] ieNames (IEThingAll _ (L _ n) ) = [ieWrappedName n] ieNames (IEThingWith _ (L _ n) _ ns) = ieWrappedName n : map (ieWrappedName . unLoc) ns -- NB the above case does not include names of field selectors ieNames (IEModuleContents {}) = [] ieNames (IEGroup {}) = [] ieNames (IEDoc {}) = [] ieNames (IEDocNamed {}) = [] ieWrappedLName :: IEWrappedName name -> LocatedN name ieWrappedLName (IEName ln) = ln ieWrappedLName (IEPattern _ ln) = ln ieWrappedLName (IEType _ ln) = ln ieWrappedName :: IEWrappedName name -> name ieWrappedName = unLoc . 
ieWrappedLName lieWrappedName :: LIEWrappedName name -> name lieWrappedName (L _ n) = ieWrappedName n ieLWrappedName :: LIEWrappedName name -> LocatedN name ieLWrappedName (L _ n) = ieWrappedLName n replaceWrappedName :: IEWrappedName name1 -> name2 -> IEWrappedName name2 replaceWrappedName (IEName (L l _)) n = IEName (L l n) replaceWrappedName (IEPattern r (L l _)) n = IEPattern r (L l n) replaceWrappedName (IEType r (L l _)) n = IEType r (L l n) replaceLWrappedName :: LIEWrappedName name1 -> name2 -> LIEWrappedName name2 replaceLWrappedName (L l n) n' = L l (replaceWrappedName n n') instance OutputableBndrId p => Outputable (IE (GhcPass p)) where ppr (IEVar _ var) = ppr (unLoc var) ppr (IEThingAbs _ thing) = ppr (unLoc thing) ppr (IEThingAll _ thing) = hcat [ppr (unLoc thing), text "(..)"] ppr (IEThingWith flds thing wc withs) = ppr (unLoc thing) <> parens (fsep (punctuate comma (ppWiths ++ ppFields) )) where ppWiths = case wc of NoIEWildcard -> map (ppr . unLoc) withs IEWildcard pos -> let (bs, as) = splitAt pos (map (ppr . 
unLoc) withs) in bs ++ [text ".."] ++ as ppFields = case ghcPass @p of GhcRn -> map ppr flds _ -> [] ppr (IEModuleContents _ mod') = text "module" <+> ppr mod' ppr (IEGroup _ n _) = text ("") ppr (IEDoc _ doc) = ppr doc ppr (IEDocNamed _ string) = text ("") instance (HasOccName name) => HasOccName (IEWrappedName name) where occName w = occName (ieWrappedName w) instance (OutputableBndr name) => OutputableBndr (IEWrappedName name) where pprBndr bs w = pprBndr bs (ieWrappedName w) pprPrefixOcc w = pprPrefixOcc (ieWrappedName w) pprInfixOcc w = pprInfixOcc (ieWrappedName w) instance (OutputableBndr name) => Outputable (IEWrappedName name) where ppr (IEName n) = pprPrefixOcc (unLoc n) ppr (IEPattern _ n) = text "pattern" <+> pprPrefixOcc (unLoc n) ppr (IEType _ n) = text "type" <+> pprPrefixOcc (unLoc n) pprImpExp :: (HasOccName name, OutputableBndr name) => name -> SDoc pprImpExp name = type_pref <+> pprPrefixOcc name where occ = occName name type_pref | isTcOcc occ && isSymOcc occ = text "type" | otherwise = empty ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Instances.hs0000644000000000000000000005140214472400112020472 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- This module contains exclusively Data instances, which are going to be slow -- no matter what we do. Furthermore, they are incredibly slow to compile with -- optimisation (see #9557). Consequently we compile this with -O0. -- See #18254. {-# OPTIONS_GHC -O0 #-} module GHC.Hs.Instances where -- This module defines the Data instances for the hsSyn AST. -- It happens here to avoid massive constraint types on the AST with concomitant -- slow GHC bootstrap times. -- UndecidableInstances ? 
import Data.Data hiding ( Fixity ) import GHC.Prelude import GHC.Hs.Extension import GHC.Hs.Binds import GHC.Hs.Decls import GHC.Hs.Expr import GHC.Hs.Lit import GHC.Hs.Type import GHC.Hs.Pat import GHC.Hs.ImpExp import GHC.Parser.Annotation -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs----------------------------------------- -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Binds ---------------------------------- -- deriving instance (DataIdLR pL pR) => Data (HsLocalBindsLR pL pR) deriving instance Data (HsLocalBindsLR GhcPs GhcPs) deriving instance Data (HsLocalBindsLR GhcPs GhcRn) deriving instance Data (HsLocalBindsLR GhcRn GhcRn) deriving instance Data (HsLocalBindsLR GhcTc GhcTc) -- deriving instance (DataIdLR pL pR) => Data (HsValBindsLR pL pR) deriving instance Data (HsValBindsLR GhcPs GhcPs) deriving instance Data (HsValBindsLR GhcPs GhcRn) deriving instance Data (HsValBindsLR GhcRn GhcRn) deriving instance Data (HsValBindsLR GhcTc GhcTc) -- deriving instance (DataIdLR pL pL) => Data (NHsValBindsLR pL) deriving instance Data (NHsValBindsLR GhcPs) deriving instance Data (NHsValBindsLR GhcRn) deriving instance Data (NHsValBindsLR GhcTc) -- deriving instance (DataIdLR pL pR) => Data (HsBindLR pL pR) deriving instance Data (HsBindLR GhcPs GhcPs) deriving instance Data (HsBindLR GhcPs GhcRn) deriving instance Data (HsBindLR GhcRn GhcRn) deriving instance Data (HsBindLR GhcTc GhcTc) deriving instance Data AbsBinds deriving instance Data ABExport -- deriving instance DataId p => Data (RecordPatSynField p) deriving instance Data (RecordPatSynField GhcPs) deriving instance Data (RecordPatSynField GhcRn) deriving instance Data (RecordPatSynField GhcTc) -- deriving instance (DataIdLR pL pR) => Data (PatSynBind pL pR) deriving instance Data (PatSynBind GhcPs GhcPs) deriving instance Data (PatSynBind GhcPs GhcRn) deriving instance Data (PatSynBind GhcRn 
GhcRn) deriving instance Data (PatSynBind GhcTc GhcTc) -- deriving instance (DataIdLR p p) => Data (HsIPBinds p) deriving instance Data (HsIPBinds GhcPs) deriving instance Data (HsIPBinds GhcRn) deriving instance Data (HsIPBinds GhcTc) -- deriving instance (DataIdLR p p) => Data (IPBind p) deriving instance Data (IPBind GhcPs) deriving instance Data (IPBind GhcRn) deriving instance Data (IPBind GhcTc) -- deriving instance (DataIdLR p p) => Data (Sig p) deriving instance Data (Sig GhcPs) deriving instance Data (Sig GhcRn) deriving instance Data (Sig GhcTc) -- deriving instance (DataId p) => Data (FixitySig p) deriving instance Data (FixitySig GhcPs) deriving instance Data (FixitySig GhcRn) deriving instance Data (FixitySig GhcTc) -- deriving instance (DataId p) => Data (StandaloneKindSig p) deriving instance Data (StandaloneKindSig GhcPs) deriving instance Data (StandaloneKindSig GhcRn) deriving instance Data (StandaloneKindSig GhcTc) -- deriving instance (DataIdLR p p) => Data (HsPatSynDir p) deriving instance Data (HsPatSynDir GhcPs) deriving instance Data (HsPatSynDir GhcRn) deriving instance Data (HsPatSynDir GhcTc) -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Decls ---------------------------------- -- deriving instance (DataIdLR p p) => Data (HsDecl p) deriving instance Data (HsDecl GhcPs) deriving instance Data (HsDecl GhcRn) deriving instance Data (HsDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (HsGroup p) deriving instance Data (HsGroup GhcPs) deriving instance Data (HsGroup GhcRn) deriving instance Data (HsGroup GhcTc) -- deriving instance (DataIdLR p p) => Data (SpliceDecl p) deriving instance Data (SpliceDecl GhcPs) deriving instance Data (SpliceDecl GhcRn) deriving instance Data (SpliceDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (TyClDecl p) deriving instance Data (TyClDecl GhcPs) deriving instance Data (TyClDecl GhcRn) deriving instance Data (TyClDecl GhcTc) -- deriving 
instance (DataIdLR p p) => Data (FunDep p) deriving instance Data (FunDep GhcPs) deriving instance Data (FunDep GhcRn) deriving instance Data (FunDep GhcTc) -- deriving instance (DataIdLR p p) => Data (TyClGroup p) deriving instance Data (TyClGroup GhcPs) deriving instance Data (TyClGroup GhcRn) deriving instance Data (TyClGroup GhcTc) -- deriving instance (DataIdLR p p) => Data (FamilyResultSig p) deriving instance Data (FamilyResultSig GhcPs) deriving instance Data (FamilyResultSig GhcRn) deriving instance Data (FamilyResultSig GhcTc) -- deriving instance (DataIdLR p p) => Data (FamilyDecl p) deriving instance Data (FamilyDecl GhcPs) deriving instance Data (FamilyDecl GhcRn) deriving instance Data (FamilyDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (InjectivityAnn p) deriving instance Data (InjectivityAnn GhcPs) deriving instance Data (InjectivityAnn GhcRn) deriving instance Data (InjectivityAnn GhcTc) -- deriving instance (DataIdLR p p) => Data (FamilyInfo p) deriving instance Data (FamilyInfo GhcPs) deriving instance Data (FamilyInfo GhcRn) deriving instance Data (FamilyInfo GhcTc) -- deriving instance (DataIdLR p p) => Data (HsDataDefn p) deriving instance Data (HsDataDefn GhcPs) deriving instance Data (HsDataDefn GhcRn) deriving instance Data (HsDataDefn GhcTc) -- deriving instance (DataIdLR p p) => Data (HsDerivingClause p) deriving instance Data (HsDerivingClause GhcPs) deriving instance Data (HsDerivingClause GhcRn) deriving instance Data (HsDerivingClause GhcTc) -- deriving instance DataIdLR p p => Data (DerivClauseTys p) deriving instance Data (DerivClauseTys GhcPs) deriving instance Data (DerivClauseTys GhcRn) deriving instance Data (DerivClauseTys GhcTc) -- deriving instance (DataIdLR p p) => Data (ConDecl p) deriving instance Data (ConDecl GhcPs) deriving instance Data (ConDecl GhcRn) deriving instance Data (ConDecl GhcTc) -- deriving instance DataIdLR p p => Data (HsConDeclGADTDetails p) deriving instance Data (HsConDeclGADTDetails GhcPs) 
deriving instance Data (HsConDeclGADTDetails GhcRn) deriving instance Data (HsConDeclGADTDetails GhcTc) -- deriving instance DataIdLR p p => Data (TyFamInstDecl p) deriving instance Data (TyFamInstDecl GhcPs) deriving instance Data (TyFamInstDecl GhcRn) deriving instance Data (TyFamInstDecl GhcTc) -- deriving instance DataIdLR p p => Data (DataFamInstDecl p) deriving instance Data (DataFamInstDecl GhcPs) deriving instance Data (DataFamInstDecl GhcRn) deriving instance Data (DataFamInstDecl GhcTc) -- deriving instance (DataIdLR p p,Data rhs)=>Data (FamEqn p rhs) deriving instance Data rhs => Data (FamEqn GhcPs rhs) deriving instance Data rhs => Data (FamEqn GhcRn rhs) deriving instance Data rhs => Data (FamEqn GhcTc rhs) -- deriving instance (DataIdLR p p) => Data (ClsInstDecl p) deriving instance Data (ClsInstDecl GhcPs) deriving instance Data (ClsInstDecl GhcRn) deriving instance Data (ClsInstDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (InstDecl p) deriving instance Data (InstDecl GhcPs) deriving instance Data (InstDecl GhcRn) deriving instance Data (InstDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (DerivDecl p) deriving instance Data (DerivDecl GhcPs) deriving instance Data (DerivDecl GhcRn) deriving instance Data (DerivDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (DerivStrategy p) deriving instance Data (DerivStrategy GhcPs) deriving instance Data (DerivStrategy GhcRn) deriving instance Data (DerivStrategy GhcTc) -- deriving instance (DataIdLR p p) => Data (DefaultDecl p) deriving instance Data (DefaultDecl GhcPs) deriving instance Data (DefaultDecl GhcRn) deriving instance Data (DefaultDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (ForeignDecl p) deriving instance Data (ForeignDecl GhcPs) deriving instance Data (ForeignDecl GhcRn) deriving instance Data (ForeignDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (RuleDecls p) deriving instance Data (RuleDecls GhcPs) deriving instance Data (RuleDecls GhcRn) 
deriving instance Data (RuleDecls GhcTc) -- deriving instance (DataIdLR p p) => Data (RuleDecl p) deriving instance Data (RuleDecl GhcPs) deriving instance Data (RuleDecl GhcRn) deriving instance Data (RuleDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (RuleBndr p) deriving instance Data (RuleBndr GhcPs) deriving instance Data (RuleBndr GhcRn) deriving instance Data (RuleBndr GhcTc) -- deriving instance (DataId p) => Data (WarnDecls p) deriving instance Data (WarnDecls GhcPs) deriving instance Data (WarnDecls GhcRn) deriving instance Data (WarnDecls GhcTc) -- deriving instance (DataId p) => Data (WarnDecl p) deriving instance Data (WarnDecl GhcPs) deriving instance Data (WarnDecl GhcRn) deriving instance Data (WarnDecl GhcTc) -- deriving instance (DataIdLR p p) => Data (AnnDecl p) deriving instance Data (AnnProvenance GhcPs) deriving instance Data (AnnProvenance GhcRn) deriving instance Data (AnnProvenance GhcTc) deriving instance Data (AnnDecl GhcPs) deriving instance Data (AnnDecl GhcRn) deriving instance Data (AnnDecl GhcTc) -- deriving instance (DataId p) => Data (RoleAnnotDecl p) deriving instance Data (RoleAnnotDecl GhcPs) deriving instance Data (RoleAnnotDecl GhcRn) deriving instance Data (RoleAnnotDecl GhcTc) -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Expr ----------------------------------- deriving instance Data (FieldLabelStrings GhcPs) deriving instance Data (FieldLabelStrings GhcRn) deriving instance Data (FieldLabelStrings GhcTc) deriving instance Data (DotFieldOcc GhcPs) deriving instance Data (DotFieldOcc GhcRn) deriving instance Data (DotFieldOcc GhcTc) -- deriving instance (DataIdLR p p) => Data (HsPragE p) deriving instance Data (HsPragE GhcPs) deriving instance Data (HsPragE GhcRn) deriving instance Data (HsPragE GhcTc) -- deriving instance (DataIdLR p p) => Data (HsExpr p) deriving instance Data (HsExpr GhcPs) deriving instance Data (HsExpr GhcRn) deriving instance Data 
(HsExpr GhcTc) -- deriving instance (DataIdLR p p) => Data (HsTupArg p) deriving instance Data (HsTupArg GhcPs) deriving instance Data (HsTupArg GhcRn) deriving instance Data (HsTupArg GhcTc) -- deriving instance (DataIdLR p p) => Data (HsCmd p) deriving instance Data (HsCmd GhcPs) deriving instance Data (HsCmd GhcRn) deriving instance Data (HsCmd GhcTc) -- deriving instance (DataIdLR p p) => Data (HsCmdTop p) deriving instance Data (HsCmdTop GhcPs) deriving instance Data (HsCmdTop GhcRn) deriving instance Data (HsCmdTop GhcTc) -- deriving instance (DataIdLR p p,Data body) => Data (MatchGroup p body) deriving instance Data (MatchGroup GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (MatchGroup GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (MatchGroup GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (MatchGroup GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (MatchGroup GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (MatchGroup GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (Match p body) deriving instance Data (Match GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (Match GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (Match GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (Match GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (Match GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (Match GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (GRHSs p body) deriving instance Data (GRHSs GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (GRHSs GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (GRHSs GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (GRHSs GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (GRHSs GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (GRHSs GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (GRHS p body) deriving instance Data (GRHS 
GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (GRHS GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (GRHS GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (GRHS GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (GRHS GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (GRHS GhcTc (LocatedA (HsCmd GhcTc))) -- deriving instance (DataIdLR p p,Data body) => Data (StmtLR p p body) deriving instance Data (StmtLR GhcPs GhcPs (LocatedA (HsExpr GhcPs))) deriving instance Data (StmtLR GhcPs GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (StmtLR GhcRn GhcRn (LocatedA (HsExpr GhcRn))) deriving instance Data (StmtLR GhcTc GhcTc (LocatedA (HsExpr GhcTc))) deriving instance Data (StmtLR GhcPs GhcPs (LocatedA (HsCmd GhcPs))) deriving instance Data (StmtLR GhcPs GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (StmtLR GhcRn GhcRn (LocatedA (HsCmd GhcRn))) deriving instance Data (StmtLR GhcTc GhcTc (LocatedA (HsCmd GhcTc))) deriving instance Data RecStmtTc -- deriving instance (DataIdLR p p) => Data (ParStmtBlock p p) deriving instance Data (ParStmtBlock GhcPs GhcPs) deriving instance Data (ParStmtBlock GhcPs GhcRn) deriving instance Data (ParStmtBlock GhcRn GhcRn) deriving instance Data (ParStmtBlock GhcTc GhcTc) -- deriving instance (DataIdLR p p) => Data (ApplicativeArg p) deriving instance Data (ApplicativeArg GhcPs) deriving instance Data (ApplicativeArg GhcRn) deriving instance Data (ApplicativeArg GhcTc) deriving instance Data (HsStmtContext GhcPs) deriving instance Data (HsStmtContext GhcRn) deriving instance Data (HsStmtContext GhcTc) deriving instance Data HsArrowMatchContext deriving instance Data HsDoFlavour deriving instance Data (HsMatchContext GhcPs) deriving instance Data (HsMatchContext GhcRn) deriving instance Data (HsMatchContext GhcTc) -- deriving instance (DataIdLR p p) => Data (HsSplice p) deriving instance Data (HsSplice GhcPs) deriving instance Data (HsSplice GhcRn) deriving instance Data (HsSplice GhcTc) -- 
deriving instance (DataIdLR p p) => Data (HsSplicedThing p) deriving instance Data (HsSplicedThing GhcPs) deriving instance Data (HsSplicedThing GhcRn) deriving instance Data (HsSplicedThing GhcTc) -- deriving instance (DataIdLR p p) => Data (HsQuote p) deriving instance Data (HsQuote GhcPs) deriving instance Data (HsQuote GhcRn) deriving instance Data (HsQuote GhcTc) deriving instance Data HsBracketTc -- deriving instance (DataIdLR p p) => Data (ArithSeqInfo p) deriving instance Data (ArithSeqInfo GhcPs) deriving instance Data (ArithSeqInfo GhcRn) deriving instance Data (ArithSeqInfo GhcTc) deriving instance Data RecordUpdTc deriving instance Data CmdTopTc deriving instance Data PendingRnSplice deriving instance Data PendingTcSplice deriving instance Data SyntaxExprRn deriving instance Data SyntaxExprTc deriving instance Data XBindStmtRn deriving instance Data XBindStmtTc -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Lit ------------------------------------ -- deriving instance (DataId p) => Data (HsLit p) deriving instance Data (HsLit GhcPs) deriving instance Data (HsLit GhcRn) deriving instance Data (HsLit GhcTc) -- deriving instance (DataIdLR p p) => Data (HsOverLit p) deriving instance Data (HsOverLit GhcPs) deriving instance Data (HsOverLit GhcRn) deriving instance Data (HsOverLit GhcTc) deriving instance Data OverLitRn deriving instance Data OverLitTc -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Pat ------------------------------------ -- deriving instance (DataIdLR p p) => Data (Pat p) deriving instance Data (Pat GhcPs) deriving instance Data (Pat GhcRn) deriving instance Data (Pat GhcTc) deriving instance Data ConPatTc deriving instance (Data a, Data b) => Data (HsFieldBind a b) deriving instance (Data body) => Data (HsRecFields GhcPs body) deriving instance (Data body) => Data (HsRecFields GhcRn body) deriving instance (Data body) => Data 
(HsRecFields GhcTc body) -- --------------------------------------------------------------------- -- Data derivations from GHC.Hs.Type ---------------------------------- -- deriving instance (DataIdLR p p) => Data (LHsQTyVars p) deriving instance Data (LHsQTyVars GhcPs) deriving instance Data (LHsQTyVars GhcRn) deriving instance Data (LHsQTyVars GhcTc) -- deriving instance (Data flag, DataIdLR p p) => Data (HsOuterTyVarBndrs p) deriving instance Data flag => Data (HsOuterTyVarBndrs flag GhcPs) deriving instance Data flag => Data (HsOuterTyVarBndrs flag GhcRn) deriving instance Data flag => Data (HsOuterTyVarBndrs flag GhcTc) -- deriving instance (DataIdLR p p) => Data (HsSigType p) deriving instance Data (HsSigType GhcPs) deriving instance Data (HsSigType GhcRn) deriving instance Data (HsSigType GhcTc) -- deriving instance (DataIdLR p p, Data thing) =>Data (HsWildCardBndrs p thing) deriving instance (Data thing) => Data (HsWildCardBndrs GhcPs thing) deriving instance (Data thing) => Data (HsWildCardBndrs GhcRn thing) deriving instance (Data thing) => Data (HsWildCardBndrs GhcTc thing) -- deriving instance (DataIdLR p p) => Data (HsPatSigType p) deriving instance Data (HsPatSigType GhcPs) deriving instance Data (HsPatSigType GhcRn) deriving instance Data (HsPatSigType GhcTc) -- deriving instance (DataIdLR p p) => Data (HsForAllTelescope p) deriving instance Data (HsForAllTelescope GhcPs) deriving instance Data (HsForAllTelescope GhcRn) deriving instance Data (HsForAllTelescope GhcTc) -- deriving instance (DataIdLR p p) => Data (HsTyVarBndr p) deriving instance (Data flag) => Data (HsTyVarBndr flag GhcPs) deriving instance (Data flag) => Data (HsTyVarBndr flag GhcRn) deriving instance (Data flag) => Data (HsTyVarBndr flag GhcTc) -- deriving instance (DataIdLR p p) => Data (HsType p) deriving instance Data (HsType GhcPs) deriving instance Data (HsType GhcRn) deriving instance Data (HsType GhcTc) -- deriving instance Data (HsLinearArrowTokens p) deriving instance Data 
(HsLinearArrowTokens GhcPs) deriving instance Data (HsLinearArrowTokens GhcRn) deriving instance Data (HsLinearArrowTokens GhcTc) -- deriving instance (DataIdLR p p) => Data (HsArrow p) deriving instance Data (HsArrow GhcPs) deriving instance Data (HsArrow GhcRn) deriving instance Data (HsArrow GhcTc) -- deriving instance (DataIdLR p p) => Data (HsScaled p a) deriving instance Data thing => Data (HsScaled GhcPs thing) deriving instance Data thing => Data (HsScaled GhcRn thing) deriving instance Data thing => Data (HsScaled GhcTc thing) deriving instance (Data a, Data b) => Data (HsArg a b) -- deriving instance Data (HsArg (Located (HsType GhcPs)) (Located (HsKind GhcPs))) -- deriving instance Data (HsArg (Located (HsType GhcRn)) (Located (HsKind GhcRn))) -- deriving instance Data (HsArg (Located (HsType GhcTc)) (Located (HsKind GhcTc))) -- deriving instance (DataIdLR p p) => Data (ConDeclField p) deriving instance Data (ConDeclField GhcPs) deriving instance Data (ConDeclField GhcRn) deriving instance Data (ConDeclField GhcTc) -- deriving instance (DataId p) => Data (FieldOcc p) deriving instance Data (FieldOcc GhcPs) deriving instance Data (FieldOcc GhcRn) deriving instance Data (FieldOcc GhcTc) -- deriving instance DataId p => Data (AmbiguousFieldOcc p) deriving instance Data (AmbiguousFieldOcc GhcPs) deriving instance Data (AmbiguousFieldOcc GhcRn) deriving instance Data (AmbiguousFieldOcc GhcTc) -- deriving instance (DataId name) => Data (ImportDecl name) deriving instance Data (ImportDecl GhcPs) deriving instance Data (ImportDecl GhcRn) deriving instance Data (ImportDecl GhcTc) -- deriving instance (DataId name) => Data (IE name) deriving instance Data (IE GhcPs) deriving instance Data (IE GhcRn) deriving instance Data (IE GhcTc) -- deriving instance (Eq name, Eq (IdP name)) => Eq (IE name) deriving instance Eq (IE GhcPs) deriving instance Eq (IE GhcRn) deriving instance Eq (IE GhcTc) -- --------------------------------------------------------------------- 
deriving instance Data XXExprGhcTc deriving instance Data XXPatGhcTc -- --------------------------------------------------------------------- deriving instance Data XViaStrategyPs -- --------------------------------------------------------------------- ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Lit.hs0000644000000000000000000001610214472400112017271 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE TypeApplications #-} {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable, OutputableBndrId {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Source-language literals module GHC.Hs.Lit ( module Language.Haskell.Syntax.Lit , module GHC.Hs.Lit ) where import GHC.Prelude import {-# SOURCE #-} GHC.Hs.Expr( pprExpr ) import Language.Haskell.Syntax.Lit import GHC.Types.SourceText import GHC.Core.Type import GHC.Utils.Outputable import Language.Haskell.Syntax.Expr ( HsExpr ) import Language.Haskell.Syntax.Extension import GHC.Hs.Extension {- ************************************************************************ * * \subsection[HsLit]{Literals} * * ************************************************************************ -} type instance XHsChar (GhcPass _) = SourceText type instance XHsCharPrim (GhcPass _) = SourceText type instance XHsString (GhcPass _) = SourceText type instance XHsStringPrim (GhcPass _) = SourceText type instance XHsInt (GhcPass _) = NoExtField type instance XHsIntPrim (GhcPass _) = SourceText type instance XHsWordPrim (GhcPass _) = SourceText type instance XHsInt64Prim (GhcPass _) = SourceText type instance XHsWord64Prim (GhcPass _) = SourceText type instance XHsInteger (GhcPass _) = SourceText type instance XHsRat (GhcPass _) = 
NoExtField type instance XHsFloatPrim (GhcPass _) = NoExtField type instance XHsDoublePrim (GhcPass _) = NoExtField type instance XXLit (GhcPass _) = DataConCantHappen data OverLitRn = OverLitRn { ol_rebindable :: Bool, -- Note [ol_rebindable] ol_from_fun :: LIdP GhcRn -- Note [Overloaded literal witnesses] } data OverLitTc = OverLitTc { ol_rebindable :: Bool, -- Note [ol_rebindable] ol_witness :: HsExpr GhcTc, -- Note [Overloaded literal witnesses] ol_type :: Type } {- Note [Overloaded literal witnesses] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During renaming, the coercion function needed for a given HsOverLit is resolved according to the current scope and RebindableSyntax (see Note [ol_rebindable]). The result of this resolution *before* type checking is the coercion function such as 'fromInteger' or 'fromRational', stored in the ol_from_fun field of OverLitRn. *After* type checking, the ol_witness field of the OverLitTc contains the witness of the literal as HsExpr, such as (fromInteger 3) or lit_78. This witness should replace the literal. Reason: it allows commoning up of the fromInteger calls, which wouldn't be possible if the desugarer made the application. The ol_type in OverLitTc records the type the overloaded literal is found to have. 
-} type instance XOverLit GhcPs = NoExtField type instance XOverLit GhcRn = OverLitRn type instance XOverLit GhcTc = OverLitTc pprXOverLit :: GhcPass p -> XOverLit (GhcPass p) -> SDoc pprXOverLit GhcPs noExt = ppr noExt pprXOverLit GhcRn OverLitRn{ ol_from_fun = from_fun } = ppr from_fun pprXOverLit GhcTc OverLitTc{ ol_witness = witness } = pprExpr witness type instance XXOverLit (GhcPass _) = DataConCantHappen overLitType :: HsOverLit GhcTc -> Type overLitType (OverLit OverLitTc{ ol_type = ty } _) = ty -- | Convert a literal from one index type to another convertLit :: HsLit (GhcPass p1) -> HsLit (GhcPass p2) convertLit (HsChar a x) = HsChar a x convertLit (HsCharPrim a x) = HsCharPrim a x convertLit (HsString a x) = HsString a x convertLit (HsStringPrim a x) = HsStringPrim a x convertLit (HsInt a x) = HsInt a x convertLit (HsIntPrim a x) = HsIntPrim a x convertLit (HsWordPrim a x) = HsWordPrim a x convertLit (HsInt64Prim a x) = HsInt64Prim a x convertLit (HsWord64Prim a x) = HsWord64Prim a x convertLit (HsInteger a x b) = HsInteger a x b convertLit (HsRat a x b) = HsRat a x b convertLit (HsFloatPrim a x) = HsFloatPrim a x convertLit (HsDoublePrim a x) = HsDoublePrim a x {- Note [ol_rebindable] ~~~~~~~~~~~~~~~~~~~~ The ol_rebindable field is True if this literal is actually using rebindable syntax. 
Specifically: False iff ol_from_fun / ol_witness is the standard one True iff ol_from_fun / ol_witness is non-standard Equivalently it's True if a) RebindableSyntax is on b) the witness for fromInteger/fromRational/fromString that happens to be in scope isn't the standard one -} -- Instance specific to GhcPs, need the SourceText instance Outputable (HsLit (GhcPass p)) where ppr (HsChar st c) = pprWithSourceText st (pprHsChar c) ppr (HsCharPrim st c) = pp_st_suffix st primCharSuffix (pprPrimChar c) ppr (HsString st s) = pprWithSourceText st (pprHsString s) ppr (HsStringPrim st s) = pprWithSourceText st (pprHsBytes s) ppr (HsInt _ i) = pprWithSourceText (il_text i) (integer (il_value i)) ppr (HsInteger st i _) = pprWithSourceText st (integer i) ppr (HsRat _ f _) = ppr f ppr (HsFloatPrim _ f) = ppr f <> primFloatSuffix ppr (HsDoublePrim _ d) = ppr d <> primDoubleSuffix ppr (HsIntPrim st i) = pprWithSourceText st (pprPrimInt i) ppr (HsWordPrim st w) = pprWithSourceText st (pprPrimWord w) ppr (HsInt64Prim st i) = pp_st_suffix st primInt64Suffix (pprPrimInt64 i) ppr (HsWord64Prim st w) = pp_st_suffix st primWord64Suffix (pprPrimWord64 w) pp_st_suffix :: SourceText -> SDoc -> SDoc -> SDoc pp_st_suffix NoSourceText _ doc = doc pp_st_suffix (SourceText st) suffix _ = text st <> suffix -- in debug mode, print the expression that it's resolved to, too instance OutputableBndrId p => Outputable (HsOverLit (GhcPass p)) where ppr (OverLit {ol_val=val, ol_ext=ext}) = ppr val <+> (whenPprDebug (parens (pprXOverLit (ghcPass @p) ext))) -- | pmPprHsLit pretty prints literals and is used when pretty printing pattern -- match warnings. All are printed the same (i.e., without hashes if they are -- primitive and not wrapped in constructors if they are boxed). 
This happens -- mainly for too reasons: -- * We do not want to expose their internal representation -- * The warnings become too messy pmPprHsLit :: HsLit (GhcPass x) -> SDoc pmPprHsLit (HsChar _ c) = pprHsChar c pmPprHsLit (HsCharPrim _ c) = pprHsChar c pmPprHsLit (HsString st s) = pprWithSourceText st (pprHsString s) pmPprHsLit (HsStringPrim _ s) = pprHsBytes s pmPprHsLit (HsInt _ i) = integer (il_value i) pmPprHsLit (HsIntPrim _ i) = integer i pmPprHsLit (HsWordPrim _ w) = integer w pmPprHsLit (HsInt64Prim _ i) = integer i pmPprHsLit (HsWord64Prim _ w) = integer w pmPprHsLit (HsInteger _ i _) = integer i pmPprHsLit (HsRat _ f _) = ppr f pmPprHsLit (HsFloatPrim _ f) = ppr f pmPprHsLit (HsDoublePrim _ d) = ppr d ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Pat.hs0000644000000000000000000006263614472400112017302 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[PatSyntax]{Abstract Haskell syntax---patterns} -} module GHC.Hs.Pat ( Pat(..), LPat, EpAnnSumPat(..), ConPatTc (..), ConLikeP, HsPatExpansion(..), XXPatGhcTc(..), HsConPatDetails, hsConPatArgs, HsRecFields(..), HsFieldBind(..), LHsFieldBind, HsRecField, LHsRecField, HsRecUpdField, LHsRecUpdField, hsRecFields, hsRecFieldSel, hsRecFieldId, hsRecFieldsArgs, hsRecUpdFieldId, hsRecUpdFieldOcc, hsRecUpdFieldRdr, mkPrefixConPat, mkCharLitPat, mkNilPat, isSimplePat, looksLazyPatBind, isBangedLPat, gParPat, patNeedsParens, parenthesizePat, isIrrefutableHsPat, collectEvVarsPat, 
collectEvVarsPats, pprParendLPat, pprConArgs, pprLPat ) where import GHC.Prelude import Language.Haskell.Syntax.Pat import Language.Haskell.Syntax.Expr ( HsExpr ) import {-# SOURCE #-} GHC.Hs.Expr (pprLExpr, pprSplice) -- friends: import GHC.Hs.Binds import GHC.Hs.Lit import Language.Haskell.Syntax.Extension import GHC.Parser.Annotation import GHC.Hs.Extension import GHC.Hs.Type import GHC.Tc.Types.Evidence import GHC.Types.Basic import GHC.Types.SourceText -- others: import GHC.Core.Ppr ( {- instance OutputableBndr TyVar -} ) import GHC.Builtin.Types import GHC.Types.Var import GHC.Types.Name.Reader ( RdrName ) import GHC.Core.ConLike import GHC.Core.DataCon import GHC.Core.TyCon import GHC.Utils.Outputable import GHC.Core.Type import GHC.Types.SrcLoc import GHC.Data.Bag -- collect ev vars from pats import GHC.Data.Maybe import GHC.Types.Name (Name) import GHC.Driver.Session import qualified GHC.LanguageExtensions as LangExt import Data.Data type instance XWildPat GhcPs = NoExtField type instance XWildPat GhcRn = NoExtField type instance XWildPat GhcTc = Type type instance XVarPat (GhcPass _) = NoExtField type instance XLazyPat GhcPs = EpAnn [AddEpAnn] -- For '~' type instance XLazyPat GhcRn = NoExtField type instance XLazyPat GhcTc = NoExtField type instance XAsPat GhcPs = EpAnn [AddEpAnn] -- For '@' type instance XAsPat GhcRn = NoExtField type instance XAsPat GhcTc = NoExtField type instance XParPat (GhcPass _) = EpAnnCO type instance XBangPat GhcPs = EpAnn [AddEpAnn] -- For '!' type instance XBangPat GhcRn = NoExtField type instance XBangPat GhcTc = NoExtField type instance XListPat GhcPs = EpAnn AnnList -- After parsing, ListPat can refer to a built-in Haskell list pattern -- or an overloaded list pattern. type instance XListPat GhcRn = NoExtField -- Built-in list patterns only. -- After renaming, overloaded list patterns are expanded to view patterns. 
-- See Note [Desugaring overloaded list patterns] type instance XListPat GhcTc = Type -- List element type, for use in hsPatType. type instance XTuplePat GhcPs = EpAnn [AddEpAnn] type instance XTuplePat GhcRn = NoExtField type instance XTuplePat GhcTc = [Type] type instance XSumPat GhcPs = EpAnn EpAnnSumPat type instance XSumPat GhcRn = NoExtField type instance XSumPat GhcTc = [Type] type instance XConPat GhcPs = EpAnn [AddEpAnn] type instance XConPat GhcRn = NoExtField type instance XConPat GhcTc = ConPatTc type instance XViewPat GhcPs = EpAnn [AddEpAnn] type instance XViewPat GhcRn = Maybe (HsExpr GhcRn) -- The @HsExpr GhcRn@ gives an inverse to the view function. -- This is used for overloaded lists in particular. -- See Note [Invertible view patterns] in GHC.Tc.TyCl.PatSyn. type instance XViewPat GhcTc = Type -- Overall type of the pattern -- (= the argument type of the view function), for hsPatType. type instance XSplicePat GhcPs = NoExtField type instance XSplicePat GhcRn = NoExtField type instance XSplicePat GhcTc = DataConCantHappen type instance XLitPat (GhcPass _) = NoExtField type instance XNPat GhcPs = EpAnn [AddEpAnn] type instance XNPat GhcRn = EpAnn [AddEpAnn] type instance XNPat GhcTc = Type type instance XNPlusKPat GhcPs = EpAnn EpaLocation -- Of the "+" type instance XNPlusKPat GhcRn = NoExtField type instance XNPlusKPat GhcTc = Type type instance XSigPat GhcPs = EpAnn [AddEpAnn] type instance XSigPat GhcRn = NoExtField type instance XSigPat GhcTc = Type type instance XXPat GhcPs = DataConCantHappen type instance XXPat GhcRn = HsPatExpansion (Pat GhcRn) (Pat GhcRn) -- Original pattern and its desugaring/expansion. -- See Note [Rebindable syntax and HsExpansion]. type instance XXPat GhcTc = XXPatGhcTc -- After typechecking, we add extra constructors: CoPat and HsExpansion. -- HsExpansion allows us to handle RebindableSyntax in pattern position: -- see "XXExpr GhcTc" for the counterpart in expressions. 
-- The constructor reference in a ConPat, resolved further at each pass:
-- a source name, then a renamed Name, then the actual ConLike.
type instance ConLikeP GhcPs = RdrName -- IdP GhcPs
type instance ConLikeP GhcRn = Name    -- IdP GhcRn
type instance ConLikeP GhcTc = ConLike

type instance XHsFieldBind _ = EpAnn [AddEpAnn]

-- ---------------------------------------------------------------------

-- API Annotations types

-- | Exact-print annotations for an unboxed sum pattern:
-- the @(#@/@#)@ parens and the vertical bars before/after the payload.
data EpAnnSumPat = EpAnnSumPat
      { sumPatParens      :: [AddEpAnn]
      , sumPatVbarsBefore :: [EpaLocation]
      , sumPatVbarsAfter  :: [EpaLocation]
      } deriving Data

-- ---------------------------------------------------------------------

-- | Extension constructor for Pat, added after typechecking.
data XXPatGhcTc
  = -- | Coercion Pattern (translation only)
    --
    -- During desugaring a (CoPat co pat) turns into a cast with 'co' on the
    -- scrutinee, followed by a match on 'pat'.
    CoPat
      { -- | Coercion Pattern
        -- If co :: t1 ~ t2, p :: t2,
        -- then (CoPat co p) :: t1
        co_cpt_wrap :: HsWrapper

      , -- | Why not LPat?  Ans: existing locn will do
        co_pat_inner :: Pat GhcTc

      , -- | Type of whole pattern, t1
        co_pat_ty :: Type
      }

  -- | Pattern expansion: original pattern, and desugared pattern,
  -- for RebindableSyntax and other overloaded syntax such as OverloadedLists.
  -- See Note [Rebindable syntax and HsExpansion].
  | ExpansionPat (Pat GhcRn) (Pat GhcTc)

-- See Note [Rebindable syntax and HsExpansion].
data HsPatExpansion a b
  = HsPatExpanded a b
  deriving Data

-- | This is the extension field for ConPat, added after typechecking
-- It adds quite a few extra fields, to support elaboration of pattern matching.
data ConPatTc
  = ConPatTc
    { -- | The universal arg types  1-1 with the universal
      -- tyvars of the constructor/pattern synonym
      -- Use (conLikeResTy pat_con cpt_arg_tys) to get
      -- the type of the pattern
      cpt_arg_tys :: [Type]

    , -- | Existentially bound type variables
      -- in correctly-scoped order e.g. [k:* x:k]
      cpt_tvs :: [TyVar]

    , -- | Ditto *coercion variables* and *dictionaries*
      -- One reason for putting coercion variable here I think
      -- is to ensure their kinds are zonked
      cpt_dicts :: [EvVar]

    , -- | Bindings involving those dictionaries
      cpt_binds :: TcEvBinds

    , -- | Extra wrapper to pass to the matcher
      -- Only relevant for pattern-synonyms;
      -- ignored for data cons
      cpt_wrap :: HsWrapper
    }

-- | Project the selector 'Id' out of a typechecked record field
-- (just 'hsRecFieldSel' at GhcTc).
hsRecFieldId :: HsRecField GhcTc arg -> Id
hsRecFieldId = hsRecFieldSel

-- | The 'RdrName' of the field being updated, with its location.
hsRecUpdFieldRdr :: HsRecUpdField (GhcPass p) -> Located RdrName
hsRecUpdFieldRdr = fmap rdrNameAmbiguousFieldOcc . reLoc . hfbLHS

-- | The selector 'Id' of an (already disambiguated) record-update field.
hsRecUpdFieldId :: HsFieldBind (LAmbiguousFieldOcc GhcTc) arg -> Located Id
hsRecUpdFieldId = fmap foExt . reLoc . hsRecUpdFieldOcc

-- | Resolve the ambiguous field occurrence on the LHS of a record update;
-- safe at GhcTc, where disambiguation has already happened.
hsRecUpdFieldOcc :: HsFieldBind (LAmbiguousFieldOcc GhcTc) arg -> LFieldOcc GhcTc
hsRecUpdFieldOcc = fmap unambiguousFieldOcc . hfbLHS

{-
************************************************************************
*                                                                      *
*              Printing patterns
*                                                                      *
************************************************************************
-}

instance OutputableBndrId p => Outputable (Pat (GhcPass p)) where
    ppr = pprPat

-- See Note [Rebindable syntax and HsExpansion].
instance (Outputable a, Outputable b) => Outputable (HsPatExpansion a b) where
  ppr (HsPatExpanded a b) = ifPprDebug (vcat [ppr a, ppr b]) (ppr a)

-- | Print a located pattern (the location itself is not printed).
pprLPat :: (OutputableBndrId p) => LPat (GhcPass p) -> SDoc
pprLPat (L _ e) = pprPat e

-- | Print with type info if -dppr-debug is on
pprPatBndr :: OutputableBndr name => name -> SDoc
pprPatBndr var
  = getPprDebug $ \case
      True  -> parens (pprBndr LambdaBind var) -- Could pass the site to pprPat
                                               -- but is it worth it?
      False -> pprPrefixOcc var

-- | Like 'pprParendPat', on a located pattern.
pprParendLPat :: (OutputableBndrId p) => PprPrec -> LPat (GhcPass p) -> SDoc
pprParendLPat p = pprParendPat p . unLoc

pprParendPat :: forall p. OutputableBndrId p
             => PprPrec
             -> Pat (GhcPass p)
             -> SDoc
pprParendPat p pat = sdocOption sdocPrintTypecheckerElaboration $ \ print_tc_elab ->
    if need_parens print_tc_elab pat
    then parens (pprPat pat)
    else pprPat pat
  where
    need_parens print_tc_elab pat
      | GhcTc <- ghcPass @p
      , XPat (CoPat {}) <- pat
      = print_tc_elab
      | otherwise
      = patNeedsParens p pat
      -- For a CoPat we need parens if we are going to show it, which
      -- we do if -fprint-typechecker-elaboration is on (c.f. pprHsWrapper)
      -- But otherwise the CoPat is discarded, so it
      -- is the pattern inside that matters.  Sigh.

pprPat :: forall p. (OutputableBndrId p) => Pat (GhcPass p) -> SDoc
pprPat (VarPat _ lvar)          = pprPatBndr (unLoc lvar)
pprPat (WildPat _)              = char '_'
pprPat (LazyPat _ pat)          = char '~' <> pprParendLPat appPrec pat
pprPat (BangPat _ pat)          = char '!' <> pprParendLPat appPrec pat
pprPat (AsPat _ name pat)       = hcat [pprPrefixOcc (unLoc name), char '@',
                                        pprParendLPat appPrec pat]
pprPat (ViewPat _ expr pat)     = hcat [pprLExpr expr, text " -> ", ppr pat]
pprPat (ParPat _ _ pat _)       = parens (ppr pat)
pprPat (LitPat _ s)             = ppr s
pprPat (NPat _ l Nothing  _)    = ppr l
pprPat (NPat _ l (Just _) _)    = char '-' <> ppr l
pprPat (NPlusKPat _ n k _ _ _)  = hcat [ppr_n, char '+', ppr k]
  where
    -- The three identical-looking alternatives are needed: the type of
    -- 'n' (and its Anno) differs at each pass, so one 'ppr n' won't do.
    ppr_n = case ghcPass @p of
              GhcPs -> ppr n
              GhcRn -> ppr n
              GhcTc -> ppr n
pprPat (SplicePat _ splice)     = pprSplice splice
pprPat (SigPat _ pat ty)        = ppr pat <+> dcolon <+> ppr ty
pprPat (ListPat _ pats)         = brackets (interpp'SP pats)
pprPat (TuplePat _ pats bx)
    -- Special-case unary boxed tuples so that they are pretty-printed as
    -- `Solo x`, not `(x)`
  | [pat] <- pats
  , Boxed <- bx
  = hcat [text (mkTupleStr Boxed 1), pprParendLPat appPrec pat]
  | otherwise
  = tupleParens (boxityTupleSort bx) (pprWithCommas ppr pats)
pprPat (SumPat _ pat alt arity) = sumParens (pprAlternative ppr pat alt arity)
pprPat (ConPat { pat_con = con
               , pat_args = details
               , pat_con_ext = ext
               }
       )
  = case ghcPass @p of
      GhcPs -> pprUserCon (unLoc con) details
      GhcRn -> pprUserCon (unLoc con) details
      GhcTc -> sdocOption sdocPrintTypecheckerElaboration $ \case
        False -> pprUserCon (unLoc con) details
        True  ->
          -- Tiresome; in 'GHC.Tc.Gen.Bind.tcRhs' we print out a typechecked Pat in an
          -- error message, and we want to make sure it prints nicely
          ppr con
            <> braces (sep [ hsep (map pprPatBndr (tvs ++ dicts))
                           , ppr binds ])
            <+> pprConArgs details
        where ConPatTc { cpt_tvs = tvs
                       , cpt_dicts = dicts
                       , cpt_binds = binds
                       } = ext
pprPat (XPat ext) = case ghcPass @p of
#if __GLASGOW_HASKELL__ < 811
  GhcPs -> dataConCantHappen ext
#endif
  GhcRn -> case ext of
    HsPatExpanded orig _ -> pprPat orig
  GhcTc -> case ext of
    CoPat co pat _ ->
      pprHsWrapper co $ \parens
        -> if parens
           then pprParendPat appPrec pat
           else pprPat pat
    ExpansionPat orig _ -> pprPat orig

pprUserCon :: (OutputableBndr con, OutputableBndrId p,
               Outputable (Anno (IdGhcP p)))
           => con -> HsConPatDetails (GhcPass p) -> SDoc
pprUserCon c (InfixCon p1 p2) = ppr p1 <+> pprInfixOcc c <+> ppr p2
pprUserCon c details          = pprPrefixOcc c <+> pprConArgs details

pprConArgs :: (OutputableBndrId p,
               Outputable (Anno (IdGhcP p)))
           => HsConPatDetails (GhcPass p) -> SDoc
pprConArgs (PrefixCon ts pats) = fsep (pprTyArgs ts : map (pprParendLPat appPrec) pats)
  where pprTyArgs tyargs = fsep (map (\ty -> char '@' <> ppr ty) tyargs)
pprConArgs (InfixCon p1 p2)    = sep [ pprParendLPat appPrec p1
                                     , pprParendLPat appPrec p2 ]
pprConArgs (RecCon rpats)      = ppr rpats

{-
************************************************************************
*                                                                      *
*              Building patterns
*                                                                      *
************************************************************************
-}

mkPrefixConPat :: DataCon -> [LPat GhcTc] -> [Type] -> LPat GhcTc
-- Make a vanilla Prefix constructor pattern
mkPrefixConPat dc pats tys
  = noLocA $ ConPat { pat_con = noLocA (RealDataCon dc)
                    , pat_args = PrefixCon [] pats
                    , pat_con_ext = ConPatTc
                      { cpt_tvs = []
                      , cpt_dicts = []
                      , cpt_binds = emptyTcEvBinds
                      , cpt_arg_tys = tys
                      , cpt_wrap = idHsWrapper
                      }
                    }

-- | The pattern @[] :: [ty]@.
mkNilPat :: Type -> LPat GhcTc
mkNilPat ty = mkPrefixConPat nilDataCon [] [ty]

-- | A pattern matching one boxed 'Char' literal.
mkCharLitPat :: SourceText -> Char -> LPat GhcTc
mkCharLitPat src c = mkPrefixConPat charDataCon
                          [noLocA $ LitPat noExtField (HsCharPrim src c)] []

{-
************************************************************************
*                                                                      *
* Predicates for checking things about pattern-lists in EquationInfo   *
*                                                                      *
************************************************************************

\subsection[Pat-list-predicates]{Look for interesting things in patterns}

Unlike in the Wadler chapter, where patterns are either ``variables''
or ``constructors,'' here we distinguish between:
\begin{description}
\item[unfailable:]
Patterns that cannot fail to match: variables, wildcards, and lazy
patterns.

These are the irrefutable patterns; the two other categories
are refutable patterns.

\item[constructor:]
A non-literal constructor pattern (see next category).

\item[literal patterns:]
At least the numeric ones may be overloaded.
\end{description}

A pattern is in {\em exactly one} of the above three categories; `as'
patterns are treated specially, of course.

The 1.3 report defines what ``irrefutable'' and ``failure-free'' patterns are.
-}

isBangedLPat :: LPat (GhcPass p) -> Bool
isBangedLPat = isBangedPat . unLoc

-- | Is this a bang pattern, possibly wrapped in parens?
isBangedPat :: Pat (GhcPass p) -> Bool
isBangedPat (ParPat _ _ p _) = isBangedLPat p
isBangedPat (BangPat {})     = True
isBangedPat _                = False

looksLazyPatBind :: HsBind GhcTc -> Bool
-- Returns True of anything *except*
--   a StrictHsBind (as above) or
--   a VarPat
-- In particular, returns True of a pattern binding with a compound pattern, like (I# x)
-- Looks through AbsBinds
looksLazyPatBind (PatBind { pat_lhs = p })
  = looksLazyLPat p
looksLazyPatBind (XHsBindsLR (AbsBinds { abs_binds = binds }))
  = anyBag (looksLazyPatBind . unLoc) binds
looksLazyPatBind _
  = False

looksLazyLPat :: LPat (GhcPass p) -> Bool
looksLazyLPat = looksLazyPat . unLoc

looksLazyPat :: Pat (GhcPass p) -> Bool
looksLazyPat (ParPat _ _ p _) = looksLazyLPat p
looksLazyPat (AsPat _ _ p)    = looksLazyLPat p
looksLazyPat (BangPat {})     = False
looksLazyPat (VarPat {})      = False
looksLazyPat (WildPat {})     = False
looksLazyPat _                = True

isIrrefutableHsPat :: forall p. (OutputableBndrId p)
                   => DynFlags -> LPat (GhcPass p) -> Bool
-- (isIrrefutableHsPat p) is true if matching against p cannot fail,
-- in the sense of falling through to the next pattern.
--      (NB: this is not quite the same as the (silly) defn
--      in 3.17.2 of the Haskell 98 report.)
--
-- WARNING: isIrrefutableHsPat returns False if it's in doubt.
-- Specifically on a ConPatIn, which is what it sees for a
-- (LPat Name) in the renamer, it doesn't know the size of the
-- constructor family, so it returns False.  Result: only
-- tuple patterns are considered irrefutable at the renamer stage.
--
-- But if it returns True, the pattern is definitely irrefutable
isIrrefutableHsPat dflags =
    isIrrefutableHsPat' (xopt LangExt.Strict dflags)

{- Note [-XStrict and irrefutability]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When -XStrict is enabled the rules for irrefutability are slightly modified.
Specifically, the pattern in a program like

    do ~(Just hi) <- expr

cannot be considered irrefutable. The ~ here merely disables the bang that
-XStrict would usually apply, rendering the program equivalent to the following
without -XStrict

    do Just hi <- expr

To make this pattern irrefutable with -XStrict the user would instead need to
write

    do ~(~(Just hi)) <- expr

Failing to account for this resulted in #19027. To fix this
isIrrefutableHsPat takes care to check the irrefutability of the inner
pattern when it encounters a LazyPat and -XStrict is enabled.

See also Note [decideBangHood] in GHC.HsToCore.Utils.
-}

isIrrefutableHsPat' :: forall p. (OutputableBndrId p)
                    => Bool -- ^ Are we in a @-XStrict@ context?
                            -- See Note [-XStrict and irrefutability]
                    -> LPat (GhcPass p) -> Bool
isIrrefutableHsPat' is_strict = goL
  where
    goL :: LPat (GhcPass p) -> Bool
    goL = go . unLoc

    go :: Pat (GhcPass p) -> Bool
    go (WildPat {})        = True
    go (VarPat {})         = True
    go (LazyPat _ p')
      -- Under -XStrict a lazy pattern only shields one layer of
      -- strictness, so the pattern inside must itself be checked
      -- (with is_strict reset).  See Note [-XStrict and irrefutability].
      | is_strict
      = isIrrefutableHsPat' False p'
      | otherwise
      = True
    go (BangPat _ pat)     = goL pat
    go (ParPat _ _ pat _)  = goL pat
    go (AsPat _ _ pat)     = goL pat
    go (ViewPat _ _ pat)   = goL pat
    go (SigPat _ pat _)    = goL pat
    go (TuplePat _ pats _) = all goL pats
    go (SumPat {})         = False
    -- See Note [Unboxed sum patterns aren't irrefutable]
    go (ListPat {})        = False

    go (ConPat { pat_con = con
               , pat_args = details })
      = case ghcPass @p of
          GhcPs -> False -- Conservative
          GhcRn -> False -- Conservative
          GhcTc -> case con of
            L _ (PatSynCon _pat)  -> False -- Conservative
            L _ (RealDataCon con) ->
              -- Irrefutable only if the constructor is the sole
              -- inhabitant of its type and all sub-patterns are too.
              isJust (tyConSingleDataCon_maybe (dataConTyCon con))
              && all goL (hsConPatArgs details)
    go (LitPat {})    = False
    go (NPat {})      = False
    go (NPlusKPat {}) = False

    -- We conservatively assume that no TH splices are irrefutable
    -- since we cannot know until the splice is evaluated.
    go (SplicePat {}) = False

    go (XPat ext) = case ghcPass @p of
#if __GLASGOW_HASKELL__ < 811
      GhcPs -> dataConCantHappen ext
#endif
      GhcRn -> case ext of
        HsPatExpanded _ pat -> go pat
      GhcTc -> case ext of
        CoPat _ pat _      -> go pat
        ExpansionPat _ pat -> go pat

-- | Is the pattern any of combination of:
--
-- - (pat)
-- - pat :: Type
-- - ~pat
-- - !pat
-- - x (variable)
isSimplePat :: LPat (GhcPass x) -> Maybe (IdP (GhcPass x))
isSimplePat p = case unLoc p of
  ParPat _ _ x _ -> isSimplePat x
  SigPat _ x _   -> isSimplePat x
  LazyPat _ x    -> isSimplePat x
  BangPat _ x    -> isSimplePat x
  VarPat _ x     -> Just (unLoc x)
  _              -> Nothing

{- Note [Unboxed sum patterns aren't irrefutable]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unlike unboxed tuples, unboxed sums are *not* irrefutable when used as
patterns.
A simple example that demonstrates this is from #14228:

  pattern Just' x = (# x | #)
  pattern Nothing' = (# | () #)

  foo x = case x of
    Nothing' -> putStrLn "nothing"
    Just'    -> putStrLn "just"

In foo, the pattern Nothing' (that is, (# x | #)) is certainly not irrefutable,
as does not match an unboxed sum value of the same arity—namely, (# | y #)
(covered by Just'). In fact, no unboxed sum pattern is irrefutable, since the
minimum unboxed sum arity is 2.

Failing to mark unboxed sum patterns as non-irrefutable would cause the Just'
case in foo to be unreachable, as GHC would mistakenly believe that Nothing'
is the only thing that could possibly be matched!
-}

-- | @'patNeedsParens' p pat@ returns 'True' if the pattern @pat@ needs
-- parentheses under precedence @p@.
patNeedsParens :: forall p. IsPass p => PprPrec -> Pat (GhcPass p) -> Bool
patNeedsParens p = go @p
  where
    -- Remark: go needs to be polymorphic, as we call it recursively
    -- at a different GhcPass (see the case for GhcTc XPat below).
    go :: forall q. IsPass q => Pat (GhcPass q) -> Bool
    go (NPlusKPat {})             = p > opPrec
    go (SplicePat {})             = False
    go (ConPat { pat_args = ds }) = conPatNeedsParens p ds
    go (SigPat {})                = p >= sigPrec
    go (ViewPat {})               = True
    go (XPat ext)                 = case ghcPass @q of
#if __GLASGOW_HASKELL__ < 901
      -- NOTE(review): the analogous guards in pprPat and
      -- isIrrefutableHsPat' use __GLASGOW_HASKELL__ < 811; the mismatch
      -- here looks accidental -- confirm against upstream GHC.
      GhcPs -> dataConCantHappen ext
#endif
      GhcRn -> case ext of
        HsPatExpanded orig _ -> go orig
      GhcTc -> case ext of
        CoPat _ inner _     -> go inner
        ExpansionPat orig _ -> go orig
        --           ^^^^^^^
        -- NB: recursive call of go at a different GhcPass.
    go (WildPat {})               = False
    go (VarPat {})                = False
    go (LazyPat {})               = False
    go (BangPat {})               = False
    go (ParPat {})                = False
    go (AsPat {})                 = False
    -- Special-case unary boxed tuple applications so that they are
    -- parenthesized as `Identity (Solo x)`, not `Identity Solo x` (#18612)
    -- See Note [One-tuples] in GHC.Builtin.Types
    go (TuplePat _ [_] Boxed)     = p >= appPrec
    go (TuplePat{})               = False
    go (SumPat {})                = False
    go (ListPat {})               = False
    go (LitPat _ l)               = hsLitNeedsParens p l
    go (NPat _ lol _ _)           = hsOverLitNeedsParens p (unLoc lol)

-- | @'conPatNeedsParens' p cp@ returns 'True' if the constructor patterns @cp@
-- needs parentheses under precedence @p@.
conPatNeedsParens :: PprPrec -> HsConDetails t a b -> Bool
conPatNeedsParens p = go
  where
    go (PrefixCon ts args) = p >= appPrec && (not (null args) || not (null ts))
    go (InfixCon {})       = p >= opPrec -- type args should be empty in this case
    go (RecCon {})         = False

-- | Parenthesize a pattern without token information
gParPat :: LPat (GhcPass pass) -> Pat (GhcPass pass)
gParPat p = ParPat noAnn noHsTok p noHsTok

-- | @'parenthesizePat' p pat@ checks if @'patNeedsParens' p pat@ is true, and
-- if so, surrounds @pat@ with a 'ParPat'. Otherwise, it simply returns @pat@.
parenthesizePat :: IsPass p
                => PprPrec
                -> LPat (GhcPass p)
                -> LPat (GhcPass p)
parenthesizePat p lpat@(L loc pat)
  | patNeedsParens p pat = L loc (gParPat lpat)
  | otherwise            = lpat

{-
% Collect all EvVars from all constructor patterns
-}

-- May need to add more cases
collectEvVarsPats :: [Pat GhcTc] -> Bag EvVar
collectEvVarsPats = unionManyBags . map collectEvVarsPat

collectEvVarsLPat :: LPat GhcTc -> Bag EvVar
collectEvVarsLPat = collectEvVarsPat .
unLoc collectEvVarsPat :: Pat GhcTc -> Bag EvVar collectEvVarsPat pat = case pat of LazyPat _ p -> collectEvVarsLPat p AsPat _ _ p -> collectEvVarsLPat p ParPat _ _ p _ -> collectEvVarsLPat p BangPat _ p -> collectEvVarsLPat p ListPat _ ps -> unionManyBags $ map collectEvVarsLPat ps TuplePat _ ps _ -> unionManyBags $ map collectEvVarsLPat ps SumPat _ p _ _ -> collectEvVarsLPat p ConPat { pat_args = args , pat_con_ext = ConPatTc { cpt_dicts = dicts } } -> unionBags (listToBag dicts) $ unionManyBags $ map collectEvVarsLPat $ hsConPatArgs args SigPat _ p _ -> collectEvVarsLPat p XPat ext -> case ext of CoPat _ p _ -> collectEvVarsPat p ExpansionPat _ p -> collectEvVarsPat p _other_pat -> emptyBag {- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (Pat (GhcPass p)) = SrcSpanAnnA type instance Anno (HsOverLit (GhcPass p)) = SrcAnn NoEpAnns type instance Anno ConLike = SrcSpanAnnN type instance Anno (HsFieldBind lhs rhs) = SrcSpanAnnA ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Type.hs0000644000000000000000000014172214472400112017471 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- NamedThing, Outputable, OutputableBndrId {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 GHC.Hs.Type: Abstract syntax: user-defined types -} module GHC.Hs.Type ( Mult, HsScaled(..), hsMult, hsScaledThing, HsArrow(..), arrowToHsType, HsLinearArrowTokens(..), hsLinear, hsUnrestricted, 
isUnrestricted, pprHsArrow, HsType(..), HsCoreTy, LHsType, HsKind, LHsKind, HsForAllTelescope(..), EpAnnForallTy, HsTyVarBndr(..), LHsTyVarBndr, LHsQTyVars(..), HsOuterTyVarBndrs(..), HsOuterFamEqnTyVarBndrs, HsOuterSigTyVarBndrs, HsWildCardBndrs(..), HsPatSigType(..), HsPSRn(..), HsSigType(..), LHsSigType, LHsSigWcType, LHsWcType, HsTupleSort(..), HsContext, LHsContext, fromMaybeContext, HsTyLit(..), HsIPName(..), hsIPNameFS, HsArg(..), numVisibleArgs, pprHsArgsApp, LHsTypeArg, lhsTypeArgSrcSpan, OutputableBndrFlag, LBangType, BangType, HsSrcBang(..), HsImplBang(..), SrcStrictness(..), SrcUnpackedness(..), getBangType, getBangStrictness, ConDeclField(..), LConDeclField, pprConDeclFields, HsConDetails(..), noTypeArgs, FieldOcc(..), LFieldOcc, mkFieldOcc, AmbiguousFieldOcc(..), LAmbiguousFieldOcc, mkAmbiguousFieldOcc, rdrNameAmbiguousFieldOcc, selectorAmbiguousFieldOcc, unambiguousFieldOcc, ambiguousFieldOcc, mkAnonWildCardTy, pprAnonWildCard, hsOuterTyVarNames, hsOuterExplicitBndrs, mapHsOuterImplicit, mkHsOuterImplicit, mkHsOuterExplicit, mkHsImplicitSigType, mkHsExplicitSigType, mkHsWildCardBndrs, mkHsPatSigType, mkEmptyWildCardBndrs, mkHsForAllVisTele, mkHsForAllInvisTele, mkHsQTvs, hsQTvExplicit, emptyLHsQTvs, isHsKindedTyVar, hsTvbAllKinded, hsScopedTvs, hsWcScopedTvs, dropWildCards, hsTyVarName, hsAllLTyVarNames, hsLTyVarLocNames, hsLTyVarName, hsLTyVarNames, hsLTyVarLocName, hsExplicitLTyVarNames, splitLHsInstDeclTy, getLHsInstDeclHead, getLHsInstDeclClass_maybe, splitLHsPatSynTy, splitLHsForAllTyInvis, splitLHsForAllTyInvis_KP, splitLHsQualTy, splitLHsSigmaTyInvis, splitLHsGadtTy, splitHsFunType, hsTyGetAppHead_maybe, mkHsOpTy, mkHsAppTy, mkHsAppTys, mkHsAppKindTy, ignoreParens, hsSigWcType, hsPatSigType, hsTyKindSig, setHsTyVarBndrFlag, hsTyVarBndrFlag, -- Printing pprHsType, pprHsForAll, pprHsOuterFamEqnTyVarBndrs, pprHsOuterSigTyVarBndrs, pprLHsContext, hsTypeNeedsParens, parenthesizeHsType, parenthesizeHsContext ) where import GHC.Prelude import 
Language.Haskell.Syntax.Type import {-# SOURCE #-} GHC.Hs.Expr ( pprSplice ) import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Types.Fixity ( LexicalFixity(..) ) import GHC.Types.Id ( Id ) import GHC.Types.SourceText import GHC.Types.Name( Name, NamedThing(getName) ) import GHC.Types.Name.Reader ( RdrName ) import GHC.Types.Var ( VarBndr ) import GHC.Core.TyCo.Rep ( Type(..) ) import GHC.Builtin.Types( manyDataConName, oneDataConName, mkTupleStr ) import GHC.Core.Ppr ( pprOccWithTick) import GHC.Core.Type import GHC.Hs.Doc import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Utils.Outputable import Data.Maybe import qualified Data.Semigroup as S {- ************************************************************************ * * \subsection{Bang annotations} * * ************************************************************************ -} getBangType :: LHsType (GhcPass p) -> LHsType (GhcPass p) getBangType (L _ (HsBangTy _ _ lty)) = lty getBangType (L _ (HsDocTy x (L _ (HsBangTy _ _ lty)) lds)) = addCLocA lty lds (HsDocTy x lty lds) getBangType lty = lty getBangStrictness :: LHsType (GhcPass p) -> HsSrcBang getBangStrictness (L _ (HsBangTy _ s _)) = s getBangStrictness (L _ (HsDocTy _ (L _ (HsBangTy _ s _)) _)) = s getBangStrictness _ = (HsSrcBang NoSourceText NoSrcUnpack NoSrcStrict) {- ************************************************************************ * * \subsection{Data types} * * ************************************************************************ -} fromMaybeContext :: Maybe (LHsContext (GhcPass p)) -> HsContext (GhcPass p) fromMaybeContext mctxt = unLoc $ fromMaybe (noLocA []) mctxt type instance XHsForAllVis (GhcPass _) = EpAnnForallTy -- Location of 'forall' and '->' type instance XHsForAllInvis (GhcPass _) = EpAnnForallTy -- Location of 'forall' and '.' 
type instance XXHsForAllTelescope (GhcPass _) = DataConCantHappen

type EpAnnForallTy = EpAnn (AddEpAnn, AddEpAnn)
  -- ^ Location of 'forall' and '->' for HsForAllVis
  -- Location of 'forall' and '.' for HsForAllInvis

type HsQTvsRn = [Name]  -- Implicit variables
  -- For example, in   data T (a :: k1 -> k2) = ...
  -- the 'a' is explicit while 'k1', 'k2' are implicit

type instance XHsQTvs GhcPs = NoExtField
type instance XHsQTvs GhcRn = HsQTvsRn
type instance XHsQTvs GhcTc = HsQTvsRn

type instance XXLHsQTyVars (GhcPass _) = DataConCantHappen

-- | Build a visible forall-telescope (@forall a b -> ...@).
mkHsForAllVisTele :: EpAnnForallTy ->
  [LHsTyVarBndr () (GhcPass p)] -> HsForAllTelescope (GhcPass p)
mkHsForAllVisTele an vis_bndrs =
  HsForAllVis { hsf_xvis = an, hsf_vis_bndrs = vis_bndrs }

-- | Build an invisible forall-telescope (@forall a b. ...@).
mkHsForAllInvisTele :: EpAnnForallTy
  -> [LHsTyVarBndr Specificity (GhcPass p)] -> HsForAllTelescope (GhcPass p)
mkHsForAllInvisTele an invis_bndrs =
  HsForAllInvis { hsf_xinvis = an, hsf_invis_bndrs = invis_bndrs }

mkHsQTvs :: [LHsTyVarBndr () GhcPs] -> LHsQTyVars GhcPs
mkHsQTvs tvs = HsQTvs { hsq_ext = noExtField, hsq_explicit = tvs }

emptyLHsQTvs :: LHsQTyVars GhcRn
emptyLHsQTvs = HsQTvs { hsq_ext = [], hsq_explicit = [] }

------------------------------------------------
--            HsOuterTyVarBndrs

type instance XHsOuterImplicit GhcPs = NoExtField
type instance XHsOuterImplicit GhcRn = [Name]
type instance XHsOuterImplicit GhcTc = [TyVar]

type instance XHsOuterExplicit GhcPs _    = EpAnnForallTy
type instance XHsOuterExplicit GhcRn _    = NoExtField
type instance XHsOuterExplicit GhcTc flag = [VarBndr TyVar flag]

type instance XXHsOuterTyVarBndrs (GhcPass _) = DataConCantHappen

type instance XHsWC GhcPs b = NoExtField
type instance XHsWC GhcRn b = [Name]
type instance XHsWC GhcTc b = [Name]

type instance XXHsWildCardBndrs (GhcPass _) _ = DataConCantHappen

type instance XHsPS GhcPs = EpAnn EpaLocation
type instance XHsPS GhcRn = HsPSRn
type instance XHsPS GhcTc = HsPSRn

type instance XXHsPatSigType (GhcPass _) = DataConCantHappen

type instance XHsSig (GhcPass _) = NoExtField
type instance XXHsSigType (GhcPass _) = DataConCantHappen

-- | Strip both the wildcard wrapper and the signature wrapper to get
-- at the body of an 'LHsSigWcType'.
hsSigWcType :: forall p. UnXRec p => LHsSigWcType p -> LHsType p
hsSigWcType = sig_body . unXRec @p . hswc_body

dropWildCards :: LHsSigWcType pass -> LHsSigType pass
-- Drop the wildcard part of a LHsSigWcType
dropWildCards sig_ty = hswc_body sig_ty

-- | All the outer binders, implicit or explicit, as 'Name's.
hsOuterTyVarNames :: HsOuterTyVarBndrs flag GhcRn -> [Name]
hsOuterTyVarNames (HsOuterImplicit{hso_ximplicit = imp_tvs}) = imp_tvs
hsOuterTyVarNames (HsOuterExplicit{hso_bndrs = bndrs})       = hsLTyVarNames bndrs

-- | The explicitly written outer binders, if any (empty for implicit).
hsOuterExplicitBndrs :: HsOuterTyVarBndrs flag (GhcPass p)
                     -> [LHsTyVarBndr flag (NoGhcTc (GhcPass p))]
hsOuterExplicitBndrs (HsOuterExplicit{hso_bndrs = bndrs}) = bndrs
hsOuterExplicitBndrs (HsOuterImplicit{})                  = []

mkHsOuterImplicit :: HsOuterTyVarBndrs flag GhcPs
mkHsOuterImplicit = HsOuterImplicit{hso_ximplicit = noExtField}

mkHsOuterExplicit :: EpAnnForallTy -> [LHsTyVarBndr flag GhcPs]
                  -> HsOuterTyVarBndrs flag GhcPs
mkHsOuterExplicit an bndrs = HsOuterExplicit { hso_xexplicit = an
                                             , hso_bndrs     = bndrs }

mkHsImplicitSigType :: LHsType GhcPs -> HsSigType GhcPs
mkHsImplicitSigType body =
  HsSig { sig_ext   = noExtField
        , sig_bndrs = mkHsOuterImplicit, sig_body = body }

mkHsExplicitSigType :: EpAnnForallTy
                    -> [LHsTyVarBndr Specificity GhcPs] -> LHsType GhcPs
                    -> HsSigType GhcPs
mkHsExplicitSigType an bndrs body =
  HsSig { sig_ext = noExtField
        , sig_bndrs = mkHsOuterExplicit an bndrs, sig_body = body }

mkHsWildCardBndrs :: thing -> HsWildCardBndrs GhcPs thing
mkHsWildCardBndrs x = HsWC { hswc_body = x
                           , hswc_ext  = noExtField }

mkHsPatSigType :: EpAnn EpaLocation -> LHsType GhcPs -> HsPatSigType GhcPs
mkHsPatSigType ann x = HsPS { hsps_ext  = ann
                            , hsps_body = x }

mkEmptyWildCardBndrs :: thing -> HsWildCardBndrs GhcRn thing
mkEmptyWildCardBndrs x = HsWC { hswc_body = x
                              , hswc_ext  = [] }

--------------------------------------------------

type instance XUserTyVar   (GhcPass _) = EpAnn [AddEpAnn]
type instance XKindedTyVar (GhcPass _) = EpAnn [AddEpAnn]

type instance XXTyVarBndr  (GhcPass _) = DataConCantHappen

-- | Return the attached flag
hsTyVarBndrFlag :: HsTyVarBndr flag (GhcPass pass) -> flag
hsTyVarBndrFlag (UserTyVar _ fl _)     = fl
hsTyVarBndrFlag (KindedTyVar _ fl _ _) = fl

-- | Set the attached flag
setHsTyVarBndrFlag :: flag -> HsTyVarBndr flag' (GhcPass pass)
  -> HsTyVarBndr flag (GhcPass pass)
setHsTyVarBndrFlag f (UserTyVar x _ l)     = UserTyVar x f l
setHsTyVarBndrFlag f (KindedTyVar x _ l k) = KindedTyVar x f l k

-- | Do all type variables in this 'LHsQTyVars' come with kind annotations?
hsTvbAllKinded :: LHsQTyVars (GhcPass p) -> Bool
hsTvbAllKinded = all (isHsKindedTyVar . unLoc) . hsQTvExplicit

instance NamedThing (HsTyVarBndr flag GhcRn) where
  getName (UserTyVar _ _ v)     = unLoc v
  getName (KindedTyVar _ _ v _) = unLoc v

-- Extension fields (Trees That Grow) for the HsType constructors.

type instance XForAllTy        (GhcPass _) = NoExtField
type instance XQualTy          (GhcPass _) = NoExtField
type instance XTyVar           (GhcPass _) = EpAnn [AddEpAnn]
type instance XAppTy           (GhcPass _) = NoExtField
type instance XFunTy           (GhcPass _) = EpAnnCO
type instance XListTy          (GhcPass _) = EpAnn AnnParen
type instance XTupleTy         (GhcPass _) = EpAnn AnnParen
type instance XSumTy           (GhcPass _) = EpAnn AnnParen
type instance XOpTy            (GhcPass _) = EpAnn [AddEpAnn]
type instance XParTy           (GhcPass _) = EpAnn AnnParen
type instance XIParamTy        (GhcPass _) = EpAnn [AddEpAnn]
type instance XStarTy          (GhcPass _) = NoExtField
type instance XKindSig         (GhcPass _) = EpAnn [AddEpAnn]

type instance XAppKindTy       (GhcPass _) = SrcSpan -- Where the `@` lives

type instance XSpliceTy        GhcPs       = NoExtField
type instance XSpliceTy        GhcRn       = NoExtField
type instance XSpliceTy        GhcTc       = Kind

type instance XDocTy           (GhcPass _) = EpAnn [AddEpAnn]
type instance XBangTy          (GhcPass _) = EpAnn [AddEpAnn]

type instance XRecTy           GhcPs       = EpAnn AnnList
type instance XRecTy           GhcRn       = NoExtField
type instance XRecTy           GhcTc       = NoExtField

type instance XExplicitListTy  GhcPs       = EpAnn [AddEpAnn]
type instance XExplicitListTy  GhcRn       = NoExtField
type instance XExplicitListTy  GhcTc       = Kind

type instance XExplicitTupleTy GhcPs       = EpAnn [AddEpAnn]
type instance XExplicitTupleTy GhcRn       = NoExtField
type instance XExplicitTupleTy GhcTc       = [Kind]

type instance XTyLit           (GhcPass _) = NoExtField

type instance XWildCardTy      (GhcPass _) = NoExtField

type instance XXType           (GhcPass _) = HsCoreTy

-- Shorthands for the multiplicity type variables used for linear types.
oneDataConHsTy :: HsType GhcRn
oneDataConHsTy = HsTyVar noAnn NotPromoted (noLocA oneDataConName)

manyDataConHsTy :: HsType GhcRn
manyDataConHsTy = HsTyVar noAnn NotPromoted (noLocA manyDataConName)

-- | Wrap a thing with a linear (%1) arrow annotation.
hsLinear :: a -> HsScaled (GhcPass p) a
hsLinear = HsScaled (HsLinearArrow (HsPct1 noHsTok noHsUniTok))

-- | Wrap a thing with an unrestricted arrow annotation.
hsUnrestricted :: a -> HsScaled (GhcPass p) a
hsUnrestricted = HsScaled (HsUnrestrictedArrow noHsUniTok)

-- | True iff the arrow's multiplicity is the 'Many' data constructor.
isUnrestricted :: HsArrow GhcRn -> Bool
isUnrestricted (arrowToHsType -> L _ (HsTyVar _ _ (L _ n))) = n == manyDataConName
isUnrestricted _ = False

-- | Convert an arrow into its corresponding multiplicity. In essence this
-- erases the information of whether the programmer wrote an explicit
-- multiplicity or a shorthand.
arrowToHsType :: HsArrow GhcRn -> LHsType GhcRn
arrowToHsType (HsUnrestrictedArrow _) = noLocA manyDataConHsTy
arrowToHsType (HsLinearArrow _)       = noLocA oneDataConHsTy
arrowToHsType (HsExplicitMult _ p _)  = p

instance (OutputableBndrId pass) => Outputable (HsArrow (GhcPass pass)) where
  ppr arr = parens (pprHsArrow arr)

-- See #18846
pprHsArrow :: (OutputableBndrId pass) => HsArrow (GhcPass pass) -> SDoc
pprHsArrow (HsUnrestrictedArrow _) = arrow
pprHsArrow (HsLinearArrow _)       = lollipop
pprHsArrow (HsExplicitMult _ p _)  = mulArrow (ppr p)

type instance XConDeclField  (GhcPass _) = EpAnn [AddEpAnn]
type instance XXConDeclField (GhcPass _) = DataConCantHappen

instance OutputableBndrId p
       => Outputable (ConDeclField (GhcPass p)) where
  ppr (ConDeclField _ fld_n fld_ty _) = ppr fld_n <+> dcolon <+> ppr fld_ty

---------------------
hsWcScopedTvs :: LHsSigWcType GhcRn -> [Name]
-- Get the lexically-scoped type variables of an LHsSigWcType:
--  - the explicitly-given forall'd type variables;
--    see Note [Lexically scoped type variables]
--  - the named wildcards; see Note [Scoping of named wildcards]
-- because they scope in the same way
hsWcScopedTvs sig_wc_ty
  | HsWC { hswc_ext = nwcs, hswc_body = sig_ty }  <- sig_wc_ty
  , L _ (HsSig{sig_bndrs = outer_bndrs}) <- sig_ty
  = nwcs ++ hsLTyVarNames (hsOuterExplicitBndrs outer_bndrs)
            -- See Note [hsScopedTvs and visible foralls]

hsScopedTvs :: LHsSigType GhcRn -> [Name]
-- Same as hsWcScopedTvs, but for a LHsSigType
hsScopedTvs (L _ (HsSig{sig_bndrs = outer_bndrs}))
  = hsLTyVarNames (hsOuterExplicitBndrs outer_bndrs)
    -- See Note [hsScopedTvs and visible foralls]

---------------------
hsTyVarName :: HsTyVarBndr flag (GhcPass p) -> IdP (GhcPass p)
hsTyVarName (UserTyVar _ _ (L _ n))     = n
hsTyVarName (KindedTyVar _ _ (L _ n) _) = n

hsLTyVarName :: LHsTyVarBndr flag (GhcPass p) -> IdP (GhcPass p)
hsLTyVarName = hsTyVarName . unLoc

hsLTyVarNames :: [LHsTyVarBndr flag (GhcPass p)] -> [IdP (GhcPass p)]
hsLTyVarNames = map hsLTyVarName

hsExplicitLTyVarNames :: LHsQTyVars (GhcPass p) -> [IdP (GhcPass p)]
-- Explicit variables only
hsExplicitLTyVarNames qtvs = map hsLTyVarName (hsQTvExplicit qtvs)

hsAllLTyVarNames :: LHsQTyVars GhcRn -> [Name]
-- All variables
hsAllLTyVarNames (HsQTvs { hsq_ext = kvs
                         , hsq_explicit = tvs })
  = kvs ++ hsLTyVarNames tvs

-- | The binder's name, re-wrapped with the binder's location.
hsLTyVarLocName :: LHsTyVarBndr flag (GhcPass p) -> LocatedN (IdP (GhcPass p))
hsLTyVarLocName (L l a) = L (l2l l) (hsTyVarName a)

hsLTyVarLocNames :: LHsQTyVars (GhcPass p) -> [LocatedN (IdP (GhcPass p))]
hsLTyVarLocNames qtvs = map hsLTyVarLocName (hsQTvExplicit qtvs)

-- | Get the kind signature of a type, ignoring parentheses:
--
--   hsTyKindSig   `Maybe                    `   =   Nothing
--   hsTyKindSig   `Maybe ::   Type -> Type  `   =   Just  `Type -> Type`
--   hsTyKindSig   `Maybe :: ((Type -> Type))`   =   Just  `Type -> Type`
--
-- This is used to extract the result kind of type synonyms with a CUSK:
--
--  type S = (F :: res_kind)
--                 ^^^^^^^^
--
hsTyKindSig :: LHsType (GhcPass p) -> Maybe (LHsKind (GhcPass p))
hsTyKindSig lty =
  case unLoc lty of
    HsParTy _ lty'    -> hsTyKindSig lty'
    HsKindSig _ _ k   -> Just k
    _                 -> Nothing

---------------------
ignoreParens :: LHsType (GhcPass p) -> LHsType (GhcPass p)
ignoreParens (L _ (HsParTy _ ty)) = ignoreParens ty
ignoreParens ty                   = ty

{-
************************************************************************
*                                                                      *
                Building types
*                                                                      *
************************************************************************
-}

mkAnonWildCardTy :: HsType GhcPs
mkAnonWildCardTy = HsWildCardTy noExtField

mkHsOpTy :: (Anno (IdGhcP p) ~ SrcSpanAnnN)
         => PromotionFlag
         -> LHsType (GhcPass p) -> LocatedN (IdP (GhcPass p))
         -> LHsType (GhcPass p) -> HsType (GhcPass p)
mkHsOpTy prom ty1 op ty2 = HsOpTy noAnn prom ty1 op ty2

-- | Apply, parenthesizing the argument as needed at application precedence.
mkHsAppTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p)
mkHsAppTy t1 t2
  = addCLocAA t1 t2 (HsAppTy noExtField t1 (parenthesizeHsType appPrec t2))

mkHsAppTys :: LHsType (GhcPass p) -> [LHsType (GhcPass p)]
           -> LHsType (GhcPass p)
mkHsAppTys = foldl' mkHsAppTy

mkHsAppKindTy :: XAppKindTy (GhcPass p)
              -> LHsType (GhcPass p) -> LHsType (GhcPass p)
              -> LHsType (GhcPass p)
mkHsAppKindTy ext ty k
  = addCLocAA ty k (HsAppKindTy ext ty k)

{-
************************************************************************
*                                                                      *
                Decomposing HsTypes
*                                                                      *
************************************************************************
-}

---------------------------------
-- splitHsFunType decomposes a type (t1 -> t2 ... -> tn)
-- Breaks up any parens in the result type:
--      splitHsFunType (a -> (b -> c)) = ([a,b], c)
-- It returns API Annotations for any parens removed
splitHsFunType ::
     LHsType (GhcPass p)
  -> ( [AddEpAnn], EpAnnComments -- The locations of any parens and
                                 -- comments discarded
     , [HsScaled (GhcPass p) (LHsType (GhcPass p))], LHsType (GhcPass p))
splitHsFunType ty = go ty
  where
    -- Strip parens, accumulating their annotations and comments.
    go (L l (HsParTy an ty))
      = let
          (anns, cs, args, res) = splitHsFunType ty
          anns' = anns ++ annParen2AddEpAnn an
          cs' = cs S.<> epAnnComments (ann l) S.<> epAnnComments an
        in (anns', cs', args, res)

    -- Peel one arrow, pushing the arrow's comments onto the argument.
    go (L ll (HsFunTy (EpAnn _ _ cs) mult x y))
      | (anns, csy, args, res) <- splitHsFunType y
      = (anns, csy S.<> epAnnComments (ann ll), HsScaled mult x':args, res)
      where
        L l t = x
        x' = L (addCommentsToSrcAnn l cs) t

    go other = ([], emptyComments, [], other)

-- | Retrieve the name of the \"head\" of a nested type application.
-- This is somewhat like @GHC.Tc.Gen.HsType.splitHsAppTys@, but a little more
-- thorough. The purpose of this function is to examine instance heads, so it
-- doesn't handle *all* cases (like lists, tuples, @(~)@, etc.).
hsTyGetAppHead_maybe :: (Anno (IdGhcP p) ~ SrcSpanAnnN)
                     => LHsType (GhcPass p)
                     -> Maybe (LocatedN (IdP (GhcPass p)))
hsTyGetAppHead_maybe = go
  where
    go (L _ (HsTyVar _ _ ln))    = Just ln
    go (L _ (HsAppTy _ l _))     = go l
    go (L _ (HsAppKindTy _ t _)) = go t
    go (L _ (HsOpTy _ _ _ ln _)) = Just ln
    go (L _ (HsParTy _ t))       = go t
    go (L _ (HsKindSig _ t _))   = go t
    go _                         = Nothing

------------------------------------------------------------
-- | Compute the 'SrcSpan' associated with an 'LHsTypeArg'.
lhsTypeArgSrcSpan :: LHsTypeArg (GhcPass pass) -> SrcSpan
lhsTypeArgSrcSpan arg = case arg of
  HsValArg  tm    -> getLocA tm
  HsTypeArg at ty -> at `combineSrcSpans` getLocA ty
  HsArgPar  sp    -> sp

--------------------------------

-- | Decompose a pattern synonym type signature into its constituent parts.
--
-- Note that this function looks through parentheses, so it will work on types
-- such as @(forall a. <...>)@. The downside to this is that it is not
-- generally possible to take the returned types and reconstruct the original
-- type (parentheses and all) from them.
splitLHsPatSynTy ::
     LHsSigType (GhcPass p)
  -> ( [LHsTyVarBndr Specificity (GhcPass (NoGhcTcPass p))] -- universals
     , Maybe (LHsContext (GhcPass p))                       -- required constraints
     , [LHsTyVarBndr Specificity (GhcPass p)]               -- existentials
     , Maybe (LHsContext (GhcPass p))                       -- provided constraints
     , LHsType (GhcPass p))                                 -- body type
splitLHsPatSynTy ty = (univs, reqs, exis, provs, ty4)
  where
    -- split_sig_ty ::
    --      LHsSigType (GhcPass p)
    --   -> ([LHsTyVarBndr Specificity (GhcPass (NoGhcTcPass p))], LHsType (GhcPass p))
    split_sig_ty (L _ HsSig{sig_bndrs = outer_bndrs, sig_body = body}) =
      case outer_bndrs of
        -- NB: Use ignoreParens here in order to be consistent with the use of
        -- splitLHsForAllTyInvis below, which also looks through parentheses.
        HsOuterImplicit{}                      -> ([], ignoreParens body)
        HsOuterExplicit{hso_bndrs = exp_bndrs} -> (exp_bndrs, body)

    -- Peel the layers off one at a time: universals, required context,
    -- existentials, provided context, body.
    (univs, ty1)       = split_sig_ty ty
    (reqs,  ty2)       = splitLHsQualTy ty1
    ((_an, exis), ty3) = splitLHsForAllTyInvis ty2
    (provs, ty4)       = splitLHsQualTy ty3

-- | Decompose a sigma type (of the form @forall <tvs>. context => body@)
-- into its constituent parts.
-- Only splits type variable binders that were
-- quantified invisibly (e.g., @forall a.@, with a dot).
--
-- This function is used to split apart certain types, such as instance
-- declaration types, which disallow visible @forall@s. For instance, if GHC
-- split apart the @forall@ in @instance forall a -> Show (Blah a)@, then that
-- declaration would mistakenly be accepted!
--
-- Note that this function looks through parentheses, so it will work on types
-- such as @(forall a. <...>)@. The downside to this is that it is not
-- generally possible to take the returned types and reconstruct the original
-- type (parentheses and all) from them.
splitLHsSigmaTyInvis :: LHsType (GhcPass p)
                     -> ([LHsTyVarBndr Specificity (GhcPass p)]
                        , Maybe (LHsContext (GhcPass p)), LHsType (GhcPass p))
splitLHsSigmaTyInvis ty
  | ((_an,tvs), ty1) <- splitLHsForAllTyInvis ty
  , (ctxt, ty2)      <- splitLHsQualTy ty1
  = (tvs, ctxt, ty2)

-- | Decompose a GADT type into its constituent parts.
-- Returns @(outer_bndrs, mb_ctxt, body)@, where:
--
-- * @outer_bndrs@ are 'HsOuterExplicit' if the type has explicit, outermost
--   type variable binders. Otherwise, they are 'HsOuterImplicit'.
--
-- * @mb_ctxt@ is @Just@ the context, if it is provided.
--   Otherwise, it is @Nothing@.
--
-- * @body@ is the body of the type after the optional @forall@s and context.
--
-- This function is careful not to look through parentheses.
-- See @Note [GADT abstract syntax] (Wrinkle: No nested foralls or contexts)@
-- "GHC.Hs.Decls" for why this is important.
splitLHsGadtTy
  :: LHsSigType GhcPs
  -> (HsOuterSigTyVarBndrs GhcPs, Maybe (LHsContext GhcPs), LHsType GhcPs)
splitLHsGadtTy (L _ sig_ty)
  | (outer_bndrs, rho_ty) <- split_bndrs sig_ty
  , (mb_ctxt, tau_ty)     <- splitLHsQualTy_KP rho_ty
  = (outer_bndrs, mb_ctxt, tau_ty)
  where
    -- Read the outer binders straight off the HsSig record.
    split_bndrs :: HsSigType GhcPs -> (HsOuterSigTyVarBndrs GhcPs, LHsType GhcPs)
    split_bndrs (HsSig{sig_bndrs = outer_bndrs, sig_body = body_ty}) =
      (outer_bndrs, body_ty)

-- | Decompose a type of the form @forall <tvs>. body@ into its constituent
-- parts. Only splits type variable binders that
-- were quantified invisibly (e.g., @forall a.@, with a dot).
--
-- This function is used to split apart certain types, such as instance
-- declaration types, which disallow visible @forall@s. For instance, if GHC
-- split apart the @forall@ in @instance forall a -> Show (Blah a)@, then that
-- declaration would mistakenly be accepted!
--
-- Note that this function looks through parentheses, so it will work on types
-- such as @(forall a. <...>)@. The downside to this is that it is not
-- generally possible to take the returned types and reconstruct the original
-- type (parentheses and all) from them.
splitLHsForAllTyInvis ::
     LHsType (GhcPass pass)
  -> ( (EpAnnForallTy, [LHsTyVarBndr Specificity (GhcPass pass)])
     , LHsType (GhcPass pass))
splitLHsForAllTyInvis ty
  | ((mb_tvbs), body) <- splitLHsForAllTyInvis_KP (ignoreParens ty)
  = (fromMaybe (EpAnnNotUsed,[]) mb_tvbs, body)

-- | Decompose a type of the form @forall <tvs>. body@ into its constituent
-- parts. Only splits type variable binders that
-- were quantified invisibly (e.g., @forall a.@, with a dot).
--
-- This function is used to split apart certain types, such as instance
-- declaration types, which disallow visible @forall@s. For instance, if GHC
-- split apart the @forall@ in @instance forall a -> Show (Blah a)@, then that
-- declaration would mistakenly be accepted!
--
-- Unlike 'splitLHsForAllTyInvis', this function does not look through
-- parentheses, hence the suffix @_KP@ (short for \"Keep Parentheses\").
splitLHsForAllTyInvis_KP ::
     LHsType (GhcPass pass)
  -> (Maybe (EpAnnForallTy, [LHsTyVarBndr Specificity (GhcPass pass)])
     , LHsType (GhcPass pass))
splitLHsForAllTyInvis_KP lty@(L _ ty) =
  case ty of
    HsForAllTy { hst_tele = HsForAllInvis { hsf_xinvis = an
                                          , hsf_invis_bndrs = tvs }
               , hst_body = body }
      -> (Just (an, tvs), body)
    _ -> (Nothing, lty)

-- | Decompose a type of the form @context => body@ into its constituent parts.
--
-- Note that this function looks through parentheses, so it will work on types
-- such as @(context => <...>)@. The downside to this is that it is not
-- generally possible to take the returned types and reconstruct the original
-- type (parentheses and all) from them.
splitLHsQualTy :: LHsType (GhcPass pass)
               -> (Maybe (LHsContext (GhcPass pass)), LHsType (GhcPass pass))
splitLHsQualTy ty
  | (mb_ctxt, body) <- splitLHsQualTy_KP (ignoreParens ty)
  = (mb_ctxt, body)

-- | Decompose a type of the form @context => body@ into its constituent parts.
--
-- Unlike 'splitLHsQualTy', this function does not look through
-- parentheses, hence the suffix @_KP@ (short for \"Keep Parentheses\").
splitLHsQualTy_KP :: LHsType (GhcPass pass)
                  -> (Maybe (LHsContext (GhcPass pass)), LHsType (GhcPass pass))
splitLHsQualTy_KP (L _ (HsQualTy { hst_ctxt = ctxt, hst_body = body }))
  = (Just ctxt, body)
splitLHsQualTy_KP body = (Nothing, body)

-- | Decompose a type class instance type (of the form
-- @forall <tvs>. context => instance_head@) into its constituent parts.
-- Note that the @[Name]@s returned correspond to either:
--
-- * The implicitly bound type variables (if the type lacks an outermost
--   @forall@), or
--
-- * The explicitly bound type variables (if the type has an outermost
--   @forall@).
--
-- This function is careful not to look through parentheses.
-- See @Note [No nested foralls or contexts in instance types]@
-- for why this is important.
splitLHsInstDeclTy :: LHsSigType GhcRn
                   -> ([Name], Maybe (LHsContext GhcRn), LHsType GhcRn)
splitLHsInstDeclTy (L _ (HsSig{sig_bndrs = outer_bndrs, sig_body = inst_ty}))
  = (hsOuterTyVarNames outer_bndrs, mb_cxt, body_ty)
  where
    (mb_cxt, body_ty) = splitLHsQualTy_KP inst_ty

-- | Decompose a type class instance type (of the form
-- @forall <tvs>. context => instance_head@) into the @instance_head@.
getLHsInstDeclHead :: LHsSigType (GhcPass p) -> LHsType (GhcPass p)
getLHsInstDeclHead (L _ (HsSig{sig_body = qual_ty}))
  | (_mb_cxt, body_ty) <- splitLHsQualTy_KP qual_ty
  = body_ty

-- | Decompose a type class instance type (of the form
-- @forall <tvs>. context => instance_head@) into the @instance_head@ and
-- retrieve the underlying class type constructor (if it exists).
getLHsInstDeclClass_maybe :: (Anno (IdGhcP p) ~ SrcSpanAnnN)
                          => LHsSigType (GhcPass p)
                          -> Maybe (LocatedN (IdP (GhcPass p)))
-- Works on (LHsSigType GhcPs)
getLHsInstDeclClass_maybe inst_ty
  = do { let head_ty = getLHsInstDeclHead inst_ty
       ; hsTyGetAppHead_maybe head_ty
       }

{-
Note [No nested foralls or contexts in instance types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The type at the top of an instance declaration is one of the few places in GHC
where nested `forall`s or contexts are not permitted, even with RankNTypes
enabled. For example, the following will be rejected:

  instance forall a. forall b. Show (Either a b) where ...
  instance Eq a => Eq b => Show (Either a b) where ...
  instance (forall a. Show (Maybe a)) where ...
  instance (Eq a => Show (Maybe a)) where ...

This restriction is partly motivated by an unusual quirk of instance
declarations. Namely, if ScopedTypeVariables is enabled, then the type
variables from the top of an instance will scope over the bodies of the
instance methods, /even if the type variables are implicitly quantified/.
For example, GHC will accept the following:

  instance Monoid a => Monoid (Identity a) where
    mempty = Identity (mempty @a)

Moreover, the type in the top of an instance declaration must obey the
forall-or-nothing rule (see Note [forall-or-nothing rule]).
If instance types allowed nested `forall`s, this could result in some strange
interactions. For example, consider the following:

  class C a where
    m :: Proxy a
  instance (forall a. C (Either a b)) where
    m = Proxy @(Either a b)

Somewhat surprisingly, old versions of GHC would accept the instance above.
Even though the `forall` only quantifies `a`, the outermost parentheses
mean that the `forall` is nested, and per the forall-or-nothing rule, this
means that implicit quantification would occur. Therefore, the `a` is
explicitly bound and the `b` is implicitly bound. Moreover,
ScopedTypeVariables would bring /both/ sorts of type variables into scope over
the body of `m`. How utterly confusing!

To avoid this sort of confusion, we simply disallow nested `forall`s in
instance types, which makes things like the instance above become illegal.
For the sake of consistency, we also disallow nested contexts, even though
they don't have the same strange interaction with ScopedTypeVariables.

Just as we forbid nested `forall`s and contexts in normal instance
declarations, we also forbid them in SPECIALISE instance pragmas (#18455).
Unlike normal instance declarations, ScopedTypeVariables don't have any impact
on SPECIALISE instance pragmas, but we use the same validity checks for
SPECIALISE instance pragmas anyway to be consistent.

-----
-- Wrinkle: Derived instances
-----

`deriving` clauses and standalone `deriving` declarations also permit bringing
type variables into scope, either through explicit or implicit quantification.
Unlike in the tops of instance declarations, however, one does not need to
enable ScopedTypeVariables for this to take effect.

Just as GHC forbids nested `forall`s in the top of instance declarations, it
also forbids them in types involved with `deriving`:

1. In the `via` types in DerivingVia. For example, this is rejected:

     deriving via (forall x. V x) instance C (S x)

   Just like the types in instance declarations, `via` types can also bring
   both implicitly and explicitly bound type variables into scope. As a
   result, we adopt the same no-nested-`forall`s rule in `via` types to
   avoid confusing behavior like in the example below:

     deriving via (forall x. T x y) instance W x y (Foo a b)
     -- Both x and y are brought into scope???

2. In the classes in `deriving` clauses. For example, this is rejected:

     data T = MkT deriving (C1, (forall x. C2 x y))

   This is because the generated instance would look like:

     instance forall x y. C2 x y T where ...

   So really, the same concerns as instance declarations apply here as well.
-}

{-
************************************************************************
*                                                                      *
                FieldOcc
*                                                                      *
************************************************************************
-}

type instance XCFieldOcc GhcPs = NoExtField
type instance XCFieldOcc GhcRn = Name
type instance XCFieldOcc GhcTc = Id

type instance XXFieldOcc (GhcPass _) = DataConCantHappen

mkFieldOcc :: LocatedN RdrName -> FieldOcc GhcPs
mkFieldOcc rdr = FieldOcc noExtField rdr

type instance XUnambiguous GhcPs = NoExtField
type instance XUnambiguous GhcRn = Name
type instance XUnambiguous GhcTc = Id

type instance XAmbiguous GhcPs = NoExtField
type instance XAmbiguous GhcRn = NoExtField
type instance XAmbiguous GhcTc = Id

type instance XXAmbiguousFieldOcc (GhcPass _) = DataConCantHappen

instance Outputable (AmbiguousFieldOcc (GhcPass p)) where
  ppr = ppr . rdrNameAmbiguousFieldOcc

instance OutputableBndr (AmbiguousFieldOcc (GhcPass p)) where
  pprInfixOcc  = pprInfixOcc . rdrNameAmbiguousFieldOcc
  pprPrefixOcc = pprPrefixOcc . rdrNameAmbiguousFieldOcc

instance OutputableBndr (Located (AmbiguousFieldOcc (GhcPass p))) where
  pprInfixOcc  = pprInfixOcc . unLoc
  pprPrefixOcc = pprPrefixOcc . unLoc

mkAmbiguousFieldOcc :: LocatedN RdrName -> AmbiguousFieldOcc GhcPs
mkAmbiguousFieldOcc rdr = Unambiguous noExtField rdr

-- | The 'RdrName' of either constructor of an 'AmbiguousFieldOcc'.
rdrNameAmbiguousFieldOcc :: AmbiguousFieldOcc (GhcPass p) -> RdrName
rdrNameAmbiguousFieldOcc (Unambiguous _ (L _ rdr)) = rdr
rdrNameAmbiguousFieldOcc (Ambiguous   _ (L _ rdr)) = rdr

-- | The selector 'Id' of either constructor of a typechecked
-- 'AmbiguousFieldOcc'.
selectorAmbiguousFieldOcc :: AmbiguousFieldOcc GhcTc -> Id
selectorAmbiguousFieldOcc (Unambiguous sel _) = sel
selectorAmbiguousFieldOcc (Ambiguous   sel _) = sel

unambiguousFieldOcc :: AmbiguousFieldOcc GhcTc -> FieldOcc GhcTc
unambiguousFieldOcc (Unambiguous rdr sel) = FieldOcc rdr sel
unambiguousFieldOcc (Ambiguous   rdr sel) = FieldOcc rdr sel

ambiguousFieldOcc :: FieldOcc GhcTc -> AmbiguousFieldOcc GhcTc
ambiguousFieldOcc (FieldOcc sel rdr) = Unambiguous sel rdr

{-
************************************************************************
*                                                                      *
\subsection{Pretty printing}
*                                                                      *
************************************************************************
-}

class OutputableBndrFlag flag p where
    pprTyVarBndr :: OutputableBndrId p => HsTyVarBndr flag (GhcPass p) -> SDoc

instance OutputableBndrFlag () p where
    pprTyVarBndr (UserTyVar _ _ n)     = ppr n
    pprTyVarBndr (KindedTyVar _ _ n k) = parens $ hsep [ppr n, dcolon, ppr k]

instance OutputableBndrFlag Specificity p where
    -- Inferred binders are printed in braces, e.g. @forall {a}. ...@
    pprTyVarBndr (UserTyVar _ SpecifiedSpec n)     = ppr n
    pprTyVarBndr (UserTyVar _ InferredSpec n)      = braces $ ppr n
    pprTyVarBndr (KindedTyVar _ SpecifiedSpec n k) = parens $ hsep [ppr n, dcolon, ppr k]
    pprTyVarBndr (KindedTyVar _ InferredSpec n k)  = braces $ hsep [ppr n, dcolon, ppr k]

instance OutputableBndrId p => Outputable (HsSigType (GhcPass p)) where
    ppr (HsSig { sig_bndrs = outer_bndrs, sig_body = body }) =
      pprHsOuterSigTyVarBndrs outer_bndrs <+> ppr body

instance OutputableBndrId p => Outputable (HsType (GhcPass p)) where
    ppr ty = pprHsType ty

instance OutputableBndrId p => Outputable (LHsQTyVars (GhcPass p)) where
    ppr (HsQTvs { hsq_explicit = tvs }) = interppSP tvs

instance (OutputableBndrFlag flag p,
          OutputableBndrFlag flag (NoGhcTcPass p),
          OutputableBndrId p)
       => Outputable (HsOuterTyVarBndrs flag (GhcPass p)) where
    ppr (HsOuterImplicit{hso_ximplicit = imp_tvs}) =
      text "HsOuterImplicit:" <+>
      -- NOTE(review): all three branches look identical; the case on
      -- @ghcPass @p@ appears to be needed to resolve the pass-indexed type
      -- of hso_ximplicit in each branch — confirm before simplifying.
      case ghcPass @p of
        GhcPs -> ppr imp_tvs
        GhcRn -> ppr imp_tvs
        GhcTc -> ppr imp_tvs
    ppr (HsOuterExplicit{hso_bndrs = exp_tvs}) =
      text "HsOuterExplicit:" <+> ppr exp_tvs

instance OutputableBndrId p => Outputable (HsForAllTelescope (GhcPass p)) where
    ppr (HsForAllVis { hsf_vis_bndrs = bndrs }) =
      text "HsForAllVis:" <+> ppr bndrs
    ppr (HsForAllInvis { hsf_invis_bndrs = bndrs }) =
      text "HsForAllInvis:" <+> ppr bndrs

instance (OutputableBndrId p, OutputableBndrFlag flag p)
       => Outputable (HsTyVarBndr flag (GhcPass p)) where
    ppr = pprTyVarBndr

instance Outputable thing
       => Outputable (HsWildCardBndrs (GhcPass p) thing) where
    ppr (HsWC { hswc_body = ty }) = ppr ty

instance (OutputableBndrId p)
       => Outputable (HsPatSigType (GhcPass p)) where
    ppr (HsPS { hsps_body = ty }) = ppr ty

pprAnonWildCard :: SDoc
pprAnonWildCard = char '_'

-- | Prints the explicit @forall@ in a type family equation if one is written.
-- If there is no explicit @forall@, nothing is printed.
pprHsOuterFamEqnTyVarBndrs :: OutputableBndrId p
                           => HsOuterFamEqnTyVarBndrs (GhcPass p) -> SDoc
pprHsOuterFamEqnTyVarBndrs (HsOuterImplicit{}) = empty
pprHsOuterFamEqnTyVarBndrs (HsOuterExplicit{hso_bndrs = qtvs}) =
  forAllLit <+> interppSP qtvs <> dot

-- | Prints the outermost @forall@ in a type signature if one is written.
-- If there is no outermost @forall@, nothing is printed.
pprHsOuterSigTyVarBndrs :: OutputableBndrId p
                        => HsOuterSigTyVarBndrs (GhcPass p) -> SDoc
pprHsOuterSigTyVarBndrs (HsOuterImplicit{}) = empty
pprHsOuterSigTyVarBndrs (HsOuterExplicit{hso_bndrs = bndrs}) =
  pprHsForAll (mkHsForAllInvisTele noAnn bndrs) Nothing

-- | Prints a forall; When passed an empty list, prints @forall .@/@forall ->@
-- only when @-dppr-debug@ is enabled.
pprHsForAll :: forall p. OutputableBndrId p
            => HsForAllTelescope (GhcPass p)
            -> Maybe (LHsContext (GhcPass p)) -> SDoc
pprHsForAll tele cxt
  = pp_tele tele <+> pprLHsContext cxt
  where
    pp_tele :: HsForAllTelescope (GhcPass p) -> SDoc
    pp_tele tele = case tele of
      -- Visible foralls end in an arrow, invisible ones in a dot.
      HsForAllVis   { hsf_vis_bndrs   = qtvs } -> pp_forall (space <> arrow) qtvs
      HsForAllInvis { hsf_invis_bndrs = qtvs } -> pp_forall dot qtvs

    pp_forall :: forall flag p. (OutputableBndrId p, OutputableBndrFlag flag p)
              => SDoc -> [LHsTyVarBndr flag (GhcPass p)] -> SDoc
    pp_forall separator qtvs
      | null qtvs = whenPprDebug (forAllLit <> separator)
      -- Note: to fix the PprRecordDotSyntax1 ppr roundtrip test, the <>
      -- below needs to be <+>. But it means 94 other test results need to
      -- be updated to match.
      | otherwise = forAllLit <+> interppSP qtvs <> separator

pprLHsContext :: (OutputableBndrId p)
              => Maybe (LHsContext (GhcPass p)) -> SDoc
pprLHsContext Nothing      = empty
pprLHsContext (Just lctxt) = pprLHsContextAlways lctxt

-- For use in a HsQualTy, which always gets printed if it exists.
pprLHsContextAlways :: (OutputableBndrId p)
                    => LHsContext (GhcPass p) -> SDoc
pprLHsContextAlways (L _ ctxt)
  = case ctxt of
      []       -> parens empty             <+> darrow
      [L _ ty] -> ppr_mono_ty ty           <+> darrow
      _        -> parens (interpp'SP ctxt) <+> darrow

pprConDeclFields :: OutputableBndrId p
                 => [LConDeclField (GhcPass p)] -> SDoc
pprConDeclFields fields = braces (sep (punctuate comma (map ppr_fld fields)))
  where
    ppr_fld (L _ (ConDeclField { cd_fld_names = ns, cd_fld_type = ty,
                                 cd_fld_doc = doc }))
        = pprMaybeWithDoc doc (ppr_names ns <+> dcolon <+> ppr ty)

    ppr_names :: [LFieldOcc (GhcPass p)] -> SDoc
    ppr_names [n] = pprPrefixOcc n
    ppr_names ns  = sep (punctuate comma (map pprPrefixOcc ns))

{-
Note [Printing KindedTyVars]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#3830 reminded me that we should really only print the kind signature on a
KindedTyVar if the kind signature was put there by the programmer.
During kind inference GHC now adds a PostTcKind to UserTyVars, rather than
converting to KindedTyVars as before.

(As it happens, the message in #3830 comes out a different way now, and the
problem doesn't show up; but having the flag on a KindedTyVar seems like the
Right Thing anyway.)
-}

-- Printing works more-or-less as for Types

pprHsType :: (OutputableBndrId p) => HsType (GhcPass p) -> SDoc
pprHsType ty = ppr_mono_ty ty

ppr_mono_lty :: OutputableBndrId p => LHsType (GhcPass p) -> SDoc
ppr_mono_lty ty = ppr_mono_ty (unLoc ty)

-- | Workhorse pretty-printer for 'HsType': one equation per constructor.
ppr_mono_ty :: (OutputableBndrId p) => HsType (GhcPass p) -> SDoc
ppr_mono_ty (HsForAllTy { hst_tele = tele, hst_body = ty })
  = sep [pprHsForAll tele Nothing, ppr_mono_lty ty]
ppr_mono_ty (HsQualTy { hst_ctxt = ctxt, hst_body = ty })
  = sep [pprLHsContextAlways ctxt, ppr_mono_lty ty]

ppr_mono_ty (HsBangTy _ b ty)           = ppr b <> ppr_mono_lty ty
ppr_mono_ty (HsRecTy _ flds)            = pprConDeclFields flds
ppr_mono_ty (HsTyVar _ prom (L _ name)) = pprOccWithTick Prefix prom name
ppr_mono_ty (HsFunTy _ mult ty1 ty2)    = ppr_fun_ty mult ty1 ty2
ppr_mono_ty (HsTupleTy _ con tys)
    -- Special-case unary boxed tuples so that they are pretty-printed as
    -- `Solo x`, not `(x)`
  | [ty] <- tys
  , BoxedTuple <- std_con
  = sep [text (mkTupleStr Boxed 1), ppr_mono_lty ty]
  | otherwise
  = tupleParens std_con (pprWithCommas ppr tys)
  where
    std_con = case con of
                HsUnboxedTuple -> UnboxedTuple
                _              -> BoxedTuple
ppr_mono_ty (HsSumTy _ tys)
  = tupleParens UnboxedTuple (pprWithBars ppr tys)
ppr_mono_ty (HsKindSig _ ty kind)
  = ppr_mono_lty ty <+> dcolon <+> ppr kind
ppr_mono_ty (HsListTy _ ty)     = brackets (ppr_mono_lty ty)
ppr_mono_ty (HsIParamTy _ n ty) = (ppr n <+> dcolon <+> ppr_mono_lty ty)
ppr_mono_ty (HsSpliceTy _ s)    = pprSplice s
ppr_mono_ty (HsExplicitListTy _ prom tys)
  | isPromoted prom = quote $ brackets (maybeAddSpace tys $ interpp'SP tys)
  | otherwise       = brackets (interpp'SP tys)
ppr_mono_ty (HsExplicitTupleTy _ tys)
    -- Special-case unary boxed tuples so that they are pretty-printed as
    -- `'Solo x`, not `'(x)`
  | [ty] <- tys
  = quote $ sep [text (mkTupleStr Boxed 1), ppr_mono_lty ty]
  | otherwise
  = quote $ parens (maybeAddSpace tys $ interpp'SP tys)
ppr_mono_ty (HsTyLit _ t)      = ppr t
ppr_mono_ty (HsWildCardTy {})  = char '_'
ppr_mono_ty (HsStarTy _ isUni) = char (if isUni then '★' else '*')

ppr_mono_ty (HsAppTy _ fun_ty arg_ty)
  = hsep [ppr_mono_lty fun_ty, ppr_mono_lty arg_ty]
ppr_mono_ty (HsAppKindTy _ ty k)
  = ppr_mono_lty ty <+> char '@' <> ppr_mono_lty k
ppr_mono_ty (HsOpTy _ prom ty1 (L _ op) ty2)
  = sep [ ppr_mono_lty ty1
        , sep [pprOccWithTick Infix prom op, ppr_mono_lty ty2 ] ]
ppr_mono_ty (HsParTy _ ty)
  = parens (ppr_mono_lty ty)
  -- Put the parens in where the user did
  -- But we still use the precedence stuff to add parens because
  --    toHsType doesn't put in any HsParTys, so we may still need them

ppr_mono_ty (HsDocTy _ ty doc)
  = pprWithDoc doc $ ppr_mono_lty ty

ppr_mono_ty (XHsType t) = ppr t

--------------------------
ppr_fun_ty :: (OutputableBndrId p)
           => HsArrow (GhcPass p)
           -> LHsType (GhcPass p) -> LHsType (GhcPass p) -> SDoc
ppr_fun_ty mult ty1 ty2
  = let p1  = ppr_mono_lty ty1
        p2  = ppr_mono_lty ty2
        arr = pprHsArrow mult
    in
    sep [p1, arr <+> p2]

--------------------------
-- | @'hsTypeNeedsParens' p t@ returns 'True' if the type @t@ needs parentheses
-- under precedence @p@.
hsTypeNeedsParens :: PprPrec -> HsType (GhcPass p) -> Bool
hsTypeNeedsParens p = go_hs_ty
  where
    go_hs_ty (HsForAllTy{})       = p >= funPrec
    go_hs_ty (HsQualTy{})         = p >= funPrec
    go_hs_ty (HsBangTy{})         = p > topPrec
    go_hs_ty (HsRecTy{})          = False
    go_hs_ty (HsTyVar{})          = False
    go_hs_ty (HsFunTy{})          = p >= funPrec
    -- Special-case unary boxed tuple applications so that they are
    -- parenthesized as `Identity (Solo x)`, not `Identity Solo x` (#18612)
    -- See Note [One-tuples] in GHC.Builtin.Types
    go_hs_ty (HsTupleTy _ con [_])
      = case con of
          HsBoxedOrConstraintTuple -> p >= appPrec
          HsUnboxedTuple           -> False
    go_hs_ty (HsTupleTy{})        = False
    go_hs_ty (HsSumTy{})          = False
    go_hs_ty (HsKindSig{})        = p >= sigPrec
    go_hs_ty (HsListTy{})         = False
    go_hs_ty (HsIParamTy{})       = p > topPrec
    go_hs_ty (HsSpliceTy{})       = False
    go_hs_ty (HsExplicitListTy{}) = False
    -- Special-case unary boxed tuple applications so that they are
    -- parenthesized as `Proxy ('Solo x)`, not `Proxy 'Solo x` (#18612)
    -- See Note [One-tuples] in GHC.Builtin.Types
    go_hs_ty (HsExplicitTupleTy _ [_]) = p >= appPrec
    go_hs_ty (HsExplicitTupleTy{})     = False
    go_hs_ty (HsTyLit{})          = False
    go_hs_ty (HsWildCardTy{})     = False
    go_hs_ty (HsStarTy{})         = p >= starPrec
    go_hs_ty (HsAppTy{})          = p >= appPrec
    go_hs_ty (HsAppKindTy{})      = p >= appPrec
    go_hs_ty (HsOpTy{})           = p >= opPrec
    go_hs_ty (HsParTy{})          = False
    go_hs_ty (HsDocTy _ (L _ t) _) = go_hs_ty t
    go_hs_ty (XHsType ty)          = go_core_ty ty

    -- The XHsType case embeds a Core Type; apply the same precedence rules.
    go_core_ty (TyVarTy{})    = False
    go_core_ty (AppTy{})      = p >= appPrec
    go_core_ty (TyConApp _ args)
      | null args             = False
      | otherwise             = p >= appPrec
    go_core_ty (ForAllTy{})   = p >= funPrec
    go_core_ty (FunTy{})      = p >= funPrec
    go_core_ty (LitTy{})      = False
    go_core_ty (CastTy t _)   = go_core_ty t
    go_core_ty (CoercionTy{}) = False

maybeAddSpace :: [LHsType (GhcPass p)] -> SDoc -> SDoc
-- See Note [Printing promoted type constructors]
-- in GHC.Iface.Type.  This code implements the same
-- logic for printing HsType
maybeAddSpace tys doc
  | (ty : _) <- tys
  , lhsTypeHasLeadingPromotionQuote ty = space <> doc
  | otherwise                          = doc

-- | Does the printed form of this type start with a promotion tick @'@?
-- Used to decide whether a separating space is needed after an enclosing
-- quote (see 'maybeAddSpace').
lhsTypeHasLeadingPromotionQuote :: LHsType (GhcPass p) -> Bool
lhsTypeHasLeadingPromotionQuote ty
  = goL ty
  where
    goL (L _ ty) = go ty

    go (HsForAllTy{})        = False
    go (HsQualTy{ hst_ctxt = ctxt, hst_body = body})
      | (L _ (c:_)) <- ctxt  = goL c
      | otherwise            = goL body
    go (HsBangTy{})          = False
    go (HsRecTy{})           = False
    go (HsTyVar _ p _)       = isPromoted p
    go (HsFunTy _ _ arg _)   = goL arg
    go (HsListTy{})          = False
    go (HsTupleTy{})         = False
    go (HsSumTy{})           = False
    go (HsOpTy _ _ t1 _ _)   = goL t1
    go (HsKindSig _ t _)     = goL t
    go (HsIParamTy{})        = False
    go (HsSpliceTy{})        = False
    go (HsExplicitListTy _ p _) = isPromoted p
    go (HsExplicitTupleTy{}) = True
    go (HsTyLit{})           = False
    go (HsWildCardTy{})      = False
    go (HsStarTy{})          = False
    go (HsAppTy _ t _)       = goL t
    go (HsAppKindTy _ t _)   = goL t
    go (HsParTy{})           = False
    go (HsDocTy _ t _)       = goL t
    go (XHsType{})           = False

-- | @'parenthesizeHsType' p ty@ checks if @'hsTypeNeedsParens' p ty@ is
-- true, and if so, surrounds @ty@ with an 'HsParTy'. Otherwise, it simply
-- returns @ty@.
parenthesizeHsType :: PprPrec -> LHsType (GhcPass p) -> LHsType (GhcPass p)
parenthesizeHsType p lty@(L loc ty)
  | hsTypeNeedsParens p ty = L loc (HsParTy noAnn lty)
  | otherwise              = lty

-- | @'parenthesizeHsContext' p ctxt@ checks if @ctxt@ is a single constraint
-- @c@ such that @'hsTypeNeedsParens' p c@ is true, and if so, surrounds @c@
-- with an 'HsParTy' to form a parenthesized @ctxt@. Otherwise, it simply
-- returns @ctxt@ unchanged.
parenthesizeHsContext :: PprPrec
                      -> LHsContext (GhcPass p) -> LHsContext (GhcPass p)
parenthesizeHsContext p lctxt@(L loc ctxt) =
  case ctxt of
    [c] -> L loc [parenthesizeHsType p c]
    _   -> lctxt -- Other contexts are already "parenthesized" by virtue of
                 -- being tuples.
{- ************************************************************************ * * \subsection{Anno instances} * * ************************************************************************ -} type instance Anno (BangType (GhcPass p)) = SrcSpanAnnA type instance Anno [LocatedA (HsType (GhcPass p))] = SrcSpanAnnC type instance Anno (HsType (GhcPass p)) = SrcSpanAnnA type instance Anno (HsSigType (GhcPass p)) = SrcSpanAnnA type instance Anno (HsKind (GhcPass p)) = SrcSpanAnnA type instance Anno (HsTyVarBndr _flag (GhcPass _)) = SrcSpanAnnA -- Explicit pass Anno instances needed because of the NoGhcTc field type instance Anno (HsTyVarBndr _flag GhcPs) = SrcSpanAnnA type instance Anno (HsTyVarBndr _flag GhcRn) = SrcSpanAnnA type instance Anno (HsTyVarBndr _flag GhcTc) = SrcSpanAnnA type instance Anno (HsOuterTyVarBndrs _ (GhcPass _)) = SrcSpanAnnA type instance Anno HsIPName = SrcAnn NoEpAnns type instance Anno (ConDeclField (GhcPass p)) = SrcSpanAnnA type instance Anno (FieldOcc (GhcPass p)) = SrcAnn NoEpAnns type instance Anno (AmbiguousFieldOcc (GhcPass p)) = SrcAnn NoEpAnns ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Utils.hs0000644000000000000000000020143114472400112017642 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-| Module : GHC.Hs.Utils Description : Generic helpers for the HsSyn type. Copyright : (c) The University of Glasgow, 1992-2006 Here we collect a variety of helper functions that construct or analyse HsSyn. All these functions deal with generic HsSyn; functions which deal with the instantiated versions are located elsewhere: Parameterised by Module ---------------- ------------- GhcPs/RdrName GHC.Parser.PostProcess GhcRn/Name GHC.Rename.* GhcTc/Id GHC.Tc.Utils.Zonk The @mk*@ functions attempt to construct a not-completely-useless SrcSpan from their components, compared with the @nl*@ functions which just attach noSrcSpan to everything. 
-} {-# LANGUAGE CPP #-} {-# LANGUAGE AllowAmbiguousTypes #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ViewPatterns #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Hs.Utils( -- * Terms mkHsPar, mkHsApp, mkHsAppWith, mkHsApps, mkHsAppsWith, mkHsAppType, mkHsAppTypes, mkHsCaseAlt, mkSimpleMatch, unguardedGRHSs, unguardedRHS, mkMatchGroup, mkLamCaseMatchGroup, mkMatch, mkPrefixFunRhs, mkHsLam, mkHsIf, mkHsWrap, mkLHsWrap, mkHsWrapCo, mkHsWrapCoR, mkLHsWrapCo, mkHsDictLet, mkHsLams, mkHsOpApp, mkHsDo, mkHsDoAnns, mkHsComp, mkHsCompAnns, mkHsWrapPat, mkHsWrapPatCo, mkLHsPar, mkHsCmdWrap, mkLHsCmdWrap, mkHsCmdIf, mkConLikeTc, nlHsTyApp, nlHsTyApps, nlHsVar, nl_HsVar, nlHsDataCon, nlHsLit, nlHsApp, nlHsApps, nlHsSyntaxApps, nlHsIntLit, nlHsVarApps, nlHsDo, nlHsOpApp, nlHsLam, nlHsPar, nlHsIf, nlHsCase, nlList, mkLHsTupleExpr, mkLHsVarTuple, missingTupArg, mkLocatedList, -- * Constructing general big tuples -- $big_tuples mkChunkified, chunkify, -- * Bindings mkFunBind, mkVarBind, mkHsVarBind, mkSimpleGeneratedFunBind, mkTopFunBind, mkPatSynBind, isInfixFunBind, spanHsLocaLBinds, -- * Literals mkHsIntegral, mkHsFractional, mkHsIsString, mkHsString, mkHsStringPrimLit, mkHsCharPrimLit, -- * Patterns mkNPat, mkNPlusKPat, nlVarPat, nlLitPat, nlConVarPat, nlConVarPatName, nlConPat, nlConPatName, nlInfixConPat, nlNullaryConPat, nlWildConPat, nlWildPat, nlWildPatName, nlTuplePat, mkParPat, nlParPat, mkBigLHsVarTup, mkBigLHsTup, mkBigLHsVarPatTup, mkBigLHsPatTup, -- * Types mkHsAppTy, mkHsAppKindTy, hsTypeToHsSigType, hsTypeToHsSigWcType, mkClassOpSigs, mkHsSigEnv, nlHsAppTy, nlHsAppKindTy, nlHsTyVar, nlHsFunTy, nlHsParTy, nlHsTyConApp, -- * Stmts mkTransformStmt, 
mkTransformByStmt, mkBodyStmt, mkPsBindStmt, mkRnBindStmt, mkTcBindStmt, mkLastStmt, emptyTransStmt, mkGroupUsingStmt, mkGroupByUsingStmt, emptyRecStmt, emptyRecStmtName, emptyRecStmtId, mkRecStmt, unitRecStmtTc, mkLetStmt, -- * Template Haskell mkUntypedSplice, mkTypedSplice, mkHsQuasiQuote, -- * Collecting binders isUnliftedHsBind, isBangedHsBind, collectLocalBinders, collectHsValBinders, collectHsBindListBinders, collectHsIdBinders, collectHsBindsBinders, collectHsBindBinders, collectMethodBinders, collectPatBinders, collectPatsBinders, collectLStmtsBinders, collectStmtsBinders, collectLStmtBinders, collectStmtBinders, CollectPass(..), CollectFlag(..), hsLTyClDeclBinders, hsTyClForeignBinders, hsPatSynSelectors, getPatSynBinds, hsForeignDeclsBinders, hsGroupBinders, hsDataFamInstBinders, -- * Collecting implicit binders lStmtsImplicits, hsValBindsImplicits, lPatImplicits ) where import GHC.Prelude import GHC.Hs.Decls import GHC.Hs.Binds import GHC.Hs.Expr import GHC.Hs.Pat import GHC.Hs.Type import GHC.Hs.Lit import Language.Haskell.Syntax.Extension import GHC.Hs.Extension import GHC.Parser.Annotation import GHC.Tc.Types.Evidence import GHC.Core.TyCo.Rep import GHC.Core.Multiplicity ( pattern Many ) import GHC.Builtin.Types ( unitTy ) import GHC.Tc.Utils.TcType import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Types.Id import GHC.Types.Name import GHC.Types.Name.Set hiding ( unitFV ) import GHC.Types.Name.Env import GHC.Types.Name.Reader import GHC.Types.Var import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Types.Fixity import GHC.Types.SourceText import GHC.Data.FastString import GHC.Data.Bag import GHC.Settings.Constants import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Either import Data.Function import Data.List ( partition, deleteBy ) {- ************************************************************************ * * Some useful helpers for constructing syntax * * 
************************************************************************

These functions attempt to construct a not-completely-useless 'SrcSpan'
from their components, compared with the @nl*@ functions below which
just attach 'noSrcSpan' to everything.
-}

-- | @e => (e)@
-- The parenthesised expression reuses the location of its payload.
mkHsPar :: LHsExpr (GhcPass id) -> LHsExpr (GhcPass id)
mkHsPar e = L (getLoc e) (gHsPar e)

-- | Build a single 'Match' with no guards and no local binds.
-- The span of the result combines the first pattern's span with the
-- right-hand side's span (or is just the RHS span when there are no
-- patterns).
mkSimpleMatch :: (Anno (Match (GhcPass p) (LocatedA (body (GhcPass p))))
                        ~ SrcSpanAnnA,
                  Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p))))
                        ~ SrcAnn NoEpAnns)
              => HsMatchContext (GhcPass p)
              -> [LPat (GhcPass p)] -> LocatedA (body (GhcPass p))
              -> LMatch (GhcPass p) (LocatedA (body (GhcPass p)))
mkSimpleMatch ctxt pats rhs
  = L loc $
    Match { m_ext = noAnn, m_ctxt = ctxt, m_pats = pats
          , m_grhss = unguardedGRHSs (locA loc) rhs noAnn }
  where
    loc = case pats of
                []      -> getLoc rhs
                (pat:_) -> combineSrcSpansA (getLoc pat) (getLoc rhs)

-- | Wrap a right-hand side as a guard-free 'GRHSs', with no local binds.
unguardedGRHSs :: Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p))))
                     ~ SrcAnn NoEpAnns
               => SrcSpan -> LocatedA (body (GhcPass p)) -> EpAnn GrhsAnn
               -> GRHSs (GhcPass p) (LocatedA (body (GhcPass p)))
unguardedGRHSs loc rhs an
  = GRHSs emptyComments (unguardedRHS an loc rhs) emptyLocalBinds

-- | A singleton list containing one 'GRHS' with an empty guard list.
unguardedRHS :: Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p))))
                     ~ SrcAnn NoEpAnns
             => EpAnn GrhsAnn -> SrcSpan -> LocatedA (body (GhcPass p))
             -> [LGRHS (GhcPass p) (LocatedA (body (GhcPass p)))]
unguardedRHS an loc rhs = [L (noAnnSrcSpan loc) (GRHS an [] rhs)]

-- | Constraint bundle shared by the match-group builders below: fixes the
-- 'Anno' instances of a match group over an arbitrary @body@.
type AnnoBody p body
  = ( XMG (GhcPass p) (LocatedA (body (GhcPass p))) ~ NoExtField
    , Anno [LocatedA (Match (GhcPass p) (LocatedA (body (GhcPass p))))]
                   ~ SrcSpanAnnL
    , Anno (Match (GhcPass p) (LocatedA (body (GhcPass p))))
                   ~ SrcSpanAnnA
    )

-- | Package a list of matches as a 'MatchGroup' with the given 'Origin'.
mkMatchGroup :: AnnoBody p body
             => Origin
             -> LocatedL [LocatedA (Match (GhcPass p) (LocatedA (body (GhcPass p))))]
             -> MatchGroup (GhcPass p) (LocatedA (body (GhcPass p)))
mkMatchGroup origin matches = MG { mg_ext = noExtField
                                 , mg_alts = matches
                                 , mg_origin = origin }

-- | Like 'mkMatchGroup', but rewrites every match's context to
-- @LamCaseAlt lc_variant@ (for @\\case@ / @\\cases@ alternatives).
mkLamCaseMatchGroup :: AnnoBody p body
                    => Origin
                    -> LamCaseVariant
                    -> LocatedL [LocatedA (Match (GhcPass p) (LocatedA (body (GhcPass p))))]
                    -> MatchGroup (GhcPass p) (LocatedA (body (GhcPass p)))
mkLamCaseMatchGroup origin lc_variant (L l matches)
  = mkMatchGroup origin (L l $ map fixCtxt matches)
  where fixCtxt (L a match) = L a match{m_ctxt = LamCaseAlt lc_variant}

-- | Locate a list of located things: the result span covers the first
-- through the last element (or is 'noLocA' for an empty list).
mkLocatedList :: Semigroup a
  => [GenLocated (SrcAnn a) e2] -> LocatedAn an [GenLocated (SrcAnn a) e2]
mkLocatedList [] = noLocA []
mkLocatedList ms = L (noAnnSrcSpan $ locA $ combineLocsA (head ms) (last ms)) ms

-- | Application @f x@, locating the result across both operands.
mkHsApp :: LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> LHsExpr (GhcPass id)
mkHsApp e1 e2 = addCLocAA e1 e2 (HsApp noComments e1 e2)

-- | Like 'mkHsApp' but the caller supplies the locating function.
mkHsAppWith
  :: (LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> HsExpr (GhcPass id) -> LHsExpr (GhcPass id))
  -> LHsExpr (GhcPass id)
  -> LHsExpr (GhcPass id)
  -> LHsExpr (GhcPass id)
mkHsAppWith mkLocated e1 e2 = mkLocated e1 e2 (HsApp noAnn e1 e2)

-- | Nested application @f x1 x2 ...@, left-associated.
mkHsApps
  :: LHsExpr (GhcPass id) -> [LHsExpr (GhcPass id)] -> LHsExpr (GhcPass id)
mkHsApps = mkHsAppsWith addCLocAA

-- | 'mkHsApps' with a caller-supplied locating function.
mkHsAppsWith
 :: (LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> HsExpr (GhcPass id) -> LHsExpr (GhcPass id))
 -> LHsExpr (GhcPass id)
 -> [LHsExpr (GhcPass id)]
 -> LHsExpr (GhcPass id)
mkHsAppsWith mkLocated = foldl' (mkHsAppWith mkLocated)

-- | Visible type application @e \@ty@; the type argument is
-- parenthesized at application precedence.
mkHsAppType :: LHsExpr GhcRn -> LHsWcType GhcRn -> LHsExpr GhcRn
mkHsAppType e t = addCLocAA t_body e (HsAppType noExtField e paren_wct)
  where
    t_body    = hswc_body t
    paren_wct = t { hswc_body = parenthesizeHsType appPrec t_body }

-- | Iterated visible type application.
mkHsAppTypes :: LHsExpr GhcRn -> [LHsWcType GhcRn] -> LHsExpr GhcRn
mkHsAppTypes = foldl' mkHsAppType

-- | A parenthesized lambda @(\\p1 ... pn -> body)@; each pattern is
-- parenthesized at application precedence.
mkHsLam :: (IsPass p, XMG (GhcPass p) (LHsExpr (GhcPass p)) ~ NoExtField)
        => [LPat (GhcPass p)]
        -> LHsExpr (GhcPass p)
        -> LHsExpr (GhcPass p)
mkHsLam pats body = mkHsPar (L (getLoc body) (HsLam noExtField matches))
  where
    matches = mkMatchGroup Generated
                           (noLocA [mkSimpleMatch LambdaExpr pats' body])
    pats' = map (parenthesizePat appPrec) pats

-- | Wrap a typechecked expression in type- and dictionary-lambdas.
mkHsLams :: [TyVar] -> [EvVar] -> LHsExpr GhcTc -> LHsExpr GhcTc
mkHsLams tyvars dicts expr = mkLHsWrap (mkWpTyLams tyvars
                                       <.> mkWpLams dicts) expr

-- |A simple case alternative with a single pattern, no binds, no guards;
-- pre-typechecking
mkHsCaseAlt :: (Anno (GRHS (GhcPass p) (LocatedA (body (GhcPass p))))
                     ~ SrcAnn NoEpAnns,
                 Anno (Match (GhcPass p) (LocatedA (body (GhcPass p))))
                        ~ SrcSpanAnnA)
            => LPat (GhcPass p) -> (LocatedA (body (GhcPass p)))
            -> LMatch (GhcPass p) (LocatedA (body (GhcPass p)))
mkHsCaseAlt pat expr
  = mkSimpleMatch CaseAlt [pat] expr

-- | @f \@ty1 ... \@tyn@ for a typechecked identifier, via an 'HsWrapper'.
nlHsTyApp :: Id -> [Type] -> LHsExpr GhcTc
nlHsTyApp fun_id tys
  = noLocA (mkHsWrap (mkWpTyApps tys) (HsVar noExtField (noLocA fun_id)))

-- | @f \@ty1 ... \@tyn x1 ... xm@.
nlHsTyApps :: Id -> [Type] -> [LHsExpr GhcTc] -> LHsExpr GhcTc
nlHsTyApps fun_id tys xs = foldl' nlHsApp (nlHsTyApp fun_id tys) xs

--------- Adding parens ---------
-- | Wrap in parens if @'hsExprNeedsParens' appPrec@ says it needs them
-- So @f x@ becomes @(f x)@, but @3@ stays as @3@.
mkLHsPar :: IsPass id => LHsExpr (GhcPass id) -> LHsExpr (GhcPass id)
mkLHsPar = parenthesizeHsExpr appPrec

-- | Parenthesize a pattern at application precedence, if needed.
mkParPat :: IsPass p => LPat (GhcPass p) -> LPat (GhcPass p)
mkParPat = parenthesizePat appPrec

-- | Unconditionally parenthesize a pattern, with no location info.
nlParPat :: LPat (GhcPass name) -> LPat (GhcPass name)
nlParPat p = noLocA (gParPat p)

-------------------------------
-- These are the bits of syntax that contain rebindable names
-- See GHC.Rename.Env.lookupSyntax

mkHsIntegral   :: IntegralLit -> HsOverLit GhcPs
mkHsFractional :: FractionalLit -> HsOverLit GhcPs
mkHsIsString   :: SourceText -> FastString -> HsOverLit GhcPs
mkHsDo         :: HsDoFlavour -> LocatedL [ExprLStmt GhcPs] -> HsExpr GhcPs
mkHsDoAnns     :: HsDoFlavour -> LocatedL [ExprLStmt GhcPs] -> EpAnn AnnList -> HsExpr GhcPs
mkHsComp       :: HsDoFlavour -> [ExprLStmt GhcPs] -> LHsExpr GhcPs
               -> HsExpr GhcPs
mkHsCompAnns   :: HsDoFlavour -> [ExprLStmt GhcPs] -> LHsExpr GhcPs
               -> EpAnn AnnList
               -> HsExpr GhcPs

mkNPat      :: LocatedAn NoEpAnns (HsOverLit GhcPs) -> Maybe (SyntaxExpr GhcPs) -> EpAnn [AddEpAnn]
            -> Pat GhcPs
mkNPlusKPat :: LocatedN RdrName -> LocatedAn NoEpAnns (HsOverLit GhcPs) -> EpAnn EpaLocation
            -> Pat GhcPs

-- NB: The following functions all use noSyntaxExpr: the generated expressions
--     will not work with rebindable syntax if used after the renamer
mkLastStmt :: IsPass idR => LocatedA (bodyR (GhcPass idR))
           -> StmtLR (GhcPass idL) (GhcPass idR) (LocatedA (bodyR (GhcPass idR)))
mkBodyStmt :: LocatedA (bodyR GhcPs)
           -> StmtLR (GhcPass idL) GhcPs (LocatedA (bodyR GhcPs))
mkPsBindStmt :: EpAnn [AddEpAnn] -> LPat GhcPs -> LocatedA (bodyR GhcPs)
             -> StmtLR GhcPs GhcPs (LocatedA (bodyR GhcPs))
mkRnBindStmt :: LPat GhcRn -> LocatedA (bodyR GhcRn)
             -> StmtLR GhcRn GhcRn (LocatedA (bodyR GhcRn))
mkTcBindStmt :: LPat GhcTc -> LocatedA (bodyR GhcTc)
             -> StmtLR GhcTc GhcTc (LocatedA (bodyR GhcTc))

emptyRecStmt     :: (Anno [GenLocated
                             (Anno (StmtLR (GhcPass idL) GhcPs bodyR))
                             (StmtLR (GhcPass idL) GhcPs bodyR)]
                        ~ SrcSpanAnnL)
                 => StmtLR (GhcPass idL) GhcPs bodyR
emptyRecStmtName :: (Anno [GenLocated
                             (Anno (StmtLR GhcRn GhcRn bodyR))
                             (StmtLR GhcRn GhcRn bodyR)]
                        ~ SrcSpanAnnL)
                 => StmtLR GhcRn GhcRn bodyR
emptyRecStmtId   :: Stmt GhcTc (LocatedA (HsCmd GhcTc))
mkRecStmt        :: (Anno [GenLocated
                             (Anno (StmtLR (GhcPass idL) GhcPs bodyR))
                             (StmtLR (GhcPass idL) GhcPs bodyR)]
                        ~ SrcSpanAnnL)
                 => EpAnn AnnList
                 -> LocatedL [LStmtLR (GhcPass idL) GhcPs bodyR]
                 -> StmtLR (GhcPass idL) GhcPs bodyR

mkHsIntegral     i  = OverLit noExtField (HsIntegral       i)
mkHsFractional   f  = OverLit noExtField (HsFractional     f)
mkHsIsString src s  = OverLit noExtField (HsIsString   src s)

mkHsDo     ctxt stmts      = HsDo noAnn ctxt stmts
mkHsDoAnns ctxt stmts anns = HsDo anns  ctxt stmts
mkHsComp ctxt stmts expr = mkHsCompAnns ctxt stmts expr noAnn
mkHsCompAnns ctxt stmts expr anns = mkHsDoAnns ctxt (mkLocatedList (stmts ++ [last_stmt])) anns
  where
    -- Strip the annotations from the location, they are in the embedded expr
    last_stmt = L (noAnnSrcSpan $ getLocA expr) $ mkLastStmt expr

-- restricted to GhcPs because other phases might need a SyntaxExpr
mkHsIf :: LHsExpr GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs -> EpAnn AnnsIf
       -> HsExpr GhcPs
mkHsIf c a b anns = HsIf anns c a b

-- restricted to GhcPs because other phases might need a SyntaxExpr
mkHsCmdIf :: LHsExpr GhcPs -> LHsCmd GhcPs -> LHsCmd GhcPs -> EpAnn AnnsIf
       -> HsCmd GhcPs
mkHsCmdIf c a b anns = HsCmdIf anns noSyntaxExpr c a b

mkNPat lit neg anns  = NPat anns lit neg noSyntaxExpr
mkNPlusKPat id lit anns
  = NPlusKPat anns id lit (unLoc lit) noSyntaxExpr noSyntaxExpr

mkTransformStmt    :: EpAnn [AddEpAnn] -> [ExprLStmt GhcPs] -> LHsExpr GhcPs
                   -> StmtLR GhcPs GhcPs (LHsExpr GhcPs)
mkTransformByStmt  :: EpAnn [AddEpAnn] -> [ExprLStmt GhcPs] -> LHsExpr GhcPs
                   -> LHsExpr GhcPs -> StmtLR GhcPs GhcPs (LHsExpr GhcPs)
mkGroupUsingStmt   :: EpAnn [AddEpAnn] -> [ExprLStmt GhcPs] -> LHsExpr GhcPs
                   -> StmtLR GhcPs GhcPs (LHsExpr GhcPs)
mkGroupByUsingStmt :: EpAnn [AddEpAnn] -> [ExprLStmt GhcPs] -> LHsExpr GhcPs
                   -> LHsExpr GhcPs
                   -> StmtLR GhcPs GhcPs (LHsExpr GhcPs)

-- | A 'TransStmt' skeleton: the form field is a panic placeholder that the
-- four @mkTransform*@/@mkGroup*@ builders below are expected to overwrite.
emptyTransStmt :: EpAnn [AddEpAnn] -> StmtLR GhcPs GhcPs (LHsExpr GhcPs)
emptyTransStmt anns = TransStmt { trS_ext = anns
                                , trS_form = panic "emptyTransStmt: form"
                                , trS_stmts = [], trS_bndrs = []
                                , trS_by = Nothing, trS_using = noLocA noExpr
                                , trS_ret = noSyntaxExpr, trS_bind = noSyntaxExpr
                                , trS_fmap = noExpr }
mkTransformStmt    a ss u   = (emptyTransStmt a) { trS_form = ThenForm,  trS_stmts = ss, trS_using = u }
mkTransformByStmt  a ss u b = (emptyTransStmt a) { trS_form = ThenForm,  trS_stmts = ss, trS_using = u, trS_by = Just b }
mkGroupUsingStmt   a ss u   = (emptyTransStmt a) { trS_form = GroupForm, trS_stmts = ss, trS_using = u }
mkGroupByUsingStmt a ss b u = (emptyTransStmt a) { trS_form = GroupForm, trS_stmts = ss, trS_using = u, trS_by = Just b }

mkLastStmt body = LastStmt noExtField body Nothing noSyntaxExpr
mkBodyStmt body = BodyStmt noExtField body noSyntaxExpr noSyntaxExpr
mkPsBindStmt ann pat body = BindStmt ann pat body
mkRnBindStmt pat body = BindStmt (XBindStmtRn { xbsrn_bindOp = noSyntaxExpr, xbsrn_failOp = Nothing }) pat body
mkTcBindStmt pat body = BindStmt (XBindStmtTc { xbstc_bindOp = noSyntaxExpr, xbstc_boundResultType = unitTy, -- unitTy is a dummy value -- can't panic here: it's forced during zonking xbstc_boundResultMult = Many, xbstc_failOp = Nothing }) pat body emptyRecStmt' :: forall idL idR body . (WrapXRec (GhcPass idR) [LStmtLR (GhcPass idL) (GhcPass idR) body], IsPass idR) => XRecStmt (GhcPass idL) (GhcPass idR) body -> StmtLR (GhcPass idL) (GhcPass idR) body emptyRecStmt' tyVal = RecStmt { recS_stmts = wrapXRec @(GhcPass idR) [] , recS_later_ids = [] , recS_rec_ids = [] , recS_ret_fn = noSyntaxExpr , recS_mfix_fn = noSyntaxExpr , recS_bind_fn = noSyntaxExpr , recS_ext = tyVal } unitRecStmtTc :: RecStmtTc unitRecStmtTc = RecStmtTc { recS_bind_ty = unitTy , recS_later_rets = [] , recS_rec_rets = [] , recS_ret_ty = unitTy } emptyRecStmt = emptyRecStmt' noAnn emptyRecStmtName = emptyRecStmt' noExtField emptyRecStmtId = emptyRecStmt' unitRecStmtTc -- a panic might trigger during zonking mkRecStmt anns stmts = (emptyRecStmt' anns) { recS_stmts = stmts } mkLetStmt :: EpAnn [AddEpAnn] -> HsLocalBinds GhcPs -> StmtLR GhcPs GhcPs (LocatedA b) mkLetStmt anns binds = LetStmt anns binds ------------------------------- -- | A useful function for building @OpApps@. The operator is always a -- variable, and we don't know the fixity yet. 
mkHsOpApp :: LHsExpr GhcPs -> IdP GhcPs -> LHsExpr GhcPs -> HsExpr GhcPs mkHsOpApp e1 op e2 = OpApp noAnn e1 (noLocA (HsVar noExtField (noLocA op))) e2 unqualSplice :: RdrName unqualSplice = mkRdrUnqual (mkVarOccFS (fsLit "splice")) mkUntypedSplice :: EpAnn [AddEpAnn] -> SpliceDecoration -> LHsExpr GhcPs -> HsSplice GhcPs mkUntypedSplice ann hasParen e = HsUntypedSplice ann hasParen unqualSplice e mkTypedSplice :: EpAnn [AddEpAnn] -> SpliceDecoration -> LHsExpr GhcPs -> HsSplice GhcPs mkTypedSplice ann hasParen e = HsTypedSplice ann hasParen unqualSplice e mkHsQuasiQuote :: RdrName -> SrcSpan -> FastString -> HsSplice GhcPs mkHsQuasiQuote quoter span quote = HsQuasiQuote noExtField unqualSplice quoter span quote mkHsString :: String -> HsLit (GhcPass p) mkHsString s = HsString NoSourceText (mkFastString s) mkHsStringPrimLit :: FastString -> HsLit (GhcPass p) mkHsStringPrimLit fs = HsStringPrim NoSourceText (bytesFS fs) mkHsCharPrimLit :: Char -> HsLit (GhcPass p) mkHsCharPrimLit c = HsChar NoSourceText c mkConLikeTc :: ConLike -> HsExpr GhcTc mkConLikeTc con = XExpr (ConLikeTc con [] []) {- ************************************************************************ * * Constructing syntax with no location info * * ************************************************************************ -} nlHsVar :: IsSrcSpanAnn p a => IdP (GhcPass p) -> LHsExpr (GhcPass p) nlHsVar n = noLocA (HsVar noExtField (noLocA n)) nl_HsVar :: IsSrcSpanAnn p a => IdP (GhcPass p) -> HsExpr (GhcPass p) nl_HsVar n = HsVar noExtField (noLocA n) -- | NB: Only for 'LHsExpr' 'Id'. 
-- | A data constructor occurrence, as a typechecked expression.
nlHsDataCon :: DataCon -> LHsExpr GhcTc
nlHsDataCon con = noLocA (mkConLikeTc (RealDataCon con))

-- | A literal expression with no location info.
nlHsLit :: HsLit (GhcPass p) -> LHsExpr (GhcPass p)
nlHsLit n = noLocA (HsLit noComments n)

-- | An integer literal expression with no location info.
nlHsIntLit :: Integer -> LHsExpr (GhcPass p)
nlHsIntLit n = noLocA (HsLit noComments (HsInt noExtField (mkIntegralLit n)))

-- | A variable pattern with no location info.
nlVarPat :: IsSrcSpanAnn p a
         => IdP (GhcPass p) -> LPat (GhcPass p)
nlVarPat n = noLocA (VarPat noExtField (noLocA n))

-- | A literal pattern with no location info.
nlLitPat :: HsLit GhcPs -> LPat GhcPs
nlLitPat l = noLocA (LitPat noExtField l)

-- | Application @f x@ with no location info; the argument is
-- parenthesized at application precedence.
nlHsApp :: IsPass id => LHsExpr (GhcPass id) -> LHsExpr (GhcPass id) -> LHsExpr (GhcPass id)
nlHsApp f x = noLocA (HsApp noComments f (mkLHsPar x))

-- | Apply a typechecked 'SyntaxExprTc' to arguments, applying the
-- argument and result wrappers it carries.  Panics on 'NoSyntaxExprTc'.
nlHsSyntaxApps :: SyntaxExprTc -> [LHsExpr GhcTc]
               -> LHsExpr GhcTc
nlHsSyntaxApps (SyntaxExprTc { syn_expr      = fun
                             , syn_arg_wraps = arg_wraps
                             , syn_res_wrap  = res_wrap }) args
  = mkLHsWrap res_wrap (foldl' nlHsApp (noLocA fun)
                               (zipWithEqual "nlHsSyntaxApps" mkLHsWrap
                                             arg_wraps args))
nlHsSyntaxApps NoSyntaxExprTc args = pprPanic "nlHsSyntaxApps" (ppr args)
  -- this function should never be called in scenarios where there is no
  -- syntax expr

-- | @f x1 ... xn@ with no location info.
nlHsApps :: IsSrcSpanAnn p a
         => IdP (GhcPass p) -> [LHsExpr (GhcPass p)] -> LHsExpr (GhcPass p)
nlHsApps f xs = foldl' nlHsApp (nlHsVar f) xs

-- | Apply a variable to variable arguments, with no location info.
nlHsVarApps :: IsSrcSpanAnn p a
            => IdP (GhcPass p) -> [IdP (GhcPass p)] -> LHsExpr (GhcPass p)
nlHsVarApps f xs = noLocA (foldl' mk (HsVar noExtField (noLocA f))
                                     (map ((HsVar noExtField) . noLocA) xs))
                 where
                   mk f a = HsApp noComments (noLocA f) (noLocA a)

-- | Constructor pattern whose arguments are all variable patterns
-- (parsed stage).
nlConVarPat :: RdrName -> [RdrName] -> LPat GhcPs
nlConVarPat con vars = nlConPat con (map nlVarPat vars)

-- | As 'nlConVarPat', but for the renamed stage.
nlConVarPatName :: Name -> [Name] -> LPat GhcRn
nlConVarPatName con vars = nlConPatName con (map nlVarPat vars)

-- | Infix constructor pattern @l `con` r@; both sides are parenthesized
-- at operator precedence.
nlInfixConPat :: RdrName -> LPat GhcPs -> LPat GhcPs -> LPat GhcPs
nlInfixConPat con l r = noLocA $ ConPat
  { pat_con = noLocA con
  , pat_args = InfixCon (parenthesizePat opPrec l)
                        (parenthesizePat opPrec r)
  , pat_con_ext = noAnn
  }

-- | Prefix constructor pattern (parsed stage); arguments parenthesized
-- at application precedence.
nlConPat :: RdrName -> [LPat GhcPs] -> LPat GhcPs
nlConPat con pats = noLocA $ ConPat
  { pat_con_ext = noAnn
  , pat_con = noLocA con
  , pat_args = PrefixCon [] (map (parenthesizePat appPrec) pats)
  }

-- | As 'nlConPat', but for the renamed stage.
nlConPatName :: Name -> [LPat GhcRn] -> LPat GhcRn
nlConPatName con pats = noLocA $ ConPat
  { pat_con_ext = noExtField
  , pat_con = noLocA con
  , pat_args = PrefixCon [] (map (parenthesizePat appPrec) pats)
  }

-- | Nullary constructor pattern.
nlNullaryConPat :: RdrName -> LPat GhcPs
nlNullaryConPat con = noLocA $ ConPat
  { pat_con_ext = noAnn
  , pat_con = noLocA con
  , pat_args = PrefixCon [] []
  }

-- | Constructor pattern with a wildcard for every source-visible field.
nlWildConPat :: DataCon -> LPat GhcPs
nlWildConPat con = noLocA $ ConPat
  { pat_con_ext = noAnn
  , pat_con = noLocA $ getRdrName con
  , pat_args = PrefixCon [] $
     replicate (dataConSourceArity con)
               nlWildPat
  }

-- | Wildcard pattern - after parsing
nlWildPat :: LPat GhcPs
nlWildPat = noLocA (WildPat noExtField )

-- | Wildcard pattern - after renaming
nlWildPatName :: LPat GhcRn
nlWildPatName = noLocA (WildPat noExtField )

-- | A @do@-expression with no location info.
nlHsDo :: HsDoFlavour -> [LStmt GhcPs (LHsExpr GhcPs)]
       -> LHsExpr GhcPs
nlHsDo ctxt stmts = noLocA (mkHsDo ctxt (noLocA stmts))

-- | Operator application with no location info.
nlHsOpApp :: LHsExpr GhcPs -> IdP GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs
nlHsOpApp e1 op e2 = noLocA (mkHsOpApp e1 op e2)

nlHsLam  :: LMatch GhcPs (LHsExpr GhcPs) -> LHsExpr GhcPs
nlHsPar  :: LHsExpr (GhcPass id) -> LHsExpr (GhcPass id)
nlHsCase :: LHsExpr GhcPs -> [LMatch GhcPs (LHsExpr GhcPs)]
         -> LHsExpr GhcPs
nlList   :: [LHsExpr GhcPs] -> LHsExpr GhcPs -- AZ:Is this used?
nlHsLam match = noLocA (HsLam noExtField (mkMatchGroup Generated (noLocA [match]))) nlHsPar e = noLocA (gHsPar e) -- nlHsIf should generate if-expressions which are NOT subject to -- RebindableSyntax, so the first field of HsIf is False. (#12080) nlHsIf :: LHsExpr GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs -> LHsExpr GhcPs nlHsIf cond true false = noLocA (HsIf noAnn cond true false) nlHsCase expr matches = noLocA (HsCase noAnn expr (mkMatchGroup Generated (noLocA matches))) nlList exprs = noLocA (ExplicitList noAnn exprs) nlHsAppTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) nlHsTyVar :: IsSrcSpanAnn p a => PromotionFlag -> IdP (GhcPass p) -> LHsType (GhcPass p) nlHsFunTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) -> LHsType (GhcPass p) nlHsParTy :: LHsType (GhcPass p) -> LHsType (GhcPass p) nlHsAppTy f t = noLocA (HsAppTy noExtField f (parenthesizeHsType appPrec t)) nlHsTyVar p x = noLocA (HsTyVar noAnn p (noLocA x)) nlHsFunTy a b = noLocA (HsFunTy noAnn (HsUnrestrictedArrow noHsUniTok) (parenthesizeHsType funPrec a) b) nlHsParTy t = noLocA (HsParTy noAnn t) nlHsTyConApp :: IsSrcSpanAnn p a => PromotionFlag -> LexicalFixity -> IdP (GhcPass p) -> [LHsTypeArg (GhcPass p)] -> LHsType (GhcPass p) nlHsTyConApp prom fixity tycon tys | Infix <- fixity , HsValArg ty1 : HsValArg ty2 : rest <- tys = foldl' mk_app (noLocA $ HsOpTy noAnn prom ty1 (noLocA tycon) ty2) rest | otherwise = foldl' mk_app (nlHsTyVar prom tycon) tys where mk_app :: LHsType (GhcPass p) -> LHsTypeArg (GhcPass p) -> LHsType (GhcPass p) mk_app fun@(L _ (HsOpTy {})) arg = mk_app (noLocA $ HsParTy noAnn fun) arg -- parenthesize things like `(A + B) C` mk_app fun (HsValArg ty) = noLocA (HsAppTy noExtField fun (parenthesizeHsType appPrec ty)) mk_app fun (HsTypeArg _ ki) = noLocA (HsAppKindTy noSrcSpan fun (parenthesizeHsType appPrec ki)) mk_app fun (HsArgPar _) = noLocA (HsParTy noAnn fun) nlHsAppKindTy :: LHsType (GhcPass p) -> LHsKind (GhcPass p) -> LHsType (GhcPass p) 
nlHsAppKindTy ty ki =
  noLocA $ HsAppKindTy noSrcSpan ty $ parenthesizeHsType appPrec ki

{-
Tuples.  All these functions are *pre-typechecker* because
they lack types on the tuple.
-}

-- | Build a pre-typechecker boxed tuple expression; a one-element list
-- yields the element itself rather than a 1-tuple.
mkLHsTupleExpr :: [LHsExpr (GhcPass p)] -> XExplicitTuple (GhcPass p)
               -> LHsExpr (GhcPass p)
mkLHsTupleExpr [single] _ = single
mkLHsTupleExpr elts ext =
  noLocA $ ExplicitTuple ext [Present noAnn e | e <- elts] Boxed

-- | Tuple of variable occurrences; see 'mkLHsTupleExpr' for the
-- one-element behaviour.
mkLHsVarTuple :: IsSrcSpanAnn p a
              => [IdP (GhcPass p)] -> XExplicitTuple (GhcPass p)
              -> LHsExpr (GhcPass p)
mkLHsVarTuple names ext = mkLHsTupleExpr (map nlHsVar names) ext

-- | Tuple pattern with no location info.
nlTuplePat :: [LPat GhcPs] -> Boxity -> LPat GhcPs
nlTuplePat ps boxity = noLocA $ TuplePat noAnn ps boxity

-- | A missing (tuple-section) argument.
missingTupArg :: EpAnn EpaLocation -> HsTupArg GhcPs
missingTupArg = Missing

-- | Boxed tuple pattern; a singleton list yields the pattern itself, and
-- a non-trivial tuple takes the location of its first component.
mkLHsPatTup :: [LPat GhcRn] -> LPat GhcRn
mkLHsPatTup ps = case ps of
  []       -> noLocA $ TuplePat noExtField [] Boxed
  [only]   -> only
  (p : _)  -> L (getLoc p) $ TuplePat noExtField ps Boxed

-- | The Big equivalents for the source tuple expressions
mkBigLHsVarTup :: IsSrcSpanAnn p a
               => [IdP (GhcPass p)] -> XExplicitTuple (GhcPass p)
               -> LHsExpr (GhcPass p)
mkBigLHsVarTup names anns = mkBigLHsTup (map nlHsVar names) anns

mkBigLHsTup :: [LHsExpr (GhcPass id)] -> XExplicitTuple (GhcPass id)
            -> LHsExpr (GhcPass id)
mkBigLHsTup elts anns = mkChunkified (\chunk -> mkLHsTupleExpr chunk anns) elts

-- | The Big equivalents for the source tuple patterns
mkBigLHsVarPatTup :: [IdP GhcRn] -> LPat GhcRn
mkBigLHsVarPatTup = mkBigLHsPatTup . map nlVarPat

mkBigLHsPatTup :: [LPat GhcRn] -> LPat GhcRn
mkBigLHsPatTup = mkChunkified mkLHsPatTup

-- $big_tuples
-- #big_tuples#
--
-- GHCs built in tuples can only go up to 'mAX_TUPLE_SIZE' in arity, but
-- we might conceivably want to build such a massive tuple as part of the
-- output of a desugaring stage (notably that for list comprehensions).
--
-- We call tuples above this size \"big tuples\", and emulate them by
-- creating and pattern matching on >nested< tuples that are expressible
-- by GHC.
--
-- Nesting policy: it's better to have a 2-tuple of 10-tuples (3 objects)
-- than a 10-tuple of 2-tuples (11 objects), so we want the leaves of any
-- construction to be big.
--
-- If you just use the 'mkBigCoreTup', 'mkBigCoreVarTupTy', 'mkTupleSelector'
-- and 'mkTupleCase' functions to do all your work with tuples you should be
-- fine, and not have to worry about the arity limitation at all.

-- | Lifts a \"small\" constructor into a \"big\" constructor by recursive
-- decomposition: the input is chunked into 'mAX_TUPLE_SIZE'-sized pieces,
-- each piece is built with the small constructor, and the results are
-- combined recursively until a single value remains.
mkChunkified :: ([a] -> a)
                 -- ^ \"Small\" constructor function, of maximum input
                 -- arity 'mAX_TUPLE_SIZE'
             -> [a]
                 -- ^ Possible \"big\" list of things to construct from
             -> a
                 -- ^ Constructed thing made possible by recursive decomposition
mkChunkified small_tuple = reduce . chunkify
  where
    -- A single chunk fits directly in the small constructor.
    reduce [one_chunk] = small_tuple one_chunk
    -- Otherwise build each chunk and re-chunk the results.
    reduce chunks      = reduce (chunkify (map small_tuple chunks))

-- | Split a list into lists that are small enough to have a corresponding
-- tuple arity. The sub-lists of the result all have length <= 'mAX_TUPLE_SIZE'
-- But there may be more than 'mAX_TUPLE_SIZE' sub-lists
chunkify :: [a] -> [[a]]
chunkify items
  | length items <= mAX_TUPLE_SIZE = [items]   -- note: [] yields [[]]
  | otherwise                      = go items
  where
    go [] = []
    go ys = let (chunk, rest) = splitAt mAX_TUPLE_SIZE ys
            in chunk : go rest

{-
************************************************************************
*                                                                      *
        LHsSigType and LHsSigWcType
*                                                                      *
********************************************************************* -}

-- | Convert an 'LHsType' to an 'LHsSigType'.
hsTypeToHsSigType :: LHsType GhcPs -> LHsSigType GhcPs hsTypeToHsSigType lty@(L loc ty) = L loc $ case ty of HsForAllTy { hst_tele = HsForAllInvis { hsf_xinvis = an , hsf_invis_bndrs = bndrs } , hst_body = body } -> mkHsExplicitSigType an bndrs body _ -> mkHsImplicitSigType lty -- | Convert an 'LHsType' to an 'LHsSigWcType'. hsTypeToHsSigWcType :: LHsType GhcPs -> LHsSigWcType GhcPs hsTypeToHsSigWcType = mkHsWildCardBndrs . hsTypeToHsSigType mkHsSigEnv :: forall a. (LSig GhcRn -> Maybe ([LocatedN Name], a)) -> [LSig GhcRn] -> NameEnv a mkHsSigEnv get_info sigs = mkNameEnv (mk_pairs ordinary_sigs) `extendNameEnvList` (mk_pairs gen_dm_sigs) -- The subtlety is this: in a class decl with a -- default-method signature as well as a method signature -- we want the latter to win (#12533) -- class C x where -- op :: forall a . x a -> x a -- default op :: forall b . x b -> x b -- op x = ...(e :: b -> b)... -- The scoped type variables of the 'default op', namely 'b', -- scope over the code for op. The 'forall a' does not! -- This applies both in the renamer and typechecker, both -- of which use this function where (gen_dm_sigs, ordinary_sigs) = partition is_gen_dm_sig sigs is_gen_dm_sig (L _ (ClassOpSig _ True _ _)) = True is_gen_dm_sig _ = False mk_pairs :: [LSig GhcRn] -> [(Name, a)] mk_pairs sigs = [ (n,a) | Just (ns,a) <- map get_info sigs , L _ n <- ns ] mkClassOpSigs :: [LSig GhcPs] -> [LSig GhcPs] -- ^ Convert 'TypeSig' to 'ClassOpSig'. 
-- The former is what is parsed, but the latter is -- what we need in class/instance declarations mkClassOpSigs sigs = map fiddle sigs where fiddle (L loc (TypeSig anns nms ty)) = L loc (ClassOpSig anns False nms (dropWildCards ty)) fiddle sig = sig {- ********************************************************************* * * --------- HsWrappers: type args, dict args, casts --------- * * ********************************************************************* -} mkLHsWrap :: HsWrapper -> LHsExpr GhcTc -> LHsExpr GhcTc mkLHsWrap co_fn (L loc e) = L loc (mkHsWrap co_fn e) mkHsWrap :: HsWrapper -> HsExpr GhcTc -> HsExpr GhcTc mkHsWrap co_fn e | isIdHsWrapper co_fn = e mkHsWrap co_fn e = XExpr (WrapExpr $ HsWrap co_fn e) mkHsWrapCo :: TcCoercionN -- A Nominal coercion a ~N b -> HsExpr GhcTc -> HsExpr GhcTc mkHsWrapCo co e = mkHsWrap (mkWpCastN co) e mkHsWrapCoR :: TcCoercionR -- A Representational coercion a ~R b -> HsExpr GhcTc -> HsExpr GhcTc mkHsWrapCoR co e = mkHsWrap (mkWpCastR co) e mkLHsWrapCo :: TcCoercionN -> LHsExpr GhcTc -> LHsExpr GhcTc mkLHsWrapCo co (L loc e) = L loc (mkHsWrapCo co e) mkHsCmdWrap :: HsWrapper -> HsCmd GhcTc -> HsCmd GhcTc mkHsCmdWrap w cmd | isIdHsWrapper w = cmd | otherwise = XCmd (HsWrap w cmd) mkLHsCmdWrap :: HsWrapper -> LHsCmd GhcTc -> LHsCmd GhcTc mkLHsCmdWrap w (L loc c) = L loc (mkHsCmdWrap w c) mkHsWrapPat :: HsWrapper -> Pat GhcTc -> Type -> Pat GhcTc mkHsWrapPat co_fn p ty | isIdHsWrapper co_fn = p | otherwise = XPat $ CoPat co_fn p ty mkHsWrapPatCo :: TcCoercionN -> Pat GhcTc -> Type -> Pat GhcTc mkHsWrapPatCo co pat ty | isTcReflCo co = pat | otherwise = XPat $ CoPat (mkWpCastN co) pat ty mkHsDictLet :: TcEvBinds -> LHsExpr GhcTc -> LHsExpr GhcTc mkHsDictLet ev_binds expr = mkLHsWrap (mkWpLet ev_binds) expr {- l ************************************************************************ * * Bindings; with a location at the top * * ************************************************************************ -} mkFunBind :: Origin -> 
LocatedN RdrName -> [LMatch GhcPs (LHsExpr GhcPs)] -> HsBind GhcPs -- ^ Not infix, with place holders for coercion and free vars mkFunBind origin fn ms = FunBind { fun_id = fn , fun_matches = mkMatchGroup origin (noLocA ms) , fun_ext = noExtField , fun_tick = [] } mkTopFunBind :: Origin -> LocatedN Name -> [LMatch GhcRn (LHsExpr GhcRn)] -> HsBind GhcRn -- ^ In Name-land, with empty bind_fvs mkTopFunBind origin fn ms = FunBind { fun_id = fn , fun_matches = mkMatchGroup origin (noLocA ms) , fun_ext = emptyNameSet -- NB: closed -- binding , fun_tick = [] } mkHsVarBind :: SrcSpan -> RdrName -> LHsExpr GhcPs -> LHsBind GhcPs mkHsVarBind loc var rhs = mkSimpleGeneratedFunBind loc var [] rhs mkVarBind :: IdP (GhcPass p) -> LHsExpr (GhcPass p) -> LHsBind (GhcPass p) mkVarBind var rhs = L (getLoc rhs) $ VarBind { var_ext = noExtField, var_id = var, var_rhs = rhs } mkPatSynBind :: LocatedN RdrName -> HsPatSynDetails GhcPs -> LPat GhcPs -> HsPatSynDir GhcPs -> EpAnn [AddEpAnn] -> HsBind GhcPs mkPatSynBind name details lpat dir anns = PatSynBind noExtField psb where psb = PSB{ psb_ext = anns , psb_id = name , psb_args = details , psb_def = lpat , psb_dir = dir } -- |If any of the matches in the 'FunBind' are infix, the 'FunBind' is -- considered infix. isInfixFunBind :: forall id1 id2. UnXRec id2 => HsBindLR id1 id2 -> Bool isInfixFunBind (FunBind { fun_matches = MG _ matches _ }) = any (isInfixMatch . 
unXRec @id2) (unXRec @id2 matches) isInfixFunBind _ = False -- |Return the 'SrcSpan' encompassing the contents of any enclosed binds spanHsLocaLBinds :: HsLocalBinds (GhcPass p) -> SrcSpan spanHsLocaLBinds (EmptyLocalBinds _) = noSrcSpan spanHsLocaLBinds (HsValBinds _ (ValBinds _ bs sigs)) = foldr combineSrcSpans noSrcSpan (bsSpans ++ sigsSpans) where bsSpans :: [SrcSpan] bsSpans = map getLocA $ bagToList bs sigsSpans :: [SrcSpan] sigsSpans = map getLocA sigs spanHsLocaLBinds (HsValBinds _ (XValBindsLR (NValBinds bs sigs))) = foldr combineSrcSpans noSrcSpan (bsSpans ++ sigsSpans) where bsSpans :: [SrcSpan] bsSpans = map getLocA $ concatMap (bagToList . snd) bs sigsSpans :: [SrcSpan] sigsSpans = map getLocA sigs spanHsLocaLBinds (HsIPBinds _ (IPBinds _ bs)) = foldr combineSrcSpans noSrcSpan (map getLocA bs) ------------ -- | Convenience function using 'mkFunBind'. -- This is for generated bindings only, do not use for user-written code. mkSimpleGeneratedFunBind :: SrcSpan -> RdrName -> [LPat GhcPs] -> LHsExpr GhcPs -> LHsBind GhcPs mkSimpleGeneratedFunBind loc fun pats expr = L (noAnnSrcSpan loc) $ mkFunBind Generated (L (noAnnSrcSpan loc) fun) [mkMatch (mkPrefixFunRhs (L (noAnnSrcSpan loc) fun)) pats expr emptyLocalBinds] -- | Make a prefix, non-strict function 'HsMatchContext' mkPrefixFunRhs :: LIdP (NoGhcTc p) -> HsMatchContext p mkPrefixFunRhs n = FunRhs { mc_fun = n , mc_fixity = Prefix , mc_strictness = NoSrcStrict } ------------ mkMatch :: forall p. 
IsPass p => HsMatchContext (GhcPass p) -> [LPat (GhcPass p)] -> LHsExpr (GhcPass p) -> HsLocalBinds (GhcPass p) -> LMatch (GhcPass p) (LHsExpr (GhcPass p)) mkMatch ctxt pats expr binds = noLocA (Match { m_ext = noAnn , m_ctxt = ctxt , m_pats = map mkParPat pats , m_grhss = GRHSs emptyComments (unguardedRHS noAnn noSrcSpan expr) binds }) {- ************************************************************************ * * Collecting binders * * ************************************************************************ Get all the binders in some HsBindGroups, IN THE ORDER OF APPEARANCE. eg. ... where (x, y) = ... f i j = ... [a, b] = ... it should return [x, y, f, a, b] (remember, order important). Note [Collect binders only after renaming] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These functions should only be used on HsSyn *after* the renamer, to return a [Name] or [Id]. Before renaming the record punning and wild-card mechanism makes it hard to know what is bound. So these functions should not be applied to (HsSyn RdrName) Note [Unlifted id check in isUnliftedHsBind] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The function isUnliftedHsBind is used to complain if we make a top-level binding for a variable of unlifted type. Such a binding is illegal if the top-level binding would be unlifted; but also if the local letrec generated by desugaring AbsBinds would be. E.g. f :: Num a => (# a, a #) g :: Num a => a -> a f = ...g... g = ...g... The top-level bindings for f,g are not unlifted (because of the Num a =>), but the local, recursive, monomorphic bindings are: t = /\a \(d:Num a). letrec fm :: (# a, a #) = ...g... gm :: a -> a = ...f... in (fm, gm) Here the binding for 'fm' is illegal. So generally we check the abe_mono types. BUT we have a special case when abs_sig is true; see Note [The abs_sig field of AbsBinds] in GHC.Hs.Binds -} ----------------- Bindings -------------------------- -- | Should we treat this as an unlifted bind? 
This will be true for any -- bind that binds an unlifted variable, but we must be careful around -- AbsBinds. See Note [Unlifted id check in isUnliftedHsBind]. For usage -- information, see Note [Strict binds checks] is GHC.HsToCore.Binds. isUnliftedHsBind :: HsBind GhcTc -> Bool -- works only over typechecked binds isUnliftedHsBind bind | XHsBindsLR (AbsBinds { abs_exports = exports, abs_sig = has_sig }) <- bind = if has_sig then any (is_unlifted_id . abe_poly) exports else any (is_unlifted_id . abe_mono) exports -- If has_sig is True we will never generate a binding for abe_mono, -- so we don't need to worry about it being unlifted. The abe_poly -- binding might not be: e.g. forall a. Num a => (# a, a #) | otherwise = any is_unlifted_id (collectHsBindBinders CollNoDictBinders bind) where is_unlifted_id id = isUnliftedType (idType id) -- bindings always have a fixed RuntimeRep, so it's OK -- to call isUnliftedType here -- | Is a binding a strict variable or pattern bind (e.g. @!x = ...@)? isBangedHsBind :: HsBind GhcTc -> Bool isBangedHsBind (XHsBindsLR (AbsBinds { abs_binds = binds })) = anyBag (isBangedHsBind . 
unLoc) binds isBangedHsBind (FunBind {fun_matches = matches}) | [L _ match] <- unLoc $ mg_alts matches , FunRhs{mc_strictness = SrcStrict} <- m_ctxt match = True isBangedHsBind (PatBind {pat_lhs = pat}) = isBangedLPat pat isBangedHsBind _ = False collectLocalBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> HsLocalBindsLR (GhcPass idL) (GhcPass idR) -> [IdP (GhcPass idL)] collectLocalBinders flag = \case HsValBinds _ binds -> collectHsIdBinders flag binds -- No pattern synonyms here HsIPBinds {} -> [] EmptyLocalBinds _ -> [] collectHsIdBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> HsValBindsLR (GhcPass idL) (GhcPass idR) -> [IdP (GhcPass idL)] -- ^ Collect 'Id' binders only, or 'Id's + pattern synonyms, respectively collectHsIdBinders flag = collect_hs_val_binders True flag collectHsValBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> HsValBindsLR (GhcPass idL) idR -> [IdP (GhcPass idL)] collectHsValBinders flag = collect_hs_val_binders False flag collectHsBindBinders :: CollectPass p => CollectFlag p -> HsBindLR p idR -> [IdP p] -- ^ Collect both 'Id's and pattern-synonym binders collectHsBindBinders flag b = collect_bind False flag b [] collectHsBindsBinders :: CollectPass p => CollectFlag p -> LHsBindsLR p idR -> [IdP p] collectHsBindsBinders flag binds = collect_binds False flag binds [] collectHsBindListBinders :: forall p idR. CollectPass p => CollectFlag p -> [LHsBindLR p idR] -> [IdP p] -- ^ Same as 'collectHsBindsBinders', but works over a list of bindings collectHsBindListBinders flag = foldr (collect_bind False flag . unXRec @p) [] collect_hs_val_binders :: CollectPass (GhcPass idL) => Bool -> CollectFlag (GhcPass idL) -> HsValBindsLR (GhcPass idL) idR -> [IdP (GhcPass idL)] collect_hs_val_binders ps flag = \case ValBinds _ binds _ -> collect_binds ps flag binds [] XValBindsLR (NValBinds binds _) -> collect_out_binds ps flag binds collect_out_binds :: forall p. 
CollectPass p => Bool -> CollectFlag p -> [(RecFlag, LHsBinds p)] -> [IdP p] collect_out_binds ps flag = foldr (collect_binds ps flag . snd) [] collect_binds :: forall p idR. CollectPass p => Bool -> CollectFlag p -> LHsBindsLR p idR -> [IdP p] -> [IdP p] -- ^ Collect 'Id's, or 'Id's + pattern synonyms, depending on boolean flag collect_binds ps flag binds acc = foldr (collect_bind ps flag . unXRec @p) acc binds collect_bind :: forall p idR. CollectPass p => Bool -> CollectFlag p -> HsBindLR p idR -> [IdP p] -> [IdP p] collect_bind _ _ (FunBind { fun_id = f }) acc = unXRec @p f : acc collect_bind _ flag (PatBind { pat_lhs = p }) acc = collect_lpat flag p acc collect_bind _ _ (VarBind { var_id = f }) acc = f : acc collect_bind omitPatSyn _ (PatSynBind _ (PSB { psb_id = ps })) acc | omitPatSyn = acc | otherwise = unXRec @p ps : acc collect_bind _ _ (PatSynBind _ (XPatSynBind _)) acc = acc collect_bind _ _ (XHsBindsLR b) acc = collectXXHsBindsLR @p @idR b acc collectMethodBinders :: forall idL idR. UnXRec idL => LHsBindsLR idL idR -> [LIdP idL] -- ^ Used exclusively for the bindings of an instance decl which are all -- 'FunBinds' collectMethodBinders binds = foldr (get . unXRec @idL) [] binds where get (FunBind { fun_id = f }) fs = f : fs get _ fs = fs -- Someone else complains about non-FunBinds ----------------- Statements -------------------------- -- collectLStmtsBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> [LStmtLR (GhcPass idL) (GhcPass idR) body] -> [IdP (GhcPass idL)] collectLStmtsBinders flag = concatMap (collectLStmtBinders flag) collectStmtsBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> [StmtLR (GhcPass idL) (GhcPass idR) body] -> [IdP (GhcPass idL)] collectStmtsBinders flag = concatMap (collectStmtBinders flag) collectLStmtBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> LStmtLR (GhcPass idL) (GhcPass idR) body -> [IdP (GhcPass idL)] collectLStmtBinders flag = collectStmtBinders flag . 
unLoc collectStmtBinders :: CollectPass (GhcPass idL) => CollectFlag (GhcPass idL) -> StmtLR (GhcPass idL) (GhcPass idR) body -> [IdP (GhcPass idL)] -- Id Binders for a Stmt... [but what about pattern-sig type vars]? collectStmtBinders flag = \case BindStmt _ pat _ -> collectPatBinders flag pat LetStmt _ binds -> collectLocalBinders flag binds BodyStmt {} -> [] LastStmt {} -> [] ParStmt _ xs _ _ -> collectLStmtsBinders flag [s | ParStmtBlock _ ss _ _ <- xs, s <- ss] TransStmt { trS_stmts = stmts } -> collectLStmtsBinders flag stmts RecStmt { recS_stmts = L _ ss } -> collectLStmtsBinders flag ss ApplicativeStmt _ args _ -> concatMap collectArgBinders args where collectArgBinders = \case (_, ApplicativeArgOne { app_arg_pattern = pat }) -> collectPatBinders flag pat (_, ApplicativeArgMany { bv_pattern = pat }) -> collectPatBinders flag pat ----------------- Patterns -------------------------- collectPatBinders :: CollectPass p => CollectFlag p -> LPat p -> [IdP p] collectPatBinders flag pat = collect_lpat flag pat [] collectPatsBinders :: CollectPass p => CollectFlag p -> [LPat p] -> [IdP p] collectPatsBinders flag pats = foldr (collect_lpat flag) [] pats ------------- -- | Indicate if evidence binders have to be collected. -- -- This type is used as a boolean (should we collect evidence binders or not?) -- but also to pass an evidence that the AST has been typechecked when we do -- want to collect evidence binders, otherwise these binders are not available. -- -- See Note [Dictionary binders in ConPatOut] data CollectFlag p where -- | Don't collect evidence binders CollNoDictBinders :: CollectFlag p -- | Collect evidence binders CollWithDictBinders :: CollectFlag GhcTc collect_lpat :: forall p. CollectPass p => CollectFlag p -> LPat p -> [IdP p] -> [IdP p] collect_lpat flag pat bndrs = collect_pat flag (unXRec @p pat) bndrs collect_pat :: forall p. 
CollectPass p => CollectFlag p -> Pat p -> [IdP p] -> [IdP p] collect_pat flag pat bndrs = case pat of VarPat _ var -> unXRec @p var : bndrs WildPat _ -> bndrs LazyPat _ pat -> collect_lpat flag pat bndrs BangPat _ pat -> collect_lpat flag pat bndrs AsPat _ a pat -> unXRec @p a : collect_lpat flag pat bndrs ViewPat _ _ pat -> collect_lpat flag pat bndrs ParPat _ _ pat _ -> collect_lpat flag pat bndrs ListPat _ pats -> foldr (collect_lpat flag) bndrs pats TuplePat _ pats _ -> foldr (collect_lpat flag) bndrs pats SumPat _ pat _ _ -> collect_lpat flag pat bndrs LitPat _ _ -> bndrs NPat {} -> bndrs NPlusKPat _ n _ _ _ _ -> unXRec @p n : bndrs SigPat _ pat _ -> collect_lpat flag pat bndrs XPat ext -> collectXXPat @p flag ext bndrs SplicePat _ (HsSpliced _ _ (HsSplicedPat pat)) -> collect_pat flag pat bndrs SplicePat _ _ -> bndrs -- See Note [Dictionary binders in ConPatOut] ConPat {pat_args=ps} -> case flag of CollNoDictBinders -> foldr (collect_lpat flag) bndrs (hsConPatArgs ps) CollWithDictBinders -> foldr (collect_lpat flag) bndrs (hsConPatArgs ps) ++ collectEvBinders (cpt_binds (pat_con_ext pat)) collectEvBinders :: TcEvBinds -> [Id] collectEvBinders (EvBinds bs) = foldr add_ev_bndr [] bs collectEvBinders (TcEvBinds {}) = panic "ToDo: collectEvBinders" add_ev_bndr :: EvBind -> [Id] -> [Id] add_ev_bndr (EvBind { eb_lhs = b }) bs | isId b = b:bs | otherwise = bs -- A worry: what about coercion variable binders?? -- | This class specifies how to collect variable identifiers from extension patterns in the given pass. -- Consumers of the GHC API that define their own passes should feel free to implement instances in order -- to make use of functions which depend on it. -- -- In particular, Haddock already makes use of this, with an instance for its 'DocNameI' pass so that -- it can reuse the code in GHC for collecting binders. class UnXRec p => CollectPass p where collectXXPat :: CollectFlag p -> XXPat p -> [IdP p] -> [IdP p] collectXXHsBindsLR :: forall pR. 
XXHsBindsLR p pR -> [IdP p] -> [IdP p] instance IsPass p => CollectPass (GhcPass p) where collectXXPat flag ext = case ghcPass @p of GhcPs -> dataConCantHappen ext GhcRn | HsPatExpanded _ pat <- ext -> collect_pat flag pat GhcTc -> case ext of CoPat _ pat _ -> collect_pat flag pat ExpansionPat _ pat -> collect_pat flag pat collectXXHsBindsLR ext = case ghcPass @p of GhcPs -> dataConCantHappen ext GhcRn -> dataConCantHappen ext GhcTc -> case ext of AbsBinds { abs_exports = dbinds } -> (map abe_poly dbinds ++) -- I don't think we want the binders from the abe_binds -- binding (hence see AbsBinds) is in zonking in GHC.Tc.Utils.Zonk {- Note [Dictionary binders in ConPatOut] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Should we collect dictionary binders in ConPatOut? It depends! Use CollectFlag to choose. 1. Pre-typechecker there are no ConPatOuts. Use CollNoDictBinders flag. 2. In the desugarer, most of the time we don't want to collect evidence binders, so we also use CollNoDictBinders flag. Example of why it matters: In a lazy pattern, for example f ~(C x y) = ..., we want to generate bindings for x,y but not for dictionaries bound by C. (The type checker ensures they would not be used.) Here's the problem. Consider data T a where C :: Num a => a -> Int -> T a f ~(C (n+1) m) = (n,m) Here, the pattern (C (n+1)) binds a hidden dictionary (d::Num a), and *also* uses that dictionary to match the (n+1) pattern. Yet, the variables bound by the lazy pattern are n,m, *not* the dictionary d. So in mkSelectorBinds in GHC.HsToCore.Utils, we want just m,n as the variables bound. So in this case, we do *not* gather (a) dictionary and (b) dictionary bindings as binders of a ConPatOut pattern. 3. On the other hand, desugaring of arrows needs evidence bindings and uses CollWithDictBinders flag. 
Consider h :: (ArrowChoice a, Arrow a) => Int -> a (Int,Int) Int h x = proc (y,z) -> case compare x y of GT -> returnA -< z+x The type checker turns the case into case compare x y of GT { $dNum_123 = $dNum_Int } -> returnA -< (+) $dNum_123 z x That is, it attaches the $dNum_123 binding to a ConPatOut in scope. During desugaring, evidence binders must be collected because their sets are intersected with free variable sets of subsequent commands to create (minimal) command environments. Failing to do it properly leads to bugs (e.g., #18950). Note: attaching evidence binders to existing ConPatOut may be suboptimal for arrows. In the example above we would prefer to generate: case compare x y of GT -> returnA -< let $dNum_123 = $dNum_Int in (+) $dNum_123 z x So that the evidence isn't passed into the command environment. This issue doesn't arise with desugaring of non-arrow code because the simplifier can freely float and inline let-expressions created for evidence binders. But with arrow desugaring, the simplifier would have to see through the command environment tuple which is more complicated. -} hsGroupBinders :: HsGroup GhcRn -> [Name] hsGroupBinders (HsGroup { hs_valds = val_decls, hs_tyclds = tycl_decls, hs_fords = foreign_decls }) = collectHsValBinders CollNoDictBinders val_decls ++ hsTyClForeignBinders tycl_decls foreign_decls hsTyClForeignBinders :: [TyClGroup GhcRn] -> [LForeignDecl GhcRn] -> [Name] -- We need to look at instance declarations too, -- because their associated types may bind data constructors hsTyClForeignBinders tycl_decls foreign_decls = map unLoc (hsForeignDeclsBinders foreign_decls) ++ getSelectorNames (foldMap (foldMap hsLTyClDeclBinders . group_tyclds) tycl_decls `mappend` foldMap (foldMap hsLInstDeclBinders . group_instds) tycl_decls) where getSelectorNames :: ([LocatedA Name], [LFieldOcc GhcRn]) -> [Name] getSelectorNames (ns, fs) = map unLoc ns ++ map (foExt . 
unLoc) fs ------------------- hsLTyClDeclBinders :: IsPass p => LocatedA (TyClDecl (GhcPass p)) -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) -- ^ Returns all the /binding/ names of the decl. The first one is -- guaranteed to be the name of the decl. The first component -- represents all binding names except record fields; the second -- represents field occurrences. For record fields mentioned in -- multiple constructors, the SrcLoc will be from the first occurrence. -- -- Each returned (Located name) has a SrcSpan for the /whole/ declaration. -- See Note [SrcSpan for binders] hsLTyClDeclBinders (L loc (FamDecl { tcdFam = FamilyDecl { fdLName = (L _ name) } })) = ([L loc name], []) hsLTyClDeclBinders (L loc (SynDecl { tcdLName = (L _ name) })) = ([L loc name], []) hsLTyClDeclBinders (L loc (ClassDecl { tcdLName = (L _ cls_name) , tcdSigs = sigs , tcdATs = ats })) = (L loc cls_name : [ L fam_loc fam_name | (L fam_loc (FamilyDecl { fdLName = L _ fam_name })) <- ats ] ++ [ L mem_loc mem_name | (L mem_loc (ClassOpSig _ False ns _)) <- sigs , (L _ mem_name) <- ns ] , []) hsLTyClDeclBinders (L loc (DataDecl { tcdLName = (L _ name) , tcdDataDefn = defn })) = (\ (xs, ys) -> (L loc name : xs, ys)) $ hsDataDefnBinders defn ------------------- hsForeignDeclsBinders :: forall p a. (UnXRec (GhcPass p), IsSrcSpanAnn p a) => [LForeignDecl (GhcPass p)] -> [LIdP (GhcPass p)] -- ^ See Note [SrcSpan for binders] hsForeignDeclsBinders foreign_decls = [ L (noAnnSrcSpan (locA decl_loc)) n | L decl_loc (ForeignImport { fd_name = L _ n }) <- foreign_decls] ------------------- hsPatSynSelectors :: IsPass p => HsValBinds (GhcPass p) -> [FieldOcc (GhcPass p)] -- ^ Collects record pattern-synonym selectors only; the pattern synonym -- names are collected by 'collectHsValBinders'. hsPatSynSelectors (ValBinds _ _ _) = panic "hsPatSynSelectors" hsPatSynSelectors (XValBindsLR (NValBinds binds _)) = foldr addPatSynSelector [] . 
unionManyBags $ map snd binds addPatSynSelector :: forall p. UnXRec p => LHsBind p -> [FieldOcc p] -> [FieldOcc p] addPatSynSelector bind sels | PatSynBind _ (PSB { psb_args = RecCon as }) <- unXRec @p bind = map recordPatSynField as ++ sels | otherwise = sels getPatSynBinds :: forall id. UnXRec id => [(RecFlag, LHsBinds id)] -> [PatSynBind id id] getPatSynBinds binds = [ psb | (_, lbinds) <- binds , (unXRec @id -> (PatSynBind _ psb)) <- bagToList lbinds ] ------------------- hsLInstDeclBinders :: IsPass p => LInstDecl (GhcPass p) -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) hsLInstDeclBinders (L _ (ClsInstD { cid_inst = ClsInstDecl { cid_datafam_insts = dfis }})) = foldMap (hsDataFamInstBinders . unLoc) dfis hsLInstDeclBinders (L _ (DataFamInstD { dfid_inst = fi })) = hsDataFamInstBinders fi hsLInstDeclBinders (L _ (TyFamInstD {})) = mempty ------------------- -- | the 'SrcLoc' returned are for the whole declarations, not just the names hsDataFamInstBinders :: IsPass p => DataFamInstDecl (GhcPass p) -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) hsDataFamInstBinders (DataFamInstDecl { dfid_eqn = FamEqn { feqn_rhs = defn }}) = hsDataDefnBinders defn -- There can't be repeated symbols because only data instances have binders ------------------- -- | the 'SrcLoc' returned are for the whole declarations, not just the names hsDataDefnBinders :: IsPass p => HsDataDefn (GhcPass p) -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) hsDataDefnBinders (HsDataDefn { dd_cons = cons }) = hsConDeclsBinders cons -- See Note [Binders in family instances] ------------------- type Seen p = [LFieldOcc (GhcPass p)] -> [LFieldOcc (GhcPass p)] -- Filters out ones that have already been seen hsConDeclsBinders :: forall p. 
IsPass p => [LConDecl (GhcPass p)] -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) -- See hsLTyClDeclBinders for what this does -- The function is boringly complicated because of the records -- And since we only have equality, we have to be a little careful hsConDeclsBinders cons = go id cons where go :: Seen p -> [LConDecl (GhcPass p)] -> ([LocatedA (IdP (GhcPass p))], [LFieldOcc (GhcPass p)]) go _ [] = ([], []) go remSeen (r:rs) -- Don't re-mangle the location of field names, because we don't -- have a record of the full location of the field declaration anyway = let loc = getLoc r in case unLoc r of -- remove only the first occurrence of any seen field in order to -- avoid circumventing detection of duplicate fields (#9156) ConDeclGADT { con_names = names, con_g_args = args } -> (map (L loc . unLoc) names ++ ns, flds ++ fs) where (remSeen', flds) = get_flds_gadt remSeen args (ns, fs) = go remSeen' rs ConDeclH98 { con_name = name, con_args = args } -> ([L loc (unLoc name)] ++ ns, flds ++ fs) where (remSeen', flds) = get_flds_h98 remSeen args (ns, fs) = go remSeen' rs get_flds_h98 :: Seen p -> HsConDeclH98Details (GhcPass p) -> (Seen p, [LFieldOcc (GhcPass p)]) get_flds_h98 remSeen (RecCon flds) = get_flds remSeen flds get_flds_h98 remSeen _ = (remSeen, []) get_flds_gadt :: Seen p -> HsConDeclGADTDetails (GhcPass p) -> (Seen p, [LFieldOcc (GhcPass p)]) get_flds_gadt remSeen (RecConGADT flds _) = get_flds remSeen flds get_flds_gadt remSeen _ = (remSeen, []) get_flds :: Seen p -> LocatedL [LConDeclField (GhcPass p)] -> (Seen p, [LFieldOcc (GhcPass p)]) get_flds remSeen flds = (remSeen', fld_names) where fld_names = remSeen (concatMap (cd_fld_names . unLoc) (unLoc flds)) remSeen' = foldr (.) remSeen [deleteBy ((==) `on` unLoc . foLabel . 
unLoc) v | v <- fld_names] {- Note [SrcSpan for binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~ When extracting the (Located RdrNme) for a binder, at least for the main name (the TyCon of a type declaration etc), we want to give it the @SrcSpan@ of the whole /declaration/, not just the name itself (which is how it appears in the syntax tree). This SrcSpan (for the entire declaration) is used as the SrcSpan for the Name that is finally produced, and hence for error messages. (See #8607.) Note [Binders in family instances] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a type or data family instance declaration, the type constructor is an *occurrence* not a binding site type instance T Int = Int -> Int -- No binders data instance S Bool = S1 | S2 -- Binders are S1,S2 ************************************************************************ * * Collecting binders the user did not write * * ************************************************************************ The job of this family of functions is to run through binding sites and find the set of all Names that were defined "implicitly", without being explicitly written by the user. The main purpose is to find names introduced by record wildcards so that we can avoid warning the user when they don't use those names (#4404) Since the addition of -Wunused-record-wildcards, this function returns a pair of [(SrcSpan, [Name])]. Each element of the list is one set of implicit binders, the first component of the tuple is the document describes the possible fix to the problem (by removing the ..). This means there is some unfortunate coupling between this function and where it is used but it's only used for one specific purpose in one place so it seemed easier. -} lStmtsImplicits :: [LStmtLR GhcRn (GhcPass idR) (LocatedA (body (GhcPass idR)))] -> [(SrcSpan, [Name])] lStmtsImplicits = hs_lstmts where hs_lstmts :: [LStmtLR GhcRn (GhcPass idR) (LocatedA (body (GhcPass idR)))] -> [(SrcSpan, [Name])] hs_lstmts = concatMap (hs_stmt . 
unLoc) hs_stmt :: StmtLR GhcRn (GhcPass idR) (LocatedA (body (GhcPass idR))) -> [(SrcSpan, [Name])] hs_stmt (BindStmt _ pat _) = lPatImplicits pat hs_stmt (ApplicativeStmt _ args _) = concatMap do_arg args where do_arg (_, ApplicativeArgOne { app_arg_pattern = pat }) = lPatImplicits pat do_arg (_, ApplicativeArgMany { app_stmts = stmts }) = hs_lstmts stmts hs_stmt (LetStmt _ binds) = hs_local_binds binds hs_stmt (BodyStmt {}) = [] hs_stmt (LastStmt {}) = [] hs_stmt (ParStmt _ xs _ _) = hs_lstmts [s | ParStmtBlock _ ss _ _ <- xs , s <- ss] hs_stmt (TransStmt { trS_stmts = stmts }) = hs_lstmts stmts hs_stmt (RecStmt { recS_stmts = L _ ss }) = hs_lstmts ss hs_local_binds (HsValBinds _ val_binds) = hsValBindsImplicits val_binds hs_local_binds (HsIPBinds {}) = [] hs_local_binds (EmptyLocalBinds _) = [] hsValBindsImplicits :: HsValBindsLR GhcRn (GhcPass idR) -> [(SrcSpan, [Name])] hsValBindsImplicits (XValBindsLR (NValBinds binds _)) = concatMap (lhsBindsImplicits . snd) binds hsValBindsImplicits (ValBinds _ binds _) = lhsBindsImplicits binds lhsBindsImplicits :: LHsBindsLR GhcRn idR -> [(SrcSpan, [Name])] lhsBindsImplicits = foldBag (++) (lhs_bind . 
unLoc) [] where lhs_bind (PatBind { pat_lhs = lpat }) = lPatImplicits lpat lhs_bind _ = [] lPatImplicits :: LPat GhcRn -> [(SrcSpan, [Name])] lPatImplicits = hs_lpat where hs_lpat lpat = hs_pat (unLoc lpat) hs_lpats = foldr (\pat rest -> hs_lpat pat ++ rest) [] hs_pat (LazyPat _ pat) = hs_lpat pat hs_pat (BangPat _ pat) = hs_lpat pat hs_pat (AsPat _ _ pat) = hs_lpat pat hs_pat (ViewPat _ _ pat) = hs_lpat pat hs_pat (ParPat _ _ pat _) = hs_lpat pat hs_pat (ListPat _ pats) = hs_lpats pats hs_pat (TuplePat _ pats _) = hs_lpats pats hs_pat (SigPat _ pat _) = hs_lpat pat hs_pat (ConPat {pat_con=con, pat_args=ps}) = details con ps hs_pat _ = [] details :: LocatedN Name -> HsConPatDetails GhcRn -> [(SrcSpan, [Name])] details _ (PrefixCon _ ps) = hs_lpats ps details n (RecCon fs) = [(err_loc, collectPatsBinders CollNoDictBinders implicit_pats) | Just{} <- [rec_dotdot fs] ] ++ hs_lpats explicit_pats where implicit_pats = map (hfbRHS . unLoc) implicit explicit_pats = map (hfbRHS . unLoc) explicit (explicit, implicit) = partitionEithers [if pat_explicit then Left fld else Right fld | (i, fld) <- [0..] `zip` rec_flds fs , let pat_explicit = maybe True ((i<) . 
unLoc) (rec_dotdot fs)] err_loc = maybe (getLocA n) getLoc (rec_dotdot fs) details _ (InfixCon p1 p2) = hs_lpat p1 ++ hs_lpat p2 ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Errors/Ppr.hs0000644000000000000000000003601714472400112021701 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic DsMessage module GHC.HsToCore.Errors.Ppr where import GHC.Core.Predicate (isEvVar) import GHC.Core.Type import GHC.Driver.Flags import GHC.Hs import GHC.HsToCore.Errors.Types import GHC.Prelude import GHC.Types.Basic (pprRuleName) import GHC.Types.Error import GHC.Types.Id (idType) import GHC.Types.SrcLoc import GHC.Utils.Misc import GHC.Utils.Outputable import qualified GHC.LanguageExtensions as LangExt import GHC.HsToCore.Pmc.Ppr instance Diagnostic DsMessage where diagnosticMessage = \case DsUnknownMessage m -> diagnosticMessage m DsEmptyEnumeration -> mkSimpleDecorated $ text "Enumeration is empty" DsIdentitiesFound conv_fn type_of_conv -> mkSimpleDecorated $ vcat [ text "Call of" <+> ppr conv_fn <+> dcolon <+> ppr type_of_conv , nest 2 $ text "can probably be omitted" ] DsOverflowedLiterals i tc bounds _possiblyUsingNegativeLiterals -> let msg = case bounds of Nothing -> vcat [ text "Literal" <+> integer i <+> text "is negative but" <+> ppr tc <+> text "only supports positive numbers" ] Just (MinBound minB, MaxBound maxB) -> vcat [ text "Literal" <+> integer i <+> text "is out of the" <+> ppr tc <+> text "range" <+> integer minB <> text ".." 
<> integer maxB ] in mkSimpleDecorated msg DsRedundantBangPatterns ctx q -> mkSimpleDecorated $ pprEqn ctx q "has redundant bang" DsOverlappingPatterns ctx q -> mkSimpleDecorated $ pprEqn ctx q "is redundant" DsInaccessibleRhs ctx q -> mkSimpleDecorated $ pprEqn ctx q "has inaccessible right hand side" DsMaxPmCheckModelsReached limit -> mkSimpleDecorated $ vcat [ hang (text "Pattern match checker ran into -fmax-pmcheck-models=" <> int limit <> text " limit, so") 2 ( bullet <+> text "Redundant clauses might not be reported at all" $$ bullet <+> text "Redundant clauses might be reported as inaccessible" $$ bullet <+> text "Patterns reported as unmatched might actually be matched") ] DsNonExhaustivePatterns kind _flag maxPatterns vars nablas -> mkSimpleDecorated $ pprContext False kind (text "are non-exhaustive") $ \_ -> case vars of -- See #11245 [] -> text "Guards do not cover entire pattern space" _ -> let us = map (\nabla -> pprUncovered nabla vars) nablas pp_tys = pprQuotedList $ map idType vars in hang (text "Patterns of type" <+> pp_tys <+> text "not matched:") 4 (vcat (take maxPatterns us) $$ dots maxPatterns us) DsTopLevelBindsNotAllowed bindsType bind -> let desc = case bindsType of UnliftedTypeBinds -> "bindings for unlifted types" StrictBinds -> "strict bindings" in mkSimpleDecorated $ hang (text "Top-level" <+> text desc <+> text "aren't allowed:") 2 (ppr bind) DsUselessSpecialiseForClassMethodSelector poly_id -> mkSimpleDecorated $ text "Ignoring useless SPECIALISE pragma for NOINLINE function:" <+> quotes (ppr poly_id) DsUselessSpecialiseForNoInlineFunction poly_id -> mkSimpleDecorated $ text "Ignoring useless SPECIALISE pragma for NOINLINE function:" <+> quotes (ppr poly_id) DsMultiplicityCoercionsNotSupported -> mkSimpleDecorated $ text "GHC bug #19517: GHC currently does not support programs using GADTs or type families to witness equality of multiplicities" DsOrphanRule rule -> mkSimpleDecorated $ text "Orphan rule:" <+> ppr rule 
DsRuleLhsTooComplicated orig_lhs lhs2 -> mkSimpleDecorated $ hang (text "RULE left-hand side too complicated to desugar") 2 (vcat [ text "Optimised lhs:" <+> ppr lhs2 , text "Orig lhs:" <+> ppr orig_lhs]) DsRuleIgnoredDueToConstructor con -> mkSimpleDecorated $ vcat [ text "A constructor," <+> ppr con <> text ", appears as outermost match in RULE lhs." , text "This rule will be ignored." ] DsRuleBindersNotBound unbound orig_bndrs orig_lhs lhs2 -> mkSimpleDecorated $ vcat (map pp_dead unbound) where pp_dead bndr = hang (sep [ text "Forall'd" <+> pp_bndr bndr , text "is not bound in RULE lhs"]) 2 (vcat [ text "Orig bndrs:" <+> ppr orig_bndrs , text "Orig lhs:" <+> ppr orig_lhs , text "optimised lhs:" <+> ppr lhs2 ]) pp_bndr b | isTyVar b = text "type variable" <+> quotes (ppr b) | isEvVar b = text "constraint" <+> quotes (ppr (varType b)) | otherwise = text "variable" <+> quotes (ppr b) DsMultipleConForNewtype names -> mkSimpleDecorated $ text "Multiple constructors for newtype:" <+> pprQuotedList names DsLazyPatCantBindVarsOfUnliftedType unlifted_bndrs -> mkSimpleDecorated $ hang (text "A lazy (~) pattern cannot bind variables of unlifted type." 
$$ text "Unlifted variables:") 2 (vcat (map (\id -> ppr id <+> dcolon <+> ppr (idType id)) unlifted_bndrs)) DsNotYetHandledByTH reason -> case reason of ThAmbiguousRecordUpdates fld -> mkMsg "Ambiguous record updates" (ppr fld) ThAbstractClosedTypeFamily decl -> mkMsg "abstract closed type family" (ppr decl) ThForeignLabel cls -> mkMsg "Foreign label" (doubleQuotes (ppr cls)) ThForeignExport decl -> mkMsg "Foreign export" (ppr decl) ThMinimalPragmas -> mkMsg "MINIMAL pragmas" empty ThSCCPragmas -> mkMsg "SCC pragmas" empty ThNoUserInline -> mkMsg "NOUSERINLINE" empty ThExoticFormOfType ty -> mkMsg "Exotic form of type" (ppr ty) ThAmbiguousRecordSelectors e -> mkMsg "Ambiguous record selectors" (ppr e) ThMonadComprehensionSyntax e -> mkMsg "monad comprehension and [: :]" (ppr e) ThCostCentres e -> mkMsg "Cost centres" (ppr e) ThExpressionForm e -> mkMsg "Expression form" (ppr e) ThExoticStatement other -> mkMsg "Exotic statement" (ppr other) ThExoticLiteral lit -> mkMsg "Exotic literal" (ppr lit) ThExoticPattern pat -> mkMsg "Exotic pattern" (ppr pat) ThGuardedLambdas m -> mkMsg "Guarded lambdas" (pprMatch m) ThNegativeOverloadedPatterns pat -> mkMsg "Negative overloaded patterns" (ppr pat) ThHaddockDocumentation -> mkMsg "Haddock documentation" empty ThWarningAndDeprecationPragmas decl -> mkMsg "WARNING and DEPRECATION pragmas" $ text "Pragma for declaration of" <+> ppr decl ThSplicesWithinDeclBrackets -> mkMsg "Splices within declaration brackets" empty ThNonLinearDataCon -> mkMsg "Non-linear fields in data constructors" empty where mkMsg what doc = mkSimpleDecorated $ hang (text what <+> text "not (yet) handled by Template Haskell") 2 doc DsAggregatedViewExpressions views -> mkSimpleDecorated (vcat msgs) where msgs = map (\g -> text "Putting these view expressions into the same case:" <+> (ppr g)) views DsUnbangedStrictPatterns bind -> mkSimpleDecorated $ hang (text "Pattern bindings containing unlifted types should use" $$ text "an outermost bang pattern:") 2 
(ppr bind) DsCannotMixPolyAndUnliftedBindings bind -> mkSimpleDecorated $ hang (text "You can't mix polymorphic and unlifted bindings:") 2 (ppr bind) DsWrongDoBind _rhs elt_ty -> mkSimpleDecorated $ badMonadBind elt_ty DsUnusedDoBind _rhs elt_ty -> mkSimpleDecorated $ badMonadBind elt_ty DsRecBindsNotAllowedForUnliftedTys binds -> mkSimpleDecorated $ hang (text "Recursive bindings for unlifted types aren't allowed:") 2 (vcat (map ppr binds)) DsRuleMightInlineFirst rule_name lhs_id _ -> mkSimpleDecorated $ vcat [ hang (text "Rule" <+> pprRuleName rule_name <+> text "may never fire") 2 (text "because" <+> quotes (ppr lhs_id) <+> text "might inline first") ] DsAnotherRuleMightFireFirst rule_name bad_rule lhs_id -> mkSimpleDecorated $ vcat [ hang (text "Rule" <+> pprRuleName rule_name <+> text "may never fire") 2 (text "because rule" <+> pprRuleName bad_rule <+> text "for"<+> quotes (ppr lhs_id) <+> text "might fire first") ] diagnosticReason = \case DsUnknownMessage m -> diagnosticReason m DsEmptyEnumeration -> WarningWithFlag Opt_WarnEmptyEnumerations DsIdentitiesFound{} -> WarningWithFlag Opt_WarnIdentities DsOverflowedLiterals{} -> WarningWithFlag Opt_WarnOverflowedLiterals DsRedundantBangPatterns{} -> WarningWithFlag Opt_WarnRedundantBangPatterns DsOverlappingPatterns{} -> WarningWithFlag Opt_WarnOverlappingPatterns DsInaccessibleRhs{} -> WarningWithFlag Opt_WarnOverlappingPatterns DsMaxPmCheckModelsReached{} -> WarningWithoutFlag DsNonExhaustivePatterns _ (ExhaustivityCheckType mb_flag) _ _ _ -> maybe WarningWithoutFlag WarningWithFlag mb_flag DsTopLevelBindsNotAllowed{} -> ErrorWithoutFlag DsUselessSpecialiseForClassMethodSelector{} -> WarningWithoutFlag DsUselessSpecialiseForNoInlineFunction{} -> WarningWithoutFlag DsMultiplicityCoercionsNotSupported{} -> ErrorWithoutFlag DsOrphanRule{} -> WarningWithFlag Opt_WarnOrphans DsRuleLhsTooComplicated{} -> WarningWithoutFlag DsRuleIgnoredDueToConstructor{} -> WarningWithoutFlag DsRuleBindersNotBound{} -> 
WarningWithoutFlag DsMultipleConForNewtype{} -> ErrorWithoutFlag DsLazyPatCantBindVarsOfUnliftedType{} -> ErrorWithoutFlag DsNotYetHandledByTH{} -> ErrorWithoutFlag DsAggregatedViewExpressions{} -> WarningWithoutFlag DsUnbangedStrictPatterns{} -> WarningWithFlag Opt_WarnUnbangedStrictPatterns DsCannotMixPolyAndUnliftedBindings{} -> ErrorWithoutFlag DsWrongDoBind{} -> WarningWithFlag Opt_WarnWrongDoBind DsUnusedDoBind{} -> WarningWithFlag Opt_WarnUnusedDoBind DsRecBindsNotAllowedForUnliftedTys{} -> ErrorWithoutFlag DsRuleMightInlineFirst{} -> WarningWithFlag Opt_WarnInlineRuleShadowing DsAnotherRuleMightFireFirst{} -> WarningWithFlag Opt_WarnInlineRuleShadowing diagnosticHints = \case DsUnknownMessage m -> diagnosticHints m DsEmptyEnumeration -> noHints DsIdentitiesFound{} -> noHints DsOverflowedLiterals i _tc bounds usingNegLiterals -> case (bounds, usingNegLiterals) of (Just (MinBound minB, MaxBound _), NotUsingNegLiterals) | minB == -i -- Note [Suggest NegativeLiterals] , i > 0 -> [ suggestExtensionWithInfo (text "If you are trying to write a large negative literal") LangExt.NegativeLiterals ] _ -> noHints DsRedundantBangPatterns{} -> noHints DsOverlappingPatterns{} -> noHints DsInaccessibleRhs{} -> noHints DsMaxPmCheckModelsReached{} -> [SuggestIncreaseMaxPmCheckModels] DsNonExhaustivePatterns{} -> noHints DsTopLevelBindsNotAllowed{} -> noHints DsUselessSpecialiseForClassMethodSelector{} -> noHints DsUselessSpecialiseForNoInlineFunction{} -> noHints DsMultiplicityCoercionsNotSupported -> noHints DsOrphanRule{} -> noHints DsRuleLhsTooComplicated{} -> noHints DsRuleIgnoredDueToConstructor{} -> noHints DsRuleBindersNotBound{} -> noHints DsMultipleConForNewtype{} -> noHints DsLazyPatCantBindVarsOfUnliftedType{} -> noHints DsNotYetHandledByTH{} -> noHints DsAggregatedViewExpressions{} -> noHints DsUnbangedStrictPatterns{} -> noHints DsCannotMixPolyAndUnliftedBindings{} -> [SuggestAddTypeSignatures UnnamedBinding] DsWrongDoBind rhs _ -> [SuggestBindToWildcard rhs] 
DsUnusedDoBind rhs _ -> [SuggestBindToWildcard rhs] DsRecBindsNotAllowedForUnliftedTys{} -> noHints DsRuleMightInlineFirst _ lhs_id rule_act -> [SuggestAddInlineOrNoInlinePragma lhs_id rule_act] DsAnotherRuleMightFireFirst _ bad_rule _ -> [SuggestAddPhaseToCompetingRule bad_rule] {- Note [Suggest NegativeLiterals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you write x :: Int8 x = -128 it'll parse as (negate 128), and overflow. In this case, suggest NegativeLiterals. We get an erroneous suggestion for x = 128 but perhaps that does not matter too much. -} -- -- Helper functions -- badMonadBind :: Type -> SDoc badMonadBind elt_ty = hang (text "A do-notation statement discarded a result of type") 2 (quotes (ppr elt_ty)) -- Print a single clause (for redundant/with-inaccessible-rhs) pprEqn :: HsMatchContext GhcRn -> SDoc -> String -> SDoc pprEqn ctx q txt = pprContext True ctx (text txt) $ \f -> f (q <+> matchSeparator ctx <+> text "...") pprContext :: Bool -> HsMatchContext GhcRn -> SDoc -> ((SDoc -> SDoc) -> SDoc) -> SDoc pprContext singular kind msg rest_of_msg_fun = vcat [text txt <+> msg, sep [ text "In" <+> ppr_match <> char ':' , nest 4 (rest_of_msg_fun pref)]] where txt | singular = "Pattern match" | otherwise = "Pattern match(es)" (ppr_match, pref) = case kind of FunRhs { mc_fun = L _ fun } -> (pprMatchContext kind, \ pp -> ppr fun <+> pp) _ -> (pprMatchContext kind, \ pp -> pp) dots :: Int -> [a] -> SDoc dots maxPatterns qs | qs `lengthExceeds` maxPatterns = text "..." 
| otherwise = empty ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Errors/Types.hs0000644000000000000000000001322414472400112022237 0ustar0000000000000000{-# LANGUAGE ExistentialQuantification #-} module GHC.HsToCore.Errors.Types where import Data.Typeable import GHC.Prelude import GHC.Core (CoreRule, CoreExpr, RuleName) import GHC.Core.DataCon import GHC.Core.Type import GHC.Driver.Session import GHC.Hs import GHC.HsToCore.Pmc.Solver.Types import GHC.Types.Basic (Activation) import GHC.Types.Error import GHC.Types.ForeignCall import GHC.Types.Id import GHC.Types.Name (Name) import qualified GHC.LanguageExtensions as LangExt newtype MinBound = MinBound Integer newtype MaxBound = MaxBound Integer type MaxUncoveredPatterns = Int type MaxPmCheckModels = Int -- | Diagnostics messages emitted during desugaring. data DsMessage -- | Simply wraps a generic 'Diagnostic' message. = forall a. (Diagnostic a, Typeable a) => DsUnknownMessage a {-| DsEmptyEnumeration is a warning (controlled by the -Wempty-enumerations flag) that is emitted if an enumeration is empty. Example(s): main :: IO () main = do let enum = [5 .. 3] print enum Here 'enum' would yield an empty list, because 5 is greater than 3. Test case(s): warnings/should_compile/T10930 warnings/should_compile/T18402 warnings/should_compile/T10930b numeric/should_compile/T10929 numeric/should_compile/T7881 deSugar/should_run/T18172 -} | DsEmptyEnumeration {-| DsIdentitiesFound is a warning (controlled by the -Widentities flag) that is emitted on uses of Prelude numeric conversions that are probably the identity (and hence could be omitted). Example(s): main :: IO () main = do let x = 10 print $ conv 10 where conv :: Int -> Int conv x = fromIntegral x Here calling 'conv' is essentially the identity function, and therefore can be omitted. 
Test case(s): deSugar/should_compile/T4488 -} | DsIdentitiesFound !Id -- The conversion function !Type -- The type of conversion | DsOverflowedLiterals !Integer !Name !(Maybe (MinBound, MaxBound)) !NegLiteralExtEnabled -- FIXME(adn) Use a proper type instead of 'SDoc', but unfortunately -- 'SrcInfo' gives us an 'SDoc' to begin with. | DsRedundantBangPatterns !(HsMatchContext GhcRn) !SDoc -- FIXME(adn) Use a proper type instead of 'SDoc', but unfortunately -- 'SrcInfo' gives us an 'SDoc' to begin with. | DsOverlappingPatterns !(HsMatchContext GhcRn) !SDoc -- FIXME(adn) Use a proper type instead of 'SDoc' | DsInaccessibleRhs !(HsMatchContext GhcRn) !SDoc | DsMaxPmCheckModelsReached !MaxPmCheckModels | DsNonExhaustivePatterns !(HsMatchContext GhcRn) !ExhaustivityCheckType !MaxUncoveredPatterns [Id] [Nabla] | DsTopLevelBindsNotAllowed !BindsType !(HsBindLR GhcTc GhcTc) | DsUselessSpecialiseForClassMethodSelector !Id | DsUselessSpecialiseForNoInlineFunction !Id | DsMultiplicityCoercionsNotSupported | DsOrphanRule !CoreRule | DsRuleLhsTooComplicated !CoreExpr !CoreExpr | DsRuleIgnoredDueToConstructor !DataCon | DsRuleBindersNotBound ![Var] -- ^ The list of unbound binders ![Var] -- ^ The original binders !CoreExpr -- ^ The original LHS !CoreExpr -- ^ The optimised LHS | DsMultipleConForNewtype [LocatedN Name] | DsLazyPatCantBindVarsOfUnliftedType [Var] | DsNotYetHandledByTH !ThRejectionReason | DsAggregatedViewExpressions [[LHsExpr GhcTc]] | DsUnbangedStrictPatterns !(HsBindLR GhcTc GhcTc) | DsCannotMixPolyAndUnliftedBindings !(HsBindLR GhcTc GhcTc) | DsWrongDoBind !(LHsExpr GhcTc) !Type | DsUnusedDoBind !(LHsExpr GhcTc) !Type | DsRecBindsNotAllowedForUnliftedTys ![LHsBindLR GhcTc GhcTc] | DsRuleMightInlineFirst !RuleName !Var !Activation | DsAnotherRuleMightFireFirst !RuleName !RuleName -- the \"bad\" rule !Var -- The positional number of the argument for an expression (first, second, third, etc) newtype DsArgNum = DsArgNum Int -- | Why TemplateHaskell rejected the 
splice. Used in the 'DsNotYetHandledByTH' -- constructor of a 'DsMessage'. data ThRejectionReason = ThAmbiguousRecordUpdates !(HsRecUpdField GhcRn) | ThAbstractClosedTypeFamily !(LFamilyDecl GhcRn) | ThForeignLabel !CLabelString | ThForeignExport !(LForeignDecl GhcRn) | ThMinimalPragmas | ThSCCPragmas | ThNoUserInline | ThExoticFormOfType !(HsType GhcRn) | ThAmbiguousRecordSelectors !(HsExpr GhcRn) | ThMonadComprehensionSyntax !(HsExpr GhcRn) | ThCostCentres !(HsExpr GhcRn) | ThExpressionForm !(HsExpr GhcRn) | ThExoticStatement [Stmt GhcRn (LHsExpr GhcRn)] | ThExoticLiteral !(HsLit GhcRn) | ThExoticPattern !(Pat GhcRn) | ThGuardedLambdas !(Match GhcRn (LHsExpr GhcRn)) | ThNegativeOverloadedPatterns !(Pat GhcRn) | ThHaddockDocumentation | ThWarningAndDeprecationPragmas [LIdP GhcRn] | ThSplicesWithinDeclBrackets | ThNonLinearDataCon data NegLiteralExtEnabled = YesUsingNegLiterals | NotUsingNegLiterals negLiteralExtEnabled :: DynFlags -> NegLiteralExtEnabled negLiteralExtEnabled dflags = if (xopt LangExt.NegativeLiterals dflags) then YesUsingNegLiterals else NotUsingNegLiterals newtype ExhaustivityCheckType = ExhaustivityCheckType (Maybe WarningFlag) data BindsType = UnliftedTypeBinds | StrictBinds ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Pmc/Ppr.hs0000644000000000000000000001660014472400112021140 0ustar0000000000000000 {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Provides factilities for pretty-printing 'Nabla's in a way appropriate for -- user facing pattern match warnings. 
module GHC.HsToCore.Pmc.Ppr ( pprUncovered ) where import GHC.Prelude import GHC.Types.Basic import GHC.Types.Id import GHC.Types.Var.Env import GHC.Types.Unique.DFM import GHC.Core.ConLike import GHC.Core.DataCon import GHC.Builtin.Types import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Control.Monad.Trans.RWS.CPS import GHC.Data.Maybe import Data.List.NonEmpty (NonEmpty, nonEmpty, toList) import GHC.HsToCore.Pmc.Types -- | Pretty-print the guts of an uncovered value vector abstraction, i.e., its -- components and refutable shapes associated to any mentioned variables. -- -- Example for @([Just p, q], [p :-> [3,4], q :-> [0,5]])@: -- -- @ -- (Just p) q -- where p is not one of {3, 4} -- q is not one of {0, 5} -- @ -- -- When the set of refutable shapes contains more than 3 elements, the -- additional elements are indicated by "...". pprUncovered :: Nabla -> [Id] -> SDoc pprUncovered nabla vas | isNullUDFM refuts = fsep vec -- there are no refutations | otherwise = hang (fsep vec) 4 $ text "where" <+> vcat (map (pprRefutableShapes . snd) (udfmToList refuts)) where init_prec -- No outer parentheses when it's a unary pattern by assuming lowest -- precedence | [_] <- vas = topPrec | otherwise = appPrec ppr_action = mapM (pprPmVar init_prec) vas (vec, renamings) = runPmPpr nabla ppr_action refuts = prettifyRefuts nabla renamings -- | Output refutable shapes of a variable in the form of @var is not one of {2, -- Nothing, 3}@. Will never print more than 3 refutable shapes, the tail is -- indicated by an ellipsis. pprRefutableShapes :: (SDoc,[PmAltCon]) -> SDoc pprRefutableShapes (var, alts) = var <+> text "is not one of" <+> format_alts alts where format_alts = braces . fsep . punctuate comma . shorten . map ppr_alt shorten (a:b:c:_:_) = a:b:c:[text "..."] shorten xs = xs ppr_alt (PmAltConLike cl) = ppr cl ppr_alt (PmAltLit lit) = ppr lit {- 1. 
Literals ~~~~~~~~~~~~~~ Starting with a function definition like: f :: Int -> Bool f 5 = True f 6 = True The uncovered set looks like: { var |> var /= 5, var /= 6 } Yet, we would like to print this nicely as follows: x , where x not one of {5,6} Since these variables will be shown to the programmer, we give them better names (t1, t2, ..) in 'prettifyRefuts', hence the SDoc in 'PrettyPmRefutEnv'. 2. Residual Constraints ~~~~~~~~~~~~~~~~~~~~~~~ Unhandled constraints that refer to HsExpr are typically ignored by the solver (it does not even substitute in HsExpr so they are even printed as wildcards). Additionally, the oracle returns a substitution if it succeeds so we apply this substitution to the vectors before printing them out (see function `pprOne' in "GHC.HsToCore.Pmc") to be more precise. -} -- | Extract and assigns pretty names to constraint variables with refutable -- shapes. prettifyRefuts :: Nabla -> DIdEnv (Id, SDoc) -> DIdEnv (SDoc, [PmAltCon]) prettifyRefuts nabla = listToUDFM_Directly . map attach_refuts . udfmToList where attach_refuts (u, (x, sdoc)) = (u, (sdoc, lookupRefuts nabla x)) type PmPprM a = RWS Nabla () (DIdEnv (Id, SDoc), [SDoc]) a -- Try nice names p,q,r,s,t before using the (ugly) t_i nameList :: [SDoc] nameList = map text ["p","q","r","s","t"] ++ [ text ('t':show u) | u <- [(0 :: Int)..] ] runPmPpr :: Nabla -> PmPprM a -> (a, DIdEnv (Id, SDoc)) runPmPpr nabla m = case runRWS m nabla (emptyDVarEnv, nameList) of (a, (renamings, _), _) -> (a, renamings) -- | Allocates a new, clean name for the given 'Id' if it doesn't already have -- one. 
getCleanName :: Id -> PmPprM SDoc getCleanName x = do (renamings, name_supply) <- get let (clean_name:name_supply') = name_supply case lookupDVarEnv renamings x of Just (_, nm) -> pure nm Nothing -> do put (extendDVarEnv renamings x (x, clean_name), name_supply') pure clean_name checkRefuts :: Id -> PmPprM (Maybe SDoc) -- the clean name if it has negative info attached checkRefuts x = do nabla <- ask case lookupRefuts nabla x of [] -> pure Nothing -- Will just be a wildcard later on _ -> Just <$> getCleanName x -- | Pretty print a variable, but remember to prettify the names of the variables -- that refer to neg-literals. The ones that cannot be shown are printed as -- underscores. pprPmVar :: PprPrec -> Id -> PmPprM SDoc pprPmVar prec x = do nabla <- ask case lookupSolution nabla x of Just (PACA alt _tvs args) -> pprPmAltCon prec alt args Nothing -> fromMaybe underscore <$> checkRefuts x pprPmAltCon :: PprPrec -> PmAltCon -> [Id] -> PmPprM SDoc pprPmAltCon _prec (PmAltLit l) _ = pure (ppr l) pprPmAltCon prec (PmAltConLike cl) args = do nabla <- ask pprConLike nabla prec cl args pprConLike :: Nabla -> PprPrec -> ConLike -> [Id] -> PmPprM SDoc pprConLike nabla _prec cl args | Just pm_expr_list <- pmExprAsList nabla (PmAltConLike cl) args = case pm_expr_list of NilTerminated list -> brackets . fsep . punctuate comma <$> mapM (pprPmVar appPrec) list WcVarTerminated pref x -> parens . fcat . punctuate colon <$> mapM (pprPmVar appPrec) (toList pref ++ [x]) pprConLike _nabla _prec (RealDataCon con) args | isUnboxedTupleDataCon con , let hash_parens doc = text "(#" <+> doc <+> text "#)" = hash_parens . fsep . punctuate comma <$> mapM (pprPmVar appPrec) args | isTupleDataCon con = parens . fsep . 
punctuate comma <$> mapM (pprPmVar appPrec) args pprConLike _nabla prec cl args | conLikeIsInfix cl = case args of [x, y] -> do x' <- pprPmVar funPrec x y' <- pprPmVar funPrec y return (cparen (prec > opPrec) (x' <+> ppr cl <+> y')) -- can it be infix but have more than two arguments? list -> pprPanic "pprConLike:" (ppr list) | null args = return (ppr cl) | otherwise = do args' <- mapM (pprPmVar appPrec) args return (cparen (prec > funPrec) (fsep (ppr cl : args'))) -- | The result of 'pmExprAsList'. data PmExprList = NilTerminated [Id] | WcVarTerminated (NonEmpty Id) Id -- | Extract a list of 'Id's out of a sequence of cons cells, optionally -- terminated by a wildcard variable instead of @[]@. Some examples: -- -- * @pmExprAsList (1:2:[]) == Just ('NilTerminated' [1,2])@, a regular, -- @[]@-terminated list. Should be pretty-printed as @[1,2]@. -- * @pmExprAsList (1:2:x) == Just ('WcVarTerminated' [1,2] x)@, a list prefix -- ending in a wildcard variable x (of list type). Should be pretty-printed as -- (1:2:_). -- * @pmExprAsList [] == Just ('NilTerminated' [])@ pmExprAsList :: Nabla -> PmAltCon -> [Id] -> Maybe PmExprList pmExprAsList nabla = go_con [] where go_var rev_pref x | Just (PACA alt _tvs args) <- lookupSolution nabla x = go_con rev_pref alt args go_var rev_pref x | Just pref <- nonEmpty (reverse rev_pref) = Just (WcVarTerminated pref x) go_var _ _ = Nothing go_con rev_pref (PmAltConLike (RealDataCon c)) es | c == nilDataCon = assert (null es) $ Just (NilTerminated (reverse rev_pref)) | c == consDataCon = assert (length es == 2) $ go_var (es !! 0 : rev_pref) (es !! 1) go_con _ _ _ = Nothing ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Pmc/Solver/Types.hs0000644000000000000000000007437314472400112022770 0ustar0000000000000000{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE MultiWayIf #-} -- | Domain types used in "GHC.HsToCore.Pmc.Solver". 
-- The ultimate goal is to define 'Nabla', which models normalised refinement -- types from the paper -- [Lower Your Guards: A Compositional Pattern-Match Coverage Checker"](https://dl.acm.org/doi/abs/10.1145/3408989). module GHC.HsToCore.Pmc.Solver.Types ( -- * Normalised refinement types BotInfo(..), PmAltConApp(..), VarInfo(..), TmState(..), TyState(..), Nabla(..), Nablas(..), initNablas, lookupRefuts, lookupSolution, -- ** Looking up 'VarInfo' lookupVarInfo, lookupVarInfoNT, trvVarInfo, -- ** Caching residual COMPLETE sets CompleteMatch, ResidualCompleteMatches(..), getRcm, isRcmInitialised, -- ** Representations for Literals and AltCons PmLit(..), PmLitValue(..), PmAltCon(..), pmLitType, pmAltConType, isPmAltConMatchStrict, pmAltConImplBangs, -- *** PmAltConSet PmAltConSet, emptyPmAltConSet, isEmptyPmAltConSet, elemPmAltConSet, extendPmAltConSet, pmAltConSetElems, -- *** Equality on 'PmAltCon's PmEquality(..), eqPmAltCon, -- *** Operations on 'PmLit' literalToPmLit, negatePmLit, overloadPmLit, pmLitAsStringLit, coreExprAsPmLit ) where import GHC.Prelude import GHC.Data.Bag import GHC.Data.FastString import GHC.Types.Id import GHC.Types.Var.Set import GHC.Types.Unique.DSet import GHC.Types.Unique.SDFM import GHC.Types.Name import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import GHC.Utils.Misc (lastMaybe) import GHC.Data.List.SetOps (unionLists) import GHC.Data.Maybe import GHC.Core.Type import GHC.Core.TyCon import GHC.Types.Literal import GHC.Core import GHC.Core.Map.Expr import GHC.Core.Utils (exprType) import GHC.Builtin.Names import GHC.Builtin.Types import GHC.Builtin.Types.Prim import GHC.Tc.Solver.InertSet (InertSet, emptyInert) import GHC.Tc.Utils.TcType (isStringTy) import GHC.Types.CompleteMatch (CompleteMatch(..)) import GHC.Types.SourceText (SourceText(..), mkFractionalLit, FractionalLit , fractionalLitFromRational , FractionalExponentBase(..)) import Numeric (fromRat) import Data.Foldable 
(find) import Data.Ratio import GHC.Real (Ratio(..)) import qualified Data.Semigroup as Semi -- import GHC.Driver.Ppr -- -- * Normalised refinement types -- -- | A normalised refinement type ∇ (\"nabla\"), comprised of an inert set of -- canonical (i.e. mutually compatible) term and type constraints that form the -- refinement type's predicate. data Nabla = MkNabla { nabla_ty_st :: !TyState -- ^ Type oracle; things like a~Int , nabla_tm_st :: !TmState -- ^ Term oracle; things like x~Nothing } -- | An initial nabla that is always satisfiable initNabla :: Nabla initNabla = MkNabla initTyState initTmState instance Outputable Nabla where ppr nabla = hang (text "Nabla") 2 $ vcat [ -- intentionally formatted this way enable the dev to comment in only -- the info they need ppr (nabla_tm_st nabla), ppr (nabla_ty_st nabla) ] -- | A disjunctive bag of 'Nabla's, representing a refinement type. newtype Nablas = MkNablas (Bag Nabla) initNablas :: Nablas initNablas = MkNablas (unitBag initNabla) instance Outputable Nablas where ppr (MkNablas nablas) = ppr nablas instance Semigroup Nablas where MkNablas l <> MkNablas r = MkNablas (l `unionBags` r) instance Monoid Nablas where mempty = MkNablas emptyBag -- | The type oracle state. An 'GHC.Tc.Solver.Monad.InertSet' that we -- incrementally add local type constraints to, together with a sequence -- number that counts the number of times we extended it with new facts. data TyState = TySt { ty_st_n :: !Int, ty_st_inert :: !InertSet } -- | Not user-facing. instance Outputable TyState where ppr (TySt n inert) = ppr n <+> ppr inert initTyState :: TyState initTyState = TySt 0 emptyInert -- | The term oracle state. Stores 'VarInfo' for encountered 'Id's. These -- entries are possibly shared when we figure out that two variables must be -- equal, thus represent the same set of values. -- -- See Note [TmState invariants] in "GHC.HsToCore.Pmc.Solver". data TmState = TmSt { ts_facts :: !(UniqSDFM Id VarInfo) -- ^ Facts about term variables. 
Deterministic env, so that we generate -- deterministic error messages. , ts_reps :: !(CoreMap Id) -- ^ An environment for looking up whether we already encountered semantically -- equivalent expressions that we want to represent by the same 'Id' -- representative. , ts_dirty :: !DIdSet -- ^ Which 'VarInfo' needs to be checked for inhabitants because of new -- negative constraints (e.g. @x ≁ ⊥@ or @x ≁ K@). } -- | Information about an 'Id'. Stores positive ('vi_pos') facts, like @x ~ Just 42@, -- and negative ('vi_neg') facts, like "x is not (:)". -- Also caches the type ('vi_ty'), the 'ResidualCompleteMatches' of a COMPLETE set -- ('vi_rcm'). -- -- Subject to Note [The Pos/Neg invariant] in "GHC.HsToCore.Pmc.Solver". data VarInfo = VI { vi_id :: !Id -- ^ The 'Id' in question. Important for adding new constraints relative to -- this 'VarInfo' when we don't easily have the 'Id' available. , vi_pos :: ![PmAltConApp] -- ^ Positive info: 'PmAltCon' apps it is (i.e. @x ~ [Just y, PatSyn z]@), all -- at the same time (i.e. conjunctive). We need a list because of nested -- pattern matches involving pattern synonym -- case x of { Just y -> case x of PatSyn z -> ... } -- However, no more than one RealDataCon in the list, otherwise contradiction -- because of generativity. , vi_neg :: !PmAltConSet -- ^ Negative info: A list of 'PmAltCon's that it cannot match. -- Example, assuming -- -- @ -- data T = Leaf Int | Branch T T | Node Int T -- @ -- -- then @x ≁ [Leaf, Node]@ means that @x@ cannot match a @Leaf@ or @Node@, -- and hence can only match @Branch@. Is orthogonal to anything from 'vi_pos', -- in the sense that 'eqPmAltCon' returns @PossiblyOverlap@ for any pairing -- between 'vi_pos' and 'vi_neg'. -- See Note [Why record both positive and negative info?] -- It's worth having an actual set rather than a simple association list, -- because files like Cabal's `LicenseId` define relatively huge enums -- that lead to quadratic or worse behavior. 
, vi_bot :: BotInfo -- ^ Can this variable be ⊥? Models (mutually contradicting) @x ~ ⊥@ and -- @x ≁ ⊥@ constraints. E.g. -- * 'MaybeBot': Don't know; Neither @x ~ ⊥@ nor @x ≁ ⊥@. -- * 'IsBot': @x ~ ⊥@ -- * 'IsNotBot': @x ≁ ⊥@ , vi_rcm :: !ResidualCompleteMatches -- ^ A cache of the associated COMPLETE sets. At any time a superset of -- possible constructors of each COMPLETE set. So, if it's not in here, we -- can't possibly match on it. Complementary to 'vi_neg'. We still need it -- to recognise completion of a COMPLETE set efficiently for large enums. } data PmAltConApp = PACA { paca_con :: !PmAltCon , paca_tvs :: ![TyVar] , paca_ids :: ![Id] } -- | See 'vi_bot'. data BotInfo = IsBot | IsNotBot | MaybeBot deriving Eq instance Outputable PmAltConApp where ppr PACA{paca_con = con, paca_tvs = tvs, paca_ids = ids} = hsep (ppr con : map ((char '@' <>) . ppr) tvs ++ map ppr ids) instance Outputable BotInfo where ppr MaybeBot = underscore ppr IsBot = text "~⊥" ppr IsNotBot = text "≁⊥" -- | Not user-facing. instance Outputable TmState where ppr (TmSt state reps dirty) = ppr state $$ ppr reps $$ ppr dirty -- | Not user-facing. instance Outputable VarInfo where ppr (VI x pos neg bot cache) = braces (hcat (punctuate comma [pp_x, pp_pos, pp_neg, ppr bot, pp_cache])) where pp_x = ppr x <> dcolon <> ppr (idType x) pp_pos | [] <- pos = underscore | [p] <- pos = char '~' <> ppr p -- suppress outer [_] if singleton | otherwise = char '~' <> ppr pos pp_neg | isEmptyPmAltConSet neg = underscore | otherwise = char '≁' <> ppr neg pp_cache | RCM Nothing Nothing <- cache = underscore | otherwise = ppr cache -- | Initial state of the term oracle. initTmState :: TmState initTmState = TmSt emptyUSDFM emptyCoreMap emptyDVarSet -- | A data type that caches for the 'VarInfo' of @x@ the results of querying -- 'dsGetCompleteMatches' and then striking out all occurrences of @K@ for -- which we already know @x ≁ K@ from these sets. -- -- For motivation, see Section 5.3 in Lower Your Guards. 
-- See also Note [Implementation of COMPLETE pragmas] data ResidualCompleteMatches = RCM { rcm_vanilla :: !(Maybe CompleteMatch) -- ^ The residual set for the vanilla COMPLETE set from the data defn. -- Tracked separately from 'rcm_pragmas', because it might only be -- known much later (when we have enough type information to see the 'TyCon' -- of the match), or not at all even. Until that happens, it is 'Nothing'. , rcm_pragmas :: !(Maybe [CompleteMatch]) -- ^ The residual sets for /all/ COMPLETE sets from pragmas that are -- visible when compiling this module. Querying that set with -- 'dsGetCompleteMatches' requires 'DsM', so we initialise it with 'Nothing' -- until first needed in a 'DsM' context. } getRcm :: ResidualCompleteMatches -> [CompleteMatch] getRcm (RCM vanilla pragmas) = maybeToList vanilla ++ fromMaybe [] pragmas isRcmInitialised :: ResidualCompleteMatches -> Bool isRcmInitialised (RCM vanilla pragmas) = isJust vanilla && isJust pragmas instance Outputable ResidualCompleteMatches where -- formats as "[{Nothing,Just},{P,Q}]" ppr rcm = ppr (getRcm rcm) ----------------------- -- * Looking up VarInfo emptyRCM :: ResidualCompleteMatches emptyRCM = RCM Nothing Nothing emptyVarInfo :: Id -> VarInfo emptyVarInfo x = VI { vi_id = x , vi_pos = [] , vi_neg = emptyPmAltConSet -- Why not set IsNotBot for unlifted type here? -- Because we'd have to trigger an inhabitation test, which we can't. -- See case (4) in Note [Strict fields and variables of unlifted type] -- in GHC.HsToCore.Pmc.Solver , vi_bot = MaybeBot , vi_rcm = emptyRCM } lookupVarInfo :: TmState -> Id -> VarInfo -- (lookupVarInfo tms x) tells what we know about 'x' lookupVarInfo (TmSt env _ _) x = fromMaybe (emptyVarInfo x) (lookupUSDFM env x) -- | Like @lookupVarInfo ts x@, but @lookupVarInfo ts x = (y, vi)@ also looks -- through newtype constructors. We have @x ~ N1 (... (Nk y))@ such that the -- returned @y@ doesn't have a positive newtype constructor constraint -- associated with it (yet). 
The 'VarInfo' returned is that of @y@'s -- representative. -- -- Careful, this means that @idType x@ might be different to @idType y@, even -- modulo type normalisation! -- -- See also Note [Coverage checking Newtype matches] in GHC.HsToCore.Pmc.Solver. lookupVarInfoNT :: TmState -> Id -> (Id, VarInfo) lookupVarInfoNT ts x = case lookupVarInfo ts x of VI{ vi_pos = as_newtype -> Just y } -> lookupVarInfoNT ts y res -> (x, res) where as_newtype = listToMaybe . mapMaybe go go PACA{paca_con = PmAltConLike (RealDataCon dc), paca_ids = [y]} | isNewDataCon dc = Just y go _ = Nothing trvVarInfo :: Functor f => (VarInfo -> f (a, VarInfo)) -> Nabla -> Id -> f (a, Nabla) trvVarInfo f nabla@MkNabla{ nabla_tm_st = ts@TmSt{ts_facts = env} } x = set_vi <$> f (lookupVarInfo ts x) where set_vi (a, vi') = (a, nabla{ nabla_tm_st = ts{ ts_facts = addToUSDFM env (vi_id vi') vi' } }) ------------------------------------------------ -- * Exported utility functions querying 'Nabla' lookupRefuts :: Nabla -> Id -> [PmAltCon] -- Unfortunately we need the extra bit of polymorphism and the unfortunate -- duplication of lookupVarInfo here. lookupRefuts MkNabla{ nabla_tm_st = ts } x = pmAltConSetElems $ vi_neg $ lookupVarInfo ts x isDataConSolution :: PmAltConApp -> Bool isDataConSolution PACA{paca_con = PmAltConLike (RealDataCon _)} = True isDataConSolution _ = False -- @lookupSolution nabla x@ picks a single solution ('vi_pos') of @x@ from -- possibly many, preferring 'RealDataCon' solutions whenever possible. lookupSolution :: Nabla -> Id -> Maybe PmAltConApp lookupSolution nabla x = case vi_pos (lookupVarInfo (nabla_tm_st nabla) x) of [] -> Nothing pos | Just sol <- find isDataConSolution pos -> Just sol | otherwise -> Just (head pos) -------------------------------------------------------------------------------- -- The rest is just providing an IR for (overloaded!) literals and AltCons that -- sits between Hs and Core. 
We need a reliable way to detect and determine -- equality between them, which is impossible with Hs (too expressive) and with -- Core (no notion of overloaded literals, and even plain 'Int' literals are -- actually constructor apps). Also String literals are troublesome. -- | Literals (simple and overloaded ones) for pattern match checking. -- -- See Note [Undecidable Equality for PmAltCons] data PmLit = PmLit { pm_lit_ty :: Type , pm_lit_val :: PmLitValue } data PmLitValue = PmLitInt Integer | PmLitRat Rational | PmLitChar Char -- We won't actually see PmLitString in the oracle since we desugar strings to -- lists | PmLitString FastString | PmLitOverInt Int {- How often Negated? -} Integer | PmLitOverRat Int {- How often Negated? -} FractionalLit | PmLitOverString FastString -- | Undecidable semantic equality result. -- See Note [Undecidable Equality for PmAltCons] data PmEquality = Equal | Disjoint | PossiblyOverlap deriving (Eq, Show) -- | When 'PmEquality' can be decided. @True <=> Equal@, @False <=> Disjoint@. decEquality :: Bool -> PmEquality decEquality True = Equal decEquality False = Disjoint -- | Undecidable equality for values represented by 'PmLit's. -- See Note [Undecidable Equality for PmAltCons] -- -- * @Just True@ ==> Surely equal -- * @Just False@ ==> Surely different (non-overlapping, even!) 
-- * @Nothing@ ==> Equality relation undecidable eqPmLit :: PmLit -> PmLit -> PmEquality eqPmLit (PmLit t1 v1) (PmLit t2 v2) -- no haddock | pprTrace "eqPmLit" (ppr t1 <+> ppr v1 $$ ppr t2 <+> ppr v2) False = undefined | not (t1 `eqType` t2) = Disjoint | otherwise = go v1 v2 where go (PmLitInt i1) (PmLitInt i2) = decEquality (i1 == i2) go (PmLitRat r1) (PmLitRat r2) = decEquality (r1 == r2) go (PmLitChar c1) (PmLitChar c2) = decEquality (c1 == c2) go (PmLitString s1) (PmLitString s2) = decEquality (s1 == s2) go (PmLitOverInt n1 i1) (PmLitOverInt n2 i2) | n1 == n2 && i1 == i2 = Equal go (PmLitOverRat n1 r1) (PmLitOverRat n2 r2) | n1 == n2 && r1 == r2 = Equal go (PmLitOverString s1) (PmLitOverString s2) | s1 == s2 = Equal go _ _ = PossiblyOverlap -- | Syntactic equality. instance Eq PmLit where a == b = eqPmLit a b == Equal -- | Type of a 'PmLit' pmLitType :: PmLit -> Type pmLitType (PmLit ty _) = ty -- | Undecidable equality for values represented by 'ConLike's. -- See Note [Undecidable Equality for PmAltCons]. -- 'PatSynCon's aren't enforced to be generative, so two syntactically different -- 'PatSynCon's might match the exact same values. Without looking into and -- reasoning about the pattern synonym's definition, we can't decide if their -- sets of matched values is different. -- -- * @Just True@ ==> Surely equal -- * @Just False@ ==> Surely different (non-overlapping, even!) -- * @Nothing@ ==> Equality relation undecidable eqConLike :: ConLike -> ConLike -> PmEquality eqConLike (RealDataCon dc1) (RealDataCon dc2) = decEquality (dc1 == dc2) eqConLike (PatSynCon psc1) (PatSynCon psc2) | psc1 == psc2 = Equal eqConLike _ _ = PossiblyOverlap -- | Represents the head of a match against a 'ConLike' or literal. -- Really similar to 'GHC.Core.AltCon'. 
data PmAltCon = PmAltConLike ConLike | PmAltLit PmLit data PmAltConSet = PACS !(UniqDSet ConLike) ![PmLit] emptyPmAltConSet :: PmAltConSet emptyPmAltConSet = PACS emptyUniqDSet [] isEmptyPmAltConSet :: PmAltConSet -> Bool isEmptyPmAltConSet (PACS cls lits) = isEmptyUniqDSet cls && null lits -- | Whether there is a 'PmAltCon' in the 'PmAltConSet' that compares 'Equal' to -- the given 'PmAltCon' according to 'eqPmAltCon'. elemPmAltConSet :: PmAltCon -> PmAltConSet -> Bool elemPmAltConSet (PmAltConLike cl) (PACS cls _ ) = elementOfUniqDSet cl cls elemPmAltConSet (PmAltLit lit) (PACS _ lits) = elem lit lits extendPmAltConSet :: PmAltConSet -> PmAltCon -> PmAltConSet extendPmAltConSet (PACS cls lits) (PmAltConLike cl) = PACS (addOneToUniqDSet cls cl) lits extendPmAltConSet (PACS cls lits) (PmAltLit lit) = PACS cls (unionLists lits [lit]) pmAltConSetElems :: PmAltConSet -> [PmAltCon] pmAltConSetElems (PACS cls lits) = map PmAltConLike (uniqDSetToList cls) ++ map PmAltLit lits instance Outputable PmAltConSet where ppr = ppr . pmAltConSetElems -- | We can't in general decide whether two 'PmAltCon's match the same set of -- values. In addition to the reasons in 'eqPmLit' and 'eqConLike', a -- 'PmAltConLike' might or might not represent the same value as a 'PmAltLit'. -- See Note [Undecidable Equality for PmAltCons]. -- -- * @Just True@ ==> Surely equal -- * @Just False@ ==> Surely different (non-overlapping, even!) 
-- * @Nothing@ ==> Equality relation undecidable -- -- Examples (omitting some constructor wrapping): -- -- * @eqPmAltCon (LitInt 42) (LitInt 1) == Just False@: Lit equality is -- decidable -- * @eqPmAltCon (DataCon A) (DataCon B) == Just False@: DataCon equality is -- decidable -- * @eqPmAltCon (LitOverInt 42) (LitOverInt 1) == Nothing@: OverLit equality -- is undecidable -- * @eqPmAltCon (PatSyn PA) (PatSyn PB) == Nothing@: PatSyn equality is -- undecidable -- * @eqPmAltCon (DataCon I#) (LitInt 1) == Nothing@: DataCon to Lit -- comparisons are undecidable without reasoning about the wrapped @Int#@ -- * @eqPmAltCon (LitOverInt 1) (LitOverInt 1) == Just True@: We assume -- reflexivity for overloaded literals -- * @eqPmAltCon (PatSyn PA) (PatSyn PA) == Just True@: We assume reflexivity -- for Pattern Synonyms eqPmAltCon :: PmAltCon -> PmAltCon -> PmEquality eqPmAltCon (PmAltConLike cl1) (PmAltConLike cl2) = eqConLike cl1 cl2 eqPmAltCon (PmAltLit l1) (PmAltLit l2) = eqPmLit l1 l2 eqPmAltCon _ _ = PossiblyOverlap -- | Syntactic equality. instance Eq PmAltCon where a == b = eqPmAltCon a b == Equal -- | Type of a 'PmAltCon' pmAltConType :: PmAltCon -> [Type] -> Type pmAltConType (PmAltLit lit) _arg_tys = assert (null _arg_tys ) $ pmLitType lit pmAltConType (PmAltConLike con) arg_tys = conLikeResTy con arg_tys -- | Is a match on this constructor forcing the match variable? -- True of data constructors, literals and pattern synonyms (#17357), but not of -- newtypes. -- See Note [Coverage checking Newtype matches] in GHC.HsToCore.Pmc.Solver. 
isPmAltConMatchStrict :: PmAltCon -> Bool isPmAltConMatchStrict PmAltLit{} = True isPmAltConMatchStrict (PmAltConLike PatSynCon{}) = True -- #17357 isPmAltConMatchStrict (PmAltConLike (RealDataCon dc)) = not (isNewDataCon dc) pmAltConImplBangs :: PmAltCon -> [HsImplBang] pmAltConImplBangs PmAltLit{} = [] pmAltConImplBangs (PmAltConLike con) = conLikeImplBangs con {- Note [Undecidable Equality for PmAltCons] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Equality on overloaded literals is undecidable in the general case. Consider the following example: instance Num Bool where ... fromInteger 0 = False -- C-like representation of booleans fromInteger _ = True f :: Bool -> () f 1 = () -- Clause A f 2 = () -- Clause B Clause B is redundant but to detect this, we must decide the constraint: @fromInteger 2 ~ fromInteger 1@ which means that we have to look through function @fromInteger@, whose implementation could be anything. This poses difficulties for: 1. The expressive power of the check. We cannot expect a reasonable implementation of pattern matching to detect that @fromInteger 2 ~ fromInteger 1@ is True, unless we unfold function fromInteger. This puts termination at risk and is undecidable in the general case. 2. Error messages/Warnings. What should our message for @f@ above be? A reasonable approach would be to issue: Pattern matches are (potentially) redundant: f 2 = ... under the assumption that 1 == 2 but seems to complex and confusing for the user. We choose to equate only obviously equal overloaded literals, in all other cases we signal undecidability by returning Nothing from 'eqPmAltCons'. We do better for non-overloaded literals, because we know their fromInteger/fromString implementation is actually injective, allowing us to simplify the constraint @fromInteger 1 ~ fromInteger 2@ to @1 ~ 2@, which is trivially unsatisfiable. 
The impact of this treatment of overloaded literals is the following: * Redundancy checking is rather conservative, since it cannot see that clause B above is redundant. * We have instant equality check for overloaded literals (we do not rely on the term oracle which is rather expensive, both in terms of performance and memory). This significantly improves the performance of functions `covered` `uncovered` and `divergent` in "GHC.HsToCore.Pmc" and effectively addresses #11161. * The warnings issued are simpler. Similar reasoning applies to pattern synonyms: In contrast to data constructors, which are generative, constraints like F a ~ G b for two different pattern synonyms F and G aren't immediately unsatisfiable. We assume F a ~ F a, though. -} literalToPmLit :: Type -> Literal -> Maybe PmLit literalToPmLit ty l = PmLit ty <$> go l where go (LitChar c) = Just (PmLitChar c) go (LitFloat r) = Just (PmLitRat r) go (LitDouble r) = Just (PmLitRat r) go (LitString s) = Just (PmLitString (mkFastStringByteString s)) go (LitNumber _ i) = Just (PmLitInt i) go _ = Nothing negatePmLit :: PmLit -> Maybe PmLit negatePmLit (PmLit ty v) = PmLit ty <$> go v where go (PmLitInt i) = Just (PmLitInt (-i)) go (PmLitRat r) = Just (PmLitRat (-r)) go (PmLitOverInt n i) = Just (PmLitOverInt (n+1) i) go (PmLitOverRat n r) = Just (PmLitOverRat (n+1) r) go _ = Nothing overloadPmLit :: Type -> PmLit -> Maybe PmLit overloadPmLit ty (PmLit _ v) = PmLit ty <$> go v where go (PmLitInt i) = Just (PmLitOverInt 0 i) go (PmLitRat r) = Just $! PmLitOverRat 0 $! 
fractionalLitFromRational r go (PmLitString s) | ty `eqType` stringTy = Just v | otherwise = Just (PmLitOverString s) go ovRat@PmLitOverRat{} = Just ovRat go _ = Nothing pmLitAsStringLit :: PmLit -> Maybe FastString pmLitAsStringLit (PmLit _ (PmLitString s)) = Just s pmLitAsStringLit _ = Nothing coreExprAsPmLit :: CoreExpr -> Maybe PmLit -- coreExprAsPmLit e | pprTrace "coreExprAsPmLit" (ppr e) False = undefined coreExprAsPmLit (Tick _t e) = coreExprAsPmLit e coreExprAsPmLit (Lit l) = literalToPmLit (literalType l) l coreExprAsPmLit e = case collectArgs e of (Var x, [Lit l]) | Just dc <- isDataConWorkId_maybe x , dc `elem` [intDataCon, wordDataCon, charDataCon, floatDataCon, doubleDataCon] -> literalToPmLit (exprType e) l (Var x, [Lit (LitNumber _ l)]) | Just (ty,l) <- bignum_lit_maybe x l -> Just (PmLit ty (PmLitInt l)) (Var x, [_ty, n_arg, d_arg]) | Just dc <- isDataConWorkId_maybe x , dataConName dc == ratioDataConName , Just (PmLit _ (PmLitInt n)) <- coreExprAsPmLit n_arg , Just (PmLit _ (PmLitInt d)) <- coreExprAsPmLit d_arg -- HACK: just assume we have a literal double. This case only occurs for -- overloaded lits anyway, so we immediately override type information -> literalToPmLit (exprType e) (mkLitDouble (n % d)) (Var x, args) -- See Note [Detecting overloaded literals with -XRebindableSyntax] | is_rebound_name x fromIntegerName , Just arg <- lastMaybe args , Just (_ty,l) <- bignum_conapp_maybe arg -> Just (PmLit integerTy (PmLitInt l)) >>= overloadPmLit (exprType e) (Var x, args) -- See Note [Detecting overloaded literals with -XRebindableSyntax] -- fromRational | is_rebound_name x fromRationalName , [r] <- dropWhile (not . is_ratio) args -> coreExprAsPmLit r >>= overloadPmLit (exprType e) --Rationals with large exponents (Var x, args) -- See Note [Detecting overloaded literals with -XRebindableSyntax] -- See Note [Dealing with rationals with large exponents] -- mkRationalBase* | Just exp_base <- is_larg_exp_ratio x , [r, exp] <- dropWhile (not . 
is_ratio) args , (Var x, [_ty, n_arg, d_arg]) <- collectArgs r , Just dc <- isDataConWorkId_maybe x , dataConName dc == ratioDataConName , Just (PmLit _ (PmLitInt n)) <- coreExprAsPmLit n_arg , Just (PmLit _ (PmLitInt d)) <- coreExprAsPmLit d_arg , Just (_exp_ty,exp') <- bignum_conapp_maybe exp -> do let rational = (abs n) :% d let neg = if n < 0 then 1 else 0 let frac = mkFractionalLit NoSourceText False rational exp' exp_base Just $ PmLit (exprType e) (PmLitOverRat neg frac) (Var x, args) | is_rebound_name x fromStringName -- See Note [Detecting overloaded literals with -XRebindableSyntax] , s:_ <- filter (isStringTy . exprType) $ filter isValArg args -- NB: Calls coreExprAsPmLit and then overloadPmLit, so that we return PmLitOverStrings -> coreExprAsPmLit s >>= overloadPmLit (exprType e) -- These last two cases handle proper String literals (Var x, [Type ty]) | Just dc <- isDataConWorkId_maybe x , dc == nilDataCon , ty `eqType` charTy -> literalToPmLit stringTy (mkLitString "") (Var x, [Lit l]) | idName x `elem` [unpackCStringName, unpackCStringUtf8Name] -> literalToPmLit stringTy l _ -> Nothing where bignum_conapp_maybe (App (Var x) (Lit (LitNumber _ l))) = bignum_lit_maybe x l bignum_conapp_maybe _ = Nothing bignum_lit_maybe x l | Just dc <- isDataConWorkId_maybe x = if | dc == integerISDataCon -> Just (integerTy,l) | dc == integerIPDataCon -> Just (integerTy,l) | dc == integerINDataCon -> Just (integerTy,negate l) | dc == naturalNSDataCon -> Just (naturalTy,l) | dc == naturalNBDataCon -> Just (naturalTy,l) | otherwise -> Nothing bignum_lit_maybe _ _ = Nothing is_ratio (Type _) = False is_ratio r | Just (tc, _) <- splitTyConApp_maybe (exprType r) = tyConName tc == ratioTyConName | otherwise = False is_larg_exp_ratio x | is_rebound_name x mkRationalBase10Name = Just Base10 | is_rebound_name x mkRationalBase2Name = Just Base2 | otherwise = Nothing -- See Note [Detecting overloaded literals with -XRebindableSyntax] is_rebound_name :: Id -> Name -> Bool 
is_rebound_name x n = getOccFS (idName x) == getOccFS n {- Note [Detecting overloaded literals with -XRebindableSyntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Normally, we'd find e.g. overloaded string literals by comparing the application head of an expression to `fromStringName`. But that doesn't work with -XRebindableSyntax: The `Name` of a user-provided `fromString` function is different to `fromStringName`, which lives in a certain module, etc. There really is no other way than to compare `OccName`s and guess which argument is the actual literal string (we assume it's the first argument of type `String`). The same applies to other overloaded literals, such as overloaded rationals (`fromRational`)and overloaded integer literals (`fromInteger`). Note [Dealing with rationals with large exponents] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Rationals with large exponents are *not* desugared to a simple rational. As that would require us to compute their value which can be expensive. Rather they desugar to an expression. For example 1e1000 will desugar to an expression of the form: `mkRationalWithExponentBase10 (1 :% 1) 1000` Only overloaded literals desugar to this form however, so we we can just return a overloaded rational literal. The most complex case is if we have RebindableSyntax enabled. By example if we have a pattern like this: `f 3.3 = True` It will desugar to: fromRational [TYPE: Rational, mkRationalBase10 (:% @Integer 10 1) (-1)] The fromRational is properly detected as an overloaded Rational by coreExprAsPmLit and it's general code for detecting overloaded rationals. See Note [Detecting overloaded literals with -XRebindableSyntax]. This case then recurses into coreExprAsPmLit passing only the expression `mkRationalBase10 (:% @Integer 10 1) (-1)`. Which is caught by rationals with large exponents case. This will return a `PmLitOverRat` literal. 
Which is then passed to overloadPmLit which simply returns it as-is since it's already overloaded. -} instance Outputable PmLitValue where ppr (PmLitInt i) = ppr i ppr (PmLitRat r) = ppr (double (fromRat r)) -- good enough ppr (PmLitChar c) = pprHsChar c ppr (PmLitString s) = pprHsString s ppr (PmLitOverInt n i) = minuses n (ppr i) ppr (PmLitOverRat n r) = minuses n (ppr r) ppr (PmLitOverString s) = pprHsString s -- Take care of negated literals minuses :: Int -> SDoc -> SDoc minuses n sdoc = iterate (\sdoc -> parens (char '-' <> sdoc)) sdoc !! n instance Outputable PmLit where ppr (PmLit ty v) = ppr v <> suffix where -- Some ad-hoc hackery for displaying proper lit suffixes based on type tbl = [ (intPrimTy, primIntSuffix) , (int64PrimTy, primInt64Suffix) , (wordPrimTy, primWordSuffix) , (word64PrimTy, primWord64Suffix) , (charPrimTy, primCharSuffix) , (floatPrimTy, primFloatSuffix) , (doublePrimTy, primDoubleSuffix) ] suffix = fromMaybe empty (snd <$> find (eqType ty . fst) tbl) instance Outputable PmAltCon where ppr (PmAltConLike cl) = ppr cl ppr (PmAltLit l) = ppr l instance Outputable PmEquality where ppr = text . show ghc-lib-parser-9.4.7.20230826/compiler/GHC/HsToCore/Pmc/Types.hs0000644000000000000000000001744214472400112021510 0ustar0000000000000000 {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ViewPatterns #-} {- Author: George Karachalias Sebastian Graf -} -- | Types used through-out pattern match checking. This module is mostly there -- to be imported from "GHC.HsToCore.Types". The exposed API is that of -- "GHC.HsToCore.Pmc". -- -- These types model the paper -- [Lower Your Guards: A Compositional Pattern-Match Coverage Checker"](https://dl.acm.org/doi/abs/10.1145/3408989). 
module GHC.HsToCore.Pmc.Types ( -- * LYG syntax -- ** Guard language SrcInfo(..), PmGrd(..), GrdVec(..), -- ** Guard tree language PmMatchGroup(..), PmMatch(..), PmGRHSs(..), PmGRHS(..), PmPatBind(..), PmEmptyCase(..), -- * Coverage Checking types RedSets (..), Precision (..), CheckResult (..), -- * Pre and post coverage checking synonyms Pre, Post, -- * Normalised refinement types module GHC.HsToCore.Pmc.Solver.Types ) where import GHC.Prelude import GHC.HsToCore.Pmc.Solver.Types import GHC.Data.OrdList import GHC.Types.Id import GHC.Types.Var (EvVar) import GHC.Types.SrcLoc import GHC.Utils.Outputable import GHC.Core.Type import GHC.Core import Data.List.NonEmpty ( NonEmpty(..) ) import qualified Data.List.NonEmpty as NE import qualified Data.Semigroup as Semi -- -- * Guard language -- -- | A very simple language for pattern guards. Let bindings, bang patterns, -- and matching variables against flat constructor patterns. -- The LYG guard language. data PmGrd = -- | @PmCon x K dicts args@ corresponds to a @K dicts args <- x@ guard. -- The @args@ are bound in this construct, the @x@ is just a use. -- For the arguments' meaning see 'GHC.Hs.Pat.ConPatOut'. PmCon { pm_id :: !Id, pm_con_con :: !PmAltCon, pm_con_tvs :: ![TyVar], pm_con_dicts :: ![EvVar], pm_con_args :: ![Id] } -- | @PmBang x@ corresponds to a @seq x True@ guard. -- If the extra 'SrcInfo' is present, the bang guard came from a source -- bang pattern, in which case we might want to report it as redundant. -- See Note [Dead bang patterns] in GHC.HsToCore.Pmc.Check. | PmBang { pm_id :: !Id, _pm_loc :: !(Maybe SrcInfo) } -- | @PmLet x expr@ corresponds to a @let x = expr@ guard. This actually -- /binds/ @x@. | PmLet { pm_id :: !Id, _pm_let_expr :: !CoreExpr } -- | Should not be user-facing. instance Outputable PmGrd where ppr (PmCon x alt _tvs _con_dicts con_args) = hsep [ppr alt, hsep (map ppr con_args), text "<-", ppr x] ppr (PmBang x _loc) = char '!' 
<> ppr x ppr (PmLet x expr) = hsep [text "let", ppr x, text "=", ppr expr] -- -- * Guard tree language -- -- | Means by which we identify a source construct for later pretty-printing in -- a warning message. 'SDoc' for the equation to show, 'Located' for the -- location. newtype SrcInfo = SrcInfo (Located SDoc) -- | A sequence of 'PmGrd's. newtype GrdVec = GrdVec [PmGrd] -- | A guard tree denoting 'MatchGroup'. newtype PmMatchGroup p = PmMatchGroup (NonEmpty (PmMatch p)) -- | A guard tree denoting 'Match': A payload describing the pats and a bunch of -- GRHS. data PmMatch p = PmMatch { pm_pats :: !p, pm_grhss :: !(PmGRHSs p) } -- | A guard tree denoting 'GRHSs': A bunch of 'PmLet' guards for local -- bindings from the 'GRHSs's @where@ clauses and the actual list of 'GRHS'. -- See Note [Long-distance information for HsLocalBinds] in -- "GHC.HsToCore.Pmc.Desugar". data PmGRHSs p = PmGRHSs { pgs_lcls :: !p, pgs_grhss :: !(NonEmpty (PmGRHS p))} -- | A guard tree denoting 'GRHS': A payload describing the grds and a 'SrcInfo' -- useful for printing out in warnings messages. data PmGRHS p = PmGRHS { pg_grds :: !p, pg_rhs :: !SrcInfo } -- | A guard tree denoting an -XEmptyCase. newtype PmEmptyCase = PmEmptyCase { pe_var :: Id } -- | A guard tree denoting a pattern binding. newtype PmPatBind p = -- just reuse GrdGRHS and pretend its @SrcInfo@ is info on the /pattern/, -- rather than on the pattern bindings. PmPatBind (PmGRHS p) instance Outputable SrcInfo where ppr (SrcInfo (L (RealSrcSpan rss _) _)) = ppr (srcSpanStartLine rss) ppr (SrcInfo (L s _)) = ppr s -- | Format LYG guards as @| True <- x, let x = 42, !z@ instance Outputable GrdVec where ppr (GrdVec []) = empty ppr (GrdVec (g:gs)) = fsep (char '|' <+> ppr g : map ((comma <+>) . ppr) gs) -- | Format a LYG sequence (e.g. 
'Match'es of a 'MatchGroup' or 'GRHSs') as -- @{ ; ...; }@ pprLygSequence :: Outputable a => NonEmpty a -> SDoc pprLygSequence (NE.toList -> as) = braces (space <> fsep (punctuate semi (map ppr as)) <> space) instance Outputable p => Outputable (PmMatchGroup p) where ppr (PmMatchGroup matches) = pprLygSequence matches instance Outputable p => Outputable (PmMatch p) where ppr (PmMatch { pm_pats = grds, pm_grhss = grhss }) = ppr grds <+> ppr grhss instance Outputable p => Outputable (PmGRHSs p) where ppr (PmGRHSs { pgs_lcls = _lcls, pgs_grhss = grhss }) = ppr grhss instance Outputable p => Outputable (PmGRHS p) where ppr (PmGRHS { pg_grds = grds, pg_rhs = rhs }) = ppr grds <+> text "->" <+> ppr rhs instance Outputable p => Outputable (PmPatBind p) where ppr (PmPatBind PmGRHS { pg_grds = grds, pg_rhs = bind }) = ppr bind <+> ppr grds <+> text "=" <+> text "..." instance Outputable PmEmptyCase where ppr (PmEmptyCase { pe_var = var }) = text " ppr var <> text ">" data Precision = Approximate | Precise deriving (Eq, Show) instance Outputable Precision where ppr = text . show instance Semi.Semigroup Precision where Precise <> Precise = Precise _ <> _ = Approximate instance Monoid Precision where mempty = Precise mappend = (Semi.<>) -- | Redundancy sets, used to determine redundancy of RHSs and bang patterns -- (later digested into a 'CIRB'). data RedSets = RedSets { rs_cov :: !Nablas -- ^ The /Covered/ set; the set of values reaching a particular program -- point. , rs_div :: !Nablas -- ^ The /Diverging/ set; empty if no match can lead to divergence. -- If it wasn't empty, we have to turn redundancy warnings into -- inaccessibility warnings for any subclauses. , rs_bangs :: !(OrdList (Nablas, SrcInfo)) -- ^ If any of the 'Nablas' is empty, the corresponding 'SrcInfo' pin-points -- a bang pattern in source that is redundant. See Note [Dead bang patterns]. 
} instance Outputable RedSets where ppr RedSets { rs_cov = _cov, rs_div = _div, rs_bangs = _bangs } -- It's useful to change this definition for different verbosity levels in -- printf-debugging = empty -- | Pattern-match coverage check result data CheckResult a = CheckResult { cr_ret :: !a -- ^ A hole for redundancy info and covered sets. , cr_uncov :: !Nablas -- ^ The set of uncovered values falling out at the bottom. -- (for -Wincomplete-patterns, but also important state for the algorithm) , cr_approx :: !Precision -- ^ A flag saying whether we ran into the 'maxPmCheckModels' limit for the -- purpose of suggesting to crank it up in the warning message. Writer state. } deriving Functor instance Outputable a => Outputable (CheckResult a) where ppr (CheckResult c unc pc) = text "CheckResult" <+> ppr_precision pc <+> braces (fsep [ field "ret" c <> comma , field "uncov" unc]) where ppr_precision Precise = empty ppr_precision Approximate = text "(Approximate)" field name value = text name <+> equals <+> ppr value -- -- * Pre and post coverage checking synonyms -- -- | Used as tree payload pre-checking. The LYG guards to check. type Pre = GrdVec -- | Used as tree payload post-checking. The redundancy info we elaborated. type Post = RedSets ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Ext/Fields.hs0000644000000000000000000000533714472400112021154 0ustar0000000000000000module GHC.Iface.Ext.Fields ( ExtensibleFields (..) 
, FieldName , emptyExtensibleFields -- * Reading , readField , readFieldWith -- * Writing , writeField , writeFieldWith -- * Deletion , deleteField ) where import GHC.Prelude import GHC.Utils.Binary import Control.Monad import Data.Map ( Map ) import qualified Data.Map as Map import Control.DeepSeq type FieldName = String newtype ExtensibleFields = ExtensibleFields { getExtensibleFields :: (Map FieldName BinData) } instance Binary ExtensibleFields where put_ bh (ExtensibleFields fs) = do put_ bh (Map.size fs :: Int) -- Put the names of each field, and reserve a space -- for a payload pointer after each name: header_entries <- forM (Map.toList fs) $ \(name, dat) -> do put_ bh name field_p_p <- tellBin bh put_ bh field_p_p return (field_p_p, dat) -- Now put the payloads and use the reserved space -- to point to the start of each payload: forM_ header_entries $ \(field_p_p, dat) -> do field_p <- tellBin bh putAt bh field_p_p field_p seekBin bh field_p put_ bh dat get bh = do n <- get bh :: IO Int -- Get the names and field pointers: header_entries <- replicateM n $ (,) <$> get bh <*> get bh -- Seek to and get each field's payload: fields <- forM header_entries $ \(name, field_p) -> do seekBin bh field_p dat <- get bh return (name, dat) return . ExtensibleFields . Map.fromList $ fields instance NFData ExtensibleFields where rnf (ExtensibleFields fs) = rnf fs emptyExtensibleFields :: ExtensibleFields emptyExtensibleFields = ExtensibleFields Map.empty -------------------------------------------------------------------------------- -- | Reading readField :: Binary a => FieldName -> ExtensibleFields -> IO (Maybe a) readField name = readFieldWith name get readFieldWith :: FieldName -> (BinHandle -> IO a) -> ExtensibleFields -> IO (Maybe a) readFieldWith name read fields = sequence $ ((read =<<) . 
dataHandle) <$> Map.lookup name (getExtensibleFields fields) -------------------------------------------------------------------------------- -- | Writing writeField :: Binary a => FieldName -> a -> ExtensibleFields -> IO ExtensibleFields writeField name x = writeFieldWith name (`put_` x) writeFieldWith :: FieldName -> (BinHandle -> IO ()) -> ExtensibleFields -> IO ExtensibleFields writeFieldWith name write fields = do bh <- openBinMem (1024 * 1024) write bh -- bd <- handleData bh return $ ExtensibleFields (Map.insert name bd $ getExtensibleFields fields) deleteField :: FieldName -> ExtensibleFields -> ExtensibleFields deleteField name (ExtensibleFields fs) = ExtensibleFields $ Map.delete name fs ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Recomp/Binary.hs0000644000000000000000000000257014472400112021653 0ustar0000000000000000 -- | Computing fingerprints of values serializeable with GHC's \"Binary\" module. module GHC.Iface.Recomp.Binary ( -- * Computing fingerprints fingerprintBinMem , computeFingerprint , putNameLiterally ) where import GHC.Prelude import GHC.Utils.Fingerprint import GHC.Utils.Binary import GHC.Types.Name import GHC.Utils.Panic.Plain fingerprintBinMem :: BinHandle -> IO Fingerprint fingerprintBinMem bh = withBinBuffer bh f where f bs = -- we need to take care that we force the result here -- lest a reference to the ByteString may leak out of -- withBinBuffer. let fp = fingerprintByteString bs in fp `seq` return fp computeFingerprint :: (Binary a) => (BinHandle -> Name -> IO ()) -> a -> IO Fingerprint computeFingerprint put_nonbinding_name a = do bh <- fmap set_user_data $ openBinMem (3*1024) -- just less than a block put_ bh a fingerprintBinMem bh where set_user_data bh = setUserData bh $ newWriteState put_nonbinding_name putNameLiterally putFS -- | Used when we want to fingerprint a structure without depending on the -- fingerprints of external Names that it refers to. 
putNameLiterally :: BinHandle -> Name -> IO () putNameLiterally bh name = assert (isExternalName name) $ do put_ bh $! nameModule name put_ bh $! nameOccName name ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Syntax.hs0000644000000000000000000031410014472400112020463 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 -} {-# LANGUAGE LambdaCase #-} module GHC.Iface.Syntax ( module GHC.Iface.Type, IfaceDecl(..), IfaceFamTyConFlav(..), IfaceClassOp(..), IfaceAT(..), IfaceConDecl(..), IfaceConDecls(..), IfaceEqSpec, IfaceExpr(..), IfaceAlt(..), IfaceLetBndr(..), IfaceJoinInfo(..), IfaceBinding(..), IfaceConAlt(..), IfaceIdInfo, IfaceIdDetails(..), IfaceUnfolding(..), IfaceInfoItem(..), IfaceRule(..), IfaceAnnotation(..), IfaceAnnTarget, IfaceClsInst(..), IfaceFamInst(..), IfaceTickish(..), IfaceClassBody(..), IfaceBang(..), IfaceSrcBang(..), SrcUnpackedness(..), SrcStrictness(..), IfaceAxBranch(..), IfaceTyConParent(..), IfaceCompleteMatch(..), IfaceLFInfo(..), -- * Binding names IfaceTopBndr, putIfaceTopBndr, getIfaceTopBndr, -- Misc ifaceDeclImplicitBndrs, visibleIfConDecls, ifaceDeclFingerprints, -- Free Names freeNamesIfDecl, freeNamesIfRule, freeNamesIfFamInst, -- Pretty printing pprIfaceExpr, pprIfaceDecl, AltPpr(..), ShowSub(..), ShowHowMuch(..), showToIface, showToHeader ) where import GHC.Prelude import GHC.Builtin.Names ( unrestrictedFunTyConKey, liftedTypeKindTyConKey ) import GHC.Types.Unique ( hasKey ) import GHC.Iface.Type import GHC.Iface.Recomp.Binary import GHC.Core( IsOrphan, isOrphan, UnfoldingCache(..) 
) import GHC.Types.Demand import GHC.Types.Cpr import GHC.Core.Class import GHC.Types.FieldLabel import GHC.Types.Name.Set import GHC.Core.Coercion.Axiom ( BranchIndex ) import GHC.Types.Name import GHC.Types.CostCentre import GHC.Types.Literal import GHC.Types.ForeignCall import GHC.Types.Annotations( AnnPayload, AnnTarget ) import GHC.Types.Basic import GHC.Unit.Module import GHC.Types.SrcLoc import GHC.Data.BooleanFormula ( BooleanFormula, pprBooleanFormula, isTrue ) import GHC.Types.Var( VarBndr(..), binderVar, tyVarSpecToBinders ) import GHC.Core.TyCon ( Role (..), Injectivity(..), tyConBndrVisArgFlag ) import GHC.Core.DataCon (SrcStrictness(..), SrcUnpackedness(..)) import GHC.Builtin.Types ( constraintKindTyConName ) import GHC.Stg.InferTags.TagSig import GHC.Utils.Lexeme (isLexSym) import GHC.Utils.Fingerprint import GHC.Utils.Binary import GHC.Utils.Binary.Typeable () import GHC.Utils.Outputable as Outputable import GHC.Utils.Panic import GHC.Utils.Misc( dropList, filterByList, notNull, unzipWith, seqList, zipWithEqual ) import Control.Monad import System.IO.Unsafe import Control.DeepSeq infixl 3 &&& {- ************************************************************************ * * Declarations * * ************************************************************************ -} -- | A binding top-level 'Name' in an interface file (e.g. the name of an -- 'IfaceDecl'). type IfaceTopBndr = Name -- It's convenient to have a Name in the Iface syntax, although in each -- case the namespace is implied by the context. However, having a -- Name makes things like ifaceDeclImplicitBndrs and ifaceDeclFingerprints -- very convenient. Moreover, having the key of the binder means that -- we can encode known-key things cleverly in the symbol table. See Note -- [Symbol table representation of Names] -- -- We don't serialise the namespace onto the disk though; rather we -- drop it when serialising and add it back in when deserialising. 
getIfaceTopBndr :: BinHandle -> IO IfaceTopBndr getIfaceTopBndr bh = get bh putIfaceTopBndr :: BinHandle -> IfaceTopBndr -> IO () putIfaceTopBndr bh name = case getUserData bh of UserData{ ud_put_binding_name = put_binding_name } -> --pprTrace "putIfaceTopBndr" (ppr name) $ put_binding_name bh name data IfaceDecl = IfaceId { ifName :: IfaceTopBndr, ifType :: IfaceType, ifIdDetails :: IfaceIdDetails, ifIdInfo :: IfaceIdInfo } | IfaceData { ifName :: IfaceTopBndr, -- Type constructor ifBinders :: [IfaceTyConBinder], ifResKind :: IfaceType, -- Result kind of type constructor ifCType :: Maybe CType, -- C type for CAPI FFI ifRoles :: [Role], -- Roles ifCtxt :: IfaceContext, -- The "stupid theta" ifCons :: IfaceConDecls, -- Includes new/data/data family info ifGadtSyntax :: Bool, -- True <=> declared using -- GADT syntax ifParent :: IfaceTyConParent -- The axiom, for a newtype, -- or data/newtype family instance } | IfaceSynonym { ifName :: IfaceTopBndr, -- Type constructor ifRoles :: [Role], -- Roles ifBinders :: [IfaceTyConBinder], ifResKind :: IfaceKind, -- Kind of the *result* ifSynRhs :: IfaceType } | IfaceFamily { ifName :: IfaceTopBndr, -- Type constructor ifResVar :: Maybe IfLclName, -- Result variable name, used -- only for pretty-printing -- with --show-iface ifBinders :: [IfaceTyConBinder], ifResKind :: IfaceKind, -- Kind of the *tycon* ifFamFlav :: IfaceFamTyConFlav, ifFamInj :: Injectivity } -- injectivity information | IfaceClass { ifName :: IfaceTopBndr, -- Name of the class TyCon ifRoles :: [Role], -- Roles ifBinders :: [IfaceTyConBinder], ifFDs :: [FunDep IfLclName], -- Functional dependencies ifBody :: IfaceClassBody -- Methods, superclasses, ATs } | IfaceAxiom { ifName :: IfaceTopBndr, -- Axiom name ifTyCon :: IfaceTyCon, -- LHS TyCon ifRole :: Role, -- Role of axiom ifAxBranches :: [IfaceAxBranch] -- Branches } | IfacePatSyn { ifName :: IfaceTopBndr, -- Name of the pattern synonym ifPatIsInfix :: Bool, ifPatMatcher :: (IfExtName, Bool), ifPatBuilder 
:: Maybe (IfExtName, Bool), -- Everything below is redundant, -- but needed to implement pprIfaceDecl ifPatUnivBndrs :: [IfaceForAllSpecBndr], ifPatExBndrs :: [IfaceForAllSpecBndr], ifPatProvCtxt :: IfaceContext, ifPatReqCtxt :: IfaceContext, ifPatArgs :: [IfaceType], ifPatTy :: IfaceType, ifFieldLabels :: [FieldLabel] } -- See also 'ClassBody' data IfaceClassBody -- Abstract classes don't specify their body; they only occur in @hs-boot@ and -- @hsig@ files. = IfAbstractClass | IfConcreteClass { ifClassCtxt :: IfaceContext, -- Super classes ifATs :: [IfaceAT], -- Associated type families ifSigs :: [IfaceClassOp], -- Method signatures ifMinDef :: BooleanFormula IfLclName -- Minimal complete definition } data IfaceTyConParent = IfNoParent | IfDataInstance IfExtName -- Axiom name IfaceTyCon -- Family TyCon (pretty-printing only, not used in GHC.IfaceToCore) -- see Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr IfaceAppArgs -- Arguments of the family TyCon data IfaceFamTyConFlav = IfaceDataFamilyTyCon -- Data family | IfaceOpenSynFamilyTyCon | IfaceClosedSynFamilyTyCon (Maybe (IfExtName, [IfaceAxBranch])) -- ^ Name of associated axiom and branches for pretty printing purposes, -- or 'Nothing' for an empty closed family without an axiom -- See Note [Pretty printing via Iface syntax] in "GHC.Types.TyThing.Ppr" | IfaceAbstractClosedSynFamilyTyCon | IfaceBuiltInSynFamTyCon -- for pretty printing purposes only data IfaceClassOp = IfaceClassOp IfaceTopBndr IfaceType -- Class op type (Maybe (DefMethSpec IfaceType)) -- Default method -- The types of both the class op itself, -- and the default method, are *not* quantified -- over the class variables data IfaceAT = IfaceAT -- See GHC.Core.Class.ClassATItem IfaceDecl -- The associated type declaration (Maybe IfaceType) -- Default associated type instance, if any -- This is just like CoAxBranch data IfaceAxBranch = IfaceAxBranch { ifaxbTyVars :: [IfaceTvBndr] , ifaxbEtaTyVars :: [IfaceTvBndr] , ifaxbCoVars :: 
[IfaceIdBndr] , ifaxbLHS :: IfaceAppArgs , ifaxbRoles :: [Role] , ifaxbRHS :: IfaceType , ifaxbIncomps :: [BranchIndex] } -- See Note [Storing compatibility] in GHC.Core.Coercion.Axiom data IfaceConDecls = IfAbstractTyCon -- c.f TyCon.AbstractTyCon | IfDataTyCon [IfaceConDecl] -- Data type decls | IfNewTyCon IfaceConDecl -- Newtype decls -- For IfDataTyCon and IfNewTyCon we store: -- * the data constructor(s); -- The field labels are stored individually in the IfaceConDecl -- (there is some redundancy here, because a field label may occur -- in multiple IfaceConDecls and represent the same field label) data IfaceConDecl = IfCon { ifConName :: IfaceTopBndr, -- Constructor name ifConWrapper :: Bool, -- True <=> has a wrapper ifConInfix :: Bool, -- True <=> declared infix -- The universal type variables are precisely those -- of the type constructor of this data constructor -- This is *easy* to guarantee when creating the IfCon -- but it's not so easy for the original TyCon/DataCon -- So this guarantee holds for IfaceConDecl, but *not* for DataCon ifConExTCvs :: [IfaceBndr], -- Existential ty/covars ifConUserTvBinders :: [IfaceForAllSpecBndr], -- The tyvars, in the order the user wrote them -- INVARIANT: the set of tyvars in ifConUserTvBinders is exactly the -- set of tyvars (*not* covars) of ifConExTCvs, unioned -- with the set of ifBinders (from the parent IfaceDecl) -- whose tyvars do not appear in ifConEqSpec -- See Note [DataCon user type variable binders] in GHC.Core.DataCon ifConEqSpec :: IfaceEqSpec, -- Equality constraints ifConCtxt :: IfaceContext, -- Non-stupid context ifConArgTys :: [(IfaceMult, IfaceType)],-- Arg types ifConFields :: [FieldLabel], -- ...ditto... 
(field labels) ifConStricts :: [IfaceBang], -- Empty (meaning all lazy), -- or 1-1 corresp with arg tys -- See Note [Bangs on imported data constructors] in GHC.Types.Id.Make ifConSrcStricts :: [IfaceSrcBang] } -- empty meaning no src stricts type IfaceEqSpec = [(IfLclName,IfaceType)] -- | This corresponds to an HsImplBang; that is, the final -- implementation decision about the data constructor arg data IfaceBang = IfNoBang | IfStrict | IfUnpack | IfUnpackCo IfaceCoercion -- | This corresponds to HsSrcBang data IfaceSrcBang = IfSrcBang SrcUnpackedness SrcStrictness data IfaceClsInst = IfaceClsInst { ifInstCls :: IfExtName, -- See comments with ifInstTys :: [Maybe IfaceTyCon], -- the defn of ClsInst ifDFun :: IfExtName, -- The dfun ifOFlag :: OverlapFlag, -- Overlap flag ifInstOrph :: IsOrphan } -- See Note [Orphans] in GHC.Core.InstEnv -- There's always a separate IfaceDecl for the DFun, which gives -- its IdInfo with its full type and version number. -- The instance declarations taken together have a version number, -- and we don't want that to wobble gratuitously -- If this instance decl is *used*, we'll record a usage on the dfun; -- and if the head does not change it won't be used if it wasn't before -- The ifFamInstTys field of IfaceFamInst contains a list of the rough -- match types data IfaceFamInst = IfaceFamInst { ifFamInstFam :: IfExtName -- Family name , ifFamInstTys :: [Maybe IfaceTyCon] -- See above , ifFamInstAxiom :: IfExtName -- The axiom , ifFamInstOrph :: IsOrphan -- Just like IfaceClsInst } data IfaceRule = IfaceRule { ifRuleName :: RuleName, ifActivation :: Activation, ifRuleBndrs :: [IfaceBndr], -- Tyvars and term vars ifRuleHead :: IfExtName, -- Head of lhs ifRuleArgs :: [IfaceExpr], -- Args of LHS ifRuleRhs :: IfaceExpr, ifRuleAuto :: Bool, ifRuleOrph :: IsOrphan -- Just like IfaceClsInst } data IfaceAnnotation = IfaceAnnotation { ifAnnotatedTarget :: IfaceAnnTarget, ifAnnotatedValue :: AnnPayload } type IfaceAnnTarget = AnnTarget OccName 
-- | A COMPLETE pragma as stored in an interface file: the constructor/pattern
-- names of the match, and an optional result TyCon the match applies to.
data IfaceCompleteMatch = IfaceCompleteMatch [IfExtName] (Maybe IfaceTyCon)

instance Outputable IfaceCompleteMatch where
  ppr (IfaceCompleteMatch cls mtc) = text "COMPLETE" <> colon <+> ppr cls <+> case mtc of
    Nothing -> empty
    Just tc -> dcolon <+> ppr tc

-- Here's a tricky case:
--   * Compile with -O module A, and B which imports A.f
--   * Change function f in A, and recompile without -O
--   * When we read in old A.hi we read in its IdInfo (as a thunk)
--      (In earlier GHCs we used to drop IdInfo immediately on reading,
--       but we do not do that now.  Instead it's discarded when the
--       ModIface is read into the various decl pools.)
--   * The version comparison sees that new (=NoInfo) differs from old (=HasInfo *)
--      and so gives a new version.

type IfaceIdInfo = [IfaceInfoItem]

-- | One item of serialised IdInfo attached to an interface Id.
data IfaceInfoItem
  = HsArity         Arity
  | HsDmdSig        DmdSig
  | HsCprSig        CprSig
  | HsInline        InlinePragma
  | HsUnfold        Bool            -- True <=> isStrongLoopBreaker is true
                    IfaceUnfolding  -- See Note [Expose recursive functions]
  | HsNoCafRefs
  | HsLevity        -- Present <=> never representation-polymorphic
  | HsLFInfo        IfaceLFInfo
  | HsTagSig        TagSig

-- NB: Specialisations and rules come in separately and are
-- only later attached to the Id.  Partial reason: some are orphans.

data IfaceUnfolding
  = IfCoreUnfold Bool IfUnfoldingCache IfaceExpr
      -- True <=> INLINABLE, False <=> regular unfolding
      -- Possibly could eliminate the Bool here, the information
      -- is also in the InlinePragma.

  | IfCompulsory IfaceExpr  -- default methods and unsafeCoerce#
                            -- for more about unsafeCoerce#, see
                            -- Note [Wiring in unsafeCoerce#] in "GHC.HsToCore"

  | IfInlineRule Arity      -- INLINE pragmas
                 Bool       -- OK to inline even if *un*-saturated
                 Bool       -- OK to inline even if context is boring
                 IfaceExpr

  | IfDFunUnfold [IfaceBndr] [IfaceExpr]

type IfUnfoldingCache = UnfoldingCache

-- We only serialise the IdDetails of top-level Ids, and even then
-- we only need a very limited selection.  Notably, none of the
-- implicit ones are needed here, because they are not put in
-- interface files

data IfaceIdDetails
  = IfVanillaId
  | IfWorkerLikeId [CbvMark]
  | IfRecSelId (Either IfaceTyCon IfaceDecl) Bool
  | IfDFunId

-- | Iface type for LambdaFormInfo. Fields not relevant for imported Ids are
-- omitted in this type.
data IfaceLFInfo
  = IfLFReEntrant !RepArity
  | IfLFThunk
      !Bool -- True <=> updatable
      !Bool -- True <=> might be a function type
  | IfLFCon !Name
  | IfLFUnknown !Bool
  | IfLFUnlifted

instance Outputable IfaceLFInfo where
    ppr (IfLFReEntrant arity) =
      text "LFReEntrant" <+> ppr arity

    ppr (IfLFThunk updatable mb_fun) =
      text "LFThunk" <+> parens
        (text "updatable=" <> ppr updatable <+>
         text "might_be_function=" <+> ppr mb_fun)

    ppr (IfLFCon con) =
      text "LFCon" <> brackets (ppr con)

    ppr IfLFUnlifted =
      text "LFUnlifted"

    ppr (IfLFUnknown fun_flag) =
      text "LFUnknown" <+> ppr fun_flag

-- NOTE(review): the byte tags below (0..4) are part of the on-disk interface
-- format; they must stay in sync between put_ and get.
instance Binary IfaceLFInfo where
    put_ bh (IfLFReEntrant arity) = do
        putByte bh 0
        put_ bh arity
    put_ bh (IfLFThunk updatable mb_fun) = do
        putByte bh 1
        put_ bh updatable
        put_ bh mb_fun
    put_ bh (IfLFCon con_name) = do
        putByte bh 2
        put_ bh con_name
    put_ bh (IfLFUnknown fun_flag) = do
        putByte bh 3
        put_ bh fun_flag
    put_ bh IfLFUnlifted =
        putByte bh 4
    get bh = do
        tag <- getByte bh
        case tag of
            0 -> IfLFReEntrant <$> get bh
            1 -> IfLFThunk <$> get bh <*> get bh
            2 -> IfLFCon <$> get bh
            3 -> IfLFUnknown <$> get bh
            4 -> pure IfLFUnlifted
            _ -> panic "Invalid byte"

{-
Note [Versioning of instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See [https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/recompilation-avoidance#instances]


************************************************************************
*                                                                      *
                Functions over declarations
*                                                                      *
************************************************************************
-}

-- | The constructors a client of this interface can see; abstract tycons
-- expose none.
visibleIfConDecls :: IfaceConDecls -> [IfaceConDecl]
visibleIfConDecls (IfAbstractTyCon {}) = []
visibleIfConDecls (IfDataTyCon cs)     = cs
visibleIfConDecls (IfNewTyCon c)       = [c]
ifaceDeclImplicitBndrs :: IfaceDecl -> [OccName]
-- *Excludes* the 'main' name, but *includes* the implicitly-bound names
-- Deeply revolting, because it has to predict what gets bound,
-- especially the question of whether there's a wrapper for a datacon
-- See Note [Implicit TyThings] in GHC.Driver.Env

-- N.B. the set of names returned here *must* match the set of TyThings
-- returned by GHC.Driver.Env.implicitTyThings, in the sense that
-- TyThing.getOccName should define a bijection between the two lists.
-- This invariant is used in GHC.IfaceToCore.tc_iface_decl_fingerprint
-- (see note [Tricky iface loop])
-- The order of the list does not matter.

ifaceDeclImplicitBndrs (IfaceData {ifName = tc_name, ifCons = cons })
  = case cons of
      IfAbstractTyCon {} -> []
      IfNewTyCon cd      -> mkNewTyCoOcc (occName tc_name) : ifaceConDeclImplicitBndrs cd
      IfDataTyCon cds    -> concatMap ifaceConDeclImplicitBndrs cds

ifaceDeclImplicitBndrs (IfaceClass { ifBody = IfAbstractClass })
  = []

ifaceDeclImplicitBndrs (IfaceClass { ifName = cls_tc_name
                                   , ifBody = IfConcreteClass {
                                       ifClassCtxt = sc_ctxt,
                                       ifSigs      = sigs,
                                       ifATs       = ats
                                     }})
  = -- (possibly) newtype coercion
    co_occs ++
    -- data constructor (DataCon namespace)
    -- data worker (Id namespace)
    -- no wrapper (class dictionaries never have a wrapper)
    [dc_occ, dcww_occ] ++
    -- associated types
    [occName (ifName at) | IfaceAT at _ <- ats ] ++
    -- superclass selectors
    [mkSuperDictSelOcc n cls_tc_occ | n <- [1..n_ctxt]] ++
    -- operation selectors
    [occName op | IfaceClassOp op _ _ <- sigs]
  where
    cls_tc_occ = occName cls_tc_name
    n_ctxt = length sc_ctxt
    n_sigs = length sigs
    co_occs | is_newtype = [mkNewTyCoOcc cls_tc_occ]
            | otherwise  = []
    dcww_occ = mkDataConWorkerOcc dc_occ
    dc_occ = mkClassDataConOcc cls_tc_occ
    -- A class is represented as a newtype when it has exactly one
    -- "field" (one method or one superclass, but not both).
    is_newtype = n_sigs + n_ctxt == 1 -- Sigh (keep this synced with buildClass)

ifaceDeclImplicitBndrs _ = []

ifaceConDeclImplicitBndrs :: IfaceConDecl -> [OccName]
ifaceConDeclImplicitBndrs (IfCon { ifConWrapper = has_wrapper, ifConName = con_name })
  = [occName con_name, work_occ] ++ wrap_occs
  where
    con_occ = occName con_name
    work_occ = mkDataConWorkerOcc con_occ                   -- Id namespace
    wrap_occs | has_wrapper = [mkDataConWrapperOcc con_occ] -- Id namespace
              | otherwise   = []

-- -----------------------------------------------------------------------------
-- The fingerprints of an IfaceDecl

       -- We better give each name bound by the declaration a
       -- different fingerprint!  So we calculate the fingerprint of
       -- each binder by combining the fingerprint of the whole
       -- declaration with the name of the binder. (#5614, #7215)
ifaceDeclFingerprints :: Fingerprint -> IfaceDecl -> [(OccName,Fingerprint)]
ifaceDeclFingerprints hash decl
  = (getOccName decl, hash) :
    [ (occ, computeFingerprint' (hash,occ))
    | occ <- ifaceDeclImplicitBndrs decl ]
  where
     computeFingerprint' =
       unsafeDupablePerformIO
        . computeFingerprint (panic "ifaceDeclFingerprints")

{-
************************************************************************
*                                                                      *
                Expressions
*                                                                      *
************************************************************************
-}

data IfaceExpr
  = IfaceLcl    IfLclName
  | IfaceExt    IfExtName
  | IfaceType   IfaceType
  | IfaceCo     IfaceCoercion
  | IfaceTuple  TupleSort [IfaceExpr]   -- Saturated; type arguments omitted
  | IfaceLam    IfaceLamBndr IfaceExpr
  | IfaceApp    IfaceExpr IfaceExpr
  | IfaceCase   IfaceExpr IfLclName [IfaceAlt]
  | IfaceECase  IfaceExpr IfaceType     -- See Note [Empty case alternatives]
  | IfaceLet    IfaceBinding IfaceExpr
  | IfaceCast   IfaceExpr IfaceCoercion
  | IfaceLit    Literal
  | IfaceLitRubbish IfaceType -- See GHC.Types.Literal
                              --     Note [Rubbish literals] item (6)
  | IfaceFCall  ForeignCall IfaceType
  | IfaceTick   IfaceTickish IfaceExpr  -- from Tick tickish E

data IfaceTickish
  = IfaceHpcTick Module Int           -- from HpcTick x
  | IfaceSCC     CostCentre Bool Bool -- from ProfNote
  | IfaceSource  RealSrcSpan String   -- from SourceNote
  -- no breakpoints: we never export these into interface files

data IfaceAlt = IfaceAlt IfaceConAlt [IfLclName] IfaceExpr
        -- Note: IfLclName, not
        -- IfaceBndr (and same with the case binder)
        -- We reconstruct the kind/type of the thing from the context
        -- thus saving bulk in interface files

data IfaceConAlt = IfaceDefault
                 | IfaceDataAlt IfExtName
                 | IfaceLitAlt Literal

data IfaceBinding
  = IfaceNonRec IfaceLetBndr IfaceExpr
  | IfaceRec    [(IfaceLetBndr, IfaceExpr)]

-- IfaceLetBndr is like IfaceIdBndr, but has IdInfo too
-- It's used for *non-top-level* let/rec binders
-- See Note [IdInfo on nested let-bindings]
data IfaceLetBndr = IfLetBndr IfLclName IfaceType IfaceIdInfo IfaceJoinInfo

data IfaceJoinInfo = IfaceNotJoinPoint
                   | IfaceJoinPoint JoinArity

{-
Note [Empty case alternatives]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In Iface syntax an IfaceCase does not record the types of the alternatives,
unlike Core syntax Case. But we need this type if the alternatives are empty.
Hence IfaceECase. See Note [Empty case alternatives] in GHC.Core.

Note [Expose recursive functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For supercompilation we want to put *all* unfoldings in the interface
file, even for functions that are recursive (or big).  So we need to
know when an unfolding belongs to a loop-breaker so that we can refrain
from inlining it (except during supercompilation).

Note [IdInfo on nested let-bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Occasionally we want to preserve IdInfo on nested let bindings. The one
that came up was a NOINLINE pragma on a let-binding inside an INLINE
function.  The user (Duncan Coutts) really wanted the NOINLINE control
to cross the separate compilation boundary.

In general we retain all info that is left by GHC.Core.Tidy.tidyLetBndr, since
that is what is seen by importing module with --make

Note [Displaying axiom incompatibilities]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With -fprint-axiom-incomps we display which closed type family equations
are incompatible with which.
This information is sometimes necessary because GHC doesn't try equations in
order: any equation can be used when all preceding equations that are
incompatible with it do not apply.

For example, the last "a && a = a" equation in Data.Type.Bool.&& is actually
compatible with all previous equations, and can reduce at any time.

This is displayed as:
Prelude> :i Data.Type.Equality.==
type family (==) (a :: k) (b :: k) :: Bool
  where
      {- #0 -} (==) (f a) (g b) = (f == g) && (a == b)
      {- #1 -} (==) a a = 'True
          -- incompatible with: #0
      {- #2 -} (==) _1 _2 = 'False
          -- incompatible with: #1, #0
The comment after an equation refers to all previous equations (0-indexed)
that are incompatible with it.

************************************************************************
*                                                                      *
                Printing IfaceDecl
*                                                                      *
************************************************************************
-}

pprAxBranch :: SDoc -> BranchIndex -> IfaceAxBranch -> SDoc
-- The TyCon might be local (just an OccName), or this might
-- be a branch for an imported TyCon, so it would be an ExtName
-- So it's easier to take an SDoc here
--
-- This function is used
--    to print interface files,
--    in debug messages
--    in :info F for GHCi, which goes via toConToIfaceDecl on the family tycon
-- For user error messages we use Coercion.pprCoAxiom and friends
pprAxBranch pp_tc idx (IfaceAxBranch { ifaxbTyVars = tvs
                                     , ifaxbCoVars = _cvs
                                     , ifaxbLHS = pat_tys
                                     , ifaxbRHS = rhs
                                     , ifaxbIncomps = incomps })
  = assertPpr (null _cvs) (pp_tc $$ ppr _cvs) $
    hang ppr_binders 2 (hang pp_lhs 2 (equals <+> ppr rhs))
    $+$
    nest 4 maybe_incomps
  where
    -- See Note [Printing foralls in type family instances] in GHC.Iface.Type
    ppr_binders = maybe_index <+>
      pprUserIfaceForAll (map (mkIfaceForAllTvBndr Specified) tvs)
    pp_lhs = hang pp_tc 2 (pprParendIfaceAppArgs pat_tys)

    -- See Note [Displaying axiom incompatibilities]
    maybe_index
      = ppWhenOption sdocPrintAxiomIncomps $
          text "{-" <+> (text "#" <> ppr idx) <+> text "-}"
    maybe_incomps
      = ppWhenOption sdocPrintAxiomIncomps $
          ppWhen (notNull incomps) $
            text "--" <+> text "incompatible with:"
            <+> pprWithCommas (\incomp -> text "#" <> ppr incomp) incomps

instance Outputable IfaceAnnotation where
  ppr (IfaceAnnotation target value) = ppr target <+> colon <+> ppr value

instance NamedThing IfaceClassOp where
  getName (IfaceClassOp n _ _) = n

instance HasOccName IfaceClassOp where
  occName = getOccName

instance NamedThing IfaceConDecl where
  getName = ifConName

instance HasOccName IfaceConDecl where
  occName = getOccName

instance NamedThing IfaceDecl where
  getName = ifName

instance HasOccName IfaceDecl where
  occName = getOccName

instance Outputable IfaceDecl where
  ppr = pprIfaceDecl showToIface

{-
Note [Minimal complete definition]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The minimal complete definition should only be included if a complete
class definition is shown. Since the minimal complete definition is
anonymous we can't reuse the same mechanism that is used for the
filtering of method signatures. Instead we just check if anything at all
is filtered and hide it in that case.
-}

data ShowSub
  = ShowSub
      { ss_how_much :: ShowHowMuch
      , ss_forall :: ShowForAllFlag }

-- See Note [Printing IfaceDecl binders]
-- The alternative pretty printer referred to in the note.
newtype AltPpr = AltPpr (Maybe (OccName -> SDoc))

data ShowHowMuch
  = ShowHeader AltPpr -- ^Header information only, not rhs
  | ShowSome [OccName] AltPpr
  -- ^ Show only some sub-components. Specifically,
  --
  -- [@\[\]@] Print all sub-components.
  -- [@(n:ns)@] Print sub-component @n@ with @ShowSub = ns@;
  --            elide other sub-components to @...@
  --            May 14: the list is max 1 element long at the moment
  | ShowIface
  -- ^Everything including GHC-internal information (used in --show-iface)

{-
Note [Printing IfaceDecl binders]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The binders in an IfaceDecl are just OccNames, so we don't know what module they
come from.
But when we pretty-print a TyThing by converting to an IfaceDecl
(see GHC.Types.TyThing.Ppr), the TyThing may come from some other module so we
really need the module qualifier.  We solve this by passing in a pretty-printer
for the binders.

When printing an interface file (--show-iface), we want to print
everything unqualified, so we can just print the OccName directly.
-}

instance Outputable ShowHowMuch where
  ppr (ShowHeader _) = text "ShowHeader"
  ppr ShowIface      = text "ShowIface"
  ppr (ShowSome occs _) = text "ShowSome" <+> ppr occs

showToHeader :: ShowSub
showToHeader = ShowSub { ss_how_much = ShowHeader $ AltPpr Nothing
                       , ss_forall = ShowForAllWhen }

showToIface :: ShowSub
showToIface = ShowSub { ss_how_much = ShowIface
                      , ss_forall = ShowForAllWhen }

-- | Emit the doc only when everything (the full interface) is being shown.
ppShowIface :: ShowSub -> SDoc -> SDoc
ppShowIface (ShowSub { ss_how_much = ShowIface }) doc = doc
ppShowIface _                                     _   = Outputable.empty

-- show if all sub-components or the complete interface is shown
ppShowAllSubs :: ShowSub -> SDoc -> SDoc -- Note [Minimal complete definition]
ppShowAllSubs (ShowSub { ss_how_much = ShowSome [] _ }) doc = doc
ppShowAllSubs (ShowSub { ss_how_much = ShowIface })     doc = doc
ppShowAllSubs _                                         _   = Outputable.empty

-- | Suppress the doc when only header information is wanted.
ppShowRhs :: ShowSub -> SDoc -> SDoc
ppShowRhs (ShowSub { ss_how_much = ShowHeader _ }) _   = Outputable.empty
ppShowRhs _                                        doc = doc

-- | Should this named sub-component be displayed under the given ShowSub?
showSub :: HasOccName n => ShowSub -> n -> Bool
showSub (ShowSub { ss_how_much = ShowHeader _ })     _     = False
showSub (ShowSub { ss_how_much = ShowSome (n:_) _ }) thing = n == occName thing
showSub (ShowSub { ss_how_much = _ })                _     = True

ppr_trim :: [Maybe SDoc] -> [SDoc]
-- Collapse a group of Nothings to a single "..."
ppr_trim xs
  = snd (foldr go (False, []) xs)
  where
    go (Just doc) (_,     so_far) = (False, doc : so_far)
    go Nothing    (True,  so_far) = (True, so_far)
    go Nothing    (False, so_far) = (True, text "..."
: so_far) isIfaceDataInstance :: IfaceTyConParent -> Bool isIfaceDataInstance IfNoParent = False isIfaceDataInstance _ = True pprClassRoles :: ShowSub -> IfaceTopBndr -> [IfaceTyConBinder] -> [Role] -> SDoc pprClassRoles ss clas binders roles = pprRoles (== Nominal) (pprPrefixIfDeclBndr (ss_how_much ss) (occName clas)) binders roles pprClassStandaloneKindSig :: ShowSub -> IfaceTopBndr -> IfaceKind -> SDoc pprClassStandaloneKindSig ss clas = pprStandaloneKindSig (pprPrefixIfDeclBndr (ss_how_much ss) (occName clas)) constraintIfaceKind :: IfaceKind constraintIfaceKind = IfaceTyConApp (IfaceTyCon constraintKindTyConName (mkIfaceTyConInfo NotPromoted IfaceNormalTyCon)) IA_Nil pprIfaceDecl :: ShowSub -> IfaceDecl -> SDoc -- NB: pprIfaceDecl is also used for pretty-printing TyThings in GHCi -- See Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr pprIfaceDecl ss (IfaceData { ifName = tycon, ifCType = ctype, ifCtxt = context, ifResKind = kind, ifRoles = roles, ifCons = condecls, ifParent = parent, ifGadtSyntax = gadt, ifBinders = binders }) | gadt = vcat [ pp_roles , pp_ki_sig , pp_nd <+> pp_lhs <+> pp_kind <+> pp_where , nest 2 (vcat pp_cons) , nest 2 $ ppShowIface ss pp_extra ] | otherwise = vcat [ pp_roles , pp_ki_sig , hang (pp_nd <+> pp_lhs) 2 (add_bars pp_cons) , nest 2 $ ppShowIface ss pp_extra ] where is_data_instance = isIfaceDataInstance parent -- See Note [Printing foralls in type family instances] in GHC.Iface.Type pp_data_inst_forall :: SDoc pp_data_inst_forall = pprUserIfaceForAll forall_bndrs forall_bndrs :: [IfaceForAllBndr] forall_bndrs = [Bndr (binderVar tc_bndr) Specified | tc_bndr <- binders] cons = visibleIfConDecls condecls pp_where = ppWhen (gadt && not (null cons)) $ text "where" pp_cons = ppr_trim (map show_con cons) :: [SDoc] pp_kind = ppUnless (if ki_sig_printable then isIfaceTauType kind -- Even in the presence of a standalone kind signature, a non-tau -- result kind annotation cannot be discarded as it determines the arity. 
-- See Note [Arity inference in kcCheckDeclHeader_sig] in GHC.Tc.Gen.HsType else isIfaceLiftedTypeKind kind) (dcolon <+> ppr kind) pp_lhs = case parent of IfNoParent -> pprIfaceDeclHead suppress_bndr_sig context ss tycon binders IfDataInstance{} -> text "instance" <+> pp_data_inst_forall <+> pprIfaceTyConParent parent pp_roles | is_data_instance = empty | otherwise = pprRoles (== Representational) name_doc binders roles -- Don't display roles for data family instances (yet) -- See discussion on #8672. ki_sig_printable = -- If we print a standalone kind signature for a data instance, we leak -- the internal constructor name: -- -- type T15827.R:Dka :: forall k. k -> * -- data instance forall k (a :: k). D a = MkD (Proxy a) -- -- This T15827.R:Dka is a compiler-generated type constructor for the -- data instance. not is_data_instance pp_ki_sig = ppWhen ki_sig_printable $ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders kind) -- See Note [Suppressing binder signatures] in GHC.Iface.Type suppress_bndr_sig = SuppressBndrSig ki_sig_printable name_doc = pprPrefixIfDeclBndr (ss_how_much ss) (occName tycon) add_bars [] = Outputable.empty add_bars (c:cs) = sep ((equals <+> c) : map (vbar <+>) cs) ok_con dc = showSub ss dc || any (showSub ss . 
flSelector) (ifConFields dc) show_con dc | ok_con dc = Just $ pprIfaceConDecl ss gadt tycon binders parent dc | otherwise = Nothing pp_nd = case condecls of IfAbstractTyCon{} -> text "data" IfDataTyCon{} -> text "data" IfNewTyCon{} -> text "newtype" pp_extra = vcat [pprCType ctype] pprIfaceDecl ss (IfaceClass { ifName = clas , ifRoles = roles , ifFDs = fds , ifBinders = binders , ifBody = IfAbstractClass }) = vcat [ pprClassRoles ss clas binders roles , pprClassStandaloneKindSig ss clas (mkIfaceTyConKind binders constraintIfaceKind) , text "class" <+> pprIfaceDeclHead suppress_bndr_sig [] ss clas binders <+> pprFundeps fds ] where -- See Note [Suppressing binder signatures] in GHC.Iface.Type suppress_bndr_sig = SuppressBndrSig True pprIfaceDecl ss (IfaceClass { ifName = clas , ifRoles = roles , ifFDs = fds , ifBinders = binders , ifBody = IfConcreteClass { ifATs = ats, ifSigs = sigs, ifClassCtxt = context, ifMinDef = minDef }}) = vcat [ pprClassRoles ss clas binders roles , pprClassStandaloneKindSig ss clas (mkIfaceTyConKind binders constraintIfaceKind) , text "class" <+> pprIfaceDeclHead suppress_bndr_sig context ss clas binders <+> pprFundeps fds <+> pp_where , nest 2 (vcat [ vcat asocs, vcat dsigs , ppShowAllSubs ss (pprMinDef minDef)])] where pp_where = ppShowRhs ss $ ppUnless (null sigs && null ats) (text "where") asocs = ppr_trim $ map maybeShowAssoc ats dsigs = ppr_trim $ map maybeShowSig sigs maybeShowAssoc :: IfaceAT -> Maybe SDoc maybeShowAssoc asc@(IfaceAT d _) | showSub ss d = Just $ pprIfaceAT ss asc | otherwise = Nothing maybeShowSig :: IfaceClassOp -> Maybe SDoc maybeShowSig sg | showSub ss sg = Just $ pprIfaceClassOp ss sg | otherwise = Nothing pprMinDef :: BooleanFormula IfLclName -> SDoc pprMinDef minDef = ppUnless (isTrue minDef) $ -- hide empty definitions text "{-# MINIMAL" <+> pprBooleanFormula (\_ def -> cparen (isLexSym def) (ppr def)) 0 minDef <+> text "#-}" -- See Note [Suppressing binder signatures] in GHC.Iface.Type suppress_bndr_sig = 
SuppressBndrSig True pprIfaceDecl ss (IfaceSynonym { ifName = tc , ifBinders = binders , ifSynRhs = mono_ty , ifResKind = res_kind}) = vcat [ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders res_kind) , hang (text "type" <+> pprIfaceDeclHead suppress_bndr_sig [] ss tc binders <+> equals) 2 (sep [ pprIfaceForAll tvs, pprIfaceContextArr theta, ppr_tau , ppUnless (isIfaceLiftedTypeKind res_kind) (dcolon <+> ppr res_kind) ]) ] where (tvs, theta, tau) = splitIfaceSigmaTy mono_ty name_doc = pprPrefixIfDeclBndr (ss_how_much ss) (occName tc) -- See Note [Printing type abbreviations] in GHC.Iface.Type ppr_tau | tc `hasKey` liftedTypeKindTyConKey || tc `hasKey` unrestrictedFunTyConKey = updSDocContext (\ctx -> ctx { sdocPrintTypeAbbreviations = False }) $ ppr tau | otherwise = ppr tau -- See Note [Suppressing binder signatures] in GHC.Iface.Type suppress_bndr_sig = SuppressBndrSig True pprIfaceDecl ss (IfaceFamily { ifName = tycon , ifFamFlav = rhs, ifBinders = binders , ifResKind = res_kind , ifResVar = res_var, ifFamInj = inj }) | IfaceDataFamilyTyCon <- rhs = vcat [ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders res_kind) , text "data family" <+> pprIfaceDeclHead suppress_bndr_sig [] ss tycon binders ] | otherwise = vcat [ pprStandaloneKindSig name_doc (mkIfaceTyConKind binders res_kind) , hang (text "type family" <+> pprIfaceDeclHead suppress_bndr_sig [] ss tycon binders <+> ppShowRhs ss (pp_where rhs)) 2 (pp_inj res_var inj <+> ppShowRhs ss (pp_rhs rhs)) $$ nest 2 (ppShowRhs ss (pp_branches rhs)) ] where name_doc = pprPrefixIfDeclBndr (ss_how_much ss) (occName tycon) pp_where (IfaceClosedSynFamilyTyCon {}) = text "where" pp_where _ = empty pp_inj Nothing _ = empty pp_inj (Just res) inj | Injective injectivity <- inj = hsep [ equals, ppr res , pp_inj_cond res injectivity] | otherwise = hsep [ equals, ppr res ] pp_inj_cond res inj = case filterByList inj binders of [] -> empty tvs -> hsep [vbar, ppr res, text "->", interppSP (map ifTyConBinderName tvs)] 
pp_rhs IfaceDataFamilyTyCon = ppShowIface ss (text "data") pp_rhs IfaceOpenSynFamilyTyCon = ppShowIface ss (text "open") pp_rhs IfaceAbstractClosedSynFamilyTyCon = ppShowIface ss (text "closed, abstract") pp_rhs (IfaceClosedSynFamilyTyCon {}) = empty -- see pp_branches pp_rhs IfaceBuiltInSynFamTyCon = ppShowIface ss (text "built-in") pp_branches (IfaceClosedSynFamilyTyCon (Just (ax, brs))) = vcat (unzipWith (pprAxBranch (pprPrefixIfDeclBndr (ss_how_much ss) (occName tycon)) ) $ zip [0..] brs) $$ ppShowIface ss (text "axiom" <+> ppr ax) pp_branches _ = Outputable.empty -- See Note [Suppressing binder signatures] in GHC.Iface.Type suppress_bndr_sig = SuppressBndrSig True pprIfaceDecl _ (IfacePatSyn { ifName = name, ifPatUnivBndrs = univ_bndrs, ifPatExBndrs = ex_bndrs, ifPatProvCtxt = prov_ctxt, ifPatReqCtxt = req_ctxt, ifPatArgs = arg_tys, ifFieldLabels = pat_fldlbls, ifPatTy = pat_ty} ) = sdocWithContext mk_msg where pat_keywrd = text "pattern" mk_msg sdocCtx = vcat [ ppr_pat_ty -- only print this for record pattern synonyms , if null pat_fldlbls then Outputable.empty else pat_keywrd <+> pprPrefixOcc name <+> pat_body] where ppr_pat_ty = hang (pat_keywrd <+> pprPrefixOcc name) 2 (dcolon <+> sep [univ_msg , pprIfaceContextArr req_ctxt , ppWhen insert_empty_ctxt $ parens empty <+> darrow , ex_msg , pprIfaceContextArr prov_ctxt , pprIfaceType $ foldr (IfaceFunTy VisArg many_ty) pat_ty arg_tys ]) pat_body = braces $ sep $ punctuate comma $ map ppr pat_fldlbls univ_msg = pprUserIfaceForAll $ tyVarSpecToBinders univ_bndrs ex_msg = pprUserIfaceForAll $ tyVarSpecToBinders ex_bndrs insert_empty_ctxt = null req_ctxt && not (null prov_ctxt && isEmpty sdocCtx ex_msg) pprIfaceDecl ss (IfaceId { ifName = var, ifType = ty, ifIdDetails = details, ifIdInfo = info }) = vcat [ hang (pprPrefixIfDeclBndr (ss_how_much ss) (occName var) <+> dcolon) 2 (pprIfaceSigmaType (ss_forall ss) ty) , ppShowIface ss (ppr details) , ppShowIface ss (ppr info) ] pprIfaceDecl _ (IfaceAxiom { ifName = 
name, ifTyCon = tycon , ifAxBranches = branches }) = hang (text "axiom" <+> ppr name <+> dcolon) 2 (vcat $ unzipWith (pprAxBranch (ppr tycon)) $ zip [0..] branches) pprCType :: Maybe CType -> SDoc pprCType Nothing = Outputable.empty pprCType (Just cType) = text "C type:" <+> ppr cType -- if, for each role, suppress_if role is True, then suppress the role -- output pprRoles :: (Role -> Bool) -> SDoc -> [IfaceTyConBinder] -> [Role] -> SDoc pprRoles suppress_if tyCon bndrs roles = sdocOption sdocPrintExplicitKinds $ \print_kinds -> let froles = suppressIfaceInvisibles (PrintExplicitKinds print_kinds) bndrs roles in ppUnless (all suppress_if froles || null froles) $ text "type role" <+> tyCon <+> hsep (map ppr froles) pprStandaloneKindSig :: SDoc -> IfaceType -> SDoc pprStandaloneKindSig tyCon ty = text "type" <+> tyCon <+> text "::" <+> ppr ty pprInfixIfDeclBndr :: ShowHowMuch -> OccName -> SDoc pprInfixIfDeclBndr (ShowSome _ (AltPpr (Just ppr_bndr))) name = pprInfixVar (isSymOcc name) (ppr_bndr name) pprInfixIfDeclBndr _ name = pprInfixVar (isSymOcc name) (ppr name) pprPrefixIfDeclBndr :: ShowHowMuch -> OccName -> SDoc pprPrefixIfDeclBndr (ShowHeader (AltPpr (Just ppr_bndr))) name = parenSymOcc name (ppr_bndr name) pprPrefixIfDeclBndr (ShowSome _ (AltPpr (Just ppr_bndr))) name = parenSymOcc name (ppr_bndr name) pprPrefixIfDeclBndr _ name = parenSymOcc name (ppr name) instance Outputable IfaceClassOp where ppr = pprIfaceClassOp showToIface pprIfaceClassOp :: ShowSub -> IfaceClassOp -> SDoc pprIfaceClassOp ss (IfaceClassOp n ty dm) = pp_sig n ty $$ generic_dm where generic_dm | Just (GenericDM dm_ty) <- dm = text "default" <+> pp_sig n dm_ty | otherwise = empty pp_sig n ty = pprPrefixIfDeclBndr (ss_how_much ss) (occName n) <+> dcolon <+> pprIfaceSigmaType ShowForAllWhen ty instance Outputable IfaceAT where ppr = pprIfaceAT showToIface pprIfaceAT :: ShowSub -> IfaceAT -> SDoc pprIfaceAT ss (IfaceAT d mb_def) = vcat [ pprIfaceDecl ss d , case mb_def of Nothing -> 
Outputable.empty Just rhs -> nest 2 $ text "Default:" <+> ppr rhs ] instance Outputable IfaceTyConParent where ppr p = pprIfaceTyConParent p pprIfaceTyConParent :: IfaceTyConParent -> SDoc pprIfaceTyConParent IfNoParent = Outputable.empty pprIfaceTyConParent (IfDataInstance _ tc tys) = pprIfaceTypeApp topPrec tc tys pprIfaceDeclHead :: SuppressBndrSig -> IfaceContext -> ShowSub -> Name -> [IfaceTyConBinder] -- of the tycon, for invisible-suppression -> SDoc pprIfaceDeclHead suppress_sig context ss tc_occ bndrs = sdocOption sdocPrintExplicitKinds $ \print_kinds -> sep [ pprIfaceContextArr context , pprPrefixIfDeclBndr (ss_how_much ss) (occName tc_occ) <+> pprIfaceTyConBinders suppress_sig (suppressIfaceInvisibles (PrintExplicitKinds print_kinds) bndrs bndrs) ] pprIfaceConDecl :: ShowSub -> Bool -> IfaceTopBndr -> [IfaceTyConBinder] -> IfaceTyConParent -> IfaceConDecl -> SDoc pprIfaceConDecl ss gadt_style tycon tc_binders parent (IfCon { ifConName = name, ifConInfix = is_infix, ifConUserTvBinders = user_tvbs, ifConEqSpec = eq_spec, ifConCtxt = ctxt, ifConArgTys = arg_tys, ifConStricts = stricts, ifConFields = fields }) | gadt_style = pp_prefix_con <+> dcolon <+> ppr_gadt_ty | otherwise = ppr_ex_quant pp_h98_con where pp_h98_con | not (null fields) = pp_prefix_con <+> pp_field_args | is_infix , [ty1, ty2] <- pp_args = sep [ ty1 , pprInfixIfDeclBndr how_much (occName name) , ty2] | otherwise = pp_prefix_con <+> sep pp_args how_much = ss_how_much ss tys_w_strs :: [(IfaceBang, IfaceType)] tys_w_strs = zip stricts (map snd arg_tys) pp_prefix_con = pprPrefixIfDeclBndr how_much (occName name) -- If we're pretty-printing a H98-style declaration with existential -- quantification, then user_tvbs will always consist of the universal -- tyvar binders followed by the existential tyvar binders. So to recover -- the visibilities of the existential tyvar binders, we can simply drop -- the universal tyvar binders from user_tvbs. 
ex_tvbs = dropList tc_binders user_tvbs ppr_ex_quant = pprIfaceForAllPartMust (ifaceForAllSpecToBndrs ex_tvbs) ctxt pp_gadt_res_ty = mk_user_con_res_ty eq_spec ppr_gadt_ty = pprIfaceForAllPart (ifaceForAllSpecToBndrs user_tvbs) ctxt pp_tau -- A bit gruesome this, but we can't form the full con_tau, and ppr it, -- because we don't have a Name for the tycon, only an OccName pp_tau | null fields = case pp_args ++ [pp_gadt_res_ty] of (t:ts) -> fsep (t : zipWithEqual "pprIfaceConDecl" (\(w,_) d -> ppr_arr w <+> d) arg_tys ts) [] -> panic "pp_con_taus" | otherwise = sep [pp_field_args, arrow <+> pp_gadt_res_ty] -- Constructors are linear by default, but we don't want to show -- linear arrows when -XLinearTypes is disabled ppr_arr w = sdocOption sdocLinearTypes (\linearTypes -> if linearTypes then ppr_fun_arrow w else arrow) ppr_bang IfNoBang = whenPprDebug $ char '_' ppr_bang IfStrict = char '!' ppr_bang IfUnpack = text "{-# UNPACK #-}" ppr_bang (IfUnpackCo co) = text "! {-# UNPACK #-}" <> pprParendIfaceCoercion co pprFieldArgTy, pprArgTy :: (IfaceBang, IfaceType) -> SDoc -- If using record syntax, the only reason one would need to parenthesize -- a compound field type is if it's preceded by a bang pattern. pprFieldArgTy (bang, ty) = ppr_arg_ty (bang_prec bang) bang ty -- If not using record syntax, a compound field type might need to be -- parenthesized if one of the following holds: -- -- 1. We're using Haskell98 syntax. -- 2. The field type is preceded with a bang pattern. 
pprArgTy (bang, ty) = ppr_arg_ty (max gadt_prec (bang_prec bang)) bang ty ppr_arg_ty :: PprPrec -> IfaceBang -> IfaceType -> SDoc ppr_arg_ty prec bang ty = ppr_bang bang <> pprPrecIfaceType prec ty -- If we're displaying the fields GADT-style, e.g., -- -- data Foo a where -- MkFoo :: (Int -> Int) -> Maybe a -> Foo -- -- Then we use `funPrec`, since that will ensure `Int -> Int` gets the -- parentheses that it requires, but simple compound types like `Maybe a` -- (which don't require parentheses in a function argument position) won't -- get them, assuming that there are no bang patterns (see bang_prec). -- -- If we're displaying the fields Haskell98-style, e.g., -- -- data Foo a = MkFoo (Int -> Int) (Maybe a) -- -- Then not only must we parenthesize `Int -> Int`, we must also -- parenthesize compound fields like (Maybe a). Therefore, we pick -- `appPrec`, which has higher precedence than `funPrec`. gadt_prec :: PprPrec gadt_prec | gadt_style = funPrec | otherwise = appPrec -- The presence of bang patterns or UNPACK annotations requires -- surrounding the type with parentheses, if needed (#13699) bang_prec :: IfaceBang -> PprPrec bang_prec IfNoBang = topPrec bang_prec IfStrict = appPrec bang_prec IfUnpack = appPrec bang_prec IfUnpackCo{} = appPrec pp_args :: [SDoc] -- No records, e.g., ` Maybe a -> Int -> ...` or -- `!(Maybe a) -> !Int -> ...` pp_args = map pprArgTy tys_w_strs pp_field_args :: SDoc -- Records, e.g., { x :: Maybe a, y :: Int } or -- { x :: !(Maybe a), y :: !Int } pp_field_args = braces $ sep $ punctuate comma $ ppr_trim $ zipWith maybe_show_label fields tys_w_strs maybe_show_label :: FieldLabel -> (IfaceBang, IfaceType) -> Maybe SDoc maybe_show_label lbl bty | showSub ss sel = Just (pprPrefixIfDeclBndr how_much occ <+> dcolon <+> pprFieldArgTy bty) | otherwise = Nothing where sel = flSelector lbl occ = mkVarOccFS (flLabel lbl) mk_user_con_res_ty :: IfaceEqSpec -> SDoc -- See Note [Result type of a data family GADT] mk_user_con_res_ty eq_spec | 
IfDataInstance _ tc tys <- parent = pprIfaceType (IfaceTyConApp tc (substIfaceAppArgs gadt_subst tys)) | otherwise = ppr_tc_app gadt_subst where gadt_subst = mkIfaceTySubst eq_spec -- When pretty-printing a GADT return type, we: -- -- 1. Take the data tycon binders, extract their variable names and -- visibilities, and construct suitable arguments from them. (This is -- the role of mk_tc_app_args.) -- 2. Apply the GADT substitution constructed from the eq_spec. -- (See Note [Result type of a data family GADT].) -- 3. Pretty-print the data type constructor applied to its arguments. -- This process will omit any invisible arguments, such as coercion -- variables, if necessary. (See Note -- [VarBndrs, TyCoVarBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep.) ppr_tc_app gadt_subst = pprPrefixIfDeclBndr how_much (occName tycon) <+> pprParendIfaceAppArgs (substIfaceAppArgs gadt_subst (mk_tc_app_args tc_binders)) mk_tc_app_args :: [IfaceTyConBinder] -> IfaceAppArgs mk_tc_app_args [] = IA_Nil mk_tc_app_args (Bndr bndr vis:tc_bndrs) = IA_Arg (IfaceTyVar (ifaceBndrName bndr)) (tyConBndrVisArgFlag vis) (mk_tc_app_args tc_bndrs) instance Outputable IfaceRule where ppr (IfaceRule { ifRuleName = name, ifActivation = act, ifRuleBndrs = bndrs, ifRuleHead = fn, ifRuleArgs = args, ifRuleRhs = rhs, ifRuleOrph = orph }) = sep [ hsep [ pprRuleName name , if isOrphan orph then text "[orphan]" else Outputable.empty , ppr act , pp_foralls ] , nest 2 (sep [ppr fn <+> sep (map pprParendIfaceExpr args), text "=" <+> ppr rhs]) ] where pp_foralls = ppUnless (null bndrs) $ forAllLit <+> pprIfaceBndrs bndrs <> dot instance Outputable IfaceClsInst where ppr (IfaceClsInst { ifDFun = dfun_id, ifOFlag = flag , ifInstCls = cls, ifInstTys = mb_tcs , ifInstOrph = orph }) = hang (text "instance" <+> ppr flag <+> (if isOrphan orph then text "[orphan]" else Outputable.empty) <+> ppr cls <+> brackets (pprWithCommas ppr_rough mb_tcs)) 2 (equals <+> ppr dfun_id) instance Outputable IfaceFamInst 
where ppr (IfaceFamInst { ifFamInstFam = fam, ifFamInstTys = mb_tcs , ifFamInstAxiom = tycon_ax, ifFamInstOrph = orph }) = hang (text "family instance" <+> (if isOrphan orph then text "[orphan]" else Outputable.empty) <+> ppr fam <+> pprWithCommas (brackets . ppr_rough) mb_tcs) 2 (equals <+> ppr tycon_ax) ppr_rough :: Maybe IfaceTyCon -> SDoc ppr_rough Nothing = dot ppr_rough (Just tc) = ppr tc {- Note [Result type of a data family GADT] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data family T a data instance T (p,q) where T1 :: T (Int, Maybe c) T2 :: T (Bool, q) The IfaceDecl actually looks like data TPr p q where T1 :: forall p q. forall c. (p~Int,q~Maybe c) => TPr p q T2 :: forall p q. (p~Bool) => TPr p q To reconstruct the result types for T1 and T2 that we want to pretty print, we substitute the eq-spec [p->Int, q->Maybe c] in the arg pattern (p,q) to give T (Int, Maybe c) Remember that in IfaceSyn, the TyCon and DataCon share the same universal type variables. ----------------------------- Printing IfaceExpr ------------------------------------ -} instance Outputable IfaceExpr where ppr e = pprIfaceExpr noParens e noParens :: SDoc -> SDoc noParens pp = pp pprParendIfaceExpr :: IfaceExpr -> SDoc pprParendIfaceExpr = pprIfaceExpr parens -- | Pretty Print an IfaceExpr -- -- The first argument should be a function that adds parens in context that need -- an atomic value (e.g. 
function args) pprIfaceExpr :: (SDoc -> SDoc) -> IfaceExpr -> SDoc pprIfaceExpr _ (IfaceLcl v) = ppr v pprIfaceExpr _ (IfaceExt v) = ppr v pprIfaceExpr _ (IfaceLit l) = ppr l pprIfaceExpr _ (IfaceLitRubbish r) = text "RUBBISH" <> parens (ppr r) pprIfaceExpr _ (IfaceFCall cc ty) = braces (ppr cc <+> ppr ty) pprIfaceExpr _ (IfaceType ty) = char '@' <> pprParendIfaceType ty pprIfaceExpr _ (IfaceCo co) = text "@~" <> pprParendIfaceCoercion co pprIfaceExpr add_par app@(IfaceApp _ _) = add_par (pprIfaceApp app []) pprIfaceExpr _ (IfaceTuple c as) = tupleParens c (pprWithCommas ppr as) pprIfaceExpr add_par i@(IfaceLam _ _) = add_par (sep [char '\\' <+> sep (map pprIfaceLamBndr bndrs) <+> arrow, pprIfaceExpr noParens body]) where (bndrs,body) = collect [] i collect bs (IfaceLam b e) = collect (b:bs) e collect bs e = (reverse bs, e) pprIfaceExpr add_par (IfaceECase scrut ty) = add_par (sep [ text "case" <+> pprIfaceExpr noParens scrut , text "ret_ty" <+> pprParendIfaceType ty , text "of {}" ]) pprIfaceExpr add_par (IfaceCase scrut bndr [IfaceAlt con bs rhs]) = add_par (sep [text "case" <+> pprIfaceExpr noParens scrut <+> text "of" <+> ppr bndr <+> char '{' <+> ppr_con_bs con bs <+> arrow, pprIfaceExpr noParens rhs <+> char '}']) pprIfaceExpr add_par (IfaceCase scrut bndr alts) = add_par (sep [text "case" <+> pprIfaceExpr noParens scrut <+> text "of" <+> ppr bndr <+> char '{', nest 2 (sep (map pprIfaceAlt alts)) <+> char '}']) pprIfaceExpr _ (IfaceCast expr co) = sep [pprParendIfaceExpr expr, nest 2 (text "`cast`"), pprParendIfaceCoercion co] pprIfaceExpr add_par (IfaceLet (IfaceNonRec b rhs) body) = add_par (sep [text "let {", nest 2 (ppr_bind (b, rhs)), text "} in", pprIfaceExpr noParens body]) pprIfaceExpr add_par (IfaceLet (IfaceRec pairs) body) = add_par (sep [text "letrec {", nest 2 (sep (map ppr_bind pairs)), text "} in", pprIfaceExpr noParens body]) pprIfaceExpr add_par (IfaceTick tickish e) = add_par (pprIfaceTickish tickish <+> pprIfaceExpr noParens e) pprIfaceAlt 
:: IfaceAlt -> SDoc pprIfaceAlt (IfaceAlt con bs rhs) = sep [ppr_con_bs con bs, arrow <+> pprIfaceExpr noParens rhs] ppr_con_bs :: IfaceConAlt -> [IfLclName] -> SDoc ppr_con_bs con bs = ppr con <+> hsep (map ppr bs) ppr_bind :: (IfaceLetBndr, IfaceExpr) -> SDoc ppr_bind (IfLetBndr b ty info ji, rhs) = sep [hang (ppr b <+> dcolon <+> ppr ty) 2 (ppr ji <+> ppr info), equals <+> pprIfaceExpr noParens rhs] ------------------ pprIfaceTickish :: IfaceTickish -> SDoc pprIfaceTickish (IfaceHpcTick m ix) = braces (text "tick" <+> ppr m <+> ppr ix) pprIfaceTickish (IfaceSCC cc tick scope) = braces (pprCostCentreCore cc <+> ppr tick <+> ppr scope) pprIfaceTickish (IfaceSource src _names) = braces (pprUserRealSpan True src) ------------------ pprIfaceApp :: IfaceExpr -> [SDoc] -> SDoc pprIfaceApp (IfaceApp fun arg) args = pprIfaceApp fun $ nest 2 (pprParendIfaceExpr arg) : args pprIfaceApp fun args = sep (pprParendIfaceExpr fun : args) ------------------ instance Outputable IfaceConAlt where ppr IfaceDefault = text "DEFAULT" ppr (IfaceLitAlt l) = ppr l ppr (IfaceDataAlt d) = ppr d ------------------ instance Outputable IfaceIdDetails where ppr IfVanillaId = Outputable.empty ppr (IfWorkerLikeId dmd) = text "StrWork" <> parens (ppr dmd) ppr (IfRecSelId tc b) = text "RecSel" <+> ppr tc <+> if b then text "" else Outputable.empty ppr IfDFunId = text "DFunId" instance Outputable IfaceInfoItem where ppr (HsUnfold lb unf) = text "Unfolding" <> ppWhen lb (text "(loop-breaker)") <> colon <+> ppr unf ppr (HsInline prag) = text "Inline:" <+> ppr prag ppr (HsArity arity) = text "Arity:" <+> int arity ppr (HsDmdSig str) = text "Strictness:" <+> ppr str ppr (HsCprSig cpr) = text "CPR:" <+> ppr cpr ppr HsNoCafRefs = text "HasNoCafRefs" ppr HsLevity = text "Never levity-polymorphic" ppr (HsLFInfo lf_info) = text "LambdaFormInfo:" <+> ppr lf_info ppr (HsTagSig tag_sig) = text "TagSig:" <+> ppr tag_sig instance Outputable IfaceJoinInfo where ppr IfaceNotJoinPoint = empty ppr (IfaceJoinPoint ar) 
= angleBrackets (text "join" <+> ppr ar) instance Outputable IfaceUnfolding where ppr (IfCompulsory e) = text "" <+> parens (ppr e) ppr (IfCoreUnfold s _ e) = (if s then text "" else Outputable.empty) <+> parens (ppr e) ppr (IfInlineRule a uok bok e) = sep [text "InlineRule" <+> ppr (a,uok,bok), pprParendIfaceExpr e] ppr (IfDFunUnfold bs es) = hang (text "DFun:" <+> sep (map ppr bs) <> dot) 2 (sep (map pprParendIfaceExpr es)) {- ************************************************************************ * * Finding the Names in Iface syntax * * ************************************************************************ This is used for dependency analysis in GHC.Iface.Make, so that we fingerprint a declaration before the things that depend on it. It is specific to interface-file fingerprinting in the sense that we don't collect *all* Names: for example, the DFun of an instance is recorded textually rather than by its fingerprint when fingerprinting the instance, so DFuns are not dependencies. 
-} freeNamesIfDecl :: IfaceDecl -> NameSet freeNamesIfDecl (IfaceId { ifType = t, ifIdDetails = d, ifIdInfo = i}) = freeNamesIfType t &&& freeNamesIfIdInfo i &&& freeNamesIfIdDetails d freeNamesIfDecl (IfaceData { ifBinders = bndrs, ifResKind = res_k , ifParent = p, ifCtxt = ctxt, ifCons = cons }) = freeNamesIfVarBndrs bndrs &&& freeNamesIfType res_k &&& freeNamesIfaceTyConParent p &&& freeNamesIfContext ctxt &&& freeNamesIfConDecls cons freeNamesIfDecl (IfaceSynonym { ifBinders = bndrs, ifResKind = res_k , ifSynRhs = rhs }) = freeNamesIfVarBndrs bndrs &&& freeNamesIfKind res_k &&& freeNamesIfType rhs freeNamesIfDecl (IfaceFamily { ifBinders = bndrs, ifResKind = res_k , ifFamFlav = flav }) = freeNamesIfVarBndrs bndrs &&& freeNamesIfKind res_k &&& freeNamesIfFamFlav flav freeNamesIfDecl (IfaceClass{ ifBinders = bndrs, ifBody = cls_body }) = freeNamesIfVarBndrs bndrs &&& freeNamesIfClassBody cls_body freeNamesIfDecl (IfaceAxiom { ifTyCon = tc, ifAxBranches = branches }) = freeNamesIfTc tc &&& fnList freeNamesIfAxBranch branches freeNamesIfDecl (IfacePatSyn { ifPatMatcher = (matcher, _) , ifPatBuilder = mb_builder , ifPatUnivBndrs = univ_bndrs , ifPatExBndrs = ex_bndrs , ifPatProvCtxt = prov_ctxt , ifPatReqCtxt = req_ctxt , ifPatArgs = args , ifPatTy = pat_ty , ifFieldLabels = lbls }) = unitNameSet matcher &&& maybe emptyNameSet (unitNameSet . 
fst) mb_builder &&& freeNamesIfVarBndrs univ_bndrs &&& freeNamesIfVarBndrs ex_bndrs &&& freeNamesIfContext prov_ctxt &&& freeNamesIfContext req_ctxt &&& fnList freeNamesIfType args &&& freeNamesIfType pat_ty &&& mkNameSet (map flSelector lbls) freeNamesIfClassBody :: IfaceClassBody -> NameSet freeNamesIfClassBody IfAbstractClass = emptyNameSet freeNamesIfClassBody (IfConcreteClass{ ifClassCtxt = ctxt, ifATs = ats, ifSigs = sigs }) = freeNamesIfContext ctxt &&& fnList freeNamesIfAT ats &&& fnList freeNamesIfClsSig sigs freeNamesIfAxBranch :: IfaceAxBranch -> NameSet freeNamesIfAxBranch (IfaceAxBranch { ifaxbTyVars = tyvars , ifaxbCoVars = covars , ifaxbLHS = lhs , ifaxbRHS = rhs }) = fnList freeNamesIfTvBndr tyvars &&& fnList freeNamesIfIdBndr covars &&& freeNamesIfAppArgs lhs &&& freeNamesIfType rhs freeNamesIfIdDetails :: IfaceIdDetails -> NameSet freeNamesIfIdDetails (IfRecSelId tc _) = either freeNamesIfTc freeNamesIfDecl tc freeNamesIfIdDetails _ = emptyNameSet -- All other changes are handled via the version info on the tycon freeNamesIfFamFlav :: IfaceFamTyConFlav -> NameSet freeNamesIfFamFlav IfaceOpenSynFamilyTyCon = emptyNameSet freeNamesIfFamFlav IfaceDataFamilyTyCon = emptyNameSet freeNamesIfFamFlav (IfaceClosedSynFamilyTyCon (Just (ax, br))) = unitNameSet ax &&& fnList freeNamesIfAxBranch br freeNamesIfFamFlav (IfaceClosedSynFamilyTyCon Nothing) = emptyNameSet freeNamesIfFamFlav IfaceAbstractClosedSynFamilyTyCon = emptyNameSet freeNamesIfFamFlav IfaceBuiltInSynFamTyCon = emptyNameSet freeNamesIfContext :: IfaceContext -> NameSet freeNamesIfContext = fnList freeNamesIfType freeNamesIfAT :: IfaceAT -> NameSet freeNamesIfAT (IfaceAT decl mb_def) = freeNamesIfDecl decl &&& case mb_def of Nothing -> emptyNameSet Just rhs -> freeNamesIfType rhs freeNamesIfClsSig :: IfaceClassOp -> NameSet freeNamesIfClsSig (IfaceClassOp _n ty dm) = freeNamesIfType ty &&& freeNamesDM dm freeNamesDM :: Maybe (DefMethSpec IfaceType) -> NameSet freeNamesDM (Just (GenericDM ty)) = 
freeNamesIfType ty freeNamesDM _ = emptyNameSet freeNamesIfConDecls :: IfaceConDecls -> NameSet freeNamesIfConDecls (IfDataTyCon c) = fnList freeNamesIfConDecl c freeNamesIfConDecls (IfNewTyCon c) = freeNamesIfConDecl c freeNamesIfConDecls _ = emptyNameSet freeNamesIfConDecl :: IfaceConDecl -> NameSet freeNamesIfConDecl (IfCon { ifConExTCvs = ex_tvs, ifConCtxt = ctxt , ifConArgTys = arg_tys , ifConFields = flds , ifConEqSpec = eq_spec , ifConStricts = bangs }) = fnList freeNamesIfBndr ex_tvs &&& freeNamesIfContext ctxt &&& fnList freeNamesIfType (map fst arg_tys) &&& -- these are multiplicities, represented as types fnList freeNamesIfType (map snd arg_tys) &&& mkNameSet (map flSelector flds) &&& fnList freeNamesIfType (map snd eq_spec) &&& -- equality constraints fnList freeNamesIfBang bangs freeNamesIfBang :: IfaceBang -> NameSet freeNamesIfBang (IfUnpackCo co) = freeNamesIfCoercion co freeNamesIfBang _ = emptyNameSet freeNamesIfKind :: IfaceType -> NameSet freeNamesIfKind = freeNamesIfType freeNamesIfAppArgs :: IfaceAppArgs -> NameSet freeNamesIfAppArgs (IA_Arg t _ ts) = freeNamesIfType t &&& freeNamesIfAppArgs ts freeNamesIfAppArgs IA_Nil = emptyNameSet freeNamesIfType :: IfaceType -> NameSet freeNamesIfType (IfaceFreeTyVar _) = emptyNameSet freeNamesIfType (IfaceTyVar _) = emptyNameSet freeNamesIfType (IfaceAppTy s t) = freeNamesIfType s &&& freeNamesIfAppArgs t freeNamesIfType (IfaceTyConApp tc ts) = freeNamesIfTc tc &&& freeNamesIfAppArgs ts freeNamesIfType (IfaceTupleTy _ _ ts) = freeNamesIfAppArgs ts freeNamesIfType (IfaceLitTy _) = emptyNameSet freeNamesIfType (IfaceForAllTy tv t) = freeNamesIfVarBndr tv &&& freeNamesIfType t freeNamesIfType (IfaceFunTy _ w s t) = freeNamesIfType s &&& freeNamesIfType t &&& freeNamesIfType w freeNamesIfType (IfaceCastTy t c) = freeNamesIfType t &&& freeNamesIfCoercion c freeNamesIfType (IfaceCoercionTy c) = freeNamesIfCoercion c freeNamesIfMCoercion :: IfaceMCoercion -> NameSet freeNamesIfMCoercion IfaceMRefl = 
emptyNameSet freeNamesIfMCoercion (IfaceMCo co) = freeNamesIfCoercion co freeNamesIfCoercion :: IfaceCoercion -> NameSet freeNamesIfCoercion (IfaceReflCo t) = freeNamesIfType t freeNamesIfCoercion (IfaceGReflCo _ t mco) = freeNamesIfType t &&& freeNamesIfMCoercion mco freeNamesIfCoercion (IfaceFunCo _ c_mult c1 c2) = freeNamesIfCoercion c_mult &&& freeNamesIfCoercion c1 &&& freeNamesIfCoercion c2 freeNamesIfCoercion (IfaceTyConAppCo _ tc cos) = freeNamesIfTc tc &&& fnList freeNamesIfCoercion cos freeNamesIfCoercion (IfaceAppCo c1 c2) = freeNamesIfCoercion c1 &&& freeNamesIfCoercion c2 freeNamesIfCoercion (IfaceForAllCo _ kind_co co) = freeNamesIfCoercion kind_co &&& freeNamesIfCoercion co freeNamesIfCoercion (IfaceFreeCoVar _) = emptyNameSet freeNamesIfCoercion (IfaceCoVarCo _) = emptyNameSet freeNamesIfCoercion (IfaceHoleCo _) = emptyNameSet freeNamesIfCoercion (IfaceAxiomInstCo ax _ cos) = unitNameSet ax &&& fnList freeNamesIfCoercion cos freeNamesIfCoercion (IfaceUnivCo p _ t1 t2) = freeNamesIfProv p &&& freeNamesIfType t1 &&& freeNamesIfType t2 freeNamesIfCoercion (IfaceSymCo c) = freeNamesIfCoercion c freeNamesIfCoercion (IfaceTransCo c1 c2) = freeNamesIfCoercion c1 &&& freeNamesIfCoercion c2 freeNamesIfCoercion (IfaceNthCo _ co) = freeNamesIfCoercion co freeNamesIfCoercion (IfaceLRCo _ co) = freeNamesIfCoercion co freeNamesIfCoercion (IfaceInstCo co co2) = freeNamesIfCoercion co &&& freeNamesIfCoercion co2 freeNamesIfCoercion (IfaceKindCo c) = freeNamesIfCoercion c freeNamesIfCoercion (IfaceSubCo co) = freeNamesIfCoercion co freeNamesIfCoercion (IfaceAxiomRuleCo _ax cos) -- the axiom is just a string, so we don't count it as a name. 
= fnList freeNamesIfCoercion cos freeNamesIfProv :: IfaceUnivCoProv -> NameSet freeNamesIfProv (IfacePhantomProv co) = freeNamesIfCoercion co freeNamesIfProv (IfaceProofIrrelProv co) = freeNamesIfCoercion co freeNamesIfProv (IfacePluginProv _) = emptyNameSet freeNamesIfProv (IfaceCorePrepProv _) = emptyNameSet freeNamesIfVarBndr :: VarBndr IfaceBndr vis -> NameSet freeNamesIfVarBndr (Bndr bndr _) = freeNamesIfBndr bndr freeNamesIfVarBndrs :: [VarBndr IfaceBndr vis] -> NameSet freeNamesIfVarBndrs = fnList freeNamesIfVarBndr freeNamesIfBndr :: IfaceBndr -> NameSet freeNamesIfBndr (IfaceIdBndr b) = freeNamesIfIdBndr b freeNamesIfBndr (IfaceTvBndr b) = freeNamesIfTvBndr b freeNamesIfBndrs :: [IfaceBndr] -> NameSet freeNamesIfBndrs = fnList freeNamesIfBndr freeNamesIfLetBndr :: IfaceLetBndr -> NameSet -- Remember IfaceLetBndr is used only for *nested* bindings -- The IdInfo can contain an unfolding (in the case of -- local INLINE pragmas), so look there too freeNamesIfLetBndr (IfLetBndr _name ty info _ji) = freeNamesIfType ty &&& freeNamesIfIdInfo info freeNamesIfTvBndr :: IfaceTvBndr -> NameSet freeNamesIfTvBndr (_fs,k) = freeNamesIfKind k -- kinds can have Names inside, because of promotion freeNamesIfIdBndr :: IfaceIdBndr -> NameSet freeNamesIfIdBndr (_, _fs,k) = freeNamesIfKind k freeNamesIfIdInfo :: IfaceIdInfo -> NameSet freeNamesIfIdInfo = fnList freeNamesItem freeNamesItem :: IfaceInfoItem -> NameSet freeNamesItem (HsUnfold _ u) = freeNamesIfUnfold u freeNamesItem (HsLFInfo (IfLFCon n)) = unitNameSet n freeNamesItem _ = emptyNameSet freeNamesIfUnfold :: IfaceUnfolding -> NameSet freeNamesIfUnfold (IfCoreUnfold _ _ e) = freeNamesIfExpr e freeNamesIfUnfold (IfCompulsory e) = freeNamesIfExpr e freeNamesIfUnfold (IfInlineRule _ _ _ e) = freeNamesIfExpr e freeNamesIfUnfold (IfDFunUnfold bs es) = freeNamesIfBndrs bs &&& fnList freeNamesIfExpr es freeNamesIfExpr :: IfaceExpr -> NameSet freeNamesIfExpr (IfaceExt v) = unitNameSet v freeNamesIfExpr (IfaceFCall _ ty) = 
freeNamesIfType ty freeNamesIfExpr (IfaceType ty) = freeNamesIfType ty freeNamesIfExpr (IfaceCo co) = freeNamesIfCoercion co freeNamesIfExpr (IfaceTuple _ as) = fnList freeNamesIfExpr as freeNamesIfExpr (IfaceLam (b,_) body) = freeNamesIfBndr b &&& freeNamesIfExpr body freeNamesIfExpr (IfaceApp f a) = freeNamesIfExpr f &&& freeNamesIfExpr a freeNamesIfExpr (IfaceCast e co) = freeNamesIfExpr e &&& freeNamesIfCoercion co freeNamesIfExpr (IfaceTick _ e) = freeNamesIfExpr e freeNamesIfExpr (IfaceECase e ty) = freeNamesIfExpr e &&& freeNamesIfType ty freeNamesIfExpr (IfaceCase s _ alts) = freeNamesIfExpr s &&& fnList fn_alt alts &&& fn_cons alts where fn_alt (IfaceAlt _con _bs r) = freeNamesIfExpr r -- Depend on the data constructors. Just one will do! -- Note [Tracking data constructors] fn_cons [] = emptyNameSet fn_cons (IfaceAlt IfaceDefault _ _ : xs) = fn_cons xs fn_cons (IfaceAlt (IfaceDataAlt con) _ _ : _ ) = unitNameSet con fn_cons (_ : _ ) = emptyNameSet freeNamesIfExpr (IfaceLet (IfaceNonRec bndr rhs) body) = freeNamesIfLetBndr bndr &&& freeNamesIfExpr rhs &&& freeNamesIfExpr body freeNamesIfExpr (IfaceLet (IfaceRec as) x) = fnList fn_pair as &&& freeNamesIfExpr x where fn_pair (bndr, rhs) = freeNamesIfLetBndr bndr &&& freeNamesIfExpr rhs freeNamesIfExpr _ = emptyNameSet freeNamesIfTc :: IfaceTyCon -> NameSet freeNamesIfTc tc = unitNameSet (ifaceTyConName tc) -- ToDo: shouldn't we include IfaceIntTc & co.? 
freeNamesIfRule :: IfaceRule -> NameSet freeNamesIfRule (IfaceRule { ifRuleBndrs = bs, ifRuleHead = f , ifRuleArgs = es, ifRuleRhs = rhs }) = unitNameSet f &&& fnList freeNamesIfBndr bs &&& fnList freeNamesIfExpr es &&& freeNamesIfExpr rhs freeNamesIfFamInst :: IfaceFamInst -> NameSet freeNamesIfFamInst (IfaceFamInst { ifFamInstFam = famName , ifFamInstAxiom = axName }) = unitNameSet famName &&& unitNameSet axName freeNamesIfaceTyConParent :: IfaceTyConParent -> NameSet freeNamesIfaceTyConParent IfNoParent = emptyNameSet freeNamesIfaceTyConParent (IfDataInstance ax tc tys) = unitNameSet ax &&& freeNamesIfTc tc &&& freeNamesIfAppArgs tys -- helpers (&&&) :: NameSet -> NameSet -> NameSet (&&&) = unionNameSet fnList :: (a -> NameSet) -> [a] -> NameSet fnList f = foldr (&&&) emptyNameSet . map f {- Note [Tracking data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In a case expression case e of { C a -> ...; ... } You might think that we don't need to include the datacon C in the free names, because its type will probably show up in the free names of 'e'. But in rare circumstances this may not happen. Here's the one that bit me: module DynFlags where import {-# SOURCE #-} Packages( PackageState ) data DynFlags = DF ... PackageState ... module Packages where import GHC.Driver.Session data PackageState = PS ... lookupModule (df :: DynFlags) = case df of DF ...p... -> case p of PS ... -> ... Now, lookupModule depends on DynFlags, but the transitive dependency on the *locally-defined* type PackageState is not visible. We need to take account of the use of the data constructor PS in the pattern match. ************************************************************************ * * Binary instances * * ************************************************************************ Note that there is a bit of subtlety here when we encode names. While IfaceTopBndrs is really just a synonym for Name, we need to take care to encode them with {get,put}IfaceTopBndr. 
The difference becomes important when we go to fingerprint an IfaceDecl. See Note [Fingerprinting IfaceDecls] for details. -} instance Binary IfaceDecl where put_ bh (IfaceId name ty details idinfo) = do putByte bh 0 putIfaceTopBndr bh name lazyPut bh (ty, details, idinfo) -- See Note [Lazy deserialization of IfaceId] put_ bh (IfaceData a1 a2 a3 a4 a5 a6 a7 a8 a9) = do putByte bh 2 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh a9 put_ bh (IfaceSynonym a1 a2 a3 a4 a5) = do putByte bh 3 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh (IfaceFamily a1 a2 a3 a4 a5 a6) = do putByte bh 4 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 -- NB: Written in a funny way to avoid an interface change put_ bh (IfaceClass { ifName = a2, ifRoles = a3, ifBinders = a4, ifFDs = a5, ifBody = IfConcreteClass { ifClassCtxt = a1, ifATs = a6, ifSigs = a7, ifMinDef = a8 }}) = do putByte bh 5 put_ bh a1 putIfaceTopBndr bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh (IfaceAxiom a1 a2 a3 a4) = do putByte bh 6 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh (IfacePatSyn a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) = do putByte bh 7 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh a9 put_ bh a10 put_ bh a11 put_ bh (IfaceClass { ifName = a1, ifRoles = a2, ifBinders = a3, ifFDs = a4, ifBody = IfAbstractClass }) = do putByte bh 8 putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 get bh = do h <- getByte bh case h of 0 -> do name <- get bh ~(ty, details, idinfo) <- lazyGet bh -- See Note [Lazy deserialization of IfaceId] return (IfaceId name ty details idinfo) 1 -> error "Binary.get(TyClDecl): ForeignType" 2 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh a9 <- get bh return (IfaceData a1 a2 a3 a4 a5 a6 a7 a8 a9) 
3 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh return (IfaceSynonym a1 a2 a3 a4 a5) 4 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh return (IfaceFamily a1 a2 a3 a4 a5 a6) 5 -> do a1 <- get bh a2 <- getIfaceTopBndr bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh return (IfaceClass { ifName = a2, ifRoles = a3, ifBinders = a4, ifFDs = a5, ifBody = IfConcreteClass { ifClassCtxt = a1, ifATs = a6, ifSigs = a7, ifMinDef = a8 }}) 6 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh return (IfaceAxiom a1 a2 a3 a4) 7 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh a9 <- get bh a10 <- get bh a11 <- get bh return (IfacePatSyn a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) 8 -> do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh return (IfaceClass { ifName = a1, ifRoles = a2, ifBinders = a3, ifFDs = a4, ifBody = IfAbstractClass }) _ -> panic (unwords ["Unknown IfaceDecl tag:", show h]) {- Note [Lazy deserialization of IfaceId] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The use of lazyPut and lazyGet in the IfaceId Binary instance is purely for performance reasons, to avoid deserializing details about identifiers that will never be used. It's not involved in tying the knot in the type checker. It saved ~1% of the total build time of GHC. When we read an interface file, we extend the PTE, a mapping of Names to TyThings, with the declarations we have read. The extension of the PTE is strict in the Names, but not in the TyThings themselves. GHC.IfaceToCore.tcIfaceDecls calculates the list of (Name, TyThing) bindings to add to the PTE. For an IfaceId, there's just one binding to add; and the ty, details, and idinfo fields of an IfaceId are used only in the TyThing. 
So by reading those fields lazily we may be able to save the work of ever having to deserialize them (into IfaceType, etc.). For IfaceData and IfaceClass, tcIfaceDecls creates extra implicit bindings (the constructors and field selectors of the data declaration, or the methods of the class), whose Names depend on more than just the Name of the type constructor or class itself. So deserializing them lazily would be more involved. Similar comments apply to the other constructors of IfaceDecl with the additional point that they probably represent a small proportion of all declarations. -} instance Binary IfaceFamTyConFlav where put_ bh IfaceDataFamilyTyCon = putByte bh 0 put_ bh IfaceOpenSynFamilyTyCon = putByte bh 1 put_ bh (IfaceClosedSynFamilyTyCon mb) = putByte bh 2 >> put_ bh mb put_ bh IfaceAbstractClosedSynFamilyTyCon = putByte bh 3 put_ _ IfaceBuiltInSynFamTyCon = pprPanic "Cannot serialize IfaceBuiltInSynFamTyCon, used for pretty-printing only" Outputable.empty get bh = do { h <- getByte bh ; case h of 0 -> return IfaceDataFamilyTyCon 1 -> return IfaceOpenSynFamilyTyCon 2 -> do { mb <- get bh ; return (IfaceClosedSynFamilyTyCon mb) } 3 -> return IfaceAbstractClosedSynFamilyTyCon _ -> pprPanic "Binary.get(IfaceFamTyConFlav): Invalid tag" (ppr (fromIntegral h :: Int)) } instance Binary IfaceClassOp where put_ bh (IfaceClassOp n ty def) = do putIfaceTopBndr bh n put_ bh ty put_ bh def get bh = do n <- getIfaceTopBndr bh ty <- get bh def <- get bh return (IfaceClassOp n ty def) instance Binary IfaceAT where put_ bh (IfaceAT dec defs) = do put_ bh dec put_ bh defs get bh = do dec <- get bh defs <- get bh return (IfaceAT dec defs) instance Binary IfaceAxBranch where put_ bh (IfaceAxBranch a1 a2 a3 a4 a5 a6 a7) = do put_ bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 get bh = do a1 <- get bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh return (IfaceAxBranch a1 a2 a3 a4 a5 a6 a7) instance Binary IfaceConDecls 
where put_ bh IfAbstractTyCon = putByte bh 0 put_ bh (IfDataTyCon cs) = putByte bh 1 >> put_ bh cs put_ bh (IfNewTyCon c) = putByte bh 2 >> put_ bh c get bh = do h <- getByte bh case h of 0 -> return IfAbstractTyCon 1 -> liftM IfDataTyCon (get bh) 2 -> liftM IfNewTyCon (get bh) _ -> error "Binary(IfaceConDecls).get: Invalid IfaceConDecls" instance Binary IfaceConDecl where put_ bh (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) = do putIfaceTopBndr bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh a8 put_ bh (length a9) mapM_ (put_ bh) a9 put_ bh a10 put_ bh a11 get bh = do a1 <- getIfaceTopBndr bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh n_fields <- get bh a9 <- replicateM n_fields (get bh) a10 <- get bh a11 <- get bh return (IfCon a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11) instance Binary IfaceBang where put_ bh IfNoBang = putByte bh 0 put_ bh IfStrict = putByte bh 1 put_ bh IfUnpack = putByte bh 2 put_ bh (IfUnpackCo co) = putByte bh 3 >> put_ bh co get bh = do h <- getByte bh case h of 0 -> return IfNoBang 1 -> return IfStrict 2 -> return IfUnpack _ -> IfUnpackCo <$> get bh instance Binary IfaceSrcBang where put_ bh (IfSrcBang a1 a2) = do put_ bh a1 put_ bh a2 get bh = do a1 <- get bh a2 <- get bh return (IfSrcBang a1 a2) instance Binary IfaceClsInst where put_ bh (IfaceClsInst cls tys dfun flag orph) = do put_ bh cls put_ bh tys put_ bh dfun put_ bh flag put_ bh orph get bh = do cls <- get bh tys <- get bh dfun <- get bh flag <- get bh orph <- get bh return (IfaceClsInst cls tys dfun flag orph) instance Binary IfaceFamInst where put_ bh (IfaceFamInst fam tys name orph) = do put_ bh fam put_ bh tys put_ bh name put_ bh orph get bh = do fam <- get bh tys <- get bh name <- get bh orph <- get bh return (IfaceFamInst fam tys name orph) instance Binary IfaceRule where put_ bh (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8) = do put_ bh a1 put_ bh a2 put_ bh a3 put_ bh a4 put_ bh a5 put_ bh a6 put_ bh a7 put_ bh 
a8 get bh = do a1 <- get bh a2 <- get bh a3 <- get bh a4 <- get bh a5 <- get bh a6 <- get bh a7 <- get bh a8 <- get bh return (IfaceRule a1 a2 a3 a4 a5 a6 a7 a8) instance Binary IfaceAnnotation where put_ bh (IfaceAnnotation a1 a2) = do put_ bh a1 put_ bh a2 get bh = do a1 <- get bh a2 <- get bh return (IfaceAnnotation a1 a2) instance Binary IfaceIdDetails where put_ bh IfVanillaId = putByte bh 0 put_ bh (IfRecSelId a b) = putByte bh 1 >> put_ bh a >> put_ bh b put_ bh (IfWorkerLikeId dmds) = putByte bh 2 >> put_ bh dmds put_ bh IfDFunId = putByte bh 3 get bh = do h <- getByte bh case h of 0 -> return IfVanillaId 1 -> do { a <- get bh; b <- get bh; return (IfRecSelId a b) } 2 -> do { dmds <- get bh; return (IfWorkerLikeId dmds) } _ -> return IfDFunId instance Binary IfaceInfoItem where put_ bh (HsArity aa) = putByte bh 0 >> put_ bh aa put_ bh (HsDmdSig ab) = putByte bh 1 >> put_ bh ab put_ bh (HsUnfold lb ad) = putByte bh 2 >> put_ bh lb >> put_ bh ad put_ bh (HsInline ad) = putByte bh 3 >> put_ bh ad put_ bh HsNoCafRefs = putByte bh 4 put_ bh HsLevity = putByte bh 5 put_ bh (HsCprSig cpr) = putByte bh 6 >> put_ bh cpr put_ bh (HsLFInfo lf_info) = putByte bh 7 >> put_ bh lf_info put_ bh (HsTagSig sig) = putByte bh 8 >> put_ bh sig get bh = do h <- getByte bh case h of 0 -> liftM HsArity $ get bh 1 -> liftM HsDmdSig $ get bh 2 -> do lb <- get bh ad <- get bh return (HsUnfold lb ad) 3 -> liftM HsInline $ get bh 4 -> return HsNoCafRefs 5 -> return HsLevity 6 -> HsCprSig <$> get bh 7 -> HsLFInfo <$> get bh _ -> HsTagSig <$> get bh instance Binary IfaceUnfolding where put_ bh (IfCoreUnfold s c e) = do putByte bh 0 put_ bh s putUnfoldingCache bh c put_ bh e put_ bh (IfInlineRule a b c d) = do putByte bh 1 put_ bh a put_ bh b put_ bh c put_ bh d put_ bh (IfDFunUnfold as bs) = do putByte bh 2 put_ bh as put_ bh bs put_ bh (IfCompulsory e) = do putByte bh 3 put_ bh e get bh = do h <- getByte bh case h of 0 -> do s <- get bh c <- getUnfoldingCache bh e <- get bh return 
(IfCoreUnfold s c e) 1 -> do a <- get bh b <- get bh c <- get bh d <- get bh return (IfInlineRule a b c d) 2 -> do as <- get bh bs <- get bh return (IfDFunUnfold as bs) _ -> do e <- get bh return (IfCompulsory e) putUnfoldingCache :: BinHandle -> IfUnfoldingCache -> IO () putUnfoldingCache bh (UnfoldingCache { uf_is_value = hnf, uf_is_conlike = conlike , uf_is_work_free = wf, uf_expandable = exp }) = do let b = zeroBits .<<|. hnf .<<|. conlike .<<|. wf .<<|. exp putByte bh b getUnfoldingCache :: BinHandle -> IO IfUnfoldingCache getUnfoldingCache bh = do b <- getByte bh let hnf = testBit b 3 conlike = testBit b 2 wf = testBit b 1 exp = testBit b 0 return (UnfoldingCache { uf_is_value = hnf, uf_is_conlike = conlike , uf_is_work_free = wf, uf_expandable = exp }) infixl 9 .<<|. (.<<|.) :: (Bits a) => a -> Bool -> a x .<<|. b = (if b then (`setBit` 0) else id) (x `shiftL` 1) instance Binary IfaceAlt where put_ bh (IfaceAlt a b c) = do put_ bh a put_ bh b put_ bh c get bh = do a <- get bh b <- get bh c <- get bh return (IfaceAlt a b c) instance Binary IfaceExpr where put_ bh (IfaceLcl aa) = do putByte bh 0 put_ bh aa put_ bh (IfaceType ab) = do putByte bh 1 put_ bh ab put_ bh (IfaceCo ab) = do putByte bh 2 put_ bh ab put_ bh (IfaceTuple ac ad) = do putByte bh 3 put_ bh ac put_ bh ad put_ bh (IfaceLam (ae, os) af) = do putByte bh 4 put_ bh ae put_ bh os put_ bh af put_ bh (IfaceApp ag ah) = do putByte bh 5 put_ bh ag put_ bh ah put_ bh (IfaceCase ai aj ak) = do putByte bh 6 put_ bh ai put_ bh aj put_ bh ak put_ bh (IfaceLet al am) = do putByte bh 7 put_ bh al put_ bh am put_ bh (IfaceTick an ao) = do putByte bh 8 put_ bh an put_ bh ao put_ bh (IfaceLit ap) = do putByte bh 9 put_ bh ap put_ bh (IfaceFCall as at) = do putByte bh 10 put_ bh as put_ bh at put_ bh (IfaceExt aa) = do putByte bh 11 put_ bh aa put_ bh (IfaceCast ie ico) = do putByte bh 12 put_ bh ie put_ bh ico put_ bh (IfaceECase a b) = do putByte bh 13 put_ bh a put_ bh b put_ bh (IfaceLitRubbish r) = do 
putByte bh 14 put_ bh r get bh = do h <- getByte bh case h of 0 -> do aa <- get bh return (IfaceLcl aa) 1 -> do ab <- get bh return (IfaceType ab) 2 -> do ab <- get bh return (IfaceCo ab) 3 -> do ac <- get bh ad <- get bh return (IfaceTuple ac ad) 4 -> do ae <- get bh os <- get bh af <- get bh return (IfaceLam (ae, os) af) 5 -> do ag <- get bh ah <- get bh return (IfaceApp ag ah) 6 -> do ai <- get bh aj <- get bh ak <- get bh return (IfaceCase ai aj ak) 7 -> do al <- get bh am <- get bh return (IfaceLet al am) 8 -> do an <- get bh ao <- get bh return (IfaceTick an ao) 9 -> do ap <- get bh return (IfaceLit ap) 10 -> do as <- get bh at <- get bh return (IfaceFCall as at) 11 -> do aa <- get bh return (IfaceExt aa) 12 -> do ie <- get bh ico <- get bh return (IfaceCast ie ico) 13 -> do a <- get bh b <- get bh return (IfaceECase a b) 14 -> do r <- get bh return (IfaceLitRubbish r) _ -> panic ("get IfaceExpr " ++ show h) instance Binary IfaceTickish where put_ bh (IfaceHpcTick m ix) = do putByte bh 0 put_ bh m put_ bh ix put_ bh (IfaceSCC cc tick push) = do putByte bh 1 put_ bh cc put_ bh tick put_ bh push put_ bh (IfaceSource src name) = do putByte bh 2 put_ bh (srcSpanFile src) put_ bh (srcSpanStartLine src) put_ bh (srcSpanStartCol src) put_ bh (srcSpanEndLine src) put_ bh (srcSpanEndCol src) put_ bh name get bh = do h <- getByte bh case h of 0 -> do m <- get bh ix <- get bh return (IfaceHpcTick m ix) 1 -> do cc <- get bh tick <- get bh push <- get bh return (IfaceSCC cc tick push) 2 -> do file <- get bh sl <- get bh sc <- get bh el <- get bh ec <- get bh let start = mkRealSrcLoc file sl sc end = mkRealSrcLoc file el ec name <- get bh return (IfaceSource (mkRealSrcSpan start end) name) _ -> panic ("get IfaceTickish " ++ show h) instance Binary IfaceConAlt where put_ bh IfaceDefault = putByte bh 0 put_ bh (IfaceDataAlt aa) = putByte bh 1 >> put_ bh aa put_ bh (IfaceLitAlt ac) = putByte bh 2 >> put_ bh ac get bh = do h <- getByte bh case h of 0 -> return IfaceDefault 1 
-> liftM IfaceDataAlt $ get bh _ -> liftM IfaceLitAlt $ get bh instance Binary IfaceBinding where put_ bh (IfaceNonRec aa ab) = putByte bh 0 >> put_ bh aa >> put_ bh ab put_ bh (IfaceRec ac) = putByte bh 1 >> put_ bh ac get bh = do h <- getByte bh case h of 0 -> do { aa <- get bh; ab <- get bh; return (IfaceNonRec aa ab) } _ -> do { ac <- get bh; return (IfaceRec ac) } instance Binary IfaceLetBndr where put_ bh (IfLetBndr a b c d) = do put_ bh a put_ bh b put_ bh c put_ bh d get bh = do a <- get bh b <- get bh c <- get bh d <- get bh return (IfLetBndr a b c d) instance Binary IfaceJoinInfo where put_ bh IfaceNotJoinPoint = putByte bh 0 put_ bh (IfaceJoinPoint ar) = do putByte bh 1 put_ bh ar get bh = do h <- getByte bh case h of 0 -> return IfaceNotJoinPoint _ -> liftM IfaceJoinPoint $ get bh instance Binary IfaceTyConParent where put_ bh IfNoParent = putByte bh 0 put_ bh (IfDataInstance ax pr ty) = do putByte bh 1 put_ bh ax put_ bh pr put_ bh ty get bh = do h <- getByte bh case h of 0 -> return IfNoParent _ -> do ax <- get bh pr <- get bh ty <- get bh return $ IfDataInstance ax pr ty instance Binary IfaceCompleteMatch where put_ bh (IfaceCompleteMatch cs mtc) = put_ bh cs >> put_ bh mtc get bh = IfaceCompleteMatch <$> get bh <*> get bh {- ************************************************************************ * * NFData instances See Note [Avoiding space leaks in toIface*] in GHC.CoreToIface * * ************************************************************************ -} instance NFData IfaceDecl where rnf = \case IfaceId f1 f2 f3 f4 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 IfaceData f1 f2 f3 f4 f5 f6 f7 f8 f9 -> f1 `seq` seqList f2 `seq` f3 `seq` f4 `seq` f5 `seq` rnf f6 `seq` rnf f7 `seq` rnf f8 `seq` rnf f9 IfaceSynonym f1 f2 f3 f4 f5 -> rnf f1 `seq` f2 `seq` seqList f3 `seq` rnf f4 `seq` rnf f5 IfaceFamily f1 f2 f3 f4 f5 f6 -> rnf f1 `seq` rnf f2 `seq` seqList f3 `seq` rnf f4 `seq` rnf f5 `seq` f6 `seq` () IfaceClass f1 f2 f3 f4 f5 -> rnf f1 `seq` f2 
`seq` seqList f3 `seq` rnf f4 `seq` rnf f5 IfaceAxiom nm tycon role ax -> rnf nm `seq` rnf tycon `seq` role `seq` rnf ax IfacePatSyn f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` f5 `seq` f6 `seq` rnf f7 `seq` rnf f8 `seq` rnf f9 `seq` rnf f10 `seq` f11 `seq` () instance NFData IfaceAxBranch where rnf (IfaceAxBranch f1 f2 f3 f4 f5 f6 f7) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` f5 `seq` rnf f6 `seq` rnf f7 instance NFData IfaceClassBody where rnf = \case IfAbstractClass -> () IfConcreteClass f1 f2 f3 f4 -> rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` f4 `seq` () instance NFData IfaceAT where rnf (IfaceAT f1 f2) = rnf f1 `seq` rnf f2 instance NFData IfaceClassOp where rnf (IfaceClassOp f1 f2 f3) = rnf f1 `seq` rnf f2 `seq` f3 `seq` () instance NFData IfaceTyConParent where rnf = \case IfNoParent -> () IfDataInstance f1 f2 f3 -> rnf f1 `seq` rnf f2 `seq` rnf f3 instance NFData IfaceConDecls where rnf = \case IfAbstractTyCon -> () IfDataTyCon f1 -> rnf f1 IfNewTyCon f1 -> rnf f1 instance NFData IfaceConDecl where rnf (IfCon f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` f5 `seq` rnf f6 `seq` rnf f7 `seq` rnf f8 `seq` f9 `seq` rnf f10 `seq` rnf f11 instance NFData IfaceSrcBang where rnf (IfSrcBang f1 f2) = f1 `seq` f2 `seq` () instance NFData IfaceBang where rnf x = x `seq` () instance NFData IfaceIdDetails where rnf = \case IfVanillaId -> () IfWorkerLikeId dmds -> dmds `seqList` () IfRecSelId (Left tycon) b -> rnf tycon `seq` rnf b IfRecSelId (Right decl) b -> rnf decl `seq` rnf b IfDFunId -> () instance NFData IfaceInfoItem where rnf = \case HsArity a -> rnf a HsDmdSig str -> seqDmdSig str HsInline p -> p `seq` () -- TODO: seq further? HsUnfold b unf -> rnf b `seq` rnf unf HsNoCafRefs -> () HsLevity -> () HsCprSig cpr -> cpr `seq` () HsLFInfo lf_info -> lf_info `seq` () -- TODO: seq further? 
HsTagSig sig -> sig `seq` () instance NFData IfaceUnfolding where rnf = \case IfCoreUnfold inlinable cache expr -> rnf inlinable `seq` cache `seq` rnf expr -- See Note [UnfoldingCache] in GHC.Core for why it suffices to merely `seq` on cache IfCompulsory expr -> rnf expr IfInlineRule arity b1 b2 e -> rnf arity `seq` rnf b1 `seq` rnf b2 `seq` rnf e IfDFunUnfold bndrs exprs -> rnf bndrs `seq` rnf exprs instance NFData IfaceExpr where rnf = \case IfaceLcl nm -> rnf nm IfaceExt nm -> rnf nm IfaceType ty -> rnf ty IfaceCo co -> rnf co IfaceTuple sort exprs -> sort `seq` rnf exprs IfaceLam bndr expr -> rnf bndr `seq` rnf expr IfaceApp e1 e2 -> rnf e1 `seq` rnf e2 IfaceCase e nm alts -> rnf e `seq` nm `seq` rnf alts IfaceECase e ty -> rnf e `seq` rnf ty IfaceLet bind e -> rnf bind `seq` rnf e IfaceCast e co -> rnf e `seq` rnf co IfaceLit l -> l `seq` () -- FIXME IfaceLitRubbish r -> rnf r `seq` () IfaceFCall fc ty -> fc `seq` rnf ty IfaceTick tick e -> rnf tick `seq` rnf e instance NFData IfaceAlt where rnf (IfaceAlt con bndrs rhs) = rnf con `seq` rnf bndrs `seq` rnf rhs instance NFData IfaceBinding where rnf = \case IfaceNonRec bndr e -> rnf bndr `seq` rnf e IfaceRec binds -> rnf binds instance NFData IfaceLetBndr where rnf (IfLetBndr nm ty id_info join_info) = rnf nm `seq` rnf ty `seq` rnf id_info `seq` rnf join_info instance NFData IfaceFamTyConFlav where rnf = \case IfaceDataFamilyTyCon -> () IfaceOpenSynFamilyTyCon -> () IfaceClosedSynFamilyTyCon f1 -> rnf f1 IfaceAbstractClosedSynFamilyTyCon -> () IfaceBuiltInSynFamTyCon -> () instance NFData IfaceJoinInfo where rnf x = x `seq` () instance NFData IfaceTickish where rnf = \case IfaceHpcTick m i -> rnf m `seq` rnf i IfaceSCC cc b1 b2 -> cc `seq` rnf b1 `seq` rnf b2 IfaceSource src str -> src `seq` rnf str instance NFData IfaceConAlt where rnf = \case IfaceDefault -> () IfaceDataAlt nm -> rnf nm IfaceLitAlt lit -> lit `seq` () instance NFData IfaceCompleteMatch where rnf (IfaceCompleteMatch f1 mtc) = rnf f1 `seq` rnf 
mtc instance NFData IfaceRule where rnf (IfaceRule f1 f2 f3 f4 f5 f6 f7 f8) = rnf f1 `seq` f2 `seq` rnf f3 `seq` rnf f4 `seq` rnf f5 `seq` rnf f6 `seq` rnf f7 `seq` f8 `seq` () instance NFData IfaceFamInst where rnf (IfaceFamInst f1 f2 f3 f4) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` f4 `seq` () instance NFData IfaceClsInst where rnf (IfaceClsInst f1 f2 f3 f4 f5) = f1 `seq` rnf f2 `seq` rnf f3 `seq` f4 `seq` f5 `seq` () instance NFData IfaceAnnotation where rnf (IfaceAnnotation f1 f2) = f1 `seq` f2 `seq` () ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Type.hs0000644000000000000000000024760214472400112020132 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 This module defines interface types and binders -} {-# LANGUAGE FlexibleInstances #-} -- FlexibleInstances for Binary (DefMethSpec IfaceType) {-# LANGUAGE BangPatterns #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE LambdaCase #-} module GHC.Iface.Type ( IfExtName, IfLclName, IfaceType(..), IfacePredType, IfaceKind, IfaceCoercion(..), IfaceMCoercion(..), IfaceUnivCoProv(..), IfaceMult, IfaceTyCon(..), IfaceTyConInfo(..), mkIfaceTyConInfo, IfaceTyConSort(..), IfaceTyLit(..), IfaceAppArgs(..), IfaceContext, IfaceBndr(..), IfaceOneShot(..), IfaceLamBndr, IfaceTvBndr, IfaceIdBndr, IfaceTyConBinder, IfaceForAllSpecBndr, IfaceForAllBndr, ArgFlag(..), AnonArgFlag(..), ShowForAllFlag(..), mkIfaceForAllTvBndr, mkIfaceTyConKind, ifaceForAllSpecToBndrs, ifaceForAllSpecToBndr, ifForAllBndrVar, ifForAllBndrName, ifaceBndrName, ifTyConBinderVar, ifTyConBinderName, -- Equality testing isIfaceLiftedTypeKind, -- Conversion from IfaceAppArgs to IfaceTypes/ArgFlags appArgsIfaceTypes, appArgsIfaceTypesArgFlags, -- Printing SuppressBndrSig(..), UseBndrParens(..), PrintExplicitKinds(..), pprIfaceType, pprParendIfaceType, pprPrecIfaceType, pprIfaceContext, pprIfaceContextArr, pprIfaceIdBndr, pprIfaceLamBndr, pprIfaceTvBndr, 
pprIfaceTyConBinders, pprIfaceBndrs, pprIfaceAppArgs, pprParendIfaceAppArgs, pprIfaceForAllPart, pprIfaceForAllPartMust, pprIfaceForAll, pprIfaceSigmaType, pprIfaceTyLit, pprIfaceCoercion, pprParendIfaceCoercion, splitIfaceSigmaTy, pprIfaceTypeApp, pprUserIfaceForAll, pprIfaceCoTcApp, pprTyTcApp, pprIfacePrefixApp, ppr_fun_arrow, isIfaceTauType, suppressIfaceInvisibles, stripIfaceInvisVars, stripInvisArgs, mkIfaceTySubst, substIfaceTyVar, substIfaceAppArgs, inDomIfaceTySubst, many_ty ) where import GHC.Prelude import {-# SOURCE #-} GHC.Builtin.Types ( coercibleTyCon, heqTyCon , tupleTyConName , manyDataConTyCon, oneDataConTyCon , liftedRepTyCon, liftedDataConTyCon ) import GHC.Core.Type ( isRuntimeRepTy, isMultiplicityTy, isLevityTy ) import GHC.Core.TyCon hiding ( pprPromotionQuote ) import GHC.Core.Coercion.Axiom import GHC.Types.Var import GHC.Builtin.Names import {-# SOURCE #-} GHC.Builtin.Types ( liftedTypeKindTyConName ) import GHC.Types.Name import GHC.Types.Basic import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Misc import GHC.Utils.Panic import {-# SOURCE #-} GHC.Tc.Utils.TcType ( isMetaTyVar, isTyConableTyVar ) import Data.Maybe( isJust ) import qualified Data.Semigroup as Semi import Control.DeepSeq {- ************************************************************************ * * Local (nested) binders * * ************************************************************************ -} type IfLclName = FastString -- A local name in iface syntax type IfExtName = Name -- An External or WiredIn Name can appear in Iface syntax -- (However Internal or System Names never should) data IfaceBndr -- Local (non-top-level) binders = IfaceIdBndr {-# UNPACK #-} !IfaceIdBndr | IfaceTvBndr {-# UNPACK #-} !IfaceTvBndr type IfaceIdBndr = (IfaceType, IfLclName, IfaceType) type IfaceTvBndr = (IfLclName, IfaceKind) ifaceTvBndrName :: IfaceTvBndr -> IfLclName ifaceTvBndrName (n,_) = n ifaceIdBndrName :: IfaceIdBndr -> IfLclName 
ifaceIdBndrName (_,n,_) = n ifaceBndrName :: IfaceBndr -> IfLclName ifaceBndrName (IfaceTvBndr bndr) = ifaceTvBndrName bndr ifaceBndrName (IfaceIdBndr bndr) = ifaceIdBndrName bndr ifaceBndrType :: IfaceBndr -> IfaceType ifaceBndrType (IfaceIdBndr (_, _, t)) = t ifaceBndrType (IfaceTvBndr (_, t)) = t type IfaceLamBndr = (IfaceBndr, IfaceOneShot) data IfaceOneShot -- See Note [Preserve OneShotInfo] in "GHC.Core.Tidy" = IfaceNoOneShot -- and Note [The oneShot function] in "GHC.Types.Id.Make" | IfaceOneShot instance Outputable IfaceOneShot where ppr IfaceNoOneShot = text "NoOneShotInfo" ppr IfaceOneShot = text "OneShot" {- %************************************************************************ %* * IfaceType %* * %************************************************************************ -} ------------------------------- type IfaceKind = IfaceType -- | A kind of universal type, used for types and kinds. -- -- Any time a 'Type' is pretty-printed, it is first converted to an 'IfaceType' -- before being printed. See Note [Pretty printing via Iface syntax] in "GHC.Types.TyThing.Ppr" data IfaceType = IfaceFreeTyVar TyVar -- See Note [Free tyvars in IfaceType] | IfaceTyVar IfLclName -- Type/coercion variable only, not tycon | IfaceLitTy IfaceTyLit | IfaceAppTy IfaceType IfaceAppArgs -- See Note [Suppressing invisible arguments] for -- an explanation of why the second field isn't -- IfaceType, analogous to AppTy. | IfaceFunTy AnonArgFlag IfaceMult IfaceType IfaceType | IfaceForAllTy IfaceForAllBndr IfaceType | IfaceTyConApp IfaceTyCon IfaceAppArgs -- Not necessarily saturated -- Includes newtypes, synonyms, tuples | IfaceCastTy IfaceType IfaceCoercion | IfaceCoercionTy IfaceCoercion | IfaceTupleTy -- Saturated tuples (unsaturated ones use IfaceTyConApp) TupleSort -- What sort of tuple? PromotionFlag -- A bit like IfaceTyCon IfaceAppArgs -- arity = length args -- For promoted data cons, the kind args are omitted -- Why have this? 
Only for efficiency: IfaceTupleTy can omit the -- type arguments, as they can be recreated when deserializing. -- In an experiment, removing IfaceTupleTy resulted in a 0.75% regression -- in interface file size (in GHC's boot libraries). -- See !3987. type IfaceMult = IfaceType type IfacePredType = IfaceType type IfaceContext = [IfacePredType] data IfaceTyLit = IfaceNumTyLit Integer | IfaceStrTyLit FastString | IfaceCharTyLit Char deriving (Eq) type IfaceTyConBinder = VarBndr IfaceBndr TyConBndrVis type IfaceForAllBndr = VarBndr IfaceBndr ArgFlag type IfaceForAllSpecBndr = VarBndr IfaceBndr Specificity -- | Make an 'IfaceForAllBndr' from an 'IfaceTvBndr'. mkIfaceForAllTvBndr :: ArgFlag -> IfaceTvBndr -> IfaceForAllBndr mkIfaceForAllTvBndr vis var = Bndr (IfaceTvBndr var) vis -- | Build the 'tyConKind' from the binders and the result kind. -- Keep in sync with 'mkTyConKind' in "GHC.Core.TyCon". mkIfaceTyConKind :: [IfaceTyConBinder] -> IfaceKind -> IfaceKind mkIfaceTyConKind bndrs res_kind = foldr mk res_kind bndrs where mk :: IfaceTyConBinder -> IfaceKind -> IfaceKind mk (Bndr tv (AnonTCB af)) k = IfaceFunTy af many_ty (ifaceBndrType tv) k mk (Bndr tv (NamedTCB vis)) k = IfaceForAllTy (Bndr tv vis) k ifaceForAllSpecToBndrs :: [IfaceForAllSpecBndr] -> [IfaceForAllBndr] ifaceForAllSpecToBndrs = map ifaceForAllSpecToBndr ifaceForAllSpecToBndr :: IfaceForAllSpecBndr -> IfaceForAllBndr ifaceForAllSpecToBndr (Bndr tv spec) = Bndr tv (Invisible spec) -- | Stores the arguments in a type application as a list. -- See @Note [Suppressing invisible arguments]@. data IfaceAppArgs = IA_Nil | IA_Arg IfaceType -- The type argument ArgFlag -- The argument's visibility. We store this here so -- that we can: -- -- 1. Avoid pretty-printing invisible (i.e., specified -- or inferred) arguments when -- -fprint-explicit-kinds isn't enabled, or -- 2. When -fprint-explicit-kinds *is*, enabled, print -- specified arguments in @(...) and inferred -- arguments in @{...}. 
IfaceAppArgs -- The rest of the arguments instance Semi.Semigroup IfaceAppArgs where IA_Nil <> xs = xs IA_Arg ty argf rest <> xs = IA_Arg ty argf (rest Semi.<> xs) instance Monoid IfaceAppArgs where mempty = IA_Nil mappend = (Semi.<>) -- Encodes type constructors, kind constructors, -- coercion constructors, the lot. -- We have to tag them in order to pretty print them -- properly. data IfaceTyCon = IfaceTyCon { ifaceTyConName :: IfExtName , ifaceTyConInfo :: IfaceTyConInfo } deriving (Eq) -- | The various types of TyCons which have special, built-in syntax. data IfaceTyConSort = IfaceNormalTyCon -- ^ a regular tycon | IfaceTupleTyCon !Arity !TupleSort -- ^ a tuple, e.g. @(a, b, c)@ or @(#a, b, c#)@. -- The arity is the tuple width, not the tycon arity -- (which is twice the width in the case of unboxed -- tuples). | IfaceSumTyCon !Arity -- ^ an unboxed sum, e.g. @(# a | b | c #)@ | IfaceEqualityTyCon -- ^ A heterogeneous equality TyCon -- (i.e. eqPrimTyCon, eqReprPrimTyCon, heqTyCon) -- that is actually being applied to two types -- of the same kind. This affects pretty-printing -- only: see Note [Equality predicates in IfaceType] deriving (Eq) instance Outputable IfaceTyConSort where ppr IfaceNormalTyCon = text "normal" ppr (IfaceTupleTyCon n sort) = ppr sort <> colon <> ppr n ppr (IfaceSumTyCon n) = text "sum:" <> ppr n ppr IfaceEqualityTyCon = text "equality" {- Note [Free tyvars in IfaceType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Nowadays (since Nov 16, 2016) we pretty-print a Type by converting to an IfaceType and pretty printing that. This eliminates a lot of pretty-print duplication, and it matches what we do with pretty- printing TyThings. See Note [Pretty printing via Iface syntax] in GHC.Types.TyThing.Ppr. It works fine for closed types, but when printing debug traces (e.g. when using -ddump-tc-trace) we print a lot of /open/ types. 
These types are full of TcTyVars, and it's absolutely crucial to print them in their full glory, with their unique, TcTyVarDetails etc. So we simply embed a TyVar in IfaceType with the IfaceFreeTyVar constructor. Note that: * We never expect to serialise an IfaceFreeTyVar into an interface file, nor to deserialise one. IfaceFreeTyVar is used only in the "convert to IfaceType and then pretty-print" pipeline. We do the same for covars, naturally. Note [Equality predicates in IfaceType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC has several varieties of type equality (see Note [The equality types story] in GHC.Builtin.Types.Prim for details). In an effort to avoid confusing users, we suppress the differences during pretty printing unless certain flags are enabled. Here is how each equality predicate* is printed in homogeneous and heterogeneous contexts, depending on which combination of the -fprint-explicit-kinds and -fprint-equality-relations flags is used: -------------------------------------------------------------------------------------------- | Predicate | Neither flag | -fprint-explicit-kinds | |-------------------------------|----------------------------|-----------------------------| | a ~ b (homogeneous) | a ~ b | (a :: Type) ~ (b :: Type) | | a ~~ b, homogeneously | a ~ b | (a :: Type) ~ (b :: Type) | | a ~~ b, heterogeneously | a ~~ c | (a :: Type) ~~ (c :: k) | | a ~# b, homogeneously | a ~ b | (a :: Type) ~ (b :: Type) | | a ~# b, heterogeneously | a ~~ c | (a :: Type) ~~ (c :: k) | | Coercible a b (homogeneous) | Coercible a b | Coercible @Type a b | | a ~R# b, homogeneously | Coercible a b | Coercible @Type a b | | a ~R# b, heterogeneously | a ~R# b | (a :: Type) ~R# (c :: k) | |-------------------------------|----------------------------|-----------------------------| | Predicate | -fprint-equality-relations | Both flags | |-------------------------------|----------------------------|-----------------------------| | a ~ b (homogeneous) | a ~ b | (a 
:: Type) ~ (b :: Type) | | a ~~ b, homogeneously | a ~~ b | (a :: Type) ~~ (b :: Type) | | a ~~ b, heterogeneously | a ~~ c | (a :: Type) ~~ (c :: k) | | a ~# b, homogeneously | a ~# b | (a :: Type) ~# (b :: Type) | | a ~# b, heterogeneously | a ~# c | (a :: Type) ~# (c :: k) | | Coercible a b (homogeneous) | Coercible a b | Coercible @Type a b | | a ~R# b, homogeneously | a ~R# b | (a :: Type) ~R# (b :: Type) | | a ~R# b, heterogeneously | a ~R# b | (a :: Type) ~R# (c :: k) | -------------------------------------------------------------------------------------------- (* There is no heterogeneous, representational, lifted equality counterpart to (~~). There could be, but there seems to be no use for it.) This table adheres to the following rules: A. With -fprint-equality-relations, print the true equality relation. B. Without -fprint-equality-relations: i. If the equality is representational and homogeneous, use Coercible. ii. Otherwise, if the equality is representational, use ~R#. iii. If the equality is nominal and homogeneous, use ~. iv. Otherwise, if the equality is nominal, use ~~. C. With -fprint-explicit-kinds, print kinds on both sides of an infix operator, as above; or print the kind with Coercible. D. Without -fprint-explicit-kinds, don't print kinds. A hetero-kinded equality is used homogeneously when it is applied to two identical kinds. Unfortunately, determining this from an IfaceType isn't possible since we can't see through type synonyms. Consequently, we need to record whether this particular application is homogeneous in IfaceTyConSort for the purposes of pretty-printing. See Note [The equality types story] in GHC.Builtin.Types.Prim. -} data IfaceTyConInfo -- Used to guide pretty-printing -- and to disambiguate D from 'D (they share a name) = IfaceTyConInfo { ifaceTyConIsPromoted :: PromotionFlag , ifaceTyConSort :: IfaceTyConSort } deriving (Eq) -- This smart constructor allows sharing of the two most common -- cases. 
See #19194 mkIfaceTyConInfo :: PromotionFlag -> IfaceTyConSort -> IfaceTyConInfo mkIfaceTyConInfo IsPromoted IfaceNormalTyCon = IfaceTyConInfo IsPromoted IfaceNormalTyCon mkIfaceTyConInfo NotPromoted IfaceNormalTyCon = IfaceTyConInfo NotPromoted IfaceNormalTyCon mkIfaceTyConInfo prom sort = IfaceTyConInfo prom sort data IfaceMCoercion = IfaceMRefl | IfaceMCo IfaceCoercion data IfaceCoercion = IfaceReflCo IfaceType | IfaceGReflCo Role IfaceType (IfaceMCoercion) | IfaceFunCo Role IfaceCoercion IfaceCoercion IfaceCoercion | IfaceTyConAppCo Role IfaceTyCon [IfaceCoercion] | IfaceAppCo IfaceCoercion IfaceCoercion | IfaceForAllCo IfaceBndr IfaceCoercion IfaceCoercion | IfaceCoVarCo IfLclName | IfaceAxiomInstCo IfExtName BranchIndex [IfaceCoercion] | IfaceAxiomRuleCo IfLclName [IfaceCoercion] -- There are only a fixed number of CoAxiomRules, so it suffices -- to use an IfaceLclName to distinguish them. -- See Note [Adding built-in type families] in GHC.Builtin.Types.Literals | IfaceUnivCo IfaceUnivCoProv Role IfaceType IfaceType | IfaceSymCo IfaceCoercion | IfaceTransCo IfaceCoercion IfaceCoercion | IfaceNthCo Int IfaceCoercion | IfaceLRCo LeftOrRight IfaceCoercion | IfaceInstCo IfaceCoercion IfaceCoercion | IfaceKindCo IfaceCoercion | IfaceSubCo IfaceCoercion | IfaceFreeCoVar CoVar -- See Note [Free tyvars in IfaceType] | IfaceHoleCo CoVar -- ^ See Note [Holes in IfaceCoercion] data IfaceUnivCoProv = IfacePhantomProv IfaceCoercion | IfaceProofIrrelProv IfaceCoercion | IfacePluginProv String | IfaceCorePrepProv Bool -- See defn of CorePrepProv {- Note [Holes in IfaceCoercion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When typechecking fails the typechecker will produce a HoleCo to stand in place of the unproven assertion. While we generally don't want to let these unproven assertions leak into interface files, we still need to be able to pretty-print them as we use IfaceType's pretty-printer to render Types. 
For this reason IfaceCoercion has a IfaceHoleCo constructor; however, we fails when asked to serialize to a IfaceHoleCo to ensure that they don't end up in an interface file. %************************************************************************ %* * Functions over IFaceTypes * * ************************************************************************ -} ifaceTyConHasKey :: IfaceTyCon -> Unique -> Bool ifaceTyConHasKey tc key = ifaceTyConName tc `hasKey` key -- | Given a kind K, is K of the form (TYPE ('BoxedRep 'LiftedRep))? isIfaceLiftedTypeKind :: IfaceKind -> Bool isIfaceLiftedTypeKind (IfaceTyConApp tc IA_Nil) = isLiftedTypeKindTyConName (ifaceTyConName tc) isIfaceLiftedTypeKind (IfaceTyConApp tc1 args1) = isIfaceTyConAppLiftedTypeKind tc1 args1 isIfaceLiftedTypeKind _ = False -- | Given a kind constructor K and arguments A, returns true if -- both of the following statements are true: -- -- * K is TYPE -- * A is a singleton IfaceAppArgs of the form ('BoxedRep 'Lifted) -- -- For the second condition, we must also check for the type -- synonym LiftedRep. isIfaceTyConAppLiftedTypeKind :: IfaceTyCon -> IfaceAppArgs -> Bool isIfaceTyConAppLiftedTypeKind tc1 args1 | tc1 `ifaceTyConHasKey` tYPETyConKey , IA_Arg soleArg1 Required IA_Nil <- args1 , IfaceTyConApp rep args2 <- soleArg1 = if | rep `ifaceTyConHasKey` boxedRepDataConKey , IA_Arg soleArg2 Required IA_Nil <- args2 , IfaceTyConApp lev IA_Nil <- soleArg2 , lev `ifaceTyConHasKey` liftedDataConKey -> True | rep `ifaceTyConHasKey` liftedRepTyConKey , IA_Nil <- args2 -> True | otherwise -> False | otherwise = False splitIfaceSigmaTy :: IfaceType -> ([IfaceForAllBndr], [IfacePredType], IfaceType) -- Mainly for printing purposes -- -- Here we split nested IfaceSigmaTy properly. -- -- @ -- forall t. T t => forall m a b. 
M m => (a -> m b) -> t a -> m (t b) -- @ -- -- If you called @splitIfaceSigmaTy@ on this type: -- -- @ -- ([t, m, a, b], [T t, M m], (a -> m b) -> t a -> m (t b)) -- @ splitIfaceSigmaTy ty = case (bndrs, theta) of ([], []) -> (bndrs, theta, tau) _ -> let (bndrs', theta', tau') = splitIfaceSigmaTy tau in (bndrs ++ bndrs', theta ++ theta', tau') where (bndrs, rho) = split_foralls ty (theta, tau) = split_rho rho split_foralls (IfaceForAllTy bndr ty) | isInvisibleArgFlag (binderArgFlag bndr) = case split_foralls ty of { (bndrs, rho) -> (bndr:bndrs, rho) } split_foralls rho = ([], rho) split_rho (IfaceFunTy InvisArg _ ty1 ty2) = case split_rho ty2 of { (ps, tau) -> (ty1:ps, tau) } split_rho tau = ([], tau) splitIfaceReqForallTy :: IfaceType -> ([IfaceForAllBndr], IfaceType) splitIfaceReqForallTy (IfaceForAllTy bndr ty) | isVisibleArgFlag (binderArgFlag bndr) = case splitIfaceReqForallTy ty of { (bndrs, rho) -> (bndr:bndrs, rho) } splitIfaceReqForallTy rho = ([], rho) suppressIfaceInvisibles :: PrintExplicitKinds -> [IfaceTyConBinder] -> [a] -> [a] suppressIfaceInvisibles (PrintExplicitKinds True) _tys xs = xs suppressIfaceInvisibles (PrintExplicitKinds False) tys xs = suppress tys xs where suppress _ [] = [] suppress [] a = a suppress (k:ks) (x:xs) | isInvisibleTyConBinder k = suppress ks xs | otherwise = x : suppress ks xs stripIfaceInvisVars :: PrintExplicitKinds -> [IfaceTyConBinder] -> [IfaceTyConBinder] stripIfaceInvisVars (PrintExplicitKinds True) tyvars = tyvars stripIfaceInvisVars (PrintExplicitKinds False) tyvars = filterOut isInvisibleTyConBinder tyvars -- | Extract an 'IfaceBndr' from an 'IfaceForAllBndr'. ifForAllBndrVar :: IfaceForAllBndr -> IfaceBndr ifForAllBndrVar = binderVar -- | Extract the variable name from an 'IfaceForAllBndr'. ifForAllBndrName :: IfaceForAllBndr -> IfLclName ifForAllBndrName fab = ifaceBndrName (ifForAllBndrVar fab) -- | Extract an 'IfaceBndr' from an 'IfaceTyConBinder'. 
ifTyConBinderVar :: IfaceTyConBinder -> IfaceBndr ifTyConBinderVar = binderVar -- | Extract the variable name from an 'IfaceTyConBinder'. ifTyConBinderName :: IfaceTyConBinder -> IfLclName ifTyConBinderName tcb = ifaceBndrName (ifTyConBinderVar tcb) ifTypeIsVarFree :: IfaceType -> Bool -- Returns True if the type definitely has no variables at all -- Just used to control pretty printing ifTypeIsVarFree ty = go ty where go (IfaceTyVar {}) = False go (IfaceFreeTyVar {}) = False go (IfaceAppTy fun args) = go fun && go_args args go (IfaceFunTy _ w arg res) = go w && go arg && go res go (IfaceForAllTy {}) = False go (IfaceTyConApp _ args) = go_args args go (IfaceTupleTy _ _ args) = go_args args go (IfaceLitTy _) = True go (IfaceCastTy {}) = False -- Safe go (IfaceCoercionTy {}) = False -- Safe go_args IA_Nil = True go_args (IA_Arg arg _ args) = go arg && go_args args {- Note [Substitution on IfaceType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Substitutions on IfaceType are done only during pretty-printing to construct the result type of a GADT, and does not deal with binders (eg IfaceForAll), so it doesn't need fancy capture stuff. 
-} type IfaceTySubst = FastStringEnv IfaceType -- Note [Substitution on IfaceType] mkIfaceTySubst :: [(IfLclName,IfaceType)] -> IfaceTySubst -- See Note [Substitution on IfaceType] mkIfaceTySubst eq_spec = mkFsEnv eq_spec inDomIfaceTySubst :: IfaceTySubst -> IfaceTvBndr -> Bool -- See Note [Substitution on IfaceType] inDomIfaceTySubst subst (fs, _) = isJust (lookupFsEnv subst fs) substIfaceType :: IfaceTySubst -> IfaceType -> IfaceType -- See Note [Substitution on IfaceType] substIfaceType env ty = go ty where go (IfaceFreeTyVar tv) = IfaceFreeTyVar tv go (IfaceTyVar tv) = substIfaceTyVar env tv go (IfaceAppTy t ts) = IfaceAppTy (go t) (substIfaceAppArgs env ts) go (IfaceFunTy af w t1 t2) = IfaceFunTy af (go w) (go t1) (go t2) go ty@(IfaceLitTy {}) = ty go (IfaceTyConApp tc tys) = IfaceTyConApp tc (substIfaceAppArgs env tys) go (IfaceTupleTy s i tys) = IfaceTupleTy s i (substIfaceAppArgs env tys) go (IfaceForAllTy {}) = pprPanic "substIfaceType" (ppr ty) go (IfaceCastTy ty co) = IfaceCastTy (go ty) (go_co co) go (IfaceCoercionTy co) = IfaceCoercionTy (go_co co) go_mco IfaceMRefl = IfaceMRefl go_mco (IfaceMCo co) = IfaceMCo $ go_co co go_co (IfaceReflCo ty) = IfaceReflCo (go ty) go_co (IfaceGReflCo r ty mco) = IfaceGReflCo r (go ty) (go_mco mco) go_co (IfaceFunCo r w c1 c2) = IfaceFunCo r (go_co w) (go_co c1) (go_co c2) go_co (IfaceTyConAppCo r tc cos) = IfaceTyConAppCo r tc (go_cos cos) go_co (IfaceAppCo c1 c2) = IfaceAppCo (go_co c1) (go_co c2) go_co (IfaceForAllCo {}) = pprPanic "substIfaceCoercion" (ppr ty) go_co (IfaceFreeCoVar cv) = IfaceFreeCoVar cv go_co (IfaceCoVarCo cv) = IfaceCoVarCo cv go_co (IfaceHoleCo cv) = IfaceHoleCo cv go_co (IfaceAxiomInstCo a i cos) = IfaceAxiomInstCo a i (go_cos cos) go_co (IfaceUnivCo prov r t1 t2) = IfaceUnivCo (go_prov prov) r (go t1) (go t2) go_co (IfaceSymCo co) = IfaceSymCo (go_co co) go_co (IfaceTransCo co1 co2) = IfaceTransCo (go_co co1) (go_co co2) go_co (IfaceNthCo n co) = IfaceNthCo n (go_co co) go_co (IfaceLRCo lr 
co) = IfaceLRCo lr (go_co co) go_co (IfaceInstCo c1 c2) = IfaceInstCo (go_co c1) (go_co c2) go_co (IfaceKindCo co) = IfaceKindCo (go_co co) go_co (IfaceSubCo co) = IfaceSubCo (go_co co) go_co (IfaceAxiomRuleCo n cos) = IfaceAxiomRuleCo n (go_cos cos) go_cos = map go_co go_prov (IfacePhantomProv co) = IfacePhantomProv (go_co co) go_prov (IfaceProofIrrelProv co) = IfaceProofIrrelProv (go_co co) go_prov co@(IfacePluginProv _) = co go_prov co@(IfaceCorePrepProv _) = co substIfaceAppArgs :: IfaceTySubst -> IfaceAppArgs -> IfaceAppArgs substIfaceAppArgs env args = go args where go IA_Nil = IA_Nil go (IA_Arg ty arg tys) = IA_Arg (substIfaceType env ty) arg (go tys) substIfaceTyVar :: IfaceTySubst -> IfLclName -> IfaceType substIfaceTyVar env tv | Just ty <- lookupFsEnv env tv = ty | otherwise = IfaceTyVar tv {- ************************************************************************ * * Functions over IfaceAppArgs * * ************************************************************************ -} stripInvisArgs :: PrintExplicitKinds -> IfaceAppArgs -> IfaceAppArgs stripInvisArgs (PrintExplicitKinds True) tys = tys stripInvisArgs (PrintExplicitKinds False) tys = suppress_invis tys where suppress_invis c = case c of IA_Nil -> IA_Nil IA_Arg t argf ts | isVisibleArgFlag argf -> IA_Arg t argf $ suppress_invis ts -- Keep recursing through the remainder of the arguments, as it's -- possible that there are remaining invisible ones. -- See the "In type declarations" section of Note [VarBndrs, -- TyCoVarBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep. 
| otherwise -> suppress_invis ts appArgsIfaceTypes :: IfaceAppArgs -> [IfaceType] appArgsIfaceTypes IA_Nil = [] appArgsIfaceTypes (IA_Arg t _ ts) = t : appArgsIfaceTypes ts appArgsIfaceTypesArgFlags :: IfaceAppArgs -> [(IfaceType, ArgFlag)] appArgsIfaceTypesArgFlags IA_Nil = [] appArgsIfaceTypesArgFlags (IA_Arg t a ts) = (t, a) : appArgsIfaceTypesArgFlags ts ifaceVisAppArgsLength :: IfaceAppArgs -> Int ifaceVisAppArgsLength = go 0 where go !n IA_Nil = n go n (IA_Arg _ argf rest) | isVisibleArgFlag argf = go (n+1) rest | otherwise = go n rest {- Note [Suppressing invisible arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use the IfaceAppArgs data type to specify which of the arguments to a type should be displayed when pretty-printing, under the control of -fprint-explicit-kinds. See also Type.filterOutInvisibleTypes. For example, given T :: forall k. (k->*) -> k -> * -- Ordinary kind polymorphism 'Just :: forall k. k -> 'Maybe k -- Promoted we want T * Tree Int prints as T Tree Int 'Just * prints as Just * For type constructors (IfaceTyConApp), IfaceAppArgs is a quite natural fit, since the corresponding Core constructor: data Type = ... | TyConApp TyCon [Type] Already puts all of its arguments into a list. So when converting a Type to an IfaceType (see toIfaceAppArgsX in GHC.Core.ToIface), we simply use the kind of the TyCon (which is cached) to guide the process of converting the argument Types into an IfaceAppArgs list. We also want this behavior for IfaceAppTy, since given: data Proxy (a :: k) f :: forall (t :: forall a. a -> Type). Proxy Type (t Bool True) We want to print the return type as `Proxy (t True)` without the use of -fprint-explicit-kinds (#15330). Accomplishing this is trickier than in the tycon case, because the corresponding Core constructor for IfaceAppTy: data Type = ... | AppTy Type Type Only stores one argument at a time. Therefore, when converting an AppTy to an IfaceAppTy (in toIfaceTypeX in GHC.CoreToIface), we: 1. 
Flatten the chain of AppTys down as much as possible 2. Use typeKind to determine the function Type's kind 3. Use this kind to guide the process of converting the argument Types into an IfaceAppArgs list. By flattening the arguments like this, we obtain two benefits: (a) We can reuse the same machinery to pretty-print IfaceTyConApp arguments as we do IfaceTyApp arguments, which means that we only need to implement the logic to filter out invisible arguments once. (b) Unlike for tycons, finding the kind of a type in general (through typeKind) is not a constant-time operation, so by flattening the arguments first, we decrease the number of times we have to call typeKind. Note [Pretty-printing invisible arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [Suppressing invisible arguments] is all about how to avoid printing invisible arguments when the -fprint-explicit-kinds flag is disables. Well, what about when it's enabled? Then we can and should print invisible kind arguments, and this Note explains how we do it. As two running examples, consider the following code: {-# LANGUAGE PolyKinds #-} data T1 a data T2 (a :: k) When displaying these types (with -fprint-explicit-kinds on), we could just do the following: T1 k a T2 k a That certainly gets the job done. But it lacks a crucial piece of information: is the `k` argument inferred or specified? To communicate this, we use visible kind application syntax to distinguish the two cases: T1 @{k} a T2 @k a Here, @{k} indicates that `k` is an inferred argument, and @k indicates that `k` is a specified argument. (See Note [VarBndrs, TyCoVarBinders, TyConBinders, and visibility] in GHC.Core.TyCo.Rep for a lengthier explanation on what "inferred" and "specified" mean.) 
************************************************************************ * * Pretty-printing * * ************************************************************************ -} if_print_coercions :: SDoc -- ^ if printing coercions -> SDoc -- ^ otherwise -> SDoc if_print_coercions yes no = sdocOption sdocPrintExplicitCoercions $ \print_co -> getPprStyle $ \style -> getPprDebug $ \debug -> if print_co || dumpStyle style || debug then yes else no pprIfaceInfixApp :: PprPrec -> SDoc -> SDoc -> SDoc -> SDoc pprIfaceInfixApp ctxt_prec pp_tc pp_ty1 pp_ty2 = maybeParen ctxt_prec opPrec $ sep [pp_ty1, pp_tc <+> pp_ty2] pprIfacePrefixApp :: PprPrec -> SDoc -> [SDoc] -> SDoc pprIfacePrefixApp ctxt_prec pp_fun pp_tys | null pp_tys = pp_fun | otherwise = maybeParen ctxt_prec appPrec $ hang pp_fun 2 (sep pp_tys) isIfaceTauType :: IfaceType -> Bool isIfaceTauType (IfaceForAllTy _ _) = False isIfaceTauType (IfaceFunTy InvisArg _ _ _) = False isIfaceTauType _ = True -- ----------------------------- Printing binders ------------------------------------ instance Outputable IfaceBndr where ppr (IfaceIdBndr bndr) = pprIfaceIdBndr bndr ppr (IfaceTvBndr bndr) = char '@' <> pprIfaceTvBndr bndr (SuppressBndrSig False) (UseBndrParens False) pprIfaceBndrs :: [IfaceBndr] -> SDoc pprIfaceBndrs bs = sep (map ppr bs) pprIfaceLamBndr :: IfaceLamBndr -> SDoc pprIfaceLamBndr (b, IfaceNoOneShot) = ppr b pprIfaceLamBndr (b, IfaceOneShot) = ppr b <> text "[OneShot]" pprIfaceIdBndr :: IfaceIdBndr -> SDoc pprIfaceIdBndr (w, name, ty) = parens (ppr name <> brackets (ppr w) <+> dcolon <+> ppr ty) {- Note [Suppressing binder signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When printing the binders in a 'forall', we want to keep the kind annotations: forall (a :: k). 
                     blah
              ^^^^
              good

On the other hand, when we print the binders of a data declaration in :info,
the kind information would be redundant due to the standalone kind signature:

   type F :: Symbol -> Type
   type F (s :: Symbol) = blah
             ^^^^^^^^^
             redundant

Here we'd like to omit the kind annotation:

   type F :: Symbol -> Type
   type F s = blah

Note [Printing type abbreviations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Normally, we pretty-print
   `TYPE 'LiftedRep` as `Type` (or `*`)
   `FUN 'Many`       as `(->)`
This way, error messages don't refer to representation polymorphism
or linearity if it is not necessary.

However, when printing the definition of Type or (->) with :info,
this would give confusing output: `type (->) = (->)` (#18594).
Solution: detect when we are in :info and disable displaying the synonym
with the SDoc option sdocPrintTypeAbbreviations.

If there will be a need, in the future we could expose it as a flag
-fprint-type-abbreviations or even two separate flags controlling
TYPE 'LiftedRep and FUN 'Many.
-}

-- | Do we want to suppress kind annotations on binders?
-- See Note [Suppressing binder signatures]
newtype SuppressBndrSig = SuppressBndrSig Bool

-- | Should a type-variable binder be wrapped in parentheses?
newtype UseBndrParens = UseBndrParens Bool

-- | Wrapper for the value of -fprint-explicit-kinds.
newtype PrintExplicitKinds = PrintExplicitKinds Bool

-- | Print a type-variable binder, possibly with its kind annotation and
-- possibly parenthesised.  The kind is omitted when signatures are being
-- suppressed (see Note [Suppressing binder signatures]) or when it is
-- simply the lifted type kind.
pprIfaceTvBndr :: IfaceTvBndr -> SuppressBndrSig -> UseBndrParens -> SDoc
pprIfaceTvBndr (tv, ki) (SuppressBndrSig suppress_sig) (UseBndrParens use_parens)
  | suppress_sig             = ppr tv
  | isIfaceLiftedTypeKind ki = ppr tv
  | otherwise                = maybe_parens (ppr tv <+> dcolon <+> ppr ki)
  where
    maybe_parens | use_parens = parens
                 | otherwise  = id

-- | Print the binders of a type constructor, marking invisible binders
-- with @\@@ / @\@{...}@ according to their visibility.
pprIfaceTyConBinders :: SuppressBndrSig -> [IfaceTyConBinder] -> SDoc
pprIfaceTyConBinders suppress_sig = sep . map go
  where
    go :: IfaceTyConBinder -> SDoc
    go (Bndr (IfaceIdBndr bndr) _) = pprIfaceIdBndr bndr
    go (Bndr (IfaceTvBndr bndr) vis) =
      -- See Note [Pretty-printing invisible arguments]
      case vis of
        AnonTCB  VisArg    -> ppr_bndr (UseBndrParens True)
        AnonTCB  InvisArg  -> char '@' <> braces (ppr_bndr (UseBndrParens False))
          -- The above case is rare. (See Note [AnonTCB InvisArg] in GHC.Core.TyCon.)
          -- Should we print these differently?
        NamedTCB Required  -> ppr_bndr (UseBndrParens True)
        NamedTCB Specified -> char '@' <> ppr_bndr (UseBndrParens True)
        NamedTCB Inferred  -> char '@' <> braces (ppr_bndr (UseBndrParens False))
      where
        ppr_bndr = pprIfaceTvBndr bndr suppress_sig

instance Binary IfaceBndr where
    put_ bh (IfaceIdBndr aa) = do
            putByte bh 0
            put_ bh aa
    put_ bh (IfaceTvBndr ab) = do
            putByte bh 1
            put_ bh ab
    get bh = do
            h <- getByte bh
            case h of
              0 -> do aa <- get bh
                      return (IfaceIdBndr aa)
              _ -> do ab <- get bh
                      return (IfaceTvBndr ab)

instance Binary IfaceOneShot where
    put_ bh IfaceNoOneShot = putByte bh 0
    put_ bh IfaceOneShot = putByte bh 1
    get bh = do
            h <- getByte bh
            case h of
              0 -> return IfaceNoOneShot
              _ -> return IfaceOneShot

-- ----------------------------- Printing IfaceType ------------------------------------

---------------------------------
instance Outputable IfaceType where
  ppr ty = pprIfaceType ty

pprIfaceType, pprParendIfaceType :: IfaceType -> SDoc
pprIfaceType       = pprPrecIfaceType topPrec
pprParendIfaceType = pprPrecIfaceType appPrec

pprPrecIfaceType :: PprPrec -> IfaceType -> SDoc
-- We still need `hideNonStandardTypes`, since the `pprPrecIfaceType` may be
-- called from other places, besides `:type` and `:info`.
pprPrecIfaceType prec ty = hideNonStandardTypes (ppr_ty prec) ty

-- | Print the arrow for a function type with the given multiplicity:
-- a plain arrow for Many, a lollipop for One, and a multiplicity-annotated
-- arrow otherwise.
ppr_fun_arrow :: IfaceMult -> SDoc
ppr_fun_arrow w
  | (IfaceTyConApp tc _) <- w
  , tc `ifaceTyConHasKey` (getUnique manyDataConTyCon) = arrow
  | (IfaceTyConApp tc _) <- w
  , tc `ifaceTyConHasKey` (getUnique oneDataConTyCon) = lollipop
  | otherwise = mulArrow (pprIfaceType w)

-- | Print a sigma type (with an explicit forall), parenthesising at
-- function precedence.
ppr_sigma :: PprPrec -> IfaceType -> SDoc
ppr_sigma ctxt_prec ty
  = maybeParen ctxt_prec funPrec (pprIfaceSigmaType ShowForAllMust ty)

-- | The workhorse for printing an 'IfaceType' at a given precedence.
ppr_ty :: PprPrec -> IfaceType -> SDoc
ppr_ty ctxt_prec ty@(IfaceForAllTy {})          = ppr_sigma ctxt_prec ty
ppr_ty ctxt_prec ty@(IfaceFunTy InvisArg _ _ _) = ppr_sigma ctxt_prec ty

ppr_ty _ (IfaceFreeTyVar tyvar) = ppr tyvar  -- This is the main reason for IfaceFreeTyVar!
ppr_ty _ (IfaceTyVar tyvar)     = ppr tyvar  -- See Note [Free tyvars in IfaceType]
ppr_ty ctxt_prec (IfaceTyConApp tc tys) = pprTyTcApp ctxt_prec tc tys
ppr_ty ctxt_prec (IfaceTupleTy i p tys) = pprTuple ctxt_prec i p tys -- always fully saturated
ppr_ty _ (IfaceLitTy n)         = pprIfaceTyLit n

        -- Function types
ppr_ty ctxt_prec (IfaceFunTy _ w ty1 ty2)  -- Should be VisArg
  = -- We don't want to lose synonyms, so we mustn't use splitFunTys here.
    maybeParen ctxt_prec funPrec $
    sep [ppr_ty funPrec ty1, sep (ppr_fun_tail w ty2)]
  where
    -- Collect the whole chain of arrows so that the result is laid out as
    -- one "a -> b -> c" sequence, threading each arrow's multiplicity.
    ppr_fun_tail wthis (IfaceFunTy VisArg wnext ty1 ty2)
      = (ppr_fun_arrow wthis <+> ppr_ty funPrec ty1) : ppr_fun_tail wnext ty2
    ppr_fun_tail wthis other_ty
      = [ppr_fun_arrow wthis <+> pprIfaceType other_ty]

ppr_ty ctxt_prec (IfaceAppTy t ts)
  = if_print_coercions
      ppr_app_ty
      ppr_app_ty_no_casts
  where
    ppr_app_ty =
        sdocOption sdocPrintExplicitKinds $ \print_kinds ->
        let tys_wo_kinds = appArgsIfaceTypesArgFlags $ stripInvisArgs
                             (PrintExplicitKinds print_kinds) ts
        in pprIfacePrefixApp ctxt_prec
                             (ppr_ty funPrec t)
                             (map (ppr_app_arg appPrec) tys_wo_kinds)

    -- Strip any casts from the head of the application
    ppr_app_ty_no_casts =
        case t of
          IfaceCastTy head _ -> ppr_ty ctxt_prec (mk_app_tys head ts)
          _                  -> ppr_app_ty

    mk_app_tys :: IfaceType -> IfaceAppArgs -> IfaceType
    mk_app_tys (IfaceTyConApp tc tys1) tys2 =
        IfaceTyConApp tc (tys1 `mappend` tys2)
    mk_app_tys t1 tys2 = IfaceAppTy t1 tys2

ppr_ty ctxt_prec (IfaceCastTy ty co)
  = if_print_coercions
      (parens (ppr_ty topPrec ty <+> text "|>" <+> ppr co))
      (ppr_ty ctxt_prec ty)

ppr_ty ctxt_prec (IfaceCoercionTy co)
  = if_print_coercions
      (ppr_co ctxt_prec co)
      (text "<>")

{- Note [Defaulting RuntimeRep variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RuntimeRep variables are considered by many (most?) users to be little more
than syntactic noise. When the notion was introduced there was a significant
and understandable push-back from those with pedagogy in mind, which argued
that RuntimeRep variables would throw a wrench into nearly any teaching
approach since they appear in even the lowly ($) function's type,

    ($) :: forall (w :: RuntimeRep) a (b :: TYPE w). (a -> b) -> a -> b

which is significantly less readable than its non RuntimeRep-polymorphic type of

    ($) :: (a -> b) -> a -> b

Moreover, unboxed types don't appear all that often in run-of-the-mill
Haskell programs, so it makes little sense to make all users pay this
syntactic overhead.
For this reason it was decided that we would hide RuntimeRep variables for now
(see #11549). We do this by defaulting all type variables of kind RuntimeRep
to LiftedRep.
Likewise, we default all Multiplicity variables to Many.

This is done in a pass right before pretty-printing
(defaultIfaceTyVarsOfKind, controlled by
-fprint-explicit-runtime-reps and -XLinearTypes)

This applies to /quantified/ variables like 'w' above.  What about variables
that are /free/ in the type being printed, which certainly happens in error
messages.  Suppose (#16074, #19361) we are reporting a mismatch between skolems
          (a :: RuntimeRep) ~ (b :: RuntimeRep)
   or     (m :: Multiplicity) ~ Many
We certainly don't want to say "Can't match LiftedRep with LiftedRep" or
"Can't match Many with Many"!

But if we are printing the type
    (forall (a :: TYPE r). blah)
we do want to turn that (free) r into LiftedRep, so it prints as
    (forall a. blah)

We use isMetaTyVar to distinguish between those two situations:
metavariables are converted, skolem variables are not.

There's one exception though: TyVarTv metavariables should not be defaulted,
as they appear during kind-checking of "newtype T :: TYPE r where..."
(test T18357a). Therefore, we additionally test for isTyConableTyVar.
-}

-- | Default 'RuntimeRep' variables to 'LiftedRep',
--   'Levity' variables to 'Lifted', and 'Multiplicity'
--   variables to 'Many'. For example:
--
-- @
-- ($) :: forall (r :: GHC.Types.RuntimeRep) a (b :: TYPE r).
--        (a -> b) -> a -> b
-- Just :: forall (k :: Multiplicity) a. a % k -> Maybe a
-- @
--
-- turns in to,
--
-- @ ($) :: forall a (b :: *). (a -> b) -> a -> b @
-- @ Just :: forall a . a -> Maybe a @
--
-- We do this to prevent RuntimeRep, Levity and Multiplicity variables from
-- incurring a significant syntactic overhead in otherwise simple
-- type signatures (e.g. ($)). See Note [Defaulting RuntimeRep variables]
-- and #11549 for further discussion.
defaultIfaceTyVarsOfKind :: Bool -- ^ default 'RuntimeRep'/'Levity' variables?
                         -> Bool -- ^ default 'Multiplicity' variables?
                         -> IfaceType -> IfaceType
defaultIfaceTyVarsOfKind def_rep def_mult ty = go emptyFsEnv ty
  where
    go :: FastStringEnv IfaceType -- Set of enclosing forall-ed RuntimeRep/Levity/Multiplicity variables
       -> IfaceType
       -> IfaceType
    go subs (IfaceForAllTy (Bndr (IfaceTvBndr (var, var_kind)) argf) ty)
     | isInvisibleArgFlag argf  -- Don't default *visible* quantification
                                -- or we get the mess in #13963
     , Just substituted_ty <- check_substitution var_kind
      = let subs' = extendFsEnv subs var substituted_ty
            -- Record that we should replace it with LiftedRep/Lifted/Many,
            -- and recurse, discarding the forall
        in go subs' ty

    go subs (IfaceForAllTy bndr ty)
      = IfaceForAllTy (go_ifacebndr subs bndr) (go subs ty)

    go subs ty@(IfaceTyVar tv) = case lookupFsEnv subs tv of
      Just s  -> s
      Nothing -> ty

    go _ ty@(IfaceFreeTyVar tv)
      -- See Note [Defaulting RuntimeRep variables], about free vars
      | def_rep
      , GHC.Core.Type.isRuntimeRepTy (tyVarKind tv)
      , isMetaTyVar tv
      , isTyConableTyVar tv
      = liftedRep_ty
      | def_rep
      , GHC.Core.Type.isLevityTy (tyVarKind tv)
      , isMetaTyVar tv
      , isTyConableTyVar tv
      = lifted_ty
      | def_mult
      , GHC.Core.Type.isMultiplicityTy (tyVarKind tv)
      , isMetaTyVar tv
      , isTyConableTyVar tv
      = many_ty
      | otherwise = ty

    go subs (IfaceTyConApp tc tc_args)
      = IfaceTyConApp tc (go_args subs tc_args)

    go subs (IfaceTupleTy sort is_prom tc_args)
      = IfaceTupleTy sort is_prom (go_args subs tc_args)

    go subs (IfaceFunTy af w arg res)
      = IfaceFunTy af (go subs w) (go subs arg) (go subs res)

    go subs (IfaceAppTy t ts)
      = IfaceAppTy (go subs t) (go_args subs ts)

    go subs (IfaceCastTy x co)
      = IfaceCastTy (go subs x) co

    go _ ty@(IfaceLitTy {}) = ty
    go _ ty@(IfaceCoercionTy {}) = ty

    -- Apply the substitution inside a binder's kind/type annotation.
    go_ifacebndr :: FastStringEnv IfaceType -> IfaceForAllBndr -> IfaceForAllBndr
    go_ifacebndr subs (Bndr (IfaceIdBndr (w, n, t)) argf)
      = Bndr (IfaceIdBndr (w, n, go subs t)) argf
    go_ifacebndr subs (Bndr (IfaceTvBndr (n, t)) argf)
      = Bndr (IfaceTvBndr (n, go subs t)) argf

    go_args :: FastStringEnv IfaceType -> IfaceAppArgs -> IfaceAppArgs
    go_args _ IA_Nil = IA_Nil
    go_args subs (IA_Arg ty argf args)
      = IA_Arg (go subs ty) argf (go_args subs args)

    -- Does this forall-bound variable's kind mean it should be defaulted,
    -- and if so, to which replacement type?
    check_substitution :: IfaceType -> Maybe IfaceType
    check_substitution (IfaceTyConApp tc _)
        | def_rep
        , tc `ifaceTyConHasKey` runtimeRepTyConKey
        = Just liftedRep_ty
        | def_rep
        , tc `ifaceTyConHasKey` levityTyConKey
        = Just lifted_ty
        | def_mult
        , tc `ifaceTyConHasKey` multiplicityTyConKey
        = Just many_ty
    check_substitution _ = Nothing

-- | The type ('BoxedRep 'Lifted), also known as LiftedRep.
liftedRep_ty :: IfaceType
liftedRep_ty =
    IfaceTyConApp liftedRep IA_Nil
  where
    liftedRep :: IfaceTyCon
    liftedRep = IfaceTyCon tc_name (mkIfaceTyConInfo NotPromoted IfaceNormalTyCon)
      where tc_name = getName liftedRepTyCon

-- | The type 'Lifted :: Levity'.
lifted_ty :: IfaceType
lifted_ty =
    IfaceTyConApp (IfaceTyCon dc_name (mkIfaceTyConInfo IsPromoted IfaceNormalTyCon))
                  IA_Nil
  where dc_name = getName liftedDataConTyCon

-- | The type 'Many :: Multiplicity'.
many_ty :: IfaceType
many_ty =
    IfaceTyConApp (IfaceTyCon dc_name (mkIfaceTyConInfo IsPromoted IfaceNormalTyCon))
                  IA_Nil
  where dc_name = getName manyDataConTyCon

-- | In user style, run the printer on the type after defaulting
-- RuntimeRep/Levity and Multiplicity variables (as dictated by
-- -fprint-explicit-runtime-reps and -XLinearTypes); in other styles
-- print the type unchanged.
hideNonStandardTypes :: (IfaceType -> SDoc) -> IfaceType -> SDoc
hideNonStandardTypes f ty
  = sdocOption sdocPrintExplicitRuntimeReps $ \printExplicitRuntimeReps ->
    sdocOption sdocLinearTypes $ \linearTypes ->
    getPprStyle $ \sty ->
    let def_rep  = not printExplicitRuntimeReps
        def_mult = not linearTypes
    in if userStyle sty
       then f (defaultIfaceTyVarsOfKind def_rep def_mult ty)
       else f ty

instance Outputable IfaceAppArgs where
  ppr tca = pprIfaceAppArgs tca

pprIfaceAppArgs, pprParendIfaceAppArgs :: IfaceAppArgs -> SDoc
pprIfaceAppArgs       = ppr_app_args topPrec
pprParendIfaceAppArgs = ppr_app_args appPrec

-- | Print a sequence of application arguments at the given precedence.
ppr_app_args :: PprPrec -> IfaceAppArgs -> SDoc
ppr_app_args ctx_prec = go
  where
    go :: IfaceAppArgs -> SDoc
    go IA_Nil             = empty
    go (IA_Arg t argf ts) = ppr_app_arg ctx_prec (t, argf) <+> go ts
      -- See Note [Pretty-printing invisible arguments]
-- See Note [Pretty-printing invisible arguments]
-- | Print one application argument.  Required arguments print plainly;
-- Specified/Inferred arguments print as @\@ty@ / @\@{ty}@ but only when
-- -fprint-explicit-kinds is on, and are suppressed entirely otherwise
-- (the guards fall through to the final @empty@ case).
ppr_app_arg :: PprPrec -> (IfaceType, ArgFlag) -> SDoc
ppr_app_arg ctx_prec (t, argf) =
  sdocOption sdocPrintExplicitKinds $ \print_kinds ->
  case argf of
       Required  -> ppr_ty ctx_prec t
       Specified |  print_kinds
                 -> char '@' <> ppr_ty appPrec t
       Inferred  |  print_kinds
                 -> char '@' <> braces (ppr_ty topPrec t)
       _         -> empty

-------------------
pprIfaceForAllPart :: [IfaceForAllBndr] -> [IfacePredType] -> SDoc -> SDoc
pprIfaceForAllPart tvs ctxt sdoc
  = ppr_iface_forall_part ShowForAllWhen tvs ctxt sdoc

-- | Like 'pprIfaceForAllPart', but always uses an explicit @forall@.
pprIfaceForAllPartMust :: [IfaceForAllBndr] -> [IfacePredType] -> SDoc -> SDoc
pprIfaceForAllPartMust tvs ctxt sdoc
  = ppr_iface_forall_part ShowForAllMust tvs ctxt sdoc

pprIfaceForAllCoPart :: [(IfLclName, IfaceCoercion)] -> SDoc -> SDoc
pprIfaceForAllCoPart tvs sdoc
  = sep [ pprIfaceForAllCo tvs, sdoc ]

-- | Shared worker for the two 'pprIfaceForAllPart' variants: print the
-- forall (unconditionally or per the usual heuristics), the context
-- with its arrow, and then the body.
ppr_iface_forall_part :: ShowForAllFlag
                      -> [IfaceForAllBndr] -> [IfacePredType] -> SDoc -> SDoc
ppr_iface_forall_part show_forall tvs ctxt sdoc
  = sep [ case show_forall of
            ShowForAllMust -> pprIfaceForAll tvs
            ShowForAllWhen -> pprUserIfaceForAll tvs
        , pprIfaceContextArr ctxt
        , sdoc]

-- | Render the "forall ... ." or "forall ... ->" bit of a type.
pprIfaceForAll :: [IfaceForAllBndr] -> SDoc
pprIfaceForAll [] = empty
pprIfaceForAll bndrs@(Bndr _ vis : _)
  = sep [ add_separator (forAllLit <+> fsep docs)
        , pprIfaceForAll bndrs' ]
  where
    (bndrs', docs) = ppr_itv_bndrs bndrs vis

    -- Required (visible) foralls end in "->"; all others end in "."
    add_separator stuff = case vis of
                            Required -> stuff <+> arrow
                            _inv     -> stuff <>  dot

-- | Render the ... in @(forall ... .)@ or @(forall ... ->)@.
-- Returns both the list of not-yet-rendered binders and the doc.
-- No anonymous binders here!
-- | Take the longest prefix of binders that share the visibility of the
-- first one, render them, and return the remaining binders alongside the
-- rendered docs.
ppr_itv_bndrs :: [IfaceForAllBndr]
              -> ArgFlag  -- ^ visibility of the first binder in the list
              -> ([IfaceForAllBndr], [SDoc])
ppr_itv_bndrs all_bndrs@(bndr@(Bndr _ vis) : bndrs) vis1
  | vis `sameVis` vis1 = let (bndrs', doc) = ppr_itv_bndrs bndrs vis1
                         in (bndrs', pprIfaceForAllBndr bndr : doc)
  | otherwise = (all_bndrs, [])
ppr_itv_bndrs [] _ = ([], [])

pprIfaceForAllCo :: [(IfLclName, IfaceCoercion)] -> SDoc
pprIfaceForAllCo []  = empty
pprIfaceForAllCo tvs = text "forall" <+> pprIfaceForAllCoBndrs tvs <> dot

pprIfaceForAllCoBndrs :: [(IfLclName, IfaceCoercion)] -> SDoc
pprIfaceForAllCoBndrs bndrs = hsep $ map pprIfaceForAllCoBndr bndrs

-- | Print one forall binder; Inferred type-variable binders are wrapped
-- in braces.  See Note [Pretty-printing invisible arguments].
pprIfaceForAllBndr :: IfaceForAllBndr -> SDoc
pprIfaceForAllBndr bndr =
  case bndr of
    Bndr (IfaceTvBndr tv) Inferred ->
      braces $ pprIfaceTvBndr tv suppress_sig (UseBndrParens False)
    Bndr (IfaceTvBndr tv) _ ->
      pprIfaceTvBndr tv suppress_sig (UseBndrParens True)
    Bndr (IfaceIdBndr idv) _ ->
      pprIfaceIdBndr idv
  where
    -- See Note [Suppressing binder signatures]
    suppress_sig = SuppressBndrSig False

pprIfaceForAllCoBndr :: (IfLclName, IfaceCoercion) -> SDoc
pprIfaceForAllCoBndr (tv, kind_co)
  = parens (ppr tv <+> dcolon <+> pprIfaceCoercion kind_co)

-- | Show forall flag
--
-- Unconditionally show the forall quantifier with ('ShowForAllMust')
-- or when ('ShowForAllWhen') the names used are free in the binder
-- or when compiling with -fprint-explicit-foralls.
data ShowForAllFlag = ShowForAllMust | ShowForAllWhen

pprIfaceSigmaType :: ShowForAllFlag -> IfaceType -> SDoc
pprIfaceSigmaType show_forall ty
  = hideNonStandardTypes ppr_fn ty
  where
    ppr_fn iface_ty =
      let (invis_tvs, theta, tau) = splitIfaceSigmaTy iface_ty
          (req_tvs, tau') = splitIfaceReqForallTy tau
          -- splitIfaceSigmaTy is recursive, so it will gather the binders after
          -- the theta, i.e.  forall a. theta => forall b. tau
          -- will give you    ([a,b], theta, tau).
          --
          -- This isn't right when it comes to visible forall (see
          --  testsuite/tests/polykinds/T18522-ppr),
          -- so we split off required binders separately,
          -- using splitIfaceReqForallTy.
          --
          -- An alternative solution would be to make splitIfaceSigmaTy
          -- non-recursive (see #18458).
          -- Then it could handle both invisible and required binders, and
          -- splitIfaceReqForallTy wouldn't be necessary here.
       in ppr_iface_forall_part show_forall invis_tvs theta $
          sep [pprIfaceForAll req_tvs, ppr tau']

-- | Print a forall only when warranted.
-- See Note [When to print foralls] in this module.
pprUserIfaceForAll :: [IfaceForAllBndr] -> SDoc
pprUserIfaceForAll tvs
   = sdocOption sdocPrintExplicitForalls $ \print_foralls ->
     -- See Note [When to print foralls] in this module.
     ppWhen (any tv_has_kind_var tvs
             || any tv_is_required tvs
             || print_foralls) $
     pprIfaceForAll tvs
   where
     tv_has_kind_var (Bndr (IfaceTvBndr (_,kind)) _)
       = not (ifTypeIsVarFree kind)
     tv_has_kind_var _ = False

     tv_is_required = isVisibleArgFlag . binderArgFlag

{- Note [When to print foralls]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We opt to explicitly pretty-print `forall`s if any of the following
criteria are met:

1. -fprint-explicit-foralls is on.

2. A bound type variable has a polymorphic kind. E.g.,

        forall k (a::k). Proxy a -> Proxy a

   Since a's kind mentions a variable k, we print the foralls.

3. A bound type variable is a visible argument (#14238).
   Suppose we are printing the kind of:

        T :: forall k -> k -> Type

   The "forall k ->" notation means that this kind argument is required.
   That is, it must be supplied at uses of T. E.g.,

        f :: T (Type->Type)  Monad -> Int

   So we print an explicit "T :: forall k -> k -> Type",
   because omitting it and printing "T :: k -> Type" would be
   utterly misleading.

   See Note [VarBndrs, TyCoVarBinders, TyConBinders, and visibility]
   in GHC.Core.TyCo.Rep.

N.B. Until now (Aug 2018) we didn't check anything for coercion variables.
Note [Printing foralls in type family instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We use the same criteria as in Note [When to print foralls] to determine
whether a type family instance should be pretty-printed with an explicit
`forall`. Example:

  type family Foo (a :: k) :: k where
    Foo Maybe       = []
    Foo (a :: Type) = Int
    Foo a           = a

Without -fprint-explicit-foralls enabled, this will be pretty-printed as:

  type family Foo (a :: k) :: k where
    Foo Maybe = []
    Foo a = Int
    forall k (a :: k). Foo a = a

Note that only the third equation has an explicit forall, since it has a type
variable with a non-Type kind. (If -fprint-explicit-foralls were enabled, then
the second equation would be preceded with `forall a.`.)

There is one tricky point in the implementation: what visibility do we give
the type variables in a type family instance? Type family instances only store
type *variables*, not type variable *binders*, and only the latter has
visibility information. We opt to default the visibility of each of these
type variables to Specified because users can't ever instantiate these
variables manually, so the choice of visibility is only relevant to
pretty-printing. (This is why the `k` in `forall k (a :: k). ...` above is
printed the way it is, even though it wasn't written explicitly in the
original source code.)

We adopt the same strategy for data family instances. Example:

  data family DF (a :: k)
  data instance DF '[a, b] = DFList

That data family instance is pretty-printed as:

  data instance forall j (a :: j) (b :: j). DF '[a, b] = DFList

This is despite the fact that the representation tycon for this data instance
(call it $DF:List) actually has different visibilities for its binders.
However, the visibilities of these binders are utterly irrelevant to the
programmer, who cares only about the specificity of variables in `DF`'s type,
not $DF:List's type. Therefore, we opt to pretty-print all variables in data
family instances as Specified.
Note [Printing promoted type constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this GHCi session (#14343)
    > _ :: Proxy '[ 'True ]
    error:
      Found hole: _ :: Proxy '['True]

This would be bad, because the '[' looks like a character literal.
Solution: in type-level lists and tuples, add a leading space
if the first type is itself promoted.  See pprSpaceIfPromotedTyCon.
-}

-------------------

-- | Prefix a space if the given 'IfaceType' is a promoted 'TyCon'.
-- See Note [Printing promoted type constructors]
pprSpaceIfPromotedTyCon :: IfaceType -> SDoc -> SDoc
pprSpaceIfPromotedTyCon (IfaceTyConApp tyCon _)
  = case ifaceTyConIsPromoted (ifaceTyConInfo tyCon) of
      IsPromoted -> (space <>)
      _ -> id
pprSpaceIfPromotedTyCon _
  = id

-- See equivalent function in "GHC.Core.TyCo.Rep"
pprIfaceTyList :: PprPrec -> IfaceType -> IfaceType -> SDoc
-- Given a type-level list (t1 ': t2), see if we can print
-- it in list notation [t1, ...].
-- Precondition: Opt_PrintExplicitKinds is off
pprIfaceTyList ctxt_prec ty1 ty2
  = case gather ty2 of
      (arg_tys, Nothing)
        -> char '\'' <> brackets (pprSpaceIfPromotedTyCon ty1 (fsep
                        (punctuate comma (map (ppr_ty topPrec) (ty1:arg_tys)))))
      (arg_tys, Just tl)
        -> maybeParen ctxt_prec funPrec $ hang (ppr_ty funPrec ty1)
                        2 (fsep [ colon <+> ppr_ty funPrec ty | ty <- arg_tys ++ [tl]])
  where
    gather :: IfaceType -> ([IfaceType], Maybe IfaceType)
     -- (gather ty) = (tys, Nothing) means ty is a list [t1, .., tn]
     --             = (tys, Just tl) means ty is of form t1:t2:...tn:tl
    gather (IfaceTyConApp tc tys)
      | tc `ifaceTyConHasKey` consDataConKey
      , IA_Arg _ argf (IA_Arg ty1 Required (IA_Arg ty2 Required IA_Nil)) <- tys
      , isInvisibleArgFlag argf
      , (args, tl) <- gather ty2
      = (ty1:args, tl)
      | tc `ifaceTyConHasKey` nilDataConKey
      = ([], Nothing)
    gather ty = ([], Just ty)

pprIfaceTypeApp :: PprPrec -> IfaceTyCon -> IfaceAppArgs -> SDoc
pprIfaceTypeApp prec tc args = pprTyTcApp prec tc args

-- | Print a type-constructor application, handling the many special cases
-- (implicit parameters, tuples, sums, type-level lists, Type/(->)
-- abbreviations, TypeError, and equalities) before the generic case.
pprTyTcApp :: PprPrec -> IfaceTyCon -> IfaceAppArgs -> SDoc
pprTyTcApp ctxt_prec tc tys =
    sdocOption sdocPrintExplicitKinds $ \print_kinds ->
    sdocOption sdocPrintTypeAbbreviations $ \print_type_abbreviations ->
    getPprDebug $ \debug ->

    if | ifaceTyConName tc `hasKey` ipClassKey
       , IA_Arg (IfaceLitTy (IfaceStrTyLit n)) Required (IA_Arg ty Required IA_Nil) <- tys
       -> maybeParen ctxt_prec funPrec
          $ char '?' <> ftext n <> text "::" <> ppr_ty topPrec ty

       | IfaceTupleTyCon arity sort <- ifaceTyConSort info
       , not debug
       , arity == ifaceVisAppArgsLength tys
       -> pprTuple ctxt_prec sort (ifaceTyConIsPromoted info) tys
           -- NB: pprTuple requires a saturated tuple.

       | IfaceSumTyCon arity <- ifaceTyConSort info
       , not debug
       , arity == ifaceVisAppArgsLength tys
       -> pprSum (ifaceTyConIsPromoted info) tys
           -- NB: pprSum requires a saturated unboxed sum.

       | tc `ifaceTyConHasKey` consDataConKey
       , False <- print_kinds
       , IA_Arg _ argf (IA_Arg ty1 Required (IA_Arg ty2 Required IA_Nil)) <- tys
       , isInvisibleArgFlag argf
       -> pprIfaceTyList ctxt_prec ty1 ty2

       | isIfaceTyConAppLiftedTypeKind tc tys
       , print_type_abbreviations  -- See Note [Printing type abbreviations]
       -> ppr_kind_type ctxt_prec

       | tc `ifaceTyConHasKey` funTyConKey
       , IA_Arg (IfaceTyConApp rep IA_Nil) Required args <- tys
       , rep `ifaceTyConHasKey` manyDataConKey
       , print_type_abbreviations  -- See Note [Printing type abbreviations]
       -> pprIfacePrefixApp ctxt_prec (parens arrow) (map (ppr_app_arg appPrec) $
          appArgsIfaceTypesArgFlags $ stripInvisArgs (PrintExplicitKinds print_kinds) args)
          -- Use appArgsIfaceTypesArgFlags to print invisible arguments
          -- correctly (#19310)

       | tc `ifaceTyConHasKey` errorMessageTypeErrorFamKey
       , not debug
         -- Suppress detail unless you _really_ want to see
       -> text "(TypeError ...)"

       | Just doc <- ppr_equality ctxt_prec tc (appArgsIfaceTypes tys)
       -> doc

       | otherwise
       -> ppr_iface_tc_app ppr_app_arg ctxt_prec tc $
          appArgsIfaceTypesArgFlags $
          stripInvisArgs (PrintExplicitKinds print_kinds) tys
  where
    info = ifaceTyConInfo tc

-- | Print the lifted type kind: @Type@ when -XStarIsType is off,
-- otherwise @*@ (or the unicode star).
ppr_kind_type :: PprPrec -> SDoc
ppr_kind_type ctxt_prec = sdocOption sdocStarIsType $ \case
   False -> pprPrefixOcc liftedTypeKindTyConName
   True  -> maybeParen ctxt_prec starPrec $
              unicodeSyntax (char '★') (char '*')

-- | Pretty-print a type-level equality.
-- Returns (Just doc) if the argument is a /saturated/ application
-- of   eqTyCon          (~)
--      eqPrimTyCon      (~#)
--      eqReprPrimTyCon  (~R#)
--      heqTyCon         (~~)
--
-- See Note [Equality predicates in IfaceType]
-- and Note [The equality types story] in GHC.Builtin.Types.Prim
ppr_equality :: PprPrec -> IfaceTyCon -> [IfaceType] -> Maybe SDoc
ppr_equality ctxt_prec tc args
  | hetero_eq_tc
  , [k1, k2, t1, t2] <- args
  = Just $ print_equality (k1, k2, t1, t2)

  | hom_eq_tc
  , [k, t1, t2] <- args
  = Just $ print_equality (k, k, t1, t2)

  | otherwise
  = Nothing
  where
    homogeneous = tc_name `hasKey` eqTyConKey -- (~)
               || hetero_tc_used_homogeneously
      where
        hetero_tc_used_homogeneously
          = case ifaceTyConSort $ ifaceTyConInfo tc of
              IfaceEqualityTyCon -> True
              _other             -> False
             -- True <=> a heterogeneous equality whose arguments
             --          are (in this case) of the same kind

    tc_name = ifaceTyConName tc
    pp = ppr_ty
    hom_eq_tc = tc_name `hasKey` eqTyConKey            -- (~)
    hetero_eq_tc = tc_name `hasKey` eqPrimTyConKey     -- (~#)
                || tc_name `hasKey` eqReprPrimTyConKey -- (~R#)
                || tc_name `hasKey` heqTyConKey        -- (~~)
    nominal_eq_tc = tc_name `hasKey` heqTyConKey       -- (~~)
                 || tc_name `hasKey` eqPrimTyConKey    -- (~#)

    print_equality args =
        sdocOption sdocPrintExplicitKinds $ \print_kinds ->
        sdocOption sdocPrintEqualityRelations $ \print_eqs ->
        getPprStyle $ \style ->
        getPprDebug $ \debug ->
        print_equality' args print_kinds
          (print_eqs || dumpStyle style || debug)

    print_equality' (ki1, ki2, ty1, ty2) print_kinds print_eqs
      | -- If -fprint-equality-relations is on, just print the original TyCon
        print_eqs
      = ppr_infix_eq (ppr tc)

      | -- Homogeneous use of heterogeneous equality (ty1 ~~ ty2)
        -- or unlifted equality (ty1 ~# ty2)
        nominal_eq_tc, homogeneous
      = ppr_infix_eq (text "~")

      | -- Heterogeneous use of unlifted equality (ty1 ~# ty2)
        not homogeneous
      = ppr_infix_eq (ppr heqTyCon)

      | -- Homogeneous use of representational unlifted equality (ty1 ~R# ty2)
        tc_name `hasKey` eqReprPrimTyConKey, homogeneous
      = let ki | print_kinds = [pp appPrec ki1]
               | otherwise   = []
        in pprIfacePrefixApp ctxt_prec (ppr coercibleTyCon)
                             (ki ++ [pp appPrec ty1, pp appPrec ty2])

        -- The other cases work as you'd expect
      | otherwise
      = ppr_infix_eq (ppr tc)
      where
        ppr_infix_eq :: SDoc -> SDoc
        ppr_infix_eq eq_op = pprIfaceInfixApp ctxt_prec eq_op
                               (pp_ty_ki ty1 ki1) (pp_ty_ki ty2 ki2)
          where
            pp_ty_ki ty ki
              | print_kinds
              = parens (pp topPrec ty <+> dcolon <+> pp opPrec ki)
              | otherwise
              = pp opPrec ty

pprIfaceCoTcApp :: PprPrec -> IfaceTyCon -> [IfaceCoercion] -> SDoc
pprIfaceCoTcApp ctxt_prec tc tys =
  ppr_iface_tc_app (\prec (co, _) -> ppr_co prec co) ctxt_prec tc
    (map (, Required) tys)
    -- We are trying to re-use ppr_iface_tc_app here, which requires its
    -- arguments to be accompanied by visibilities. But visibility is
    -- irrelevant when printing coercions, so just default everything to
    -- Required.

-- | Pretty-prints an application of a type constructor to some arguments
-- (whose visibilities are known). This is polymorphic (over @a@) since we use
-- this function to pretty-print two different things:
--
-- 1. Types (from `pprTyTcApp'`)
--
-- 2. Coercions (from 'pprIfaceCoTcApp')
ppr_iface_tc_app :: (PprPrec -> (a, ArgFlag) -> SDoc)
                 -> PprPrec -> IfaceTyCon -> [(a, ArgFlag)] -> SDoc
ppr_iface_tc_app pp _ tc [ty]
  | tc `ifaceTyConHasKey` listTyConKey = pprPromotionQuote tc <> brackets (pp topPrec ty)

ppr_iface_tc_app pp ctxt_prec tc tys
  | tc `ifaceTyConHasKey` liftedTypeKindTyConKey
  = ppr_kind_type ctxt_prec

  | not (isSymOcc (nameOccName (ifaceTyConName tc)))
  = pprIfacePrefixApp ctxt_prec (ppr tc) (map (pp appPrec) tys)

  | [ ty1@(_, Required)
    , ty2@(_, Required) ] <- tys
      -- Infix, two visible arguments (we know nothing of precedence though).
      -- Don't apply this special case if one of the arguments is invisible,
      -- lest we print something like (@LiftedRep -> @LiftedRep) (#15941).
  = pprIfaceInfixApp ctxt_prec (ppr tc) (pp opPrec ty1) (pp opPrec ty2)

  | otherwise
  = pprIfacePrefixApp ctxt_prec (parens (ppr tc)) (map (pp appPrec) tys)

-- | Pretty-print an unboxed sum type. The sum should be saturated:
-- as many visible arguments as the arity of the sum.
--
-- NB: this always strips off the invisible 'RuntimeRep' arguments,
-- even with `-fprint-explicit-runtime-reps` and `-fprint-explicit-kinds`.
pprSum :: PromotionFlag -> IfaceAppArgs -> SDoc
pprSum is_promoted args
  =   -- drop the RuntimeRep vars.
      -- See Note [Unboxed tuple RuntimeRep vars] in GHC.Core.TyCon
    let tys   = appArgsIfaceTypes args
        args' = drop (length tys `div` 2) tys
    in pprPromotionQuoteI is_promoted
       <> sumParens (pprWithBars (ppr_ty topPrec) args')

-- | Pretty-print a tuple type (boxed tuple, constraint tuple, unboxed tuple).
-- The tuple should be saturated: as many visible arguments as the arity of
-- the tuple.
--
-- NB: this always strips off the invisible 'RuntimeRep' arguments,
-- even with `-fprint-explicit-runtime-reps` and `-fprint-explicit-kinds`.
pprTuple :: PprPrec -> TupleSort -> PromotionFlag -> IfaceAppArgs -> SDoc
pprTuple ctxt_prec sort promoted args =
  case promoted of
    IsPromoted
      -> let tys = appArgsIfaceTypes args
             args' = drop (length tys `div` 2) tys
             spaceIfPromoted = case args' of
               arg0:_ -> pprSpaceIfPromotedTyCon arg0
               _ -> id
         in ppr_tuple_app args' $
            pprPromotionQuoteI IsPromoted <>
            tupleParens sort (spaceIfPromoted (pprWithCommas pprIfaceType args'))

    NotPromoted
      |  ConstraintTuple <- sort
      ,  IA_Nil <- args
      -> maybeParen ctxt_prec sigPrec $
         text "() :: Constraint"

      | otherwise
      ->   -- drop the RuntimeRep vars.
           -- See Note [Unboxed tuple RuntimeRep vars] in GHC.Core.TyCon
         let tys   = appArgsIfaceTypes args
             args' = case sort of
                       UnboxedTuple -> drop (length tys `div` 2) tys
                       _            -> tys
         in
         ppr_tuple_app args' $
         pprPromotionQuoteI promoted <>
         tupleParens sort (pprWithCommas pprIfaceType args')
  where
    ppr_tuple_app :: [IfaceType] -> SDoc -> SDoc
    ppr_tuple_app args_wo_runtime_reps ppr_args_w_parens
        -- Special-case unary boxed tuples so that they are pretty-printed as
        -- `Solo x`, not `(x)`
      | [_] <- args_wo_runtime_reps
      , BoxedTuple <- sort
      = let unit_tc_info = mkIfaceTyConInfo promoted IfaceNormalTyCon
            unit_tc = IfaceTyCon (tupleTyConName sort 1) unit_tc_info
         in pprPrecIfaceType ctxt_prec $ IfaceTyConApp unit_tc args
      | otherwise
      = ppr_args_w_parens

pprIfaceTyLit :: IfaceTyLit -> SDoc
pprIfaceTyLit (IfaceNumTyLit n)  = integer n
pprIfaceTyLit (IfaceStrTyLit n)  = text (show n)
pprIfaceTyLit (IfaceCharTyLit c) = text (show c)

pprIfaceCoercion, pprParendIfaceCoercion :: IfaceCoercion -> SDoc
pprIfaceCoercion = ppr_co topPrec
pprParendIfaceCoercion = ppr_co appPrec

-- | The workhorse for printing an 'IfaceCoercion' at a given precedence.
ppr_co :: PprPrec -> IfaceCoercion -> SDoc
ppr_co _         (IfaceReflCo ty) = angleBrackets (ppr ty) <> ppr_role Nominal
ppr_co _         (IfaceGReflCo r ty IfaceMRefl)
  = angleBrackets (ppr ty) <> ppr_role r
ppr_co ctxt_prec (IfaceGReflCo r ty (IfaceMCo co))
  = ppr_special_co ctxt_prec
    (text "GRefl" <+> ppr r <+> pprParendIfaceType ty) [co]
ppr_co ctxt_prec (IfaceFunCo r cow co1 co2)
  = maybeParen ctxt_prec funPrec $
    sep (ppr_co funPrec co1 : ppr_fun_tail cow co2)
  where
    -- Flatten a chain of function coercions into one arrow sequence,
    -- threading each arrow's multiplicity coercion.
    ppr_fun_tail cow' (IfaceFunCo r cow co1 co2)
      = (coercionArrow cow' <> ppr_role r <+> ppr_co funPrec co1) : ppr_fun_tail cow co2
    ppr_fun_tail cow' other_co
      = [coercionArrow cow' <> ppr_role r <+> pprIfaceCoercion other_co]
    coercionArrow w = mulArrow (ppr_co topPrec w)

ppr_co _         (IfaceTyConAppCo r tc cos)
  = parens (pprIfaceCoTcApp topPrec tc cos) <> ppr_role r
ppr_co ctxt_prec (IfaceAppCo co1 co2)
  = maybeParen ctxt_prec appPrec $
    ppr_co funPrec co1 <+> pprParendIfaceCoercion co2
ppr_co ctxt_prec co@(IfaceForAllCo {})
  = maybeParen ctxt_prec funPrec $
    pprIfaceForAllCoPart tvs (pprIfaceCoercion inner_co)
  where
    (tvs, inner_co) = split_co co

    split_co (IfaceForAllCo (IfaceTvBndr (name, _)) kind_co co')
      = let (tvs, co'') = split_co co' in ((name,kind_co):tvs,co'')
    split_co (IfaceForAllCo (IfaceIdBndr (_, name, _)) kind_co co')
      = let (tvs, co'') = split_co co' in ((name,kind_co):tvs,co'')
    split_co co' = ([], co')

-- Why these three? See Note [Free tyvars in IfaceType]
ppr_co _ (IfaceFreeCoVar covar) = ppr covar
ppr_co _ (IfaceCoVarCo covar)   = ppr covar
ppr_co _ (IfaceHoleCo covar)    = braces (ppr covar)

ppr_co _ (IfaceUnivCo prov role ty1 ty2)
  = text "Univ" <> (parens $
      sep [ ppr role <+> pprIfaceUnivCoProv prov
          , dcolon <+> ppr ty1 <> comma <+> ppr ty2 ])

ppr_co ctxt_prec (IfaceInstCo co ty)
  = maybeParen ctxt_prec appPrec $
    text "Inst" <+> pprParendIfaceCoercion co
                <+> pprParendIfaceCoercion ty

ppr_co ctxt_prec (IfaceAxiomRuleCo tc cos)
  = maybeParen ctxt_prec appPrec $ ppr tc <+> parens (interpp'SP cos)

ppr_co ctxt_prec (IfaceAxiomInstCo n i cos)
  = ppr_special_co ctxt_prec (ppr n <> brackets (ppr i)) cos
ppr_co ctxt_prec (IfaceSymCo co)
  = ppr_special_co ctxt_prec (text "Sym") [co]
ppr_co ctxt_prec (IfaceTransCo co1 co2)
    -- chain nested TransCo
  = let ppr_trans (IfaceTransCo c1 c2) = semi <+> ppr_co topPrec c1 : ppr_trans c2
        ppr_trans c                    = [semi <+> ppr_co opPrec c]
    in maybeParen ctxt_prec opPrec $
       vcat (ppr_co topPrec co1 : ppr_trans co2)
ppr_co ctxt_prec (IfaceNthCo d co)
  = ppr_special_co ctxt_prec (text "Nth:" <> int d) [co]
ppr_co ctxt_prec (IfaceLRCo lr co)
  = ppr_special_co ctxt_prec (ppr lr) [co]
ppr_co ctxt_prec (IfaceSubCo co)
  = ppr_special_co ctxt_prec (text "Sub") [co]
ppr_co ctxt_prec (IfaceKindCo co)
  = ppr_special_co ctxt_prec (text "Kind") [co]

-- | Print a named coercion form applied to its sub-coercions.
ppr_special_co :: PprPrec -> SDoc -> [IfaceCoercion] -> SDoc
ppr_special_co ctxt_prec doc cos
  = maybeParen ctxt_prec appPrec
               (sep [doc, nest 4 (sep (map pprParendIfaceCoercion cos))])

-- | Print a role suffix: @_N@, @_R@ or @_P@.
ppr_role :: Role -> SDoc
ppr_role r = underscore <> pp_role
  where pp_role = case r of
                    Nominal          -> char 'N'
                    Representational -> char 'R'
                    Phantom          -> char 'P'

------------------
pprIfaceUnivCoProv :: IfaceUnivCoProv -> SDoc
pprIfaceUnivCoProv (IfacePhantomProv co)
  = text "phantom" <+> pprParendIfaceCoercion co
pprIfaceUnivCoProv (IfaceProofIrrelProv co)
  = text "irrel" <+> pprParendIfaceCoercion co
pprIfaceUnivCoProv (IfacePluginProv s)
  = text "plugin" <+> doubleQuotes (text s)
pprIfaceUnivCoProv (IfaceCorePrepProv _)
  = text "CorePrep"

-------------------
instance Outputable IfaceTyCon where
  ppr tc = pprPromotionQuote tc <> ppr (ifaceTyConName tc)

instance Outputable IfaceTyConInfo where
  ppr (IfaceTyConInfo { ifaceTyConIsPromoted = prom
                      , ifaceTyConSort       = sort })
    = angleBrackets $ ppr prom <> comma <+> ppr sort

pprPromotionQuote :: IfaceTyCon -> SDoc
pprPromotionQuote tc =
    pprPromotionQuoteI $ ifaceTyConIsPromoted $ ifaceTyConInfo tc

pprPromotionQuoteI :: PromotionFlag -> SDoc
pprPromotionQuoteI NotPromoted = empty
pprPromotionQuoteI IsPromoted  = char '\''

instance Outputable IfaceCoercion where
  ppr = pprIfaceCoercion

instance Binary IfaceTyCon where
   put_ bh (IfaceTyCon n i) = put_ bh n >> put_ bh i

   get bh = do n <- get bh
               i <- get bh
               return (IfaceTyCon n i)

instance Binary IfaceTyConSort where
   put_ bh IfaceNormalTyCon             = putByte bh 0
   put_ bh (IfaceTupleTyCon arity sort) = putByte bh 1 >> put_ bh arity >> put_ bh sort
   put_ bh (IfaceSumTyCon arity)        = putByte bh 2 >> put_ bh arity
   put_ bh IfaceEqualityTyCon           = putByte bh 3

   get bh = do
       n <- getByte bh
       case n of
         0 -> return IfaceNormalTyCon
         1 -> IfaceTupleTyCon <$> get bh <*> get bh
         2 -> IfaceSumTyCon <$> get bh
         _ -> return IfaceEqualityTyCon

instance Binary IfaceTyConInfo where
   put_ bh (IfaceTyConInfo i s) = put_ bh i >> put_ bh s

   get bh = mkIfaceTyConInfo <$> get bh <*> get bh

instance Outputable IfaceTyLit where
  ppr = pprIfaceTyLit

instance Binary IfaceTyLit where
  put_ bh (IfaceNumTyLit n)  = putByte bh 1 >> put_ bh n
  put_ bh (IfaceStrTyLit n)  = putByte bh 2 >> put_ bh n
  put_ bh (IfaceCharTyLit n) = putByte bh 3 >> put_ bh n

  get bh =
    do tag <- getByte bh
       case tag of
         1 -> do { n <- get bh
                 ; return (IfaceNumTyLit n) }
         2 -> do { n <- get bh
                 ; return (IfaceStrTyLit n) }
         3 -> do { n <- get bh
                 ; return (IfaceCharTyLit n) }
         _ -> panic ("get IfaceTyLit " ++ show tag)

instance Binary IfaceAppArgs where
  put_ bh tk =
    case tk of
      IA_Arg t a ts -> putByte bh 0 >> put_ bh t >> put_ bh a >> put_ bh ts
      IA_Nil        -> putByte bh 1

  get bh =
    do c <- getByte bh
       case c of
         0 -> do
           t  <- get bh
           a  <- get bh
           ts <- get bh
           return $! IA_Arg t a ts
         1 -> return IA_Nil
         _ -> panic ("get IfaceAppArgs " ++ show c)

-------------------

-- Some notes about printing contexts
--
-- In the event that we are printing a singleton context (e.g. @Eq a@) we can
-- omit parentheses. However, we must take care to set the precedence correctly
-- to opPrec, since something like @a :~: b@ must be parenthesized (see
-- #9658).
--
-- When printing a larger context we use 'fsep' instead of 'sep' so that
-- the context doesn't get displayed as a giant column. Rather than,
--  instance (Eq a,
--            Eq b,
--            Eq c,
--            Eq d,
--            Eq e,
--            Eq f,
--            Eq g,
--            Eq h,
--            Eq i,
--            Eq j,
--            Eq k,
--            Eq l) =>
--           Eq (a, b, c, d, e, f, g, h, i, j, k, l)
--
-- we want
--
--  instance (Eq a, Eq b, Eq c, Eq d, Eq e, Eq f, Eq g, Eq h, Eq i,
--            Eq j, Eq k, Eq l) =>
--           Eq (a, b, c, d, e, f, g, h, i, j, k, l)

-- | Prints "(C a, D b) =>", including the arrow.
-- Used when we want to print a context in a type, so we
-- use 'funPrec' to decide whether to parenthesise a singleton
-- predicate; e.g.
Num a => a -> a pprIfaceContextArr :: [IfacePredType] -> SDoc pprIfaceContextArr [] = empty pprIfaceContextArr [pred] = ppr_ty funPrec pred <+> darrow pprIfaceContextArr preds = ppr_parend_preds preds <+> darrow -- | Prints a context or @()@ if empty -- You give it the context precedence pprIfaceContext :: PprPrec -> [IfacePredType] -> SDoc pprIfaceContext _ [] = text "()" pprIfaceContext prec [pred] = ppr_ty prec pred pprIfaceContext _ preds = ppr_parend_preds preds ppr_parend_preds :: [IfacePredType] -> SDoc ppr_parend_preds preds = parens (fsep (punctuate comma (map ppr preds))) instance Binary IfaceType where put_ _ (IfaceFreeTyVar tv) = pprPanic "Can't serialise IfaceFreeTyVar" (ppr tv) put_ bh (IfaceForAllTy aa ab) = do putByte bh 0 put_ bh aa put_ bh ab put_ bh (IfaceTyVar ad) = do putByte bh 1 put_ bh ad put_ bh (IfaceAppTy ae af) = do putByte bh 2 put_ bh ae put_ bh af put_ bh (IfaceFunTy af aw ag ah) = do putByte bh 3 put_ bh af put_ bh aw put_ bh ag put_ bh ah put_ bh (IfaceTyConApp tc tys) = do { putByte bh 5; put_ bh tc; put_ bh tys } put_ bh (IfaceCastTy a b) = do { putByte bh 6; put_ bh a; put_ bh b } put_ bh (IfaceCoercionTy a) = do { putByte bh 7; put_ bh a } put_ bh (IfaceTupleTy s i tys) = do { putByte bh 8; put_ bh s; put_ bh i; put_ bh tys } put_ bh (IfaceLitTy n) = do { putByte bh 9; put_ bh n } get bh = do h <- getByte bh case h of 0 -> do aa <- get bh ab <- get bh return (IfaceForAllTy aa ab) 1 -> do ad <- get bh return (IfaceTyVar ad) 2 -> do ae <- get bh af <- get bh return (IfaceAppTy ae af) 3 -> do af <- get bh aw <- get bh ag <- get bh ah <- get bh return (IfaceFunTy af aw ag ah) 5 -> do { tc <- get bh; tys <- get bh ; return (IfaceTyConApp tc tys) } 6 -> do { a <- get bh; b <- get bh ; return (IfaceCastTy a b) } 7 -> do { a <- get bh ; return (IfaceCoercionTy a) } 8 -> do { s <- get bh; i <- get bh; tys <- get bh ; return (IfaceTupleTy s i tys) } _ -> do n <- get bh return (IfaceLitTy n) instance Binary IfaceMCoercion where put_ bh 
IfaceMRefl = putByte bh 1 put_ bh (IfaceMCo co) = do putByte bh 2 put_ bh co get bh = do tag <- getByte bh case tag of 1 -> return IfaceMRefl 2 -> do a <- get bh return $ IfaceMCo a _ -> panic ("get IfaceMCoercion " ++ show tag) instance Binary IfaceCoercion where put_ bh (IfaceReflCo a) = do putByte bh 1 put_ bh a put_ bh (IfaceGReflCo a b c) = do putByte bh 2 put_ bh a put_ bh b put_ bh c put_ bh (IfaceFunCo a w b c) = do putByte bh 3 put_ bh a put_ bh w put_ bh b put_ bh c put_ bh (IfaceTyConAppCo a b c) = do putByte bh 4 put_ bh a put_ bh b put_ bh c put_ bh (IfaceAppCo a b) = do putByte bh 5 put_ bh a put_ bh b put_ bh (IfaceForAllCo a b c) = do putByte bh 6 put_ bh a put_ bh b put_ bh c put_ bh (IfaceCoVarCo a) = do putByte bh 7 put_ bh a put_ bh (IfaceAxiomInstCo a b c) = do putByte bh 8 put_ bh a put_ bh b put_ bh c put_ bh (IfaceUnivCo a b c d) = do putByte bh 9 put_ bh a put_ bh b put_ bh c put_ bh d put_ bh (IfaceSymCo a) = do putByte bh 10 put_ bh a put_ bh (IfaceTransCo a b) = do putByte bh 11 put_ bh a put_ bh b put_ bh (IfaceNthCo a b) = do putByte bh 12 put_ bh a put_ bh b put_ bh (IfaceLRCo a b) = do putByte bh 13 put_ bh a put_ bh b put_ bh (IfaceInstCo a b) = do putByte bh 14 put_ bh a put_ bh b put_ bh (IfaceKindCo a) = do putByte bh 15 put_ bh a put_ bh (IfaceSubCo a) = do putByte bh 16 put_ bh a put_ bh (IfaceAxiomRuleCo a b) = do putByte bh 17 put_ bh a put_ bh b put_ _ (IfaceFreeCoVar cv) = pprPanic "Can't serialise IfaceFreeCoVar" (ppr cv) put_ _ (IfaceHoleCo cv) = pprPanic "Can't serialise IfaceHoleCo" (ppr cv) -- See Note [Holes in IfaceCoercion] get bh = do tag <- getByte bh case tag of 1 -> do a <- get bh return $ IfaceReflCo a 2 -> do a <- get bh b <- get bh c <- get bh return $ IfaceGReflCo a b c 3 -> do a <- get bh w <- get bh b <- get bh c <- get bh return $ IfaceFunCo a w b c 4 -> do a <- get bh b <- get bh c <- get bh return $ IfaceTyConAppCo a b c 5 -> do a <- get bh b <- get bh return $ IfaceAppCo a b 6 -> do a <- get bh b <- 
get bh c <- get bh return $ IfaceForAllCo a b c 7 -> do a <- get bh return $ IfaceCoVarCo a 8 -> do a <- get bh b <- get bh c <- get bh return $ IfaceAxiomInstCo a b c 9 -> do a <- get bh b <- get bh c <- get bh d <- get bh return $ IfaceUnivCo a b c d 10-> do a <- get bh return $ IfaceSymCo a 11-> do a <- get bh b <- get bh return $ IfaceTransCo a b 12-> do a <- get bh b <- get bh return $ IfaceNthCo a b 13-> do a <- get bh b <- get bh return $ IfaceLRCo a b 14-> do a <- get bh b <- get bh return $ IfaceInstCo a b 15-> do a <- get bh return $ IfaceKindCo a 16-> do a <- get bh return $ IfaceSubCo a 17-> do a <- get bh b <- get bh return $ IfaceAxiomRuleCo a b _ -> panic ("get IfaceCoercion " ++ show tag) instance Binary IfaceUnivCoProv where put_ bh (IfacePhantomProv a) = do putByte bh 1 put_ bh a put_ bh (IfaceProofIrrelProv a) = do putByte bh 2 put_ bh a put_ bh (IfacePluginProv a) = do putByte bh 3 put_ bh a put_ bh (IfaceCorePrepProv a) = do putByte bh 4 put_ bh a get bh = do tag <- getByte bh case tag of 1 -> do a <- get bh return $ IfacePhantomProv a 2 -> do a <- get bh return $ IfaceProofIrrelProv a 3 -> do a <- get bh return $ IfacePluginProv a 4 -> do a <- get bh return (IfaceCorePrepProv a) _ -> panic ("get IfaceUnivCoProv " ++ show tag) instance Binary (DefMethSpec IfaceType) where put_ bh VanillaDM = putByte bh 0 put_ bh (GenericDM t) = putByte bh 1 >> put_ bh t get bh = do h <- getByte bh case h of 0 -> return VanillaDM _ -> do { t <- get bh; return (GenericDM t) } instance NFData IfaceType where rnf = \case IfaceFreeTyVar f1 -> f1 `seq` () IfaceTyVar f1 -> rnf f1 IfaceLitTy f1 -> rnf f1 IfaceAppTy f1 f2 -> rnf f1 `seq` rnf f2 IfaceFunTy f1 f2 f3 f4 -> f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 IfaceForAllTy f1 f2 -> f1 `seq` rnf f2 IfaceTyConApp f1 f2 -> rnf f1 `seq` rnf f2 IfaceCastTy f1 f2 -> rnf f1 `seq` rnf f2 IfaceCoercionTy f1 -> rnf f1 IfaceTupleTy f1 f2 f3 -> f1 `seq` f2 `seq` rnf f3 instance NFData IfaceTyLit where rnf = \case IfaceNumTyLit f1 
-> rnf f1 IfaceStrTyLit f1 -> rnf f1 IfaceCharTyLit f1 -> rnf f1 instance NFData IfaceCoercion where rnf = \case IfaceReflCo f1 -> rnf f1 IfaceGReflCo f1 f2 f3 -> f1 `seq` rnf f2 `seq` rnf f3 IfaceFunCo f1 f2 f3 f4 -> f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 IfaceTyConAppCo f1 f2 f3 -> f1 `seq` rnf f2 `seq` rnf f3 IfaceAppCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceForAllCo f1 f2 f3 -> rnf f1 `seq` rnf f2 `seq` rnf f3 IfaceCoVarCo f1 -> rnf f1 IfaceAxiomInstCo f1 f2 f3 -> rnf f1 `seq` rnf f2 `seq` rnf f3 IfaceAxiomRuleCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceUnivCo f1 f2 f3 f4 -> rnf f1 `seq` f2 `seq` rnf f3 `seq` rnf f4 IfaceSymCo f1 -> rnf f1 IfaceTransCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceNthCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceLRCo f1 f2 -> f1 `seq` rnf f2 IfaceInstCo f1 f2 -> rnf f1 `seq` rnf f2 IfaceKindCo f1 -> rnf f1 IfaceSubCo f1 -> rnf f1 IfaceFreeCoVar f1 -> f1 `seq` () IfaceHoleCo f1 -> f1 `seq` () instance NFData IfaceUnivCoProv where rnf x = seq x () instance NFData IfaceMCoercion where rnf x = seq x () instance NFData IfaceOneShot where rnf x = seq x () instance NFData IfaceTyConSort where rnf = \case IfaceNormalTyCon -> () IfaceTupleTyCon arity sort -> rnf arity `seq` sort `seq` () IfaceSumTyCon arity -> rnf arity IfaceEqualityTyCon -> () instance NFData IfaceTyConInfo where rnf (IfaceTyConInfo f s) = f `seq` rnf s instance NFData IfaceTyCon where rnf (IfaceTyCon nm info) = rnf nm `seq` rnf info instance NFData IfaceBndr where rnf = \case IfaceIdBndr id_bndr -> rnf id_bndr IfaceTvBndr tv_bndr -> rnf tv_bndr instance NFData IfaceAppArgs where rnf = \case IA_Nil -> () IA_Arg f1 f2 f3 -> rnf f1 `seq` f2 `seq` rnf f3 ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/LanguageExtensions.hs0000644000000000000000000000120414470055371023646 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} -- | This module re-exports the 'Extension' type along with an orphan 'Binary' -- instance for it. 
-- -- Note that the @ghc-boot@ package has a large set of dependencies; for this -- reason the 'Extension' type itself is defined in the -- "GHC.LanguageExtensions.Type" module provided by the @ghc-boot-th@ package, -- which has no dependencies outside of @base@. For this reason -- @template-haskell@ depends upon @ghc-boot-th@, not @ghc-boot@. -- module GHC.LanguageExtensions ( module GHC.LanguageExtensions.Type ) where import Data.Binary import GHC.LanguageExtensions.Type instance Binary Extension ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/GHC/LanguageExtensions/Type.hs0000644000000000000000000001060214472375231025204 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Module : GHC.LanguageExtensions.Type -- Copyright : (c) The GHC Team -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- A data type defining the language extensions supported by GHC. -- {-# LANGUAGE DeriveGeneric, Safe #-} module GHC.LanguageExtensions.Type ( Extension(..) ) where import Prelude -- See note [Why do we import Prelude here?] import GHC.Generics -- | The language extensions known to GHC. -- -- Note that there is an orphan 'Binary' instance for this type supplied by -- the "GHC.LanguageExtensions" module provided by @ghc-boot@. We can't provide -- here as this would require adding transitive dependencies to the -- @template-haskell@ package, which must have a minimal dependency set. 
data Extension -- See Note [Updating flag description in the User's Guide] in -- GHC.Driver.Session = Cpp | OverlappingInstances | UndecidableInstances | IncoherentInstances | UndecidableSuperClasses | MonomorphismRestriction | MonoLocalBinds | DeepSubsumption | RelaxedPolyRec -- Deprecated | ExtendedDefaultRules -- Use GHC's extended rules for defaulting | ForeignFunctionInterface | UnliftedFFITypes | InterruptibleFFI | CApiFFI | GHCForeignImportPrim | JavaScriptFFI | ParallelArrays -- Syntactic support for parallel arrays | Arrows -- Arrow-notation syntax | TemplateHaskell | TemplateHaskellQuotes -- subset of TH supported by stage1, no splice | QualifiedDo | QuasiQuotes | ImplicitParams | ImplicitPrelude | ScopedTypeVariables | AllowAmbiguousTypes | UnboxedTuples | UnboxedSums | UnliftedNewtypes | UnliftedDatatypes | BangPatterns | TypeFamilies | TypeFamilyDependencies | TypeInType | OverloadedStrings | OverloadedLists | NumDecimals | DisambiguateRecordFields | RecordWildCards | NamedFieldPuns | ViewPatterns | GADTs | GADTSyntax | NPlusKPatterns | DoAndIfThenElse | BlockArguments | RebindableSyntax | ConstraintKinds | PolyKinds -- Kind polymorphism | DataKinds -- Datatype promotion | InstanceSigs | ApplicativeDo | LinearTypes | StandaloneDeriving | DeriveDataTypeable | AutoDeriveTypeable -- Automatic derivation of Typeable | DeriveFunctor | DeriveTraversable | DeriveFoldable | DeriveGeneric -- Allow deriving Generic/1 | DefaultSignatures -- Allow extra signatures for defmeths | DeriveAnyClass -- Allow deriving any class | DeriveLift -- Allow deriving Lift | DerivingStrategies | DerivingVia -- Derive through equal representation | TypeSynonymInstances | FlexibleContexts | FlexibleInstances | ConstrainedClassMethods | MultiParamTypeClasses | NullaryTypeClasses | FunctionalDependencies | UnicodeSyntax | ExistentialQuantification | MagicHash | EmptyDataDecls | KindSignatures | RoleAnnotations | ParallelListComp | TransformListComp | MonadComprehensions | 
GeneralizedNewtypeDeriving | RecursiveDo | PostfixOperators | TupleSections | PatternGuards | LiberalTypeSynonyms | RankNTypes | ImpredicativeTypes | TypeOperators | ExplicitNamespaces | PackageImports | ExplicitForAll | AlternativeLayoutRule | AlternativeLayoutRuleTransitional | DatatypeContexts | NondecreasingIndentation | RelaxedLayout | TraditionalRecordSyntax | LambdaCase | MultiWayIf | BinaryLiterals | NegativeLiterals | HexFloatLiterals | DuplicateRecordFields | OverloadedLabels | EmptyCase | PatternSynonyms | PartialTypeSignatures | NamedWildCards | StaticPointers | TypeApplications | Strict | StrictData | EmptyDataDeriving | NumericUnderscores | QuantifiedConstraints | StarIsType | ImportQualifiedPost | CUSKs | StandaloneKindSignatures | LexicalNegation | FieldSelectors | OverloadedRecordDot | OverloadedRecordUpdate deriving (Eq, Enum, Show, Generic, Bounded) -- 'Ord' and 'Bounded' are provided for GHC API users (see discussions -- in https://gitlab.haskell.org/ghc/ghc/merge_requests/2707 and -- https://gitlab.haskell.org/ghc/ghc/merge_requests/826). instance Ord Extension where compare a b = compare (fromEnum a) (fromEnum b) ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot-th/GHC/Lexeme.hs0000644000000000000000000000324114470055371021676 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Module : GHC.Lexeme -- Copyright : (c) The GHC Team -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- Functions to evaluate whether or not a string is a valid identifier. -- module GHC.Lexeme ( -- * Lexical characteristics of Haskell names startsVarSym, startsVarId, startsConSym, startsConId, startsVarSymASCII, isVarSymChar, okSymChar ) where import Prelude -- See note [Why do we import Prelude here?] import Data.Char -- | Is this character acceptable in a symbol (after the first char)? 
-- See alexGetByte in GHC.Parser.Lexer okSymChar :: Char -> Bool okSymChar c | c `elem` "(),;[]`{}_\"'" = False | otherwise = case generalCategory c of ConnectorPunctuation -> True DashPunctuation -> True OtherPunctuation -> True MathSymbol -> True CurrencySymbol -> True ModifierSymbol -> True OtherSymbol -> True _ -> False startsVarSym, startsVarId, startsConSym, startsConId :: Char -> Bool startsVarSym c = okSymChar c && c /= ':' -- Infix Ids startsConSym c = c == ':' -- Infix data constructors startsVarId c = c == '_' || case generalCategory c of -- Ordinary Ids LowercaseLetter -> True OtherLetter -> True -- See #1103 _ -> False startsConId c = isUpper c || c == '(' -- Ordinary type constructors and data constructors startsVarSymASCII :: Char -> Bool startsVarSymASCII c = c `elem` "!#$%&*+./<=>?@\\^|~-" isVarSymChar :: Char -> Bool isVarSymChar c = c == ':' || startsVarSym c ghc-lib-parser-9.4.7.20230826/compiler/GHC/Linker/Static/Utils.hs0000644000000000000000000000176414472400112021752 0ustar0000000000000000module GHC.Linker.Static.Utils where import GHC.Prelude import GHC.Platform import System.FilePath -- | Compute the output file name of a program. -- -- StaticLink boolean is used to indicate if the program is actually a static library -- (e.g., on iOS). -- -- Use the provided filename (if any), otherwise use "main.exe" (Windows), -- "a.out (otherwise without StaticLink set), "liba.a". In every case, add the -- extension if it is missing. 
exeFileName :: Platform -> Bool -> Maybe FilePath -> FilePath exeFileName platform staticLink output_fn | Just s <- output_fn = case platformOS platform of OSMinGW32 -> s "exe" _ -> if staticLink then s "a" else s | otherwise = if platformOS platform == OSMinGW32 then "main.exe" else if staticLink then "liba.a" else "a.out" where s ext | null (takeExtension s) = s <.> ext | otherwise = s ghc-lib-parser-9.4.7.20230826/compiler/GHC/Linker/Types.hs0000644000000000000000000002361614472400112020527 0ustar0000000000000000----------------------------------------------------------------------------- -- -- Types for the linkers and the loader -- -- (c) The University of Glasgow 2019 -- ----------------------------------------------------------------------------- module GHC.Linker.Types ( Loader (..) , LoaderState (..) , uninitializedLoader , modifyClosureEnv , LinkerEnv(..) , filterLinkerEnv , ClosureEnv , emptyClosureEnv , extendClosureEnv , Linkable(..) , LinkableSet , mkLinkableSet , unionLinkableSet , ObjFile , Unlinked(..) , SptEntry(..) , isObjectLinkable , linkableObjs , isObject , nameOfObject , nameOfObject_maybe , isInterpretable , byteCodeOfObject , LibrarySpec(..) , LoadedPkgInfo(..) 
, PkgsLoaded ) where import GHC.Prelude import GHC.Unit ( UnitId, Module ) import GHC.ByteCode.Types ( ItblEnv, AddrEnv, CompiledByteCode ) import GHC.Fingerprint.Type ( Fingerprint ) import GHCi.RemoteTypes ( ForeignHValue ) import GHC.Types.Var ( Id ) import GHC.Types.Name.Env ( NameEnv, emptyNameEnv, extendNameEnvList, filterNameEnv ) import GHC.Types.Name ( Name ) import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Concurrent.MVar import Data.Time ( UTCTime ) import Data.Maybe import GHC.Unit.Module.Env import GHC.Types.Unique.DSet import GHC.Types.Unique.DFM {- ********************************************************************** The Loader's state ********************************************************************* -} {- The loader state *must* match the actual state of the C dynamic linker at all times. The MVar used to hold the LoaderState contains a Maybe LoaderState. The MVar serves to ensure mutual exclusion between multiple loaded copies of the GHC library. The Maybe may be Nothing to indicate that the linker has not yet been initialised. The LinkerEnv maps Names to actual closures (for interpreted code only), for use during linking. 
-} newtype Loader = Loader { loader_state :: MVar (Maybe LoaderState) } data LoaderState = LoaderState { linker_env :: !LinkerEnv -- ^ Current global mapping from Names to their true values , bcos_loaded :: !LinkableSet -- ^ The currently loaded interpreted modules (home package) , objs_loaded :: !LinkableSet -- ^ And the currently-loaded compiled modules (home package) , pkgs_loaded :: !PkgsLoaded -- ^ The currently-loaded packages; always object code -- haskell libraries, system libraries, transitive dependencies , temp_sos :: ![(FilePath, String)] -- ^ We need to remember the name of previous temporary DLL/.so -- libraries so we can link them (see #10322) } uninitializedLoader :: IO Loader uninitializedLoader = Loader <$> newMVar Nothing modifyClosureEnv :: LoaderState -> (ClosureEnv -> ClosureEnv) -> LoaderState modifyClosureEnv pls f = let le = linker_env pls ce = closure_env le in pls { linker_env = le { closure_env = f ce } } data LinkerEnv = LinkerEnv { closure_env :: !ClosureEnv -- ^ Current global mapping from closure Names to their true values , itbl_env :: !ItblEnv -- ^ The current global mapping from RdrNames of DataCons to -- info table addresses. -- When a new Unlinked is linked into the running image, or an existing -- module in the image is replaced, the itbl_env must be updated -- appropriately. , addr_env :: !AddrEnv -- ^ Like 'closure_env' and 'itbl_env', but for top-level 'Addr#' literals, -- see Note [Generating code for top-level string literal bindings] in GHC.StgToByteCode. } filterLinkerEnv :: (Name -> Bool) -> LinkerEnv -> LinkerEnv filterLinkerEnv f le = LinkerEnv { closure_env = filterNameEnv (f . fst) (closure_env le) , itbl_env = filterNameEnv (f . fst) (itbl_env le) , addr_env = filterNameEnv (f . 
fst) (addr_env le) } type ClosureEnv = NameEnv (Name, ForeignHValue) emptyClosureEnv :: ClosureEnv emptyClosureEnv = emptyNameEnv extendClosureEnv :: ClosureEnv -> [(Name,ForeignHValue)] -> ClosureEnv extendClosureEnv cl_env pairs = extendNameEnvList cl_env [ (n, (n,v)) | (n,v) <- pairs] type PkgsLoaded = UniqDFM UnitId LoadedPkgInfo data LoadedPkgInfo = LoadedPkgInfo { loaded_pkg_uid :: !UnitId , loaded_pkg_hs_objs :: ![LibrarySpec] , loaded_pkg_non_hs_objs :: ![LibrarySpec] , loaded_pkg_trans_deps :: UniqDSet UnitId } instance Outputable LoadedPkgInfo where ppr (LoadedPkgInfo uid hs_objs non_hs_objs trans_deps) = vcat [ppr uid , ppr hs_objs , ppr non_hs_objs , ppr trans_deps ] -- | Information we can use to dynamically link modules into the compiler data Linkable = LM { linkableTime :: !UTCTime, -- ^ Time at which this linkable was built -- (i.e. when the bytecodes were produced, -- or the mod date on the files) linkableModule :: !Module, -- ^ The linkable module itself linkableUnlinked :: [Unlinked] -- ^ Those files and chunks of code we have yet to link. -- -- INVARIANT: A valid linkable always has at least one 'Unlinked' item. } type LinkableSet = ModuleEnv Linkable mkLinkableSet :: [Linkable] -> LinkableSet mkLinkableSet ls = mkModuleEnv [(linkableModule l, l) | l <- ls] unionLinkableSet :: LinkableSet -> LinkableSet -> LinkableSet unionLinkableSet = plusModuleEnv_C go where go l1 l2 | linkableTime l1 > linkableTime l2 = l1 | otherwise = l2 instance Outputable Linkable where ppr (LM when_made mod unlinkeds) = (text "LinkableM" <+> parens (text (show when_made)) <+> ppr mod) $$ nest 3 (ppr unlinkeds) type ObjFile = FilePath -- | Objects which have yet to be linked by the compiler data Unlinked = DotO ObjFile -- ^ An object file (.o) | DotA FilePath -- ^ Static archive file (.a) | DotDLL FilePath -- ^ Dynamically linked library file (.so, .dll, .dylib) | BCOs CompiledByteCode [SptEntry] -- ^ A byte-code object, lives only in memory. 
Also -- carries some static pointer table entries which -- should be loaded along with the BCOs. -- See Note [Grand plan for static forms] in -- "GHC.Iface.Tidy.StaticPtrTable". instance Outputable Unlinked where ppr (DotO path) = text "DotO" <+> text path ppr (DotA path) = text "DotA" <+> text path ppr (DotDLL path) = text "DotDLL" <+> text path ppr (BCOs bcos spt) = text "BCOs" <+> ppr bcos <+> ppr spt -- | An entry to be inserted into a module's static pointer table. -- See Note [Grand plan for static forms] in "GHC.Iface.Tidy.StaticPtrTable". data SptEntry = SptEntry Id Fingerprint instance Outputable SptEntry where ppr (SptEntry id fpr) = ppr id <> colon <+> ppr fpr isObjectLinkable :: Linkable -> Bool isObjectLinkable l = not (null unlinked) && all isObject unlinked where unlinked = linkableUnlinked l -- A linkable with no Unlinked's is treated as a BCO. We can -- generate a linkable with no Unlinked's as a result of -- compiling a module in NoBackend mode, and this choice -- happens to work well with checkStability in module GHC. linkableObjs :: Linkable -> [FilePath] linkableObjs l = [ f | DotO f <- linkableUnlinked l ] ------------------------------------------- -- | Is this an actual file on disk we can link in somehow? isObject :: Unlinked -> Bool isObject (DotO _) = True isObject (DotA _) = True isObject (DotDLL _) = True isObject _ = False -- | Is this a bytecode linkable with no file on disk? isInterpretable :: Unlinked -> Bool isInterpretable = not . isObject nameOfObject_maybe :: Unlinked -> Maybe FilePath nameOfObject_maybe (DotO fn) = Just fn nameOfObject_maybe (DotA fn) = Just fn nameOfObject_maybe (DotDLL fn) = Just fn nameOfObject_maybe (BCOs {}) = Nothing -- | Retrieve the filename of the linkable if possible. Panic if it is a byte-code object nameOfObject :: Unlinked -> FilePath nameOfObject o = fromMaybe (pprPanic "nameOfObject" (ppr o)) (nameOfObject_maybe o) -- | Retrieve the compiled byte-code if possible. 
Panic if it is a file-based linkable byteCodeOfObject :: Unlinked -> CompiledByteCode byteCodeOfObject (BCOs bc _) = bc byteCodeOfObject other = pprPanic "byteCodeOfObject" (ppr other) {- ********************************************************************** Loading packages ********************************************************************* -} data LibrarySpec = Objects [FilePath] -- Full path names of set of .o files, including trailing .o -- We allow batched loading to ensure that cyclic symbol -- references can be resolved (see #13786). -- For dynamic objects only, try to find the object -- file in all the directories specified in -- v_Library_paths before giving up. | Archive FilePath -- Full path name of a .a file, including trailing .a | DLL String -- "Unadorned" name of a .DLL/.so -- e.g. On unix "qt" denotes "libqt.so" -- On Windows "burble" denotes "burble.DLL" or "libburble.dll" -- loadDLL is platform-specific and adds the lib/.so/.DLL -- suffixes platform-dependently | DLLPath FilePath -- Absolute or relative pathname to a dynamic library -- (ends with .dll or .so). 
| Framework String -- Only used for darwin, but does no harm instance Outputable LibrarySpec where ppr (Objects objs) = text "Objects" <+> ppr objs ppr (Archive a) = text "Archive" <+> text a ppr (DLL s) = text "DLL" <+> text s ppr (DLLPath f) = text "DLLPath" <+> text f ppr (Framework s) = text "Framework" <+> text s ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Annotation.hs0000644000000000000000000013736314472400112021552 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Parser.Annotation ( -- * Core Exact Print Annotation types AnnKeywordId(..), EpaComment(..), EpaCommentTok(..), IsUnicodeSyntax(..), unicodeAnn, HasE(..), -- * In-tree Exact Print Annotations AddEpAnn(..), EpaLocation(..), epaLocationRealSrcSpan, epaLocationFromSrcAnn, TokenLocation(..), DeltaPos(..), deltaPos, getDeltaLine, EpAnn(..), Anchor(..), AnchorOperation(..), spanAsAnchor, realSpanAsAnchor, noAnn, -- ** Comments in Annotations EpAnnComments(..), LEpaComment, emptyComments, getFollowingComments, setFollowingComments, setPriorComments, EpAnnCO, -- ** Annotations in 'GenLocated' LocatedA, LocatedL, LocatedC, LocatedN, LocatedAn, LocatedP, SrcSpanAnnA, SrcSpanAnnL, SrcSpanAnnP, SrcSpanAnnC, SrcSpanAnnN, SrcSpanAnn'(..), SrcAnn, -- ** Annotation data types used in 'GenLocated' AnnListItem(..), AnnList(..), AnnParen(..), ParenType(..), parenTypeKws, AnnPragma(..), AnnContext(..), NameAnn(..), NameAdornment(..), NoEpAnns(..), AnnSortKey(..), -- ** Trailing annotations in lists TrailingAnn(..), trailingAnnToAddEpAnn, addTrailingAnnToA, addTrailingAnnToL, addTrailingCommaToN, -- ** Utilities for converting between different 'GenLocated' when -- ** we do not care about the annotations. 
la2na, na2la, n2l, l2n, l2l, la2la, reLoc, reLocA, reLocL, reLocC, reLocN, la2r, realSrcSpan, -- ** Building up annotations extraToAnnList, reAnn, reAnnL, reAnnC, addAnns, addAnnsA, widenSpan, widenAnchor, widenAnchorR, widenLocatedAn, -- ** Querying annotations getLocAnn, epAnnAnns, epAnnAnnsL, annParen2AddEpAnn, epAnnComments, -- ** Working with locations of annotations sortLocatedA, mapLocA, combineLocsA, combineSrcSpansA, addCLocA, addCLocAA, -- ** Constructing 'GenLocated' annotation types when we do not care -- about annotations. noLocA, getLocA, noSrcSpanA, noAnnSrcSpan, -- ** Working with comments in annotations noComments, comment, addCommentsToSrcAnn, setCommentsSrcAnn, addCommentsToEpAnn, setCommentsEpAnn, transferAnnsA, commentsOnlyA, removeCommentsA, placeholderRealSpan, ) where import GHC.Prelude import Data.Data import Data.Function (on) import Data.List (sortBy) import Data.Semigroup import GHC.Data.FastString import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Hs.DocString import GHC.Utils.Binary import GHC.Utils.Outputable hiding ( (<>) ) import GHC.Utils.Panic import qualified GHC.Data.Strict as Strict {- Note [exact print annotations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Given a parse tree of a Haskell module, how can we reconstruct the original Haskell source code, retaining all whitespace and source code comments? We need to track the locations of all elements from the original source: this includes keywords such as 'let' / 'in' / 'do' etc as well as punctuation such as commas and braces, and also comments. We collectively refer to this metadata as the "exact print annotations". NON-COMMENT ELEMENTS Intuitively, every AST element directly contains a bag of keywords (keywords can show up more than once in a node: a semicolon i.e. newline can show up multiple times before the next AST element), each of which needs to be associated with its location in the original source code. 
These keywords are recorded directly in the AST element in which they occur, for the GhcPs phase. For any given element in the AST, there is only a set number of keywords that are applicable for it (e.g., you'll never see an 'import' keyword associated with a let-binding.) The set of allowed keywords is documented in a comment associated with the constructor of a given AST element, although the ground truth is in GHC.Parser and GHC.Parser.PostProcess (which actually add the annotations). COMMENT ELEMENTS We associate comments with the lowest (most specific) AST element enclosing them PARSER STATE There are three fields in PState (the parser state) which play a role with annotation comments. > comment_q :: [LEpaComment], > header_comments :: Maybe [LEpaComment], > eof_pos :: Maybe (RealSrcSpan, RealSrcSpan), -- pos, gap to prior token The 'comment_q' field captures comments as they are seen in the token stream, so that when they are ready to be allocated via the parser they are available. The 'header_comments' capture the comments coming at the top of the source file. They are moved there from the `comment_q` when comments are allocated for the first top-level declaration. The 'eof_pos' captures the final location in the file, and the location of the immediately preceding token to the last location, so that the exact-printer can work out how far to advance to add the trailing whitespace. PARSER EMISSION OF ANNOTATIONS The parser interacts with the lexer using the functions > getCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments > getPriorCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments > getFinalCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments The 'getCommentsFor' function is the one used most often. It takes the AST element SrcSpan and removes and returns any comments in the 'comment_q' that are inside the span. 'allocateComments' in 'Lexer' is responsible for making sure we only return comments that actually fit in the 'SrcSpan'. 
The 'getPriorCommentsFor' function is used for top-level declarations,
and removes and returns any comments in the 'comment_q' that either
precede or are included in the given SrcSpan. This is to ensure that
preceding documentation comments are kept together with the declaration
they belong to.

The 'getFinalCommentsFor' function is called right at the end when EOF
is hit. This drains the 'comment_q' completely, and returns the
'header_comments', remaining 'comment_q' entries and the
'eof_pos'. These values are inserted into the 'HsModule' AST element.

The wiki page describing this feature is
https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations

-}

-- --------------------------------------------------------------------

-- | Exact print annotations exist so that tools can perform source to
-- source conversions of Haskell code. They are used to keep track of
-- the various syntactic keywords that are not otherwise captured in the
-- AST.
--
-- The wiki page describing this feature is
-- https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations
-- https://gitlab.haskell.org/ghc/ghc/-/wikis/implementing-trees-that-grow/in-tree-api-annotations
--
-- Note: in general the names of these are taken from the
-- corresponding token, unless otherwise noted
-- See Note [exact print annotations] above for details of the usage
--
-- NOTE(review): the constructor order is significant — 'Ord' is derived,
-- so reordering constructors changes any ordered containers keyed on this
-- type. Do not re-sort the list.
data AnnKeywordId
    = AnnAnyclass
    | AnnAs
    | AnnAt
    | AnnBang  -- ^ '!'
    | AnnBackquote -- ^ '`'
    | AnnBy
    | AnnCase -- ^ case or lambda case
    | AnnCases -- ^ lambda cases
    | AnnClass
    | AnnClose -- ^  '\#)' or '\#-}'  etc
    | AnnCloseB -- ^ '|)'
    | AnnCloseBU -- ^ '|)', unicode variant
    | AnnCloseC -- ^ '}'
    | AnnCloseQ  -- ^ '|]'
    | AnnCloseQU -- ^ '|]', unicode variant
    | AnnCloseP -- ^ ')'
    | AnnClosePH -- ^ '\#)'
    | AnnCloseS -- ^ ']'
    | AnnColon
    | AnnComma -- ^ as a list separator
    | AnnCommaTuple -- ^ in a RdrName for a tuple
    | AnnDarrow -- ^ '=>'
    | AnnDarrowU -- ^ '=>', unicode variant
    | AnnData
    | AnnDcolon -- ^ '::'
    | AnnDcolonU -- ^ '::', unicode variant
    | AnnDefault
    | AnnDeriving
    | AnnDo
    | AnnDot    -- ^ '.'
    | AnnDotdot -- ^ '..'
    | AnnElse
    | AnnEqual
    | AnnExport
    | AnnFamily
    | AnnForall
    | AnnForallU -- ^ Unicode variant
    | AnnForeign
    | AnnFunId -- ^ for function name in matches where there are
               -- multiple equations for the function.
    | AnnGroup
    | AnnHeader -- ^ for CType
    | AnnHiding
    | AnnIf
    | AnnImport
    | AnnIn
    | AnnInfix -- ^ 'infix' or 'infixl' or 'infixr'
    | AnnInstance
    | AnnLam
    | AnnLarrow     -- ^ '<-'
    | AnnLarrowU    -- ^ '<-', unicode variant
    | AnnLet
    | AnnLollyU     -- ^ The '⊸' unicode arrow
    | AnnMdo
    | AnnMinus -- ^ '-'
    | AnnModule
    | AnnNewtype
    | AnnName -- ^ where a name loses its location in the AST, this carries it
    | AnnOf
    | AnnOpen    -- ^ '{-\# DEPRECATED' etc. Opening of pragmas where
                 -- the capitalisation of the string can be changed by
                 -- the user. The actual text used is stored in a
                 -- 'SourceText' on the relevant pragma item.
    | AnnOpenB   -- ^ '(|'
    | AnnOpenBU  -- ^ '(|', unicode variant
    | AnnOpenC   -- ^ '{'
    | AnnOpenE   -- ^ '[e|' or '[e||'
    | AnnOpenEQ  -- ^ '[|'
    | AnnOpenEQU -- ^ '[|', unicode variant
    | AnnOpenP   -- ^ '('
    | AnnOpenS   -- ^ '['
    | AnnOpenPH  -- ^ '(\#'
    | AnnDollar          -- ^ prefix '$'   -- TemplateHaskell
    | AnnDollarDollar    -- ^ prefix '$$'  -- TemplateHaskell
    | AnnPackageName
    | AnnPattern
    | AnnPercent    -- ^ '%'  -- for HsExplicitMult
    | AnnPercentOne -- ^ '%1' -- for HsLinearArrow
    | AnnProc
    | AnnQualified
    | AnnRarrow -- ^ '->'
    | AnnRarrowU -- ^ '->', unicode variant
    | AnnRec
    | AnnRole
    | AnnSafe
    | AnnSemi -- ^ ';'
    | AnnSimpleQuote -- ^ '''
    | AnnSignature
    | AnnStatic -- ^ 'static'
    | AnnStock
    | AnnThen
    | AnnThTyQuote -- ^ double '''
    | AnnTilde -- ^ '~'
    | AnnType
    | AnnUnit -- ^ '()' for types
    | AnnUsing
    | AnnVal  -- ^ e.g. INTEGER
    | AnnValStr -- ^ String value, will need quotes when output
    | AnnVbar -- ^ '|'
    | AnnVia -- ^ 'via'
    | AnnWhere
    | Annlarrowtail -- ^ '-<'
    | AnnlarrowtailU -- ^ '-<', unicode variant
    | Annrarrowtail -- ^ '->'
    | AnnrarrowtailU -- ^ '->', unicode variant
    | AnnLarrowtail -- ^ '-<<'
    | AnnLarrowtailU -- ^ '-<<', unicode variant
    | AnnRarrowtail -- ^ '>>-'
    | AnnRarrowtailU -- ^ '>>-', unicode variant
    deriving (Eq, Ord, Data, Show)

instance Outputable AnnKeywordId where
  ppr x = text (show x)
-- | Certain tokens can have alternate representations when unicode syntax is
-- enabled. This flag is attached to those tokens in the lexer so that the
-- original source representation can be reproduced in the corresponding
-- 'EpAnnotation'
data IsUnicodeSyntax = UnicodeSyntax | NormalSyntax
    deriving (Eq, Ord, Data, Show)

-- | Convert a normal annotation into its unicode equivalent one
unicodeAnn :: AnnKeywordId -> AnnKeywordId
unicodeAnn AnnForall     = AnnForallU
unicodeAnn AnnDcolon     = AnnDcolonU
unicodeAnn AnnLarrow     = AnnLarrowU
unicodeAnn AnnRarrow     = AnnRarrowU
unicodeAnn AnnDarrow     = AnnDarrowU
unicodeAnn Annlarrowtail = AnnlarrowtailU
unicodeAnn Annrarrowtail = AnnrarrowtailU
unicodeAnn AnnLarrowtail = AnnLarrowtailU
unicodeAnn AnnRarrowtail = AnnRarrowtailU
unicodeAnn AnnOpenB      = AnnOpenBU
unicodeAnn AnnCloseB     = AnnCloseBU
unicodeAnn AnnOpenEQ     = AnnOpenEQU
unicodeAnn AnnCloseQ     = AnnCloseQU
unicodeAnn ann           = ann -- keywords with no unicode variant are returned unchanged

-- | Some template haskell tokens have two variants, one with an `e` the other
-- not:
--
-- >  [| or [e|
-- >  [|| or [e||
--
-- This type indicates whether the 'e' is present or not.
data HasE = HasE | NoE
     deriving (Eq, Ord, Data, Show)

-- ---------------------------------------------------------------------

data EpaComment =
  EpaComment
    { ac_tok :: EpaCommentTok
    , ac_prior_tok :: RealSrcSpan
    -- ^ The location of the prior token, used in exact printing.  The
    -- 'EpaComment' appears as an 'LEpaComment' containing its
    -- location.  The difference between the end of the prior token
    -- and the start of this location is used for the spacing when
    -- exact printing the comment.
    }
    deriving (Eq, Data, Show)

data EpaCommentTok =
  -- Documentation annotations
    EpaDocComment   HsDocString -- ^ a docstring that can be pretty printed using pprHsDocString
  | EpaDocOptions   String      -- ^ doc options (prune, ignore-exports, etc)
  | EpaLineComment  String      -- ^ comment starting by "--"
  | EpaBlockComment String      -- ^ comment in {- -}
  | EpaEofComment               -- ^ empty comment, capturing
                                -- location of EOF
  -- See #19697 for a discussion of EpaEofComment's use and how it
  -- should be removed in favour of capturing it in the location for
  -- 'Located HsModule' in the parser.
    deriving (Eq, Data, Show)
-- Note: these are based on the Token versions, but the Token type is
-- defined in GHC.Parser.Lexer and bringing it in here would create a loop

instance Outputable EpaComment where
  ppr x = text (show x)

-- ---------------------------------------------------------------------

-- | Captures an annotation, storing the @'AnnKeywordId'@ and its
-- location.  The parser only ever inserts @'EpaLocation'@ fields with a
-- RealSrcSpan being the original location of the annotation in the
-- source file.
-- The @'EpaLocation'@ can also store a delta position if the AST has been
-- modified and needs to be pretty printed again.
-- The usual way an 'AddEpAnn' is created is using the 'mj' ("make
-- jump") function, and then it can be inserted into the appropriate
-- annotation.
data AddEpAnn = AddEpAnn AnnKeywordId EpaLocation deriving (Data,Eq)
-- | The anchor for an @'AnnKeywordId'@. The Parser inserts the
-- @'EpaSpan'@ variant, giving the exact location of the original item
-- in the parsed source.  This can be replaced by the @'EpaDelta'@
-- version, to provide a position for the item relative to the end of
-- the previous item in the source.  This is useful when editing an
-- AST prior to exact printing the changed one. The list of comments
-- in the @'EpaDelta'@ variant captures any comments between the prior
-- output and the thing being marked here, since we cannot otherwise
-- sort the relative order.
data EpaLocation = EpaSpan !RealSrcSpan
                 | EpaDelta !DeltaPos ![LEpaComment]
               deriving (Data,Eq)

-- | Tokens embedded in the AST have an EpaLocation, unless they come from
-- generated code (e.g. by TH).
data TokenLocation = NoTokenLoc | TokenLoc !EpaLocation
               deriving (Data,Eq)

-- | Spacing between output items when exact printing.  It captures
-- the spacing from the current print position on the page to the
-- position required for the thing about to be printed.  This is
-- either on the same line in which case is is simply the number of
-- spaces to emit, or it is some number of lines down, with a given
-- column offset.  The exact printing algorithm keeps track of the
-- column offset pertaining to the current anchor position, so the
-- `deltaColumn` is the additional spaces to add in this case.  See
-- https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations for
-- details.
data DeltaPos
  = SameLine { deltaColumn :: !Int }
  | DifferentLine
      { deltaLine   :: !Int, -- ^ deltaLine should always be > 0
        deltaColumn :: !Int
      } deriving (Show,Eq,Ord,Data)

-- | Smart constructor for a 'DeltaPos'. It preserves the invariant
-- that for the 'DifferentLine' constructor 'deltaLine' is always > 0.
deltaPos :: Int -> Int -> DeltaPos
deltaPos l c = case l of
  0 -> SameLine c
  _ -> DifferentLine l c

-- | Number of lines the position is offset by; zero for 'SameLine'.
getDeltaLine :: DeltaPos -> Int
getDeltaLine (SameLine _) = 0
getDeltaLine (DifferentLine r _) = r

-- | Used in the parser only, extract the 'RealSrcSpan' from an
-- 'EpaLocation'. The parser will never insert a 'DeltaPos', so the
-- partial function is safe.
epaLocationRealSrcSpan :: EpaLocation -> RealSrcSpan
epaLocationRealSrcSpan (EpaSpan r) = r
epaLocationRealSrcSpan (EpaDelta _ _) = panic "epaLocationRealSrcSpan"

-- | Extract an 'EpaLocation' from an annotated span, preferring the
-- anchor of the annotation when one is present.
epaLocationFromSrcAnn :: SrcAnn ann -> EpaLocation
epaLocationFromSrcAnn (SrcSpanAnn EpAnnNotUsed l) = EpaSpan (realSrcSpan l)
epaLocationFromSrcAnn (SrcSpanAnn (EpAnn anc _ _) _) = EpaSpan (anchor anc)

instance Outputable EpaLocation where
  ppr (EpaSpan r) = text "EpaSpan" <+> ppr r
  ppr (EpaDelta d cs) = text "EpaDelta" <+> ppr d <+> ppr cs

instance Outputable AddEpAnn where
  ppr (AddEpAnn kw ss) = text "AddEpAnn" <+> ppr kw <+> ppr ss

-- ---------------------------------------------------------------------

-- | The exact print annotations (EPAs) are kept in the HsSyn AST for
-- the GhcPs phase. We do not always have EPAs though, only for code
-- that has been parsed as they do not exist for generated
-- code. This type captures that they may be missing.
--
-- A goal of the annotations is that an AST can be edited, including
-- moving subtrees from one place to another, duplicating them, and so
-- on.  This means that each fragment must be self-contained.  To this
-- end, each annotated fragment keeps track of the anchor position it
-- was originally captured at, being simply the start span of the
-- topmost element of the ast fragment.  This gives us a way to later
-- re-calculate all Located items in this layer of the AST, as well as
-- any annotations captured. The comments associated with the AST
-- fragment are also captured here.
--
-- The 'ann' type parameter allows this general structure to be
-- specialised to the specific set of locations of original exact
-- print annotation elements.  So for 'HsLet' we have
--
--    type instance XLet GhcPs = EpAnn AnnsLet
--    data AnnsLet
--      = AnnsLet {
--          alLet :: EpaLocation,
--          alIn :: EpaLocation
--          } deriving Data
--
-- The spacing between the items under the scope of a given EpAnn is
-- normally derived from the original 'Anchor'.  But if a sub-element
-- is not in its original position, the required spacing can be
-- directly captured in the 'anchor_op' field of the 'entry' Anchor.
-- This allows us to freely move elements around, and stitch together
-- new AST fragments out of old ones, and have them still printed out
-- in a precise way.
data EpAnn ann
  = EpAnn { entry    :: !Anchor
           -- ^ Base location for the start of the syntactic element
           -- holding the annotations.
          , anns     :: !ann -- ^ Annotations added by the Parser
          , comments :: !EpAnnComments
              -- ^ Comments enclosed in the SrcSpan of the element
              -- this `EpAnn` is attached to
          }
  | EpAnnNotUsed -- ^ No Annotation for generated code,
                 -- e.g. from TH, deriving, etc.
        deriving (Data, Eq, Functor)

-- | An 'Anchor' records the base location for the start of the
-- syntactic element holding the annotations, and is used as the point
-- of reference for calculating delta positions for contained
-- annotations.
-- It is also normally used as the reference point for the spacing of
-- the element relative to its container. If it is moved, that
-- relationship is tracked in the 'anchor_op' instead.
data Anchor = Anchor { anchor :: RealSrcSpan
                          -- ^ Base location for the start of
                          -- the syntactic element holding
                          -- the annotations.
                     , anchor_op :: AnchorOperation }
        deriving (Data, Eq, Show)

-- | If tools modify the parsed source, the 'MovedAnchor' variant can
-- directly provide the spacing for this item relative to the previous
-- one when printing. This allows AST fragments with a particular
-- anchor to be freely moved, without worrying about recalculating the
-- appropriate anchor span.
data AnchorOperation = UnchangedAnchor
                     | MovedAnchor DeltaPos
        deriving (Data, Eq, Show)

-- | Wrap a 'SrcSpan' as an 'Anchor' that is still in its original position.
spanAsAnchor :: SrcSpan -> Anchor
spanAsAnchor s  = Anchor (realSrcSpan s) UnchangedAnchor

-- | Wrap a 'RealSrcSpan' as an 'Anchor' that is still in its original position.
realSpanAsAnchor :: RealSrcSpan -> Anchor
realSpanAsAnchor s  = Anchor s UnchangedAnchor

-- ---------------------------------------------------------------------

-- | When we are parsing we add comments that belong a particular AST
-- element, and print them together with the element, interleaving
-- them into the output stream.  But when editing the AST to move
-- fragments around it is useful to be able to first separate the
-- comments into those occurring before the AST element and those
-- following it.  The 'EpaCommentsBalanced' constructor is used to do
-- this. The GHC parser will only insert the 'EpaComments' form.
data EpAnnComments = EpaComments
                        { priorComments :: ![LEpaComment] }
                   | EpaCommentsBalanced
                        { priorComments :: ![LEpaComment]
                        , followingComments :: ![LEpaComment] }
        deriving (Data, Eq)

type LEpaComment = GenLocated Anchor EpaComment

-- | The empty (unbalanced) comment collection.
emptyComments :: EpAnnComments
emptyComments = EpaComments []

-- ---------------------------------------------------------------------
-- Annotations attached to a 'SrcSpan'.
-- ---------------------------------------------------------------------

-- | The 'SrcSpanAnn\'' type wraps a normal 'SrcSpan', together with
-- an extra annotation type. This is mapped to a specific `GenLocated`
-- usage in the AST through the `XRec` and `Anno` type families.
-- Important that the fields are strict as these live inside L nodes which
-- are live for a long time.
data SrcSpanAnn' a = SrcSpanAnn { ann :: !a, locA :: !SrcSpan }
        deriving (Data, Eq)
-- See Note [XRec and Anno in the AST]

-- | We mostly use 'SrcSpanAnn\'' with an 'EpAnn\''
type SrcAnn ann = SrcSpanAnn' (EpAnn ann)

type LocatedA = GenLocated SrcSpanAnnA
type LocatedN = GenLocated SrcSpanAnnN

type LocatedL = GenLocated SrcSpanAnnL
type LocatedP = GenLocated SrcSpanAnnP
type LocatedC = GenLocated SrcSpanAnnC

type SrcSpanAnnA = SrcAnn AnnListItem
type SrcSpanAnnN = SrcAnn NameAnn

type SrcSpanAnnL = SrcAnn AnnList
type SrcSpanAnnP = SrcAnn AnnPragma
type SrcSpanAnnC = SrcAnn AnnContext

-- | General representation of a 'GenLocated' type carrying a
-- parameterised annotation type.
type LocatedAn an = GenLocated (SrcAnn an)

{-
Note [XRec and Anno in the AST]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The exact print annotations are captured directly inside the AST, using
TTG extension points. However certain annotations need to be captured
on the Located versions too.  While there is a general form for these,
captured in the type SrcSpanAnn', there are also specific usages in
different contexts.

Some of the particular use cases are

1) RdrNames, which can have additional items such as backticks or parens

2) Items which occur in lists, and the annotation relates purely
to its usage inside a list.

See the section above this note for the rest.

The Anno type family maps the specific SrcSpanAnn' variant for a given
item.

So

  type instance XRec (GhcPass p) a = GenLocated (Anno a) a
  type instance Anno RdrName = SrcSpanAnnN
  type LocatedN = GenLocated SrcSpanAnnN

meaning we can have type LocatedN RdrName

-}

-- ---------------------------------------------------------------------
-- Annotations for items in a list
-- ---------------------------------------------------------------------
-- | Captures the location of punctuation occurring between items,
-- normally in a list.  It is captured as a trailing annotation.
data TrailingAnn
  = AddSemiAnn EpaLocation    -- ^ Trailing ';'
  | AddCommaAnn EpaLocation   -- ^ Trailing ','
  | AddVbarAnn EpaLocation    -- ^ Trailing '|'
  deriving (Data, Eq)

instance Outputable TrailingAnn where
  ppr (AddSemiAnn ss)  = text "AddSemiAnn"  <+> ppr ss
  ppr (AddCommaAnn ss) = text "AddCommaAnn" <+> ppr ss
  ppr (AddVbarAnn ss)  = text "AddVbarAnn"  <+> ppr ss

-- | Annotation for items appearing in a list. They can have one or
-- more trailing punctuations items, such as commas or semicolons.
data AnnListItem
  = AnnListItem {
      lann_trailing  :: [TrailingAnn]
      }
  deriving (Data, Eq)

-- ---------------------------------------------------------------------
-- Annotations for the context of a list of items
-- ---------------------------------------------------------------------

-- | Annotation for the "container" of a list. This captures
-- surrounding items such as braces if present, and introductory
-- keywords such as 'where'.
data AnnList
  = AnnList {
      al_anchor    :: Maybe Anchor, -- ^ start point of a list having layout
      al_open      :: Maybe AddEpAnn,
      al_close     :: Maybe AddEpAnn,
      al_rest      :: [AddEpAnn], -- ^ context, such as 'where' keyword
      al_trailing  :: [TrailingAnn] -- ^ items appearing after the
                                    -- list, such as '=>' for a
                                    -- context
      } deriving (Data,Eq)

-- ---------------------------------------------------------------------
-- Annotations for parenthesised elements, such as tuples, lists
-- ---------------------------------------------------------------------

-- | exact print annotation for an item having surrounding "brackets", such as
-- tuples or lists
data AnnParen
  = AnnParen {
      ap_adornment :: ParenType,
      ap_open      :: EpaLocation,
      ap_close     :: EpaLocation
      } deriving (Data)

-- | Detail of the "brackets" used in an 'AnnParen' exact print annotation.
data ParenType
  = AnnParens       -- ^ '(', ')'
  | AnnParensHash   -- ^ '(#', '#)'
  | AnnParensSquare -- ^ '[', ']'
  deriving (Eq, Ord, Data)

-- | Maps the 'ParenType' to the related opening and closing
-- AnnKeywordId. Used when actually printing the item.
parenTypeKws :: ParenType -> (AnnKeywordId, AnnKeywordId)
parenTypeKws AnnParens       = (AnnOpenP, AnnCloseP)
parenTypeKws AnnParensHash   = (AnnOpenPH, AnnClosePH)
parenTypeKws AnnParensSquare = (AnnOpenS, AnnCloseS)

-- ---------------------------------------------------------------------

-- | Exact print annotation for the 'Context' data type.
data AnnContext
  = AnnContext {
      ac_darrow    :: Maybe (IsUnicodeSyntax, EpaLocation),
                      -- ^ location and encoding of the '=>', if present.
      ac_open      :: [EpaLocation], -- ^ zero or more opening parentheses.
      ac_close     :: [EpaLocation]  -- ^ zero or more closing parentheses.
      } deriving (Data)

-- ---------------------------------------------------------------------
-- Annotations for names
-- ---------------------------------------------------------------------

-- | exact print annotations for a 'RdrName'.  There are many kinds of
-- adornment that can be attached to a given 'RdrName'. This type
-- captures them, as detailed on the individual constructors.
data NameAnn
  -- | Used for a name with an adornment, so '`foo`', '(bar)'
  = NameAnn {
      nann_adornment :: NameAdornment,
      nann_open      :: EpaLocation,
      nann_name      :: EpaLocation,
      nann_close     :: EpaLocation,
      nann_trailing  :: [TrailingAnn]
      }
  -- | Used for @(,,,)@, or @(#,,,#)#
  | NameAnnCommas {
      nann_adornment :: NameAdornment,
      nann_open      :: EpaLocation,
      nann_commas    :: [EpaLocation],
      nann_close     :: EpaLocation,
      nann_trailing  :: [TrailingAnn]
      }
  -- | Used for @(# | | #)@
  | NameAnnBars {
      nann_adornment :: NameAdornment,
      nann_open      :: EpaLocation,
      nann_bars      :: [EpaLocation],
      nann_close     :: EpaLocation,
      nann_trailing  :: [TrailingAnn]
      }
  -- | Used for @()@, @(##)@, @[]@
  | NameAnnOnly {
      nann_adornment :: NameAdornment,
      nann_open      :: EpaLocation,
      nann_close     :: EpaLocation,
      nann_trailing  :: [TrailingAnn]
      }
  -- | Used for @->@, as an identifier
  | NameAnnRArrow {
      nann_name      :: EpaLocation,
      nann_trailing  :: [TrailingAnn]
      }
  -- | Used for an item with a leading @'@. The annotation for
  -- unquoted item is stored in 'nann_quoted'.
  | NameAnnQuote {
      nann_quote     :: EpaLocation,
      nann_quoted    :: SrcSpanAnnN,
      nann_trailing  :: [TrailingAnn]
      }
  -- | Used when adding a 'TrailingAnn' to an existing 'LocatedN'
  -- which has no Api Annotation (via the 'EpAnnNotUsed' constructor.
  | NameAnnTrailing {
      nann_trailing  :: [TrailingAnn]
      }
  deriving (Data, Eq)

-- | A 'NameAnn' can capture the locations of surrounding adornments,
-- such as parens or backquotes. This data type identifies what
-- particular pair are being used.
data NameAdornment
  = NameParens -- ^ '(' ')'
  | NameParensHash -- ^ '(#' '#)'
  | NameBackquotes -- ^ '`'
  | NameSquare -- ^ '[' ']'
  deriving (Eq, Ord, Data)

-- ---------------------------------------------------------------------

-- | exact print annotation used for capturing the locations of
-- annotations in pragmas.
data AnnPragma
  = AnnPragma {
      apr_open      :: AddEpAnn,
      apr_close     :: AddEpAnn,
      apr_rest      :: [AddEpAnn]
      } deriving (Data,Eq)

-- ---------------------------------------------------------------------

-- | Captures the sort order of sub elements. This is needed when the
-- sub-elements have been split (as in a HsLocalBind which holds separate
-- binds and sigs) or for infix patterns where the order has been
-- re-arranged. It is captured explicitly so that after the Delta phase a
-- SrcSpan is used purely as an index into the annotations, allowing
-- transformations of the AST including the introduction of new Located
-- items or re-arranging existing ones.
data AnnSortKey
  = NoAnnSortKey
  | AnnSortKey [RealSrcSpan]
  deriving (Data, Eq)

-- ---------------------------------------------------------------------

-- | Convert a 'TrailingAnn' to an 'AddEpAnn'
trailingAnnToAddEpAnn :: TrailingAnn -> AddEpAnn
trailingAnnToAddEpAnn (AddSemiAnn ss)  = AddEpAnn AnnSemi ss
trailingAnnToAddEpAnn (AddCommaAnn ss) = AddEpAnn AnnComma ss
trailingAnnToAddEpAnn (AddVbarAnn ss)  = AddEpAnn AnnVbar ss

-- | Helper function used in the parser to add a 'TrailingAnn' items
-- to an existing annotation.
-- For 'EpAnnNotUsed' a fresh annotation anchored at the given span is
-- created; otherwise the trailing item is appended and the comments merged.
addTrailingAnnToL :: SrcSpan -> TrailingAnn -> EpAnnComments
                  -> EpAnn AnnList -> EpAnn AnnList
addTrailingAnnToL s t cs EpAnnNotUsed
  = EpAnn (spanAsAnchor s) (AnnList (Just $ spanAsAnchor s) Nothing Nothing [] [t]) cs
addTrailingAnnToL _ t cs n = n { anns = addTrailing (anns n)
                               , comments = comments n <> cs }
  where
    -- See Note [list append in addTrailing*]
    addTrailing n = n { al_trailing = al_trailing n ++ [t]}

-- | Helper function used in the parser to add a 'TrailingAnn' items
-- to an existing annotation.
addTrailingAnnToA :: SrcSpan -> TrailingAnn -> EpAnnComments
                  -> EpAnn AnnListItem -> EpAnn AnnListItem
addTrailingAnnToA s t cs EpAnnNotUsed
  = EpAnn (spanAsAnchor s) (AnnListItem [t]) cs
addTrailingAnnToA _ t cs n = n { anns = addTrailing (anns n)
                               , comments = comments n <> cs }
  where
    -- See Note [list append in addTrailing*]
    addTrailing n = n { lann_trailing = lann_trailing n ++ [t] }

-- | Helper function used in the parser to add a comma location to an
-- existing annotation.
addTrailingCommaToN :: SrcSpan -> EpAnn NameAnn -> EpaLocation -> EpAnn NameAnn
addTrailingCommaToN s EpAnnNotUsed l
  = EpAnn (spanAsAnchor s) (NameAnnTrailing [AddCommaAnn l]) emptyComments
addTrailingCommaToN _ n l = n { anns = addTrailing (anns n) l }
  where
    -- See Note [list append in addTrailing*]
    addTrailing :: NameAnn -> EpaLocation -> NameAnn
    addTrailing n l = n { nann_trailing = nann_trailing n ++ [AddCommaAnn l]}

{- Note [list append in addTrailing*]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The addTrailingAnnToL, addTrailingAnnToA and addTrailingCommaToN
functions are used to add a separator for an item when it occurs in a
list.  So they are used to capture a comma, vbar, semicolon and similar.

In general, a given element will have zero or one of these.  In
extreme (test) cases, there may be multiple semicolons.

In exact printing we sometimes convert the EpaLocation variant for an
trailing annotation to the EpaDelta variant, which cannot be sorted.

Hence it is critical that these annotations are captured in the order
they appear in the original source file.

And so we use the less efficient list append to preserve the order,
knowing that in most cases the original list is empty.
-}

-- ---------------------------------------------------------------------

-- |Helper function (temporary) during transition of names
-- Discards any annotations
l2n :: LocatedAn a1 a2 -> LocatedN a2
l2n (L loc e) = L (noAnnSrcSpan (locA loc)) e

n2l :: LocatedN a -> LocatedA a
n2l (L loc e) = L (na2la loc) e

-- |Helper function (temporary) during transition of names
-- Discards any annotations
la2na :: SrcSpanAnn' a -> SrcSpanAnnN
la2na = noAnnSrcSpan . locA

-- |Helper function (temporary) during transition of names
-- Discards any annotations
la2la :: LocatedAn ann1 a2 -> LocatedAn ann2 a2
la2la (L loc e) = L (noAnnSrcSpan (locA loc)) e

l2l :: SrcSpanAnn' a -> SrcAnn ann
l2l = noAnnSrcSpan . locA

-- |Helper function (temporary) during transition of names
-- Discards any annotations
na2la :: SrcSpanAnn' a -> SrcAnn ann
na2la = noAnnSrcSpan . locA

-- | Drop the annotation, keeping only the plain 'SrcSpan'.
reLoc :: LocatedAn a e -> Located e
reLoc (L (SrcSpanAnn _ sp) e) = L sp e

-- | Wrap a plain 'Located' thing with an empty annotation.
reLocA :: Located e -> LocatedAn ann e
reLocA (L sp e) = L (SrcSpanAnn EpAnnNotUsed sp) e

reLocL :: LocatedN e -> LocatedA e
reLocL (L loc e) = L (na2la loc) e

reLocC :: LocatedN e -> LocatedC e
reLocC (L loc e) = L (na2la loc) e

reLocN :: LocatedN a -> Located a
reLocN (L (SrcSpanAnn _ sp) e) = L sp e

-- ---------------------------------------------------------------------

realSrcSpan :: SrcSpan -> RealSrcSpan
realSrcSpan (RealSrcSpan s _) = s
realSrcSpan _ = mkRealSrcSpan badLoc badLoc -- AZ temporary
  where
    badLoc = mkRealSrcLoc (fsLit "foo") (-1) (-1)

la2r :: SrcSpanAnn' a -> RealSrcSpan
la2r = realSrcSpan . locA

-- | Append extra annotations to the 'al_rest' field of an 'AnnList'.
extraToAnnList :: AnnList -> [AddEpAnn] -> AnnList
extraToAnnList al extras = al { al_rest = al_rest al ++ extras }

-- | Attach trailing items and comments to a 'Located' thing, giving a
-- 'LocatedA' anchored at its original span.
reAnn :: [TrailingAnn] -> EpAnnComments -> Located a -> LocatedA a
reAnn trailing cs (L sp e)
  = L (SrcSpanAnn (EpAnn (spanAsAnchor sp) (AnnListItem trailing) cs) sp) e

reAnnC :: AnnContext -> EpAnnComments -> Located a -> LocatedC a
reAnnC an cs (L sp e)
  = L (SrcSpanAnn (EpAnn (spanAsAnchor sp) an cs) sp) e

reAnnL :: ann -> EpAnnComments -> Located e -> GenLocated (SrcAnn ann) e
reAnnL an cs (L sp e)
  = L (SrcSpanAnn (EpAnn (spanAsAnchor sp) an cs) sp) e

getLocAnn :: Located a -> SrcSpanAnnA
getLocAnn (L sp _) = SrcSpanAnn EpAnnNotUsed sp

getLocA :: GenLocated (SrcSpanAnn' a) e -> SrcSpan
getLocA (L (SrcSpanAnn _ sp) _) = sp

noLocA :: a -> LocatedAn an a
noLocA e = L noSrcSpanA e

noAnnSrcSpan :: SrcSpan -> SrcAnn ann
noAnnSrcSpan sp = SrcSpanAnn EpAnnNotUsed sp

noSrcSpanA :: SrcAnn ann
noSrcSpanA = noAnnSrcSpan noSrcSpan

-- | Short form for 'EpAnnNotUsed'
noAnn :: EpAnn a
noAnn = EpAnnNotUsed

-- | Append annotations and comments to an 'EpAnn', widening its anchor
-- to cover the new annotations.  An unused annotation stays unused only
-- when there is nothing to add.
addAnns :: EpAnn [AddEpAnn] -> [AddEpAnn] -> EpAnnComments -> EpAnn [AddEpAnn]
addAnns (EpAnn l as1 cs) as2 cs2
  = let allAnns = as1 ++ as2
    in EpAnn (widenAnchor l allAnns) allAnns (cs <> cs2)
addAnns EpAnnNotUsed [] (EpaComments []) = EpAnnNotUsed
addAnns EpAnnNotUsed [] (EpaCommentsBalanced [] []) = EpAnnNotUsed
addAnns EpAnnNotUsed newAnns newCs
  = EpAnn (Anchor placeholderRealSpan UnchangedAnchor) newAnns newCs

-- AZ:TODO use widenSpan here too
addAnnsA :: SrcSpanAnnA -> [TrailingAnn] -> EpAnnComments -> SrcSpanAnnA
addAnnsA (SrcSpanAnn (EpAnn l itm cs) sp) trailing cs2
  = SrcSpanAnn (EpAnn l (AnnListItem (lann_trailing itm ++ trailing)) (cs <> cs2)) sp
addAnnsA (SrcSpanAnn EpAnnNotUsed sp) [] (EpaComments [])
  = SrcSpanAnn EpAnnNotUsed sp
addAnnsA (SrcSpanAnn EpAnnNotUsed sp) [] (EpaCommentsBalanced [] [])
  = SrcSpanAnn EpAnnNotUsed sp
addAnnsA (SrcSpanAnn EpAnnNotUsed sp) trailing cs
  = SrcSpanAnn (EpAnn (spanAsAnchor sp) (AnnListItem trailing) cs) sp

-- | The annotations need to all come after the anchor.  Make sure
-- this is the case.
widenSpan :: SrcSpan -> [AddEpAnn] -> SrcSpan
widenSpan sp annList
  = foldl combineSrcSpans sp
      -- 'EpaDelta' entries carry no concrete span, so they are skipped
      -- by the pattern in the comprehension.
      [ RealSrcSpan s Strict.Nothing | AddEpAnn _ (EpaSpan s) <- annList ]
-- | The annotations need to all come after the anchor.  Make sure
-- this is the case.
widenRealSpan :: RealSrcSpan -> [AddEpAnn] -> RealSrcSpan
widenRealSpan sp annList
  -- 'EpaDelta' entries carry no concrete span, so they are skipped
  -- by the pattern in the comprehension.
  = foldl combineRealSrcSpans sp [ s | AddEpAnn _ (EpaSpan s) <- annList ]

-- | Widen an 'Anchor' to cover the spans of the given annotations,
-- keeping its 'AnchorOperation' unchanged.
widenAnchor :: Anchor -> [AddEpAnn] -> Anchor
widenAnchor (Anchor sp op) annList = Anchor (widenRealSpan sp annList) op

-- | Widen an 'Anchor' to also cover the given 'RealSrcSpan'.
widenAnchorR :: Anchor -> RealSrcSpan -> Anchor
widenAnchorR (Anchor sp op) r = Anchor (combineRealSrcSpans sp r) op

-- | Widen the 'SrcSpan' of an annotated location to cover the given
-- annotations, leaving the annotation payload untouched.
widenLocatedAn :: SrcSpanAnn' an -> [AddEpAnn] -> SrcSpanAnn' an
widenLocatedAn (SrcSpanAnn a l) annList = SrcSpanAnn a (widenSpan l annList)

-- | Extract the annotation payload as a list: empty for
-- 'EpAnnNotUsed', a singleton otherwise.
epAnnAnnsL :: EpAnn a -> [a]
epAnnAnnsL EpAnnNotUsed = []
epAnnAnnsL (EpAnn _ payload _) = [payload]

-- | Extract the 'AddEpAnn' list, with 'EpAnnNotUsed' giving the
-- empty list.
epAnnAnns :: EpAnn [AddEpAnn] -> [AddEpAnn]
epAnnAnns EpAnnNotUsed = []
epAnnAnns (EpAnn _ payload _) = payload

-- | Flatten an 'AnnParen' into its opening and closing 'AddEpAnn's.
annParen2AddEpAnn :: EpAnn AnnParen -> [AddEpAnn]
annParen2AddEpAnn EpAnnNotUsed = []
annParen2AddEpAnn (EpAnn _ (AnnParen pt o c) _)
  = let (kwOpen, kwClose) = parenTypeKws pt
    in [AddEpAnn kwOpen o, AddEpAnn kwClose c]

-- | Extract the comments, with 'EpAnnNotUsed' giving the empty form.
epAnnComments :: EpAnn an -> EpAnnComments
epAnnComments EpAnnNotUsed = EpaComments []
epAnnComments (EpAnn _ _ cs) = cs

-- ---------------------------------------------------------------------
-- sortLocatedA :: [LocatedA a] -> [LocatedA a]
sortLocatedA :: [GenLocated (SrcSpanAnn' a) e] -> [GenLocated (SrcSpanAnn' a) e]
sortLocatedA = sortBy (leftmost_smallest `on` getLocA)

-- | Apply a function under an un-annotated location, producing an
-- annotation-free annotated one.
mapLocA :: (a -> b) -> GenLocated SrcSpan a -> GenLocated (SrcAnn ann) b
mapLocA f (L sp e) = L (noAnnSrcSpan sp) (f e)

-- AZ:TODO: move this somewhere sane

-- | Combine the locations of two located things.
combineLocsA :: Semigroup a => GenLocated (SrcAnn a) e1 -> GenLocated (SrcAnn a) e2 -> SrcAnn a
combineLocsA (L la _) (L lb _) = combineSrcSpansA la lb

-- | Combine two annotated spans; when the merged annotation is
-- present, its anchor is widened to the combined span.
combineSrcSpansA :: Semigroup a => SrcAnn a -> SrcAnn a -> SrcAnn a
combineSrcSpansA (SrcSpanAnn aa la) (SrcSpanAnn ab lb)
  = let l = combineSrcSpans la lb
    in case aa <> ab of
         EpAnnNotUsed -> SrcSpanAnn EpAnnNotUsed l
         EpAnn anc an cs
           -> SrcSpanAnn (EpAnn (widenAnchorR anc (realSrcSpan l)) an cs) l

-- | Combine locations from two 'Located' things and add them to a third thing
addCLocA :: GenLocated (SrcSpanAnn' a) e1 -> GenLocated SrcSpan e2 -> e3 -> GenLocated (SrcAnn ann) e3
addCLocA a b c = L (noAnnSrcSpan (combineSrcSpans (locA (getLoc a)) (getLoc b))) c

addCLocAA :: GenLocated (SrcSpanAnn' a1) e1 -> GenLocated (SrcSpanAnn' a2) e2 -> e3 -> GenLocated (SrcAnn ann) e3
addCLocAA a b c = L (noAnnSrcSpan (combineSrcSpans (locA (getLoc a)) (locA (getLoc b)))) c

-- ---------------------------------------------------------------------
-- Utilities for manipulating EpAnnComments
-- ---------------------------------------------------------------------

-- | The comments following the element; the unbalanced form has none.
getFollowingComments :: EpAnnComments -> [LEpaComment]
getFollowingComments (EpaComments _) = []
getFollowingComments (EpaCommentsBalanced _ cs) = cs

-- | Set the following comments, always producing the balanced form.
-- 'priorComments' is a field of both constructors, so the prior
-- comments carry over unchanged.
setFollowingComments :: EpAnnComments -> [LEpaComment] -> EpAnnComments
setFollowingComments epc cs = EpaCommentsBalanced (priorComments epc) cs

-- | Replace the prior comments, preserving the constructor.
setPriorComments :: EpAnnComments -> [LEpaComment] -> EpAnnComments
setPriorComments (EpaComments _) cs = EpaComments cs
setPriorComments (EpaCommentsBalanced _ following) cs = EpaCommentsBalanced cs following

-- ---------------------------------------------------------------------
-- Comment-only annotations
-- ---------------------------------------------------------------------

type EpAnnCO = EpAnn NoEpAnns -- ^ Api Annotations for comments only

data NoEpAnns = NoEpAnns
  deriving (Data,Eq,Ord)

-- | An 'EpAnnCO' with no comments, anchored at a placeholder span.
noComments :: EpAnnCO
noComments = EpAnn (Anchor placeholderRealSpan UnchangedAnchor) NoEpAnns emptyComments

-- TODO:AZ get rid of this
placeholderRealSpan :: RealSrcSpan
placeholderRealSpan = realSrcLocSpan (mkRealSrcLoc (mkFastString "placeholder") (-1) (-1))

-- | An 'EpAnnCO' holding the given comments, anchored at the given span.
comment :: RealSrcSpan -> EpAnnComments -> EpAnnCO
comment loc cs = EpAnn (Anchor loc UnchangedAnchor) NoEpAnns cs

-- ---------------------------------------------------------------------
-- Utilities for managing comments in an `EpAnn a` structure.
an `EpAnn a` structure. -- --------------------------------------------------------------------- -- | Add additional comments to a 'SrcAnn', used for manipulating the -- AST prior to exact printing the changed one. addCommentsToSrcAnn :: (Monoid ann) => SrcAnn ann -> EpAnnComments -> SrcAnn ann addCommentsToSrcAnn (SrcSpanAnn EpAnnNotUsed loc) cs = SrcSpanAnn (EpAnn (Anchor (realSrcSpan loc) UnchangedAnchor) mempty cs) loc addCommentsToSrcAnn (SrcSpanAnn (EpAnn a an cs) loc) cs' = SrcSpanAnn (EpAnn a an (cs <> cs')) loc -- | Replace any existing comments on a 'SrcAnn', used for manipulating the -- AST prior to exact printing the changed one. setCommentsSrcAnn :: (Monoid ann) => SrcAnn ann -> EpAnnComments -> SrcAnn ann setCommentsSrcAnn (SrcSpanAnn EpAnnNotUsed loc) cs = SrcSpanAnn (EpAnn (Anchor (realSrcSpan loc) UnchangedAnchor) mempty cs) loc setCommentsSrcAnn (SrcSpanAnn (EpAnn a an _) loc) cs = SrcSpanAnn (EpAnn a an cs) loc -- | Add additional comments, used for manipulating the -- AST prior to exact printing the changed one. addCommentsToEpAnn :: (Monoid a) => SrcSpan -> EpAnn a -> EpAnnComments -> EpAnn a addCommentsToEpAnn loc EpAnnNotUsed cs = EpAnn (Anchor (realSrcSpan loc) UnchangedAnchor) mempty cs addCommentsToEpAnn _ (EpAnn a an ocs) ncs = EpAnn a an (ocs <> ncs) -- | Replace any existing comments, used for manipulating the -- AST prior to exact printing the changed one. setCommentsEpAnn :: (Monoid a) => SrcSpan -> EpAnn a -> EpAnnComments -> EpAnn a setCommentsEpAnn loc EpAnnNotUsed cs = EpAnn (Anchor (realSrcSpan loc) UnchangedAnchor) mempty cs setCommentsEpAnn _ (EpAnn a an _) cs = EpAnn a an cs -- | Transfer comments and trailing items from the annotations in the -- first 'SrcSpanAnnA' argument to those in the second. 
transferAnnsA :: SrcSpanAnnA -> SrcSpanAnnA -> (SrcSpanAnnA, SrcSpanAnnA) transferAnnsA from@(SrcSpanAnn EpAnnNotUsed _) to = (from, to) transferAnnsA (SrcSpanAnn (EpAnn a an cs) l) to = ((SrcSpanAnn (EpAnn a mempty emptyComments) l), to') where to' = case to of (SrcSpanAnn EpAnnNotUsed loc) -> SrcSpanAnn (EpAnn (Anchor (realSrcSpan loc) UnchangedAnchor) an cs) loc (SrcSpanAnn (EpAnn a an' cs') loc) -> SrcSpanAnn (EpAnn a (an' <> an) (cs' <> cs)) loc -- | Remove the exact print annotations payload, leaving only the -- anchor and comments. commentsOnlyA :: Monoid ann => SrcAnn ann -> SrcAnn ann commentsOnlyA (SrcSpanAnn EpAnnNotUsed loc) = SrcSpanAnn EpAnnNotUsed loc commentsOnlyA (SrcSpanAnn (EpAnn a _ cs) loc) = (SrcSpanAnn (EpAnn a mempty cs) loc) -- | Remove the comments, leaving the exact print annotations payload removeCommentsA :: SrcAnn ann -> SrcAnn ann removeCommentsA (SrcSpanAnn EpAnnNotUsed loc) = SrcSpanAnn EpAnnNotUsed loc removeCommentsA (SrcSpanAnn (EpAnn a an _) loc) = (SrcSpanAnn (EpAnn a an emptyComments) loc) -- --------------------------------------------------------------------- -- Semigroup instances, to allow easy combination of annotaion elements -- --------------------------------------------------------------------- instance (Semigroup an) => Semigroup (SrcSpanAnn' an) where (SrcSpanAnn a1 l1) <> (SrcSpanAnn a2 l2) = SrcSpanAnn (a1 <> a2) (combineSrcSpans l1 l2) -- The critical part about the location is its left edge, and all -- annotations must follow it. So we combine them which yields the -- largest span instance (Semigroup a) => Semigroup (EpAnn a) where EpAnnNotUsed <> x = x x <> EpAnnNotUsed = x (EpAnn l1 a1 b1) <> (EpAnn l2 a2 b2) = EpAnn (l1 <> l2) (a1 <> a2) (b1 <> b2) -- The critical part about the anchor is its left edge, and all -- annotations must follow it. 
So we combine them which yields the -- largest span instance Ord Anchor where compare (Anchor s1 _) (Anchor s2 _) = compare s1 s2 instance Semigroup Anchor where Anchor r1 o1 <> Anchor r2 _ = Anchor (combineRealSrcSpans r1 r2) o1 instance Semigroup EpAnnComments where EpaComments cs1 <> EpaComments cs2 = EpaComments (cs1 ++ cs2) EpaComments cs1 <> EpaCommentsBalanced cs2 as2 = EpaCommentsBalanced (cs1 ++ cs2) as2 EpaCommentsBalanced cs1 as1 <> EpaComments cs2 = EpaCommentsBalanced (cs1 ++ cs2) as1 EpaCommentsBalanced cs1 as1 <> EpaCommentsBalanced cs2 as2 = EpaCommentsBalanced (cs1 ++ cs2) (as1++as2) instance (Monoid a) => Monoid (EpAnn a) where mempty = EpAnnNotUsed instance Semigroup NoEpAnns where _ <> _ = NoEpAnns instance Semigroup AnnListItem where (AnnListItem l1) <> (AnnListItem l2) = AnnListItem (l1 <> l2) instance Monoid AnnListItem where mempty = AnnListItem [] instance Semigroup AnnList where (AnnList a1 o1 c1 r1 t1) <> (AnnList a2 o2 c2 r2 t2) = AnnList (a1 <> a2) (c o1 o2) (c c1 c2) (r1 <> r2) (t1 <> t2) where -- Left biased combination for the open and close annotations c Nothing x = x c x Nothing = x c f _ = f instance Monoid AnnList where mempty = AnnList Nothing Nothing Nothing [] [] instance Semigroup NameAnn where _ <> _ = panic "semigroup nameann" instance Monoid NameAnn where mempty = NameAnnTrailing [] instance Semigroup AnnSortKey where NoAnnSortKey <> x = x x <> NoAnnSortKey = x AnnSortKey ls1 <> AnnSortKey ls2 = AnnSortKey (ls1 <> ls2) instance Monoid AnnSortKey where mempty = NoAnnSortKey instance (Outputable a) => Outputable (EpAnn a) where ppr (EpAnn l a c) = text "EpAnn" <+> ppr l <+> ppr a <+> ppr c ppr EpAnnNotUsed = text "EpAnnNotUsed" instance Outputable NoEpAnns where ppr NoEpAnns = text "NoEpAnns" instance Outputable Anchor where ppr (Anchor a o) = text "Anchor" <+> ppr a <+> ppr o instance Outputable AnchorOperation where ppr UnchangedAnchor = text "UnchangedAnchor" ppr (MovedAnchor d) = text "MovedAnchor" <+> ppr d instance 
Outputable DeltaPos where ppr (SameLine c) = text "SameLine" <+> ppr c ppr (DifferentLine l c) = text "DifferentLine" <+> ppr l <+> ppr c instance Outputable (GenLocated Anchor EpaComment) where ppr (L l c) = text "L" <+> ppr l <+> ppr c instance Outputable EpAnnComments where ppr (EpaComments cs) = text "EpaComments" <+> ppr cs ppr (EpaCommentsBalanced cs ts) = text "EpaCommentsBalanced" <+> ppr cs <+> ppr ts instance (NamedThing (Located a)) => NamedThing (LocatedAn an a) where getName (L l a) = getName (L (locA l) a) instance Outputable AnnContext where ppr (AnnContext a o c) = text "AnnContext" <+> ppr a <+> ppr o <+> ppr c instance Outputable AnnSortKey where ppr NoAnnSortKey = text "NoAnnSortKey" ppr (AnnSortKey ls) = text "AnnSortKey" <+> ppr ls instance Outputable IsUnicodeSyntax where ppr = text . show instance Binary a => Binary (LocatedL a) where -- We do not serialise the annotations put_ bh (L l x) = do put_ bh (locA l) put_ bh x get bh = do l <- get bh x <- get bh return (L (noAnnSrcSpan l) x) instance (Outputable a) => Outputable (SrcSpanAnn' a) where ppr (SrcSpanAnn a l) = text "SrcSpanAnn" <+> ppr a <+> ppr l instance (Outputable a, Outputable e) => Outputable (GenLocated (SrcSpanAnn' a) e) where ppr = pprLocated instance (Outputable a, OutputableBndr e) => OutputableBndr (GenLocated (SrcSpanAnn' a) e) where pprInfixOcc = pprInfixOcc . unLoc pprPrefixOcc = pprPrefixOcc . 
unLoc instance Outputable AnnListItem where ppr (AnnListItem ts) = text "AnnListItem" <+> ppr ts instance Outputable NameAdornment where ppr NameParens = text "NameParens" ppr NameParensHash = text "NameParensHash" ppr NameBackquotes = text "NameBackquotes" ppr NameSquare = text "NameSquare" instance Outputable NameAnn where ppr (NameAnn a o n c t) = text "NameAnn" <+> ppr a <+> ppr o <+> ppr n <+> ppr c <+> ppr t ppr (NameAnnCommas a o n c t) = text "NameAnnCommas" <+> ppr a <+> ppr o <+> ppr n <+> ppr c <+> ppr t ppr (NameAnnBars a o n b t) = text "NameAnnBars" <+> ppr a <+> ppr o <+> ppr n <+> ppr b <+> ppr t ppr (NameAnnOnly a o c t) = text "NameAnnOnly" <+> ppr a <+> ppr o <+> ppr c <+> ppr t ppr (NameAnnRArrow n t) = text "NameAnnRArrow" <+> ppr n <+> ppr t ppr (NameAnnQuote q n t) = text "NameAnnQuote" <+> ppr q <+> ppr n <+> ppr t ppr (NameAnnTrailing t) = text "NameAnnTrailing" <+> ppr t instance Outputable AnnList where ppr (AnnList a o c r t) = text "AnnList" <+> ppr a <+> ppr o <+> ppr c <+> ppr r <+> ppr t instance Outputable AnnPragma where ppr (AnnPragma o c r) = text "AnnPragma" <+> ppr o <+> ppr c <+> ppr r ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/CharClass.hs0000644000000000000000000002027414472400112021273 0ustar0000000000000000-- Character classification module GHC.Parser.CharClass ( is_ident -- Char# -> Bool , is_symbol -- Char# -> Bool , is_any -- Char# -> Bool , is_space -- Char# -> Bool , is_lower -- Char# -> Bool , is_upper -- Char# -> Bool , is_digit -- Char# -> Bool , is_alphanum -- Char# -> Bool , is_decdigit, is_hexdigit, is_octdigit, is_bindigit , hexDigit, octDecDigit ) where import GHC.Prelude import Data.Char ( ord, chr ) import Data.Word import GHC.Utils.Panic -- Bit masks cIdent, cSymbol, cAny, cSpace, cLower, cUpper, cDigit :: Word8 cIdent = 1 cSymbol = 2 cAny = 4 cSpace = 8 cLower = 16 cUpper = 32 cDigit = 64 -- | The predicates below look costly, but aren't, GHC+GCC do a great job -- at the big case below. 
{-# INLINABLE is_ctype #-} is_ctype :: Word8 -> Char -> Bool is_ctype mask c = (charType c .&. mask) /= 0 is_ident, is_symbol, is_any, is_space, is_lower, is_upper, is_digit, is_alphanum :: Char -> Bool is_ident = is_ctype cIdent is_symbol = is_ctype cSymbol is_any = is_ctype cAny is_space = is_ctype cSpace is_lower = is_ctype cLower is_upper = is_ctype cUpper is_digit = is_ctype cDigit is_alphanum = is_ctype (cLower+cUpper+cDigit) -- Utils hexDigit :: Char -> Int hexDigit c | is_decdigit c = ord c - ord '0' | otherwise = ord (to_lower c) - ord 'a' + 10 octDecDigit :: Char -> Int octDecDigit c = ord c - ord '0' is_decdigit :: Char -> Bool is_decdigit c = c >= '0' && c <= '9' is_hexdigit :: Char -> Bool is_hexdigit c = is_decdigit c || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') is_octdigit :: Char -> Bool is_octdigit c = c >= '0' && c <= '7' is_bindigit :: Char -> Bool is_bindigit c = c == '0' || c == '1' to_lower :: Char -> Char to_lower c | c >= 'A' && c <= 'Z' = chr (ord c - (ord 'A' - ord 'a')) | otherwise = c charType :: Char -> Word8 charType c = case c of '\0' -> 0 -- \000 '\1' -> 0 -- \001 '\2' -> 0 -- \002 '\3' -> 0 -- \003 '\4' -> 0 -- \004 '\5' -> 0 -- \005 '\6' -> 0 -- \006 '\7' -> 0 -- \007 '\8' -> 0 -- \010 '\9' -> cSpace -- \t (not allowed in strings, so !cAny) '\10' -> cSpace -- \n (ditto) '\11' -> cSpace -- \v (ditto) '\12' -> cSpace -- \f (ditto) '\13' -> cSpace -- ^M (ditto) '\14' -> 0 -- \016 '\15' -> 0 -- \017 '\16' -> 0 -- \020 '\17' -> 0 -- \021 '\18' -> 0 -- \022 '\19' -> 0 -- \023 '\20' -> 0 -- \024 '\21' -> 0 -- \025 '\22' -> 0 -- \026 '\23' -> 0 -- \027 '\24' -> 0 -- \030 '\25' -> 0 -- \031 '\26' -> 0 -- \032 '\27' -> 0 -- \033 '\28' -> 0 -- \034 '\29' -> 0 -- \035 '\30' -> 0 -- \036 '\31' -> 0 -- \037 '\32' -> cAny .|. cSpace -- '\33' -> cAny .|. cSymbol -- ! '\34' -> cAny -- " '\35' -> cAny .|. cSymbol -- # '\36' -> cAny .|. cSymbol -- $ '\37' -> cAny .|. cSymbol -- % '\38' -> cAny .|. cSymbol -- & '\39' -> cAny .|. 
cIdent -- ' '\40' -> cAny -- ( '\41' -> cAny -- ) '\42' -> cAny .|. cSymbol -- * '\43' -> cAny .|. cSymbol -- + '\44' -> cAny -- , '\45' -> cAny .|. cSymbol -- - '\46' -> cAny .|. cSymbol -- . '\47' -> cAny .|. cSymbol -- / '\48' -> cAny .|. cIdent .|. cDigit -- 0 '\49' -> cAny .|. cIdent .|. cDigit -- 1 '\50' -> cAny .|. cIdent .|. cDigit -- 2 '\51' -> cAny .|. cIdent .|. cDigit -- 3 '\52' -> cAny .|. cIdent .|. cDigit -- 4 '\53' -> cAny .|. cIdent .|. cDigit -- 5 '\54' -> cAny .|. cIdent .|. cDigit -- 6 '\55' -> cAny .|. cIdent .|. cDigit -- 7 '\56' -> cAny .|. cIdent .|. cDigit -- 8 '\57' -> cAny .|. cIdent .|. cDigit -- 9 '\58' -> cAny .|. cSymbol -- : '\59' -> cAny -- ; '\60' -> cAny .|. cSymbol -- < '\61' -> cAny .|. cSymbol -- = '\62' -> cAny .|. cSymbol -- > '\63' -> cAny .|. cSymbol -- ? '\64' -> cAny .|. cSymbol -- @ '\65' -> cAny .|. cIdent .|. cUpper -- A '\66' -> cAny .|. cIdent .|. cUpper -- B '\67' -> cAny .|. cIdent .|. cUpper -- C '\68' -> cAny .|. cIdent .|. cUpper -- D '\69' -> cAny .|. cIdent .|. cUpper -- E '\70' -> cAny .|. cIdent .|. cUpper -- F '\71' -> cAny .|. cIdent .|. cUpper -- G '\72' -> cAny .|. cIdent .|. cUpper -- H '\73' -> cAny .|. cIdent .|. cUpper -- I '\74' -> cAny .|. cIdent .|. cUpper -- J '\75' -> cAny .|. cIdent .|. cUpper -- K '\76' -> cAny .|. cIdent .|. cUpper -- L '\77' -> cAny .|. cIdent .|. cUpper -- M '\78' -> cAny .|. cIdent .|. cUpper -- N '\79' -> cAny .|. cIdent .|. cUpper -- O '\80' -> cAny .|. cIdent .|. cUpper -- P '\81' -> cAny .|. cIdent .|. cUpper -- Q '\82' -> cAny .|. cIdent .|. cUpper -- R '\83' -> cAny .|. cIdent .|. cUpper -- S '\84' -> cAny .|. cIdent .|. cUpper -- T '\85' -> cAny .|. cIdent .|. cUpper -- U '\86' -> cAny .|. cIdent .|. cUpper -- V '\87' -> cAny .|. cIdent .|. cUpper -- W '\88' -> cAny .|. cIdent .|. cUpper -- X '\89' -> cAny .|. cIdent .|. cUpper -- Y '\90' -> cAny .|. cIdent .|. cUpper -- Z '\91' -> cAny -- [ '\92' -> cAny .|. cSymbol -- backslash '\93' -> cAny -- ] '\94' -> cAny .|. 
cSymbol -- ^ '\95' -> cAny .|. cIdent .|. cLower -- _ '\96' -> cAny -- ` '\97' -> cAny .|. cIdent .|. cLower -- a '\98' -> cAny .|. cIdent .|. cLower -- b '\99' -> cAny .|. cIdent .|. cLower -- c '\100' -> cAny .|. cIdent .|. cLower -- d '\101' -> cAny .|. cIdent .|. cLower -- e '\102' -> cAny .|. cIdent .|. cLower -- f '\103' -> cAny .|. cIdent .|. cLower -- g '\104' -> cAny .|. cIdent .|. cLower -- h '\105' -> cAny .|. cIdent .|. cLower -- i '\106' -> cAny .|. cIdent .|. cLower -- j '\107' -> cAny .|. cIdent .|. cLower -- k '\108' -> cAny .|. cIdent .|. cLower -- l '\109' -> cAny .|. cIdent .|. cLower -- m '\110' -> cAny .|. cIdent .|. cLower -- n '\111' -> cAny .|. cIdent .|. cLower -- o '\112' -> cAny .|. cIdent .|. cLower -- p '\113' -> cAny .|. cIdent .|. cLower -- q '\114' -> cAny .|. cIdent .|. cLower -- r '\115' -> cAny .|. cIdent .|. cLower -- s '\116' -> cAny .|. cIdent .|. cLower -- t '\117' -> cAny .|. cIdent .|. cLower -- u '\118' -> cAny .|. cIdent .|. cLower -- v '\119' -> cAny .|. cIdent .|. cLower -- w '\120' -> cAny .|. cIdent .|. cLower -- x '\121' -> cAny .|. cIdent .|. cLower -- y '\122' -> cAny .|. cIdent .|. cLower -- z '\123' -> cAny -- { '\124' -> cAny .|. cSymbol -- | '\125' -> cAny -- } '\126' -> cAny .|. cSymbol -- ~ '\127' -> 0 -- \177 _ -> panic ("charType: " ++ show c) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Errors/Basic.hs0000644000000000000000000000156214472400112021724 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} module GHC.Parser.Errors.Basic where import GHC.Utils.Outputable ( SDoc, text ) -- | The operator symbol in the 'PsOperatorWhitespaceExtConflictMessage' diagnostic. 
data OperatorWhitespaceSymbol = OperatorWhitespaceSymbol_PrefixPercent | OperatorWhitespaceSymbol_PrefixDollar | OperatorWhitespaceSymbol_PrefixDollarDollar pprOperatorWhitespaceSymbol :: OperatorWhitespaceSymbol -> SDoc pprOperatorWhitespaceSymbol = \case OperatorWhitespaceSymbol_PrefixPercent -> text "%" OperatorWhitespaceSymbol_PrefixDollar -> text "$" OperatorWhitespaceSymbol_PrefixDollarDollar -> text "$$" -- | The operator occurrence type in the 'PsOperatorWhitespaceMessage' diagnostic. data OperatorWhitespaceOccurrence = OperatorWhitespaceOccurrence_Prefix | OperatorWhitespaceOccurrence_Suffix | OperatorWhitespaceOccurrence_TightInfix ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Errors/Ppr.hs0000644000000000000000000013033614472400112021446 0ustar0000000000000000{-# LANGUAGE RecordWildCards #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE FlexibleContexts #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic PsMessage module GHC.Parser.Errors.Ppr where import GHC.Prelude import GHC.Driver.Flags import GHC.Parser.Errors.Basic import GHC.Parser.Errors.Types import GHC.Parser.Types import GHC.Types.Basic import GHC.Types.Hint import GHC.Types.Error import GHC.Types.Hint.Ppr (perhapsAsPat) import GHC.Types.SrcLoc import GHC.Types.Name.Reader ( opIsAt, rdrNameOcc, mkUnqual ) import GHC.Types.Name.Occurrence (isSymOcc, occNameFS, varName) import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Data.FastString import GHC.Data.Maybe (catMaybes) import GHC.Hs.Expr (prependQualified, HsExpr(..), LamCaseVariant(..), lamCaseKeyword) import GHC.Hs.Type (pprLHsContext) import GHC.Builtin.Names (allNameStrings) import GHC.Builtin.Types (filterCTuple) import qualified GHC.LanguageExtensions as LangExt import Data.List.NonEmpty (NonEmpty((:|))) instance Diagnostic PsMessage where diagnosticMessage = \case PsUnknownMessage m -> diagnosticMessage m PsHeaderMessage m -> psHeaderMessageDiagnostic m 
PsWarnHaddockInvalidPos -> mkSimpleDecorated $ text "A Haddock comment cannot appear in this position and will be ignored." PsWarnHaddockIgnoreMulti -> mkSimpleDecorated $ text "Multiple Haddock comments for a single entity are not allowed." $$ text "The extraneous comment will be ignored." PsWarnBidirectionalFormatChars ((loc,_,desc) :| xs) -> mkSimpleDecorated $ text "A unicode bidirectional formatting character" <+> parens (text desc) $$ text "was found at offset" <+> ppr (bufPos (psBufPos loc)) <+> text "in the file" $$ (case xs of [] -> empty xs -> text "along with further bidirectional formatting characters at" <+> pprChars xs where pprChars [] = empty pprChars ((loc,_,desc):xs) = text "offset" <+> ppr (bufPos (psBufPos loc)) <> text ":" <+> text desc $$ pprChars xs ) $$ text "Bidirectional formatting characters may be rendered misleadingly in certain editors" PsWarnTab tc -> mkSimpleDecorated $ text "Tab character found here" <> (if tc == 1 then text "" else text ", and in" <+> speakNOf (fromIntegral (tc - 1)) (text "further location")) <> text "." 
PsWarnTransitionalLayout reason -> mkSimpleDecorated $ text "transitional layout will not be accepted in the future:" $$ text (case reason of TransLayout_Where -> "`where' clause at the same depth as implicit layout block" TransLayout_Pipe -> "`|' at the same depth as implicit layout block" ) PsWarnOperatorWhitespaceExtConflict sym -> let mk_prefix_msg extension_name syntax_meaning = text "The prefix use of a" <+> quotes (pprOperatorWhitespaceSymbol sym) <+> text "would denote" <+> text syntax_meaning $$ nest 2 (text "were the" <+> text extension_name <+> text "extension enabled.") in mkSimpleDecorated $ case sym of OperatorWhitespaceSymbol_PrefixPercent -> mk_prefix_msg "LinearTypes" "a multiplicity annotation" OperatorWhitespaceSymbol_PrefixDollar -> mk_prefix_msg "TemplateHaskell" "an untyped splice" OperatorWhitespaceSymbol_PrefixDollarDollar -> mk_prefix_msg "TemplateHaskell" "a typed splice" PsWarnOperatorWhitespace sym occ_type -> let mk_msg occ_type_str = text "The" <+> text occ_type_str <+> text "use of a" <+> quotes (ftext sym) <+> text "might be repurposed as special syntax" $$ nest 2 (text "by a future language extension.") in mkSimpleDecorated $ case occ_type of OperatorWhitespaceOccurrence_Prefix -> mk_msg "prefix" OperatorWhitespaceOccurrence_Suffix -> mk_msg "suffix" OperatorWhitespaceOccurrence_TightInfix -> mk_msg "tight infix" PsWarnStarBinder -> mkSimpleDecorated $ text "Found binding occurrence of" <+> quotes (text "*") <+> text "yet StarIsType is enabled." PsWarnStarIsType -> mkSimpleDecorated $ text "Using" <+> quotes (text "*") <+> text "(or its Unicode variant) to mean" <+> quotes (text "Data.Kind.Type") $$ text "relies on the StarIsType extension, which will become" $$ text "deprecated in the future." 
PsWarnUnrecognisedPragma -> mkSimpleDecorated $ text "Unrecognised pragma" PsWarnMisplacedPragma prag -> mkSimpleDecorated $ text "Misplaced" <+> pprFileHeaderPragmaType prag <+> text "pragma" PsWarnImportPreQualified -> mkSimpleDecorated $ text "Found" <+> quotes (text "qualified") <+> text "in prepositive position" PsErrLexer err kind -> mkSimpleDecorated $ hcat [ text $ case err of LexError -> "lexical error" LexUnknownPragma -> "unknown pragma" LexErrorInPragma -> "lexical error in pragma" LexNumEscapeRange -> "numeric escape sequence out of range" LexStringCharLit -> "lexical error in string/character literal" LexStringCharLitEOF -> "unexpected end-of-file in string/character literal" LexUnterminatedComment -> "unterminated `{-'" LexUnterminatedOptions -> "unterminated OPTIONS pragma" LexUnterminatedQQ -> "unterminated quasiquotation" , text $ case kind of LexErrKind_EOF -> " at end of input" LexErrKind_UTF8 -> " (UTF-8 decoding error)" LexErrKind_Char c -> " at character " ++ show c ] PsErrParse token _details | null token -> mkSimpleDecorated $ text "parse error (possibly incorrect indentation or mismatched brackets)" | otherwise -> mkSimpleDecorated $ text "parse error on input" <+> quotes (text token) PsErrCmmLexer -> mkSimpleDecorated $ text "Cmm lexical error" PsErrCmmParser cmm_err -> mkSimpleDecorated $ case cmm_err of CmmUnknownPrimitive name -> text "unknown primitive" <+> ftext name CmmUnknownMacro fun -> text "unknown macro" <+> ftext fun CmmUnknownCConv cconv -> text "unknown calling convention:" <+> text cconv CmmUnrecognisedSafety safety -> text "unrecognised safety" <+> text safety CmmUnrecognisedHint hint -> text "unrecognised hint:" <+> text hint PsErrTypeAppWithoutSpace v e -> mkSimpleDecorated $ sep [ text "@-pattern in expression context:" , nest 4 (pprPrefixOcc v <> text "@" <> ppr e) ] $$ text "Type application syntax requires a space before '@'" PsErrLazyPatWithoutSpace e -> mkSimpleDecorated $ sep [ text "Lazy pattern in expression 
context:" , nest 4 (text "~" <> ppr e) ] $$ text "Did you mean to add a space after the '~'?" PsErrBangPatWithoutSpace e -> mkSimpleDecorated $ sep [ text "Bang pattern in expression context:" , nest 4 (text "!" <> ppr e) ] $$ text "Did you mean to add a space after the '!'?" PsErrInvalidInfixHole -> mkSimpleDecorated $ text "Invalid infix hole, expected an infix operator" PsErrExpectedHyphen -> mkSimpleDecorated $ text "Expected a hyphen" PsErrSpaceInSCC -> mkSimpleDecorated $ text "Spaces are not allowed in SCCs" PsErrEmptyDoubleQuotes _th_on -> mkSimpleDecorated $ vcat msg where msg = [ text "Parser error on `''`" , text "Character literals may not be empty" ] PsErrLambdaCase -- we can't get this error for \cases, since without -XLambdaCase, that's -- just a regular lambda expression -> mkSimpleDecorated $ text "Illegal" <+> lamCaseKeyword LamCase PsErrEmptyLambda -> mkSimpleDecorated $ text "A lambda requires at least one parameter" PsErrLinearFunction -> mkSimpleDecorated $ text "Illegal use of linear functions" PsErrOverloadedRecordUpdateNotEnabled -> mkSimpleDecorated $ text "Illegal overloaded record update" PsErrMultiWayIf -> mkSimpleDecorated $ text "Illegal multi-way if-expression" PsErrNumUnderscores reason -> mkSimpleDecorated $ text $ case reason of NumUnderscore_Integral -> "Illegal underscores in integer literals" NumUnderscore_Float -> "Illegal underscores in floating literals" PsErrIllegalBangPattern e -> mkSimpleDecorated $ text "Illegal bang-pattern" $$ ppr e PsErrOverloadedRecordDotInvalid -> mkSimpleDecorated $ text "Use of OverloadedRecordDot '.' not valid ('.' 
isn't allowed when constructing records or in record patterns)" PsErrIllegalPatSynExport -> mkSimpleDecorated $ text "Illegal export form" PsErrOverloadedRecordUpdateNoQualifiedFields -> mkSimpleDecorated $ text "Fields cannot be qualified when OverloadedRecordUpdate is enabled" PsErrExplicitForall is_unicode -> mkSimpleDecorated $ text "Illegal symbol" <+> quotes (forallSym is_unicode) <+> text "in type" PsErrIllegalQualifiedDo qdoDoc -> mkSimpleDecorated $ text "Illegal qualified" <+> quotes qdoDoc <+> text "block" PsErrQualifiedDoInCmd m -> mkSimpleDecorated $ hang (text "Parse error in command:") 2 $ text "Found a qualified" <+> ppr m <> text ".do block in a command, but" $$ text "qualified 'do' is not supported in commands." PsErrRecordSyntaxInPatSynDecl pat -> mkSimpleDecorated $ text "record syntax not supported for pattern synonym declarations:" $$ ppr pat PsErrEmptyWhereInPatSynDecl patsyn_name -> mkSimpleDecorated $ text "pattern synonym 'where' clause cannot be empty" $$ text "In the pattern synonym declaration for: " <+> ppr (patsyn_name) PsErrInvalidWhereBindInPatSynDecl patsyn_name decl -> mkSimpleDecorated $ text "pattern synonym 'where' clause must bind the pattern synonym's name" <+> quotes (ppr patsyn_name) $$ ppr decl PsErrNoSingleWhereBindInPatSynDecl _patsyn_name decl -> mkSimpleDecorated $ text "pattern synonym 'where' clause must contain a single binding:" $$ ppr decl PsErrDeclSpliceNotAtTopLevel d -> mkSimpleDecorated $ hang (text "Declaration splices are allowed only" <+> text "at the top level:") 2 (ppr d) PsErrMultipleNamesInStandaloneKindSignature vs -> mkSimpleDecorated $ vcat [ hang (text "Standalone kind signatures do not support multiple names at the moment:") 2 (pprWithCommas ppr vs) , text "See https://gitlab.haskell.org/ghc/ghc/issues/16754 for details." 
] PsErrIllegalExplicitNamespace -> mkSimpleDecorated $ text "Illegal keyword 'type'" PsErrUnallowedPragma prag -> mkSimpleDecorated $ hang (text "A pragma is not allowed in this position:") 2 (ppr prag) PsErrImportPostQualified -> mkSimpleDecorated $ text "Found" <+> quotes (text "qualified") <+> text "in postpositive position. " PsErrImportQualifiedTwice -> mkSimpleDecorated $ text "Multiple occurrences of 'qualified'" PsErrIllegalImportBundleForm -> mkSimpleDecorated $ text "Illegal import form, this syntax can only be used to bundle" $+$ text "pattern synonyms with types in module exports." PsErrInvalidRuleActivationMarker -> mkSimpleDecorated $ text "Invalid rule activation marker" PsErrMissingBlock -> mkSimpleDecorated $ text "Missing block" PsErrUnsupportedBoxedSumExpr s -> mkSimpleDecorated $ hang (text "Boxed sums not supported:") 2 (pprSumOrTuple Boxed s) PsErrUnsupportedBoxedSumPat s -> mkSimpleDecorated $ hang (text "Boxed sums not supported:") 2 (pprSumOrTuple Boxed s) PsErrUnexpectedQualifiedConstructor v -> mkSimpleDecorated $ hang (text "Expected an unqualified type constructor:") 2 (ppr v) PsErrTupleSectionInPat -> mkSimpleDecorated $ text "Tuple section in pattern context" PsErrOpFewArgs _ op -> mkSimpleDecorated $ text "Operator applied to too few arguments:" <+> ppr op PsErrVarForTyCon name -> mkSimpleDecorated $ text "Expecting a type constructor but found a variable," <+> quotes (ppr name) <> text "." $$ if isSymOcc $ rdrNameOcc name then text "If" <+> quotes (ppr name) <+> text "is a type constructor" <+> text "then enable ExplicitNamespaces and use the 'type' keyword." else empty PsErrMalformedEntityString -> mkSimpleDecorated $ text "Malformed entity string" PsErrDotsInRecordUpdate -> mkSimpleDecorated $ text "You cannot use `..' 
in a record update" PsErrInvalidDataCon t -> mkSimpleDecorated $ hang (text "Cannot parse data constructor in a data/newtype declaration:") 2 (ppr t) PsErrInvalidInfixDataCon lhs tc rhs -> mkSimpleDecorated $ hang (text "Cannot parse an infix data constructor in a data/newtype declaration:") 2 (ppr lhs <+> ppr tc <+> ppr rhs) PsErrIllegalPromotionQuoteDataCon name -> mkSimpleDecorated $ text "Illegal promotion quote mark in the declaration of" $$ text "data/newtype constructor" <+> pprPrefixOcc name PsErrUnpackDataCon -> mkSimpleDecorated $ text "{-# UNPACK #-} cannot be applied to a data constructor." PsErrUnexpectedKindAppInDataCon lhs ki -> mkSimpleDecorated $ hang (text "Unexpected kind application in a data/newtype declaration:") 2 (ppr lhs <+> text "@" <> ppr ki) PsErrInvalidRecordCon p -> mkSimpleDecorated $ text "Not a record constructor:" <+> ppr p PsErrIllegalUnboxedStringInPat lit -> mkSimpleDecorated $ text "Illegal unboxed string literal in pattern:" $$ ppr lit PsErrIllegalUnboxedFloatingLitInPat lit -> mkSimpleDecorated $ text "Illegal unboxed floating point literal in pattern:" $$ ppr lit PsErrDoNotationInPat -> mkSimpleDecorated $ text "do-notation in pattern" PsErrIfThenElseInPat -> mkSimpleDecorated $ text "(if ... then ... else ...)-syntax in pattern" (PsErrLambdaCaseInPat lc_variant) -> mkSimpleDecorated $ lamCaseKeyword lc_variant <+> text "...-syntax in pattern" PsErrCaseInPat -> mkSimpleDecorated $ text "(case ... of ...)-syntax in pattern" PsErrLetInPat -> mkSimpleDecorated $ text "(let ... in ...)-syntax in pattern" PsErrLambdaInPat -> mkSimpleDecorated $ text "Lambda-syntax in pattern." $$ text "Pattern matching on functions is not possible." 
PsErrArrowExprInPat e -> mkSimpleDecorated $ text "Expression syntax in pattern:" <+> ppr e PsErrArrowCmdInPat c -> mkSimpleDecorated $ text "Command syntax in pattern:" <+> ppr c PsErrArrowCmdInExpr c -> mkSimpleDecorated $ vcat [ text "Arrow command found where an expression was expected:" , nest 2 (ppr c) ] PsErrViewPatInExpr a b -> mkSimpleDecorated $ sep [ text "View pattern in expression context:" , nest 4 (ppr a <+> text "->" <+> ppr b) ] PsErrLambdaCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "lambda command") a PsErrCaseCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "case command") a PsErrLambdaCaseCmdInFunAppCmd lc_variant a -> mkSimpleDecorated $ pp_unexpected_fun_app (lamCaseKeyword lc_variant <+> text "command") a PsErrIfCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "if command") a PsErrLetCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "let command") a PsErrDoCmdInFunAppCmd a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "do command") a PsErrDoInFunAppExpr m a -> mkSimpleDecorated $ pp_unexpected_fun_app (prependQualified m (text "do block")) a PsErrMDoInFunAppExpr m a -> mkSimpleDecorated $ pp_unexpected_fun_app (prependQualified m (text "mdo block")) a PsErrLambdaInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "lambda expression") a PsErrCaseInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "case expression") a PsErrLambdaCaseInFunAppExpr lc_variant a -> mkSimpleDecorated $ pp_unexpected_fun_app (lamCaseKeyword lc_variant <+> text "expression") a PsErrLetInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "let expression") a PsErrIfInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "if expression") a PsErrProcInFunAppExpr a -> mkSimpleDecorated $ pp_unexpected_fun_app (text "proc expression") a PsErrMalformedTyOrClDecl ty -> mkSimpleDecorated $ text "Malformed head of type or class declaration:" 
<+> ppr ty PsErrIllegalWhereInDataDecl -> mkSimpleDecorated $ text "Illegal keyword 'where' in data declaration" PsErrIllegalDataTypeContext c -> mkSimpleDecorated $ text "Illegal datatype context:" <+> pprLHsContext (Just c) PsErrPrimStringInvalidChar -> mkSimpleDecorated $ text "primitive string literal must contain only characters <= \'\\xFF\'" PsErrSuffixAT -> mkSimpleDecorated $ text "Suffix occurrence of @. For an as-pattern, remove the leading whitespace." PsErrPrecedenceOutOfRange i -> mkSimpleDecorated $ text "Precedence out of range: " <> int i PsErrSemiColonsInCondExpr c st t se e -> mkSimpleDecorated $ text "Unexpected semi-colons in conditional:" $$ nest 4 expr where pprOptSemi True = semi pprOptSemi False = empty expr = text "if" <+> ppr c <> pprOptSemi st <+> text "then" <+> ppr t <> pprOptSemi se <+> text "else" <+> ppr e PsErrSemiColonsInCondCmd c st t se e -> mkSimpleDecorated $ text "Unexpected semi-colons in conditional:" $$ nest 4 expr where pprOptSemi True = semi pprOptSemi False = empty expr = text "if" <+> ppr c <> pprOptSemi st <+> text "then" <+> ppr t <> pprOptSemi se <+> text "else" <+> ppr e PsErrAtInPatPos -> mkSimpleDecorated $ text "Found a binding for the" <+> quotes (text "@") <+> text "operator in a pattern position." 
$$ perhapsAsPat PsErrParseErrorOnInput occ -> mkSimpleDecorated $ text "parse error on input" <+> ftext (occNameFS occ) PsErrMalformedDecl what for -> mkSimpleDecorated $ text "Malformed" <+> what <+> text "declaration for" <+> quotes (ppr for) PsErrUnexpectedTypeAppInDecl ki what for -> mkSimpleDecorated $ vcat [ text "Unexpected type application" <+> text "@" <> ppr ki , text "In the" <+> what <+> text "declaration for" <+> quotes (ppr for) ] PsErrNotADataCon name -> mkSimpleDecorated $ text "Not a data constructor:" <+> quotes (ppr name) PsErrInferredTypeVarNotAllowed -> mkSimpleDecorated $ text "Inferred type variables are not allowed here" PsErrIllegalTraditionalRecordSyntax s -> mkSimpleDecorated $ text "Illegal record syntax:" <+> s PsErrParseErrorInCmd s -> mkSimpleDecorated $ hang (text "Parse error in command:") 2 s PsErrInPat s details -> let msg = parse_error_in_pat body = case details of PEIP_NegApp -> text "-" <> ppr s PEIP_TypeArgs peipd_tyargs | not (null peipd_tyargs) -> ppr s <+> vcat [ hsep [text "@" <> ppr t | t <- peipd_tyargs] , text "Type applications in patterns are only allowed on data constructors." ] | otherwise -> ppr s PEIP_OtherPatDetails (ParseContext (Just fun) _) -> ppr s <+> text "In a function binding for the" <+> quotes (ppr fun) <+> text "operator." $$ if opIsAt fun then perhapsAsPat else empty _ -> ppr s in mkSimpleDecorated $ msg <+> body PsErrParseRightOpSectionInPat infixOcc s -> mkSimpleDecorated $ parse_error_in_pat <+> pprInfixOcc infixOcc <> ppr s PsErrIllegalRoleName role _nearby -> mkSimpleDecorated $ text "Illegal role name" <+> quotes (ppr role) PsErrInvalidTypeSignature lhs -> mkSimpleDecorated $ text "Invalid type signature:" <+> ppr lhs <+> text ":: ..." 
PsErrUnexpectedTypeInDecl t what tc tparms equals_or_where -> mkSimpleDecorated $ vcat [ text "Unexpected type" <+> quotes (ppr t) , text "In the" <+> what <+> text "declaration for" <+> quotes tc' , vcat[ (text "A" <+> what <+> text "declaration should have form") , nest 2 (what <+> tc' <+> hsep (map text (takeList tparms allNameStrings)) <+> equals_or_where) ] ] where -- Avoid printing a constraint tuple in the error message. Print -- a plain old tuple instead (since that's what the user probably -- wrote). See #14907 tc' = ppr $ filterCTuple tc PsErrInvalidPackageName pkg -> mkSimpleDecorated $ vcat [ text "Parse error" <> colon <+> quotes (ftext pkg) , text "Version number or non-alphanumeric" <+> text "character in package name" ] PsErrIllegalGadtRecordMultiplicity arr -> mkSimpleDecorated $ vcat [ text "Parse error" <> colon <+> quotes (ppr arr) , text "Record constructors in GADTs must use an ordinary, non-linear arrow." ] PsErrInvalidCApiImport {} -> mkSimpleDecorated $ vcat [ text "Wrapper stubs can't be used with CApiFFI."] diagnosticReason = \case PsUnknownMessage m -> diagnosticReason m PsHeaderMessage m -> psHeaderMessageReason m PsWarnBidirectionalFormatChars{} -> WarningWithFlag Opt_WarnUnicodeBidirectionalFormatCharacters PsWarnTab{} -> WarningWithFlag Opt_WarnTabs PsWarnTransitionalLayout{} -> WarningWithFlag Opt_WarnAlternativeLayoutRuleTransitional PsWarnOperatorWhitespaceExtConflict{} -> WarningWithFlag Opt_WarnOperatorWhitespaceExtConflict PsWarnOperatorWhitespace{} -> WarningWithFlag Opt_WarnOperatorWhitespace PsWarnHaddockInvalidPos -> WarningWithFlag Opt_WarnInvalidHaddock PsWarnHaddockIgnoreMulti -> WarningWithFlag Opt_WarnInvalidHaddock PsWarnStarBinder -> WarningWithFlag Opt_WarnStarBinder PsWarnStarIsType -> WarningWithFlag Opt_WarnStarIsType PsWarnUnrecognisedPragma -> WarningWithFlag Opt_WarnUnrecognisedPragmas PsWarnMisplacedPragma{} -> WarningWithFlag Opt_WarnMisplacedPragmas PsWarnImportPreQualified -> WarningWithFlag 
Opt_WarnPrepositiveQualifiedModule PsErrLexer{} -> ErrorWithoutFlag PsErrCmmLexer -> ErrorWithoutFlag PsErrCmmParser{} -> ErrorWithoutFlag PsErrParse{} -> ErrorWithoutFlag PsErrTypeAppWithoutSpace{} -> ErrorWithoutFlag PsErrLazyPatWithoutSpace{} -> ErrorWithoutFlag PsErrBangPatWithoutSpace{} -> ErrorWithoutFlag PsErrInvalidInfixHole -> ErrorWithoutFlag PsErrExpectedHyphen -> ErrorWithoutFlag PsErrSpaceInSCC -> ErrorWithoutFlag PsErrEmptyDoubleQuotes{} -> ErrorWithoutFlag PsErrLambdaCase{} -> ErrorWithoutFlag PsErrEmptyLambda{} -> ErrorWithoutFlag PsErrLinearFunction{} -> ErrorWithoutFlag PsErrMultiWayIf{} -> ErrorWithoutFlag PsErrOverloadedRecordUpdateNotEnabled{} -> ErrorWithoutFlag PsErrNumUnderscores{} -> ErrorWithoutFlag PsErrIllegalBangPattern{} -> ErrorWithoutFlag PsErrOverloadedRecordDotInvalid{} -> ErrorWithoutFlag PsErrIllegalPatSynExport -> ErrorWithoutFlag PsErrOverloadedRecordUpdateNoQualifiedFields -> ErrorWithoutFlag PsErrExplicitForall{} -> ErrorWithoutFlag PsErrIllegalQualifiedDo{} -> ErrorWithoutFlag PsErrQualifiedDoInCmd{} -> ErrorWithoutFlag PsErrRecordSyntaxInPatSynDecl{} -> ErrorWithoutFlag PsErrEmptyWhereInPatSynDecl{} -> ErrorWithoutFlag PsErrInvalidWhereBindInPatSynDecl{} -> ErrorWithoutFlag PsErrNoSingleWhereBindInPatSynDecl{} -> ErrorWithoutFlag PsErrDeclSpliceNotAtTopLevel{} -> ErrorWithoutFlag PsErrMultipleNamesInStandaloneKindSignature{} -> ErrorWithoutFlag PsErrIllegalExplicitNamespace -> ErrorWithoutFlag PsErrUnallowedPragma{} -> ErrorWithoutFlag PsErrImportPostQualified -> ErrorWithoutFlag PsErrImportQualifiedTwice -> ErrorWithoutFlag PsErrIllegalImportBundleForm -> ErrorWithoutFlag PsErrInvalidRuleActivationMarker -> ErrorWithoutFlag PsErrMissingBlock -> ErrorWithoutFlag PsErrUnsupportedBoxedSumExpr{} -> ErrorWithoutFlag PsErrUnsupportedBoxedSumPat{} -> ErrorWithoutFlag PsErrUnexpectedQualifiedConstructor{} -> ErrorWithoutFlag PsErrTupleSectionInPat{} -> ErrorWithoutFlag PsErrOpFewArgs{} -> ErrorWithoutFlag PsErrVarForTyCon{} -> 
ErrorWithoutFlag PsErrMalformedEntityString -> ErrorWithoutFlag PsErrDotsInRecordUpdate -> ErrorWithoutFlag PsErrInvalidDataCon{} -> ErrorWithoutFlag PsErrInvalidInfixDataCon{} -> ErrorWithoutFlag PsErrIllegalPromotionQuoteDataCon{} -> ErrorWithoutFlag PsErrUnpackDataCon -> ErrorWithoutFlag PsErrUnexpectedKindAppInDataCon{} -> ErrorWithoutFlag PsErrInvalidRecordCon{} -> ErrorWithoutFlag PsErrIllegalUnboxedStringInPat{} -> ErrorWithoutFlag PsErrIllegalUnboxedFloatingLitInPat{} -> ErrorWithoutFlag PsErrDoNotationInPat{} -> ErrorWithoutFlag PsErrIfThenElseInPat -> ErrorWithoutFlag PsErrLambdaCaseInPat{} -> ErrorWithoutFlag PsErrCaseInPat -> ErrorWithoutFlag PsErrLetInPat -> ErrorWithoutFlag PsErrLambdaInPat -> ErrorWithoutFlag PsErrArrowExprInPat{} -> ErrorWithoutFlag PsErrArrowCmdInPat{} -> ErrorWithoutFlag PsErrArrowCmdInExpr{} -> ErrorWithoutFlag PsErrViewPatInExpr{} -> ErrorWithoutFlag PsErrLambdaCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrCaseCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrLambdaCaseCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrIfCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrLetCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrDoCmdInFunAppCmd{} -> ErrorWithoutFlag PsErrDoInFunAppExpr{} -> ErrorWithoutFlag PsErrMDoInFunAppExpr{} -> ErrorWithoutFlag PsErrLambdaInFunAppExpr{} -> ErrorWithoutFlag PsErrCaseInFunAppExpr{} -> ErrorWithoutFlag PsErrLambdaCaseInFunAppExpr{} -> ErrorWithoutFlag PsErrLetInFunAppExpr{} -> ErrorWithoutFlag PsErrIfInFunAppExpr{} -> ErrorWithoutFlag PsErrProcInFunAppExpr{} -> ErrorWithoutFlag PsErrMalformedTyOrClDecl{} -> ErrorWithoutFlag PsErrIllegalWhereInDataDecl -> ErrorWithoutFlag PsErrIllegalDataTypeContext{} -> ErrorWithoutFlag PsErrPrimStringInvalidChar -> ErrorWithoutFlag PsErrSuffixAT -> ErrorWithoutFlag PsErrPrecedenceOutOfRange{} -> ErrorWithoutFlag PsErrSemiColonsInCondExpr{} -> ErrorWithoutFlag PsErrSemiColonsInCondCmd{} -> ErrorWithoutFlag PsErrAtInPatPos -> ErrorWithoutFlag PsErrParseErrorOnInput{} -> ErrorWithoutFlag 
PsErrMalformedDecl{} -> ErrorWithoutFlag PsErrUnexpectedTypeAppInDecl{} -> ErrorWithoutFlag PsErrNotADataCon{} -> ErrorWithoutFlag PsErrInferredTypeVarNotAllowed -> ErrorWithoutFlag PsErrIllegalTraditionalRecordSyntax{} -> ErrorWithoutFlag PsErrParseErrorInCmd{} -> ErrorWithoutFlag PsErrInPat{} -> ErrorWithoutFlag PsErrIllegalRoleName{} -> ErrorWithoutFlag PsErrInvalidTypeSignature{} -> ErrorWithoutFlag PsErrUnexpectedTypeInDecl{} -> ErrorWithoutFlag PsErrInvalidPackageName{} -> ErrorWithoutFlag PsErrParseRightOpSectionInPat{} -> ErrorWithoutFlag PsErrIllegalGadtRecordMultiplicity{} -> ErrorWithoutFlag PsErrInvalidCApiImport {} -> ErrorWithoutFlag diagnosticHints = \case PsUnknownMessage m -> diagnosticHints m PsHeaderMessage m -> psHeaderMessageHints m PsWarnBidirectionalFormatChars{} -> noHints PsWarnTab{} -> [SuggestUseSpaces] PsWarnTransitionalLayout{} -> noHints PsWarnOperatorWhitespaceExtConflict sym -> [SuggestUseWhitespaceAfter sym] PsWarnOperatorWhitespace sym occ -> [SuggestUseWhitespaceAround (unpackFS sym) occ] PsWarnHaddockInvalidPos -> noHints PsWarnHaddockIgnoreMulti -> noHints PsWarnStarBinder -> [SuggestQualifyStarOperator] PsWarnStarIsType -> [SuggestUseTypeFromDataKind Nothing] PsWarnUnrecognisedPragma -> noHints PsWarnMisplacedPragma{} -> [SuggestPlacePragmaInHeader] PsWarnImportPreQualified -> [ SuggestQualifiedAfterModuleName , suggestExtension LangExt.ImportQualifiedPost] PsErrLexer{} -> noHints PsErrCmmLexer -> noHints PsErrCmmParser{} -> noHints PsErrParse token PsErrParseDetails{..} -> case token of "" -> [] "$" | not ped_th_enabled -> [suggestExtension LangExt.TemplateHaskell] -- #7396 "$$" | not ped_th_enabled -> [suggestExtension LangExt.TemplateHaskell] -- #20157 "<-" | ped_mdo_in_last_100 -> [suggestExtension LangExt.RecursiveDo] | otherwise -> [SuggestMissingDo] "=" | ped_do_in_last_100 -> [SuggestLetInDo] -- #15849 _ | not ped_pat_syn_enabled , ped_pattern_parsed -> [suggestExtension LangExt.PatternSynonyms] -- #12429 | otherwise -> 
[] PsErrTypeAppWithoutSpace{} -> noHints PsErrLazyPatWithoutSpace{} -> noHints PsErrBangPatWithoutSpace{} -> noHints PsErrInvalidInfixHole -> noHints PsErrExpectedHyphen -> noHints PsErrSpaceInSCC -> noHints PsErrEmptyDoubleQuotes th_on | th_on -> [SuggestThQuotationSyntax] | otherwise -> noHints PsErrLambdaCase{} -> [suggestExtension LangExt.LambdaCase] PsErrEmptyLambda{} -> noHints PsErrLinearFunction{} -> [suggestExtension LangExt.LinearTypes] PsErrMultiWayIf{} -> [suggestExtension LangExt.MultiWayIf] PsErrOverloadedRecordUpdateNotEnabled{} -> [suggestExtension LangExt.OverloadedRecordUpdate] PsErrNumUnderscores{} -> [suggestExtension LangExt.NumericUnderscores] PsErrIllegalBangPattern{} -> [suggestExtension LangExt.BangPatterns] PsErrOverloadedRecordDotInvalid{} -> noHints PsErrIllegalPatSynExport -> [suggestExtension LangExt.PatternSynonyms] PsErrOverloadedRecordUpdateNoQualifiedFields -> noHints PsErrExplicitForall is_unicode -> let info = text "or a similar language extension to enable explicit-forall syntax:" <+> forallSym is_unicode <+> text ". 
" in [ suggestExtensionWithInfo info LangExt.RankNTypes ] PsErrIllegalQualifiedDo{} -> [suggestExtension LangExt.QualifiedDo] PsErrQualifiedDoInCmd{} -> noHints PsErrRecordSyntaxInPatSynDecl{} -> noHints PsErrEmptyWhereInPatSynDecl{} -> noHints PsErrInvalidWhereBindInPatSynDecl{} -> noHints PsErrNoSingleWhereBindInPatSynDecl{} -> noHints PsErrDeclSpliceNotAtTopLevel{} -> noHints PsErrMultipleNamesInStandaloneKindSignature{} -> noHints PsErrIllegalExplicitNamespace -> [suggestExtension LangExt.ExplicitNamespaces] PsErrUnallowedPragma{} -> noHints PsErrImportPostQualified -> [suggestExtension LangExt.ImportQualifiedPost] PsErrImportQualifiedTwice -> noHints PsErrIllegalImportBundleForm -> noHints PsErrInvalidRuleActivationMarker -> noHints PsErrMissingBlock -> noHints PsErrUnsupportedBoxedSumExpr{} -> noHints PsErrUnsupportedBoxedSumPat{} -> noHints PsErrUnexpectedQualifiedConstructor{} -> noHints PsErrTupleSectionInPat{} -> noHints PsErrOpFewArgs star_is_type op -> noStarIsTypeHints star_is_type op PsErrVarForTyCon{} -> noHints PsErrMalformedEntityString -> noHints PsErrDotsInRecordUpdate -> noHints PsErrInvalidDataCon{} -> noHints PsErrInvalidInfixDataCon{} -> noHints PsErrIllegalPromotionQuoteDataCon{} -> noHints PsErrUnpackDataCon -> noHints PsErrUnexpectedKindAppInDataCon{} -> noHints PsErrInvalidRecordCon{} -> noHints PsErrIllegalUnboxedStringInPat{} -> noHints PsErrIllegalUnboxedFloatingLitInPat{} -> noHints PsErrDoNotationInPat{} -> noHints PsErrIfThenElseInPat -> noHints PsErrLambdaCaseInPat{} -> noHints PsErrCaseInPat -> noHints PsErrLetInPat -> noHints PsErrLambdaInPat -> noHints PsErrArrowExprInPat{} -> noHints PsErrArrowCmdInPat{} -> noHints PsErrArrowCmdInExpr{} -> noHints PsErrViewPatInExpr{} -> noHints PsErrLambdaCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrCaseCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrLambdaCaseCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrIfCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrLetCmdInFunAppCmd{} -> 
suggestParensAndBlockArgs PsErrDoCmdInFunAppCmd{} -> suggestParensAndBlockArgs PsErrDoInFunAppExpr{} -> suggestParensAndBlockArgs PsErrMDoInFunAppExpr{} -> suggestParensAndBlockArgs PsErrLambdaInFunAppExpr{} -> suggestParensAndBlockArgs PsErrCaseInFunAppExpr{} -> suggestParensAndBlockArgs PsErrLambdaCaseInFunAppExpr{} -> suggestParensAndBlockArgs PsErrLetInFunAppExpr{} -> suggestParensAndBlockArgs PsErrIfInFunAppExpr{} -> suggestParensAndBlockArgs PsErrProcInFunAppExpr{} -> suggestParensAndBlockArgs PsErrMalformedTyOrClDecl{} -> noHints PsErrIllegalWhereInDataDecl -> [ suggestExtensionWithInfo (text "or a similar language extension to enable syntax: data T where") LangExt.GADTs ] PsErrIllegalDataTypeContext{} -> [suggestExtension LangExt.DatatypeContexts] PsErrPrimStringInvalidChar -> noHints PsErrSuffixAT -> noHints PsErrPrecedenceOutOfRange{} -> noHints PsErrSemiColonsInCondExpr{} -> [suggestExtension LangExt.DoAndIfThenElse] PsErrSemiColonsInCondCmd{} -> [suggestExtension LangExt.DoAndIfThenElse] PsErrAtInPatPos -> noHints PsErrParseErrorOnInput{} -> noHints PsErrMalformedDecl{} -> noHints PsErrUnexpectedTypeAppInDecl{} -> noHints PsErrNotADataCon{} -> noHints PsErrInferredTypeVarNotAllowed -> noHints PsErrIllegalTraditionalRecordSyntax{} -> [suggestExtension LangExt.TraditionalRecordSyntax] PsErrParseErrorInCmd{} -> noHints PsErrInPat _ details -> case details of PEIP_RecPattern args YesPatIsRecursive ctx | length args /= 0 -> catMaybes [sug_recdo, sug_missingdo ctx] | otherwise -> catMaybes [sug_missingdo ctx] PEIP_OtherPatDetails ctx -> catMaybes [sug_missingdo ctx] _ -> [] where sug_recdo = Just (suggestExtension LangExt.RecursiveDo) sug_missingdo (ParseContext _ YesIncompleteDoBlock) = Just SuggestMissingDo sug_missingdo _ = Nothing PsErrParseRightOpSectionInPat{} -> noHints PsErrIllegalRoleName _ nearby -> [SuggestRoles nearby] PsErrInvalidTypeSignature lhs -> if | foreign_RDR `looks_like` lhs -> [suggestExtension LangExt.ForeignFunctionInterface] | 
default_RDR `looks_like` lhs -> [suggestExtension LangExt.DefaultSignatures] | pattern_RDR `looks_like` lhs -> [suggestExtension LangExt.PatternSynonyms] | otherwise -> [SuggestTypeSignatureForm] where -- A common error is to forget the ForeignFunctionInterface flag -- so check for that, and suggest. cf #3805 -- Sadly 'foreign import' still barfs 'parse error' because -- 'import' is a keyword -- looks_like :: RdrName -> LHsExpr GhcPsErr -> Bool -- AZ looks_like s (L _ (HsVar _ (L _ v))) = v == s looks_like s (L _ (HsApp _ lhs _)) = looks_like s lhs looks_like _ _ = False foreign_RDR = mkUnqual varName (fsLit "foreign") default_RDR = mkUnqual varName (fsLit "default") pattern_RDR = mkUnqual varName (fsLit "pattern") PsErrUnexpectedTypeInDecl{} -> noHints PsErrInvalidPackageName{} -> noHints PsErrIllegalGadtRecordMultiplicity{} -> noHints PsErrInvalidCApiImport {} -> noHints psHeaderMessageDiagnostic :: PsHeaderMessage -> DecoratedSDoc psHeaderMessageDiagnostic = \case PsErrParseLanguagePragma -> mkSimpleDecorated $ vcat [ text "Cannot parse LANGUAGE pragma" , text "Expecting comma-separated list of language options," , text "each starting with a capital letter" , nest 2 (text "E.g. {-# LANGUAGE TemplateHaskell, GADTs #-}") ] PsErrUnsupportedExt unsup _ -> mkSimpleDecorated $ text "Unsupported extension: " <> text unsup PsErrParseOptionsPragma str -> mkSimpleDecorated $ vcat [ text "Error while parsing OPTIONS_GHC pragma." , text "Expecting whitespace-separated list of GHC options." , text " E.g. 
{-# OPTIONS_GHC -Wall -O2 #-}" , text ("Input was: " ++ show str) ] PsErrUnknownOptionsPragma flag -> mkSimpleDecorated $ text "Unknown flag in {-# OPTIONS_GHC #-} pragma:" <+> text flag psHeaderMessageReason :: PsHeaderMessage -> DiagnosticReason psHeaderMessageReason = \case PsErrParseLanguagePragma -> ErrorWithoutFlag PsErrUnsupportedExt{} -> ErrorWithoutFlag PsErrParseOptionsPragma{} -> ErrorWithoutFlag PsErrUnknownOptionsPragma{} -> ErrorWithoutFlag psHeaderMessageHints :: PsHeaderMessage -> [GhcHint] psHeaderMessageHints = \case PsErrParseLanguagePragma -> noHints PsErrUnsupportedExt unsup supported -> if null suggestions then noHints -- FIXME(adn) To fix the compiler crash in #19923 we just rewrap this into an -- UnknownHint, but we should have here a proper hint, but that would require -- changing 'supportedExtensions' to emit a list of 'Extension'. else [UnknownHint $ text "Perhaps you meant" <+> quotedListWithOr (map text suggestions)] where suggestions :: [String] suggestions = fuzzyMatch unsup supported PsErrParseOptionsPragma{} -> noHints PsErrUnknownOptionsPragma{} -> noHints suggestParensAndBlockArgs :: [GhcHint] suggestParensAndBlockArgs = [SuggestParentheses, suggestExtension LangExt.BlockArguments] pp_unexpected_fun_app :: Outputable a => SDoc -> a -> SDoc pp_unexpected_fun_app e a = text "Unexpected " <> e <> text " in function application:" $$ nest 4 (ppr a) parse_error_in_pat :: SDoc parse_error_in_pat = text "Parse error in pattern:" forallSym :: Bool -> SDoc forallSym True = text "∀" forallSym False = text "forall" pprFileHeaderPragmaType :: FileHeaderPragmaType -> SDoc pprFileHeaderPragmaType OptionsPrag = text "OPTIONS" pprFileHeaderPragmaType IncludePrag = text "INCLUDE" pprFileHeaderPragmaType LanguagePrag = text "LANGUAGE" pprFileHeaderPragmaType DocOptionsPrag = text "OPTIONS_HADDOCK" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Errors/Types.hs0000644000000000000000000004272214472400112022012 0ustar0000000000000000{-# LANGUAGE 
ExistentialQuantification #-} module GHC.Parser.Errors.Types where import GHC.Prelude import Data.Typeable import GHC.Core.TyCon (Role) import GHC.Data.FastString import GHC.Hs import GHC.Parser.Types import GHC.Parser.Errors.Basic import GHC.Types.Error import GHC.Types.Hint import GHC.Types.Name.Occurrence (OccName) import GHC.Types.Name.Reader import GHC.Unit.Module.Name import GHC.Utils.Outputable import Data.List.NonEmpty (NonEmpty) import GHC.Types.SrcLoc (PsLoc) -- The type aliases below are useful to make some type signatures a bit more -- descriptive, like 'handleWarningsThrowErrors' in 'GHC.Driver.Main'. type PsWarning = PsMessage -- /INVARIANT/: The diagnosticReason is a Warning reason type PsError = PsMessage -- /INVARIANT/: The diagnosticReason is ErrorWithoutFlag {- Note [Messages from GHC.Parser.Header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We group the messages from 'GHC.Parser.Header' because we need to be able to pattern match on them in the driver code. This is because in functions like 'GHC.Driver.Pipeline.preprocess' we want to handle only a specific subset of parser messages, during dependency analysis, and having a single constructor to handle them all is handy. -} data PsHeaderMessage = PsErrParseLanguagePragma | PsErrUnsupportedExt !String ![String] | PsErrParseOptionsPragma !String {-| PsErrUnsupportedOptionsPragma is an error that occurs when an unknown OPTIONS_GHC pragma is supplied is found. Example(s): {-# OPTIONS_GHC foo #-} Test case(s): tests/safeHaskell/flags/SafeFlags28 tests/safeHaskell/flags/SafeFlags19 tests/safeHaskell/flags/SafeFlags29 tests/parser/should_fail/T19923c tests/parser/should_fail/T19923b tests/parser/should_fail/readFail044 tests/driver/T2499 -} | PsErrUnknownOptionsPragma !String data PsMessage = {-| An \"unknown\" message from the parser. This type constructor allows arbitrary messages to be embedded. The typical use case would be GHC plugins willing to emit custom diagnostics. -} forall a. 
(Diagnostic a, Typeable a) => PsUnknownMessage a {-| A group of parser messages emitted in 'GHC.Parser.Header'. See Note [Messages from GHC.Parser.Header]. -} | PsHeaderMessage !PsHeaderMessage {-| PsWarnBidirectionalFormatChars is a warning (controlled by the -Wwarn-bidirectional-format-characters flag) that occurs when unicode bi-directional format characters are found within in a file The 'PsLoc' contains the exact position in the buffer the character occured, and the string contains a description of the character. -} | PsWarnBidirectionalFormatChars (NonEmpty (PsLoc, Char, String)) {-| PsWarnTab is a warning (controlled by the -Wwarn-tabs flag) that occurs when tabulations (tabs) are found within a file. Test case(s): parser/should_fail/T12610 parser/should_compile/T9723b parser/should_compile/T9723a parser/should_compile/read043 parser/should_fail/T16270 warnings/should_compile/T9230 -} | PsWarnTab !Word -- ^ Number of other occurrences other than the first one {-| PsWarnTransitionalLayout is a warning (controlled by the -Walternative-layout-rule-transitional flag) that occurs when pipes ('|') or 'where' are at the same depth of an implicit layout block. 
Example(s): f :: IO () f | True = do let x = () y = () return () | True = return () Test case(s): layout/layout006 layout/layout003 layout/layout001 -} | PsWarnTransitionalLayout !TransLayoutReason -- | Unrecognised pragma | PsWarnUnrecognisedPragma | PsWarnMisplacedPragma !FileHeaderPragmaType -- | Invalid Haddock comment position | PsWarnHaddockInvalidPos -- | Multiple Haddock comment for the same entity | PsWarnHaddockIgnoreMulti -- | Found binding occurrence of "*" while StarIsType is enabled | PsWarnStarBinder -- | Using "*" for "Type" without StarIsType enabled | PsWarnStarIsType -- | Pre qualified import with 'WarnPrepositiveQualifiedModule' enabled | PsWarnImportPreQualified | PsWarnOperatorWhitespaceExtConflict !OperatorWhitespaceSymbol | PsWarnOperatorWhitespace !FastString !OperatorWhitespaceOccurrence -- | LambdaCase syntax used without the extension enabled | PsErrLambdaCase -- | A lambda requires at least one parameter | PsErrEmptyLambda -- | Underscores in literals without the extension enabled | PsErrNumUnderscores !NumUnderscoreReason -- | Invalid character in primitive string | PsErrPrimStringInvalidChar -- | Missing block | PsErrMissingBlock -- | Lexer error | PsErrLexer !LexErr !LexErrKind -- | Suffix occurrence of `@` | PsErrSuffixAT -- | Parse errors | PsErrParse !String !PsErrParseDetails -- | Cmm lexer error | PsErrCmmLexer -- | Unsupported boxed sum in expression | PsErrUnsupportedBoxedSumExpr !(SumOrTuple (HsExpr GhcPs)) -- | Unsupported boxed sum in pattern | PsErrUnsupportedBoxedSumPat !(SumOrTuple (PatBuilder GhcPs)) -- | Unexpected qualified constructor | PsErrUnexpectedQualifiedConstructor !RdrName -- | Tuple section in pattern context | PsErrTupleSectionInPat -- | Bang-pattern without BangPattterns enabled | PsErrIllegalBangPattern !(Pat GhcPs) -- | Operator applied to too few arguments | PsErrOpFewArgs !StarIsType !RdrName -- | Import: multiple occurrences of 'qualified' | PsErrImportQualifiedTwice -- | Post qualified import without 
'ImportQualifiedPost' | PsErrImportPostQualified -- | Explicit namespace keyword without 'ExplicitNamespaces' | PsErrIllegalExplicitNamespace -- | Expecting a type constructor but found a variable | PsErrVarForTyCon !RdrName -- | Illegal export form allowed by PatternSynonyms | PsErrIllegalPatSynExport -- | Malformed entity string | PsErrMalformedEntityString -- | Dots used in record update | PsErrDotsInRecordUpdate -- | Precedence out of range | PsErrPrecedenceOutOfRange !Int -- | Invalid use of record dot syntax `.' | PsErrOverloadedRecordDotInvalid -- | `OverloadedRecordUpdate` is not enabled. | PsErrOverloadedRecordUpdateNotEnabled -- | Can't use qualified fields when OverloadedRecordUpdate is enabled. | PsErrOverloadedRecordUpdateNoQualifiedFields -- | Cannot parse data constructor in a data/newtype declaration | PsErrInvalidDataCon !(HsType GhcPs) -- | Cannot parse data constructor in a data/newtype declaration | PsErrInvalidInfixDataCon !(HsType GhcPs) !RdrName !(HsType GhcPs) -- | Illegal DataKinds quote mark in data/newtype constructor declaration | PsErrIllegalPromotionQuoteDataCon !RdrName -- | UNPACK applied to a data constructor | PsErrUnpackDataCon -- | Unexpected kind application in data/newtype declaration | PsErrUnexpectedKindAppInDataCon !DataConBuilder !(HsType GhcPs) -- | Not a record constructor | PsErrInvalidRecordCon !(PatBuilder GhcPs) -- | Illegal unboxed string literal in pattern | PsErrIllegalUnboxedStringInPat !(HsLit GhcPs) -- | Illegal primitive floating point literal in pattern | PsErrIllegalUnboxedFloatingLitInPat !(HsLit GhcPs) -- | Do-notation in pattern | PsErrDoNotationInPat -- | If-then-else syntax in pattern | PsErrIfThenElseInPat -- | Lambda-case in pattern | PsErrLambdaCaseInPat LamCaseVariant -- | case..of in pattern | PsErrCaseInPat -- | let-syntax in pattern | PsErrLetInPat -- | Lambda-syntax in pattern | PsErrLambdaInPat -- | Arrow expression-syntax in pattern | PsErrArrowExprInPat !(HsExpr GhcPs) -- | Arrow 
command-syntax in pattern | PsErrArrowCmdInPat !(HsCmd GhcPs) -- | Arrow command-syntax in expression | PsErrArrowCmdInExpr !(HsCmd GhcPs) -- | View-pattern in expression | PsErrViewPatInExpr !(LHsExpr GhcPs) !(LHsExpr GhcPs) -- | Type-application without space before '@' | PsErrTypeAppWithoutSpace !RdrName !(LHsExpr GhcPs) -- | Lazy-pattern ('~') without space after it | PsErrLazyPatWithoutSpace !(LHsExpr GhcPs) -- | Bang-pattern ('!') without space after it | PsErrBangPatWithoutSpace !(LHsExpr GhcPs) -- | Pragma not allowed in this position | PsErrUnallowedPragma !(HsPragE GhcPs) -- | Qualified do block in command | PsErrQualifiedDoInCmd !ModuleName -- | Invalid infix hole, expected an infix operator | PsErrInvalidInfixHole -- | Unexpected semi-colons in conditional expression | PsErrSemiColonsInCondExpr !(HsExpr GhcPs) -- ^ conditional expr !Bool -- ^ "then" semi-colon? !(HsExpr GhcPs) -- ^ "then" expr !Bool -- ^ "else" semi-colon? !(HsExpr GhcPs) -- ^ "else" expr -- | Unexpected semi-colons in conditional command | PsErrSemiColonsInCondCmd !(HsExpr GhcPs) -- ^ conditional expr !Bool -- ^ "then" semi-colon? !(HsCmd GhcPs) -- ^ "then" expr !Bool -- ^ "else" semi-colon? 
!(HsCmd GhcPs) -- ^ "else" expr -- | @-operator in a pattern position | PsErrAtInPatPos -- | Unexpected lambda command in function application | PsErrLambdaCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected case command in function application | PsErrCaseCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected \case(s) command in function application | PsErrLambdaCaseCmdInFunAppCmd !LamCaseVariant !(LHsCmd GhcPs) -- | Unexpected if command in function application | PsErrIfCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected let command in function application | PsErrLetCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected do command in function application | PsErrDoCmdInFunAppCmd !(LHsCmd GhcPs) -- | Unexpected do block in function application | PsErrDoInFunAppExpr !(Maybe ModuleName) !(LHsExpr GhcPs) -- | Unexpected mdo block in function application | PsErrMDoInFunAppExpr !(Maybe ModuleName) !(LHsExpr GhcPs) -- | Unexpected lambda expression in function application | PsErrLambdaInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected case expression in function application | PsErrCaseInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected \case(s) expression in function application | PsErrLambdaCaseInFunAppExpr !LamCaseVariant !(LHsExpr GhcPs) -- | Unexpected let expression in function application | PsErrLetInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected if expression in function application | PsErrIfInFunAppExpr !(LHsExpr GhcPs) -- | Unexpected proc expression in function application | PsErrProcInFunAppExpr !(LHsExpr GhcPs) -- | Malformed head of type or class declaration | PsErrMalformedTyOrClDecl !(LHsType GhcPs) -- | Illegal 'where' keyword in data declaration | PsErrIllegalWhereInDataDecl -- | Illegal datatype context | PsErrIllegalDataTypeContext !(LHsContext GhcPs) -- | Parse error on input | PsErrParseErrorOnInput !OccName -- | Malformed ... declaration for ... 
| PsErrMalformedDecl !SDoc !RdrName -- | Unexpected type application in a declaration | PsErrUnexpectedTypeAppInDecl !(LHsType GhcPs) !SDoc !RdrName -- | Not a data constructor | PsErrNotADataCon !RdrName -- | Record syntax used in pattern synonym declaration | PsErrRecordSyntaxInPatSynDecl !(LPat GhcPs) -- | Empty 'where' clause in pattern-synonym declaration | PsErrEmptyWhereInPatSynDecl !RdrName -- | Invalid binding name in 'where' clause of pattern-synonym declaration | PsErrInvalidWhereBindInPatSynDecl !RdrName !(HsDecl GhcPs) -- | Multiple bindings in 'where' clause of pattern-synonym declaration | PsErrNoSingleWhereBindInPatSynDecl !RdrName !(HsDecl GhcPs) -- | Declaration splice not a top-level | PsErrDeclSpliceNotAtTopLevel !(SpliceDecl GhcPs) -- | Inferred type variables not allowed here | PsErrInferredTypeVarNotAllowed -- | Multiple names in standalone kind signatures | PsErrMultipleNamesInStandaloneKindSignature [LIdP GhcPs] -- | Illegal import bundle form | PsErrIllegalImportBundleForm -- | Illegal role name | PsErrIllegalRoleName !FastString [Role] -- | Invalid type signature | PsErrInvalidTypeSignature !(LHsExpr GhcPs) -- | Unexpected type in declaration | PsErrUnexpectedTypeInDecl !(LHsType GhcPs) !SDoc !RdrName [LHsTypeArg GhcPs] !SDoc -- | Expected a hyphen | PsErrExpectedHyphen -- | Found a space in a SCC | PsErrSpaceInSCC -- | Found two single quotes | PsErrEmptyDoubleQuotes !Bool -- ^ Is TH on? -- | Invalid package name | PsErrInvalidPackageName !FastString -- | Invalid rule activation marker | PsErrInvalidRuleActivationMarker -- | Linear function found but LinearTypes not enabled | PsErrLinearFunction -- | Multi-way if-expression found but MultiWayIf not enabled | PsErrMultiWayIf -- | Explicit forall found but no extension allowing it is enabled | PsErrExplicitForall !Bool -- ^ is Unicode forall? 
-- | Found qualified-do without QualifiedDo enabled | PsErrIllegalQualifiedDo !SDoc -- | Cmm parser error | PsErrCmmParser !CmmParserError -- | Illegal traditional record syntax -- -- TODO: distinguish errors without using SDoc | PsErrIllegalTraditionalRecordSyntax !SDoc -- | Parse error in command -- -- TODO: distinguish errors without using SDoc | PsErrParseErrorInCmd !SDoc -- | Parse error in pattern | PsErrInPat !(PatBuilder GhcPs) !PsErrInPatDetails -- | Parse error in right operator section pattern -- TODO: embed the proper operator, if possible | forall infixOcc. (OutputableBndr infixOcc) => PsErrParseRightOpSectionInPat !infixOcc !(PatBuilder GhcPs) -- | Illegal linear arrow or multiplicity annotation in GADT record syntax | PsErrIllegalGadtRecordMultiplicity !(HsArrow GhcPs) | PsErrInvalidCApiImport -- | Extra details about a parse error, which helps -- us in determining which should be the hints to -- suggest. data PsErrParseDetails = PsErrParseDetails { ped_th_enabled :: !Bool -- Is 'TemplateHaskell' enabled? , ped_do_in_last_100 :: !Bool -- ^ Is there a 'do' in the last 100 characters? , ped_mdo_in_last_100 :: !Bool -- ^ Is there an 'mdo' in the last 100 characters? , ped_pat_syn_enabled :: !Bool -- ^ Is 'PatternSynonyms' enabled? , ped_pattern_parsed :: !Bool -- ^ Did we parse a \"pattern\" keyword? } -- | Is the parsed pattern recursive? data PatIsRecursive = YesPatIsRecursive | NoPatIsRecursive data PatIncompleteDoBlock = YesIncompleteDoBlock | NoIncompleteDoBlock deriving Eq -- | Extra information for the expression GHC is currently inspecting/parsing. -- It can be used to generate more informative parser diagnostics and hints. data ParseContext = ParseContext { is_infix :: !(Maybe RdrName) -- ^ If 'Just', this is an infix -- pattern with the binded operator name , incomplete_do_block :: !PatIncompleteDoBlock -- ^ Did the parser likely fail due to an incomplete do block? 
} deriving Eq data PsErrInPatDetails = PEIP_NegApp -- ^ Negative application pattern? | PEIP_TypeArgs [HsPatSigType GhcPs] -- ^ The list of type arguments for the pattern | PEIP_RecPattern [LPat GhcPs] -- ^ The pattern arguments !PatIsRecursive -- ^ Is the parsed pattern recursive? !ParseContext | PEIP_OtherPatDetails !ParseContext noParseContext :: ParseContext noParseContext = ParseContext Nothing NoIncompleteDoBlock incompleteDoBlock :: ParseContext incompleteDoBlock = ParseContext Nothing YesIncompleteDoBlock -- | Builds a 'PsErrInPatDetails' with the information provided by the 'ParseContext'. fromParseContext :: ParseContext -> PsErrInPatDetails fromParseContext = PEIP_OtherPatDetails data NumUnderscoreReason = NumUnderscore_Integral | NumUnderscore_Float deriving (Show,Eq,Ord) data LexErrKind = LexErrKind_EOF -- ^ End of input | LexErrKind_UTF8 -- ^ UTF-8 decoding error | LexErrKind_Char !Char -- ^ Error at given character deriving (Show,Eq,Ord) data LexErr = LexError -- ^ Lexical error | LexUnknownPragma -- ^ Unknown pragma | LexErrorInPragma -- ^ Lexical error in pragma | LexNumEscapeRange -- ^ Numeric escape sequence out of range | LexStringCharLit -- ^ Lexical error in string/character literal | LexStringCharLitEOF -- ^ Unexpected end-of-file in string/character literal | LexUnterminatedComment -- ^ Unterminated `{-' | LexUnterminatedOptions -- ^ Unterminated OPTIONS pragma | LexUnterminatedQQ -- ^ Unterminated quasiquotation -- | Errors from the Cmm parser data CmmParserError = CmmUnknownPrimitive !FastString -- ^ Unknown Cmm primitive | CmmUnknownMacro !FastString -- ^ Unknown macro | CmmUnknownCConv !String -- ^ Unknown calling convention | CmmUnrecognisedSafety !String -- ^ Unrecognised safety | CmmUnrecognisedHint !String -- ^ Unrecognised hint data TransLayoutReason = TransLayout_Where -- ^ "`where' clause at the same depth as implicit layout block" | TransLayout_Pipe -- ^ "`|' at the same depth as implicit layout block") data FileHeaderPragmaType 
= OptionsPrag | IncludePrag | LanguagePrag | DocOptionsPrag ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/HaddockLex.x0000644000000000000000000001657014472375231021314 0ustar0000000000000000{ {-# LANGUAGE BangPatterns #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# OPTIONS_GHC -funbox-strict-fields #-} module GHC.Parser.HaddockLex (lexHsDoc, lexStringLiteral) where import GHC.Prelude import GHC.Data.FastString import GHC.Hs.Doc import GHC.Parser.Lexer import GHC.Parser.Annotation import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Data.StringBuffer import qualified GHC.Data.Strict as Strict import GHC.Types.Name.Reader import GHC.Utils.Outputable import GHC.Utils.Error import GHC.Utils.Encoding import GHC.Hs.Extension import qualified GHC.Data.EnumSet as EnumSet import Data.Maybe import Data.Word import Data.ByteString ( ByteString ) import qualified Data.ByteString as BS import qualified GHC.LanguageExtensions as LangExt } -- ----------------------------------------------------------------------------- -- Alex "Character set macros" -- Copied from GHC/Parser/Lexer.x -- NB: The logic behind these definitions is also reflected in "GHC.Utils.Lexeme" -- Any changes here should likely be reflected there. $unispace = \x05 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $nl = [\n\r\f] $whitechar = [$nl\v\ $unispace] $white_no_nl = $whitechar # \n -- TODO #8424 $tab = \t $ascdigit = 0-9 $unidigit = \x03 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $decdigit = $ascdigit -- exactly $ascdigit, no more no less. $digit = [$ascdigit $unidigit] $special = [\(\)\,\;\[\]\`\{\}] $ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~\:] $unisymbol = \x04 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $symbol = [$ascsymbol $unisymbol] # [$special \_\"\'] $unilarge = \x01 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. 
$asclarge = [A-Z] $large = [$asclarge $unilarge] $unismall = \x02 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $ascsmall = [a-z] $small = [$ascsmall $unismall \_] $uniidchar = \x07 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $idchar = [$small $large $digit $uniidchar \'] $unigraphic = \x06 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $graphic = [$small $large $symbol $digit $idchar $special $unigraphic \"\'] $alpha = [$small $large] -- The character sets marked "TODO" are mostly overly inclusive -- and should be defined more precisely once alex has better -- support for unicode character sets (see -- https://github.com/simonmar/alex/issues/126). @id = $alpha $idchar* \#* | $symbol+ @modname = $large $idchar* @qualid = (@modname \.)* @id :- \' @qualid \' | \` @qualid \` { getIdentifier 1 } \'\` @qualid \`\' | \'\( @qualid \)\' | \`\( @qualid \)\` { getIdentifier 2 } [. \n] ; { data AlexInput = AlexInput { alexInput_position :: !RealSrcLoc , alexInput_string :: !ByteString } -- NB: As long as we don't use a left-context we don't need to track the -- previous input character. alexInputPrevChar :: AlexInput -> Word8 alexInputPrevChar = error "Left-context not supported" alexGetByte :: AlexInput -> Maybe (Word8, AlexInput) alexGetByte (AlexInput p s) = case utf8UnconsByteString s of Nothing -> Nothing Just (c,bs) -> Just (adjustChar c, AlexInput (advanceSrcLoc p c) bs) alexScanTokens :: RealSrcLoc -> ByteString -> [(RealSrcSpan, ByteString)] alexScanTokens start str0 = go (AlexInput start str0) where go inp@(AlexInput pos str) = case alexScan inp 0 of AlexSkip inp' _ln -> go inp' AlexToken inp'@(AlexInput _ str') _ act -> act pos (BS.length str - BS.length str') str : go inp' AlexEOF -> [] AlexError (AlexInput p _) -> error $ "lexical error at " ++ show p -------------------------------------------------------------------------------- -- | Extract identifier from Alex state. 
getIdentifier :: Int           -- ^ adornment length
              -> RealSrcLoc    -- ^ location at which the matched token starts
              -> Int           -- ^ token length, in bytes
              -> ByteString    -- ^ the remaining input beginning with the found token
              -> (RealSrcSpan, ByteString)
getIdentifier !adornLen !tokStart !tokLen !input =
    (mkRealSrcSpan identBegin identEnd, ident)
  where
    (adornment, afterAdorn) = BS.splitAt adornLen input
    -- The adornment (quotes/backticks/parens) appears on both sides of the
    -- identifier, hence the 2*adornLen bytes subtracted from the token.
    ident      = BS.take (tokLen - 2 * adornLen) afterAdorn
    identBegin = advanceSrcLocBS tokStart adornment
    identEnd   = advanceSrcLocBS identBegin ident

-- | Advance a source location over every UTF-8 character of a 'ByteString'.
advanceSrcLocBS :: RealSrcLoc -> ByteString -> RealSrcLoc
advanceSrcLocBS !start = go start
  where
    go !loc bs = case utf8UnconsByteString bs of
      Nothing        -> loc
      Just (c, rest) -> go (advanceSrcLoc loc c) rest

-- | Lex 'StringLiteral' for warning messages
lexStringLiteral :: P (LocatedN RdrName) -- ^ A precise identifier parser
                 -> Located StringLiteral
                 -> Located (WithHsDocIdentifiers StringLiteral GhcPs)
lexStringLiteral identParser (L l sl@(StringLiteral _ fs _))
    = L l (WithHsDocIdentifiers sl idents)
  where
    contents = bytesFS fs

    -- Keep only the candidate occurrences that the precise parser accepts.
    idents = mapMaybe (uncurry (validateIdentWith identParser)) plausibleIdents

    -- Candidate identifier occurrences found by the Alex scanner.  With a
    -- real location we compute real sub-spans; otherwise we scan from a fake
    -- location and propagate the original 'UnhelpfulSpan' reason.
    plausibleIdents :: [(SrcSpan, ByteString)]
    plausibleIdents = case l of
      RealSrcSpan sp _ ->
        [ (RealSrcSpan sp' Strict.Nothing, tok)
        | (sp', tok) <- alexScanTokens (realSrcSpanStart sp) contents ]
      UnhelpfulSpan reason ->
        [ (UnhelpfulSpan reason, tok)
        | (_, tok) <- alexScanTokens fakeLoc contents ]

    fakeLoc = mkRealSrcLoc (mkFastString "") 0 0

-- | Lex identifiers from a docstring.
lexHsDoc :: P (LocatedN RdrName) -- ^ A precise identifier parser -> HsDocString -> HsDoc GhcPs lexHsDoc identParser doc = WithHsDocIdentifiers doc idents where docStrings = docStringChunks doc idents = concat [mapMaybe maybeDocIdentifier (plausibleIdents doc) | doc <- docStrings] maybeDocIdentifier :: (SrcSpan, ByteString) -> Maybe (Located RdrName) maybeDocIdentifier = uncurry (validateIdentWith identParser) plausibleIdents :: LHsDocStringChunk -> [(SrcSpan,ByteString)] plausibleIdents (L (RealSrcSpan span _) (HsDocStringChunk s)) = [(RealSrcSpan span' Strict.Nothing, tok) | (span', tok) <- alexScanTokens (realSrcSpanStart span) s] plausibleIdents (L (UnhelpfulSpan reason) (HsDocStringChunk s)) = [(UnhelpfulSpan reason, tok) | (_, tok) <- alexScanTokens fakeLoc s] -- preserve the original reason fakeLoc = mkRealSrcLoc (mkFastString "") 0 0 validateIdentWith :: P (LocatedN RdrName) -> SrcSpan -> ByteString -> Maybe (Located RdrName) validateIdentWith identParser mloc str0 = let -- These ParserFlags should be as "inclusive" as possible, allowing -- identifiers defined with any language extension. 
pflags = mkParserOpts (EnumSet.fromList [LangExt.MagicHash]) dopts [] False False False False dopts = DiagOpts { diag_warning_flags = EnumSet.empty , diag_fatal_warning_flags = EnumSet.empty , diag_warn_is_error = False , diag_reverse_errors = False , diag_max_errors = Nothing , diag_ppr_ctx = defaultSDocContext } buffer = stringBufferFromByteString str0 realSrcLc = case mloc of RealSrcSpan loc _ -> realSrcSpanStart loc UnhelpfulSpan _ -> mkRealSrcLoc (mkFastString "") 0 0 pstate = initParserState pflags buffer realSrcLc in case unP identParser pstate of POk _ name -> Just $ case mloc of RealSrcSpan _ _ -> reLoc name UnhelpfulSpan _ -> L mloc (unLoc name) -- Preserve the original reason _ -> Nothing } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Header.hs0000644000000000000000000004663614472400112020632 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} ----------------------------------------------------------------------------- -- -- | Parsing the top of a Haskell source file to get its module name, -- imports and options. -- -- (c) Simon Marlow 2005 -- (c) Lemmih 2006 -- ----------------------------------------------------------------------------- module GHC.Parser.Header ( getImports , mkPrelImports -- used by the renamer too , getOptionsFromFile , getOptions , toArgs , checkProcessArgsResult ) where import GHC.Prelude import GHC.Data.Bag import GHC.Driver.Errors.Types -- Unfortunate, needed due to the fact we throw exceptions! 
import GHC.Parser.Errors.Types import GHC.Parser ( parseHeader ) import GHC.Parser.Lexer import GHC.Hs import GHC.Unit.Module import GHC.Builtin.Names import GHC.Types.Error import GHC.Types.SrcLoc import GHC.Types.SourceError import GHC.Types.SourceText import GHC.Types.PkgQual import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Monad import GHC.Utils.Error import GHC.Utils.Exception as Exception import GHC.Data.StringBuffer import GHC.Data.Maybe import GHC.Data.FastString import qualified GHC.Data.Strict as Strict import Control.Monad import System.IO import System.IO.Unsafe import Data.List (partition) import Data.Char (isSpace) import Text.ParserCombinators.ReadP (readP_to_S, gather) import Text.ParserCombinators.ReadPrec (readPrec_to_P) import Text.Read (readPrec) ------------------------------------------------------------------------------ -- | Parse the imports of a source file. -- -- Throws a 'SourceError' if parsing fails. getImports :: ParserOpts -- ^ Parser options -> Bool -- ^ Implicit Prelude? -> StringBuffer -- ^ Parse this. -> FilePath -- ^ Filename the buffer came from. Used for -- reporting parse error locations. -> FilePath -- ^ The original source filename (used for locations -- in the function result) -> IO (Either (Messages PsMessage) ([(RawPkgQual, Located ModuleName)], [(RawPkgQual, Located ModuleName)], Bool, -- Is GHC.Prim imported or not Located ModuleName)) -- ^ The source imports and normal imports (with optional package -- names from -XPackageImports), and the module name. getImports popts implicit_prelude buf filename source_filename = do let loc = mkRealSrcLoc (mkFastString filename) 1 1 case unP parseHeader (initParserState popts buf loc) of PFailed pst -> -- assuming we're not logging warnings here as per below return $ Left $ getPsErrorMessages pst POk pst rdr_module -> fmap Right $ do let (_warns, errs) = getPsMessages pst -- don't log warnings: they'll be reported when we parse the file -- for real. See #2500. 
if not (isEmptyMessages errs) then throwErrors (GhcPsMessage <$> errs) else let hsmod = unLoc rdr_module mb_mod = hsmodName hsmod imps = hsmodImports hsmod main_loc = srcLocSpan (mkSrcLoc (mkFastString source_filename) 1 1) mod = mb_mod `orElse` L (noAnnSrcSpan main_loc) mAIN_NAME (src_idecls, ord_idecls) = partition ((== IsBoot) . ideclSource . unLoc) imps -- GHC.Prim doesn't exist physically, so don't go looking for it. (ordinary_imps, ghc_prim_import) = partition ((/= moduleName gHC_PRIM) . unLoc . ideclName . unLoc) ord_idecls implicit_imports = mkPrelImports (unLoc mod) main_loc implicit_prelude imps convImport (L _ i) = (ideclPkgQual i, reLoc $ ideclName i) in return (map convImport src_idecls , map convImport (implicit_imports ++ ordinary_imps) , not (null ghc_prim_import) , reLoc mod) mkPrelImports :: ModuleName -> SrcSpan -- Attribute the "import Prelude" to this location -> Bool -> [LImportDecl GhcPs] -> [LImportDecl GhcPs] -- Construct the implicit declaration "import Prelude" (or not) -- -- NB: opt_NoImplicitPrelude is slightly different to import Prelude (); -- because the former doesn't even look at Prelude.hi for instance -- declarations, whereas the latter does. 
mkPrelImports this_mod loc implicit_prelude import_decls | this_mod == pRELUDE_NAME || explicit_prelude_import || not implicit_prelude = [] | otherwise = [preludeImportDecl] where explicit_prelude_import = any is_prelude_import import_decls is_prelude_import (L _ decl) = unLoc (ideclName decl) == pRELUDE_NAME -- allow explicit "base" package qualifier (#19082, #17045) && case ideclPkgQual decl of NoRawPkgQual -> True RawPkgQual b -> sl_fs b == unitIdFS baseUnitId loc' = noAnnSrcSpan loc preludeImportDecl :: LImportDecl GhcPs preludeImportDecl = L loc' $ ImportDecl { ideclExt = noAnn, ideclSourceSrc = NoSourceText, ideclName = L loc' pRELUDE_NAME, ideclPkgQual = NoRawPkgQual, ideclSource = NotBoot, ideclSafe = False, -- Not a safe import ideclQualified = NotQualified, ideclImplicit = True, -- Implicit! ideclAs = Nothing, ideclHiding = Nothing } -------------------------------------------------------------- -- Get options -------------------------------------------------------------- -- | Parse OPTIONS and LANGUAGE pragmas of the source file. -- -- Throws a 'SourceError' if flag parsing fails (including unsupported flags.) getOptionsFromFile :: ParserOpts -> FilePath -- ^ Input file -> IO (Messages PsMessage, [Located String]) -- ^ Parsed options, if any. getOptionsFromFile opts filename = Exception.bracket (openBinaryFile filename ReadMode) (hClose) (\handle -> do (warns, opts) <- fmap (getOptions' opts) (lazyGetToks opts' filename handle) seqList opts $ seqList (bagToList $ getMessages warns) $ return (warns, opts)) where -- We don't need to get haddock doc tokens when we're just -- getting the options from pragmas, and lazily lexing them -- correctly is a little tricky: If there is "\n" or "\n-" -- left at the end of a buffer then the haddock doc may -- continue past the end of the buffer, despite the fact that -- we already have an apparently-complete token. -- We therefore just turn Opt_Haddock off when doing the lazy -- lex. 
opts' = disableHaddock opts blockSize :: Int -- blockSize = 17 -- for testing :-) blockSize = 1024 lazyGetToks :: ParserOpts -> FilePath -> Handle -> IO [Located Token] lazyGetToks popts filename handle = do buf <- hGetStringBufferBlock handle blockSize let prag_state = initPragState popts buf loc unsafeInterleaveIO $ lazyLexBuf handle prag_state False blockSize where loc = mkRealSrcLoc (mkFastString filename) 1 1 lazyLexBuf :: Handle -> PState -> Bool -> Int -> IO [Located Token] lazyLexBuf handle state eof size = case unP (lexer False return) state of POk state' t -> do -- pprTrace "lazyLexBuf" (text (show (buffer state'))) (return ()) if atEnd (buffer state') && not eof -- if this token reached the end of the buffer, and we haven't -- necessarily read up to the end of the file, then the token might -- be truncated, so read some more of the file and lex it again. then getMore handle state size else case unLoc t of ITeof -> return [t] _other -> do rest <- lazyLexBuf handle state' eof size return (t : rest) _ | not eof -> getMore handle state size | otherwise -> return [L (mkSrcSpanPs (last_loc state)) ITeof] -- parser assumes an ITeof sentinel at the end getMore :: Handle -> PState -> Int -> IO [Located Token] getMore handle state size = do -- pprTrace "getMore" (text (show (buffer state))) (return ()) let new_size = size * 2 -- double the buffer size each time we read a new block. 
This -- counteracts the quadratic slowdown we otherwise get for very -- large module names (#5981) nextbuf <- hGetStringBufferBlock handle new_size if (len nextbuf == 0) then lazyLexBuf handle state True new_size else do newbuf <- appendStringBuffers (buffer state) nextbuf unsafeInterleaveIO $ lazyLexBuf handle state{buffer=newbuf} False new_size getToks :: ParserOpts -> FilePath -> StringBuffer -> [Located Token] getToks popts filename buf = lexAll pstate where pstate = initPragState popts buf loc loc = mkRealSrcLoc (mkFastString filename) 1 1 lexAll state = case unP (lexer False return) state of POk _ t@(L _ ITeof) -> [t] POk state' t -> t : lexAll state' _ -> [L (mkSrcSpanPs (last_loc state)) ITeof] -- | Parse OPTIONS and LANGUAGE pragmas of the source file. -- -- Throws a 'SourceError' if flag parsing fails (including unsupported flags.) getOptions :: ParserOpts -> StringBuffer -- ^ Input Buffer -> FilePath -- ^ Source filename. Used for location info. -> (Messages PsMessage,[Located String]) -- ^ warnings and parsed options. getOptions opts buf filename = getOptions' opts (getToks opts filename buf) -- The token parser is written manually because Happy can't -- return a partial result when it encounters a lexer error. -- We want to extract options before the buffer is passed through -- CPP, so we can't use the same trick as 'getImports'. getOptions' :: ParserOpts -> [Located Token] -- Input buffer -> (Messages PsMessage,[Located String]) -- Options. 
getOptions' opts toks = parseToks toks where parseToks (open:close:xs) | IToptions_prag str <- unLoc open , ITclose_prag <- unLoc close = case toArgs starting_loc str of Left _err -> optionsParseError str $ -- #15053 combineSrcSpans (getLoc open) (getLoc close) Right args -> fmap (args ++) (parseToks xs) where src_span = getLoc open real_src_span = expectJust "getOptions'" (srcSpanToRealSrcSpan src_span) starting_loc = realSrcSpanStart real_src_span parseToks (open:close:xs) | ITinclude_prag str <- unLoc open , ITclose_prag <- unLoc close = fmap (map (L (getLoc open)) ["-#include",removeSpaces str] ++) (parseToks xs) parseToks (open:close:xs) | ITdocOptions str _ <- unLoc open , ITclose_prag <- unLoc close = fmap (map (L (getLoc open)) ["-haddock-opts", removeSpaces str] ++) (parseToks xs) parseToks (open:xs) | ITlanguage_prag <- unLoc open = parseLanguage xs parseToks (comment:xs) -- Skip over comments | isComment (unLoc comment) = parseToks xs -- At the end of the header, warn about all the misplaced pragmas parseToks xs = (unionManyMessages $ mapMaybe mkMessage xs ,[]) parseLanguage ((L loc (ITconid fs)):rest) = fmap (checkExtension opts (L loc fs) :) $ case rest of (L _loc ITcomma):more -> parseLanguage more (L _loc ITclose_prag):more -> parseToks more (L loc _):_ -> languagePragParseError loc [] -> panic "getOptions'.parseLanguage(1) went past eof token" parseLanguage (tok:_) = languagePragParseError (getLoc tok) parseLanguage [] = panic "getOptions'.parseLanguage(2) went past eof token" -- Warn for all the misplaced pragmas mkMessage :: Located Token -> Maybe (Messages PsMessage) mkMessage (L loc token) | IToptions_prag _ <- token = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma OptionsPrag)) | ITinclude_prag _ <- token = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma IncludePrag)) | ITdocOptions _ _ <- token = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma 
DocOptionsPrag)) | ITlanguage_prag <- token = Just (singleMessage $ mkPlainMsgEnvelope diag_opts loc (PsWarnMisplacedPragma LanguagePrag)) | otherwise = Nothing where diag_opts = pDiagOpts opts isComment :: Token -> Bool isComment c = case c of (ITlineComment {}) -> True (ITblockComment {}) -> True (ITdocComment {}) -> True _ -> False toArgs :: RealSrcLoc -> String -> Either String -- Error [Located String] -- Args toArgs starting_loc orig_str = let (after_spaces_loc, after_spaces_str) = consume_spaces starting_loc orig_str in case after_spaces_str of '[':after_bracket -> let after_bracket_loc = advanceSrcLoc after_spaces_loc '[' (after_bracket_spaces_loc, after_bracket_spaces_str) = consume_spaces after_bracket_loc after_bracket in case after_bracket_spaces_str of ']':rest | all isSpace rest -> Right [] _ -> readAsList after_bracket_spaces_loc after_bracket_spaces_str _ -> toArgs' after_spaces_loc after_spaces_str where consume_spaces :: RealSrcLoc -> String -> (RealSrcLoc, String) consume_spaces loc [] = (loc, []) consume_spaces loc (c:cs) | isSpace c = consume_spaces (advanceSrcLoc loc c) cs | otherwise = (loc, c:cs) break_with_loc :: (Char -> Bool) -> RealSrcLoc -> String -> (String, RealSrcLoc, String) -- location is start of second string break_with_loc p = go [] where go reversed_acc loc [] = (reverse reversed_acc, loc, []) go reversed_acc loc (c:cs) | p c = (reverse reversed_acc, loc, c:cs) | otherwise = go (c:reversed_acc) (advanceSrcLoc loc c) cs advance_src_loc_many :: RealSrcLoc -> String -> RealSrcLoc advance_src_loc_many = foldl' advanceSrcLoc locate :: RealSrcLoc -> RealSrcLoc -> a -> Located a locate begin end x = L (RealSrcSpan (mkRealSrcSpan begin end) Strict.Nothing) x toArgs' :: RealSrcLoc -> String -> Either String [Located String] -- Remove outer quotes: -- > toArgs' "\"foo\" \"bar baz\"" -- Right ["foo", "bar baz"] -- -- Keep inner quotes: -- > toArgs' "-DFOO=\"bar baz\"" -- Right ["-DFOO=\"bar baz\""] toArgs' loc s = let (after_spaces_loc, 
after_spaces_str) = consume_spaces loc s in case after_spaces_str of [] -> Right [] '"' : _ -> do -- readAsString removes outer quotes (arg, new_loc, rest) <- readAsString after_spaces_loc after_spaces_str check_for_space rest (locate after_spaces_loc new_loc arg:) `fmap` toArgs' new_loc rest _ -> case break_with_loc (isSpace <||> (== '"')) after_spaces_loc after_spaces_str of (argPart1, loc2, s''@('"':_)) -> do (argPart2, loc3, rest) <- readAsString loc2 s'' check_for_space rest -- show argPart2 to keep inner quotes (locate after_spaces_loc loc3 (argPart1 ++ show argPart2):) `fmap` toArgs' loc3 rest (arg, loc2, s'') -> (locate after_spaces_loc loc2 arg:) `fmap` toArgs' loc2 s'' check_for_space :: String -> Either String () check_for_space [] = Right () check_for_space (c:_) | isSpace c = Right () | otherwise = Left ("Whitespace expected after string in " ++ show orig_str) reads_with_consumed :: Read a => String -> [((String, a), String)] -- ((consumed string, parsed result), remainder of input) reads_with_consumed = readP_to_S (gather (readPrec_to_P readPrec 0)) readAsString :: RealSrcLoc -> String -> Either String (String, RealSrcLoc, String) readAsString loc s = case reads_with_consumed s of [((consumed, arg), rest)] -> Right (arg, advance_src_loc_many loc consumed, rest) _ -> Left ("Couldn't read " ++ show s ++ " as String") -- input has had the '[' stripped off readAsList :: RealSrcLoc -> String -> Either String [Located String] readAsList loc s = do let (after_spaces_loc, after_spaces_str) = consume_spaces loc s (arg, after_arg_loc, after_arg_str) <- readAsString after_spaces_loc after_spaces_str let (after_arg_spaces_loc, after_arg_spaces_str) = consume_spaces after_arg_loc after_arg_str (locate after_spaces_loc after_arg_loc arg :) <$> case after_arg_spaces_str of ',':after_comma -> readAsList (advanceSrcLoc after_arg_spaces_loc ',') after_comma ']':after_bracket | all isSpace after_bracket -> Right [] _ -> Left ("Couldn't read " ++ show ('[' : s) ++ " as 
[String]") -- reinsert missing '[' for clarity. ----------------------------------------------------------------------------- -- | Complain about non-dynamic flags in OPTIONS pragmas. -- -- Throws a 'SourceError' if the input list is non-empty claiming that the -- input flags are unknown. checkProcessArgsResult :: MonadIO m => [Located String] -> m () checkProcessArgsResult flags = when (notNull flags) $ liftIO $ throwErrors $ foldMap (singleMessage . mkMsg) flags where mkMsg (L loc flag) = mkPlainErrorMsgEnvelope loc $ GhcPsMessage $ PsHeaderMessage $ PsErrUnknownOptionsPragma flag ----------------------------------------------------------------------------- checkExtension :: ParserOpts -> Located FastString -> Located String checkExtension opts (L l ext) -- Checks if a given extension is valid, and if so returns -- its corresponding flag. Otherwise it throws an exception. = if ext' `elem` (pSupportedExts opts) then L l ("-X"++ext') else unsupportedExtnError opts l ext' where ext' = unpackFS ext languagePragParseError :: SrcSpan -> a languagePragParseError loc = throwErr loc $ PsErrParseLanguagePragma unsupportedExtnError :: ParserOpts -> SrcSpan -> String -> a unsupportedExtnError opts loc unsup = throwErr loc $ PsErrUnsupportedExt unsup (pSupportedExts opts) optionsParseError :: String -> SrcSpan -> a -- #15053 optionsParseError str loc = throwErr loc $ PsErrParseOptionsPragma str throwErr :: SrcSpan -> PsHeaderMessage -> a -- #15053 throwErr loc ps_msg = let msg = mkPlainErrorMsgEnvelope loc $ GhcPsMessage (PsHeaderMessage ps_msg) in throw $ mkSrcErr $ singleMessage msg ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/PostProcess.hs0000644000000000000000000037753714472400112021735 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE 
DataKinds #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- -- (c) The University of Glasgow 2002-2006 -- -- Functions over HsSyn specialised to RdrName. module GHC.Parser.PostProcess ( mkRdrGetField, mkRdrProjection, Fbind, -- RecordDot mkHsOpApp, mkHsIntegral, mkHsFractional, mkHsIsString, mkHsDo, mkSpliceDecl, mkRoleAnnotDecl, mkClassDecl, mkTyData, mkDataFamInst, mkTySynonym, mkTyFamInstEqn, mkStandaloneKindSig, mkTyFamInst, mkFamDecl, mkInlinePragma, mkOpaquePragma, mkPatSynMatchGroup, mkRecConstrOrUpdate, mkTyClD, mkInstD, mkRdrRecordCon, mkRdrRecordUpd, setRdrNameSpace, fromSpecTyVarBndr, fromSpecTyVarBndrs, annBinds, fixValbindsAnn, cvBindGroup, cvBindsAndSigs, cvTopDecls, placeHolderPunRhs, -- Stuff to do with Foreign declarations mkImport, parseCImport, mkExport, mkExtName, -- RdrName -> CLabelString mkGadtDecl, -- [LocatedA RdrName] -> LHsType RdrName -> ConDecl RdrName mkConDeclH98, -- Bunch of functions in the parser monad for -- checking and constructing values checkImportDecl, checkExpBlockArguments, checkCmdBlockArguments, checkPrecP, -- Int -> P Int checkContext, -- HsType -> P HsContext checkPattern, -- HsExp -> P HsPat checkPattern_details, incompleteDoBlock, ParseContext(..), checkMonadComp, -- P (HsStmtContext GhcPs) checkValDef, -- (SrcLoc, HsExp, HsRhs, [HsDecl]) -> P HsDecl checkValSigLhs, LRuleTyTmVar, RuleTyTmVar(..), mkRuleBndrs, mkRuleTyVarBndrs, checkRuleTyVarBndrNames, checkRecordSyntax, checkEmptyGADTs, addFatalError, hintBangPat, mkBangTy, UnpackednessPragma(..), mkMultTy, -- Token location mkTokenLocation, -- Help with processing exports ImpExpSubSpec(..), ImpExpQcSpec(..), mkModuleImpExp, mkTypeImpExp, mkImpExpSubSpec, checkImportSpec, -- Token symbols starSym, -- Warnings and errors warnStarIsType, warnPrepositiveQualifiedModule, failOpFewArgs, failOpNotEnabledImportQualifiedPost, failOpImportQualifiedTwice, SumOrTuple (..), -- Expression/command/pattern ambiguity resolution PV, runPV, ECP(ECP, unECP), DisambInfixOp(..), 
DisambECP(..), ecpFromExp, ecpFromCmd, PatBuilder, -- Type/datacon ambiguity resolution DisambTD(..), addUnpackednessP, dataConBuilderCon, dataConBuilderDetails, ) where import GHC.Prelude import GHC.Hs -- Lots of it import GHC.Core.TyCon ( TyCon, isTupleTyCon, tyConSingleDataCon_maybe ) import GHC.Core.DataCon ( DataCon, dataConTyCon ) import GHC.Core.ConLike ( ConLike(..) ) import GHC.Core.Coercion.Axiom ( Role, fsFromRole ) import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Unit.Module (ModuleName) import GHC.Types.Basic import GHC.Types.Error import GHC.Types.Fixity import GHC.Types.Hint import GHC.Types.SourceText import GHC.Parser.Types import GHC.Parser.Lexer import GHC.Parser.Errors.Types import GHC.Parser.Errors.Ppr () import GHC.Utils.Lexeme ( okConOcc ) import GHC.Types.TyThing import GHC.Core.Type ( unrestrictedFunTyCon, Specificity(..) ) import GHC.Builtin.Types( cTupleTyConName, tupleTyCon, tupleDataCon, nilDataConName, nilDataConKey, listTyConName, listTyConKey ) import GHC.Types.ForeignCall import GHC.Types.SrcLoc import GHC.Types.Unique ( hasKey ) import GHC.Data.OrdList import GHC.Utils.Outputable as Outputable import GHC.Data.FastString import GHC.Data.Maybe import GHC.Utils.Error import GHC.Utils.Misc import Data.Either import Data.List ( findIndex ) import Data.Foldable import qualified Data.Semigroup as Semi import GHC.Utils.Panic import GHC.Utils.Panic.Plain import qualified GHC.Data.Strict as Strict import Control.Monad import Text.ParserCombinators.ReadP as ReadP import Data.Char import Data.Data ( dataTypeOf, fromConstr, dataTypeConstrs ) import Data.Kind ( Type ) import Data.List.NonEmpty (NonEmpty) {- ********************************************************************** Construction functions for Rdr stuff ********************************************************************* -} -- | mkClassDecl builds a RdrClassDecl, filling in the names for tycon and -- datacon by deriving them from the name of the class. 
We fill in the names -- for the tycon and datacon corresponding to the class, by deriving them -- from the name of the class itself. This saves recording the names in the -- interface file (which would be equally good). -- Similarly for mkConDecl, mkClassOpSig and default-method names. -- *** See Note [The Naming story] in GHC.Hs.Decls **** mkTyClD :: LTyClDecl (GhcPass p) -> LHsDecl (GhcPass p) mkTyClD (L loc d) = L loc (TyClD noExtField d) mkInstD :: LInstDecl (GhcPass p) -> LHsDecl (GhcPass p) mkInstD (L loc d) = L loc (InstD noExtField d) mkClassDecl :: SrcSpan -> Located (Maybe (LHsContext GhcPs), LHsType GhcPs) -> Located (a,[LHsFunDep GhcPs]) -> OrdList (LHsDecl GhcPs) -> LayoutInfo -> [AddEpAnn] -> P (LTyClDecl GhcPs) mkClassDecl loc' (L _ (mcxt, tycl_hdr)) fds where_cls layoutInfo annsIn = do { let loc = noAnnSrcSpan loc' ; (binds, sigs, ats, at_defs, _, docs) <- cvBindsAndSigs where_cls ; (cls, tparams, fixity, ann) <- checkTyClHdr True tycl_hdr ; tyvars <- checkTyVars (text "class") whereDots cls tparams ; cs <- getCommentsFor (locA loc) -- Get any remaining comments ; let anns' = addAnns (EpAnn (spanAsAnchor $ locA loc) annsIn emptyComments) ann cs ; return (L loc (ClassDecl { tcdCExt = (anns', NoAnnSortKey, layoutInfo) , tcdCtxt = mcxt , tcdLName = cls, tcdTyVars = tyvars , tcdFixity = fixity , tcdFDs = snd (unLoc fds) , tcdSigs = mkClassOpSigs sigs , tcdMeths = binds , tcdATs = ats, tcdATDefs = at_defs , tcdDocs = docs })) } mkTyData :: SrcSpan -> NewOrData -> Maybe (LocatedP CType) -> Located (Maybe (LHsContext GhcPs), LHsType GhcPs) -> Maybe (LHsKind GhcPs) -> [LConDecl GhcPs] -> Located (HsDeriving GhcPs) -> [AddEpAnn] -> P (LTyClDecl GhcPs) mkTyData loc' new_or_data cType (L _ (mcxt, tycl_hdr)) ksig data_cons (L _ maybe_deriv) annsIn = do { let loc = noAnnSrcSpan loc' ; (tc, tparams, fixity, ann) <- checkTyClHdr False tycl_hdr ; tyvars <- checkTyVars (ppr new_or_data) equalsDots tc tparams ; cs <- getCommentsFor (locA loc) -- Get any remaining 
comments ; let anns' = addAnns (EpAnn (spanAsAnchor $ locA loc) annsIn emptyComments) ann cs ; defn <- mkDataDefn new_or_data cType mcxt ksig data_cons maybe_deriv ; return (L loc (DataDecl { tcdDExt = anns', tcdLName = tc, tcdTyVars = tyvars, tcdFixity = fixity, tcdDataDefn = defn })) } mkDataDefn :: NewOrData -> Maybe (LocatedP CType) -> Maybe (LHsContext GhcPs) -> Maybe (LHsKind GhcPs) -> [LConDecl GhcPs] -> HsDeriving GhcPs -> P (HsDataDefn GhcPs) mkDataDefn new_or_data cType mcxt ksig data_cons maybe_deriv = do { checkDatatypeContext mcxt ; return (HsDataDefn { dd_ext = noExtField , dd_ND = new_or_data, dd_cType = cType , dd_ctxt = mcxt , dd_cons = data_cons , dd_kindSig = ksig , dd_derivs = maybe_deriv }) } mkTySynonym :: SrcSpan -> LHsType GhcPs -- LHS -> LHsType GhcPs -- RHS -> [AddEpAnn] -> P (LTyClDecl GhcPs) mkTySynonym loc lhs rhs annsIn = do { (tc, tparams, fixity, ann) <- checkTyClHdr False lhs ; cs1 <- getCommentsFor loc -- Add any API Annotations to the top SrcSpan [temp] ; tyvars <- checkTyVars (text "type") equalsDots tc tparams ; cs2 <- getCommentsFor loc -- Add any API Annotations to the top SrcSpan [temp] ; let anns' = addAnns (EpAnn (spanAsAnchor loc) annsIn emptyComments) ann (cs1 Semi.<> cs2) ; return (L (noAnnSrcSpan loc) (SynDecl { tcdSExt = anns' , tcdLName = tc, tcdTyVars = tyvars , tcdFixity = fixity , tcdRhs = rhs })) } mkStandaloneKindSig :: SrcSpan -> Located [LocatedN RdrName] -- LHS -> LHsSigType GhcPs -- RHS -> [AddEpAnn] -> P (LStandaloneKindSig GhcPs) mkStandaloneKindSig loc lhs rhs anns = do { vs <- mapM check_lhs_name (unLoc lhs) ; v <- check_singular_lhs (reverse vs) ; cs <- getCommentsFor loc ; return $ L (noAnnSrcSpan loc) $ StandaloneKindSig (EpAnn (spanAsAnchor loc) anns cs) v rhs } where check_lhs_name v@(unLoc->name) = if isUnqual name && isTcOcc (rdrNameOcc name) then return v else addFatalError $ mkPlainErrorMsgEnvelope (getLocA v) $ (PsErrUnexpectedQualifiedConstructor (unLoc v)) check_singular_lhs vs = case vs of [] 
-> panic "mkStandaloneKindSig: empty left-hand side" [v] -> return v _ -> addFatalError $ mkPlainErrorMsgEnvelope (getLoc lhs) $ (PsErrMultipleNamesInStandaloneKindSignature vs) mkTyFamInstEqn :: SrcSpan -> HsOuterFamEqnTyVarBndrs GhcPs -> LHsType GhcPs -> LHsType GhcPs -> [AddEpAnn] -> P (LTyFamInstEqn GhcPs) mkTyFamInstEqn loc bndrs lhs rhs anns = do { (tc, tparams, fixity, ann) <- checkTyClHdr False lhs ; cs <- getCommentsFor loc ; return (L (noAnnSrcSpan loc) $ FamEqn { feqn_ext = EpAnn (spanAsAnchor loc) (anns `mappend` ann) cs , feqn_tycon = tc , feqn_bndrs = bndrs , feqn_pats = tparams , feqn_fixity = fixity , feqn_rhs = rhs })} mkDataFamInst :: SrcSpan -> NewOrData -> Maybe (LocatedP CType) -> (Maybe ( LHsContext GhcPs), HsOuterFamEqnTyVarBndrs GhcPs , LHsType GhcPs) -> Maybe (LHsKind GhcPs) -> [LConDecl GhcPs] -> Located (HsDeriving GhcPs) -> [AddEpAnn] -> P (LInstDecl GhcPs) mkDataFamInst loc new_or_data cType (mcxt, bndrs, tycl_hdr) ksig data_cons (L _ maybe_deriv) anns = do { (tc, tparams, fixity, ann) <- checkTyClHdr False tycl_hdr ; cs <- getCommentsFor loc -- Add any API Annotations to the top SrcSpan ; let fam_eqn_ans = addAnns (EpAnn (spanAsAnchor loc) ann cs) anns emptyComments ; defn <- mkDataDefn new_or_data cType mcxt ksig data_cons maybe_deriv ; return (L (noAnnSrcSpan loc) (DataFamInstD noExtField (DataFamInstDecl (FamEqn { feqn_ext = fam_eqn_ans , feqn_tycon = tc , feqn_bndrs = bndrs , feqn_pats = tparams , feqn_fixity = fixity , feqn_rhs = defn })))) } -- mkDataFamInst loc new_or_data cType (mcxt, bndrs, tycl_hdr) -- ksig data_cons (L _ maybe_deriv) anns -- = do { (tc, tparams, fixity, ann) <- checkTyClHdr False tycl_hdr -- ; cs <- getCommentsFor loc -- Add any API Annotations to the top SrcSpan -- ; let anns' = addAnns (EpAnn (spanAsAnchor loc) ann cs) anns emptyComments -- ; defn <- mkDataDefn new_or_data cType mcxt ksig data_cons maybe_deriv -- ; return (L (noAnnSrcSpan loc) (DataFamInstD anns' (DataFamInstDecl -- (FamEqn { feqn_ext = 
anns' -- , feqn_tycon = tc -- , feqn_bndrs = bndrs -- , feqn_pats = tparams -- , feqn_fixity = fixity -- , feqn_rhs = defn })))) } mkTyFamInst :: SrcSpan -> TyFamInstEqn GhcPs -> [AddEpAnn] -> P (LInstDecl GhcPs) mkTyFamInst loc eqn anns = do cs <- getCommentsFor loc return (L (noAnnSrcSpan loc) (TyFamInstD noExtField (TyFamInstDecl (EpAnn (spanAsAnchor loc) anns cs) eqn))) mkFamDecl :: SrcSpan -> FamilyInfo GhcPs -> TopLevelFlag -> LHsType GhcPs -- LHS -> LFamilyResultSig GhcPs -- Optional result signature -> Maybe (LInjectivityAnn GhcPs) -- Injectivity annotation -> [AddEpAnn] -> P (LTyClDecl GhcPs) mkFamDecl loc info topLevel lhs ksig injAnn annsIn = do { (tc, tparams, fixity, ann) <- checkTyClHdr False lhs ; cs1 <- getCommentsFor loc -- Add any API Annotations to the top SrcSpan [temp] ; tyvars <- checkTyVars (ppr info) equals_or_where tc tparams ; cs2 <- getCommentsFor loc -- Add any API Annotations to the top SrcSpan [temp] ; let anns' = addAnns (EpAnn (spanAsAnchor loc) annsIn emptyComments) ann (cs1 Semi.<> cs2) ; return (L (noAnnSrcSpan loc) (FamDecl noExtField (FamilyDecl { fdExt = anns' , fdTopLevel = topLevel , fdInfo = info, fdLName = tc , fdTyVars = tyvars , fdFixity = fixity , fdResultSig = ksig , fdInjectivityAnn = injAnn }))) } where equals_or_where = case info of DataFamily -> empty OpenTypeFamily -> empty ClosedTypeFamily {} -> whereDots mkSpliceDecl :: LHsExpr GhcPs -> P (LHsDecl GhcPs) -- If the user wrote -- [pads| ... ] then return a QuasiQuoteD -- $(e) then return a SpliceD -- but if they wrote, say, -- f x then behave as if they'd written $(f x) -- ie a SpliceD -- -- Typed splices are not allowed at the top level, thus we do not represent them -- as spliced declaration. 
See #10945 mkSpliceDecl lexpr@(L loc expr) | HsSpliceE _ splice@(HsUntypedSplice {}) <- expr = do cs <- getCommentsFor (locA loc) return $ L (addCommentsToSrcAnn loc cs) $ SpliceD noExtField (SpliceDecl noExtField (L loc splice) ExplicitSplice) | HsSpliceE _ splice@(HsQuasiQuote {}) <- expr = do cs <- getCommentsFor (locA loc) return $ L (addCommentsToSrcAnn loc cs) $ SpliceD noExtField (SpliceDecl noExtField (L loc splice) ExplicitSplice) | otherwise = do cs <- getCommentsFor (locA loc) return $ L (addCommentsToSrcAnn loc cs) $ SpliceD noExtField (SpliceDecl noExtField (L loc (mkUntypedSplice noAnn BareSplice lexpr)) ImplicitSplice) mkRoleAnnotDecl :: SrcSpan -> LocatedN RdrName -- type being annotated -> [Located (Maybe FastString)] -- roles -> [AddEpAnn] -> P (LRoleAnnotDecl GhcPs) mkRoleAnnotDecl loc tycon roles anns = do { roles' <- mapM parse_role roles ; cs <- getCommentsFor loc ; return $ L (noAnnSrcSpan loc) $ RoleAnnotDecl (EpAnn (spanAsAnchor loc) anns cs) tycon roles' } where role_data_type = dataTypeOf (undefined :: Role) all_roles = map fromConstr $ dataTypeConstrs role_data_type possible_roles = [(fsFromRole role, role) | role <- all_roles] parse_role (L loc_role Nothing) = return $ L (noAnnSrcSpan loc_role) Nothing parse_role (L loc_role (Just role)) = case lookup role possible_roles of Just found_role -> return $ L (noAnnSrcSpan loc_role) $ Just found_role Nothing -> let nearby = fuzzyLookup (unpackFS role) (mapFst unpackFS possible_roles) in addFatalError $ mkPlainErrorMsgEnvelope loc_role $ (PsErrIllegalRoleName role nearby) -- | Converts a list of 'LHsTyVarBndr's annotated with their 'Specificity' to -- binders without annotations. Only accepts specified variables, and errors if -- any of the provided binders has an 'InferredSpec' annotation. 
fromSpecTyVarBndrs :: [LHsTyVarBndr Specificity GhcPs] -> P [LHsTyVarBndr () GhcPs] fromSpecTyVarBndrs = mapM fromSpecTyVarBndr -- | Converts 'LHsTyVarBndr' annotated with its 'Specificity' to one without -- annotations. Only accepts specified variables, and errors if the provided -- binder has an 'InferredSpec' annotation. fromSpecTyVarBndr :: LHsTyVarBndr Specificity GhcPs -> P (LHsTyVarBndr () GhcPs) fromSpecTyVarBndr bndr = case bndr of (L loc (UserTyVar xtv flag idp)) -> (check_spec flag loc) >> return (L loc $ UserTyVar xtv () idp) (L loc (KindedTyVar xtv flag idp k)) -> (check_spec flag loc) >> return (L loc $ KindedTyVar xtv () idp k) where check_spec :: Specificity -> SrcSpanAnnA -> P () check_spec SpecifiedSpec _ = return () check_spec InferredSpec loc = addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $ PsErrInferredTypeVarNotAllowed -- | Add the annotation for a 'where' keyword to existing @HsLocalBinds@ annBinds :: AddEpAnn -> EpAnnComments -> HsLocalBinds GhcPs -> (HsLocalBinds GhcPs, Maybe EpAnnComments) annBinds a cs (HsValBinds an bs) = (HsValBinds (add_where a an cs) bs, Nothing) annBinds a cs (HsIPBinds an bs) = (HsIPBinds (add_where a an cs) bs, Nothing) annBinds _ cs (EmptyLocalBinds x) = (EmptyLocalBinds x, Just cs) add_where :: AddEpAnn -> EpAnn AnnList -> EpAnnComments -> EpAnn AnnList add_where an@(AddEpAnn _ (EpaSpan rs)) (EpAnn a (AnnList anc o c r t) cs) cs2 | valid_anchor (anchor a) = EpAnn (widenAnchor a [an]) (AnnList anc o c (an:r) t) (cs Semi.<> cs2) | otherwise = EpAnn (patch_anchor rs a) (AnnList (fmap (patch_anchor rs) anc) o c (an:r) t) (cs Semi.<> cs2) add_where an@(AddEpAnn _ (EpaSpan rs)) EpAnnNotUsed cs = EpAnn (Anchor rs UnchangedAnchor) (AnnList (Just $ Anchor rs UnchangedAnchor) Nothing Nothing [an] []) cs add_where (AddEpAnn _ (EpaDelta _ _)) _ _ = panic "add_where" -- EpaDelta should only be used for transformations valid_anchor :: RealSrcSpan -> Bool valid_anchor r = srcSpanStartLine r >= 0 -- If the decl list for 
where binds is empty, the anchor ends up -- invalid. In this case, use the parent one patch_anchor :: RealSrcSpan -> Anchor -> Anchor patch_anchor r1 (Anchor r0 op) = Anchor r op where r = if srcSpanStartLine r0 < 0 then r1 else r0 fixValbindsAnn :: EpAnn AnnList -> EpAnn AnnList fixValbindsAnn EpAnnNotUsed = EpAnnNotUsed fixValbindsAnn (EpAnn anchor (AnnList ma o c r t) cs) = (EpAnn (widenAnchor anchor (map trailingAnnToAddEpAnn t)) (AnnList ma o c r t) cs) {- ********************************************************************** #cvBinds-etc# Converting to @HsBinds@, etc. ********************************************************************* -} -- | Function definitions are restructured here. Each is assumed to be recursive -- initially, and non recursive definitions are discovered by the dependency -- analyser. -- | Groups together bindings for a single function cvTopDecls :: OrdList (LHsDecl GhcPs) -> [LHsDecl GhcPs] cvTopDecls decls = getMonoBindAll (fromOL decls) -- Declaration list may only contain value bindings and signatures. cvBindGroup :: OrdList (LHsDecl GhcPs) -> P (HsValBinds GhcPs) cvBindGroup binding = do { (mbs, sigs, fam_ds, tfam_insts , dfam_insts, _) <- cvBindsAndSigs binding ; massert (null fam_ds && null tfam_insts && null dfam_insts) ; return $ ValBinds NoAnnSortKey mbs sigs } cvBindsAndSigs :: OrdList (LHsDecl GhcPs) -> P (LHsBinds GhcPs, [LSig GhcPs], [LFamilyDecl GhcPs] , [LTyFamInstDecl GhcPs], [LDataFamInstDecl GhcPs], [LDocDecl GhcPs]) -- Input decls contain just value bindings and signatures -- and in case of class or instance declarations also -- associated type declarations. They might also contain Haddock comments. 
cvBindsAndSigs fb = do fb' <- drop_bad_decls (fromOL fb) return (partitionBindsAndSigs (getMonoBindAll fb')) where -- cvBindsAndSigs is called in several places in the parser, -- and its items can be produced by various productions: -- -- * decl (when parsing a where clause or a let-expression) -- * decl_inst (when parsing an instance declaration) -- * decl_cls (when parsing a class declaration) -- -- partitionBindsAndSigs can handle almost all declaration forms produced -- by the aforementioned productions, except for SpliceD, which we filter -- out here (in drop_bad_decls). -- -- We're not concerned with every declaration form possible, such as those -- produced by the topdecl parser production, because cvBindsAndSigs is not -- called on top-level declarations. drop_bad_decls [] = return [] drop_bad_decls (L l (SpliceD _ d) : ds) = do addError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrDeclSpliceNotAtTopLevel d drop_bad_decls ds drop_bad_decls (d:ds) = (d:) <$> drop_bad_decls ds ----------------------------------------------------------------------------- -- Group function bindings into equation groups getMonoBind :: LHsBind GhcPs -> [LHsDecl GhcPs] -> (LHsBind GhcPs, [LHsDecl GhcPs]) -- Suppose (b',ds') = getMonoBind b ds -- ds is a list of parsed bindings -- b is a MonoBinds that has just been read off the front -- Then b' is the result of grouping more equations from ds that -- belong with b into a single MonoBinds, and ds' is the depleted -- list of parsed bindings. -- -- All Haddock comments between equations inside the group are -- discarded. 
-- -- No AndMonoBinds or EmptyMonoBinds here; just single equations getMonoBind (L loc1 (FunBind { fun_id = fun_id1@(L _ f1) , fun_matches = MG { mg_alts = (L _ m1@[L _ mtchs1]) } })) binds | has_args m1 = go [L (removeCommentsA loc1) mtchs1] (commentsOnlyA loc1) binds [] where go :: [LMatch GhcPs (LHsExpr GhcPs)] -> SrcSpanAnnA -> [LHsDecl GhcPs] -> [LHsDecl GhcPs] -> (LHsBind GhcPs,[LHsDecl GhcPs]) -- AZ go mtchs loc ((L loc2 (ValD _ (FunBind { fun_id = (L _ f2) , fun_matches = MG { mg_alts = (L _ [L lm2 mtchs2]) } }))) : binds) _ | f1 == f2 = let (loc2', lm2') = transferAnnsA loc2 lm2 in go (L lm2' mtchs2 : mtchs) (combineSrcSpansA loc loc2') binds [] go mtchs loc (doc_decl@(L loc2 (DocD {})) : binds) doc_decls = let doc_decls' = doc_decl : doc_decls in go mtchs (combineSrcSpansA loc loc2) binds doc_decls' go mtchs loc binds doc_decls = ( L loc (makeFunBind fun_id1 (mkLocatedList $ reverse mtchs)) , (reverse doc_decls) ++ binds) -- Reverse the final matches, to get it back in the right order -- Do the same thing with the trailing doc comments getMonoBind bind binds = (bind, binds) -- Group together adjacent FunBinds for every function. getMonoBindAll :: [LHsDecl GhcPs] -> [LHsDecl GhcPs] getMonoBindAll [] = [] getMonoBindAll (L l (ValD _ b) : ds) = let (L l' b', ds') = getMonoBind (L l b) ds in L l' (ValD noExtField b') : getMonoBindAll ds' getMonoBindAll (d : ds) = d : getMonoBindAll ds has_args :: [LMatch GhcPs (LHsExpr GhcPs)] -> Bool has_args [] = panic "GHC.Parser.PostProcess.has_args" has_args (L _ (Match { m_pats = args }) : _) = not (null args) -- Don't group together FunBinds if they have -- no arguments. This is necessary now that variable bindings -- with no arguments are now treated as FunBinds rather -- than pattern bindings (tests/rename/should_fail/rnfail002). 
{- ********************************************************************** #PrefixToHS-utils# Utilities for conversion ********************************************************************* -} {- Note [Parsing data constructors is hard] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The problem with parsing data constructors is that they look a lot like types. Compare: (s1) data T = C t1 t2 (s2) type T = C t1 t2 Syntactically, there's little difference between these declarations, except in (s1) 'C' is a data constructor, but in (s2) 'C' is a type constructor. This similarity would pose no problem if we knew ahead of time if we are parsing a type or a constructor declaration. Looking at (s1) and (s2), a simple (but wrong!) rule comes to mind: in 'data' declarations assume we are parsing data constructors, and in other contexts (e.g. 'type' declarations) assume we are parsing type constructors. This simple rule does not work because of two problematic cases: (p1) data T = C t1 t2 :+ t3 (p2) data T = C t1 t2 => t3 In (p1) we encounter (:+) and it turns out we are parsing an infix data declaration, so (C t1 t2) is a type and 'C' is a type constructor. In (p2) we encounter (=>) and it turns out we are parsing an existential context, so (C t1 t2) is a constraint and 'C' is a type constructor. As the result, in order to determine whether (C t1 t2) declares a data constructor, a type, or a context, we would need unlimited lookahead which 'happy' is not so happy with. -} -- | Reinterpret a type constructor, including type operators, as a data -- constructor. 
-- See Note [Parsing data constructors is hard] tyConToDataCon :: LocatedN RdrName -> Either (MsgEnvelope PsMessage) (LocatedN RdrName) tyConToDataCon (L loc tc) | okConOcc (occNameString occ) = return (L loc (setRdrNameSpace tc srcDataName)) | otherwise = Left $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrNotADataCon tc) where occ = rdrNameOcc tc mkPatSynMatchGroup :: LocatedN RdrName -> LocatedL (OrdList (LHsDecl GhcPs)) -> P (MatchGroup GhcPs (LHsExpr GhcPs)) mkPatSynMatchGroup (L loc patsyn_name) (L ld decls) = do { matches <- mapM fromDecl (fromOL decls) ; when (null matches) (wrongNumberErr (locA loc)) ; return $ mkMatchGroup FromSource (L ld matches) } where fromDecl (L loc decl@(ValD _ (PatBind _ -- AZ: where should these anns come from? pat@(L _ (ConPat noAnn ln@(L _ name) details)) rhs _))) = do { unless (name == patsyn_name) $ wrongNameBindingErr (locA loc) decl ; match <- case details of PrefixCon _ pats -> return $ Match { m_ext = noAnn , m_ctxt = ctxt, m_pats = pats , m_grhss = rhs } where ctxt = FunRhs { mc_fun = ln , mc_fixity = Prefix , mc_strictness = NoSrcStrict } InfixCon p1 p2 -> return $ Match { m_ext = noAnn , m_ctxt = ctxt , m_pats = [p1, p2] , m_grhss = rhs } where ctxt = FunRhs { mc_fun = ln , mc_fixity = Infix , mc_strictness = NoSrcStrict } RecCon{} -> recordPatSynErr (locA loc) pat ; return $ L loc match } fromDecl (L loc decl) = extraDeclErr (locA loc) decl extraDeclErr loc decl = addFatalError $ mkPlainErrorMsgEnvelope loc $ (PsErrNoSingleWhereBindInPatSynDecl patsyn_name decl) wrongNameBindingErr loc decl = addFatalError $ mkPlainErrorMsgEnvelope loc $ (PsErrInvalidWhereBindInPatSynDecl patsyn_name decl) wrongNumberErr loc = addFatalError $ mkPlainErrorMsgEnvelope loc $ (PsErrEmptyWhereInPatSynDecl patsyn_name) recordPatSynErr :: SrcSpan -> LPat GhcPs -> P a recordPatSynErr loc pat = addFatalError $ mkPlainErrorMsgEnvelope loc $ (PsErrRecordSyntaxInPatSynDecl pat) mkConDeclH98 :: EpAnn [AddEpAnn] -> LocatedN RdrName -> Maybe 
[LHsTyVarBndr Specificity GhcPs] -> Maybe (LHsContext GhcPs) -> HsConDeclH98Details GhcPs -> ConDecl GhcPs mkConDeclH98 ann name mb_forall mb_cxt args = ConDeclH98 { con_ext = ann , con_name = name , con_forall = isJust mb_forall , con_ex_tvs = mb_forall `orElse` [] , con_mb_cxt = mb_cxt , con_args = args , con_doc = Nothing } -- | Construct a GADT-style data constructor from the constructor names and -- their type. Some interesting aspects of this function: -- -- * This splits up the constructor type into its quantified type variables (if -- provided), context (if provided), argument types, and result type, and -- records whether this is a prefix or record GADT constructor. See -- Note [GADT abstract syntax] in "GHC.Hs.Decls" for more details. mkGadtDecl :: SrcSpan -> [LocatedN RdrName] -> LHsSigType GhcPs -> [AddEpAnn] -> P (LConDecl GhcPs) mkGadtDecl loc names ty annsIn = do cs <- getCommentsFor loc let l = noAnnSrcSpan loc (args, res_ty, annsa, csa) <- case body_ty of L ll (HsFunTy af hsArr (L loc' (HsRecTy an rf)) res_ty) -> do let an' = addCommentsToEpAnn (locA loc') an (comments af) arr <- case hsArr of HsUnrestrictedArrow arr -> return arr _ -> do addError $ mkPlainErrorMsgEnvelope (getLocA body_ty) $ (PsErrIllegalGadtRecordMultiplicity hsArr) return noHsUniTok return ( RecConGADT (L (SrcSpanAnn an' (locA loc')) rf) arr, res_ty , [], epAnnComments (ann ll)) _ -> do let (anns, cs, arg_types, res_type) = splitHsFunType body_ty return (PrefixConGADT arg_types, res_type, anns, cs) let an = EpAnn (spanAsAnchor loc) (annsIn ++ annsa) (cs Semi.<> csa) pure $ L l ConDeclGADT { con_g_ext = an , con_names = names , con_bndrs = L (getLoc ty) outer_bndrs , con_mb_cxt = mcxt , con_g_args = args , con_res_ty = res_ty , con_doc = Nothing } where (outer_bndrs, mcxt, body_ty) = splitLHsGadtTy ty setRdrNameSpace :: RdrName -> NameSpace -> RdrName -- ^ This rather gruesome function is used mainly by the parser. 
-- When parsing: -- -- > data T a = T | T1 Int -- -- we parse the data constructors as /types/ because of parser ambiguities, -- so then we need to change the /type constr/ to a /data constr/ -- -- The exact-name case /can/ occur when parsing: -- -- > data [] a = [] | a : [a] -- -- For the exact-name case we return an original name. setRdrNameSpace (Unqual occ) ns = Unqual (setOccNameSpace ns occ) setRdrNameSpace (Qual m occ) ns = Qual m (setOccNameSpace ns occ) setRdrNameSpace (Orig m occ) ns = Orig m (setOccNameSpace ns occ) setRdrNameSpace (Exact n) ns | Just thing <- wiredInNameTyThing_maybe n = setWiredInNameSpace thing ns -- Preserve Exact Names for wired-in things, -- notably tuples and lists | isExternalName n = Orig (nameModule n) occ | otherwise -- This can happen when quoting and then -- splicing a fixity declaration for a type = Exact (mkSystemNameAt (nameUnique n) occ (nameSrcSpan n)) where occ = setOccNameSpace ns (nameOccName n) setWiredInNameSpace :: TyThing -> NameSpace -> RdrName setWiredInNameSpace (ATyCon tc) ns | isDataConNameSpace ns = ty_con_data_con tc | isTcClsNameSpace ns = Exact (getName tc) -- No-op setWiredInNameSpace (AConLike (RealDataCon dc)) ns | isTcClsNameSpace ns = data_con_ty_con dc | isDataConNameSpace ns = Exact (getName dc) -- No-op setWiredInNameSpace thing ns = pprPanic "setWiredinNameSpace" (pprNameSpace ns <+> ppr thing) ty_con_data_con :: TyCon -> RdrName ty_con_data_con tc | isTupleTyCon tc , Just dc <- tyConSingleDataCon_maybe tc = Exact (getName dc) | tc `hasKey` listTyConKey = Exact nilDataConName | otherwise -- See Note [setRdrNameSpace for wired-in names] = Unqual (setOccNameSpace srcDataName (getOccName tc)) data_con_ty_con :: DataCon -> RdrName data_con_ty_con dc | let tc = dataConTyCon dc , isTupleTyCon tc = Exact (getName tc) | dc `hasKey` nilDataConKey = Exact listTyConName | otherwise -- See Note [setRdrNameSpace for wired-in names] = Unqual (setOccNameSpace tcClsName (getOccName dc)) {- Note [setRdrNameSpace 
for wired-in names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In GHC.Types, which declares (:), we have infixr 5 : The ambiguity about which ":" is meant is resolved by parsing it as a data constructor, but then using dataTcOccs to try the type constructor too; and that in turn calls setRdrNameSpace to change the name-space of ":" to tcClsName. There isn't a corresponding ":" type constructor, but it's painful to make setRdrNameSpace partial, so we just make an Unqual name instead. It really doesn't matter! -} eitherToP :: MonadP m => Either (MsgEnvelope PsMessage) a -> m a -- Adapts the Either monad to the P monad eitherToP (Left err) = addFatalError err eitherToP (Right thing) = return thing checkTyVars :: SDoc -> SDoc -> LocatedN RdrName -> [LHsTypeArg GhcPs] -> P (LHsQTyVars GhcPs) -- the synthesized type variables -- ^ Check whether the given list of type parameters are all type variables -- (possibly with a kind signature). checkTyVars pp_what equals_or_where tc tparms = do { tvs <- mapM check tparms ; return (mkHsQTvs tvs) } where check (HsTypeArg _ ki@(L loc _)) = addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrUnexpectedTypeAppInDecl ki pp_what (unLoc tc)) check (HsValArg ty) = chkParens [] [] emptyComments ty check (HsArgPar sp) = addFatalError $ mkPlainErrorMsgEnvelope sp $ (PsErrMalformedDecl pp_what (unLoc tc)) -- Keep around an action for adjusting the annotations of extra parens chkParens :: [AddEpAnn] -> [AddEpAnn] -> EpAnnComments -> LHsType GhcPs -> P (LHsTyVarBndr () GhcPs) chkParens ops cps cs (L l (HsParTy an ty)) = let (o,c) = mkParensEpAnn (realSrcSpan $ locA l) in chkParens (o:ops) (c:cps) (cs Semi.<> epAnnComments an) ty chkParens ops cps cs ty = chk ops cps cs ty -- Check that the name space is correct! 
chk :: [AddEpAnn] -> [AddEpAnn] -> EpAnnComments -> LHsType GhcPs -> P (LHsTyVarBndr () GhcPs) chk ops cps cs (L l (HsKindSig annk (L annt (HsTyVar ann _ (L lv tv))) k)) | isRdrTyVar tv = let an = (reverse ops) ++ cps in return (L (widenLocatedAn (l Semi.<> annt) an) (KindedTyVar (addAnns (annk Semi.<> ann) an cs) () (L lv tv) k)) chk ops cps cs (L l (HsTyVar ann _ (L ltv tv))) | isRdrTyVar tv = let an = (reverse ops) ++ cps in return (L (widenLocatedAn l an) (UserTyVar (addAnns ann an cs) () (L ltv tv))) chk _ _ _ t@(L loc _) = addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrUnexpectedTypeInDecl t pp_what (unLoc tc) tparms equals_or_where) whereDots, equalsDots :: SDoc -- Second argument to checkTyVars whereDots = text "where ..." equalsDots = text "= ..." checkDatatypeContext :: Maybe (LHsContext GhcPs) -> P () checkDatatypeContext Nothing = return () checkDatatypeContext (Just c) = do allowed <- getBit DatatypeContextsBit unless allowed $ addError $ mkPlainErrorMsgEnvelope (getLocA c) $ (PsErrIllegalDataTypeContext c) type LRuleTyTmVar = LocatedAn NoEpAnns RuleTyTmVar data RuleTyTmVar = RuleTyTmVar (EpAnn [AddEpAnn]) (LocatedN RdrName) (Maybe (LHsType GhcPs)) -- ^ Essentially a wrapper for a @RuleBndr GhcPs@ -- turns RuleTyTmVars into RuleBnrs - this is straightforward mkRuleBndrs :: [LRuleTyTmVar] -> [LRuleBndr GhcPs] mkRuleBndrs = fmap (fmap cvt_one) where cvt_one (RuleTyTmVar ann v Nothing) = RuleBndr ann v cvt_one (RuleTyTmVar ann v (Just sig)) = RuleBndrSig ann v (mkHsPatSigType noAnn sig) -- turns RuleTyTmVars into HsTyVarBndrs - this is more interesting mkRuleTyVarBndrs :: [LRuleTyTmVar] -> [LHsTyVarBndr () GhcPs] mkRuleTyVarBndrs = fmap cvt_one where cvt_one (L l (RuleTyTmVar ann v Nothing)) = L (l2l l) (UserTyVar ann () (fmap tm_to_ty v)) cvt_one (L l (RuleTyTmVar ann v (Just sig))) = L (l2l l) (KindedTyVar ann () (fmap tm_to_ty v) sig) -- takes something in namespace 'varName' to something in namespace 'tvName' tm_to_ty (Unqual occ) = Unqual 
(setOccNameSpace tvName occ) tm_to_ty _ = panic "mkRuleTyVarBndrs" -- See Note [Parsing explicit foralls in Rules] in Parser.y checkRuleTyVarBndrNames :: [LHsTyVarBndr flag GhcPs] -> P () checkRuleTyVarBndrNames = mapM_ (check . fmap hsTyVarName) where check (L loc (Unqual occ)) = -- TODO: don't use string here, OccName has a Unique/FastString when ((occNameString occ ==) `any` ["forall","family","role"]) (addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrParseErrorOnInput occ)) check _ = panic "checkRuleTyVarBndrNames" checkRecordSyntax :: (MonadP m, Outputable a) => LocatedA a -> m (LocatedA a) checkRecordSyntax lr@(L loc r) = do allowed <- getBit TraditionalRecordSyntaxBit unless allowed $ addError $ mkPlainErrorMsgEnvelope (locA loc) $ (PsErrIllegalTraditionalRecordSyntax (ppr r)) return lr -- | Check if the gadt_constrlist is empty. Only raise parse error for -- `data T where` to avoid affecting existing error message, see #8258. checkEmptyGADTs :: Located ([AddEpAnn], [LConDecl GhcPs]) -> P (Located ([AddEpAnn], [LConDecl GhcPs])) checkEmptyGADTs gadts@(L span (_, [])) -- Empty GADT declaration. = do gadtSyntax <- getBit GadtSyntaxBit -- GADTs implies GADTSyntax unless gadtSyntax $ addError $ mkPlainErrorMsgEnvelope span $ PsErrIllegalWhereInDataDecl return gadts checkEmptyGADTs gadts = return gadts -- Ordinary GADT declaration. checkTyClHdr :: Bool -- True <=> class header -- False <=> type header -> LHsType GhcPs -> P (LocatedN RdrName, -- the head symbol (type or class name) [LHsTypeArg GhcPs], -- parameters of head symbol LexicalFixity, -- the declaration is in infix format [AddEpAnn]) -- API Annotation for HsParTy -- when stripping parens -- Well-formedness check and decomposition of type and class heads. -- Decomposes T ty1 .. 
tyn into (T, [ty1, ..., tyn]) -- Int :*: Bool into (:*:, [Int, Bool]) -- returning the pieces checkTyClHdr is_cls ty = goL ty [] [] [] Prefix where goL (L l ty) acc ops cps fix = go (locA l) ty acc ops cps fix -- workaround to define '*' despite StarIsType go _ (HsParTy an (L l (HsStarTy _ isUni))) acc ops' cps' fix = do { addPsMessage (locA l) PsWarnStarBinder ; let name = mkOccName tcClsName (starSym isUni) ; let a' = newAnns l an ; return (L a' (Unqual name), acc, fix , (reverse ops') ++ cps') } go _ (HsTyVar _ _ ltc@(L _ tc)) acc ops cps fix | isRdrTc tc = return (ltc, acc, fix, (reverse ops) ++ cps) go _ (HsOpTy _ _ t1 ltc@(L _ tc) t2) acc ops cps _fix | isRdrTc tc = return (ltc, HsValArg t1:HsValArg t2:acc, Infix, (reverse ops) ++ cps) go l (HsParTy _ ty) acc ops cps fix = goL ty acc (o:ops) (c:cps) fix where (o,c) = mkParensEpAnn (realSrcSpan l) go _ (HsAppTy _ t1 t2) acc ops cps fix = goL t1 (HsValArg t2:acc) ops cps fix go _ (HsAppKindTy l ty ki) acc ops cps fix = goL ty (HsTypeArg l ki:acc) ops cps fix go l (HsTupleTy _ HsBoxedOrConstraintTuple ts) [] ops cps fix = return (L (noAnnSrcSpan l) (nameRdrName tup_name) , map HsValArg ts, fix, (reverse ops)++cps) where arity = length ts tup_name | is_cls = cTupleTyConName arity | otherwise = getName (tupleTyCon Boxed arity) -- See Note [Unit tuples] in GHC.Hs.Type (TODO: is this still relevant?) 
go l _ _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l $ (PsErrMalformedTyOrClDecl ty) -- Combine the annotations from the HsParTy and HsStarTy into a -- new one for the LocatedN RdrName newAnns :: SrcSpanAnnA -> EpAnn AnnParen -> SrcSpanAnnN newAnns (SrcSpanAnn EpAnnNotUsed l) (EpAnn as (AnnParen _ o c) cs) = let lr = combineRealSrcSpans (realSrcSpan l) (anchor as) an = (EpAnn (Anchor lr UnchangedAnchor) (NameAnn NameParens o (EpaSpan $ realSrcSpan l) c []) cs) in SrcSpanAnn an (RealSrcSpan lr Strict.Nothing) newAnns _ EpAnnNotUsed = panic "missing AnnParen" newAnns (SrcSpanAnn (EpAnn ap (AnnListItem ta) csp) l) (EpAnn as (AnnParen _ o c) cs) = let lr = combineRealSrcSpans (anchor ap) (anchor as) an = (EpAnn (Anchor lr UnchangedAnchor) (NameAnn NameParens o (EpaSpan $ realSrcSpan l) c ta) (csp Semi.<> cs)) in SrcSpanAnn an (RealSrcSpan lr Strict.Nothing) -- | Yield a parse error if we have a function applied directly to a do block -- etc. and BlockArguments is not enabled. checkExpBlockArguments :: LHsExpr GhcPs -> PV () checkCmdBlockArguments :: LHsCmd GhcPs -> PV () (checkExpBlockArguments, checkCmdBlockArguments) = (checkExpr, checkCmd) where checkExpr :: LHsExpr GhcPs -> PV () checkExpr expr = case unLoc expr of HsDo _ (DoExpr m) _ -> check (PsErrDoInFunAppExpr m) expr HsDo _ (MDoExpr m) _ -> check (PsErrMDoInFunAppExpr m) expr HsLam {} -> check PsErrLambdaInFunAppExpr expr HsCase {} -> check PsErrCaseInFunAppExpr expr HsLamCase _ lc_variant _ -> check (PsErrLambdaCaseInFunAppExpr lc_variant) expr HsLet {} -> check PsErrLetInFunAppExpr expr HsIf {} -> check PsErrIfInFunAppExpr expr HsProc {} -> check PsErrProcInFunAppExpr expr _ -> return () checkCmd :: LHsCmd GhcPs -> PV () checkCmd cmd = case unLoc cmd of HsCmdLam {} -> check PsErrLambdaCmdInFunAppCmd cmd HsCmdCase {} -> check PsErrCaseCmdInFunAppCmd cmd HsCmdLamCase _ lc_variant _ -> check (PsErrLambdaCaseCmdInFunAppCmd lc_variant) cmd HsCmdIf {} -> check PsErrIfCmdInFunAppCmd cmd HsCmdLet {} -> check 
PsErrLetCmdInFunAppCmd cmd HsCmdDo {} -> check PsErrDoCmdInFunAppCmd cmd _ -> return () check err a = do blockArguments <- getBit BlockArgumentsBit unless blockArguments $ addError $ mkPlainErrorMsgEnvelope (getLocA a) $ (err a) -- | Validate the context constraints and break up a context into a list -- of predicates. -- -- @ -- (Eq a, Ord b) --> [Eq a, Ord b] -- Eq a --> [Eq a] -- (Eq a) --> [Eq a] -- (((Eq a))) --> [Eq a] -- @ checkContext :: LHsType GhcPs -> P (LHsContext GhcPs) checkContext orig_t@(L (SrcSpanAnn _ l) _orig_t) = check ([],[],emptyComments) orig_t where check :: ([EpaLocation],[EpaLocation],EpAnnComments) -> LHsType GhcPs -> P (LHsContext GhcPs) check (oparens,cparens,cs) (L _l (HsTupleTy ann' HsBoxedOrConstraintTuple ts)) -- (Eq a, Ord b) shows up as a tuple type. Only boxed tuples can -- be used as context constraints. -- Ditto () = do let (op,cp,cs') = case ann' of EpAnnNotUsed -> ([],[],emptyComments) EpAnn _ (AnnParen _ o c) cs -> ([o],[c],cs) return (L (SrcSpanAnn (EpAnn (spanAsAnchor l) -- Append parens so that the original order in the source is maintained (AnnContext Nothing (oparens ++ op) (cp ++ cparens)) (cs Semi.<> cs')) l) ts) check (opi,cpi,csi) (L _lp1 (HsParTy ann' ty)) -- to be sure HsParTy doesn't get into the way = do let (op,cp,cs') = case ann' of EpAnnNotUsed -> ([],[],emptyComments) EpAnn _ (AnnParen _ open close ) cs -> ([open],[close],cs) check (op++opi,cp++cpi,cs' Semi.<> csi) ty -- No need for anns, returning original check (_opi,_cpi,_csi) _t = return (L (SrcSpanAnn (EpAnn (spanAsAnchor l) (AnnContext Nothing [] []) emptyComments) l) [orig_t]) checkImportDecl :: Maybe EpaLocation -> Maybe EpaLocation -> P () checkImportDecl mPre mPost = do let whenJust mg f = maybe (pure ()) f mg importQualifiedPostEnabled <- getBit ImportQualifiedPostBit -- Error if 'qualified' found in postpositive position and -- 'ImportQualifiedPost' is not in effect. 
whenJust mPost $ \post -> when (not importQualifiedPostEnabled) $ failOpNotEnabledImportQualifiedPost (RealSrcSpan (epaLocationRealSrcSpan post) Strict.Nothing) -- Error if 'qualified' occurs in both pre and postpositive -- positions. whenJust mPost $ \post -> when (isJust mPre) $ failOpImportQualifiedTwice (RealSrcSpan (epaLocationRealSrcSpan post) Strict.Nothing) -- Warn if 'qualified' found in prepositive position and -- 'Opt_WarnPrepositiveQualifiedModule' is enabled. whenJust mPre $ \pre -> warnPrepositiveQualifiedModule (RealSrcSpan (epaLocationRealSrcSpan pre) Strict.Nothing) -- ------------------------------------------------------------------------- -- Checking Patterns. -- We parse patterns as expressions and check for valid patterns below, -- converting the expression into a pattern at the same time. checkPattern :: LocatedA (PatBuilder GhcPs) -> P (LPat GhcPs) checkPattern = runPV . checkLPat checkPattern_details :: ParseContext -> PV (LocatedA (PatBuilder GhcPs)) -> P (LPat GhcPs) checkPattern_details extraDetails pp = runPV_details extraDetails (pp >>= checkLPat) checkLPat :: LocatedA (PatBuilder GhcPs) -> PV (LPat GhcPs) checkLPat e@(L l _) = checkPat l e [] [] checkPat :: SrcSpanAnnA -> LocatedA (PatBuilder GhcPs) -> [HsPatSigType GhcPs] -> [LPat GhcPs] -> PV (LPat GhcPs) checkPat loc (L l e@(PatBuilderVar (L ln c))) tyargs args | isRdrDataCon c = return . L loc $ ConPat { pat_con_ext = noAnn -- AZ: where should this come from? , pat_con = L ln c , pat_args = PrefixCon tyargs args } | not (null tyargs) = patFail (locA l) . PsErrInPat e $ PEIP_TypeArgs tyargs | (not (null args) && patIsRec c) = do ctx <- askParseContext patFail (locA l) . 
PsErrInPat e $ PEIP_RecPattern args YesPatIsRecursive ctx checkPat loc (L _ (PatBuilderAppType f t)) tyargs args = checkPat loc f (t : tyargs) args checkPat loc (L _ (PatBuilderApp f e)) [] args = do p <- checkLPat e checkPat loc f [] (p : args) checkPat loc (L l e) [] [] = do p <- checkAPat loc e return (L l p) checkPat loc e _ _ = do details <- fromParseContext <$> askParseContext patFail (locA loc) (PsErrInPat (unLoc e) details) checkAPat :: SrcSpanAnnA -> PatBuilder GhcPs -> PV (Pat GhcPs) checkAPat loc e0 = do nPlusKPatterns <- getBit NPlusKPatternsBit case e0 of PatBuilderPat p -> return p PatBuilderVar x -> return (VarPat noExtField x) -- Overloaded numeric patterns (e.g. f 0 x = x) -- Negation is recorded separately, so that the literal is zero or +ve -- NB. Negative *primitive* literals are already handled by the lexer PatBuilderOverLit pos_lit -> return (mkNPat (L (l2l loc) pos_lit) Nothing noAnn) -- n+k patterns PatBuilderOpApp (L _ (PatBuilderVar (L nloc n))) (L l plus) (L lloc (PatBuilderOverLit lit@(OverLit {ol_val = HsIntegral {}}))) (EpAnn anc _ cs) | nPlusKPatterns && (plus == plus_RDR) -> return (mkNPlusKPat (L nloc n) (L (l2l lloc) lit) (EpAnn anc (epaLocationFromSrcAnn l) cs)) -- Improve error messages for the @-operator when the user meant an @-pattern PatBuilderOpApp _ op _ _ | opIsAt (unLoc op) -> do addError $ mkPlainErrorMsgEnvelope (getLocA op) PsErrAtInPatPos return (WildPat noExtField) PatBuilderOpApp l (L cl c) r anns | isRdrDataCon c -> do l <- checkLPat l r <- checkLPat r return $ ConPat { pat_con_ext = anns , pat_con = L cl c , pat_args = InfixCon l r } PatBuilderPar lpar e rpar -> do p <- checkLPat e return (ParPat (EpAnn (spanAsAnchor (locA loc)) NoEpAnns emptyComments) lpar p rpar) _ -> do details <- fromParseContext <$> askParseContext patFail (locA loc) (PsErrInPat e0 details) placeHolderPunRhs :: DisambECP b => PV (LocatedA b) -- The RHS of a punned record field will be filled in by the renamer -- It's better not to make it an 
error, in case we want to print it when -- debugging placeHolderPunRhs = mkHsVarPV (noLocA pun_RDR) plus_RDR, pun_RDR :: RdrName plus_RDR = mkUnqual varName (fsLit "+") -- Hack pun_RDR = mkUnqual varName (fsLit "pun-right-hand-side") checkPatField :: LHsRecField GhcPs (LocatedA (PatBuilder GhcPs)) -> PV (LHsRecField GhcPs (LPat GhcPs)) checkPatField (L l fld) = do p <- checkLPat (hfbRHS fld) return (L l (fld { hfbRHS = p })) patFail :: SrcSpan -> PsMessage -> PV a patFail loc msg = addFatalError $ mkPlainErrorMsgEnvelope loc $ msg patIsRec :: RdrName -> Bool patIsRec e = e == mkUnqual varName (fsLit "rec") --------------------------------------------------------------------------- -- Check Equation Syntax checkValDef :: SrcSpan -> LocatedA (PatBuilder GhcPs) -> Maybe (AddEpAnn, LHsType GhcPs) -> Located (GRHSs GhcPs (LHsExpr GhcPs)) -> P (HsBind GhcPs) checkValDef loc lhs (Just (sigAnn, sig)) grhss -- x :: ty = rhs parses as a *pattern* binding = do lhs' <- runPV $ mkHsTySigPV (combineLocsA lhs sig) lhs sig [sigAnn] >>= checkLPat checkPatBind loc [] lhs' grhss checkValDef loc lhs Nothing g = do { mb_fun <- isFunLhs lhs ; case mb_fun of Just (fun, is_infix, pats, ann) -> checkFunBind NoSrcStrict loc ann fun is_infix pats g Nothing -> do lhs' <- checkPattern lhs checkPatBind loc [] lhs' g } checkFunBind :: SrcStrictness -> SrcSpan -> [AddEpAnn] -> LocatedN RdrName -> LexicalFixity -> [LocatedA (PatBuilder GhcPs)] -> Located (GRHSs GhcPs (LHsExpr GhcPs)) -> P (HsBind GhcPs) checkFunBind strictness locF ann fun is_infix pats (L _ grhss) = do ps <- runPV_details extraDetails (mapM checkLPat pats) let match_span = noAnnSrcSpan $ locF cs <- getCommentsFor locF return (makeFunBind fun (L (noAnnSrcSpan $ locA match_span) [L match_span (Match { m_ext = EpAnn (spanAsAnchor locF) ann cs , m_ctxt = FunRhs { mc_fun = fun , mc_fixity = is_infix , mc_strictness = strictness } , m_pats = ps , m_grhss = grhss })])) -- The span of the match covers the entire equation. 
-- That isn't quite right, but it'll do for now. where extraDetails | Infix <- is_infix = ParseContext (Just $ unLoc fun) NoIncompleteDoBlock | otherwise = noParseContext makeFunBind :: LocatedN RdrName -> LocatedL [LMatch GhcPs (LHsExpr GhcPs)] -> HsBind GhcPs -- Like GHC.Hs.Utils.mkFunBind, but we need to be able to set the fixity too makeFunBind fn ms = FunBind { fun_ext = noExtField, fun_id = fn, fun_matches = mkMatchGroup FromSource ms, fun_tick = [] } -- See Note [FunBind vs PatBind] checkPatBind :: SrcSpan -> [AddEpAnn] -> LPat GhcPs -> Located (GRHSs GhcPs (LHsExpr GhcPs)) -> P (HsBind GhcPs) checkPatBind loc annsIn (L _ (BangPat (EpAnn _ ans cs) (L _ (VarPat _ v)))) (L _match_span grhss) = return (makeFunBind v (L (noAnnSrcSpan loc) [L (noAnnSrcSpan loc) (m (EpAnn (spanAsAnchor loc) (ans++annsIn) cs) v)])) where m a v = Match { m_ext = a , m_ctxt = FunRhs { mc_fun = v , mc_fixity = Prefix , mc_strictness = SrcStrict } , m_pats = [] , m_grhss = grhss } checkPatBind loc annsIn lhs (L _ grhss) = do cs <- getCommentsFor loc return (PatBind (EpAnn (spanAsAnchor loc) annsIn cs) lhs grhss ([],[])) checkValSigLhs :: LHsExpr GhcPs -> P (LocatedN RdrName) checkValSigLhs (L _ (HsVar _ lrdr@(L _ v))) | isUnqual v , not (isDataOcc (rdrNameOcc v)) = return lrdr checkValSigLhs lhs@(L l _) = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrInvalidTypeSignature lhs checkDoAndIfThenElse :: (Outputable a, Outputable b, Outputable c) => (a -> Bool -> b -> Bool -> c -> PsMessage) -> LocatedA a -> Bool -> LocatedA b -> Bool -> LocatedA c -> PV () checkDoAndIfThenElse err guardExpr semiThen thenExpr semiElse elseExpr | semiThen || semiElse = do doAndIfThenElse <- getBit DoAndIfThenElseBit let e = err (unLoc guardExpr) semiThen (unLoc thenExpr) semiElse (unLoc elseExpr) loc = combineLocs (reLoc guardExpr) (reLoc elseExpr) unless doAndIfThenElse $ addError (mkPlainErrorMsgEnvelope loc e) | otherwise = return () isFunLhs :: LocatedA (PatBuilder GhcPs) -> P (Maybe (LocatedN 
RdrName, LexicalFixity, [LocatedA (PatBuilder GhcPs)],[AddEpAnn])) -- A variable binding is parsed as a FunBind. -- Just (fun, is_infix, arg_pats) if e is a function LHS isFunLhs e = go e [] [] [] where go (L _ (PatBuilderVar (L loc f))) es ops cps | not (isRdrDataCon f) = return (Just (L loc f, Prefix, es, (reverse ops) ++ cps)) go (L _ (PatBuilderApp f e)) es ops cps = go f (e:es) ops cps go (L l (PatBuilderPar _ e _)) es@(_:_) ops cps = let (o,c) = mkParensEpAnn (realSrcSpan $ locA l) in go e es (o:ops) (c:cps) go (L loc (PatBuilderOpApp l (L loc' op) r (EpAnn loca anns cs))) es ops cps | not (isRdrDataCon op) -- We have found the function! = return (Just (L loc' op, Infix, (l:r:es), (anns ++ reverse ops ++ cps))) | otherwise -- Infix data con; keep going = do { mb_l <- go l es ops cps ; case mb_l of Just (op', Infix, j : k : es', anns') -> return (Just (op', Infix, j : op_app : es', anns')) where op_app = L loc (PatBuilderOpApp k (L loc' op) r (EpAnn loca (reverse ops++cps) cs)) _ -> return Nothing } go _ _ _ _ = return Nothing mkBangTy :: EpAnn [AddEpAnn] -> SrcStrictness -> LHsType GhcPs -> HsType GhcPs mkBangTy anns strictness = HsBangTy anns (HsSrcBang NoSourceText NoSrcUnpack strictness) -- | Result of parsing @{-\# UNPACK \#-}@ or @{-\# NOUNPACK \#-}@. data UnpackednessPragma = UnpackednessPragma [AddEpAnn] SourceText SrcUnpackedness -- | Annotate a type with either an @{-\# UNPACK \#-}@ or a @{-\# NOUNPACK \#-}@ pragma. addUnpackednessP :: MonadP m => Located UnpackednessPragma -> LHsType GhcPs -> m (LHsType GhcPs) addUnpackednessP (L lprag (UnpackednessPragma anns prag unpk)) ty = do let l' = combineSrcSpans lprag (getLocA ty) cs <- getCommentsFor l' let an = EpAnn (spanAsAnchor l') anns cs t' = addUnpackedness an ty return (L (noAnnSrcSpan l') t') where -- If we have a HsBangTy that only has a strictness annotation, -- such as ~T or !T, then add the pragma to the existing HsBangTy. -- -- Otherwise, wrap the type in a new HsBangTy constructor. 
addUnpackedness an (L _ (HsBangTy x bang t)) | HsSrcBang NoSourceText NoSrcUnpack strictness <- bang = HsBangTy (addAnns an (epAnnAnns x) (epAnnComments x)) (HsSrcBang prag unpk strictness) t addUnpackedness an t = HsBangTy an (HsSrcBang prag unpk NoSrcStrict) t --------------------------------------------------------------------------- -- | Check for monad comprehensions -- -- If the flag MonadComprehensions is set, return a 'MonadComp' context, -- otherwise use the usual 'ListComp' context checkMonadComp :: PV HsDoFlavour checkMonadComp = do monadComprehensions <- getBit MonadComprehensionsBit return $ if monadComprehensions then MonadComp else ListComp -- ------------------------------------------------------------------------- -- Expression/command/pattern ambiguity. -- See Note [Ambiguous syntactic categories] -- -- See Note [Ambiguous syntactic categories] -- -- This newtype is required to avoid impredicative types in monadic -- productions. That is, in a production that looks like -- -- | ... {% return (ECP ...) } -- -- we are dealing with -- P ECP -- whereas without a newtype we would be dealing with -- P (forall b. DisambECP b => PV (Located b)) -- newtype ECP = ECP { unECP :: forall b. DisambECP b => PV (LocatedA b) } ecpFromExp :: LHsExpr GhcPs -> ECP ecpFromExp a = ECP (ecpFromExp' a) ecpFromCmd :: LHsCmd GhcPs -> ECP ecpFromCmd a = ECP (ecpFromCmd' a) -- The 'fbinds' parser rule produces values of this type. See Note -- [RecordDotSyntax field updates]. type Fbind b = Either (LHsRecField GhcPs (LocatedA b)) (LHsRecProj GhcPs (LocatedA b)) -- | Disambiguate infix operators. 
-- See Note [Ambiguous syntactic categories] class DisambInfixOp b where mkHsVarOpPV :: LocatedN RdrName -> PV (LocatedN b) mkHsConOpPV :: LocatedN RdrName -> PV (LocatedN b) mkHsInfixHolePV :: SrcSpan -> (EpAnnComments -> EpAnn EpAnnUnboundVar) -> PV (Located b) instance DisambInfixOp (HsExpr GhcPs) where mkHsVarOpPV v = return $ L (getLoc v) (HsVar noExtField v) mkHsConOpPV v = return $ L (getLoc v) (HsVar noExtField v) mkHsInfixHolePV l ann = do cs <- getCommentsFor l return $ L l (hsHoleExpr (ann cs)) instance DisambInfixOp RdrName where mkHsConOpPV (L l v) = return $ L l v mkHsVarOpPV (L l v) = return $ L l v mkHsInfixHolePV l _ = addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrInvalidInfixHole type AnnoBody b = ( Anno (GRHS GhcPs (LocatedA (Body b GhcPs))) ~ SrcAnn NoEpAnns , Anno [LocatedA (Match GhcPs (LocatedA (Body b GhcPs)))] ~ SrcSpanAnnL , Anno (Match GhcPs (LocatedA (Body b GhcPs))) ~ SrcSpanAnnA , Anno (StmtLR GhcPs GhcPs (LocatedA (Body (Body b GhcPs) GhcPs))) ~ SrcSpanAnnA , Anno [LocatedA (StmtLR GhcPs GhcPs (LocatedA (Body (Body (Body b GhcPs) GhcPs) GhcPs)))] ~ SrcSpanAnnL ) -- | Disambiguate constructs that may appear when we do not know ahead of time whether we are -- parsing an expression, a command, or a pattern. -- See Note [Ambiguous syntactic categories] class (b ~ (Body b) GhcPs, AnnoBody b) => DisambECP b where -- | See Note [Body in DisambECP] type Body b :: Type -> Type -- | Return a command without ambiguity, or fail in a non-command context. ecpFromCmd' :: LHsCmd GhcPs -> PV (LocatedA b) -- | Return an expression without ambiguity, or fail in a non-expression context. ecpFromExp' :: LHsExpr GhcPs -> PV (LocatedA b) mkHsProjUpdatePV :: SrcSpan -> Located [LocatedAn NoEpAnns (DotFieldOcc GhcPs)] -> LocatedA b -> Bool -> [AddEpAnn] -> PV (LHsRecProj GhcPs (LocatedA b)) -- | Disambiguate "\... -> ..." (lambda) mkHsLamPV :: SrcSpan -> (EpAnnComments -> MatchGroup GhcPs (LocatedA b)) -> PV (LocatedA b) -- | Disambiguate "let ... in ..." 
mkHsLetPV :: SrcSpan -> LHsToken "let" GhcPs -> HsLocalBinds GhcPs -> LHsToken "in" GhcPs -> LocatedA b -> PV (LocatedA b) -- | Infix operator representation type InfixOp b -- | Bring superclass constraints on InfixOp into scope. -- See Note [UndecidableSuperClasses for associated types] superInfixOp :: (DisambInfixOp (InfixOp b) => PV (LocatedA b )) -> PV (LocatedA b) -- | Disambiguate "f # x" (infix operator) mkHsOpAppPV :: SrcSpan -> LocatedA b -> LocatedN (InfixOp b) -> LocatedA b -> PV (LocatedA b) -- | Disambiguate "case ... of ..." mkHsCasePV :: SrcSpan -> LHsExpr GhcPs -> (LocatedL [LMatch GhcPs (LocatedA b)]) -> EpAnnHsCase -> PV (LocatedA b) -- | Disambiguate "\case" and "\cases" mkHsLamCasePV :: SrcSpan -> LamCaseVariant -> (LocatedL [LMatch GhcPs (LocatedA b)]) -> [AddEpAnn] -> PV (LocatedA b) -- | Function argument representation type FunArg b -- | Bring superclass constraints on FunArg into scope. -- See Note [UndecidableSuperClasses for associated types] superFunArg :: (DisambECP (FunArg b) => PV (LocatedA b)) -> PV (LocatedA b) -- | Disambiguate "f x" (function application) mkHsAppPV :: SrcSpanAnnA -> LocatedA b -> LocatedA (FunArg b) -> PV (LocatedA b) -- | Disambiguate "f @t" (visible type application) mkHsAppTypePV :: SrcSpanAnnA -> LocatedA b -> SrcSpan -> LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate "if ... then ... else ..." mkHsIfPV :: SrcSpan -> LHsExpr GhcPs -> Bool -- semicolon? -> LocatedA b -> Bool -- semicolon? -> LocatedA b -> AnnsIf -> PV (LocatedA b) -- | Disambiguate "do { ... }" (do notation) mkHsDoPV :: SrcSpan -> Maybe ModuleName -> LocatedL [LStmt GhcPs (LocatedA b)] -> AnnList -> PV (LocatedA b) -- | Disambiguate "( ... )" (parentheses) mkHsParPV :: SrcSpan -> LHsToken "(" GhcPs -> LocatedA b -> LHsToken ")" GhcPs -> PV (LocatedA b) -- | Disambiguate a variable "f" or a data constructor "MkF". 
mkHsVarPV :: LocatedN RdrName -> PV (LocatedA b) -- | Disambiguate a monomorphic literal mkHsLitPV :: Located (HsLit GhcPs) -> PV (Located b) -- | Disambiguate an overloaded literal mkHsOverLitPV :: LocatedAn a (HsOverLit GhcPs) -> PV (LocatedAn a b) -- | Disambiguate a wildcard mkHsWildCardPV :: SrcSpan -> PV (Located b) -- | Disambiguate "a :: t" (type annotation) mkHsTySigPV :: SrcSpanAnnA -> LocatedA b -> LHsType GhcPs -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate "[a,b,c]" (list syntax) mkHsExplicitListPV :: SrcSpan -> [LocatedA b] -> AnnList -> PV (LocatedA b) -- | Disambiguate "$(...)" and "[quasi|...|]" (TH splices) mkHsSplicePV :: Located (HsSplice GhcPs) -> PV (Located b) -- | Disambiguate "f { a = b, ... }" syntax (record construction and record updates) mkHsRecordPV :: Bool -> -- Is OverloadedRecordUpdate in effect? SrcSpan -> SrcSpan -> LocatedA b -> ([Fbind b], Maybe SrcSpan) -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate "-a" (negation) mkHsNegAppPV :: SrcSpan -> LocatedA b -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate "(# a)" (right operator section) mkHsSectionR_PV :: SrcSpan -> LocatedA (InfixOp b) -> LocatedA b -> PV (Located b) -- | Disambiguate "(a -> b)" (view pattern) mkHsViewPatPV :: SrcSpan -> LHsExpr GhcPs -> LocatedA b -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate "a@b" (as-pattern) mkHsAsPatPV :: SrcSpan -> LocatedN RdrName -> LocatedA b -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate "~a" (lazy pattern) mkHsLazyPatPV :: SrcSpan -> LocatedA b -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate "!a" (bang pattern) mkHsBangPatPV :: SrcSpan -> LocatedA b -> [AddEpAnn] -> PV (LocatedA b) -- | Disambiguate tuple sections and unboxed sums mkSumOrTuplePV :: SrcSpanAnnA -> Boxity -> SumOrTuple b -> [AddEpAnn] -> PV (LocatedA b) -- | Validate infixexp LHS to reject unwanted {-# SCC ... 
#-} pragmas rejectPragmaPV :: LocatedA b -> PV () {- Note [UndecidableSuperClasses for associated types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (This Note is about the code in GHC, not about the user code that we are parsing) Assume we have a class C with an associated type T: class C a where type T a ... If we want to add 'C (T a)' as a superclass, we need -XUndecidableSuperClasses: {-# LANGUAGE UndecidableSuperClasses #-} class C (T a) => C a where type T a ... Unfortunately, -XUndecidableSuperClasses don't work all that well, sometimes making GHC loop. The workaround is to bring this constraint into scope manually with a helper method: class C a where type T a superT :: (C (T a) => r) -> r In order to avoid ambiguous types, 'r' must mention 'a'. For consistency, we use this approach for all constraints on associated types, even when -XUndecidableSuperClasses are not required. -} {- Note [Body in DisambECP] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are helper functions (mkBodyStmt, mkBindStmt, unguardedRHS, etc) that require their argument to take a form of (body GhcPs) for some (body :: Type -> *). To satisfy this requirement, we say that (b ~ Body b GhcPs) in the superclass constraints of DisambECP. The alternative is to change mkBodyStmt, mkBindStmt, unguardedRHS, etc, to drop this requirement. It is possible and would allow removing the type index of PatBuilder, but leads to worse type inference, breaking some code in the typechecker. 
-} instance DisambECP (HsCmd GhcPs) where type Body (HsCmd GhcPs) = HsCmd ecpFromCmd' = return ecpFromExp' (L l e) = cmdFail (locA l) (ppr e) mkHsProjUpdatePV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrOverloadedRecordDotInvalid mkHsLamPV l mg = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (HsCmdLam NoExtField (mg cs)) mkHsLetPV l tkLet bs tkIn e = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (HsCmdLet (EpAnn (spanAsAnchor l) NoEpAnns cs) tkLet bs tkIn e) type InfixOp (HsCmd GhcPs) = HsExpr GhcPs superInfixOp m = m mkHsOpAppPV l c1 op c2 = do let cmdArg c = L (l2l $ getLoc c) $ HsCmdTop noExtField c cs <- getCommentsFor l return $ L (noAnnSrcSpan l) $ HsCmdArrForm (EpAnn (spanAsAnchor l) (AnnList Nothing Nothing Nothing [] []) cs) (reLocL op) Infix Nothing [cmdArg c1, cmdArg c2] mkHsCasePV l c (L lm m) anns = do cs <- getCommentsFor l let mg = mkMatchGroup FromSource (L lm m) return $ L (noAnnSrcSpan l) (HsCmdCase (EpAnn (spanAsAnchor l) anns cs) c mg) mkHsLamCasePV l lc_variant (L lm m) anns = do cs <- getCommentsFor l let mg = mkLamCaseMatchGroup FromSource lc_variant (L lm m) return $ L (noAnnSrcSpan l) (HsCmdLamCase (EpAnn (spanAsAnchor l) anns cs) lc_variant mg) type FunArg (HsCmd GhcPs) = HsExpr GhcPs superFunArg m = m mkHsAppPV l c e = do cs <- getCommentsFor (locA l) checkCmdBlockArguments c checkExpBlockArguments e return $ L l (HsCmdApp (comment (realSrcSpan $ locA l) cs) c e) mkHsAppTypePV l c _ t = cmdFail (locA l) (ppr c <+> text "@" <> ppr t) mkHsIfPV l c semi1 a semi2 b anns = do checkDoAndIfThenElse PsErrSemiColonsInCondCmd c semi1 a semi2 b cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (mkHsCmdIf c a b (EpAnn (spanAsAnchor l) anns cs)) mkHsDoPV l Nothing stmts anns = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (HsCmdDo (EpAnn (spanAsAnchor l) anns cs) stmts) mkHsDoPV l (Just m) _ _ = addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrQualifiedDoInCmd m mkHsParPV l lpar c rpar = do cs <- 
getCommentsFor l return $ L (noAnnSrcSpan l) (HsCmdPar (EpAnn (spanAsAnchor l) NoEpAnns cs) lpar c rpar) mkHsVarPV (L l v) = cmdFail (locA l) (ppr v) mkHsLitPV (L l a) = cmdFail l (ppr a) mkHsOverLitPV (L l a) = cmdFail (locA l) (ppr a) mkHsWildCardPV l = cmdFail l (text "_") mkHsTySigPV l a sig _ = cmdFail (locA l) (ppr a <+> text "::" <+> ppr sig) mkHsExplicitListPV l xs _ = cmdFail l $ brackets (fsep (punctuate comma (map ppr xs))) mkHsSplicePV (L l sp) = cmdFail l (ppr sp) mkHsRecordPV _ l _ a (fbinds, ddLoc) _ = do let (fs, ps) = partitionEithers fbinds if not (null ps) then addFatalError $ mkPlainErrorMsgEnvelope l $ PsErrOverloadedRecordDotInvalid else cmdFail l $ ppr a <+> ppr (mk_rec_fields fs ddLoc) mkHsNegAppPV l a _ = cmdFail l (text "-" <> ppr a) mkHsSectionR_PV l op c = cmdFail l $ let pp_op = fromMaybe (panic "cannot print infix operator") (ppr_infix_expr (unLoc op)) in pp_op <> ppr c mkHsViewPatPV l a b _ = cmdFail l $ ppr a <+> text "->" <+> ppr b mkHsAsPatPV l v c _ = cmdFail l $ pprPrefixOcc (unLoc v) <> text "@" <> ppr c mkHsLazyPatPV l c _ = cmdFail l $ text "~" <> ppr c mkHsBangPatPV l c _ = cmdFail l $ text "!" 
<> ppr c mkSumOrTuplePV l boxity a _ = cmdFail (locA l) (pprSumOrTuple boxity a) rejectPragmaPV _ = return () cmdFail :: SrcSpan -> SDoc -> PV a cmdFail loc e = addFatalError $ mkPlainErrorMsgEnvelope loc $ PsErrParseErrorInCmd e checkLamMatchGroup :: SrcSpan -> MatchGroup GhcPs (LHsExpr GhcPs) -> PV () checkLamMatchGroup l (MG { mg_alts = (L _ (matches:_))}) = do when (null (hsLMatchPats matches)) $ addError $ mkPlainErrorMsgEnvelope l PsErrEmptyLambda checkLamMatchGroup _ _ = return () instance DisambECP (HsExpr GhcPs) where type Body (HsExpr GhcPs) = HsExpr ecpFromCmd' (L l c) = do addError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrArrowCmdInExpr c return (L l (hsHoleExpr noAnn)) ecpFromExp' = return mkHsProjUpdatePV l fields arg isPun anns = do cs <- getCommentsFor l return $ mkRdrProjUpdate (noAnnSrcSpan l) fields arg isPun (EpAnn (spanAsAnchor l) anns cs) mkHsLamPV l mg = do cs <- getCommentsFor l let mg' = mg cs checkLamMatchGroup l mg' return $ L (noAnnSrcSpan l) (HsLam NoExtField mg') mkHsLetPV l tkLet bs tkIn c = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (HsLet (EpAnn (spanAsAnchor l) NoEpAnns cs) tkLet bs tkIn c) type InfixOp (HsExpr GhcPs) = HsExpr GhcPs superInfixOp m = m mkHsOpAppPV l e1 op e2 = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) $ OpApp (EpAnn (spanAsAnchor l) [] cs) e1 (reLocL op) e2 mkHsCasePV l e (L lm m) anns = do cs <- getCommentsFor l let mg = mkMatchGroup FromSource (L lm m) return $ L (noAnnSrcSpan l) (HsCase (EpAnn (spanAsAnchor l) anns cs) e mg) mkHsLamCasePV l lc_variant (L lm m) anns = do cs <- getCommentsFor l let mg = mkLamCaseMatchGroup FromSource lc_variant (L lm m) return $ L (noAnnSrcSpan l) (HsLamCase (EpAnn (spanAsAnchor l) anns cs) lc_variant mg) type FunArg (HsExpr GhcPs) = HsExpr GhcPs superFunArg m = m mkHsAppPV l e1 e2 = do cs <- getCommentsFor (locA l) checkExpBlockArguments e1 checkExpBlockArguments e2 return $ L l (HsApp (comment (realSrcSpan $ locA l) cs) e1 e2) mkHsAppTypePV l e la t = do 
checkExpBlockArguments e return $ L l (HsAppType la e (mkHsWildCardBndrs t)) mkHsIfPV l c semi1 a semi2 b anns = do checkDoAndIfThenElse PsErrSemiColonsInCondExpr c semi1 a semi2 b cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (mkHsIf c a b (EpAnn (spanAsAnchor l) anns cs)) mkHsDoPV l mod stmts anns = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (HsDo (EpAnn (spanAsAnchor l) anns cs) (DoExpr mod) stmts) mkHsParPV l lpar e rpar = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (HsPar (EpAnn (spanAsAnchor l) NoEpAnns cs) lpar e rpar) mkHsVarPV v@(L l _) = return $ L (na2la l) (HsVar noExtField v) mkHsLitPV (L l a) = do cs <- getCommentsFor l return $ L l (HsLit (comment (realSrcSpan l) cs) a) mkHsOverLitPV (L l a) = do cs <- getCommentsFor (locA l) return $ L l (HsOverLit (comment (realSrcSpan (locA l)) cs) a) mkHsWildCardPV l = return $ L l (hsHoleExpr noAnn) mkHsTySigPV l a sig anns = do cs <- getCommentsFor (locA l) return $ L l (ExprWithTySig (EpAnn (spanAsAnchor $ locA l) anns cs) a (hsTypeToHsSigWcType sig)) mkHsExplicitListPV l xs anns = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (ExplicitList (EpAnn (spanAsAnchor l) anns cs) xs) mkHsSplicePV sp@(L l _) = do cs <- getCommentsFor l return $ mapLoc (HsSpliceE (EpAnn (spanAsAnchor l) NoEpAnns cs)) sp mkHsRecordPV opts l lrec a (fbinds, ddLoc) anns = do cs <- getCommentsFor l r <- mkRecConstrOrUpdate opts a lrec (fbinds, ddLoc) (EpAnn (spanAsAnchor l) anns cs) checkRecordSyntax (L (noAnnSrcSpan l) r) mkHsNegAppPV l a anns = do cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (NegApp (EpAnn (spanAsAnchor l) anns cs) a noSyntaxExpr) mkHsSectionR_PV l op e = do cs <- getCommentsFor l return $ L l (SectionR (comment (realSrcSpan l) cs) op e) mkHsViewPatPV l a b _ = addError (mkPlainErrorMsgEnvelope l $ PsErrViewPatInExpr a b) >> return (L (noAnnSrcSpan l) (hsHoleExpr noAnn)) mkHsAsPatPV l v e _ = addError (mkPlainErrorMsgEnvelope l $ PsErrTypeAppWithoutSpace (unLoc v) e) >> return (L 
(noAnnSrcSpan l) (hsHoleExpr noAnn)) mkHsLazyPatPV l e _ = addError (mkPlainErrorMsgEnvelope l $ PsErrLazyPatWithoutSpace e) >> return (L (noAnnSrcSpan l) (hsHoleExpr noAnn)) mkHsBangPatPV l e _ = addError (mkPlainErrorMsgEnvelope l $ PsErrBangPatWithoutSpace e) >> return (L (noAnnSrcSpan l) (hsHoleExpr noAnn)) mkSumOrTuplePV = mkSumOrTupleExpr rejectPragmaPV (L _ (OpApp _ _ _ e)) = -- assuming left-associative parsing of operators rejectPragmaPV e rejectPragmaPV (L l (HsPragE _ prag _)) = addError $ mkPlainErrorMsgEnvelope (locA l) $ (PsErrUnallowedPragma prag) rejectPragmaPV _ = return () hsHoleExpr :: EpAnn EpAnnUnboundVar -> HsExpr GhcPs hsHoleExpr anns = HsUnboundVar anns (mkVarOcc "_") type instance Anno (GRHS GhcPs (LocatedA (PatBuilder GhcPs))) = SrcAnn NoEpAnns type instance Anno [LocatedA (Match GhcPs (LocatedA (PatBuilder GhcPs)))] = SrcSpanAnnL type instance Anno (Match GhcPs (LocatedA (PatBuilder GhcPs))) = SrcSpanAnnA type instance Anno (StmtLR GhcPs GhcPs (LocatedA (PatBuilder GhcPs))) = SrcSpanAnnA instance DisambECP (PatBuilder GhcPs) where type Body (PatBuilder GhcPs) = PatBuilder ecpFromCmd' (L l c) = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrArrowCmdInPat c ecpFromExp' (L l e) = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrArrowExprInPat e mkHsLamPV l _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrLambdaInPat mkHsLetPV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrLetInPat mkHsProjUpdatePV l _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrOverloadedRecordDotInvalid type InfixOp (PatBuilder GhcPs) = RdrName superInfixOp m = m mkHsOpAppPV l p1 op p2 = do cs <- getCommentsFor l let anns = EpAnn (spanAsAnchor l) [] cs return $ L (noAnnSrcSpan l) $ PatBuilderOpApp p1 op p2 anns mkHsCasePV l _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrCaseInPat mkHsLamCasePV l lc_variant _ _ = addFatalError $ mkPlainErrorMsgEnvelope l (PsErrLambdaCaseInPat lc_variant) type FunArg (PatBuilder GhcPs) = 
PatBuilder GhcPs superFunArg m = m mkHsAppPV l p1 p2 = return $ L l (PatBuilderApp p1 p2) mkHsAppTypePV l p la t = do cs <- getCommentsFor (locA l) let anns = EpAnn (spanAsAnchor (combineSrcSpans la (getLocA t))) (EpaSpan (realSrcSpan la)) cs return $ L l (PatBuilderAppType p (mkHsPatSigType anns t)) mkHsIfPV l _ _ _ _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrIfThenElseInPat mkHsDoPV l _ _ _ = addFatalError $ mkPlainErrorMsgEnvelope l PsErrDoNotationInPat mkHsParPV l lpar p rpar = return $ L (noAnnSrcSpan l) (PatBuilderPar lpar p rpar) mkHsVarPV v@(getLoc -> l) = return $ L (na2la l) (PatBuilderVar v) mkHsLitPV lit@(L l a) = do checkUnboxedLitPat lit return $ L l (PatBuilderPat (LitPat noExtField a)) mkHsOverLitPV (L l a) = return $ L l (PatBuilderOverLit a) mkHsWildCardPV l = return $ L l (PatBuilderPat (WildPat noExtField)) mkHsTySigPV l b sig anns = do p <- checkLPat b cs <- getCommentsFor (locA l) return $ L l (PatBuilderPat (SigPat (EpAnn (spanAsAnchor $ locA l) anns cs) p (mkHsPatSigType noAnn sig))) mkHsExplicitListPV l xs anns = do ps <- traverse checkLPat xs cs <- getCommentsFor l return (L (noAnnSrcSpan l) (PatBuilderPat (ListPat (EpAnn (spanAsAnchor l) anns cs) ps))) mkHsSplicePV (L l sp) = return $ L l (PatBuilderPat (SplicePat noExtField sp)) mkHsRecordPV _ l _ a (fbinds, ddLoc) anns = do let (fs, ps) = partitionEithers fbinds if not (null ps) then addFatalError $ mkPlainErrorMsgEnvelope l PsErrOverloadedRecordDotInvalid else do cs <- getCommentsFor l r <- mkPatRec a (mk_rec_fields fs ddLoc) (EpAnn (spanAsAnchor l) anns cs) checkRecordSyntax (L (noAnnSrcSpan l) r) mkHsNegAppPV l (L lp p) anns = do lit <- case p of PatBuilderOverLit pos_lit -> return (L (l2l lp) pos_lit) _ -> patFail l $ PsErrInPat p PEIP_NegApp cs <- getCommentsFor l let an = EpAnn (spanAsAnchor l) anns cs return $ L (noAnnSrcSpan l) (PatBuilderPat (mkNPat lit (Just noSyntaxExpr) an)) mkHsSectionR_PV l op p = patFail l (PsErrParseRightOpSectionInPat (unLoc op) (unLoc p)) 
mkHsViewPatPV l a b anns = do p <- checkLPat b cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (PatBuilderPat (ViewPat (EpAnn (spanAsAnchor l) anns cs) a p)) mkHsAsPatPV l v e a = do p <- checkLPat e cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (PatBuilderPat (AsPat (EpAnn (spanAsAnchor l) a cs) v p)) mkHsLazyPatPV l e a = do p <- checkLPat e cs <- getCommentsFor l return $ L (noAnnSrcSpan l) (PatBuilderPat (LazyPat (EpAnn (spanAsAnchor l) a cs) p)) mkHsBangPatPV l e an = do p <- checkLPat e cs <- getCommentsFor l let pb = BangPat (EpAnn (spanAsAnchor l) an cs) p hintBangPat l pb return $ L (noAnnSrcSpan l) (PatBuilderPat pb) mkSumOrTuplePV = mkSumOrTuplePat rejectPragmaPV _ = return () -- | Ensure that a literal pattern isn't of type Addr#, Float#, Double#. checkUnboxedLitPat :: Located (HsLit GhcPs) -> PV () checkUnboxedLitPat (L loc lit) = case lit of -- Don't allow primitive string literal patterns. -- See #13260. HsStringPrim {} -> addError $ mkPlainErrorMsgEnvelope loc $ (PsErrIllegalUnboxedStringInPat lit) -- Don't allow Float#/Double# literal patterns. -- See #9238 and Note [Rules for floating-point comparisons] -- in GHC.Core.Opt.ConstantFold. 
_ | is_floating_lit lit -> addError $ mkPlainErrorMsgEnvelope loc $ (PsErrIllegalUnboxedFloatingLitInPat lit) | otherwise -> return () where is_floating_lit :: HsLit GhcPs -> Bool is_floating_lit (HsFloatPrim {}) = True is_floating_lit (HsDoublePrim {}) = True is_floating_lit _ = False mkPatRec :: LocatedA (PatBuilder GhcPs) -> HsRecFields GhcPs (LocatedA (PatBuilder GhcPs)) -> EpAnn [AddEpAnn] -> PV (PatBuilder GhcPs) mkPatRec (unLoc -> PatBuilderVar c) (HsRecFields fs dd) anns | isRdrDataCon (unLoc c) = do fs <- mapM checkPatField fs return $ PatBuilderPat $ ConPat { pat_con_ext = anns , pat_con = c , pat_args = RecCon (HsRecFields fs dd) } mkPatRec p _ _ = addFatalError $ mkPlainErrorMsgEnvelope (getLocA p) $ (PsErrInvalidRecordCon (unLoc p)) -- | Disambiguate constructs that may appear when we do not know -- ahead of time whether we are parsing a type or a newtype/data constructor. -- -- See Note [Ambiguous syntactic categories] for the general idea. -- -- See Note [Parsing data constructors is hard] for the specific issue this -- particular class is solving. -- class DisambTD b where -- | Process the head of a type-level function/constructor application, -- i.e. the @H@ in @H a b c@. mkHsAppTyHeadPV :: LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate @f x@ (function application or prefix data constructor). 
mkHsAppTyPV :: LocatedA b -> LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate @f \@t@ (visible kind application) mkHsAppKindTyPV :: LocatedA b -> SrcSpan -> LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate @f \# x@ (infix operator) mkHsOpTyPV :: PromotionFlag -> LHsType GhcPs -> LocatedN RdrName -> LHsType GhcPs -> PV (LocatedA b) -- | Disambiguate @{-\# UNPACK \#-} t@ (unpack/nounpack pragma) mkUnpackednessPV :: Located UnpackednessPragma -> LocatedA b -> PV (LocatedA b) instance DisambTD (HsType GhcPs) where mkHsAppTyHeadPV = return mkHsAppTyPV t1 t2 = return (mkHsAppTy t1 t2) mkHsAppKindTyPV t l_at ki = return (mkHsAppKindTy l_at t ki) mkHsOpTyPV prom t1 op t2 = return (mkLHsOpTy prom t1 op t2) mkUnpackednessPV = addUnpackednessP dataConBuilderCon :: DataConBuilder -> LocatedN RdrName dataConBuilderCon (PrefixDataConBuilder _ dc) = dc dataConBuilderCon (InfixDataConBuilder _ dc _) = dc dataConBuilderDetails :: DataConBuilder -> HsConDeclH98Details GhcPs -- Detect when the record syntax is used: -- data T = MkT { ... } dataConBuilderDetails (PrefixDataConBuilder flds _) | [L l_t (HsRecTy an fields)] <- toList flds = RecCon (L (SrcSpanAnn an (locA l_t)) fields) -- Normal prefix constructor, e.g. data T = MkT A B C dataConBuilderDetails (PrefixDataConBuilder flds _) = PrefixCon noTypeArgs (map hsLinear (toList flds)) -- Infix constructor, e.g. data T = Int :! Bool dataConBuilderDetails (InfixDataConBuilder lhs _ rhs) = InfixCon (hsLinear lhs) (hsLinear rhs) instance DisambTD DataConBuilder where mkHsAppTyHeadPV = tyToDataConBuilder mkHsAppTyPV (L l (PrefixDataConBuilder flds fn)) t = return $ L (noAnnSrcSpan $ combineSrcSpans (locA l) (getLocA t)) (PrefixDataConBuilder (flds `snocOL` t) fn) mkHsAppTyPV (L _ InfixDataConBuilder{}) _ = -- This case is impossible because of the way -- the grammar in Parser.y is written (see infixtype/ftype). 
panic "mkHsAppTyPV: InfixDataConBuilder" mkHsAppKindTyPV lhs l_at ki = addFatalError $ mkPlainErrorMsgEnvelope l_at $ (PsErrUnexpectedKindAppInDataCon (unLoc lhs) (unLoc ki)) mkHsOpTyPV prom lhs tc rhs = do check_no_ops (unLoc rhs) -- check the RHS because parsing type operators is right-associative data_con <- eitherToP $ tyConToDataCon tc checkNotPromotedDataCon prom data_con return $ L l (InfixDataConBuilder lhs data_con rhs) where l = combineLocsA lhs rhs check_no_ops (HsBangTy _ _ t) = check_no_ops (unLoc t) check_no_ops (HsOpTy{}) = addError $ mkPlainErrorMsgEnvelope (locA l) $ (PsErrInvalidInfixDataCon (unLoc lhs) (unLoc tc) (unLoc rhs)) check_no_ops _ = return () mkUnpackednessPV unpk constr_stuff | L _ (InfixDataConBuilder lhs data_con rhs) <- constr_stuff = -- When the user writes data T = {-# UNPACK #-} Int :+ Bool -- we apply {-# UNPACK #-} to the LHS do lhs' <- addUnpackednessP unpk lhs let l = combineLocsA (reLocA unpk) constr_stuff return $ L l (InfixDataConBuilder lhs' data_con rhs) | otherwise = do addError $ mkPlainErrorMsgEnvelope (getLoc unpk) PsErrUnpackDataCon return constr_stuff tyToDataConBuilder :: LHsType GhcPs -> PV (LocatedA DataConBuilder) tyToDataConBuilder (L l (HsTyVar _ prom v)) = do data_con <- eitherToP $ tyConToDataCon v checkNotPromotedDataCon prom data_con return $ L l (PrefixDataConBuilder nilOL data_con) tyToDataConBuilder (L l (HsTupleTy _ HsBoxedOrConstraintTuple ts)) = do let data_con = L (l2l l) (getRdrName (tupleDataCon Boxed (length ts))) return $ L l (PrefixDataConBuilder (toOL ts) data_con) tyToDataConBuilder t = addFatalError $ mkPlainErrorMsgEnvelope (getLocA t) $ (PsErrInvalidDataCon (unLoc t)) -- | Rejects declarations such as @data T = 'MkT@ (note the leading tick). 
checkNotPromotedDataCon :: PromotionFlag -> LocatedN RdrName -> PV () checkNotPromotedDataCon NotPromoted _ = return () checkNotPromotedDataCon IsPromoted (L l name) = addError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrIllegalPromotionQuoteDataCon name {- Note [Ambiguous syntactic categories] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are places in the grammar where we do not know whether we are parsing an expression or a pattern without unlimited lookahead (which we do not have in 'happy'): View patterns: f (Con a b ) = ... -- 'Con a b' is a pattern f (Con a b -> x) = ... -- 'Con a b' is an expression do-notation: do { Con a b <- x } -- 'Con a b' is a pattern do { Con a b } -- 'Con a b' is an expression Guards: x | True <- p && q = ... -- 'True' is a pattern x | True = ... -- 'True' is an expression Top-level value/function declarations (FunBind/PatBind): f ! a -- TH splice f ! a = ... -- function declaration Until we encounter the = sign, we don't know if it's a top-level TemplateHaskell splice where ! is used, or if it's a function declaration where ! is bound. There are also places in the grammar where we do not know whether we are parsing an expression or a command: proc x -> do { (stuff) -< x } -- 'stuff' is an expression proc x -> do { (stuff) } -- 'stuff' is a command Until we encounter arrow syntax (-<) we don't know whether to parse 'stuff' as an expression or a command. In fact, do-notation is subject to both ambiguities: proc x -> do { (stuff) -< x } -- 'stuff' is an expression proc x -> do { (stuff) <- f -< x } -- 'stuff' is a pattern proc x -> do { (stuff) } -- 'stuff' is a command There are many possible solutions to this problem. 
For an overview of the ones we decided against, see Note [Resolving parsing ambiguities: non-taken alternatives] The solution that keeps basic definitions (such as HsExpr) clean, keeps the concerns local to the parser, and does not require duplication of hsSyn types, or an extra pass over the entire AST, is to parse into an overloaded parser-validator (a so-called tagless final encoding): class DisambECP b where ... instance DisambECP (HsCmd GhcPs) where ... instance DisambECP (HsExp GhcPs) where ... instance DisambECP (PatBuilder GhcPs) where ... The 'DisambECP' class contains functions to build and validate 'b'. For example, to add parentheses we have: mkHsParPV :: DisambECP b => SrcSpan -> Located b -> PV (Located b) 'mkHsParPV' will wrap the inner value in HsCmdPar for commands, HsPar for expressions, and 'PatBuilderPar' for patterns (later transformed into ParPat, see Note [PatBuilder]). Consider the 'alts' production used to parse case-of alternatives: alts :: { Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)]) } : alts1 { sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } We abstract over LHsExpr GhcPs, and it becomes: alts :: { forall b. DisambECP b => PV (Located ([AddEpAnn],[LMatch GhcPs (Located b)])) } : alts1 { $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } Compared to the initial definition, the added bits are: forall b. DisambECP b => PV ( ... ) -- in the type signature $1 >>= \ $1 -> return $ -- in one reduction rule $2 >>= \ $2 -> return $ -- in another reduction rule The overhead is constant relative to the size of the rest of the reduction rule, so this approach scales well to large parser productions. Note that we write ($1 >>= \ $1 -> ...), so the second $1 is in a binding position and shadows the previous $1. 
We can do this because internally 'happy' desugars $n to happy_var_n, and the rationale behind this idiom is to be able to write (sLL $1 $>) later on. The alternative would be to write this as ($1 >>= \ fresh_name -> ...), but then we couldn't refer to the last fresh name as $>. Finally, we instantiate the polymorphic type to a concrete one, and run the parser-validator, for example: stmt :: { forall b. DisambECP b => PV (LStmt GhcPs (Located b)) } e_stmt :: { LStmt GhcPs (LHsExpr GhcPs) } : stmt {% runPV $1 } In e_stmt, three things happen: 1. we instantiate: b ~ HsExpr GhcPs 2. we embed the PV computation into P by using runPV 3. we run validation by using a monadic production, {% ... } At this point the ambiguity is resolved. -} {- Note [Resolving parsing ambiguities: non-taken alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Alternative I, extra constructors in GHC.Hs.Expr ------------------------------------------------ We could add extra constructors to HsExpr to represent command-specific and pattern-specific syntactic constructs. Under this scheme, we parse patterns and commands as expressions and rejig later. This is what GHC used to do, and it polluted 'HsExpr' with irrelevant constructors: * for commands: 'HsArrForm', 'HsArrApp' * for patterns: 'EWildPat', 'EAsPat', 'EViewPat', 'ELazyPat' (As of now, we still do that for patterns, but we plan to fix it). There are several issues with this: * The implementation details of parsing are leaking into hsSyn definitions. * Code that uses HsExpr has to panic on these impossible-after-parsing cases. * HsExpr is arbitrarily selected as the extension basis. Why not extend HsCmd or HsPat with extra constructors instead? Alternative II, extra constructors in GHC.Hs.Expr for GhcPs ----------------------------------------------------------- We could address some of the problems with Alternative I by using Trees That Grow and extending HsExpr only in the GhcPs pass. 
However, GhcPs corresponds to the output of parsing, not to its intermediate results, so we wouldn't want them there either. Alternative III, extra constructors in GHC.Hs.Expr for GhcPrePs --------------------------------------------------------------- We could introduce a new pass, GhcPrePs, to keep GhcPs pristine. Unfortunately, creating a new pass would significantly bloat conversion code and slow down the compiler by adding another linear-time pass over the entire AST. For example, in order to build HsExpr GhcPrePs, we would need to build HsLocalBinds GhcPrePs (as part of HsLet), and we never want HsLocalBinds GhcPrePs. Alternative IV, sum type and bottom-up data flow ------------------------------------------------ Expressions and commands are disjoint. There are no user inputs that could be interpreted as either an expression or a command depending on outer context: 5 -- definitely an expression x -< y -- definitely a command Even though we have both 'HsLam' and 'HsCmdLam', we can look at the body to disambiguate: \p -> 5 -- definitely an expression \p -> x -< y -- definitely a command This means we could use a bottom-up flow of information to determine whether we are parsing an expression or a command, using a sum type for intermediate results: Either (LHsExpr GhcPs) (LHsCmd GhcPs) There are two problems with this: * We cannot handle the ambiguity between expressions and patterns, which are not disjoint. * Bottom-up flow of information leads to poor error messages. Consider if ... then 5 else (x -< y) Do we report that '5' is not a valid command or that (x -< y) is not a valid expression? It depends on whether we want the entire node to be 'HsIf' or 'HsCmdIf', and this information flows top-down, from the surrounding parsing context (are we in 'proc'?) 
Alternative V, backtracking with parser combinators --------------------------------------------------- One might think we could sidestep the issue entirely by using a backtracking parser and doing something along the lines of (try pExpr <|> pPat). Turns out, this wouldn't work very well, as there can be patterns inside expressions (e.g. via 'case', 'let', 'do') and expressions inside patterns (e.g. view patterns). To handle this, we would need to backtrack while backtracking, and unbound levels of backtracking lead to very fragile performance. Alternative VI, an intermediate data type ----------------------------------------- There are common syntactic elements of expressions, commands, and patterns (e.g. all of them must have balanced parentheses), and we can capture this common structure in an intermediate data type, Frame: data Frame = FrameVar RdrName -- ^ Identifier: Just, map, BS.length | FrameTuple [LTupArgFrame] Boxity -- ^ Tuple (section): (a,b) (a,b,c) (a,,) (,a,) | FrameTySig LFrame (LHsSigWcType GhcPs) -- ^ Type signature: x :: ty | FramePar (SrcSpan, SrcSpan) LFrame -- ^ Parentheses | FrameIf LFrame LFrame LFrame -- ^ If-expression: if p then x else y | FrameCase LFrame [LFrameMatch] -- ^ Case-expression: case x of { p1 -> e1; p2 -> e2 } | FrameDo (HsStmtContext GhcRn) [LFrameStmt] -- ^ Do-expression: do { s1; a <- s2; s3 } ... | FrameExpr (HsExpr GhcPs) -- unambiguously an expression | FramePat (HsPat GhcPs) -- unambiguously a pattern | FrameCommand (HsCmd GhcPs) -- unambiguously a command To determine which constructors 'Frame' needs to have, we take the union of intersections between HsExpr, HsCmd, and HsPat. The intersection between HsPat and HsExpr: HsPat = VarPat | TuplePat | SigPat | ParPat | ... HsExpr = HsVar | ExplicitTuple | ExprWithTySig | HsPar | ... ------------------------------------------------------------------- Frame = FrameVar | FrameTuple | FrameTySig | FramePar | ... 
The intersection between HsCmd and HsExpr:

  HsCmd   = HsCmdIf | HsCmdCase | HsCmdDo | HsCmdPar
  HsExpr  = HsIf    | HsCase    | HsDo    | HsPar
  ------------------------------------------------
  Frame   = FrameIf | FrameCase | FrameDo | FramePar

The intersection between HsCmd and HsPat:

  HsPat   = ParPat   | ...
  HsCmd   = HsCmdPar | ...
  -----------------------
  Frame   = FramePar | ...

Take the union of each intersection and this yields the final 'Frame' data
type. The problem with this approach is that we end up duplicating a good
portion of hsSyn:

    Frame         for  HsExpr, HsPat, HsCmd
    TupArgFrame   for  HsTupArg
    FrameMatch    for  Match
    FrameStmt     for  StmtLR
    FrameGRHS     for  GRHS
    FrameGRHSs    for  GRHSs
    ...

Alternative VII, a product type
-------------------------------
We could avoid the intermediate representation of Alternative VI by parsing
into a product of interpretations directly:

    type ExpCmdPat = ( PV (LHsExpr GhcPs)
                     , PV (LHsCmd GhcPs)
                     , PV (LHsPat GhcPs) )

This means that in positions where we do not know whether to produce
expression, a pattern, or a command, we instead produce a parser-validator for
each possible option.

Then, as soon as we have parsed far enough to resolve the ambiguity, we pick
the appropriate component of the product, discarding the rest:

    checkExpOf3 (e, _, _) = e  -- interpret as an expression
    checkCmdOf3 (_, c, _) = c  -- interpret as a command
    checkPatOf3 (_, _, p) = p  -- interpret as a pattern

We can easily define ambiguities between arbitrary subsets of interpretations.
For example, when we know ahead of time that only an expression or a command is
possible, but not a pattern, we can use a smaller type:

    type ExpCmd = (PV (LHsExpr GhcPs), PV (LHsCmd GhcPs))

    checkExpOf2 (e, _) = e  -- interpret as an expression
    checkCmdOf2 (_, c) = c  -- interpret as a command

However, there is a slight problem with this approach, namely code duplication
in parser productions.
Consider the 'alts' production used to parse case-of alternatives: alts :: { Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)]) } : alts1 { sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } Under the new scheme, we have to completely duplicate its type signature and each reduction rule: alts :: { ( PV (Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)])) -- as an expression , PV (Located ([AddEpAnn],[LMatch GhcPs (LHsCmd GhcPs)])) -- as a command ) } : alts1 { ( checkExpOf2 $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) , checkCmdOf2 $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) ) } | ';' alts { ( checkExpOf2 $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) , checkCmdOf2 $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) ) } And the same goes for other productions: 'altslist', 'alts1', 'alt', 'alt_rhs', 'ralt', 'gdpats', 'gdpat', 'exp', ... and so on. That is a lot of code! Alternative VIII, a function from a GADT ---------------------------------------- We could avoid code duplication of the Alternative VII by representing the product as a function from a GADT: data ExpCmdG b where ExpG :: ExpCmdG HsExpr CmdG :: ExpCmdG HsCmd type ExpCmd = forall b. ExpCmdG b -> PV (Located (b GhcPs)) checkExp :: ExpCmd -> PV (LHsExpr GhcPs) checkCmd :: ExpCmd -> PV (LHsCmd GhcPs) checkExp f = f ExpG -- interpret as an expression checkCmd f = f CmdG -- interpret as a command Consider the 'alts' production used to parse case-of alternatives: alts :: { Located ([AddEpAnn],[LMatch GhcPs (LHsExpr GhcPs)]) } : alts1 { sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } We abstract over LHsExpr, and it becomes: alts :: { forall b. 
ExpCmdG b -> PV (Located ([AddEpAnn],[LMatch GhcPs (Located (b GhcPs))])) } : alts1 { \tag -> $1 tag >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { \tag -> $2 tag >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } Note that 'ExpCmdG' is a singleton type, the value is completely determined by the type: when (b~HsExpr), tag = ExpG when (b~HsCmd), tag = CmdG This is a clear indication that we can use a class to pass this value behind the scenes: class ExpCmdI b where expCmdG :: ExpCmdG b instance ExpCmdI HsExpr where expCmdG = ExpG instance ExpCmdI HsCmd where expCmdG = CmdG And now the 'alts' production is simplified, as we no longer need to thread 'tag' explicitly: alts :: { forall b. ExpCmdI b => PV (Located ([AddEpAnn],[LMatch GhcPs (Located (b GhcPs))])) } : alts1 { $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts { $2 >>= \ $2 -> return $ sLL $1 $> ((mj AnnSemi $1:(fst $ unLoc $2)),snd $ unLoc $2) } This encoding works well enough, but introduces an extra GADT unlike the tagless final encoding, and there's no need for this complexity. -} {- Note [PatBuilder] ~~~~~~~~~~~~~~~~~~~~ Unlike HsExpr or HsCmd, the Pat type cannot accommodate all intermediate forms, so we introduce the notion of a PatBuilder. Consider a pattern like this: Con a b c We parse arguments to "Con" one at a time in the fexp aexp parser production, building the result with mkHsAppPV, so the intermediate forms are: 1. Con 2. Con a 3. Con a b 4. Con a b c In 'HsExpr', we have 'HsApp', so the intermediate forms are represented like this (pseudocode): 1. "Con" 2. HsApp "Con" "a" 3. HsApp (HsApp "Con" "a") "b" 3. HsApp (HsApp (HsApp "Con" "a") "b") "c" Similarly, in 'HsCmd' we have 'HsCmdApp'. In 'Pat', however, what we have instead is 'ConPatIn', which is very awkward to modify and thus unsuitable for the intermediate forms. 
We also need an intermediate representation to postpone disambiguation between FunBind and PatBind. Consider: a `Con` b = ... a `fun` b = ... How do we know that (a `Con` b) is a PatBind but (a `fun` b) is a FunBind? We learn this by inspecting an intermediate representation in 'isFunLhs' and seeing that 'Con' is a data constructor but 'f' is not. We need an intermediate representation capable of representing both a FunBind and a PatBind, so Pat is insufficient. PatBuilder is an extension of Pat that is capable of representing intermediate parsing results for patterns and function bindings: data PatBuilder p = PatBuilderPat (Pat p) | PatBuilderApp (LocatedA (PatBuilder p)) (LocatedA (PatBuilder p)) | PatBuilderOpApp (LocatedA (PatBuilder p)) (LocatedA RdrName) (LocatedA (PatBuilder p)) ... It can represent any pattern via 'PatBuilderPat', but it also has a variety of other constructors which were added by following a simple principle: we never pattern match on the pattern stored inside 'PatBuilderPat'. -} --------------------------------------------------------------------------- -- Miscellaneous utilities -- | Check if a fixity is valid. We support bypassing the usual bound checks -- for some special operators. 
checkPrecP
        :: Located (SourceText,Int)              -- ^ precedence
        -> Located (OrdList (LocatedN RdrName))  -- ^ operators
        -> P ()
checkPrecP (L l (_,i)) (L _ ol)
 | 0 <= i, i <= maxPrecedence = pure ()
 | all specialOp ol = pure ()
 | otherwise = addFatalError $ mkPlainErrorMsgEnvelope l (PsErrPrecedenceOutOfRange i)
  where
    -- If you change this, consider updating Note [Fixity of (->)] in GHC/Types.hs
    specialOp op = unLoc op == getRdrName unrestrictedFunTyCon

-- | Build either a record construction (when the head is a data
-- constructor) or a record update expression from the parsed field binds.
-- Field projections ('Left' vs 'Right' in 'Fbind') are only legal in the
-- update form, and then only with OverloadedRecordUpdate.
mkRecConstrOrUpdate
        :: Bool
        -> LHsExpr GhcPs
        -> SrcSpan
        -> ([Fbind (HsExpr GhcPs)], Maybe SrcSpan)
        -> EpAnn [AddEpAnn]
        -> PV (HsExpr GhcPs)
mkRecConstrOrUpdate _ (L _ (HsVar _ (L l c))) _lrec (fbinds,dd) anns
  | isRdrDataCon c
  = do
      let (fs, ps) = partitionEithers fbinds
      -- Record *construction* cannot contain dotted (projection) fields.
      if not (null ps)
        then addFatalError $ mkPlainErrorMsgEnvelope (getLocA (head ps)) $
               PsErrOverloadedRecordDotInvalid
        else return (mkRdrRecordCon (L l c) (mk_rec_fields fs dd) anns)
mkRecConstrOrUpdate overloaded_update exp _ (fs,dd) anns
  | Just dd_loc <- dd = addFatalError $ mkPlainErrorMsgEnvelope dd_loc $
                          PsErrDotsInRecordUpdate
  | otherwise = mkRdrRecordUpd overloaded_update exp fs anns

-- | Build a 'RecordUpd' from the parsed field binds.  The Bool says whether
-- OverloadedRecordUpdate is enabled in the source being parsed.
mkRdrRecordUpd :: Bool -> LHsExpr GhcPs -> [Fbind (HsExpr GhcPs)] -> EpAnn [AddEpAnn] -> PV (HsExpr GhcPs)
mkRdrRecordUpd overloaded_on exp@(L loc _) fbinds anns = do
  -- We do not need to know if OverloadedRecordDot is in effect. We do
  -- however need to know if OverloadedRecordUpdate (passed in
  -- overloaded_on) is in effect because it affects the Left/Right nature
  -- of the RecordUpd value we calculate.
  let (fs, ps) = partitionEithers fbinds
      fs' :: [LHsRecUpdField GhcPs]
      fs' = map (fmap mk_rec_upd_field) fs
  case overloaded_on of
    False | not $ null ps ->
      -- A '.' was found in an update and OverloadedRecordUpdate isn't on.
      addFatalError $ mkPlainErrorMsgEnvelope (locA loc) PsErrOverloadedRecordUpdateNotEnabled
    False ->
      -- This is just a regular record update.
      return RecordUpd
        { rupd_ext = anns
        , rupd_expr = exp
        , rupd_flds = Left fs' }
    True -> do
      -- RecordDotSyntax updates may not use qualified field names.
      let qualifiedFields =
            [ L l lbl | L _ (HsFieldBind _ (L l lbl) _ _) <- fs'
                      , isQual . rdrNameAmbiguousFieldOcc $ lbl
            ]
      if not $ null qualifiedFields
        then
          addFatalError $ mkPlainErrorMsgEnvelope (getLocA (head qualifiedFields)) $
            PsErrOverloadedRecordUpdateNoQualifiedFields
        else -- This is a RecordDotSyntax update.
          return RecordUpd
            { rupd_ext = anns
            , rupd_expr = exp
            , rupd_flds = Right (toProjUpdates fbinds) }
  where
    toProjUpdates :: [Fbind (HsExpr GhcPs)] -> [LHsRecUpdProj GhcPs]
    toProjUpdates = map (\case { Right p -> p; Left f -> recFieldToProjUpdate f })

    -- Convert a top-level field update like {foo=2} or {bar} (punned)
    -- to a projection update.
    recFieldToProjUpdate :: LHsRecField GhcPs (LHsExpr GhcPs) -> LHsRecUpdProj GhcPs
    recFieldToProjUpdate (L l (HsFieldBind anns (L _ (FieldOcc _ (L loc rdr))) arg pun)) =
        -- The idea here is to convert the label to a singleton [FastString].
        let f = occNameFS . rdrNameOcc $ rdr
            fl = DotFieldOcc noAnn (L loc f)
            lf = locA loc
        in mkRdrProjUpdate l (L lf [L (l2l loc) fl]) (punnedVar f) pun anns
      where
        -- If punning, compute HsVar "f" otherwise just arg. This
        -- has the effect that sentinel HsVar "pun-rhs" is replaced
        -- by HsVar "f" here, before the update is written to a
        -- setField expressions.
        punnedVar :: FastString -> LHsExpr GhcPs
        punnedVar f = if not pun then arg
                      else noLocA . HsVar noExtField . noLocA . mkRdrUnqual . mkVarOccFS $ f

-- | Assemble a record-construction expression from its constructor name,
-- fields, and annotations.
mkRdrRecordCon :: LocatedN RdrName -> HsRecordBinds GhcPs -> EpAnn [AddEpAnn] -> HsExpr GhcPs
mkRdrRecordCon con flds anns
  = RecordCon { rcon_ext = anns, rcon_con = con, rcon_flds = flds }

-- | Package parsed record fields together with the optional location of a
-- trailing @..@ (which records how many explicit fields preceded it).
mk_rec_fields :: [LocatedA (HsRecField (GhcPass p) arg)] -> Maybe SrcSpan -> HsRecFields (GhcPass p) arg
mk_rec_fields fs Nothing = HsRecFields { rec_flds = fs, rec_dotdot = Nothing }
mk_rec_fields fs (Just s) = HsRecFields { rec_flds = fs
                                        , rec_dotdot = Just (L s (length fs)) }

-- | Re-tag a parsed record field as an update field; the field name stays
-- 'Unambiguous' until the renamer decides otherwise.
mk_rec_upd_field :: HsRecField GhcPs (LHsExpr GhcPs) -> HsRecUpdField GhcPs
mk_rec_upd_field (HsFieldBind noAnn (L loc (FieldOcc _ rdr)) arg pun)
  = HsFieldBind noAnn (L loc (Unambiguous noExtField rdr)) arg pun

mkInlinePragma :: SourceText -> (InlineSpec, RuleMatchInfo) -> Maybe Activation
               -> InlinePragma
-- The (Maybe Activation) is because the user can omit
-- the activation spec (and usually does)
mkInlinePragma src (inl, match_info) mb_act
  = InlinePragma { inl_src = src -- Note [Pragma source text] in GHC.Types.SourceText
                 , inl_inline = inl
                 , inl_sat    = Nothing
                 , inl_act    = act
                 , inl_rule   = match_info }
  where
    act = case mb_act of
            Just act -> act
            Nothing  -> -- No phase specified
                        case inl of
                          NoInline _ -> NeverActive
                          Opaque _   -> NeverActive
                          _other     -> AlwaysActive

-- | Build the 'InlinePragma' for an @{-\# OPAQUE \#-}@ pragma.
mkOpaquePragma :: SourceText -> InlinePragma
mkOpaquePragma src
  = InlinePragma { inl_src    = src
                 , inl_inline = Opaque src
                 , inl_sat    = Nothing
                 -- By marking the OPAQUE pragma NeverActive we stop
                 -- (constructor) specialisation on OPAQUE things.
                 --
                 -- See Note [OPAQUE pragma]
                 , inl_act    = NeverActive
                 , inl_rule   = FunLike
                 }

-----------------------------------------------------------------------------
-- utilities for foreign declarations

-- construct a foreign import declaration
--
mkImport :: Located CCallConv
         -> Located Safety
         -> (Located StringLiteral, LocatedN RdrName, LHsSigType GhcPs)
         -> P (EpAnn [AddEpAnn] -> HsDecl GhcPs)
mkImport cconv safety (L loc (StringLiteral esrc entity _), v, ty) =
    case unLoc cconv of
      CCallConv          -> returnSpec =<< mkCImport
      CApiConv           -> do
        imp <- mkCImport
        -- "wrapper" imports are not allowed with the capi convention.
        if isCWrapperImport imp
          then addFatalError $ mkPlainErrorMsgEnvelope loc PsErrInvalidCApiImport
          else returnSpec imp
      StdCallConv        -> returnSpec =<< mkCImport
      PrimCallConv       -> mkOtherImport
      JavaScriptCallConv -> mkOtherImport
  where
    -- Parse a C-like entity string of the following form:
    --   "[static] [chname] [&] [cid]" | "dynamic" | "wrapper"
    -- If 'cid' is missing, the function name 'v' is used instead as symbol
    -- name (cf section 8.5.1 in Haskell 2010 report).
    mkCImport = do
      let e = unpackFS entity
      case parseCImport cconv safety (mkExtName (unLoc v)) e (L loc esrc) of
        Nothing         -> addFatalError $ mkPlainErrorMsgEnvelope loc $
                             PsErrMalformedEntityString
        Just importSpec -> return importSpec

    isCWrapperImport (CImport _ _ _ CWrapper _) = True
    isCWrapperImport _ = False

    -- currently, all the other import conventions only support a symbol name in
    -- the entity string. If it is missing, we use the function name instead.
    mkOtherImport = returnSpec importSpec
      where
        entity'    = if nullFS entity
                        then mkExtName (unLoc v)
                        else entity
        funcTarget = CFunction (StaticTarget esrc entity' Nothing True)
        importSpec = CImport cconv safety Nothing funcTarget (L loc esrc)

    -- Wrap the finished import spec into a 'ForD' declaration, deferring
    -- the exact-print annotation to the caller.
    returnSpec spec = return $ \ann -> ForD noExtField $ ForeignImport
          { fd_i_ext  = ann
          , fd_name   = v
          , fd_sig_ty = ty
          , fd_fi     = spec
          }

-- the string "foo" is ambiguous: either a header or a C identifier.  The
-- C identifier case comes first in the alternatives below, so we pick
-- that one.
parseCImport :: Located CCallConv -> Located Safety -> FastString -> String -> Located SourceText -> Maybe ForeignImport parseCImport cconv safety nm str sourceText = listToMaybe $ map fst $ filter (null.snd) $ readP_to_S parse str where parse = do skipSpaces r <- choice [ string "dynamic" >> return (mk Nothing (CFunction DynamicTarget)), string "wrapper" >> return (mk Nothing CWrapper), do optional (token "static" >> skipSpaces) ((mk Nothing <$> cimp nm) +++ (do h <- munch1 hdr_char skipSpaces mk (Just (Header (SourceText h) (mkFastString h))) <$> cimp nm)) ] skipSpaces return r token str = do _ <- string str toks <- look case toks of c : _ | id_char c -> pfail _ -> return () mk h n = CImport cconv safety h n sourceText hdr_char c = not (isSpace c) -- header files are filenames, which can contain -- pretty much any char (depending on the platform), -- so just accept any non-space character id_first_char c = isAlpha c || c == '_' id_char c = isAlphaNum c || c == '_' cimp nm = (ReadP.char '&' >> skipSpaces >> CLabel <$> cid) +++ (do isFun <- case unLoc cconv of CApiConv -> option True (do token "value" skipSpaces return False) _ -> return True cid' <- cid return (CFunction (StaticTarget NoSourceText cid' Nothing isFun))) where cid = return nm +++ (do c <- satisfy id_first_char cs <- many (satisfy id_char) return (mkFastString (c:cs))) -- construct a foreign export declaration -- mkExport :: Located CCallConv -> (Located StringLiteral, LocatedN RdrName, LHsSigType GhcPs) -> P (EpAnn [AddEpAnn] -> HsDecl GhcPs) mkExport (L lc cconv) (L le (StringLiteral esrc entity _), v, ty) = return $ \ann -> ForD noExtField $ ForeignExport { fd_e_ext = ann, fd_name = v, fd_sig_ty = ty , fd_fe = CExport (L lc (CExportStatic esrc entity' cconv)) (L le esrc) } where entity' | nullFS entity = mkExtName (unLoc v) | otherwise = entity -- Supplying the ext_name in a foreign decl is optional; if it -- isn't there, the Haskell name is assumed. 
-- Note that no transformation
-- of the Haskell name is then performed, so if you foreign export (++),
-- its external name will be "++". Too bad; it's important because we don't
-- want z-encoding (e.g. names with z's in them shouldn't be doubled)
--
mkExtName :: RdrName -> CLabelString
mkExtName rdrNm = mkFastString (occNameString (rdrNameOcc rdrNm))

--------------------------------------------------------------------------------
-- Help with module system imports/exports

-- | The sub-specification of an import/export item:
-- nothing, @(..)@, an explicit list, or a list containing @..@.
data ImpExpSubSpec = ImpExpAbs
                   | ImpExpAll
                   | ImpExpList [LocatedA ImpExpQcSpec]
                   | ImpExpAllWith [LocatedA ImpExpQcSpec]

-- | A single import/export item: a plain name, a @type@-qualified name,
-- or the @..@ wildcard.
data ImpExpQcSpec = ImpExpQcName (LocatedN RdrName)
                  | ImpExpQcType EpaLocation (LocatedN RdrName)
                  | ImpExpQcWildcard

-- | Assemble a full import/export item ('IE') from its head name and
-- sub-specification, validating name spaces and the PatternSynonyms
-- requirement for mixed lists containing @..@.
mkModuleImpExp :: [AddEpAnn] -> LocatedA ImpExpQcSpec -> ImpExpSubSpec -> P (IE GhcPs)
mkModuleImpExp anns (L l specname) subs = do
  cs <- getCommentsFor (locA l) -- AZ: IEVar can discard comments
  let ann = EpAnn (spanAsAnchor $ locA l) anns cs
  case subs of
    ImpExpAbs
      | isVarNameSpace (rdrNameSpace name)
                       -> return $ IEVar noExtField (L l (ieNameFromSpec specname))
      | otherwise      -> IEThingAbs ann . L l <$> nameT
    ImpExpAll -> IEThingAll ann . L l <$> nameT
    ImpExpList xs ->
      (\newName -> IEThingWith ann (L l newName)
        NoIEWildcard (wrapped xs)) <$> nameT
    ImpExpAllWith xs ->
      do allowed <- getBit PatternSynonymsBit
         if allowed
          then
            let withs = map unLoc xs
                -- Position of the '..' wildcard within the list, if any.
                pos   = maybe NoIEWildcard IEWildcard
                          (findIndex isImpExpQcWildcard withs)
                ies :: [LocatedA (IEWrappedName RdrName)]
                ies   = wrapped $ filter (not . isImpExpQcWildcard . unLoc) xs
            in (\newName -> IEThingWith ann (L l newName) pos ies)
               <$> nameT
          else addFatalError $ mkPlainErrorMsgEnvelope (locA l) $
                 PsErrIllegalPatSynExport
  where
    name = ieNameVal specname
    -- The head of a sub-spec must be a type/class name, not a variable.
    nameT =
      if isVarNameSpace (rdrNameSpace name)
        then addFatalError $ mkPlainErrorMsgEnvelope (locA l) $
               (PsErrVarForTyCon name)
        else return $ ieNameFromSpec specname

    ieNameVal (ImpExpQcName ln)   = unLoc ln
    ieNameVal (ImpExpQcType _ ln) = unLoc ln
    ieNameVal (ImpExpQcWildcard)  = panic "ieNameVal got wildcard"

    ieNameFromSpec (ImpExpQcName ln)   = IEName ln
    ieNameFromSpec (ImpExpQcType r ln) = IEType r ln
    ieNameFromSpec (ImpExpQcWildcard)  = panic "ieName got wildcard"

    wrapped = map (mapLoc ieNameFromSpec)

-- | Handle a @type@ keyword in an import/export item: requires
-- ExplicitNamespaces and moves the name into the type-constructor/class
-- name space.
mkTypeImpExp :: LocatedN RdrName   -- TcCls or Var name space
             -> P (LocatedN RdrName)
mkTypeImpExp name =
  do allowed <- getBit ExplicitNamespacesBit
     unless allowed $ addError $ mkPlainErrorMsgEnvelope (getLocA name) $
                                   PsErrIllegalExplicitNamespace
     return (fmap (`setRdrNameSpace` tcClsName) name)

-- | Reject import lists that contain a @(.., x)@-style bundle form,
-- which is only legal in export lists.
checkImportSpec :: LocatedL [LIE GhcPs] -> P (LocatedL [LIE GhcPs])
checkImportSpec ie@(L _ specs) =
    case [l | (L l (IEThingWith _ _ (IEWildcard _) _)) <- specs] of
      [] -> return ie
      (l:_) -> importSpecError (locA l)
  where
    importSpecError l =
      addFatalError $ mkPlainErrorMsgEnvelope l PsErrIllegalImportBundleForm

-- In the correct order
mkImpExpSubSpec :: [LocatedA ImpExpQcSpec] -> P ([AddEpAnn], ImpExpSubSpec)
mkImpExpSubSpec [] = return ([], ImpExpList [])
mkImpExpSubSpec [L la ImpExpQcWildcard] =
  return ([AddEpAnn AnnDotdot (EpaSpan $ la2r la)], ImpExpAll)
mkImpExpSubSpec xs =
  if (any (isImpExpQcWildcard . unLoc) xs)
    then return $ ([], ImpExpAllWith xs)
    else return $ ([], ImpExpList xs)

isImpExpQcWildcard :: ImpExpQcSpec -> Bool
isImpExpQcWildcard ImpExpQcWildcard = True
isImpExpQcWildcard _                = False

-----------------------------------------------------------------------------
-- Warnings and failures

warnPrepositiveQualifiedModule :: SrcSpan -> P ()
warnPrepositiveQualifiedModule span =
  addPsMessage span PsWarnImportPreQualified

failOpNotEnabledImportQualifiedPost :: SrcSpan -> P ()
failOpNotEnabledImportQualifiedPost loc =
  addError $ mkPlainErrorMsgEnvelope loc $ PsErrImportPostQualified

failOpImportQualifiedTwice :: SrcSpan -> P ()
failOpImportQualifiedTwice loc =
  addError $ mkPlainErrorMsgEnvelope loc $ PsErrImportQualifiedTwice

warnStarIsType :: SrcSpan -> P ()
warnStarIsType span = addPsMessage span PsWarnStarIsType

-- | Fatal error for an operator applied to too few arguments; the error
-- wording depends on whether StarIsType is on.
failOpFewArgs :: MonadP m => LocatedN RdrName -> m a
failOpFewArgs (L loc op) =
  do { star_is_type <- getBit StarIsTypeBit
     ; let is_star_type = if star_is_type then StarIsType else StarIsNotType
     ; addFatalError $ mkPlainErrorMsgEnvelope (locA loc) $
         (PsErrOpFewArgs is_star_type op) }

-----------------------------------------------------------------------------
-- Misc utils

-- | Read-only environment of a parser-validator computation.
data PV_Context =
  PV_Context
    { pv_options :: ParserOpts
    , pv_details :: ParseContext -- See Note [Parser-Validator Details]
    }

-- | Mutable (threaded) accumulator of a parser-validator computation:
-- diagnostics and exact-print comment state.
data PV_Accum =
  PV_Accum
    { pv_warnings        :: Messages PsMessage
    , pv_errors          :: Messages PsMessage
    , pv_header_comments :: Strict.Maybe [LEpaComment]
    , pv_comment_q       :: [LEpaComment]
    }

-- | Result of a parser-validator: success with a value, or failure;
-- in both cases the accumulated state is returned.
data PV_Result a = PV_Ok PV_Accum a | PV_Failed PV_Accum

-- During parsing, we make use of several monadic effects: reporting parse errors,
-- accumulating warnings, adding API annotations, and checking for extensions. These
-- effects are captured by the 'MonadP' type class.
--
-- Sometimes we need to postpone some of these effects to a later stage due to
-- ambiguities described in Note [Ambiguous syntactic categories].
-- We could use two layers of the P monad, one for each stage:
--
--   abParser :: forall x. DisambAB x => P (P x)
--
-- The outer layer of P consumes the input and builds the inner layer, which
-- validates the input. But this type is not particularly helpful, as it obscures
-- the fact that the inner layer of P never consumes any input.
--
-- For clarity, we introduce the notion of a parser-validator: a parser that does
-- not consume any input, but may fail or use other effects. Thus we have:
--
--   abParser :: forall x. DisambAB x => P (PV x)
--
newtype PV a = PV { unPV :: PV_Context -> PV_Accum -> PV_Result a }

instance Functor PV where
  fmap = liftM

instance Applicative PV where
  -- Note: the value is forced before being wrapped, so 'pure' is strict.
  pure a = a `seq` PV (\_ acc -> PV_Ok acc a)
  (<*>) = ap

instance Monad PV where
  -- Thread the accumulator through; a failure short-circuits but keeps
  -- the state accumulated so far.
  m >>= f = PV $ \ctx acc ->
    case unPV m ctx acc of
      PV_Ok acc' a -> unPV (f a) ctx acc'
      PV_Failed acc' -> PV_Failed acc'

-- | Run a parser-validator inside the parser monad with no particular
-- validation context.
runPV :: PV a -> P a
runPV = runPV_details noParseContext

askParseContext :: PV ParseContext
askParseContext = PV $ \(PV_Context _ details) acc -> PV_Ok acc details

-- | Run a parser-validator inside the parser monad, supplying a
-- 'ParseContext' for diagnostic messages.  The PV state is initialised
-- from and written back to the parser state 's'.
runPV_details :: ParseContext -> PV a -> P a
runPV_details details m =
  P $ \s ->
    let
      pv_ctx = PV_Context
        { pv_options = options s
        , pv_details = details }
      pv_acc = PV_Accum
        { pv_warnings = warnings s
        , pv_errors   = errors s
        , pv_header_comments = header_comments s
        , pv_comment_q = comment_q s }
      -- Copy the PV accumulator back into the parser state.
      mkPState acc' =
        s { warnings = pv_warnings acc'
          , errors   = pv_errors acc'
          , comment_q = pv_comment_q acc' }
    in
      case unPV m pv_ctx pv_acc of
        PV_Ok acc' a -> POk (mkPState acc') a
        PV_Failed acc' -> PFailed (mkPState acc')

instance MonadP PV where
  addError err =
    PV $ \_ctx acc -> PV_Ok acc{pv_errors = err `addMessage` pv_errors acc} ()
  addWarning w =
    PV $ \_ctx acc ->
      -- No need to check for the warning flag to be set, GHC will correctly discard suppressed
      -- diagnostics.
      PV_Ok acc{pv_warnings= w `addMessage` pv_warnings acc} ()
  addFatalError err =
    addError err >> PV (const PV_Failed)
  getBit ext =
    PV $ \ctx acc ->
      let b = ext `xtest` pExtsBitmap (pv_options ctx) in
      PV_Ok acc $! b
  allocateCommentsP ss = PV $ \_ s ->
    let (comment_q', newAnns) = allocateComments ss (pv_comment_q s) in
      PV_Ok s {
         pv_comment_q = comment_q'
       } (EpaComments newAnns)
  allocatePriorCommentsP ss = PV $ \_ s ->
    let (header_comments', comment_q', newAnns) =
          allocatePriorComments ss (pv_comment_q s) (pv_header_comments s) in
      PV_Ok s {
         pv_header_comments = header_comments',
         pv_comment_q = comment_q'
       } (EpaComments newAnns)
  allocateFinalCommentsP ss = PV $ \_ s ->
    let (header_comments', comment_q', newAnns) =
          allocateFinalComments ss (pv_comment_q s) (pv_header_comments s) in
      PV_Ok s {
         pv_header_comments = header_comments',
         pv_comment_q = comment_q'
       } (EpaCommentsBalanced (Strict.fromMaybe [] header_comments') newAnns)

{- Note [Parser-Validator Details]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A PV computation is parametrized by some 'ParseContext' for diagnostic messages,
which can be set depending on validation context. We use this in checkPattern
to fix #984.

Consider this example, where the user has forgotten a 'do':

  f _ = do
    x <- computation
    case () of
      _ ->
        result <- computation
        case () of () -> undefined

GHC parses it as follows:

  f _ = do
    x <- computation
    (case () of
      _ ->
        result) <- computation
        case () of () -> undefined

Note that this fragment is parsed as a pattern:

  case () of
    _ ->
      result

We attempt to detect such cases and add a hint to the diagnostic messages:

  T984.hs:6:9:
    Parse error in pattern: case () of { _ -> result }
    Possibly caused by a missing 'do'?

The "Possibly caused by a missing 'do'?" suggestion is the hint that is computed
out of the 'ParseContext', which are read by functions like 'patFail' when
constructing the 'PsParseErrorInPatDetails' data structure.
When validating in a context other than 'bindpat' (a pattern to the left of <-), we set the details to 'noParseContext' and it has no effect on the diagnostic messages. -} -- | Hint about bang patterns, assuming @BangPatterns@ is off. hintBangPat :: SrcSpan -> Pat GhcPs -> PV () hintBangPat span e = do bang_on <- getBit BangPatBit unless bang_on $ addError $ mkPlainErrorMsgEnvelope span $ PsErrIllegalBangPattern e mkSumOrTupleExpr :: SrcSpanAnnA -> Boxity -> SumOrTuple (HsExpr GhcPs) -> [AddEpAnn] -> PV (LHsExpr GhcPs) -- Tuple mkSumOrTupleExpr l boxity (Tuple es) anns = do cs <- getCommentsFor (locA l) return $ L l (ExplicitTuple (EpAnn (spanAsAnchor $ locA l) anns cs) (map toTupArg es) boxity) where toTupArg :: Either (EpAnn EpaLocation) (LHsExpr GhcPs) -> HsTupArg GhcPs toTupArg (Left ann) = missingTupArg ann toTupArg (Right a) = Present noAnn a -- Sum -- mkSumOrTupleExpr l Unboxed (Sum alt arity e) = -- return $ L l (ExplicitSum noExtField alt arity e) mkSumOrTupleExpr l Unboxed (Sum alt arity e barsp barsa) anns = do let an = case anns of [AddEpAnn AnnOpenPH o, AddEpAnn AnnClosePH c] -> AnnExplicitSum o barsp barsa c _ -> panic "mkSumOrTupleExpr" cs <- getCommentsFor (locA l) return $ L l (ExplicitSum (EpAnn (spanAsAnchor $ locA l) an cs) alt arity e) mkSumOrTupleExpr l Boxed a@Sum{} _ = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrUnsupportedBoxedSumExpr a mkSumOrTuplePat :: SrcSpanAnnA -> Boxity -> SumOrTuple (PatBuilder GhcPs) -> [AddEpAnn] -> PV (LocatedA (PatBuilder GhcPs)) -- Tuple mkSumOrTuplePat l boxity (Tuple ps) anns = do ps' <- traverse toTupPat ps cs <- getCommentsFor (locA l) return $ L l (PatBuilderPat (TuplePat (EpAnn (spanAsAnchor $ locA l) anns cs) ps' boxity)) where toTupPat :: Either (EpAnn EpaLocation) (LocatedA (PatBuilder GhcPs)) -> PV (LPat GhcPs) -- Ignore the element location so that the error message refers to the -- entire tuple. See #19504 (and the discussion) for details. 
toTupPat p = case p of Left _ -> addFatalError $ mkPlainErrorMsgEnvelope (locA l) PsErrTupleSectionInPat Right p' -> checkLPat p' -- Sum mkSumOrTuplePat l Unboxed (Sum alt arity p barsb barsa) anns = do p' <- checkLPat p cs <- getCommentsFor (locA l) let an = EpAnn (spanAsAnchor $ locA l) (EpAnnSumPat anns barsb barsa) cs return $ L l (PatBuilderPat (SumPat an p' alt arity)) mkSumOrTuplePat l Boxed a@Sum{} _ = addFatalError $ mkPlainErrorMsgEnvelope (locA l) $ PsErrUnsupportedBoxedSumPat a mkLHsOpTy :: PromotionFlag -> LHsType GhcPs -> LocatedN RdrName -> LHsType GhcPs -> LHsType GhcPs mkLHsOpTy prom x op y = let loc = getLoc x `combineSrcSpansA` (noAnnSrcSpan $ getLocA op) `combineSrcSpansA` getLoc y in L loc (mkHsOpTy prom x op y) mkMultTy :: LHsToken "%" GhcPs -> LHsType GhcPs -> LHsUniToken "->" "→" GhcPs -> HsArrow GhcPs mkMultTy pct t@(L _ (HsTyLit _ (HsNumTy (SourceText "1") 1))) arr -- See #18888 for the use of (SourceText "1") above = HsLinearArrow (HsPct1 (L locOfPct1 HsTok) arr) where -- The location of "%" combined with the location of "1". locOfPct1 :: TokenLocation locOfPct1 = token_location_widenR (getLoc pct) (locA (getLoc t)) mkMultTy pct t arr = HsExplicitMult pct t arr mkTokenLocation :: SrcSpan -> TokenLocation mkTokenLocation (UnhelpfulSpan _) = NoTokenLoc mkTokenLocation (RealSrcSpan r _) = TokenLoc (EpaSpan r) -- Precondition: the TokenLocation has EpaSpan, never EpaDelta. token_location_widenR :: TokenLocation -> SrcSpan -> TokenLocation token_location_widenR NoTokenLoc _ = NoTokenLoc token_location_widenR tl (UnhelpfulSpan _) = tl token_location_widenR (TokenLoc (EpaSpan r1)) (RealSrcSpan r2 _) = (TokenLoc (EpaSpan (combineRealSrcSpans r1 r2))) token_location_widenR (TokenLoc (EpaDelta _ _)) _ = -- Never happens because the parser does not produce EpaDelta. 
panic "token_location_widenR: EpaDelta" ----------------------------------------------------------------------------- -- Token symbols starSym :: Bool -> String starSym True = "★" starSym False = "*" ----------------------------------------- -- Bits and pieces for RecordDotSyntax. mkRdrGetField :: SrcSpanAnnA -> LHsExpr GhcPs -> LocatedAn NoEpAnns (DotFieldOcc GhcPs) -> EpAnnCO -> LHsExpr GhcPs mkRdrGetField loc arg field anns = L loc HsGetField { gf_ext = anns , gf_expr = arg , gf_field = field } mkRdrProjection :: NonEmpty (LocatedAn NoEpAnns (DotFieldOcc GhcPs)) -> EpAnn AnnProjection -> HsExpr GhcPs mkRdrProjection flds anns = HsProjection { proj_ext = anns , proj_flds = flds } mkRdrProjUpdate :: SrcSpanAnnA -> Located [LocatedAn NoEpAnns (DotFieldOcc GhcPs)] -> LHsExpr GhcPs -> Bool -> EpAnn [AddEpAnn] -> LHsRecProj GhcPs (LHsExpr GhcPs) mkRdrProjUpdate _ (L _ []) _ _ _ = panic "mkRdrProjUpdate: The impossible has happened!" mkRdrProjUpdate loc (L l flds) arg isPun anns = L loc HsFieldBind { hfbAnn = anns , hfbLHS = L (noAnnSrcSpan l) (FieldLabelStrings flds) , hfbRHS = arg , hfbPun = isPun } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/PostProcess/Haddock.hs0000644000000000000000000016623714472400112023263 0ustar0000000000000000{-# LANGUAGE ApplicativeDo #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {- | This module implements 'addHaddockToModule', which inserts Haddock comments accumulated during parsing into the AST (#17544). We process Haddock comments in two phases: 1. Parse the program (via the Happy parser in `Parser.y`), generating an AST, and (quite separately) a list of all the Haddock comments found in the file. 
   More precisely, the Haddock comments are accumulated in the `hdk_comments`
   field of the `PState`, the parser state (see Lexer.x):

     data PState = PState
       { ...
       , hdk_comments :: [PsLocated HdkComment] }

   Each of these Haddock comments has a `PsSpan`, which gives the `BufPos` of
   the beginning and end of the Haddock comment.

2. Walk over the AST, attaching the Haddock comments to the correct parts of
   the tree. This step is called `addHaddockToModule`, and is implemented in
   this module.

   See Note [Adding Haddock comments to the syntax tree].

This approach codifies an important principle:

  The presence or absence of a Haddock comment should never change the parsing
  of a program.

Alternative approaches that did not work properly:

1. Using 'RealSrcLoc' instead of 'BufPos'. This led to failures in the
   presence of {-# LANGUAGE CPP #-} and other sources of line pragmas. See
   documentation on 'BufPos' (in GHC.Types.SrcLoc) for the details.

2. In earlier versions of GHC, the Haddock comments were incorporated into
   the Parser.y grammar. The parser constructed the AST and attached comments
   to it in a single pass. See Note [Old solution: Haddock in the grammar]
   for the details.
-} module GHC.Parser.PostProcess.Haddock (addHaddockToModule) where import GHC.Prelude hiding (mod) import GHC.Hs import GHC.Types.SrcLoc import GHC.Utils.Panic import GHC.Data.Bag import Data.Semigroup import Data.Foldable import Data.Traversable import Data.Maybe import Control.Monad import Control.Monad.Trans.State.Strict import Control.Monad.Trans.Reader import Control.Monad.Trans.Writer import Data.Functor.Identity import qualified Data.Monoid import {-# SOURCE #-} GHC.Parser (parseIdentifier) import GHC.Parser.Lexer import GHC.Parser.HaddockLex import GHC.Parser.Errors.Types import GHC.Utils.Misc (mergeListsBy, filterOut, mapLastM, (<&&>)) import qualified GHC.Data.Strict as Strict {- Note [Adding Haddock comments to the syntax tree] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'addHaddock' traverses the AST in concrete syntax order, building a computation (represented by HdkA) that reconstructs the AST but with Haddock comments inserted in appropriate positions: addHaddock :: HasHaddock a => a -> HdkA a Consider this code example: f :: Int -- ^ comment on argument -> Bool -- ^ comment on result In the AST, the "Int" part of this snippet is represented like this (pseudo-code): L (BufSpan 6 8) (HsTyVar "Int") :: LHsType GhcPs And the comments are represented like this (pseudo-code): L (BufSpan 11 35) (HdkCommentPrev "comment on argument") L (BufSpan 46 69) (HdkCommentPrev "comment on result") So when we are traversing the AST and 'addHaddock' is applied to HsTyVar "Int", how does it know to associate it with "comment on argument" but not with "comment on result"? The trick is to look in the space between syntactic elements. In the example above, the location range in which we search for HdkCommentPrev is as follows: f :: Int████████████████████████ ████Bool -- ^ comment on result We search for comments after HsTyVar "Int" and until the next syntactic element, in this case HsTyVar "Bool". 
Ignoring the "->" allows us to accommodate alternative coding styles:

  f :: Int ->      -- ^ comment on argument
       Bool        -- ^ comment on result

Sometimes we also need to take indentation information into account.
Compare the following examples:

    class C a where
      f :: a -> Int
      -- ^ comment on f

    class C a where
      f :: a -> Int
    -- ^ comment on C

Notice how "comment on f" and "comment on C" differ only by indentation level.

Therefore, in order to know the location range in which the comments are
applicable to a syntactic element, we need three nuggets of information:

  1. lower bound on the BufPos of a comment
  2. upper bound on the BufPos of a comment
  3. minimum indentation level of a comment

This information is represented by the 'LocRange' type.

In order to propagate this information, we have the 'HdkA' applicative.
'HdkA' is defined as follows:

  data HdkA a = HdkA (Maybe BufSpan) (HdkM a)

The first field contains a 'BufSpan', which represents the location
span taken by a syntactic element:

  addHaddock (L bufSpan ...) = HdkA (Just bufSpan) ...

The second field, 'HdkM', is a stateful computation that looks up Haddock
comments in the specified location range:

  HdkM a ≈
       LocRange                  -- The allowed location range
    -> [PsLocated HdkComment]    -- Unallocated comments
    -> (a,                       -- AST with comments inserted into it
        [PsLocated HdkComment])  -- Leftover comments

The 'Applicative' instance for 'HdkA' is defined in such a way that the
location range of every computation is defined by its neighbours:

  addHaddock aaa <*> addHaddock bbb <*> addHaddock ccc

Here, the 'LocRange' passed to the 'HdkM' computation of addHaddock bbb
is determined by the BufSpan recorded in addHaddock aaa and addHaddock ccc.

This is why it's important to traverse the AST in the order of the concrete
syntax.
In the example above we assume that aaa, bbb, ccc are ordered by location: * getBufSpan (getLoc aaa) < getBufSpan (getLoc bbb) * getBufSpan (getLoc bbb) < getBufSpan (getLoc ccc) Violation of this assumption would lead to bugs, and care must be taken to traverse the AST correctly. For example, when dealing with class declarations, we have to use 'flattenBindsAndSigs' to traverse it in the correct order. -} -- | Add Haddock documentation accumulated in the parser state -- to a parsed HsModule. -- -- Reports badly positioned comments when -Winvalid-haddock is enabled. addHaddockToModule :: Located HsModule -> P (Located HsModule) addHaddockToModule lmod = do pState <- getPState let all_comments = toList (hdk_comments pState) initial_hdk_st = HdkSt all_comments [] (lmod', final_hdk_st) = runHdkA (addHaddock lmod) initial_hdk_st hdk_warnings = collectHdkWarnings final_hdk_st -- lmod': module with Haddock comments inserted into the AST -- hdk_warnings: warnings accumulated during AST/comment processing mapM_ reportHdkWarning hdk_warnings return lmod' reportHdkWarning :: HdkWarn -> P () reportHdkWarning (HdkWarnInvalidComment (L l _)) = addPsMessage (mkSrcSpanPs l) PsWarnHaddockInvalidPos reportHdkWarning (HdkWarnExtraComment (L l _)) = addPsMessage l PsWarnHaddockIgnoreMulti collectHdkWarnings :: HdkSt -> [HdkWarn] collectHdkWarnings HdkSt{ hdk_st_pending, hdk_st_warnings } = map HdkWarnInvalidComment hdk_st_pending -- leftover Haddock comments not inserted into the AST ++ hdk_st_warnings {- ********************************************************************* * * * addHaddock: a family of functions that processes the AST * * in concrete syntax order, adding documentation comments to it * * * ********************************************************************* -} -- HasHaddock is a convenience class for overloading the addHaddock operation. 
-- Alternatively, we could define a family of monomorphic functions: -- -- addHaddockSomeTypeX :: SomeTypeX -> HdkA SomeTypeX -- addHaddockAnotherTypeY :: AnotherTypeY -> HdkA AnotherTypeY -- addHaddockOneMoreTypeZ :: OneMoreTypeZ -> HdkA OneMoreTypeZ -- -- But having a single name for all of them is just easier to read, and makes it clear -- that they all are of the form t -> HdkA t for some t. -- -- If you need to handle a more complicated scenario that doesn't fit this -- pattern, it's always possible to define separate functions outside of this -- class, as is done in case of e.g. addHaddockConDeclField. -- -- See Note [Adding Haddock comments to the syntax tree]. class HasHaddock a where addHaddock :: a -> HdkA a instance HasHaddock a => HasHaddock [a] where addHaddock = traverse addHaddock -- -- | Module header comment -- module M ( -- -- * Export list comment -- Item1, -- Item2, -- -- * Export list comment -- item3, -- item4 -- ) where -- instance HasHaddock (Located HsModule) where addHaddock (L l_mod mod) = do -- Step 1, get the module header documentation comment: -- -- -- | Module header comment -- module M where -- -- Only do this when the module header exists. headerDocs <- for @Maybe (hsmodName mod) $ \(L l_name _) -> extendHdkA (locA l_name) $ liftHdkA $ do -- todo: register keyword location of 'module', see Note [Register keyword location] docs <- inLocRange (locRangeTo (getBufPos (srcSpanStart (locA l_name)))) $ takeHdkComments mkDocNext dc <- selectDocString docs pure $ lexLHsDocString <$> dc -- Step 2, process documentation comments in the export list: -- -- module M ( -- -- * Export list comment -- Item1, -- Item2, -- -- * Export list comment -- item3, -- item4 -- ) where -- -- Only do this when the export list exists. 
hsmodExports' <- traverse @Maybe addHaddock (hsmodExports mod) -- Step 3, register the import section to reject invalid comments: -- -- import Data.Maybe -- -- | rejected comment (cannot appear here) -- import Data.Bool -- traverse_ registerHdkA (hsmodImports mod) -- Step 4, process declarations: -- -- module M where -- -- | Comment on D -- data D = MkD -- ^ Comment on MkD -- data C = MkC -- ^ Comment on MkC -- -- ^ Comment on C -- let layout_info = hsmodLayout mod hsmodDecls' <- addHaddockInterleaveItems layout_info (mkDocHsDecl layout_info) (hsmodDecls mod) pure $ L l_mod $ mod { hsmodExports = hsmodExports' , hsmodDecls = hsmodDecls' , hsmodHaddockModHeader = join @Maybe headerDocs } lexHsDocString :: HsDocString -> HsDoc GhcPs lexHsDocString = lexHsDoc parseIdentifier lexLHsDocString :: Located HsDocString -> LHsDoc GhcPs lexLHsDocString = fmap lexHsDocString -- Only for module exports, not module imports. -- -- module M (a, b, c) where -- use on this [LIE GhcPs] -- import I (a, b, c) -- do not use here! -- -- Imports cannot have documentation comments anyway. instance HasHaddock (LocatedL [LocatedA (IE GhcPs)]) where addHaddock (L l_exports exports) = extendHdkA (locA l_exports) $ do exports' <- addHaddockInterleaveItems NoLayoutInfo mkDocIE exports registerLocHdkA (srcLocSpan (srcSpanEnd (locA l_exports))) -- Do not consume comments after the closing parenthesis pure $ L l_exports exports' -- Needed to use 'addHaddockInterleaveItems' in 'instance HasHaddock (Located [LIE GhcPs])'. instance HasHaddock (LocatedA (IE GhcPs)) where addHaddock a = a <$ registerHdkA a {- Add Haddock items to a list of non-Haddock items. Used to process export lists (with mkDocIE) and declarations (with mkDocHsDecl). For example: module M where -- | Comment on D data D = MkD -- ^ Comment on MkD data C = MkC -- ^ Comment on MkC -- ^ Comment on C In this case, we should produce four HsDecl items (pseudo-code): 1. DocD (DocCommentNext "Comment on D") 2. TyClD (DataDecl "D" ... 
[ConDeclH98 "MkD" ... (Just "Comment on MkD")]) 3. TyClD (DataDecl "C" ... [ConDeclH98 "MkC" ... (Just "Comment on MkC")]) 4. DocD (DocCommentPrev "Comment on C") The inputs to addHaddockInterleaveItems are: * layout_info :: LayoutInfo In the example above, note that the indentation level inside the module is 2 spaces. It would be represented as layout_info = VirtualBraces 2. It is used to delimit the search space for comments when processing declarations. Here, we restrict indentation levels to >=(2+1), so that when we look up comment on MkC, we get "Comment on MkC" but not "Comment on C". * get_doc_item :: PsLocated HdkComment -> Maybe a This is the function used to look up documentation comments. In the above example, get_doc_item = mkDocHsDecl layout_info, and it will produce the following parts of the output: DocD (DocCommentNext "Comment on D") DocD (DocCommentPrev "Comment on C") * The list of items. These are the declarations that will be annotated with documentation comments. Before processing: TyClD (DataDecl "D" ... [ConDeclH98 "MkD" ... Nothing]) TyClD (DataDecl "C" ... [ConDeclH98 "MkC" ... Nothing]) After processing: TyClD (DataDecl "D" ... [ConDeclH98 "MkD" ... (Just "Comment on MkD")]) TyClD (DataDecl "C" ... [ConDeclH98 "MkC" ... (Just "Comment on MkC")]) -} addHaddockInterleaveItems :: forall a. 
HasHaddock a => LayoutInfo -> (PsLocated HdkComment -> Maybe a) -- Get a documentation item -> [a] -- Unprocessed (non-documentation) items -> HdkA [a] -- Documentation items & processed non-documentation items addHaddockInterleaveItems layout_info get_doc_item = go where go :: [a] -> HdkA [a] go [] = liftHdkA (takeHdkComments get_doc_item) go (item : items) = do docItems <- liftHdkA (takeHdkComments get_doc_item) item' <- with_layout_info (addHaddock item) other_items <- go items pure $ docItems ++ item':other_items with_layout_info :: HdkA a -> HdkA a with_layout_info = case layout_info of NoLayoutInfo -> id ExplicitBraces -> id VirtualBraces n -> let loc_range = mempty { loc_range_col = ColumnFrom (n+1) } in hoistHdkA (inLocRange loc_range) instance HasHaddock (LocatedA (HsDecl GhcPs)) where addHaddock ldecl = extendHdkA (getLocA ldecl) $ traverse @LocatedA addHaddock ldecl -- Process documentation comments *inside* a declaration, for example: -- -- data T = MkT -- ^ Comment on MkT (inside DataDecl) -- f, g -- :: Int -- ^ Comment on Int (inside TypeSig) -- -> Bool -- ^ Comment on Bool (inside TypeSig) -- -- Comments that relate to the entire declaration are processed elsewhere: -- -- -- | Comment on T (not processed in this instance) -- data T = MkT -- -- -- | Comment on f, g (not processed in this instance) -- f, g :: Int -> Bool -- f = ... -- g = ... -- -- Such comments are inserted into the syntax tree as DocD declarations -- by addHaddockInterleaveItems, and then associated with other declarations -- in GHC.HsToCore.Docs (see DeclDocMap). -- -- In this instance, we only process comments that relate to parts of the -- declaration, not to the declaration itself. 
instance HasHaddock (HsDecl GhcPs) where -- Type signatures: -- -- f, g -- :: Int -- ^ Comment on Int -- -> Bool -- ^ Comment on Bool -- addHaddock (SigD _ (TypeSig x names t)) = do traverse_ registerHdkA names t' <- addHaddock t pure (SigD noExtField (TypeSig x names t')) -- Pattern synonym type signatures: -- -- pattern MyPat -- :: Bool -- ^ Comment on Bool -- -> Maybe Bool -- ^ Comment on Maybe Bool -- addHaddock (SigD _ (PatSynSig x names t)) = do traverse_ registerHdkA names t' <- addHaddock t pure (SigD noExtField (PatSynSig x names t')) -- Class method signatures and default signatures: -- -- class C x where -- method_of_c -- :: Maybe x -- ^ Comment on Maybe x -- -> IO () -- ^ Comment on IO () -- default method_of_c -- :: Eq x -- => Maybe x -- ^ Comment on Maybe x -- -> IO () -- ^ Comment on IO () -- addHaddock (SigD _ (ClassOpSig x is_dflt names t)) = do traverse_ registerHdkA names t' <- addHaddock t pure (SigD noExtField (ClassOpSig x is_dflt names t')) -- Data/newtype declarations: -- -- data T = MkT -- ^ Comment on MkT -- A -- ^ Comment on A -- B -- ^ Comment on B -- -- data G where -- -- | Comment on MkG -- MkG :: A -- ^ Comment on A -- -> B -- ^ Comment on B -- -> G -- -- newtype N = MkN { getN :: Natural } -- ^ Comment on N -- deriving newtype (Eq {- ^ Comment on Eq N -}) -- deriving newtype (Ord {- ^ Comment on Ord N -}) -- addHaddock (TyClD x decl) | DataDecl { tcdDExt, tcdLName, tcdTyVars, tcdFixity, tcdDataDefn = defn } <- decl = do registerHdkA tcdLName defn' <- addHaddock defn pure $ TyClD x (DataDecl { tcdDExt, tcdLName, tcdTyVars, tcdFixity, tcdDataDefn = defn' }) -- Class declarations: -- -- class C a where -- -- | Comment on the first method -- first_method :: a -> Bool -- second_method :: a -> String -- -- ^ Comment on the second method -- addHaddock (TyClD _ decl) | ClassDecl { tcdCExt = (x, NoAnnSortKey, tcdLayout), tcdCtxt, tcdLName, tcdTyVars, tcdFixity, tcdFDs, tcdSigs, tcdMeths, tcdATs, tcdATDefs } <- decl = do registerHdkA tcdLName 
-- todo: register keyword location of 'where', see Note [Register keyword location] where_cls' <- addHaddockInterleaveItems tcdLayout (mkDocHsDecl tcdLayout) $ flattenBindsAndSigs (tcdMeths, tcdSigs, tcdATs, tcdATDefs, [], []) pure $ let (tcdMeths', tcdSigs', tcdATs', tcdATDefs', _, tcdDocs) = partitionBindsAndSigs where_cls' decl' = ClassDecl { tcdCExt = (x, NoAnnSortKey, tcdLayout) , tcdCtxt, tcdLName, tcdTyVars, tcdFixity, tcdFDs , tcdSigs = tcdSigs' , tcdMeths = tcdMeths' , tcdATs = tcdATs' , tcdATDefs = tcdATDefs' , tcdDocs } in TyClD noExtField decl' -- Data family instances: -- -- data instance D Bool where ... (same as data/newtype declarations) -- data instance D Bool = ... (same as data/newtype declarations) -- addHaddock (InstD _ decl) | DataFamInstD { dfid_ext, dfid_inst } <- decl , DataFamInstDecl { dfid_eqn } <- dfid_inst = do dfid_eqn' <- case dfid_eqn of FamEqn { feqn_ext, feqn_tycon, feqn_bndrs, feqn_pats, feqn_fixity, feqn_rhs } -> do registerHdkA feqn_tycon feqn_rhs' <- addHaddock feqn_rhs pure $ FamEqn { feqn_ext, feqn_tycon, feqn_bndrs, feqn_pats, feqn_fixity, feqn_rhs = feqn_rhs' } pure $ InstD noExtField (DataFamInstD { dfid_ext, dfid_inst = DataFamInstDecl { dfid_eqn = dfid_eqn' } }) -- Type synonyms: -- -- type T = Int -- ^ Comment on Int -- addHaddock (TyClD _ decl) | SynDecl { tcdSExt, tcdLName, tcdTyVars, tcdFixity, tcdRhs } <- decl = do registerHdkA tcdLName -- todo: register keyword location of '=', see Note [Register keyword location] tcdRhs' <- addHaddock tcdRhs pure $ TyClD noExtField (SynDecl { tcdSExt, tcdLName, tcdTyVars, tcdFixity, tcdRhs = tcdRhs' }) -- Foreign imports: -- -- foreign import ccall unsafe -- o :: Float -- ^ The input float -- -> IO Float -- ^ The output float -- addHaddock (ForD _ decl) = do registerHdkA (fd_name decl) fd_sig_ty' <- addHaddock (fd_sig_ty decl) pure $ ForD noExtField (decl{ fd_sig_ty = fd_sig_ty' }) -- Other declarations addHaddock d = pure d -- The right-hand side of a data/newtype declaration or 
data family instance. instance HasHaddock (HsDataDefn GhcPs) where addHaddock defn@HsDataDefn{} = do -- Register the kind signature: -- data D :: Type -> Type where ... -- data instance D Bool :: Type where ... traverse_ @Maybe registerHdkA (dd_kindSig defn) -- todo: register keyword location of '=' or 'where', see Note [Register keyword location] -- Process the data constructors: -- -- data T -- = MkT1 Int Bool -- ^ Comment on MkT1 -- | MkT2 Char Int -- ^ Comment on MkT2 -- dd_cons' <- addHaddock (dd_cons defn) -- Process the deriving clauses: -- -- newtype N = MkN Natural -- deriving (Eq {- ^ Comment on Eq N -}) -- deriving (Ord {- ^ Comment on Ord N -}) -- dd_derivs' <- addHaddock (dd_derivs defn) pure $ defn { dd_cons = dd_cons', dd_derivs = dd_derivs' } -- Process the deriving clauses of a data/newtype declaration. -- Not used for standalone deriving. instance HasHaddock (Located [LocatedAn NoEpAnns (HsDerivingClause GhcPs)]) where addHaddock lderivs = extendHdkA (getLoc lderivs) $ traverse @Located addHaddock lderivs -- Process a single deriving clause of a data/newtype declaration: -- -- newtype N = MkN Natural -- deriving newtype (Eq {- ^ Comment on Eq N -}) -- deriving (Ord {- ^ Comment on Ord N -}) via Down N -- -- Not used for standalone deriving. instance HasHaddock (LocatedAn NoEpAnns (HsDerivingClause GhcPs)) where addHaddock lderiv = extendHdkA (getLocA lderiv) $ for @(LocatedAn NoEpAnns) lderiv $ \deriv -> case deriv of HsDerivingClause { deriv_clause_ext, deriv_clause_strategy, deriv_clause_tys } -> do let -- 'stock', 'anyclass', and 'newtype' strategies come -- before the clause types. -- -- 'via' comes after. 
-- -- See tests/.../T11768.hs (register_strategy_before, register_strategy_after) = case deriv_clause_strategy of Nothing -> (pure (), pure ()) Just (L l (ViaStrategy _)) -> (pure (), registerLocHdkA (locA l)) Just (L l _) -> (registerLocHdkA (locA l), pure ()) register_strategy_before deriv_clause_tys' <- addHaddock deriv_clause_tys register_strategy_after pure HsDerivingClause { deriv_clause_ext, deriv_clause_strategy, deriv_clause_tys = deriv_clause_tys' } -- Process the types in a single deriving clause, which may come in one of the -- following forms: -- -- 1. A singular type constructor: -- deriving Eq -- ^ Comment on Eq -- -- 2. A list of comma-separated types surrounded by enclosing parentheses: -- deriving ( Eq -- ^ Comment on Eq -- , C a -- ^ Comment on C a -- ) instance HasHaddock (LocatedC (DerivClauseTys GhcPs)) where addHaddock (L l_dct dct) = extendHdkA (locA l_dct) $ case dct of DctSingle x ty -> do ty' <- addHaddock ty pure $ L l_dct $ DctSingle x ty' DctMulti x tys -> do tys' <- addHaddock tys pure $ L l_dct $ DctMulti x tys' -- Process a single data constructor declaration, which may come in one of the -- following forms: -- -- 1. H98-syntax PrefixCon: -- data T = -- MkT -- ^ Comment on MkT -- Int -- ^ Comment on Int -- Bool -- ^ Comment on Bool -- -- 2. H98-syntax InfixCon: -- data T = -- Int -- ^ Comment on Int -- :+ -- ^ Comment on (:+) -- Bool -- ^ Comment on Bool -- -- 3. H98-syntax RecCon: -- data T = -- MkT { int_field :: Int, -- ^ Comment on int_field -- bool_field :: Bool } -- ^ Comment on bool_field -- -- 4. GADT-syntax PrefixCon: -- data T where -- -- | Comment on MkT -- MkT :: Int -- ^ Comment on Int -- -> Bool -- ^ Comment on Bool -- -> T -- -- 5. 
GADT-syntax RecCon: -- data T where -- -- | Comment on MkT -- MkT :: { int_field :: Int, -- ^ Comment on int_field -- bool_field :: Bool } -- ^ Comment on bool_field -- -> T -- instance HasHaddock (LocatedA (ConDecl GhcPs)) where addHaddock (L l_con_decl con_decl) = extendHdkA (locA l_con_decl) $ case con_decl of ConDeclGADT { con_g_ext, con_names, con_bndrs, con_mb_cxt, con_g_args, con_res_ty } -> do -- discardHasInnerDocs is ok because we don't need this info for GADTs. con_doc' <- discardHasInnerDocs $ getConDoc (getLocA (head con_names)) con_g_args' <- case con_g_args of PrefixConGADT ts -> PrefixConGADT <$> addHaddock ts RecConGADT (L l_rec flds) arr -> do -- discardHasInnerDocs is ok because we don't need this info for GADTs. flds' <- traverse (discardHasInnerDocs . addHaddockConDeclField) flds pure $ RecConGADT (L l_rec flds') arr con_res_ty' <- addHaddock con_res_ty pure $ L l_con_decl $ ConDeclGADT { con_g_ext, con_names, con_bndrs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_g_args = con_g_args', con_res_ty = con_res_ty' } ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_args } -> addConTrailingDoc (srcSpanEnd $ locA l_con_decl) $ case con_args of PrefixCon _ ts -> do con_doc' <- getConDoc (getLocA con_name) ts' <- traverse addHaddockConDeclFieldTy ts pure $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_args = PrefixCon noTypeArgs ts' } InfixCon t1 t2 -> do t1' <- addHaddockConDeclFieldTy t1 con_doc' <- getConDoc (getLocA con_name) t2' <- addHaddockConDeclFieldTy t2 pure $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_doc = lexLHsDocString <$> con_doc', con_args = InfixCon t1' t2' } RecCon (L l_rec flds) -> do con_doc' <- getConDoc (getLocA con_name) flds' <- traverse addHaddockConDeclField flds pure $ L l_con_decl $ ConDeclH98 { con_ext, con_name, con_forall, con_ex_tvs, con_mb_cxt, con_doc = 
lexLHsDocString <$> con_doc', con_args = RecCon (L l_rec flds') } -- Keep track of documentation comments on the data constructor or any of its -- fields. -- -- See Note [Trailing comment on constructor declaration] type ConHdkA = WriterT HasInnerDocs HdkA -- Does the data constructor declaration have any inner (non-trailing) -- documentation comments? -- -- Example when HasInnerDocs is True: -- -- data X = -- MkX -- ^ inner comment -- Field1 -- ^ inner comment -- Field2 -- ^ inner comment -- Field3 -- ^ trailing comment -- -- Example when HasInnerDocs is False: -- -- data Y = MkY Field1 Field2 Field3 -- ^ trailing comment -- -- See Note [Trailing comment on constructor declaration] newtype HasInnerDocs = HasInnerDocs Bool deriving (Semigroup, Monoid) via Data.Monoid.Any -- Run ConHdkA by discarding the HasInnerDocs info when we have no use for it. -- -- We only do this when processing data declarations that use GADT syntax, -- because only the H98 syntax declarations have special treatment for the -- trailing documentation comment. -- -- See Note [Trailing comment on constructor declaration] discardHasInnerDocs :: ConHdkA a -> HdkA a discardHasInnerDocs = fmap fst . runWriterT -- Get the documentation comment associated with the data constructor in a -- data/newtype declaration. getConDoc :: SrcSpan -- Location of the data constructor -> ConHdkA (Maybe (Located HsDocString)) getConDoc l = WriterT $ extendHdkA l $ liftHdkA $ do mDoc <- getPrevNextDoc l return (mDoc, HasInnerDocs (isJust mDoc)) -- Add documentation comment to a data constructor field. -- Used for PrefixCon and InfixCon. addHaddockConDeclFieldTy :: HsScaled GhcPs (LHsType GhcPs) -> ConHdkA (HsScaled GhcPs (LHsType GhcPs)) addHaddockConDeclFieldTy (HsScaled mult (L l t)) = WriterT $ extendHdkA (locA l) $ liftHdkA $ do mDoc <- getPrevNextDoc (locA l) return (HsScaled mult (mkLHsDocTy (L l t) mDoc), HasInnerDocs (isJust mDoc)) -- Add documentation comment to a data constructor field. -- Used for RecCon. 
-- Attach a documentation comment to a record field of a data constructor.
addHaddockConDeclField :: LConDeclField GhcPs -> ConHdkA (LConDeclField GhcPs)
addHaddockConDeclField (L l_fld fld) =
  WriterT $
  extendHdkA (locA l_fld) $
  liftHdkA $ do
    cd_fld_doc <- fmap lexLHsDocString <$> getPrevNextDoc (locA l_fld)
    return ( L l_fld (fld { cd_fld_doc })
           , HasInnerDocs (isJust cd_fld_doc) )

-- 1. Process a H98-syntax data constructor declaration in a context with no
--    access to the trailing documentation comment (by running the provided
--    ConHdkA computation).
--
-- 2. Then grab the trailing comment (if it exists) and attach it where
--    appropriate: either to the data constructor itself or to its last field,
--    depending on HasInnerDocs.
--
-- See Note [Trailing comment on constructor declaration]
addConTrailingDoc
  :: SrcLoc  -- The end of a data constructor declaration.
             -- Any docprev comment past this point is considered trailing.
  -> ConHdkA (LConDecl GhcPs)
  -> HdkA (LConDecl GhcPs)
addConTrailingDoc l_sep =
    hoistHdkA add_trailing_doc . runWriterT
  where
    add_trailing_doc
      :: HdkM (LConDecl GhcPs, HasInnerDocs)
      -> HdkM (LConDecl GhcPs)
    add_trailing_doc m = do
      -- inLocRange delimits the context so that the inner computation
      -- will not consume the trailing documentation comment.
      (L l con_decl, HasInnerDocs has_inner_docs)
        <- inLocRange (locRangeTo (getBufPos l_sep)) m
      case con_decl of
        ConDeclH98{} -> do
          trailingDocs <-
            inLocRange (locRangeFrom (getBufPos l_sep)) $
            takeHdkComments mkDocPrev
          if null trailingDocs
          then return (L l con_decl)
          else
            if has_inner_docs
            then do
              -- Inner docs present: the trailing comment goes to the last field.
              let mk_doc_ty :: HsScaled GhcPs (LHsType GhcPs)
                            -> HdkM (HsScaled GhcPs (LHsType GhcPs))
                  mk_doc_ty x@(HsScaled _ (L _ HsDocTy{})) =
                    -- Happens in the following case:
                    --
                    --    data T =
                    --      MkT
                    --        -- | Comment on SomeField
                    --        SomeField
                    --        -- ^ Another comment on SomeField? (rejected)
                    --
                    -- See tests/.../haddockExtraDocs.hs
                    x <$ reportExtraDocs trailingDocs
                  mk_doc_ty (HsScaled mult (L l' t)) = do
                    doc <- selectDocString trailingDocs
                    return $ HsScaled mult (mkLHsDocTy (L l' t) doc)
              let mk_doc_fld :: LConDeclField GhcPs
                             -> HdkM (LConDeclField GhcPs)
                  mk_doc_fld x@(L _ (ConDeclField { cd_fld_doc = Just _ })) =
                    -- Happens in the following case:
                    --
                    --    data T =
                    --      MkT {
                    --        -- | Comment on SomeField
                    --        someField :: SomeField
                    --      } -- ^ Another comment on SomeField? (rejected)
                    --
                    -- See tests/.../haddockExtraDocs.hs
                    x <$ reportExtraDocs trailingDocs
                  mk_doc_fld (L l' con_fld) = do
                    doc <- selectDocString trailingDocs
                    return $ L l' (con_fld { cd_fld_doc = fmap lexLHsDocString doc })
              con_args' <-
                case con_args con_decl of
                  x@(PrefixCon _ []) -> x <$ reportExtraDocs trailingDocs
                  x@(RecCon (L _ [])) -> x <$ reportExtraDocs trailingDocs
                  PrefixCon _ ts -> PrefixCon noTypeArgs <$> mapLastM mk_doc_ty ts
                  InfixCon t1 t2 -> InfixCon t1 <$> mk_doc_ty t2
                  RecCon (L l_rec flds) -> do
                    flds' <- mapLastM mk_doc_fld flds
                    return (RecCon (L l_rec flds'))
              return $ L l (con_decl{ con_args = con_args' })
            else do
              -- No inner docs: the trailing comment documents the constructor.
              con_doc' <-
                selectDoc (con_doc con_decl `mcons` (map lexLHsDocString trailingDocs))
              return $ L l (con_decl{ con_doc = con_doc' })
        _ -> panic "addConTrailingDoc: non-H98 ConDecl"

{- Note [Trailing comment on constructor declaration]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The trailing comment after a constructor declaration is associated with the
constructor itself when there are no other comments inside the declaration:

  data T = MkT A B -- ^ Comment on MkT
  data T = MkT { x :: A } -- ^ Comment on MkT

When there are other comments, the trailing comment applies to the last field:

  data T = MkT -- ^ Comment on MkT
           A   -- ^ Comment on A
           B   -- ^ Comment on B

  data T =
    MkT { a :: A -- ^ Comment on a
        , b :: B -- ^ Comment on b
        , c :: C
        } -- ^ Comment on c

This makes the trailing comment context-sensitive.
Example:

  data T =
    -- | comment 1
    MkT Int Bool -- ^ comment 2

Here, "comment 2" applies to the Bool field. But if we removed "comment 1",
then "comment 2" would apply to the data constructor rather than its field.

All of this applies to H98-style data declarations only.
GADTSyntax data constructors don't have any special treatment for the
trailing comment.

We implement this in two steps:

  1. Process the data constructor declaration in a delimited context where the
     trailing documentation comment is not visible. Delimiting the context is
     done in addConTrailingDoc.

     When processing the declaration, track whether the constructor or any of
     its fields have a documentation comment associated with them.
     This is done using WriterT HasInnerDocs, see ConHdkA.

  2. Depending on whether HasInnerDocs is True or False, attach the trailing
     documentation comment to the data constructor itself or to its last field.
-}

instance HasHaddock a => HasHaddock (HsScaled GhcPs a) where
  addHaddock (HsScaled mult a) = HsScaled mult <$> addHaddock a

instance HasHaddock a => HasHaddock (HsWildCardBndrs GhcPs a) where
  addHaddock (HsWC _ t) = HsWC noExtField <$> addHaddock t

instance HasHaddock (LocatedA (HsSigType GhcPs)) where
  addHaddock (L l (HsSig{sig_bndrs = outer_bndrs, sig_body = body})) =
    extendHdkA (locA l) $ do
      -- Register the forall-bound variables so that comments cannot attach
      -- inside the binder group.
      case outer_bndrs of
        HsOuterImplicit{} -> pure ()
        HsOuterExplicit{hso_bndrs = bndrs} ->
          registerLocHdkA (getLHsTyVarBndrsLoc bndrs)
      body' <- addHaddock body
      pure $ L l $ HsSig noExtField outer_bndrs body'

-- Process a type, adding documentation comments to function arguments
-- and the result. Many formatting styles are supported.
--
--  my_function ::
--      forall a.
--      Eq a =>
--      Maybe a ->    -- ^ Comment on Maybe a   (function argument)
--      Bool ->       -- ^ Comment on Bool      (function argument)
--      String        -- ^ Comment on String    (the result)
--
--  my_function
--      :: forall a. Eq a
--      => Maybe a    -- ^ Comment on Maybe a   (function argument)
--      -> Bool       -- ^ Comment on Bool      (function argument)
--      -> String     -- ^ Comment on String    (the result)
--
--  my_function ::
--      forall a. Eq a =>
--      -- | Comment on Maybe a (function argument)
--      Maybe a ->
--      -- | Comment on Bool (function argument)
--      Bool ->
--      -- | Comment on String (the result)
--      String
--
-- This is achieved by simply ignoring (not registering the location of) the
-- function arrow (->).
instance HasHaddock (LocatedA (HsType GhcPs)) where
  addHaddock (L l t) =
    extendHdkA (locA l) $
    case t of
      -- forall a b c. t
      HsForAllTy x tele body -> do
        registerLocHdkA (getForAllTeleLoc tele)
        body' <- addHaddock body
        pure $ L l (HsForAllTy x tele body')

      -- (Eq a, Num a) => t
      HsQualTy x lhs rhs -> do
        registerHdkA lhs
        rhs' <- addHaddock rhs
        pure $ L l (HsQualTy x lhs rhs')

      -- arg -> res
      HsFunTy u mult lhs rhs -> do
        lhs' <- addHaddock lhs
        rhs' <- addHaddock rhs
        pure $ L l (HsFunTy u mult lhs' rhs')

      -- other types
      _ -> liftHdkA $ do
        mDoc <- getPrevNextDoc (locA l)
        return (mkLHsDocTy (L l t) mDoc)

{- *********************************************************************
*                                                                      *
*    HdkA: a layer over HdkM that propagates location information      *
*                                                                      *
********************************************************************* -}

-- See Note [Adding Haddock comments to the syntax tree].
--
-- 'HdkA' provides a way to propagate location information from surrounding
-- computations:
--
--   left_neighbour <*> HdkA inner_span inner_m <*> right_neighbour
--
-- Here, the following holds:
--
-- * the 'left_neighbour' will only see Haddock comments until 'bufSpanStart' of 'inner_span'
-- * the 'right_neighbour' will only see Haddock comments after 'bufSpanEnd' of 'inner_span'
-- * the 'inner_m' will only see Haddock comments between its 'left_neighbour' and its 'right_neighbour'
--
-- In other words, every computation:
--
-- * delimits the surrounding computations
-- * is delimited by the surrounding computations
--
-- Therefore, a 'HdkA' computation must be always considered in the context in
-- which it is used.
data HdkA a =
  HdkA
    !(Strict.Maybe BufSpan)
      -- Just b  <=> BufSpan occupied by the processed AST element.
      --             The surrounding computations will not look inside.
      --
      -- Nothing <=> No BufSpan (e.g. when the HdkA is constructed by 'pure'
      --             or 'liftHdkA'). The surrounding computations are not
      --             delimited.
    !(HdkM a)
      -- The stateful computation that looks up Haddock comments and
      -- adds them to the resulting AST node.
  deriving (Functor)

instance Applicative HdkA where
  HdkA l1 m1 <*> HdkA l2 m2 =
    HdkA
      -- The combined BufSpan that covers both subcomputations.
      --
      -- The Semigroup instance for Maybe quite conveniently does the right
      -- thing:
      --    Nothing <> b = b
      --    a <> Nothing = a
      --    Just a <> Just b = Just (a <> b)
      (l1 <> l2)
      -- Stateful computations are run in left-to-right order, without any
      -- smart reordering strategy. So users of this operation must take care
      -- to traverse the AST in concrete syntax order.
      -- See Note [Smart reordering in HdkA (or lack thereof)]
      --
      -- Each computation is delimited ("sandboxed") in a way that it doesn't
      -- see any Haddock comments past the neighbouring AST node. These
      -- delim1/delim2 are key to how HdkA operates.
      (delim1 m1 <*> delim2 m2)
    where
      -- Delimit the LHS by the location information from the RHS
      delim1 = inLocRange (locRangeTo (fmap @Strict.Maybe bufSpanStart l2))
      -- Delimit the RHS by the location information from the LHS
      delim2 = inLocRange (locRangeFrom (fmap @Strict.Maybe bufSpanEnd l1))

  -- Return a value without performing any stateful computation, and without
  -- any delimiting effect on the surrounding computations.
  pure a = liftHdkA (pure a)

{- Note [Smart reordering in HdkA (or lack thereof)]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When traversing the AST, the user must take care to traverse it in concrete
syntax order.

For example, when processing HsFunTy, it's important to get it right and write
it like so:

      HsFunTy _ mult lhs rhs -> do
        lhs' <- addHaddock lhs
        rhs' <- addHaddock rhs
        pure $ L l (HsFunTy noExtField mult lhs' rhs')

Rather than like so:

      HsFunTy _ mult lhs rhs -> do
        rhs' <- addHaddock rhs   -- bad! wrong order
        lhs' <- addHaddock lhs   -- bad! wrong order
        pure $ L l (HsFunTy noExtField mult lhs' rhs')

This is somewhat bug-prone, so we could try to fix this with some Applicative
magic. When we define (<*>) for HdkA, why not reorder the computations as
necessary? In pseudo-code:

  a1 <*> a2
    | a1 `before` a2 = ... normal processing ...
    | otherwise      = a1 <**> a2

While this trick could work for any two *adjacent* AST elements out of order
(as in HsFunTy example above), it would fail in more elaborate scenarios (e.g.
processing a list of declarations out of order).

If it's not obvious why this trick doesn't work, ponder this: it's a bit like
trying to get a sorted list by defining a 'smart' concatenation operator in the
following manner:

  a ?++ b
    | a <= b    = a ++ b
    | otherwise = b ++ a

At first glance it seems to work:

  ghci> [1] ?++ [2] ?++ [3]
  [1,2,3]

  ghci> [2] ?++ [1] ?++ [3]
  [1,2,3]   -- wow, sorted!

But it actually doesn't:

  ghci> [3] ?++ [1] ?++ [2]
  [1,3,2]   -- not sorted...
-}

-- Run a HdkA computation in an unrestricted LocRange.
-- This is only used at the
-- top level to run the final computation for the entire module.
runHdkA :: HdkA a -> HdkSt -> (a, HdkSt)
runHdkA (HdkA _ m) = unHdkM m mempty

-- Let the neighbours know about an item at this location.
--
-- Consider this example:
--
--  class -- | peculiarly placed comment
--    MyClass a where
--        my_method :: a -> a
--
-- How do we know to reject the "peculiarly placed comment" instead of
-- associating it with my_method? Its indentation level matches.
--
-- But clearly, there's "MyClass a where" separating the comment and my_method.
-- To take it into account, we must register its location using registerLocHdkA
-- or registerHdkA.
--
-- See Note [Register keyword location].
-- See Note [Adding Haddock comments to the syntax tree].
registerLocHdkA :: SrcSpan -> HdkA ()
registerLocHdkA l = HdkA (getBufSpan l) (pure ())

-- Let the neighbours know about an item at this location.
-- A small wrapper over registerLocHdkA.
--
-- See Note [Adding Haddock comments to the syntax tree].
registerHdkA :: GenLocated (SrcSpanAnn' a) e -> HdkA ()
registerHdkA a = registerLocHdkA (getLocA a)

-- Modify the action of a HdkA computation.
hoistHdkA :: (HdkM a -> HdkM b) -> HdkA a -> HdkA b
hoistHdkA f (HdkA l m) = HdkA l (f m)

-- Lift a HdkM computation to HdkA.
liftHdkA :: HdkM a -> HdkA a
liftHdkA = HdkA mempty

-- Extend the declared location span of a 'HdkA' computation:
--
--    left_neighbour <*> extendHdkA l x <*> right_neighbour
--
-- The declared location of 'x' now includes 'l', so that the surrounding
-- computations 'left_neighbour' and 'right_neighbour' will not look for
-- Haddock comments inside the 'l' location span.
extendHdkA :: SrcSpan -> HdkA a -> HdkA a
extendHdkA l' (HdkA l m) = HdkA (getBufSpan l' <> l) m

{- *********************************************************************
*                                                                      *
*              HdkM: a stateful computation to associate               *
*          accumulated documentation comments with AST nodes           *
*                                                                      *
********************************************************************* -}

-- The state of 'HdkM' contains a list of pending Haddock comments. We go
-- over the AST, looking up these comments using 'takeHdkComments' and removing
-- them from the state. The remaining, un-removed ones are ignored with a
-- warning (-Winvalid-haddock). Also, using a state means we never use the same
-- Haddock twice.
--
-- See Note [Adding Haddock comments to the syntax tree].
newtype HdkM a = HdkM { unHdkM :: LocRange -> HdkSt -> (a, HdkSt) }
  deriving (Functor, Applicative, Monad) via (ReaderT LocRange (State HdkSt))

-- | The state of HdkM.
data HdkSt =
  HdkSt
    { hdk_st_pending :: [PsLocated HdkComment]
        -- a list of pending (unassociated with an AST node)
        -- Haddock comments, sorted by location: in ascending order of the
        -- starting 'BufPos'
    , hdk_st_warnings :: [HdkWarn]
        -- accumulated warnings (order doesn't matter)
    }

-- | Warnings accumulated in HdkM.
data HdkWarn
  = HdkWarnInvalidComment (PsLocated HdkComment)
  | HdkWarnExtraComment (Located HsDocString)

-- Restrict the range in which a HdkM computation will look up comments:
--
--   inLocRange r1 $
--   inLocRange r2 $
--     takeHdkComments ...  -- Only takes comments in the (r1 <> r2) location range.
--
-- Note that it does not blindly override the range but tightens it using (<>).
-- At many use sites, you will see something along the lines of:
--
--   inLocRange (locRangeTo end_pos) $ ...
--
-- And 'locRangeTo' defines a location range from the start of the file to
-- 'end_pos'. This does not mean that we now search for every comment from the
-- start of the file, as this restriction will be combined with other
-- restrictions.
Somewhere up the callstack we might have: -- -- inLocRange (locRangeFrom start_pos) $ ... -- -- The net result is that the location range is delimited by 'start_pos' on -- one side and by 'end_pos' on the other side. -- -- In 'HdkA', every (<*>) may restrict the location range of its -- subcomputations. inLocRange :: LocRange -> HdkM a -> HdkM a inLocRange r (HdkM m) = HdkM (\r' -> m (r <> r')) -- Take the Haddock comments that satisfy the matching function, -- leaving the rest pending. takeHdkComments :: forall a. (PsLocated HdkComment -> Maybe a) -> HdkM [a] takeHdkComments f = HdkM $ \(LocRange hdk_from hdk_to hdk_col) -> \hdk_st -> let comments = hdk_st_pending hdk_st (comments_before_range, comments') = break (is_after hdk_from) comments (comments_in_range, comments_after_range) = span (is_before hdk_to <&&> is_indented hdk_col) comments' (items, other_comments) = foldr add_comment ([], []) comments_in_range remaining_comments = comments_before_range ++ other_comments ++ comments_after_range hdk_st' = hdk_st{ hdk_st_pending = remaining_comments } in (items, hdk_st') where is_after StartOfFile _ = True is_after (StartLoc l) (L l_comment _) = bufSpanStart (psBufSpan l_comment) >= l is_before EndOfFile _ = True is_before (EndLoc l) (L l_comment _) = bufSpanStart (psBufSpan l_comment) <= l is_indented (ColumnFrom n) (L l_comment _) = srcSpanStartCol (psRealSpan l_comment) >= n add_comment :: PsLocated HdkComment -> ([a], [PsLocated HdkComment]) -> ([a], [PsLocated HdkComment]) add_comment hdk_comment (items, other_hdk_comments) = case f hdk_comment of Just item -> (item : items, other_hdk_comments) Nothing -> (items, hdk_comment : other_hdk_comments) -- Get the docnext or docprev comment for an AST node at the given source span. 
getPrevNextDoc :: SrcSpan -> HdkM (Maybe (Located HsDocString))
getPrevNextDoc l = do
  let (l_start, l_end) = (srcSpanStart l, srcSpanEnd l)
      before_t = locRangeTo (getBufPos l_start)
      after_t = locRangeFrom (getBufPos l_end)
  nextDocs <- inLocRange before_t $ takeHdkComments mkDocNext
  prevDocs <- inLocRange after_t $ takeHdkComments mkDocPrev
  selectDocString (nextDocs ++ prevDocs)

-- Record a warning for later reporting.
appendHdkWarning :: HdkWarn -> HdkM ()
appendHdkWarning e =
  HdkM $ \_ hdk_st ->
    ((), hdk_st { hdk_st_warnings = e : hdk_st_warnings hdk_st })

-- Pick at most one non-empty doc string, reporting any extras as warnings.
selectDocString :: [Located HsDocString] -> HdkM (Maybe (Located HsDocString))
selectDocString = select . filterOut (isEmptyDocString . unLoc)
  where
    select [] = return Nothing
    select [doc] = return (Just doc)
    select (doc : extra_docs) = do
      reportExtraDocs extra_docs
      return (Just doc)

-- Like 'selectDocString', but for lexed documentation comments.
selectDoc :: forall a. [LHsDoc a] -> HdkM (Maybe (LHsDoc a))
selectDoc = select . filterOut (isEmptyDocString . hsDocString . unLoc)
  where
    select [] = return Nothing
    select [doc] = return (Just doc)
    select (doc : extra_docs) = do
      reportExtraDocs $ map (\(L l d) -> L l $ hsDocString d) extra_docs
      return (Just doc)

reportExtraDocs :: [Located HsDocString] -> HdkM ()
reportExtraDocs =
  traverse_ (\extra_doc -> appendHdkWarning (HdkWarnExtraComment extra_doc))

{- *********************************************************************
*                                                                      *
*      Matching functions for extracting documentation comments        *
*                                                                      *
********************************************************************* -}

mkDocHsDecl :: LayoutInfo -> PsLocated HdkComment -> Maybe (LHsDecl GhcPs)
mkDocHsDecl layout_info a = mapLoc (DocD noExtField) <$> mkDocDecl layout_info a

mkDocDecl :: LayoutInfo -> PsLocated HdkComment -> Maybe (LDocDecl GhcPs)
mkDocDecl layout_info (L l_comment hdk_comment)
  | indent_mismatch = Nothing
  | otherwise =
      Just $ L (noAnnSrcSpan span) $
        case hdk_comment of
          HdkCommentNext doc -> DocCommentNext (L span $ lexHsDocString doc)
          HdkCommentPrev doc -> DocCommentPrev (L span $ lexHsDocString doc)
          HdkCommentNamed s doc -> DocCommentNamed s (L span $ lexHsDocString doc)
          HdkCommentSection n doc -> DocGroup n (L span $ lexHsDocString doc)
  where
    span = mkSrcSpanPs l_comment

    -- 'indent_mismatch' checks if the documentation comment has the exact
    -- indentation level expected by the parent node.
    --
    -- For example, when extracting documentation comments between class
    -- method declarations, there are three cases to consider:
    --
    -- 1. Indent matches (indent_mismatch=False):
    --
    --         class C a where
    --           f :: a -> a
    --           -- ^ doc on f
    --
    -- 2. Indented too much (indent_mismatch=True):
    --
    --         class C a where
    --           f :: a -> a
    --             -- ^ indent mismatch
    --
    -- 3. Indented too little (indent_mismatch=True):
    --
    --         class C a where
    --           f :: a -> a
    --         -- ^ indent mismatch
    indent_mismatch = case layout_info of
      NoLayoutInfo -> False
      ExplicitBraces -> False
      VirtualBraces n -> n /= srcSpanStartCol (psRealSpan l_comment)

mkDocIE :: PsLocated HdkComment -> Maybe (LIE GhcPs)
mkDocIE (L l_comment hdk_comment) =
  case hdk_comment of
    HdkCommentSection n doc ->
      Just $ L l (IEGroup noExtField n $ L span $ lexHsDocString doc)
    HdkCommentNamed s _doc -> Just $ L l (IEDocNamed noExtField s)
    HdkCommentNext doc ->
      Just $ L l (IEDoc noExtField $ L span $ lexHsDocString doc)
    _ -> Nothing
  where
    l = noAnnSrcSpan span
    span = mkSrcSpanPs l_comment

mkDocNext :: PsLocated HdkComment -> Maybe (Located HsDocString)
mkDocNext (L l (HdkCommentNext doc)) = Just (L (mkSrcSpanPs l) doc)
mkDocNext _ = Nothing

mkDocPrev :: PsLocated HdkComment -> Maybe (Located HsDocString)
mkDocPrev (L l (HdkCommentPrev doc)) = Just (L (mkSrcSpanPs l) doc)
mkDocPrev _ = Nothing

{- *********************************************************************
*                                                                      *
*                   LocRange: a location range                         *
*                                                                      *
********************************************************************* -}

-- A location range for extracting documentation comments.
data LocRange =
  LocRange
    { loc_range_from :: !LowerLocBound
    , loc_range_to :: !UpperLocBound
    , loc_range_col :: !ColumnBound
    }

instance Semigroup LocRange where
  LocRange from1 to1 col1 <> LocRange from2 to2 col2 =
    LocRange (from1 <> from2) (to1 <> to2) (col1 <> col2)

instance Monoid LocRange where
  mempty = LocRange mempty mempty mempty

-- The location range from the specified position to the end of the file.
locRangeFrom :: Strict.Maybe BufPos -> LocRange
locRangeFrom (Strict.Just l) = mempty { loc_range_from = StartLoc l }
locRangeFrom Strict.Nothing = mempty

-- The location range from the start of the file to the specified position.
locRangeTo :: Strict.Maybe BufPos -> LocRange
locRangeTo (Strict.Just l) = mempty { loc_range_to = EndLoc l }
locRangeTo Strict.Nothing = mempty

-- Represents a predicate on BufPos:
--
--   LowerLocBound   |   BufPos -> Bool
--   ---------------+-----------------
--   StartOfFile     |   const True
--   StartLoc p      |   (>= p)
--
-- The semigroup instance corresponds to (&&).
--
-- We don't use the  BufPos -> Bool  representation
-- as it would lead to redundant checks.
--
-- That is, instead of
--
--   (pos >= 20) && (pos >= 30) && (pos >= 40)
--
-- We'd rather only do the (>=40) check. So we reify the predicate to make
-- sure we only check for the most restrictive bound.
data LowerLocBound = StartOfFile | StartLoc !BufPos

deriving instance Show LowerLocBound

instance Semigroup LowerLocBound where
  StartOfFile <> l = l
  l <> StartOfFile = l
  StartLoc l1 <> StartLoc l2 = StartLoc (max l1 l2)

instance Monoid LowerLocBound where
  mempty = StartOfFile

-- Represents a predicate on BufPos:
--
--   UpperLocBound   |   BufPos -> Bool
--   --------------- +-----------------
--   EndOfFile       |   const True
--   EndLoc p        |   (<= p)
--
-- The semigroup instance corresponds to (&&).
--
-- We don't use the  BufPos -> Bool  representation
-- as it would lead to redundant checks.
--
-- That is, instead of
--
--   (pos <= 40) && (pos <= 30) && (pos <= 20)
--
-- We'd rather only do the (<=20) check. So we reify the predicate to make
-- sure we only check for the most restrictive bound.
data UpperLocBound = EndOfFile | EndLoc !BufPos

deriving instance Show UpperLocBound

instance Semigroup UpperLocBound where
  EndOfFile <> l = l
  l <> EndOfFile = l
  EndLoc l1 <> EndLoc l2 = EndLoc (min l1 l2)

instance Monoid UpperLocBound where
  mempty = EndOfFile

-- | Represents a predicate on the column number.
--
--   ColumnBound   |   Int -> Bool
--   --------------+-----------------
--   ColumnFrom n  |   (>=n)
--
-- The semigroup instance corresponds to (&&).
--
newtype ColumnBound = ColumnFrom Int -- n >= GHC.Types.SrcLoc.leftmostColumn

deriving instance Show ColumnBound

instance Semigroup ColumnBound where
  ColumnFrom n <> ColumnFrom m = ColumnFrom (max n m)

instance Monoid ColumnBound where
  mempty = ColumnFrom leftmostColumn

{- *********************************************************************
*                                                                      *
*                   AST manipulation utilities                         *
*                                                                      *
********************************************************************* -}

mkLHsDocTy :: LHsType GhcPs -> Maybe (Located HsDocString) -> LHsType GhcPs
mkLHsDocTy t Nothing = t
mkLHsDocTy t (Just doc) = L (getLoc t) (HsDocTy noAnn t $ lexLHsDocString doc)

getForAllTeleLoc :: HsForAllTelescope GhcPs -> SrcSpan
getForAllTeleLoc tele =
  case tele of
    HsForAllVis{ hsf_vis_bndrs } -> getLHsTyVarBndrsLoc hsf_vis_bndrs
    HsForAllInvis { hsf_invis_bndrs } -> getLHsTyVarBndrsLoc hsf_invis_bndrs

getLHsTyVarBndrsLoc :: [LHsTyVarBndr flag GhcPs] -> SrcSpan
getLHsTyVarBndrsLoc bndrs = foldr combineSrcSpans noSrcSpan $ map getLocA bndrs

-- | The inverse of 'partitionBindsAndSigs' that merges partitioned items back
-- into a flat list. Elements are put back into the order in which they
-- appeared in the original program before partitioning, using BufPos to order
-- them.
--
-- Precondition (unchecked): the input lists are already sorted.
flattenBindsAndSigs :: (LHsBinds GhcPs, [LSig GhcPs], [LFamilyDecl GhcPs], [LTyFamInstDecl GhcPs], [LDataFamInstDecl GhcPs], [LDocDecl GhcPs]) -> [LHsDecl GhcPs] flattenBindsAndSigs (all_bs, all_ss, all_ts, all_tfis, all_dfis, all_docs) = -- 'cmpBufSpan' is safe here with the following assumptions: -- -- - 'LHsDecl' produced by 'decl_cls' in Parser.y always have a 'BufSpan' -- - 'partitionBindsAndSigs' does not discard this 'BufSpan' mergeListsBy cmpBufSpanA [ mapLL (\b -> ValD noExtField b) (bagToList all_bs), mapLL (\s -> SigD noExtField s) all_ss, mapLL (\t -> TyClD noExtField (FamDecl noExtField t)) all_ts, mapLL (\tfi -> InstD noExtField (TyFamInstD noExtField tfi)) all_tfis, mapLL (\dfi -> InstD noExtField (DataFamInstD noExtField dfi)) all_dfis, mapLL (\d -> DocD noExtField d) all_docs ] cmpBufSpanA :: GenLocated (SrcSpanAnn' a1) a2 -> GenLocated (SrcSpanAnn' a3) a2 -> Ordering cmpBufSpanA (L la a) (L lb b) = cmpBufSpan (L (locA la) a) (L (locA lb) b) {- ********************************************************************* * * * General purpose utilities * * * ********************************************************************* -} -- Cons an element to a list, if exists. mcons :: Maybe a -> [a] -> [a] mcons = maybe id (:) -- Map a function over a list of located items. mapLL :: (a -> b) -> [GenLocated l a] -> [GenLocated l b] mapLL f = map (mapLoc f) {- Note [Old solution: Haddock in the grammar] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the past, Haddock comments were incorporated into the grammar (Parser.y). This led to excessive complexity and duplication. 
For example, here's the grammar production for types without documentation: type : btype | btype '->' ctype To support Haddock, we had to also maintain an additional grammar production for types with documentation on function arguments and function result: typedoc : btype | btype docprev | docnext btype | btype '->' ctypedoc | btype docprev '->' ctypedoc | docnext btype '->' ctypedoc Sometimes handling documentation comments during parsing led to bugs (#17561), and sometimes it simply made it hard to modify and extend the grammar. Another issue was that sometimes Haddock would fail to parse code that GHC could parse successfully: class BadIndent where f :: a -> Int -- ^ comment g :: a -> Int This declaration was accepted by ghc but rejected by ghc -haddock. -} {- Note [Register keyword location] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ At the moment, 'addHaddock' erroneously associates some comments with constructs that are separated by a keyword. For example: data Foo -- | Comment for MkFoo where MkFoo :: Foo We could use EPA (exactprint annotations) to fix this, but not without modification. For example, EpaLocation contains RealSrcSpan but not BufSpan. Also, the fix would be more straghtforward after #19623. For examples, see tests/haddock/should_compile_flag_haddock/T17544_kw.hs -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Types.hs0000644000000000000000000000721114472400112020530 0ustar0000000000000000{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE DataKinds #-} module GHC.Parser.Types ( SumOrTuple(..) , pprSumOrTuple , PatBuilder(..) , DataConBuilder(..) 
) where import GHC.Prelude import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Types.Name.Reader import GHC.Hs.Extension import GHC.Hs.Lit import GHC.Hs.Pat import GHC.Hs.Type import GHC.Utils.Outputable as Outputable import GHC.Data.OrdList import Data.Foldable import GHC.Parser.Annotation import Language.Haskell.Syntax data SumOrTuple b = Sum ConTag Arity (LocatedA b) [EpaLocation] [EpaLocation] -- ^ Last two are the locations of the '|' before and after the payload | Tuple [Either (EpAnn EpaLocation) (LocatedA b)] pprSumOrTuple :: Outputable b => Boxity -> SumOrTuple b -> SDoc pprSumOrTuple boxity = \case Sum alt arity e _ _ -> parOpen <+> ppr_bars (alt - 1) <+> ppr e <+> ppr_bars (arity - alt) <+> parClose Tuple xs -> parOpen <> (fcat . punctuate comma $ map ppr_tup xs) <> parClose where ppr_tup (Left _) = empty ppr_tup (Right e) = ppr e ppr_bars n = hsep (replicate n (Outputable.char '|')) (parOpen, parClose) = case boxity of Boxed -> (text "(", text ")") Unboxed -> (text "(#", text "#)") -- | See Note [Ambiguous syntactic categories] and Note [PatBuilder] data PatBuilder p = PatBuilderPat (Pat p) | PatBuilderPar (LHsToken "(" p) (LocatedA (PatBuilder p)) (LHsToken ")" p) | PatBuilderApp (LocatedA (PatBuilder p)) (LocatedA (PatBuilder p)) | PatBuilderAppType (LocatedA (PatBuilder p)) (HsPatSigType GhcPs) | PatBuilderOpApp (LocatedA (PatBuilder p)) (LocatedN RdrName) (LocatedA (PatBuilder p)) (EpAnn [AddEpAnn]) | PatBuilderVar (LocatedN RdrName) | PatBuilderOverLit (HsOverLit GhcPs) instance Outputable (PatBuilder GhcPs) where ppr (PatBuilderPat p) = ppr p ppr (PatBuilderPar _ (L _ p) _) = parens (ppr p) ppr (PatBuilderApp (L _ p1) (L _ p2)) = ppr p1 <+> ppr p2 ppr (PatBuilderAppType (L _ p) t) = ppr p <+> text "@" <> ppr t ppr (PatBuilderOpApp (L _ p1) op (L _ p2) _) = ppr p1 <+> ppr op <+> ppr p2 ppr (PatBuilderVar v) = ppr v ppr (PatBuilderOverLit l) = ppr l -- | An accumulator to build a prefix data constructor, -- e.g. 
when parsing @MkT A B C@, the accumulator will evolve as follows: -- -- @ -- 1. PrefixDataConBuilder [] MkT -- 2. PrefixDataConBuilder [A] MkT -- 3. PrefixDataConBuilder [A, B] MkT -- 4. PrefixDataConBuilder [A, B, C] MkT -- @ -- -- There are two reasons we have a separate builder type instead of using -- @HsConDeclDetails GhcPs@ directly: -- -- 1. It's faster, because 'OrdList' gives us constant-time snoc. -- 2. Having a separate type helps ensure that we don't forget to finalize a -- 'RecTy' into a 'RecCon' (we do that in 'dataConBuilderDetails'). -- -- See Note [PatBuilder] for another builder type used in the parser. -- Here the technique is similar, but the motivation is different. data DataConBuilder = PrefixDataConBuilder (OrdList (LHsType GhcPs)) -- Data constructor fields (LocatedN RdrName) -- Data constructor name | InfixDataConBuilder (LHsType GhcPs) -- LHS field (LocatedN RdrName) -- Data constructor name (LHsType GhcPs) -- RHS field instance Outputable DataConBuilder where ppr (PrefixDataConBuilder flds data_con) = hang (ppr data_con) 2 (sep (map ppr (toList flds))) ppr (InfixDataConBuilder lhs data_con rhs) = ppr lhs <+> ppr data_con <+> ppr rhs type instance Anno [LocatedA (StmtLR GhcPs GhcPs (LocatedA (PatBuilder GhcPs)))] = SrcSpanAnnL ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform.hs0000644000000000000000000003065614472400112017765 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE LambdaCase #-} -- | Platform description module GHC.Platform ( Platform (..) , PlatformWordSize(..) , platformArch , platformOS , ArchOS(..) , Arch(..) , OS(..) , ArmISA(..) , ArmISAExt(..) , ArmABI(..) , PPC_64ABI(..) , ByteOrder(..) , target32Bit , isARM , osElfTarget , osMachOTarget , osSubsectionsViaSymbols , platformUsesFrameworks , platformWordSizeInBytes , platformWordSizeInBits , platformMinInt , platformMaxInt , platformMaxWord , platformInIntRange , platformInWordRange , platformCConvNeedsExtension , PlatformMisc(..) , SseVersion (..) 
, BmiVersion (..) , wordAlignment -- * SSE and AVX , isSseEnabled , isSse2Enabled -- * Platform constants , PlatformConstants(..) , lookupPlatformConstants , platformConstants -- * Shared libraries , platformSOName , platformHsSOName , platformSOExt , genericPlatform ) where import Prelude -- See Note [Why do we import Prelude here?] import GHC.Read import GHC.ByteOrder (ByteOrder(..)) import GHC.Platform.Constants import GHC.Platform.ArchOS import GHC.Types.Basic (Alignment, alignmentOf) import GHC.Utils.Panic.Plain import Data.Word import Data.Int import System.FilePath import System.Directory -- | Platform description -- -- This is used to describe platforms so that we can generate code for them. data Platform = Platform { platformArchOS :: !ArchOS -- ^ Architecture and OS , platformWordSize :: !PlatformWordSize -- ^ Word size , platformByteOrder :: !ByteOrder -- ^ Byte order (endianness) , platformUnregisterised :: !Bool , platformHasGnuNonexecStack :: !Bool , platformHasIdentDirective :: !Bool , platformHasSubsectionsViaSymbols :: !Bool , platformIsCrossCompiling :: !Bool , platformLeadingUnderscore :: !Bool -- ^ Symbols need underscore prefix , platformTablesNextToCode :: !Bool -- ^ Determines whether we will be compiling info tables that reside just -- before the entry code, or with an indirection to the entry code. See -- TABLES_NEXT_TO_CODE in rts/include/rts/storage/InfoTables.h. , platformHasLibm :: !Bool -- ^ Some platforms require that we explicitly link against @libm@ if any -- math-y things are used (which we assume to include all programs). See -- #14022. , platform_constants :: !(Maybe PlatformConstants) -- ^ Constants such as structure offsets, type sizes, etc. 
}
        deriving (Read, Show, Eq, Ord)

wordAlignment :: Platform -> Alignment
wordAlignment platform = alignmentOf (platformWordSizeInBytes platform)

-- -----------------------------------------------------------------------------
-- SSE and AVX

-- TODO: Instead of using a separate predicate (i.e. isSse2Enabled) to
-- check if SSE is enabled, we might have x86-64 imply the -msse2
-- flag.

isSseEnabled :: Platform -> Bool
isSseEnabled platform =
  case platformArch platform of
    ArchX86_64 -> True
    ArchX86 -> True
    _ -> False

isSse2Enabled :: Platform -> Bool
isSse2Enabled platform =
  case platformArch platform of
    -- We assume SSE1 and SSE2 operations are available on both
    -- x86 and x86_64. Historically we didn't default to SSE2 and
    -- SSE1 on x86, which results in defacto nondeterminism for how
    -- rounding behaves in the associated x87 floating point instructions
    -- because variations in the spill/fpu stack placement of arguments for
    -- operations would change the precision and final result of what
    -- would otherwise be the same expressions with respect to single or
    -- double precision IEEE floating point computations.
    ArchX86_64 -> True
    ArchX86 -> True
    _ -> False

-- -----------------------------------------------------------------------------
-- Platform Constants

platformConstants :: Platform -> PlatformConstants
platformConstants platform =
  case platform_constants platform of
    Nothing -> panic "Platform constants not available!"
    Just c -> c

genericPlatform :: Platform
genericPlatform = Platform
  { platformArchOS = ArchOS ArchX86_64 OSLinux
  , platformWordSize = PW8
  , platformByteOrder = LittleEndian
  , platformUnregisterised = False
  , platformHasGnuNonexecStack = False
  , platformHasIdentDirective = False
  , platformHasSubsectionsViaSymbols= False
  , platformHasLibm = False
  , platformIsCrossCompiling = False
  , platformLeadingUnderscore = False
  , platformTablesNextToCode = True
  , platform_constants = Nothing
  }

data PlatformWordSize
  = PW4 -- ^ A 32-bit platform
  | PW8 -- ^ A 64-bit platform
  deriving (Eq, Ord)

instance Show PlatformWordSize where
  show PW4 = "4"
  show PW8 = "8"

instance Read PlatformWordSize where
  readPrec = do
    i :: Int <- readPrec
    case i of
      4 -> return PW4
      8 -> return PW8
      other -> fail ("Invalid PlatformWordSize: " ++ show other)

platformWordSizeInBytes :: Platform -> Int
platformWordSizeInBytes p =
  case platformWordSize p of
    PW4 -> 4
    PW8 -> 8

platformWordSizeInBits :: Platform -> Int
platformWordSizeInBits p = platformWordSizeInBytes p * 8

-- | Platform architecture
platformArch :: Platform -> Arch
platformArch platform = case platformArchOS platform of
  ArchOS arch _ -> arch

-- | Platform OS
platformOS :: Platform -> OS
platformOS platform = case platformArchOS platform of
  ArchOS _ os -> os

isARM :: Arch -> Bool
isARM (ArchARM {}) = True
isARM ArchAArch64 = True
isARM _ = False

-- | This predicate tells us whether the platform is 32-bit.
target32Bit :: Platform -> Bool
target32Bit p =
  case platformWordSize p of
    PW4 -> True
    PW8 -> False

-- | This predicate tells us whether the OS supports ELF-like shared libraries.
osElfTarget :: OS -> Bool osElfTarget OSLinux = True osElfTarget OSFreeBSD = True osElfTarget OSDragonFly = True osElfTarget OSOpenBSD = True osElfTarget OSNetBSD = True osElfTarget OSSolaris2 = True osElfTarget OSDarwin = False osElfTarget OSMinGW32 = False osElfTarget OSKFreeBSD = True osElfTarget OSHaiku = True osElfTarget OSQNXNTO = False osElfTarget OSAIX = False osElfTarget OSHurd = True osElfTarget OSUnknown = False -- Defaulting to False is safe; it means don't rely on any -- ELF-specific functionality. It is important to have a default for -- portability, otherwise we have to answer this question for every -- new platform we compile on (even unreg). -- | This predicate tells us whether the OS support Mach-O shared libraries. osMachOTarget :: OS -> Bool osMachOTarget OSDarwin = True osMachOTarget _ = False osUsesFrameworks :: OS -> Bool osUsesFrameworks OSDarwin = True osUsesFrameworks _ = False platformUsesFrameworks :: Platform -> Bool platformUsesFrameworks = osUsesFrameworks . 
platformOS osSubsectionsViaSymbols :: OS -> Bool osSubsectionsViaSymbols OSDarwin = True osSubsectionsViaSymbols _ = False -- | Minimum representable Int value for the given platform platformMinInt :: Platform -> Integer platformMinInt p = case platformWordSize p of PW4 -> toInteger (minBound :: Int32) PW8 -> toInteger (minBound :: Int64) -- | Maximum representable Int value for the given platform platformMaxInt :: Platform -> Integer platformMaxInt p = case platformWordSize p of PW4 -> toInteger (maxBound :: Int32) PW8 -> toInteger (maxBound :: Int64) -- | Maximum representable Word value for the given platform platformMaxWord :: Platform -> Integer platformMaxWord p = case platformWordSize p of PW4 -> toInteger (maxBound :: Word32) PW8 -> toInteger (maxBound :: Word64) -- | Test if the given Integer is representable with a platform Int platformInIntRange :: Platform -> Integer -> Bool platformInIntRange platform x = x >= platformMinInt platform && x <= platformMaxInt platform -- | Test if the given Integer is representable with a platform Word platformInWordRange :: Platform -> Integer -> Bool platformInWordRange platform x = x >= 0 && x <= platformMaxWord platform -- | For some architectures the C calling convention is that any -- integer shorter than 64 bits is replaced by its 64 bits -- representation using sign or zero extension. platformCConvNeedsExtension :: Platform -> Bool platformCConvNeedsExtension platform = case platformArch platform of ArchPPC_64 _ -> True ArchS390X -> True ArchRISCV64 -> True ArchAArch64 -- Apple's AArch64 ABI requires that the caller sign-extend -- small integer arguments. 
See -- https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms | OSDarwin <- platformOS platform -> True _ -> False -------------------------------------------------- -- Instruction sets -------------------------------------------------- -- | x86 SSE instructions data SseVersion = SSE1 | SSE2 | SSE3 | SSE4 | SSE42 deriving (Eq, Ord) -- | x86 BMI (bit manipulation) instructions data BmiVersion = BMI1 | BMI2 deriving (Eq, Ord) -- | Platform-specific settings formerly hard-coded in Config.hs. -- -- These should probably be all be triaged whether they can be computed from -- other settings or belong in another another place (like 'Platform' above). data PlatformMisc = PlatformMisc { -- TODO Recalculate string from richer info? platformMisc_targetPlatformString :: String , platformMisc_ghcWithInterpreter :: Bool , platformMisc_libFFI :: Bool , platformMisc_llvmTarget :: String } platformSOName :: Platform -> FilePath -> FilePath platformSOName platform root = case platformOS platform of OSMinGW32 -> root <.> platformSOExt platform _ -> ("lib" ++ root) <.> platformSOExt platform platformHsSOName :: Platform -> FilePath -> FilePath platformHsSOName platform root = ("lib" ++ root) <.> platformSOExt platform platformSOExt :: Platform -> FilePath platformSOExt platform = case platformOS platform of OSDarwin -> "dylib" OSMinGW32 -> "dll" _ -> "so" -- Note [Platform constants] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- The RTS is partly written in C, hence we use an external C compiler to build -- it. Thus GHC must somehow retrieve some information about the produced code -- (sizes of types, offsets of struct fields, etc.) to produce compatible code. -- -- This is the role of utils/deriveConstants utility: it produces a C -- source, compiles it with the same toolchain that will be used to build the -- RTS, and finally retrieves the constants from the built artefact. We can't -- directly run the produced program because we may be cross-compiling. 
-- -- These constants are then stored in GhclibDerivedConstants.h header file that is -- bundled with the RTS unit. This file is directly imported by Cmm codes and it -- is also read by GHC. deriveConstants also produces the Haskell definition of -- the PlatformConstants datatype and the Haskell parser for the -- GhclibDerivedConstants.h file. -- -- For quite some time, constants used by GHC were globally installed in -- ${libdir}/platformConstants but now GHC reads the GhclibDerivedConstants.h header -- bundled with the RTS unit. GHC detects when it builds the RTS unit itself and -- in this case it loads the header from the include-dirs passed on the -- command-line. -- -- Note that GHC doesn't parse every "#define SOME_CONSTANT 123" individually. -- Instead there is a single #define that contains all the constants useful to -- GHC in a comma separated list: -- -- #define HS_CONSTANTS "123,45,..." -- -- Note that GHC mustn't directly import GhclibDerivedConstants.h as these constants -- are only valid for a specific target platform and we want GHC to be target -- agnostic. -- -- | Try to locate "GhclibDerivedConstants.h" file in the given dirs and to parse the -- PlatformConstants from it. 
-- -- See Note [Platform constants] lookupPlatformConstants :: [FilePath] -> IO (Maybe PlatformConstants) lookupPlatformConstants include_dirs = find_constants include_dirs where try_parse d = do let p = d "GhclibDerivedConstants.h" doesFileExist p >>= \case True -> Just <$> parseConstantsHeader p False -> return Nothing find_constants [] = return Nothing find_constants (x:xs) = try_parse x >>= \case Nothing -> find_constants xs Just c -> return (Just c) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/AArch64.hs0000644000000000000000000000024114472400112021100 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.AArch64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_aarch64 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/ARM.hs0000644000000000000000000000023214472400112020367 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.ARM where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_arm 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Platform/ArchOS.hs0000644000000000000000000000711214472375231022754 0ustar0000000000000000{-# LANGUAGE LambdaCase, ScopedTypeVariables #-} -- | Platform architecture and OS -- -- We need it in ghc-boot because ghc-pkg needs it. module GHC.Platform.ArchOS ( ArchOS(..) , Arch(..) , OS(..) , ArmISA(..) , ArmISAExt(..) , ArmABI(..) , PPC_64ABI(..) , stringEncodeArch , stringEncodeOS ) where import Prelude -- See Note [Why do we import Prelude here?] -- | Platform architecture and OS. data ArchOS = ArchOS { archOS_arch :: Arch , archOS_OS :: OS } deriving (Read, Show, Eq, Ord) -- | Architectures -- -- TODO: It might be nice to extend these constructors with information about -- what instruction set extensions an architecture might support. 
-- data Arch = ArchUnknown | ArchX86 | ArchX86_64 | ArchPPC | ArchPPC_64 PPC_64ABI | ArchS390X | ArchARM ArmISA [ArmISAExt] ArmABI | ArchAArch64 | ArchAlpha | ArchMipseb | ArchMipsel | ArchRISCV64 | ArchJavaScript deriving (Read, Show, Eq, Ord) -- | ARM Instruction Set Architecture data ArmISA = ARMv5 | ARMv6 | ARMv7 deriving (Read, Show, Eq, Ord) -- | ARM extensions data ArmISAExt = VFPv2 | VFPv3 | VFPv3D16 | NEON | IWMMX2 deriving (Read, Show, Eq, Ord) -- | ARM ABI data ArmABI = SOFT | SOFTFP | HARD deriving (Read, Show, Eq, Ord) -- | PowerPC 64-bit ABI data PPC_64ABI = ELF_V1 -- ^ PowerPC64 | ELF_V2 -- ^ PowerPC64 LE deriving (Read, Show, Eq, Ord) -- | Operating systems. -- -- Using OSUnknown to generate code should produce a sensible default, but no -- promises. data OS = OSUnknown | OSLinux | OSDarwin | OSSolaris2 | OSMinGW32 | OSFreeBSD | OSDragonFly | OSOpenBSD | OSNetBSD | OSKFreeBSD | OSHaiku | OSQNXNTO | OSAIX | OSHurd deriving (Read, Show, Eq, Ord) -- Note [Platform Syntax] -- ~~~~~~~~~~~~~~~~~~~~~~ -- -- There is a very loose encoding of platforms shared by many tools we are -- encoding to here. GNU Config (http://git.savannah.gnu.org/cgit/config.git), -- and LLVM's http://llvm.org/doxygen/classllvm_1_1Triple.html are perhaps the -- most definitional parsers. The basic syntax is a list of '-'-separated -- components. The Unix 'uname' command syntax is related but briefer. -- -- Those two parsers are quite forgiving, and even the 'config.sub' -- normalization is forgiving too. The "best" way to encode a platform is -- therefore somewhat a matter of taste. -- -- The 'stringEncode*' functions here convert each part of GHC's structured -- notion of a platform into one dash-separated component. -- | See Note [Platform Syntax]. 
stringEncodeArch :: Arch -> String stringEncodeArch = \case ArchUnknown -> "unknown" ArchX86 -> "i386" ArchX86_64 -> "x86_64" ArchPPC -> "powerpc" ArchPPC_64 ELF_V1 -> "powerpc64" ArchPPC_64 ELF_V2 -> "powerpc64le" ArchS390X -> "s390x" ArchARM ARMv5 _ _ -> "armv5" ArchARM ARMv6 _ _ -> "armv6" ArchARM ARMv7 _ _ -> "armv7" ArchAArch64 -> "aarch64" ArchAlpha -> "alpha" ArchMipseb -> "mipseb" ArchMipsel -> "mipsel" ArchRISCV64 -> "riscv64" ArchJavaScript -> "js" -- | See Note [Platform Syntax]. stringEncodeOS :: OS -> String stringEncodeOS = \case OSUnknown -> "unknown" OSLinux -> "linux" OSDarwin -> "darwin" OSSolaris2 -> "solaris2" OSMinGW32 -> "mingw32" OSFreeBSD -> "freebsd" OSDragonFly -> "dragonfly" OSOpenBSD -> "openbsd" OSNetBSD -> "netbsd" OSKFreeBSD -> "kfreebsdgnu" OSHaiku -> "haiku" OSQNXNTO -> "nto-qnx" OSAIX -> "aix" OSHurd -> "hurd" ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/GHC/Platform/Constants.hs0000644000000000000000000004004314472400113025520 0ustar0000000000000000module GHC.Platform.Constants where import Prelude import Data.Char data PlatformConstants = PlatformConstants { pc_CONTROL_GROUP_CONST_291 :: {-# UNPACK #-} !Int, pc_STD_HDR_SIZE :: {-# UNPACK #-} !Int, pc_PROF_HDR_SIZE :: {-# UNPACK #-} !Int, pc_BLOCK_SIZE :: {-# UNPACK #-} !Int, pc_BLOCKS_PER_MBLOCK :: {-# UNPACK #-} !Int, pc_TICKY_BIN_COUNT :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR7 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR8 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR9 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rR10 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF2 :: {-# UNPACK #-} !Int, 
pc_OFFSET_StgRegTable_rF3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rF6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rD6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rXMM6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rYMM6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM2 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM3 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM4 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM5 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rZMM6 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rL1 :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rSp :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rSpLim :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rHp :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rHpLim :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rCCCS :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rCurrentTSO :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rCurrentNursery :: {-# UNPACK #-} !Int, pc_OFFSET_StgRegTable_rHpAlloc :: {-# UNPACK #-} !Int, pc_OFFSET_stgEagerBlackholeInfo :: {-# UNPACK #-} !Int, pc_OFFSET_stgGCEnter1 :: 
{-# UNPACK #-} !Int, pc_OFFSET_stgGCFun :: {-# UNPACK #-} !Int, pc_OFFSET_Capability_r :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_start :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_free :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_blocks :: {-# UNPACK #-} !Int, pc_OFFSET_bdescr_flags :: {-# UNPACK #-} !Int, pc_SIZEOF_CostCentreStack :: {-# UNPACK #-} !Int, pc_OFFSET_CostCentreStack_mem_alloc :: {-# UNPACK #-} !Int, pc_REP_CostCentreStack_mem_alloc :: {-# UNPACK #-} !Int, pc_OFFSET_CostCentreStack_scc_count :: {-# UNPACK #-} !Int, pc_REP_CostCentreStack_scc_count :: {-# UNPACK #-} !Int, pc_OFFSET_StgHeader_ccs :: {-# UNPACK #-} !Int, pc_OFFSET_StgHeader_ldvw :: {-# UNPACK #-} !Int, pc_SIZEOF_StgSMPThunkHeader :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_allocs :: {-# UNPACK #-} !Int, pc_REP_StgEntCounter_allocs :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_allocd :: {-# UNPACK #-} !Int, pc_REP_StgEntCounter_allocd :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_registeredp :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_link :: {-# UNPACK #-} !Int, pc_OFFSET_StgEntCounter_entry_count :: {-# UNPACK #-} !Int, pc_SIZEOF_StgUpdateFrame_NoHdr :: {-# UNPACK #-} !Int, pc_SIZEOF_StgMutArrPtrs_NoHdr :: {-# UNPACK #-} !Int, pc_OFFSET_StgMutArrPtrs_ptrs :: {-# UNPACK #-} !Int, pc_OFFSET_StgMutArrPtrs_size :: {-# UNPACK #-} !Int, pc_SIZEOF_StgSmallMutArrPtrs_NoHdr :: {-# UNPACK #-} !Int, pc_OFFSET_StgSmallMutArrPtrs_ptrs :: {-# UNPACK #-} !Int, pc_SIZEOF_StgArrBytes_NoHdr :: {-# UNPACK #-} !Int, pc_OFFSET_StgArrBytes_bytes :: {-# UNPACK #-} !Int, pc_OFFSET_StgTSO_alloc_limit :: {-# UNPACK #-} !Int, pc_OFFSET_StgTSO_cccs :: {-# UNPACK #-} !Int, pc_OFFSET_StgTSO_stackobj :: {-# UNPACK #-} !Int, pc_OFFSET_StgStack_sp :: {-# UNPACK #-} !Int, pc_OFFSET_StgStack_stack :: {-# UNPACK #-} !Int, pc_OFFSET_StgUpdateFrame_updatee :: {-# UNPACK #-} !Int, pc_OFFSET_StgFunInfoExtraFwd_arity :: {-# UNPACK #-} !Int, pc_REP_StgFunInfoExtraFwd_arity :: {-# UNPACK #-} !Int, pc_SIZEOF_StgFunInfoExtraRev 
:: {-# UNPACK #-} !Int, pc_OFFSET_StgFunInfoExtraRev_arity :: {-# UNPACK #-} !Int, pc_REP_StgFunInfoExtraRev_arity :: {-# UNPACK #-} !Int, pc_MAX_SPEC_SELECTEE_SIZE :: {-# UNPACK #-} !Int, pc_MAX_SPEC_AP_SIZE :: {-# UNPACK #-} !Int, pc_MIN_PAYLOAD_SIZE :: {-# UNPACK #-} !Int, pc_MIN_INTLIKE :: {-# UNPACK #-} !Int, pc_MAX_INTLIKE :: {-# UNPACK #-} !Int, pc_MIN_CHARLIKE :: {-# UNPACK #-} !Int, pc_MAX_CHARLIKE :: {-# UNPACK #-} !Int, pc_MUT_ARR_PTRS_CARD_BITS :: {-# UNPACK #-} !Int, pc_MAX_Vanilla_REG :: {-# UNPACK #-} !Int, pc_MAX_Float_REG :: {-# UNPACK #-} !Int, pc_MAX_Double_REG :: {-# UNPACK #-} !Int, pc_MAX_Long_REG :: {-# UNPACK #-} !Int, pc_MAX_XMM_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Vanilla_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Float_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Double_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_XMM_REG :: {-# UNPACK #-} !Int, pc_MAX_Real_Long_REG :: {-# UNPACK #-} !Int, pc_RESERVED_C_STACK_BYTES :: {-# UNPACK #-} !Int, pc_RESERVED_STACK_WORDS :: {-# UNPACK #-} !Int, pc_AP_STACK_SPLIM :: {-# UNPACK #-} !Int, pc_WORD_SIZE :: {-# UNPACK #-} !Int, pc_CINT_SIZE :: {-# UNPACK #-} !Int, pc_CLONG_SIZE :: {-# UNPACK #-} !Int, pc_CLONG_LONG_SIZE :: {-# UNPACK #-} !Int, pc_BITMAP_BITS_SHIFT :: {-# UNPACK #-} !Int, pc_TAG_BITS :: {-# UNPACK #-} !Int, pc_LDV_SHIFT :: {-# UNPACK #-} !Int, pc_ILDV_CREATE_MASK :: !Integer, pc_ILDV_STATE_CREATE :: !Integer, pc_ILDV_STATE_USE :: !Integer, pc_USE_INLINE_SRT_FIELD :: !Bool } deriving (Show, Read, Eq, Ord) parseConstantsHeader :: FilePath -> IO PlatformConstants parseConstantsHeader fp = do s <- readFile fp let def = "#define HS_CONSTANTS \"" find [] xs = xs find _ [] = error $ "GHC couldn't find the RTS constants ("++def++") in " ++ fp ++ ": the RTS package you are trying to use is perhaps for another GHC version" ++ "(e.g. 
you are using the wrong package database) or the package database is broken.\n" find (d:ds) (x:xs) | d == x = find ds xs | otherwise = find def xs readVal' :: Bool -> Integer -> String -> [Integer] readVal' n c (x:xs) = case x of '"' -> [if n then negate c else c] '-' -> readVal' True c xs ',' -> (if n then negate c else c) : readVal' False 0 xs _ -> readVal' n (c*10 + fromIntegral (ord x - ord '0')) xs readVal' n c [] = [if n then negate c else c] readVal = readVal' False 0 return $! case readVal (find def s) of [v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15 ,v16,v17,v18,v19,v20,v21,v22,v23,v24,v25,v26,v27,v28,v29,v30,v31 ,v32,v33,v34,v35,v36,v37,v38,v39,v40,v41,v42,v43,v44,v45,v46,v47 ,v48,v49,v50,v51,v52,v53,v54,v55,v56,v57,v58,v59,v60,v61,v62,v63 ,v64,v65,v66,v67,v68,v69,v70,v71,v72,v73,v74,v75,v76,v77,v78,v79 ,v80,v81,v82,v83,v84,v85,v86,v87,v88,v89,v90,v91,v92,v93,v94,v95 ,v96,v97,v98,v99,v100,v101,v102,v103,v104,v105,v106,v107,v108,v109,v110,v111 ,v112,v113,v114,v115,v116,v117,v118,v119,v120,v121,v122,v123,v124,v125,v126,v127 ,v128 ] -> PlatformConstants { pc_CONTROL_GROUP_CONST_291 = fromIntegral v0 , pc_STD_HDR_SIZE = fromIntegral v1 , pc_PROF_HDR_SIZE = fromIntegral v2 , pc_BLOCK_SIZE = fromIntegral v3 , pc_BLOCKS_PER_MBLOCK = fromIntegral v4 , pc_TICKY_BIN_COUNT = fromIntegral v5 , pc_OFFSET_StgRegTable_rR1 = fromIntegral v6 , pc_OFFSET_StgRegTable_rR2 = fromIntegral v7 , pc_OFFSET_StgRegTable_rR3 = fromIntegral v8 , pc_OFFSET_StgRegTable_rR4 = fromIntegral v9 , pc_OFFSET_StgRegTable_rR5 = fromIntegral v10 , pc_OFFSET_StgRegTable_rR6 = fromIntegral v11 , pc_OFFSET_StgRegTable_rR7 = fromIntegral v12 , pc_OFFSET_StgRegTable_rR8 = fromIntegral v13 , pc_OFFSET_StgRegTable_rR9 = fromIntegral v14 , pc_OFFSET_StgRegTable_rR10 = fromIntegral v15 , pc_OFFSET_StgRegTable_rF1 = fromIntegral v16 , pc_OFFSET_StgRegTable_rF2 = fromIntegral v17 , pc_OFFSET_StgRegTable_rF3 = fromIntegral v18 , pc_OFFSET_StgRegTable_rF4 = fromIntegral v19 , 
pc_OFFSET_StgRegTable_rF5 = fromIntegral v20 , pc_OFFSET_StgRegTable_rF6 = fromIntegral v21 , pc_OFFSET_StgRegTable_rD1 = fromIntegral v22 , pc_OFFSET_StgRegTable_rD2 = fromIntegral v23 , pc_OFFSET_StgRegTable_rD3 = fromIntegral v24 , pc_OFFSET_StgRegTable_rD4 = fromIntegral v25 , pc_OFFSET_StgRegTable_rD5 = fromIntegral v26 , pc_OFFSET_StgRegTable_rD6 = fromIntegral v27 , pc_OFFSET_StgRegTable_rXMM1 = fromIntegral v28 , pc_OFFSET_StgRegTable_rXMM2 = fromIntegral v29 , pc_OFFSET_StgRegTable_rXMM3 = fromIntegral v30 , pc_OFFSET_StgRegTable_rXMM4 = fromIntegral v31 , pc_OFFSET_StgRegTable_rXMM5 = fromIntegral v32 , pc_OFFSET_StgRegTable_rXMM6 = fromIntegral v33 , pc_OFFSET_StgRegTable_rYMM1 = fromIntegral v34 , pc_OFFSET_StgRegTable_rYMM2 = fromIntegral v35 , pc_OFFSET_StgRegTable_rYMM3 = fromIntegral v36 , pc_OFFSET_StgRegTable_rYMM4 = fromIntegral v37 , pc_OFFSET_StgRegTable_rYMM5 = fromIntegral v38 , pc_OFFSET_StgRegTable_rYMM6 = fromIntegral v39 , pc_OFFSET_StgRegTable_rZMM1 = fromIntegral v40 , pc_OFFSET_StgRegTable_rZMM2 = fromIntegral v41 , pc_OFFSET_StgRegTable_rZMM3 = fromIntegral v42 , pc_OFFSET_StgRegTable_rZMM4 = fromIntegral v43 , pc_OFFSET_StgRegTable_rZMM5 = fromIntegral v44 , pc_OFFSET_StgRegTable_rZMM6 = fromIntegral v45 , pc_OFFSET_StgRegTable_rL1 = fromIntegral v46 , pc_OFFSET_StgRegTable_rSp = fromIntegral v47 , pc_OFFSET_StgRegTable_rSpLim = fromIntegral v48 , pc_OFFSET_StgRegTable_rHp = fromIntegral v49 , pc_OFFSET_StgRegTable_rHpLim = fromIntegral v50 , pc_OFFSET_StgRegTable_rCCCS = fromIntegral v51 , pc_OFFSET_StgRegTable_rCurrentTSO = fromIntegral v52 , pc_OFFSET_StgRegTable_rCurrentNursery = fromIntegral v53 , pc_OFFSET_StgRegTable_rHpAlloc = fromIntegral v54 , pc_OFFSET_stgEagerBlackholeInfo = fromIntegral v55 , pc_OFFSET_stgGCEnter1 = fromIntegral v56 , pc_OFFSET_stgGCFun = fromIntegral v57 , pc_OFFSET_Capability_r = fromIntegral v58 , pc_OFFSET_bdescr_start = fromIntegral v59 , pc_OFFSET_bdescr_free = fromIntegral v60 , 
pc_OFFSET_bdescr_blocks = fromIntegral v61 , pc_OFFSET_bdescr_flags = fromIntegral v62 , pc_SIZEOF_CostCentreStack = fromIntegral v63 , pc_OFFSET_CostCentreStack_mem_alloc = fromIntegral v64 , pc_REP_CostCentreStack_mem_alloc = fromIntegral v65 , pc_OFFSET_CostCentreStack_scc_count = fromIntegral v66 , pc_REP_CostCentreStack_scc_count = fromIntegral v67 , pc_OFFSET_StgHeader_ccs = fromIntegral v68 , pc_OFFSET_StgHeader_ldvw = fromIntegral v69 , pc_SIZEOF_StgSMPThunkHeader = fromIntegral v70 , pc_OFFSET_StgEntCounter_allocs = fromIntegral v71 , pc_REP_StgEntCounter_allocs = fromIntegral v72 , pc_OFFSET_StgEntCounter_allocd = fromIntegral v73 , pc_REP_StgEntCounter_allocd = fromIntegral v74 , pc_OFFSET_StgEntCounter_registeredp = fromIntegral v75 , pc_OFFSET_StgEntCounter_link = fromIntegral v76 , pc_OFFSET_StgEntCounter_entry_count = fromIntegral v77 , pc_SIZEOF_StgUpdateFrame_NoHdr = fromIntegral v78 , pc_SIZEOF_StgMutArrPtrs_NoHdr = fromIntegral v79 , pc_OFFSET_StgMutArrPtrs_ptrs = fromIntegral v80 , pc_OFFSET_StgMutArrPtrs_size = fromIntegral v81 , pc_SIZEOF_StgSmallMutArrPtrs_NoHdr = fromIntegral v82 , pc_OFFSET_StgSmallMutArrPtrs_ptrs = fromIntegral v83 , pc_SIZEOF_StgArrBytes_NoHdr = fromIntegral v84 , pc_OFFSET_StgArrBytes_bytes = fromIntegral v85 , pc_OFFSET_StgTSO_alloc_limit = fromIntegral v86 , pc_OFFSET_StgTSO_cccs = fromIntegral v87 , pc_OFFSET_StgTSO_stackobj = fromIntegral v88 , pc_OFFSET_StgStack_sp = fromIntegral v89 , pc_OFFSET_StgStack_stack = fromIntegral v90 , pc_OFFSET_StgUpdateFrame_updatee = fromIntegral v91 , pc_OFFSET_StgFunInfoExtraFwd_arity = fromIntegral v92 , pc_REP_StgFunInfoExtraFwd_arity = fromIntegral v93 , pc_SIZEOF_StgFunInfoExtraRev = fromIntegral v94 , pc_OFFSET_StgFunInfoExtraRev_arity = fromIntegral v95 , pc_REP_StgFunInfoExtraRev_arity = fromIntegral v96 , pc_MAX_SPEC_SELECTEE_SIZE = fromIntegral v97 , pc_MAX_SPEC_AP_SIZE = fromIntegral v98 , pc_MIN_PAYLOAD_SIZE = fromIntegral v99 , pc_MIN_INTLIKE = fromIntegral v100 , 
pc_MAX_INTLIKE = fromIntegral v101 , pc_MIN_CHARLIKE = fromIntegral v102 , pc_MAX_CHARLIKE = fromIntegral v103 , pc_MUT_ARR_PTRS_CARD_BITS = fromIntegral v104 , pc_MAX_Vanilla_REG = fromIntegral v105 , pc_MAX_Float_REG = fromIntegral v106 , pc_MAX_Double_REG = fromIntegral v107 , pc_MAX_Long_REG = fromIntegral v108 , pc_MAX_XMM_REG = fromIntegral v109 , pc_MAX_Real_Vanilla_REG = fromIntegral v110 , pc_MAX_Real_Float_REG = fromIntegral v111 , pc_MAX_Real_Double_REG = fromIntegral v112 , pc_MAX_Real_XMM_REG = fromIntegral v113 , pc_MAX_Real_Long_REG = fromIntegral v114 , pc_RESERVED_C_STACK_BYTES = fromIntegral v115 , pc_RESERVED_STACK_WORDS = fromIntegral v116 , pc_AP_STACK_SPLIM = fromIntegral v117 , pc_WORD_SIZE = fromIntegral v118 , pc_CINT_SIZE = fromIntegral v119 , pc_CLONG_SIZE = fromIntegral v120 , pc_CLONG_LONG_SIZE = fromIntegral v121 , pc_BITMAP_BITS_SHIFT = fromIntegral v122 , pc_TAG_BITS = fromIntegral v123 , pc_LDV_SHIFT = fromIntegral v124 , pc_ILDV_CREATE_MASK = v125 , pc_ILDV_STATE_CREATE = v126 , pc_ILDV_STATE_USE = v127 , pc_USE_INLINE_SRT_FIELD = 0 < v128 } _ -> error "Invalid platform constants" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/NoRegs.hs0000644000000000000000000000020614472400112021146 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.NoRegs where import GHC.Prelude #define MACHREGS_NO_REGS 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/PPC.hs0000644000000000000000000000023614472400112020376 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.PPC where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_powerpc 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/Profile.hs0000644000000000000000000000317614472400112021362 0ustar0000000000000000-- | Platform profiles module GHC.Platform.Profile ( Profile (..) 
, profileBuildTag , profileConstants , profileIsProfiling , profileWordSizeInBytes ) where import GHC.Prelude import GHC.Platform import GHC.Platform.Ways -- | A platform profile fully describes the kind of objects that are generated -- for a platform. -- -- 'Platform' doesn't fully describe the ABI of an object. Compiler ways -- (profiling, debug, dynamic) also modify the ABI. -- data Profile = Profile { profilePlatform :: !Platform -- ^ Platform , profileWays :: !Ways -- ^ Ways } deriving (Eq, Ord, Show, Read) -- | Get platform constants profileConstants :: Profile -> PlatformConstants {-# INLINE profileConstants #-} profileConstants profile = platformConstants (profilePlatform profile) -- | Is profiling enabled profileIsProfiling :: Profile -> Bool {-# INLINE profileIsProfiling #-} profileIsProfiling profile = profileWays profile `hasWay` WayProf -- | Word size in bytes profileWordSizeInBytes :: Profile -> Int {-# INLINE profileWordSizeInBytes #-} profileWordSizeInBytes profile = platformWordSizeInBytes (profilePlatform profile) -- | Unique build tag for the profile profileBuildTag :: Profile -> String profileBuildTag profile -- profiles using unregisterised convention are not binary compatible with -- those that don't. Make sure to make it apparent in the tag so that our -- interface files can't be mismatched by mistake. | platformUnregisterised platform = 'u':wayTag | otherwise = wayTag where platform = profilePlatform profile wayTag = waysBuildTag (profileWays profile) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/RISCV64.hs0000644000000000000000000000024214472400112021011 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.RISCV64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_riscv64 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/Reg.hs0000644000000000000000000001654514472400112020503 0ustar0000000000000000-- | An architecture independent description of a register. 
-- This needs to stay architecture independent because it is used -- by NCGMonad and the register allocators, which are shared -- by all architectures. -- module GHC.Platform.Reg ( RegNo, Reg(..), regSingle, realRegSingle, isRealReg, takeRealReg, isVirtualReg, takeVirtualReg, VirtualReg(..), renameVirtualReg, classOfVirtualReg, getHiVirtualRegFromLo, getHiVRegFromLo, RealReg(..), regNosOfRealReg, realRegsAlias, liftPatchFnToRegReg ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Platform.Reg.Class -- | An identifier for a primitive real machine register. type RegNo = Int -- VirtualRegs are virtual registers. The register allocator will -- eventually have to map them into RealRegs, or into spill slots. -- -- VirtualRegs are allocated on the fly, usually to represent a single -- value in the abstract assembly code (i.e. dynamic registers are -- usually single assignment). -- -- The single assignment restriction isn't necessary to get correct code, -- although a better register allocation will result if single -- assignment is used -- because the allocator maps a VirtualReg into -- a single RealReg, even if the VirtualReg has multiple live ranges. -- -- Virtual regs can be of either class, so that info is attached. -- data VirtualReg = VirtualRegI {-# UNPACK #-} !Unique | VirtualRegHi {-# UNPACK #-} !Unique -- High part of 2-word register | VirtualRegF {-# UNPACK #-} !Unique | VirtualRegD {-# UNPACK #-} !Unique deriving (Eq, Show) -- This is laborious, but necessary. We can't derive Ord because -- Unique doesn't have an Ord instance. Note nonDetCmpUnique in the -- implementation. See Note [No Ord for Unique] -- This is non-deterministic but we do not currently support deterministic -- code-generation. 
See Note [Unique Determinism and code generation] instance Ord VirtualReg where compare (VirtualRegI a) (VirtualRegI b) = nonDetCmpUnique a b compare (VirtualRegHi a) (VirtualRegHi b) = nonDetCmpUnique a b compare (VirtualRegF a) (VirtualRegF b) = nonDetCmpUnique a b compare (VirtualRegD a) (VirtualRegD b) = nonDetCmpUnique a b compare VirtualRegI{} _ = LT compare _ VirtualRegI{} = GT compare VirtualRegHi{} _ = LT compare _ VirtualRegHi{} = GT compare VirtualRegF{} _ = LT compare _ VirtualRegF{} = GT instance Uniquable VirtualReg where getUnique reg = case reg of VirtualRegI u -> u VirtualRegHi u -> u VirtualRegF u -> u VirtualRegD u -> u instance Outputable VirtualReg where ppr reg = case reg of VirtualRegI u -> text "%vI_" <> pprUniqueAlways u VirtualRegHi u -> text "%vHi_" <> pprUniqueAlways u -- this code is kinda wrong on x86 -- because float and double occupy the same register set -- namely SSE2 register xmm0 .. xmm15 VirtualRegF u -> text "%vFloat_" <> pprUniqueAlways u VirtualRegD u -> text "%vDouble_" <> pprUniqueAlways u renameVirtualReg :: Unique -> VirtualReg -> VirtualReg renameVirtualReg u r = case r of VirtualRegI _ -> VirtualRegI u VirtualRegHi _ -> VirtualRegHi u VirtualRegF _ -> VirtualRegF u VirtualRegD _ -> VirtualRegD u classOfVirtualReg :: VirtualReg -> RegClass classOfVirtualReg vr = case vr of VirtualRegI{} -> RcInteger VirtualRegHi{} -> RcInteger VirtualRegF{} -> RcFloat VirtualRegD{} -> RcDouble -- Determine the upper-half vreg for a 64-bit quantity on a 32-bit platform -- when supplied with the vreg for the lower-half of the quantity. -- (NB. Not reversible). 
getHiVirtualRegFromLo :: VirtualReg -> VirtualReg getHiVirtualRegFromLo reg = case reg of -- makes a pseudo-unique with tag 'H' VirtualRegI u -> VirtualRegHi (newTagUnique u 'H') _ -> panic "Reg.getHiVirtualRegFromLo" getHiVRegFromLo :: Reg -> Reg getHiVRegFromLo reg = case reg of RegVirtual vr -> RegVirtual (getHiVirtualRegFromLo vr) RegReal _ -> panic "Reg.getHiVRegFromLo" ------------------------------------------------------------------------------------ -- | RealRegs are machine regs which are available for allocation, in -- the usual way. We know what class they are, because that's part of -- the processor's architecture. -- newtype RealReg = RealRegSingle RegNo deriving (Eq, Show, Ord) instance Uniquable RealReg where getUnique reg = case reg of RealRegSingle i -> mkRegSingleUnique i instance Outputable RealReg where ppr reg = case reg of RealRegSingle i -> text "%r" <> int i regNosOfRealReg :: RealReg -> [RegNo] regNosOfRealReg rr = case rr of RealRegSingle r1 -> [r1] realRegsAlias :: RealReg -> RealReg -> Bool realRegsAlias rr1 rr2 = -- used to be `not $ null $ intersect (regNosOfRealReg rr1) (regNosOfRealReg rr2)` -- but that resulted in some gnarly, gnarly, allocating code. So we manually -- write out all the cases which gives us nice non-allocating code. case rr1 of RealRegSingle r1 -> case rr2 of RealRegSingle r2 -> r1 == r2 -------------------------------------------------------------------------------- -- | A register, either virtual or real data Reg = RegVirtual !VirtualReg | RegReal !RealReg deriving (Eq, Ord, Show) regSingle :: RegNo -> Reg regSingle regNo = RegReal (realRegSingle regNo) realRegSingle :: RegNo -> RealReg realRegSingle regNo = RealRegSingle regNo -- We like to have Uniques for Reg so that we can make UniqFM and UniqSets -- in the register allocator. 
instance Uniquable Reg where getUnique reg = case reg of RegVirtual vr -> getUnique vr RegReal rr -> getUnique rr -- | Print a reg in a generic manner -- If you want the architecture specific names, then use the pprReg -- function from the appropriate Ppr module. instance Outputable Reg where ppr reg = case reg of RegVirtual vr -> ppr vr RegReal rr -> ppr rr isRealReg :: Reg -> Bool isRealReg reg = case reg of RegReal _ -> True RegVirtual _ -> False takeRealReg :: Reg -> Maybe RealReg takeRealReg reg = case reg of RegReal rr -> Just rr _ -> Nothing isVirtualReg :: Reg -> Bool isVirtualReg reg = case reg of RegReal _ -> False RegVirtual _ -> True takeVirtualReg :: Reg -> Maybe VirtualReg takeVirtualReg reg = case reg of RegReal _ -> Nothing RegVirtual vr -> Just vr -- | The patch function supplied by the allocator maps VirtualReg to RealReg -- regs, but sometimes we want to apply it to plain old Reg. -- liftPatchFnToRegReg :: (VirtualReg -> RealReg) -> (Reg -> Reg) liftPatchFnToRegReg patchF reg = case reg of RegVirtual vr -> RegReal (patchF vr) RegReal _ -> reg ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/Reg/Class.hs0000644000000000000000000000147014472400112021537 0ustar0000000000000000-- | An architecture independent description of a register's class. module GHC.Platform.Reg.Class ( RegClass (..) ) where import GHC.Prelude import GHC.Utils.Outputable as Outputable import GHC.Types.Unique import GHC.Builtin.Uniques -- | The class of a register. -- Used in the register allocator. -- We treat all registers in a class as being interchangeable. 
-- data RegClass = RcInteger | RcFloat | RcDouble deriving Eq instance Uniquable RegClass where getUnique RcInteger = mkRegClassUnique 0 getUnique RcFloat = mkRegClassUnique 1 getUnique RcDouble = mkRegClassUnique 2 instance Outputable RegClass where ppr RcInteger = Outputable.text "I" ppr RcFloat = Outputable.text "F" ppr RcDouble = Outputable.text "D" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/Regs.hs0000644000000000000000000000711514472400112020657 0ustar0000000000000000module GHC.Platform.Regs (callerSaves, activeStgRegs, haveRegBase, globalRegMaybe, freeReg) where import GHC.Prelude import GHC.Cmm.Expr import GHC.Platform import GHC.Platform.Reg import qualified GHC.Platform.ARM as ARM import qualified GHC.Platform.AArch64 as AArch64 import qualified GHC.Platform.PPC as PPC import qualified GHC.Platform.S390X as S390X import qualified GHC.Platform.X86 as X86 import qualified GHC.Platform.X86_64 as X86_64 import qualified GHC.Platform.RISCV64 as RISCV64 import qualified GHC.Platform.NoRegs as NoRegs -- | Returns 'True' if this global register is stored in a caller-saves -- machine register. callerSaves :: Platform -> GlobalReg -> Bool callerSaves platform | platformUnregisterised platform = NoRegs.callerSaves | otherwise = case platformArch platform of ArchX86 -> X86.callerSaves ArchX86_64 -> X86_64.callerSaves ArchS390X -> S390X.callerSaves ArchARM {} -> ARM.callerSaves ArchAArch64 -> AArch64.callerSaves ArchRISCV64 -> RISCV64.callerSaves arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.callerSaves | otherwise -> NoRegs.callerSaves -- | Here is where the STG register map is defined for each target arch. -- The order matters (for the llvm backend anyway)! We must make sure to -- maintain the order here with the order used in the LLVM calling conventions. -- Note that also, this isn't all registers, just the ones that are currently -- possibly mapped to real registers. 
activeStgRegs :: Platform -> [GlobalReg] activeStgRegs platform | platformUnregisterised platform = NoRegs.activeStgRegs | otherwise = case platformArch platform of ArchX86 -> X86.activeStgRegs ArchX86_64 -> X86_64.activeStgRegs ArchS390X -> S390X.activeStgRegs ArchARM {} -> ARM.activeStgRegs ArchAArch64 -> AArch64.activeStgRegs ArchRISCV64 -> RISCV64.activeStgRegs arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.activeStgRegs | otherwise -> NoRegs.activeStgRegs haveRegBase :: Platform -> Bool haveRegBase platform | platformUnregisterised platform = NoRegs.haveRegBase | otherwise = case platformArch platform of ArchX86 -> X86.haveRegBase ArchX86_64 -> X86_64.haveRegBase ArchS390X -> S390X.haveRegBase ArchARM {} -> ARM.haveRegBase ArchAArch64 -> AArch64.haveRegBase ArchRISCV64 -> RISCV64.haveRegBase arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.haveRegBase | otherwise -> NoRegs.haveRegBase globalRegMaybe :: Platform -> GlobalReg -> Maybe RealReg globalRegMaybe platform | platformUnregisterised platform = NoRegs.globalRegMaybe | otherwise = case platformArch platform of ArchX86 -> X86.globalRegMaybe ArchX86_64 -> X86_64.globalRegMaybe ArchS390X -> S390X.globalRegMaybe ArchARM {} -> ARM.globalRegMaybe ArchAArch64 -> AArch64.globalRegMaybe ArchRISCV64 -> RISCV64.globalRegMaybe arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.globalRegMaybe | otherwise -> NoRegs.globalRegMaybe freeReg :: Platform -> RegNo -> Bool freeReg platform | platformUnregisterised platform = NoRegs.freeReg | otherwise = case platformArch platform of ArchX86 -> X86.freeReg ArchX86_64 -> X86_64.freeReg ArchS390X -> S390X.freeReg ArchARM {} -> ARM.freeReg ArchAArch64 -> AArch64.freeReg ArchRISCV64 -> RISCV64.freeReg arch | arch `elem` [ArchPPC, ArchPPC_64 ELF_V1, ArchPPC_64 ELF_V2] -> PPC.freeReg | otherwise -> NoRegs.freeReg 
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/S390X.hs0000644000000000000000000000023614472400112020542 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.S390X where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_s390x 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/Ways.hs0000644000000000000000000002047614472400112020707 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | Ways -- -- The central concept of a "way" is that all objects in a given -- program must be compiled in the same "way". Certain options change -- parameters of the virtual machine, eg. profiling adds an extra word -- to the object header, so profiling objects cannot be linked with -- non-profiling objects. -- -- After parsing the command-line options, we determine which "way" we -- are building - this might be a combination way, eg. profiling+threaded. -- -- There are two kinds of ways: -- - RTS only: only affect the runtime system (RTS) and don't affect code -- generation (e.g. threaded, debug) -- - Full ways: affect code generation and the RTS (e.g. profiling, dynamic -- linking) -- -- We then find the "build-tag" associated with this way, and this -- becomes the suffix used to find .hi files and libraries used in -- this compilation. module GHC.Platform.Ways ( Way(..) , Ways , hasWay , hasNotWay , addWay , removeWay , allowed_combination , wayGeneralFlags , wayUnsetGeneralFlags , wayOptc , wayOptl , wayOptP , wayDesc , wayRTSOnly , wayTag , waysTag , waysBuildTag , fullWays , rtsWays -- * Host GHC ways , hostWays , hostFullWays , hostIsProfiled , hostIsDynamic , hostIsThreaded , hostIsDebugged , hostIsTracing ) where import GHC.Prelude import GHC.Platform import GHC.Driver.Flags import qualified Data.Set as Set import Data.Set (Set) import Data.List (intersperse) -- | A way -- -- Don't change the constructor order as it us used by `waysTag` to create a -- unique tag (e.g. thr_debug_p) which is expected by other tools (e.g. 
Cabal). data Way = WayCustom String -- ^ for GHC API clients building custom variants | WayThreaded -- ^ (RTS only) Multithreaded runtime system | WayDebug -- ^ Debugging, enable trace messages and extra checks | WayProf -- ^ Profiling, enable cost-centre stacks and profiling reports | WayDyn -- ^ Dynamic linking deriving (Eq, Ord, Show, Read) type Ways = Set Way -- | Test if a way is enabled hasWay :: Ways -> Way -> Bool hasWay ws w = Set.member w ws -- | Test if a way is not enabled hasNotWay :: Ways -> Way -> Bool hasNotWay ws w = Set.notMember w ws -- | Add a way addWay :: Way -> Ways -> Ways addWay = Set.insert -- | Remove a way removeWay :: Way -> Ways -> Ways removeWay = Set.delete -- | Check if a combination of ways is allowed allowed_combination :: Ways -> Bool allowed_combination ways = not disallowed where disallowed = or [ hasWay ways x && hasWay ways y | (x,y) <- couples ] -- List of disallowed couples of ways couples = [] -- we don't have any disallowed combination of ways nowadays -- | Unique tag associated to a list of ways waysTag :: Ways -> String waysTag = concat . intersperse "_" . map wayTag . Set.toAscList -- | Unique build-tag associated to a list of ways -- -- RTS only ways are filtered out because they have no impact on the build. waysBuildTag :: Ways -> String waysBuildTag ws = waysTag (Set.filter (not . wayRTSOnly) ws) -- | Unique build-tag associated to a way wayTag :: Way -> String wayTag (WayCustom xs) = xs wayTag WayThreaded = "thr" wayTag WayDebug = "debug" wayTag WayDyn = "dyn" wayTag WayProf = "p" -- | Return true for ways that only impact the RTS, not the generated code wayRTSOnly :: Way -> Bool wayRTSOnly (WayCustom {}) = False wayRTSOnly WayDyn = False wayRTSOnly WayProf = False wayRTSOnly WayThreaded = True wayRTSOnly WayDebug = True -- | Filter ways that have an impact on compilation fullWays :: Ways -> Ways fullWays ws = Set.filter (not . 
wayRTSOnly) ws -- | Filter RTS-only ways (ways that don't have an impact on compilation) rtsWays :: Ways -> Ways rtsWays ws = Set.filter wayRTSOnly ws wayDesc :: Way -> String wayDesc (WayCustom xs) = xs wayDesc WayThreaded = "Threaded" wayDesc WayDebug = "Debug" wayDesc WayDyn = "Dynamic" wayDesc WayProf = "Profiling" -- | Turn these flags on when enabling this way wayGeneralFlags :: Platform -> Way -> [GeneralFlag] wayGeneralFlags _ (WayCustom {}) = [] wayGeneralFlags _ WayThreaded = [] wayGeneralFlags _ WayDebug = [] wayGeneralFlags _ WayDyn = [Opt_PIC, Opt_ExternalDynamicRefs] -- We could get away without adding -fPIC when compiling the -- modules of a program that is to be linked with -dynamic; the -- program itself does not need to be position-independent, only -- the libraries need to be. HOWEVER, GHCi links objects into a -- .so before loading the .so using the system linker. Since only -- PIC objects can be linked into a .so, we have to compile even -- modules of the main program with -fPIC when using -dynamic. wayGeneralFlags _ WayProf = [] -- | Turn these flags off when enabling this way wayUnsetGeneralFlags :: Platform -> Way -> [GeneralFlag] wayUnsetGeneralFlags _ (WayCustom {}) = [] wayUnsetGeneralFlags _ WayThreaded = [] wayUnsetGeneralFlags _ WayDebug = [] wayUnsetGeneralFlags _ WayDyn = [Opt_SplitSections] -- There's no point splitting when we're going to be dynamically linking. -- Plus it breaks compilation on OSX x86. 
wayUnsetGeneralFlags _ WayProf = [] -- | Pass these options to the C compiler when enabling this way wayOptc :: Platform -> Way -> [String] wayOptc _ (WayCustom {}) = [] wayOptc platform WayThreaded = case platformOS platform of OSOpenBSD -> ["-pthread"] OSNetBSD -> ["-pthread"] _ -> [] wayOptc _ WayDebug = [] wayOptc _ WayDyn = [] wayOptc _ WayProf = ["-DPROFILING"] -- | Pass these options to linker when enabling this way wayOptl :: Platform -> Way -> [String] wayOptl _ (WayCustom {}) = [] wayOptl platform WayThreaded = case platformOS platform of -- N.B. FreeBSD cc throws a warning if we pass -pthread without -- actually using any pthread symbols. OSFreeBSD -> ["-pthread", "-Wno-unused-command-line-argument"] OSOpenBSD -> ["-pthread"] OSNetBSD -> ["-pthread"] _ -> [] wayOptl _ WayDebug = [] wayOptl _ WayDyn = [] wayOptl _ WayProf = [] -- | Pass these options to the preprocessor when enabling this way wayOptP :: Platform -> Way -> [String] wayOptP _ (WayCustom {}) = [] wayOptP _ WayThreaded = [] wayOptP _ WayDebug = [] wayOptP _ WayDyn = [] wayOptP _ WayProf = ["-DPROFILING"] -- | Consult the RTS to find whether it has been built with profiling enabled. hostIsProfiled :: Bool hostIsProfiled = rtsIsProfiled_ /= 0 foreign import ccall unsafe "rts_isProfiled" rtsIsProfiled_ :: Int -- | Consult the RTS to find whether GHC itself has been built with -- dynamic linking. This can't be statically known at compile-time, -- because we build both the static and dynamic versions together with -- -dynamic-too. hostIsDynamic :: Bool hostIsDynamic = rtsIsDynamic_ /= 0 foreign import ccall unsafe "rts_isDynamic" rtsIsDynamic_ :: Int -- we need this until the bootstrap GHC is always recent enough #if MIN_VERSION_GLASGOW_HASKELL(9,1,0,0) -- | Consult the RTS to find whether it is threaded. hostIsThreaded :: Bool hostIsThreaded = rtsIsThreaded_ /= 0 foreign import ccall unsafe "rts_isThreaded" rtsIsThreaded_ :: Int -- | Consult the RTS to find whether it is debugged. 
hostIsDebugged :: Bool hostIsDebugged = rtsIsDebugged_ /= 0 foreign import ccall unsafe "rts_isDebugged" rtsIsDebugged_ :: Int -- | Consult the RTS to find whether it is tracing. hostIsTracing :: Bool hostIsTracing = rtsIsTracing_ /= 0 foreign import ccall unsafe "rts_isTracing" rtsIsTracing_ :: Int #else hostIsThreaded :: Bool hostIsThreaded = False hostIsDebugged :: Bool hostIsDebugged = False hostIsTracing :: Bool hostIsTracing = False #endif -- | Host ways. hostWays :: Ways hostWays = Set.unions [ if hostIsDynamic then Set.singleton WayDyn else Set.empty , if hostIsProfiled then Set.singleton WayProf else Set.empty , if hostIsThreaded then Set.singleton WayThreaded else Set.empty , if hostIsDebugged then Set.singleton WayDebug else Set.empty ] -- | Host "full" ways (i.e. ways that have an impact on the compilation, -- not RTS only ways). -- -- These ways must be used when compiling codes targeting the internal -- interpreter. hostFullWays :: Ways hostFullWays = fullWays hostWays ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/X86.hs0000644000000000000000000000023314472400112020336 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.X86 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_i386 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Platform/X86_64.hs0000644000000000000000000000024014472400112020645 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Platform.X86_64 where import GHC.Prelude #define MACHREGS_NO_REGS 0 #define MACHREGS_x86_64 1 #include "CodeGen.Platform.h" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Prelude.hs0000644000000000000000000000616414472400112017576 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_HADDOCK not-home #-} {-# OPTIONS_GHC -O2 #-} -- See Note [-O2 Prelude] -- | Custom GHC "Prelude" -- -- This module serves as a replacement for the "Prelude" module -- and abstracts over differences between the bootstrapping -- GHC version, and may also provide a common 
default vocabulary. -- Every module in GHC -- * Is compiled with -XNoImplicitPrelude -- * Explicitly imports GHC.Prelude module GHC.Prelude (module X ,module Bits ,shiftL, shiftR ) where {- Note [-O2 Prelude] ~~~~~~~~~~~~~~~~~~~~~ There is some code in GHC that is *always* compiled with -O[2] because of it's impact on compile time performance. Some of this code might depend on the definitions like shiftL being defined here being performant. So we always compile this module with -O2. It's (currently) tiny so I have little reason to suspect this impacts overall GHC compile times negatively. -} -- We export the 'Semigroup' class but w/o the (<>) operator to avoid -- clashing with the (Outputable.<>) operator which is heavily used -- through GHC's code-base. {- Note [Why do we import Prelude here?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The files ghc-boot-th.cabal, ghc-boot.cabal, ghci.cabal and ghc-heap.cabal contain the directive default-extensions: NoImplicitPrelude. There are two motivations for this: - Consistency with the compiler directory, which enables NoImplicitPrelude; - Allows loading the above dependent packages with ghc-in-ghci, giving a smoother development experience when adding new extensions. -} import Prelude as X hiding ((<>)) import Data.Foldable as X (foldl') #if MIN_VERSION_base(4,16,0) import GHC.Bits as Bits hiding (shiftL, shiftR) # if defined(DEBUG) import qualified GHC.Bits as Bits (shiftL, shiftR) # endif #else --base <4.15 import Data.Bits as Bits hiding (shiftL, shiftR) # if defined(DEBUG) import qualified Data.Bits as Bits (shiftL, shiftR) # endif #endif {- Note [Default to unsafe shifts inside GHC] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The safe shifts can introduce branches which come at the cost of performance. We still want the additional debugability for debug builds. So we define it as one or the other depending on the DEBUG setting. Why do we then continue on to re-export the rest of Data.Bits? 
If we would not what is likely to happen is: * Someone imports Data.Bits, uses xor. Things are fine. * They add a shift and get an ambigious definition error. * The are puzzled for a bit. * They either: + Remove the import of Data.Bits and get an error because xor is not in scope. + Add the hiding clause to the Data.Bits import for the shifts. Either is quite annoying. Simply re-exporting all of Data.Bits avoids this making for a smoother developer experience. At the cost of having a few more names in scope at all time. But that seems like a fair tradeoff. See also #19618 -} -- We always want the Data.Bits method to show up for rules etc. {-# INLINE shiftL #-} {-# INLINE shiftR #-} shiftL, shiftR :: Bits.Bits a => a -> Int -> a #if defined(DEBUG) shiftL = Bits.shiftL shiftR = Bits.shiftR #else shiftL = Bits.unsafeShiftL shiftR = Bits.unsafeShiftR #endif ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Context.hs0000644000000000000000000004372614472400112021252 0ustar0000000000000000module GHC.Runtime.Context ( InteractiveContext (..) , InteractiveImport (..) , emptyInteractiveContext , extendInteractiveContext , extendInteractiveContextWithIds , setInteractivePrintName , substInteractiveContext , replaceImportEnv , icReaderEnv , icInteractiveModule , icInScopeTTs , icPrintUnqual ) where import GHC.Prelude import GHC.Hs import GHC.Driver.Session import {-# SOURCE #-} GHC.Driver.Plugins import GHC.Runtime.Eval.Types ( IcGlobalRdrEnv(..), Resume ) import GHC.Unit import GHC.Unit.Env import GHC.Core.FamInstEnv import GHC.Core.InstEnv import GHC.Core.Type import GHC.Types.Avail import GHC.Types.Fixity.Env import GHC.Types.Id.Info ( IdDetails(..) 
) import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Reader import GHC.Types.Name.Ppr import GHC.Types.TyThing import GHC.Types.Var import GHC.Builtin.Names ( ioTyConName, printName, mkInteractiveModule ) import GHC.Utils.Outputable {- Note [The interactive package] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Type, class, and value declarations at the command prompt are treated as if they were defined in modules interactive:Ghci1 interactive:Ghci2 ...etc... with each bunch of declarations using a new module, all sharing a common package 'interactive' (see Module.interactiveUnitId, and GHC.Builtin.Names.mkInteractiveModule). This scheme deals well with shadowing. For example: ghci> data T = A ghci> data T = B ghci> :i A data Ghci1.T = A -- Defined at :2:10 Here we must display info about constructor A, but its type T has been shadowed by the second declaration. But it has a respectable qualified name (Ghci1.T), and its source location says where it was defined, and it can also be used with the qualified name. So the main invariant continues to hold, that in any session an original name M.T only refers to one unique thing. (In a previous iteration both the T's above were called :Interactive.T, albeit with different uniques, which gave rise to all sorts of trouble.) The details are a bit tricky though: * The field ic_mod_index counts which Ghci module we've got up to. It is incremented when extending ic_tythings * ic_tythings contains only things from the 'interactive' package. * Module from the 'interactive' package (Ghci1, Ghci2 etc) never go in the Home Package Table (HPT). When you say :load, that's when we extend the HPT. * The 'homeUnitId' field of DynFlags is *not* set to 'interactive'. It stays as 'main' (or whatever -this-unit-id says), and is the package to which :load'ed modules are added to. * So how do we arrange that declarations at the command prompt get to be in the 'interactive' package? 
Simply by setting the tcg_mod field of the TcGblEnv to "interactive:Ghci1". This is done by the call to initTc in initTcInteractive, which in turn get the module from it 'icInteractiveModule' field of the interactive context. The 'homeUnitId' field stays as 'main' (or whatever -this-unit-id says. * The main trickiness is that the type environment (tcg_type_env) and fixity envt (tcg_fix_env), now contain entities from all the interactive-package modules (Ghci1, Ghci2, ...) together, rather than just a single module as is usually the case. So you can't use "nameIsLocalOrFrom" to decide whether to look in the TcGblEnv vs the HPT/PTE. This is a change, but not a problem provided you know. * However, the tcg_binds, tcg_sigs, tcg_insts, tcg_fam_insts, etc fields of the TcGblEnv, which collect "things defined in this module", all refer to stuff define in a single GHCi command, *not* all the commands so far. In contrast, tcg_inst_env, tcg_fam_inst_env, have instances from all GhciN modules, which makes sense -- they are all "home package" modules. Note [Interactively-bound Ids in GHCi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Ids bound by previous Stmts in GHCi are currently a) GlobalIds, with b) An External Name, like Ghci4.foo See Note [The interactive package] above c) A tidied type (a) They must be GlobalIds (not LocalIds) otherwise when we come to compile an expression using these ids later, the byte code generator will consider the occurrences to be free rather than global. (b) Having an External Name is important because of Note [GlobalRdrEnv shadowing] in GHC.Types.Names.RdrName (c) Their types are tidied. This is important, because :info may ask to look at them, and :info expects the things it looks up to have tidy types Where do interactively-bound Ids come from? - GHCi REPL Stmts e.g. ghci> let foo x = x+1 These start with an Internal Name because a Stmt is a local construct, so the renamer naturally builds an Internal name for each of its binders. 
Then in tcRnStmt they are externalised via GHC.Tc.Module.externaliseAndTidyId, so they get Names like Ghic4.foo. - Ids bound by the debugger etc have Names constructed by GHC.Iface.Env.newInteractiveBinder; at the call sites it is followed by mkVanillaGlobal or mkVanillaGlobalWithInfo. So again, they are all Global, External. - TyCons, Classes, and Ids bound by other top-level declarations in GHCi (eg foreign import, record selectors) also get External Names, with Ghci9 (or 8, or 7, etc) as the module name. Note [ic_tythings] ~~~~~~~~~~~~~~~~~~ The ic_tythings field contains * The TyThings declared by the user at the command prompt (eg Ids, TyCons, Classes) * The user-visible Ids that arise from such things, which *don't* come from 'implicitTyThings', notably: - record selectors - class ops The implicitTyThings are readily obtained from the TyThings but record selectors etc are not It does *not* contain * DFunIds (they can be gotten from ic_instances) * CoAxioms (ditto) See also Note [Interactively-bound Ids in GHCi] Note [Override identical instances in GHCi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you declare a new instance in GHCi that is identical to a previous one, we simply override the previous one; we don't regard it as overlapping. e.g. Prelude> data T = A | B Prelude> instance Eq T where ... Prelude> instance Eq T where ... -- This one overrides It's exactly the same for type-family instances. See #7102 Note [icReaderEnv recalculation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The GlobalRdrEnv describing what’s in scope at the prompts consists of all the imported things, followed by all the things defined on the prompt, with shadowing. Defining new things on the prompt is easy: we shadow as needed and then extend the environment. But changing the set of imports, which can happen later as well, is tricky: we need to re-apply the shadowing from all the things defined at the prompt! 
For example: ghci> let empty = True ghci> import Data.IntMap.Strict -- Exports 'empty' ghci> empty -- Still gets the 'empty' defined at the prompt True It would be correct ot re-construct the env from scratch based on `ic_tythings`, but that'd be quite expensive if there are many entires in `ic_tythings` that shadow each other. Therefore we keep around a that `GlobalRdrEnv` in `igre_prompt_env` that contians _just_ the things defined at the prompt, and use that in `replaceImportEnv` to rebuild the full env. Conveniently, `shadowNames` takes such an `OccEnv` to denote the set of names to shadow. INVARIANT: Every `OccName` in `igre_prompt_env` is present unqualified as well (else it would not be right to use pass `igre_prompt_env` to `shadowNames`.) The definition of the IcGlobalRdrEnv type should conceptually be in this module, and made abstract, but it’s used in `Resume`, so it lives in GHC.Runtime.Eval.Type. - -} -- | Interactive context, recording information about the state of the -- context in which statements are executed in a GHCi session. data InteractiveContext = InteractiveContext { ic_dflags :: DynFlags, -- ^ The 'DynFlags' used to evaluate interactive expressions -- and statements. ic_mod_index :: Int, -- ^ Each GHCi stmt or declaration brings some new things into -- scope. We give them names like interactive:Ghci9.T, -- where the ic_index is the '9'. The ic_mod_index is -- incremented whenever we add something to ic_tythings -- See Note [The interactive package] ic_imports :: [InteractiveImport], -- ^ The GHCi top-level scope (icReaderEnv) is extended with -- these imports -- -- This field is only stored here so that the client -- can retrieve it with GHC.getContext. GHC itself doesn't -- use it, but does reset it to empty sometimes (such -- as before a GHC.load). The context is set with GHC.setContext. ic_tythings :: [TyThing], -- ^ TyThings defined by the user, in reverse order of -- definition (ie most recent at the front). 
-- Also used in GHC.Tc.Module.runTcInteractive to fill the type -- checker environment. -- See Note [ic_tythings] ic_gre_cache :: IcGlobalRdrEnv, -- ^ Essentially the cached 'GlobalRdrEnv'. -- -- The GlobalRdrEnv contains everything in scope at the command -- line, both imported and everything in ic_tythings, with the -- correct shadowing. -- -- The IcGlobalRdrEnv contains extra data to allow efficient -- recalculation when the set of imports change. -- See Note [icReaderEnv recalculation] ic_instances :: (InstEnv, [FamInst]), -- ^ All instances and family instances created during -- this session. These are grabbed en masse after each -- update to be sure that proper overlapping is retained. -- That is, rather than re-check the overlapping each -- time we update the context, we just take the results -- from the instance code that already does that. ic_fix_env :: FixityEnv, -- ^ Fixities declared in let statements ic_default :: Maybe [Type], -- ^ The current default types, set by a 'default' declaration ic_resume :: [Resume], -- ^ The stack of breakpoint contexts ic_monad :: Name, -- ^ The monad that GHCi is executing in ic_int_print :: Name, -- ^ The function that is used for printing results -- of expressions in ghci and -e mode. ic_cwd :: Maybe FilePath, -- ^ virtual CWD of the program ic_plugins :: !Plugins -- ^ Cache of loaded plugins. We store them here to avoid having to -- load them everytime we switch to the interctive context. } data InteractiveImport = IIDecl (ImportDecl GhcPs) -- ^ Bring the exports of a particular module -- (filtered by an import decl) into scope | IIModule ModuleName -- ^ Bring into scope the entire top-level envt of -- of this module, including the things imported -- into it. emptyIcGlobalRdrEnv :: IcGlobalRdrEnv emptyIcGlobalRdrEnv = IcGlobalRdrEnv { igre_env = emptyGlobalRdrEnv , igre_prompt_env = emptyGlobalRdrEnv } -- | Constructs an empty InteractiveContext. 
emptyInteractiveContext :: DynFlags -> InteractiveContext emptyInteractiveContext dflags = InteractiveContext { ic_dflags = dflags, ic_imports = [], ic_gre_cache = emptyIcGlobalRdrEnv, ic_mod_index = 1, ic_tythings = [], ic_instances = (emptyInstEnv,[]), ic_fix_env = emptyNameEnv, ic_monad = ioTyConName, -- IO monad by default ic_int_print = printName, -- System.IO.print by default ic_default = Nothing, ic_resume = [], ic_cwd = Nothing, ic_plugins = emptyPlugins } icReaderEnv :: InteractiveContext -> GlobalRdrEnv icReaderEnv = igre_env . ic_gre_cache icInteractiveModule :: InteractiveContext -> Module icInteractiveModule (InteractiveContext { ic_mod_index = index }) = mkInteractiveModule index -- | This function returns the list of visible TyThings (useful for -- e.g. showBindings). -- -- It picks only those TyThings that are not shadowed by later definitions on the interpreter, -- to not clutter :showBindings with shadowed ids, which would show up as Ghci9.foo. -- -- Some TyThings define many names; we include them if _any_ name is still -- available unqualified. icInScopeTTs :: InteractiveContext -> [TyThing] icInScopeTTs ictxt = filter in_scope_unqualified (ic_tythings ictxt) where in_scope_unqualified thing = or [ unQualOK gre | avail <- tyThingAvailInfo thing , name <- availNames avail , Just gre <- [lookupGRE_Name (icReaderEnv ictxt) name] ] -- | Get the PrintUnqualified function based on the flags and this InteractiveContext icPrintUnqual :: UnitEnv -> InteractiveContext -> PrintUnqualified icPrintUnqual unit_env ictxt = mkPrintUnqualified unit_env (icReaderEnv ictxt) -- | extendInteractiveContext is called with new TyThings recently defined to update the -- InteractiveContext to include them. By putting new things first, unqualified -- use will pick the most recently defined thing with a given name, while -- still keeping the old names in scope in their qualified form (Ghci1.foo). 
extendInteractiveContext :: InteractiveContext -> [TyThing] -> InstEnv -> [FamInst] -> Maybe [Type] -> FixityEnv -> InteractiveContext extendInteractiveContext ictxt new_tythings new_cls_insts new_fam_insts defaults fix_env = ictxt { ic_mod_index = ic_mod_index ictxt + 1 -- Always bump this; even instances should create -- a new mod_index (#9426) , ic_tythings = new_tythings ++ ic_tythings ictxt , ic_gre_cache = ic_gre_cache ictxt `icExtendIcGblRdrEnv` new_tythings , ic_instances = ( new_cls_insts `unionInstEnv` old_cls_insts , new_fam_insts ++ fam_insts ) -- we don't shadow old family instances (#7102), -- so don't need to remove them here , ic_default = defaults , ic_fix_env = fix_env -- See Note [Fixity declarations in GHCi] } where -- Discard old instances that have been fully overridden -- See Note [Override identical instances in GHCi] (cls_insts, fam_insts) = ic_instances ictxt old_cls_insts = filterInstEnv (\i -> not $ anyInstEnv (identicalClsInstHead i) new_cls_insts) cls_insts extendInteractiveContextWithIds :: InteractiveContext -> [Id] -> InteractiveContext -- Just a specialised version extendInteractiveContextWithIds ictxt new_ids | null new_ids = ictxt | otherwise = ictxt { ic_mod_index = ic_mod_index ictxt + 1 , ic_tythings = new_tythings ++ ic_tythings ictxt , ic_gre_cache = ic_gre_cache ictxt `icExtendIcGblRdrEnv` new_tythings } where new_tythings = map AnId new_ids setInteractivePrintName :: InteractiveContext -> Name -> InteractiveContext setInteractivePrintName ic n = ic{ic_int_print = n} icExtendIcGblRdrEnv :: IcGlobalRdrEnv -> [TyThing] -> IcGlobalRdrEnv icExtendIcGblRdrEnv igre tythings = IcGlobalRdrEnv { igre_env = igre_env igre `icExtendGblRdrEnv` tythings , igre_prompt_env = igre_prompt_env igre `icExtendGblRdrEnv` tythings } -- This is used by setContext in GHC.Runtime.Eval when the set of imports -- changes, and recalculates the GlobalRdrEnv. 
See Note [icReaderEnv recalculation] replaceImportEnv :: IcGlobalRdrEnv -> GlobalRdrEnv -> IcGlobalRdrEnv replaceImportEnv igre import_env = igre { igre_env = new_env } where import_env_shadowed = import_env `shadowNames` igre_prompt_env igre new_env = import_env_shadowed `plusGlobalRdrEnv` igre_prompt_env igre -- | Add TyThings to the GlobalRdrEnv, earlier ones in the list shadowing -- later ones, and shadowing existing entries in the GlobalRdrEnv. icExtendGblRdrEnv :: GlobalRdrEnv -> [TyThing] -> GlobalRdrEnv icExtendGblRdrEnv env tythings = foldr add env tythings -- Foldr makes things in the front of -- the list shadow things at the back where -- One at a time, to ensure each shadows the previous ones add thing env | is_sub_bndr thing = env | otherwise = foldl' extendGlobalRdrEnv env1 (concatMap localGREsFromAvail avail) where new_gres = concatMap availGreNames avail new_occs = occSetToEnv (mkOccSet (map occName new_gres)) env1 = shadowNames env new_occs avail = tyThingAvailInfo thing -- Ugh! The new_tythings may include record selectors, since they -- are not implicit-ids, and must appear in the TypeEnv. But they -- will also be brought into scope by the corresponding (ATyCon -- tc). And we want the latter, because that has the correct -- parent (#10520) is_sub_bndr (AnId f) = case idDetails f of RecSelId {} -> True ClassOpId {} -> True _ -> False is_sub_bndr _ = False substInteractiveContext :: InteractiveContext -> TCvSubst -> InteractiveContext substInteractiveContext ictxt@InteractiveContext{ ic_tythings = tts } subst | isEmptyTCvSubst subst = ictxt | otherwise = ictxt { ic_tythings = map subst_ty tts } where subst_ty (AnId id) = AnId $ updateIdTypeAndMult (substTyAddInScope subst) id -- Variables in the interactive context *can* mention free type variables -- because of the runtime debugger. Otherwise you'd expect all -- variables bound in the interactive context to be closed. 
subst_ty tt = tt instance Outputable InteractiveImport where ppr (IIModule m) = char '*' <> ppr m ppr (IIDecl d) = ppr d ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Eval/Types.hs0000644000000000000000000000645714472400113021622 0ustar0000000000000000-- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow, 2005-2007 -- -- Running statements interactively -- -- ----------------------------------------------------------------------------- module GHC.Runtime.Eval.Types ( Resume(..), ResumeBindings, IcGlobalRdrEnv(..), History(..), ExecResult(..), SingleStep(..), isStep, ExecOptions(..) ) where import GHC.Prelude import GHCi.RemoteTypes import GHCi.Message (EvalExpr, ResumeContext) import GHC.Types.Id import GHC.Types.Name import GHC.Types.TyThing import GHC.Types.BreakInfo import GHC.Types.Name.Reader import GHC.Types.SrcLoc import GHC.Utils.Exception import Data.Word import GHC.Stack.CCS data ExecOptions = ExecOptions { execSingleStep :: SingleStep -- ^ stepping mode , execSourceFile :: String -- ^ filename (for errors) , execLineNumber :: Int -- ^ line number (for errors) , execWrap :: ForeignHValue -> EvalExpr ForeignHValue } data SingleStep = RunToCompletion | SingleStep | RunAndLogSteps isStep :: SingleStep -> Bool isStep RunToCompletion = False isStep _ = True data ExecResult = ExecComplete { execResult :: Either SomeException [Name] , execAllocation :: Word64 } | ExecBreak { breakNames :: [Name] , breakInfo :: Maybe BreakInfo } -- | Essentially a GlobalRdrEnv, but with additional cached values to allow -- efficient re-calculation when the imports change. -- Fields are strict to avoid space leaks (see T4029) -- All operations are in GHC.Runtime.Context. -- See Note [icReaderEnv recalculation] data IcGlobalRdrEnv = IcGlobalRdrEnv { igre_env :: !GlobalRdrEnv -- ^ The final environment , igre_prompt_env :: !GlobalRdrEnv -- ^ Just the things defined at the prompt (excluding imports!) 
} data Resume = Resume { resumeStmt :: String -- the original statement , resumeContext :: ForeignRef (ResumeContext [HValueRef]) , resumeBindings :: ResumeBindings , resumeFinalIds :: [Id] -- [Id] to bind on completion , resumeApStack :: ForeignHValue -- The object from which we can get -- value of the free variables. , resumeBreakInfo :: Maybe BreakInfo -- the breakpoint we stopped at -- (module, index) -- (Nothing <=> exception) , resumeSpan :: SrcSpan -- just a copy of the SrcSpan -- from the ModBreaks, -- otherwise it's a pain to -- fetch the ModDetails & -- ModBreaks to get this. , resumeDecl :: String -- ditto , resumeCCS :: RemotePtr CostCentreStack , resumeHistory :: [History] , resumeHistoryIx :: Int -- 0 <==> at the top of the history } type ResumeBindings = ([TyThing], IcGlobalRdrEnv) data History = History { historyApStack :: ForeignHValue, historyBreakInfo :: BreakInfo, historyEnclosingDecls :: [String] -- declarations enclosing the breakpoint } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Heap/Layout.hs0000644000000000000000000005111514472400113021750 0ustar0000000000000000-- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -- -- Storage manager representation of closures {-# LANGUAGE CPP #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} module GHC.Runtime.Heap.Layout ( -- * Words and bytes WordOff, ByteOff, wordsToBytes, bytesToWordsRoundUp, roundUpToWords, roundUpTo, StgWord, fromStgWord, toStgWord, StgHalfWord, fromStgHalfWord, toStgHalfWord, halfWordSize, halfWordSizeInBits, -- * Closure representation SMRep(..), -- CmmInfo sees the rep; no one else does IsStatic, ClosureTypeInfo(..), ArgDescr(..), Liveness, ConstrDescription, -- ** Construction mkHeapRep, blackHoleRep, indStaticRep, mkStackRep, mkRTSRep, arrPtrsRep, smallArrPtrsRep, arrWordsRep, -- ** Predicates isStaticRep, isConRep, isThunkRep, isFunRep, isStaticNoCafCon, isStackRep, -- ** Size-related things heapClosureSizeW, fixedHdrSizeW, 
arrWordsHdrSize, arrWordsHdrSizeW, arrPtrsHdrSize, arrPtrsHdrSizeW, profHdrSize, thunkHdrSize, nonHdrSize, nonHdrSizeW, smallArrPtrsHdrSize, smallArrPtrsHdrSizeW, hdrSize, hdrSizeW, fixedHdrSize, -- ** RTS closure types rtsClosureType, rET_SMALL, rET_BIG, aRG_GEN, aRG_GEN_BIG, -- ** Arrays card, cardRoundUp, cardTableSizeB, cardTableSizeW ) where import GHC.Prelude import GHC.Types.Basic( ConTagZ ) import GHC.Platform import GHC.Platform.Profile import GHC.Utils.Outputable import GHC.Utils.Panic import Data.Word import Data.ByteString (ByteString) {- ************************************************************************ * * Words and bytes * * ************************************************************************ -} -- | Byte offset, or byte count type ByteOff = Int -- | Word offset, or word count type WordOff = Int -- | Round up the given byte count to the next byte count that's a -- multiple of the machine's word size. roundUpToWords :: Platform -> ByteOff -> ByteOff roundUpToWords platform n = roundUpTo n (platformWordSizeInBytes platform) -- | Round up @base@ to a multiple of @size@. roundUpTo :: ByteOff -> ByteOff -> ByteOff roundUpTo base size = (base + (size - 1)) .&. (complement (size - 1)) -- | Convert the given number of words to a number of bytes. -- -- This function morally has type @WordOff -> ByteOff@, but uses @Num -- a@ to allow for overloading. wordsToBytes :: Num a => Platform -> a -> a wordsToBytes platform n = fromIntegral (platformWordSizeInBytes platform) * n {-# SPECIALIZE wordsToBytes :: Platform -> Int -> Int #-} {-# SPECIALIZE wordsToBytes :: Platform -> Word -> Word #-} {-# SPECIALIZE wordsToBytes :: Platform -> Integer -> Integer #-} -- | First round the given byte count up to a multiple of the -- machine's word size and then convert the result to words. 
bytesToWordsRoundUp :: Platform -> ByteOff -> WordOff bytesToWordsRoundUp platform n = (n + word_size - 1) `quot` word_size where word_size = platformWordSizeInBytes platform -- StgWord is a type representing an StgWord on the target platform. -- A Word64 is large enough to hold a Word for either a 32bit or 64bit platform newtype StgWord = StgWord Word64 deriving (Eq, Bits) fromStgWord :: StgWord -> Integer fromStgWord (StgWord i) = toInteger i toStgWord :: Platform -> Integer -> StgWord toStgWord platform i = case platformWordSize platform of -- These conversions mean that things like toStgWord (-1) -- do the right thing PW4 -> StgWord (fromIntegral (fromInteger i :: Word32)) PW8 -> StgWord (fromInteger i) instance Outputable StgWord where ppr (StgWord i) = integer (toInteger i) -- -- A Word32 is large enough to hold half a Word for either a 32bit or -- 64bit platform newtype StgHalfWord = StgHalfWord Word32 deriving Eq fromStgHalfWord :: StgHalfWord -> Integer fromStgHalfWord (StgHalfWord w) = toInteger w toStgHalfWord :: Platform -> Integer -> StgHalfWord toStgHalfWord platform i = case platformWordSize platform of -- These conversions mean that things like toStgHalfWord (-1) -- do the right thing PW4 -> StgHalfWord (fromIntegral (fromInteger i :: Word16)) PW8 -> StgHalfWord (fromInteger i :: Word32) instance Outputable StgHalfWord where ppr (StgHalfWord w) = integer (toInteger w) -- | Half word size in bytes halfWordSize :: Platform -> ByteOff halfWordSize platform = platformWordSizeInBytes platform `div` 2 halfWordSizeInBits :: Platform -> Int halfWordSizeInBits platform = platformWordSizeInBits platform `div` 2 {- ************************************************************************ * * \subsubsection[SMRep-datatype]{@SMRep@---storage manager representation} * * ************************************************************************ -} -- | A description of the layout of a closure. 
Corresponds directly -- to the closure types in includes\/rts\/storage\/ClosureTypes.h. data SMRep = HeapRep -- GC routines consult sizes in info tbl IsStatic !WordOff -- # ptr words !WordOff -- # non-ptr words INCLUDING SLOP (see mkHeapRep below) ClosureTypeInfo -- type-specific info | ArrayPtrsRep !WordOff -- # ptr words !WordOff -- # card table words | SmallArrayPtrsRep !WordOff -- # ptr words | ArrayWordsRep !WordOff -- # bytes expressed in words, rounded up | StackRep -- Stack frame (RET_SMALL or RET_BIG) Liveness | RTSRep -- The RTS needs to declare info tables with specific Int -- type tags, so this form lets us override the default SMRep -- tag for an SMRep. deriving Eq -- | True \<=> This is a static closure. Affects how we garbage-collect it. -- Static closure have an extra static link field at the end. -- Constructors do not have a static variant; see Note [static constructors] type IsStatic = Bool -- From an SMRep you can get to the closure type defined in -- rts/include/rts/storage/ClosureTypes.h. Described by the function -- rtsClosureType below. data ClosureTypeInfo = Constr ConTagZ ConstrDescription | Fun FunArity ArgDescr | Thunk | ThunkSelector SelectorOffset | BlackHole | IndStatic deriving Eq type ConstrDescription = ByteString -- result of dataConIdentity type FunArity = Int type SelectorOffset = Int -- | We represent liveness bitmaps as a Bitmap (whose internal representation -- really is a bitmap). These are pinned onto case return vectors to indicate -- the state of the stack for the garbage collector. -- -- In the compiled program, liveness bitmaps that fit inside a single word -- (StgWord) are stored as a single word, while larger bitmaps are stored as a -- pointer to an array of words. 
type Liveness = [Bool] -- One Bool per word; True <=> non-ptr or dead -- False <=> ptr -------------------------------------------------------------------------------- -- | An ArgDescr describes the argument pattern of a function data ArgDescr = ArgSpec -- Fits one of the standard patterns !Int -- RTS type identifier ARG_P, ARG_N, ... | ArgGen -- General case Liveness -- Details about the arguments | ArgUnknown -- For imported binds. -- Invariant: Never Unknown for binds of the module -- we are compiling. deriving (Eq) instance Outputable ArgDescr where ppr (ArgSpec n) = text "ArgSpec" <+> ppr n ppr (ArgGen ls) = text "ArgGen" <+> ppr ls ppr ArgUnknown = text "ArgUnknown" ----------------------------------------------------------------------------- -- Construction mkHeapRep :: Profile -> IsStatic -> WordOff -> WordOff -> ClosureTypeInfo -> SMRep mkHeapRep profile is_static ptr_wds nonptr_wds cl_type_info = HeapRep is_static ptr_wds (nonptr_wds + slop_wds) cl_type_info where slop_wds | is_static = 0 | otherwise = max 0 (minClosureSize profile - (hdr_size + payload_size)) hdr_size = closureTypeHdrSize profile cl_type_info payload_size = ptr_wds + nonptr_wds mkRTSRep :: Int -> SMRep -> SMRep mkRTSRep = RTSRep mkStackRep :: [Bool] -> SMRep mkStackRep liveness = StackRep liveness blackHoleRep :: SMRep blackHoleRep = HeapRep False 0 0 BlackHole indStaticRep :: SMRep indStaticRep = HeapRep True 1 0 IndStatic arrPtrsRep :: Platform -> WordOff -> SMRep arrPtrsRep platform elems = ArrayPtrsRep elems (cardTableSizeW platform elems) smallArrPtrsRep :: WordOff -> SMRep smallArrPtrsRep elems = SmallArrayPtrsRep elems arrWordsRep :: Platform -> ByteOff -> SMRep arrWordsRep platform bytes = ArrayWordsRep (bytesToWordsRoundUp platform bytes) ----------------------------------------------------------------------------- -- Predicates isStaticRep :: SMRep -> IsStatic isStaticRep (HeapRep is_static _ _ _) = is_static isStaticRep (RTSRep _ rep) = isStaticRep rep isStaticRep _ = False 
isStackRep :: SMRep -> Bool isStackRep StackRep{} = True isStackRep (RTSRep _ rep) = isStackRep rep isStackRep _ = False isConRep :: SMRep -> Bool isConRep (HeapRep _ _ _ Constr{}) = True isConRep _ = False isThunkRep :: SMRep -> Bool isThunkRep (HeapRep _ _ _ Thunk) = True isThunkRep (HeapRep _ _ _ ThunkSelector{}) = True isThunkRep (HeapRep _ _ _ BlackHole) = True isThunkRep (HeapRep _ _ _ IndStatic) = True isThunkRep _ = False isFunRep :: SMRep -> Bool isFunRep (HeapRep _ _ _ Fun{}) = True isFunRep _ = False isStaticNoCafCon :: SMRep -> Bool -- This should line up exactly with CONSTR_NOCAF below -- See Note [Static NoCaf constructors] isStaticNoCafCon (HeapRep _ 0 _ Constr{}) = True isStaticNoCafCon _ = False ----------------------------------------------------------------------------- -- Size-related things fixedHdrSize :: Profile -> ByteOff fixedHdrSize profile = wordsToBytes (profilePlatform profile) (fixedHdrSizeW profile) -- | Size of a closure header (StgHeader in includes\/rts\/storage\/Closures.h) fixedHdrSizeW :: Profile -> WordOff fixedHdrSizeW profile = pc_STD_HDR_SIZE (profileConstants profile) + profHdrSize profile -- | Size of the profiling part of a closure header -- (StgProfHeader in includes\/rts\/storage\/Closures.h) profHdrSize :: Profile -> WordOff profHdrSize profile = if profileIsProfiling profile then pc_PROF_HDR_SIZE (profileConstants profile) else 0 -- | The garbage collector requires that every closure is at least as -- big as this. 
minClosureSize :: Profile -> WordOff minClosureSize profile = fixedHdrSizeW profile + pc_MIN_PAYLOAD_SIZE (profileConstants profile) arrWordsHdrSize :: Profile -> ByteOff arrWordsHdrSize profile = fixedHdrSize profile + pc_SIZEOF_StgArrBytes_NoHdr (profileConstants profile) arrWordsHdrSizeW :: Profile -> WordOff arrWordsHdrSizeW profile = fixedHdrSizeW profile + (pc_SIZEOF_StgArrBytes_NoHdr (profileConstants profile) `quot` platformWordSizeInBytes (profilePlatform profile)) arrPtrsHdrSize :: Profile -> ByteOff arrPtrsHdrSize profile = fixedHdrSize profile + pc_SIZEOF_StgMutArrPtrs_NoHdr (profileConstants profile) arrPtrsHdrSizeW :: Profile -> WordOff arrPtrsHdrSizeW profile = fixedHdrSizeW profile + (pc_SIZEOF_StgMutArrPtrs_NoHdr (profileConstants profile) `quot` platformWordSizeInBytes (profilePlatform profile)) smallArrPtrsHdrSize :: Profile -> ByteOff smallArrPtrsHdrSize profile = fixedHdrSize profile + pc_SIZEOF_StgSmallMutArrPtrs_NoHdr (profileConstants profile) smallArrPtrsHdrSizeW :: Profile -> WordOff smallArrPtrsHdrSizeW profile = fixedHdrSizeW profile + (pc_SIZEOF_StgSmallMutArrPtrs_NoHdr (profileConstants profile) `quot` platformWordSizeInBytes (profilePlatform profile)) -- Thunks have an extra header word on SMP, so the update doesn't -- splat the payload. 
thunkHdrSize :: Profile -> WordOff thunkHdrSize profile = fixedHdrSizeW profile + smp_hdr where platform = profilePlatform profile smp_hdr = pc_SIZEOF_StgSMPThunkHeader (platformConstants platform) `quot` platformWordSizeInBytes platform hdrSize :: Profile -> SMRep -> ByteOff hdrSize profile rep = wordsToBytes (profilePlatform profile) (hdrSizeW profile rep) hdrSizeW :: Profile -> SMRep -> WordOff hdrSizeW profile (HeapRep _ _ _ ty) = closureTypeHdrSize profile ty hdrSizeW profile (ArrayPtrsRep _ _) = arrPtrsHdrSizeW profile hdrSizeW profile (SmallArrayPtrsRep _) = smallArrPtrsHdrSizeW profile hdrSizeW profile (ArrayWordsRep _) = arrWordsHdrSizeW profile hdrSizeW _ _ = panic "GHC.Runtime.Heap.Layout.hdrSizeW" nonHdrSize :: Platform -> SMRep -> ByteOff nonHdrSize platform rep = wordsToBytes platform (nonHdrSizeW rep) nonHdrSizeW :: SMRep -> WordOff nonHdrSizeW (HeapRep _ p np _) = p + np nonHdrSizeW (ArrayPtrsRep elems ct) = elems + ct nonHdrSizeW (SmallArrayPtrsRep elems) = elems nonHdrSizeW (ArrayWordsRep words) = words nonHdrSizeW (StackRep bs) = length bs nonHdrSizeW (RTSRep _ rep) = nonHdrSizeW rep -- | The total size of the closure, in words. heapClosureSizeW :: Profile -> SMRep -> WordOff heapClosureSizeW profile rep = case rep of HeapRep _ p np ty -> closureTypeHdrSize profile ty + p + np ArrayPtrsRep elems ct -> arrPtrsHdrSizeW profile + elems + ct SmallArrayPtrsRep elems -> smallArrPtrsHdrSizeW profile + elems ArrayWordsRep words -> arrWordsHdrSizeW profile + words _ -> panic "GHC.Runtime.Heap.Layout.heapClosureSize" closureTypeHdrSize :: Profile -> ClosureTypeInfo -> WordOff closureTypeHdrSize profile ty = case ty of Thunk -> thunkHdrSize profile ThunkSelector{} -> thunkHdrSize profile BlackHole -> thunkHdrSize profile IndStatic -> thunkHdrSize profile _ -> fixedHdrSizeW profile -- All thunks use thunkHdrSize, even if they are non-updatable. -- this is because we don't have separate closure types for -- updatable vs. 
non-updatable thunks, so the GC can't tell the -- difference. If we ever have significant numbers of non- -- updatable thunks, it might be worth fixing this. -- --------------------------------------------------------------------------- -- Arrays -- | The byte offset into the card table of the card for a given element card :: Platform -> Int -> Int card platform i = i `shiftR` pc_MUT_ARR_PTRS_CARD_BITS (platformConstants platform) -- | Convert a number of elements to a number of cards, rounding up cardRoundUp :: Platform -> Int -> Int cardRoundUp platform i = card platform (i + ((1 `shiftL` pc_MUT_ARR_PTRS_CARD_BITS (platformConstants platform)) - 1)) -- | The size of a card table, in bytes cardTableSizeB :: Platform -> Int -> ByteOff cardTableSizeB platform elems = cardRoundUp platform elems -- | The size of a card table, in words cardTableSizeW :: Platform -> Int -> WordOff cardTableSizeW platform elems = bytesToWordsRoundUp platform (cardTableSizeB platform elems) ----------------------------------------------------------------------------- -- deriving the RTS closure type from an SMRep #include "ClosureTypes.h" #include "FunTypes.h" -- Defines CONSTR, CONSTR_1_0 etc -- | Derives the RTS closure type from an 'SMRep' rtsClosureType :: SMRep -> Int rtsClosureType rep = case rep of RTSRep ty _ -> ty -- See Note [static constructors] HeapRep _ 1 0 Constr{} -> CONSTR_1_0 HeapRep _ 0 1 Constr{} -> CONSTR_0_1 HeapRep _ 2 0 Constr{} -> CONSTR_2_0 HeapRep _ 1 1 Constr{} -> CONSTR_1_1 HeapRep _ 0 2 Constr{} -> CONSTR_0_2 HeapRep _ 0 _ Constr{} -> CONSTR_NOCAF -- See Note [Static NoCaf constructors] HeapRep _ _ _ Constr{} -> CONSTR HeapRep False 1 0 Fun{} -> FUN_1_0 HeapRep False 0 1 Fun{} -> FUN_0_1 HeapRep False 2 0 Fun{} -> FUN_2_0 HeapRep False 1 1 Fun{} -> FUN_1_1 HeapRep False 0 2 Fun{} -> FUN_0_2 HeapRep False _ _ Fun{} -> FUN HeapRep False 1 0 Thunk -> THUNK_1_0 HeapRep False 0 1 Thunk -> THUNK_0_1 HeapRep False 2 0 Thunk -> THUNK_2_0 HeapRep False 1 1 Thunk -> 
THUNK_1_1 HeapRep False 0 2 Thunk -> THUNK_0_2 HeapRep False _ _ Thunk -> THUNK HeapRep False _ _ ThunkSelector{} -> THUNK_SELECTOR HeapRep True _ _ Fun{} -> FUN_STATIC HeapRep True _ _ Thunk -> THUNK_STATIC HeapRep False _ _ BlackHole -> BLACKHOLE HeapRep False _ _ IndStatic -> IND_STATIC StackRep _ -> STACK _ -> panic "rtsClosureType" -- We export these ones rET_SMALL, rET_BIG, aRG_GEN, aRG_GEN_BIG :: Int rET_SMALL = RET_SMALL rET_BIG = RET_BIG aRG_GEN = ARG_GEN aRG_GEN_BIG = ARG_GEN_BIG {- Note [static constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to have a CONSTR_STATIC closure type, and each constructor had two info tables: one with CONSTR (or CONSTR_1_0 etc.), and one with CONSTR_STATIC. This distinction was removed, because when copying a data structure into a compact region, we must copy static constructors into the compact region too. If we didn't do this, we would need to track the references from the compact region out to the static constructors, because they might (indirectly) refer to CAFs. Since static constructors will be copied to the heap, if we wanted to use different info tables for static and dynamic constructors, we would have to switch the info pointer when copying the constructor into the compact region, which means we would need an extra field of the static info table to point to the dynamic one. However, since the distinction between static and dynamic closure types is never actually needed (other than for assertions), we can just drop the distinction and use the same info table for both. The GC *does* need to distinguish between static and dynamic closures, but it does this using the HEAP_ALLOCED() macro which checks whether the address of the closure resides within the dynamic heap. HEAP_ALLOCED() doesn't read the closure's info table. 
Note [Static NoCaf constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we know that a top-level binding 'x' is not Caffy (ie no CAFs are reachable from 'x'), then a statically allocated constructor (Just x) is also not Caffy, and the garbage collector need not follow its argument fields. Exploiting this would require two static info tables for Just, for the two cases where the argument was Caffy or non-Caffy. Currently we don't do this; instead we treat nullary constructors as non-Caffy, and the others as potentially Caffy. ************************************************************************ * * Pretty printing of SMRep and friends * * ************************************************************************ -} instance Outputable ClosureTypeInfo where ppr = pprTypeInfo instance Outputable SMRep where ppr (HeapRep static ps nps tyinfo) = hang (header <+> lbrace) 2 (ppr tyinfo <+> rbrace) where header = text "HeapRep" <+> if static then text "static" else empty <+> pp_n "ptrs" ps <+> pp_n "nonptrs" nps pp_n :: String -> Int -> SDoc pp_n _ 0 = empty pp_n s n = int n <+> text s ppr (ArrayPtrsRep size _) = text "ArrayPtrsRep" <+> ppr size ppr (SmallArrayPtrsRep size) = text "SmallArrayPtrsRep" <+> ppr size ppr (ArrayWordsRep words) = text "ArrayWordsRep" <+> ppr words ppr (StackRep bs) = text "StackRep" <+> ppr bs ppr (RTSRep ty rep) = text "tag:" <> ppr ty <+> ppr rep pprTypeInfo :: ClosureTypeInfo -> SDoc pprTypeInfo (Constr tag descr) = text "Con" <+> braces (sep [ text "tag:" <+> ppr tag , text "descr:" <> text (show descr) ]) pprTypeInfo (Fun arity args) = text "Fun" <+> braces (sep [ text "arity:" <+> ppr arity , text "fun_type:" <+> ppr args ]) pprTypeInfo (ThunkSelector offset) = text "ThunkSel" <+> ppr offset pprTypeInfo Thunk = text "Thunk" pprTypeInfo BlackHole = text "BlackHole" pprTypeInfo IndStatic = text "IndStatic" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Interpreter.hs0000644000000000000000000006341614472400112022127 
0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} -- | Interacting with the iserv interpreter, whether it is running on an -- external process or in the current process. -- module GHC.Runtime.Interpreter ( module GHC.Runtime.Interpreter.Types -- * High-level interface to the interpreter , BCOOpts (..) , evalStmt, EvalStatus_(..), EvalStatus, EvalResult(..), EvalExpr(..) , resumeStmt , abandonStmt , evalIO , evalString , evalStringToIOString , mallocData , createBCOs , addSptEntry , mkCostCentres , costCentreStackInfo , newBreakArray , storeBreakpoint , breakpointStatus , getBreakpointVar , getClosure , getModBreaks , seqHValue , interpreterDynamic , interpreterProfiled -- * The object-code linker , initObjLinker , lookupSymbol , lookupClosure , loadDLL , loadArchive , loadObj , unloadObj , addLibrarySearchPath , removeLibrarySearchPath , resolveObjs , findSystemLibrary -- * Lower-level API using messages , interpCmd, Message(..), withIServ, withIServ_ , stopInterp , iservCall, readIServ, writeIServ , purgeLookupSymbolCache , freeHValueRefs , mkFinalizedHValue , wormhole, wormholeRef , fromEvalResult ) where import GHC.Prelude import GHC.IO (catchException) import GHC.Runtime.Interpreter.Types import GHCi.Message import GHCi.RemoteTypes import GHCi.ResolvedBCO import GHCi.BreakArray (BreakArray) import GHC.Types.BreakInfo (BreakInfo(..)) import GHC.ByteCode.Types import GHC.Linker.Types import GHC.Data.Maybe import GHC.Data.FastString import GHC.Types.Unique import GHC.Types.SrcLoc import GHC.Types.Unique.FM import GHC.Types.Basic import GHC.Utils.Panic import GHC.Utils.Exception as Ex import GHC.Utils.Outputable(brackets, ppr, showSDocUnsafe) import GHC.Utils.Fingerprint import GHC.Utils.Misc import GHC.Unit.Module import GHC.Unit.Module.ModIface import GHC.Unit.Home.ModInfo import GHC.Unit.Env #if defined(HAVE_INTERNAL_INTERPRETER) import 
GHCi.Run import GHC.Platform.Ways #endif import Control.Concurrent import Control.Monad import Control.Monad.IO.Class import Control.Monad.Catch as MC (mask, onException) import Data.Binary import Data.Binary.Put import Data.ByteString (ByteString) import qualified Data.ByteString.Lazy as LB import Data.Array ((!)) import Data.IORef import Foreign hiding (void) import qualified GHC.Exts.Heap as Heap import GHC.Stack.CCS (CostCentre,CostCentreStack) import System.Exit import GHC.IO.Handle.Types (Handle) #if defined(mingw32_HOST_OS) import Foreign.C import GHC.IO.Handle.FD (fdToHandle) #else import System.Posix as Posix #endif import System.Directory import System.Process import GHC.Conc (pseq, par) {- Note [Remote GHCi] ~~~~~~~~~~~~~~~~~~ When the flag -fexternal-interpreter is given to GHC, interpreted code is run in a separate process called iserv, and we communicate with the external process over a pipe using Binary-encoded messages. Motivation ~~~~~~~~~~ When the interpreted code is running in a separate process, it can use a different "way", e.g. profiled or dynamic. This means - compiling Template Haskell code with -prof does not require building the code without -prof first - when GHC itself is profiled, it can interpret unprofiled code, and the same applies to dynamic linking. - An unprofiled GHCi can load and run profiled code, which means it can use the stack-trace functionality provided by profiling without taking the performance hit on the compiler that profiling would entail. For other reasons see remote-GHCi on the wiki. Implementation Overview ~~~~~~~~~~~~~~~~~~~~~~~ The main pieces are: - libraries/ghci, containing: - types for talking about remote values (GHCi.RemoteTypes) - the message protocol (GHCi.Message), - implementation of the messages (GHCi.Run) - implementation of Template Haskell (GHCi.TH) - a few other things needed to run interpreted code - top-level iserv directory, containing the codefor the external server. 
This is a fairly simple wrapper, most of the functionality is provided by modules in libraries/ghci. - This module which provides the interface to the server used by the rest of GHC. GHC works with and without -fexternal-interpreter. With the flag, all interpreted code is run by the iserv binary. Without the flag, interpreted code is run in the same process as GHC. Things that do not work with -fexternal-interpreter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ dynCompileExpr cannot work, because we have no way to run code of an unknown type in the remote process. This API fails with an error message if it is used with -fexternal-interpreter. Other Notes on Remote GHCi ~~~~~~~~~~~~~~~~~~~~~~~~~~ * This wiki page has an implementation overview: https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/external-interpreter * Note [External GHCi pointers] in "GHC.Runtime.Interpreter" * Note [Remote Template Haskell] in libraries/ghci/GHCi/TH.hs -} -- | Run a command in the interpreter's context. With -- @-fexternal-interpreter@, the command is serialized and sent to an -- external iserv process, and the response is deserialized (hence the -- @Binary@ constraint). With @-fno-external-interpreter@ we execute -- the command directly here. interpCmd :: Binary a => Interp -> Message a -> IO a interpCmd interp msg = case interpInstance interp of #if defined(HAVE_INTERNAL_INTERPRETER) InternalInterp -> run msg -- Just run it directly #endif ExternalInterp c i -> withIServ_ c i $ \iserv -> uninterruptibleMask_ $ -- Note [uninterruptibleMask_] iservCall iserv msg -- Note [uninterruptibleMask_ and interpCmd] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- If we receive an async exception, such as ^C, while communicating -- with the iserv process then we will be out-of-sync and not be able -- to recover. Thus we use uninterruptibleMask_ during -- communication. 
A ^C will be delivered to the iserv process (because -- signals get sent to the whole process group) which will interrupt -- the running computation and return an EvalException result. -- | Grab a lock on the 'IServ' and do something with it. -- Overloaded because this is used from TcM as well as IO. withIServ :: (ExceptionMonad m) => IServConfig -> IServ -> (IServInstance -> m (IServInstance, a)) -> m a withIServ conf (IServ mIServState) action = MC.mask $ \restore -> do state <- liftIO $ takeMVar mIServState iserv <- case state of -- start the external iserv process if we haven't done so yet IServPending -> liftIO (spawnIServ conf) `MC.onException` (liftIO $ putMVar mIServState state) IServRunning inst -> return inst let iserv' = iserv{ iservPendingFrees = [] } (iserv'',a) <- (do -- free any ForeignHValues that have been garbage collected. liftIO $ when (not (null (iservPendingFrees iserv))) $ iservCall iserv (FreeHValueRefs (iservPendingFrees iserv)) -- run the inner action restore $ action iserv') `MC.onException` (liftIO $ putMVar mIServState (IServRunning iserv')) liftIO $ putMVar mIServState (IServRunning iserv'') return a withIServ_ :: (MonadIO m, ExceptionMonad m) => IServConfig -> IServ -> (IServInstance -> m a) -> m a withIServ_ conf iserv action = withIServ conf iserv $ \inst -> (inst,) <$> action inst -- ----------------------------------------------------------------------------- -- Wrappers around messages -- | Execute an action of type @IO [a]@, returning 'ForeignHValue's for -- each of the results. 
evalStmt
  :: Interp -> EvalOpts -> EvalExpr ForeignHValue
  -> IO (EvalStatus_ [ForeignHValue] [HValueRef])
evalStmt interp opts foreign_expr = do
    status <- withExpr foreign_expr $ \expr ->
                interpCmd interp (EvalStmt opts expr)
    handleEvalStatus interp status
  where
    -- Peel the 'ForeignHValue's off the expression tree, rebuilding it with
    -- plain 'HValueRef's while keeping the foreign refs alive for the call.
    withExpr :: EvalExpr ForeignHValue -> (EvalExpr HValueRef -> IO a) -> IO a
    withExpr (EvalThis fhv) k =
      withForeignRef fhv $ \hvref -> k (EvalThis hvref)
    withExpr (EvalApp fun arg) k =
      withExpr fun $ \fun' ->
        withExpr arg $ \arg' ->
          k (EvalApp fun' arg')

resumeStmt
  :: Interp -> EvalOpts -> ForeignRef (ResumeContext [HValueRef])
  -> IO (EvalStatus_ [ForeignHValue] [HValueRef])
resumeStmt interp opts resume_ctxt = do
    status <- withForeignRef resume_ctxt $ \rhv ->
                interpCmd interp (ResumeStmt opts rhv)
    handleEvalStatus interp status

abandonStmt :: Interp -> ForeignRef (ResumeContext [HValueRef]) -> IO ()
abandonStmt interp resume_ctxt =
  withForeignRef resume_ctxt $ \rhv ->
    interpCmd interp (AbandonStmt rhv)

handleEvalStatus
  :: Interp -> EvalStatus [HValueRef]
  -> IO (EvalStatus_ [ForeignHValue] [HValueRef])
handleEvalStatus interp status = case status of
    EvalBreak a b c d e f  -> pure (EvalBreak a b c d e f)
    EvalComplete alloc res -> EvalComplete alloc <$> addFinalizer res
  where
    -- Wrap each returned reference in a 'ForeignRef' so it is released once
    -- the GHC side drops it.
    addFinalizer (EvalException e) = pure (EvalException e)
    addFinalizer (EvalSuccess rs)  =
      EvalSuccess <$> mapM (mkFinalizedHValue interp) rs

-- | Execute an action of type @IO ()@
evalIO :: Interp -> ForeignHValue -> IO ()
evalIO interp fhv =
  liftIO $ withForeignRef fhv $ \hvref ->
    interpCmd interp (EvalIO hvref) >>= fromEvalResult

-- | Execute an action of type @IO String@
evalString :: Interp -> ForeignHValue -> IO String
evalString interp fhv =
  liftIO $ withForeignRef fhv $ \hvref ->
    interpCmd interp (EvalString hvref) >>= fromEvalResult

-- | Execute an action of type @String -> IO String@
evalStringToIOString :: Interp -> ForeignHValue -> String -> IO String
evalStringToIOString interp fhv str =
  liftIO $ withForeignRef fhv $ \hvref ->
    interpCmd interp (EvalStringToString hvref str) >>= fromEvalResult

-- | Allocate and store the given bytes in memory, returning a pointer
-- to the memory in the remote process.
mallocData :: Interp -> ByteString -> IO (RemotePtr ())
mallocData interp bs = interpCmd interp (MallocData bs)

mkCostCentres :: Interp -> String -> [(String,String)] -> IO [RemotePtr CostCentre]
mkCostCentres interp mod_name ccs =
  interpCmd interp (MkCostCentres mod_name ccs)

newtype BCOOpts = BCOOpts
  { bco_n_jobs :: Int -- ^ Number of parallel jobs doing BCO serialization
  }

-- | Create a set of BCOs that may be mutually recursive.
createBCOs :: Interp -> BCOOpts -> [ResolvedBCO] -> IO [HValueRef]
createBCOs interp opts rbcos = do
    let n_jobs = bco_n_jobs opts
    -- Serializing ResolvedBCO is expensive, so if we support doing it in parallel
    if n_jobs == 1
      then interpCmd interp (CreateBCOs [runPut (put rbcos)])
      else do
        old_caps <- getNumCapabilities
        if old_caps == n_jobs
          then void $ evaluate serialized
          else bracket_ (setNumCapabilities n_jobs)
                        (setNumCapabilities old_caps)
                        (void $ evaluate serialized)
        interpCmd interp (CreateBCOs serialized)
  where
    -- Chunked, sparked serialisation of the BCOs.
    serialized = parMap serialiseChunk (chunkList 100 rbcos)

    -- make sure we force the whole lazy ByteString
    serialiseChunk c = pseq (LB.length bs) bs
      where bs = runPut (put c)

    -- We don't have the parallel package, so roll our own simple parMap
    parMap _ [] = []
    parMap f (x:xs) = fx `par` (fxs `pseq` (fx : fxs))
      where fx = f x; fxs = parMap f xs

addSptEntry :: Interp -> Fingerprint -> ForeignHValue -> IO ()
addSptEntry interp fpr ref =
  withForeignRef ref $ \val ->
    interpCmd interp (AddSptEntry fpr val)

costCentreStackInfo :: Interp -> RemotePtr CostCentreStack -> IO [String]
costCentreStackInfo interp ccs =
  interpCmd interp (CostCentreStackInfo ccs)

newBreakArray :: Interp -> Int -> IO (ForeignRef BreakArray)
newBreakArray interp size = do
  breakArray <- interpCmd interp (NewBreakArray size)
  mkFinalizedHValue interp breakArray

storeBreakpoint :: Interp -> ForeignRef BreakArray -> Int -> Int -> IO ()
storeBreakpoint interp ref ix cnt = do -- #19157
  withForeignRef ref $ \breakarray ->
    interpCmd interp (SetupBreakpoint breakarray ix cnt)

breakpointStatus :: Interp -> ForeignRef BreakArray -> Int -> IO Bool
breakpointStatus interp ref ix =
  withForeignRef ref $ \breakarray ->
    interpCmd interp (BreakpointStatus breakarray ix)

getBreakpointVar :: Interp -> ForeignHValue -> Int -> IO (Maybe ForeignHValue)
getBreakpointVar interp ref ix =
  withForeignRef ref $ \apStack -> do
    mb <- interpCmd interp (GetBreakpointVar apStack ix)
    mapM (mkFinalizedHValue interp) mb

getClosure :: Interp -> ForeignHValue -> IO (Heap.GenClosure ForeignHValue)
getClosure interp ref =
  withForeignRef ref $ \hval -> do
    mb <- interpCmd interp (GetClosure hval)
    mapM (mkFinalizedHValue interp) mb

-- | Send a Seq message to the iserv process to force a value      #2950
seqHValue :: Interp -> UnitEnv -> ForeignHValue -> IO (EvalResult ())
seqHValue interp unit_env ref =
  withForeignRef ref $ \hval -> do
    status <- interpCmd interp (Seq hval)
    handleSeqHValueStatus interp unit_env status

-- | Process the result of a Seq or ResumeSeq message.             #2950
-- Any breakpoint hit while forcing is reported and then ignored: we keep
-- resuming until the Seq completes.
handleSeqHValueStatus :: Interp -> UnitEnv -> EvalStatus () -> IO (EvalResult ())
handleSeqHValueStatus interp unit_env eval_status =
  case eval_status of
    (EvalBreak is_exception _ ix mod_uniq resume_ctxt _) -> do
      -- A breakpoint was hit; inform the user and tell them
      -- which breakpoint was hit.
      resume_ctxt_fhv <- liftIO $ mkFinalizedHValue interp resume_ctxt
      let hmi = expectJust "handleRunStatus" $
                  lookupHptDirectly (ue_hpt unit_env)
                    (mkUniqueGrimily mod_uniq)
          modl = mi_module (hm_iface hmi)
          bp | is_exception = Nothing
             | otherwise = Just (BreakInfo modl ix)
          sdocBpLoc = brackets . ppr . getSeqBpSpan
      putStrLn ("*** Ignoring breakpoint " ++
            (showSDocUnsafe $ sdocBpLoc bp))
      -- resume the seq (:force) processing in the iserv process
      withForeignRef resume_ctxt_fhv $ \hval -> do
        status <- interpCmd interp (ResumeSeq hval)
        handleSeqHValueStatus interp unit_env status
    (EvalComplete _ r) -> return r
  where
    getSeqBpSpan :: Maybe BreakInfo -> SrcSpan
    -- Just case: Stopped at a breakpoint, extract SrcSpan information
    -- from the breakpoint.
    getSeqBpSpan (Just BreakInfo{..}) =
      (modBreaks_locs (breaks breakInfo_module)) ! breakInfo_number
    -- Nothing case - should not occur!
    -- Reason: Setting of flags in libraries/ghci/GHCi/Run.hs:evalOptsSeq
    getSeqBpSpan Nothing = mkGeneralSrcSpan (fsLit "")
    breaks mod = getModBreaks $ expectJust "getSeqBpSpan" $
                  lookupHpt (ue_hpt unit_env) (moduleName mod)

-- -----------------------------------------------------------------------------
-- Interface to the object-code linker

initObjLinker :: Interp -> IO ()
initObjLinker interp = interpCmd interp InitLinker

lookupSymbol :: Interp -> FastString -> IO (Maybe (Ptr ()))
lookupSymbol interp str = case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
  InternalInterp -> fmap fromRemotePtr <$> run (LookupSymbol (unpackFS str))
#endif

  ExternalInterp c i -> withIServ c i $ \iserv -> do
    -- Profiling of GHCi showed a lot of time and allocation spent
    -- making cross-process LookupSymbol calls, so I added a GHC-side
    -- cache which sped things up quite a lot. We have to be careful
    -- to purge this cache when unloading code though.
    let cache = iservLookupSymbolCache iserv
    case lookupUFM cache str of
      Just p  -> return (iserv, Just p)
      Nothing -> do
        m <- uninterruptibleMask_ $
                 iservCall iserv (LookupSymbol (unpackFS str))
        case m of
          Nothing -> return (iserv, Nothing)
          Just r  -> do
            -- Cache the hit so the next lookup for this symbol stays local.
            let p      = fromRemotePtr r
                cache' = addToUFM cache str p
                iserv' = iserv {iservLookupSymbolCache = cache'}
            return (iserv', Just p)

lookupClosure :: Interp -> String -> IO (Maybe HValueRef)
lookupClosure interp str =
  interpCmd interp (LookupClosure str)

purgeLookupSymbolCache :: Interp -> IO ()
purgeLookupSymbolCache interp = case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
  InternalInterp -> pure ()
#endif
  ExternalInterp _ (IServ mstate) ->
    modifyMVar_ mstate $ \state -> pure $ case state of
      IServPending       -> state
      IServRunning iserv -> IServRunning
        (iserv { iservLookupSymbolCache = emptyUFM })

-- | loadDLL loads a dynamic library using the OS's native linker
-- (i.e. dlopen() on Unix, LoadLibrary() on Windows). It takes either
-- an absolute pathname to the file, or a relative filename
-- (e.g. "libfoo.so" or "foo.dll"). In the latter case, loadDLL
-- searches the standard locations for the appropriate library.
-- -- Returns: -- -- Nothing => success -- Just err_msg => failure loadDLL :: Interp -> String -> IO (Maybe String) loadDLL interp str = interpCmd interp (LoadDLL str) loadArchive :: Interp -> String -> IO () loadArchive interp path = do path' <- canonicalizePath path -- Note [loadObj and relative paths] interpCmd interp (LoadArchive path') loadObj :: Interp -> String -> IO () loadObj interp path = do path' <- canonicalizePath path -- Note [loadObj and relative paths] interpCmd interp (LoadObj path') unloadObj :: Interp -> String -> IO () unloadObj interp path = do path' <- canonicalizePath path -- Note [loadObj and relative paths] interpCmd interp (UnloadObj path') -- Note [loadObj and relative paths] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- the iserv process might have a different current directory from the -- GHC process, so we must make paths absolute before sending them -- over. addLibrarySearchPath :: Interp -> String -> IO (Ptr ()) addLibrarySearchPath interp str = fromRemotePtr <$> interpCmd interp (AddLibrarySearchPath str) removeLibrarySearchPath :: Interp -> Ptr () -> IO Bool removeLibrarySearchPath interp p = interpCmd interp (RemoveLibrarySearchPath (toRemotePtr p)) resolveObjs :: Interp -> IO SuccessFlag resolveObjs interp = successIf <$> interpCmd interp ResolveObjs findSystemLibrary :: Interp -> String -> IO (Maybe String) findSystemLibrary interp str = interpCmd interp (FindSystemLibrary str) -- ----------------------------------------------------------------------------- -- Raw calls and messages -- | Send a 'Message' and receive the response from the iserv process iservCall :: Binary a => IServInstance -> Message a -> IO a iservCall iserv msg = remoteCall (iservPipe iserv) msg `catchException` \(e :: SomeException) -> handleIServFailure iserv e -- | Read a value from the iserv process readIServ :: IServInstance -> Get a -> IO a readIServ iserv get = readPipe (iservPipe iserv) get `catchException` \(e :: SomeException) -> handleIServFailure iserv e 
-- | Send a value to the iserv process
writeIServ :: IServInstance -> Put -> IO ()
writeIServ iserv put =
  writePipe (iservPipe iserv) put
    `catchException` \(e :: SomeException) -> handleIServFailure iserv e

handleIServFailure :: IServInstance -> SomeException -> IO a
handleIServFailure iserv e = do
  let proc = iservProcess iserv
  ex <- getProcessExitCode proc
  case ex of
    -- The process died on its own: report its exit code.
    Just (ExitFailure n) ->
      throwIO (InstallationError ("ghc-iserv terminated (" ++ show n ++ ")"))
    -- Otherwise kill it, reap it, and rethrow the original exception.
    _ -> do
      terminateProcess proc
      _ <- waitForProcess proc
      throw e

-- | Spawn an external interpreter
spawnIServ :: IServConfig -> IO IServInstance
spawnIServ conf = do
  iservConfTrace conf
  -- Use the configured hook to create the process, if any (used by tests).
  let createProc = fromMaybe (\cp -> do { (_,_,_,ph) <- createProcess cp
                                        ; return ph })
                             (iservConfHook conf)
  (ph, rh, wh) <- runWithPipes createProc (iservConfProgram conf)
                                          (iservConfOpts    conf)
  lo_ref <- newIORef Nothing
  return $ IServInstance
    { iservPipe              = Pipe { pipeRead = rh
                                    , pipeWrite = wh
                                    , pipeLeftovers = lo_ref }
    , iservProcess           = ph
    , iservLookupSymbolCache = emptyUFM
    , iservPendingFrees      = []
    }

-- | Stop the interpreter
stopInterp :: Interp -> IO ()
stopInterp interp = case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
  InternalInterp -> pure ()
#endif
  ExternalInterp _ (IServ mstate) ->
    -- Masked so the Shutdown message cannot be interrupted half-way.
    MC.mask $ \_restore -> modifyMVar_ mstate $ \state -> do
      case state of
        IServPending   -> pure state -- already stopped
        IServRunning i -> do
          ex <- getProcessExitCode (iservProcess i)
          if isJust ex
            then pure ()                  -- process already exited
            else iservCall i Shutdown
          -- Back to the not-running state; a later use respawns.
          pure IServPending

runWithPipes :: (CreateProcess -> IO ProcessHandle)
             -> FilePath -> [String] -> IO (ProcessHandle, Handle, Handle)
#if defined(mingw32_HOST_OS)
foreign import ccall "io.h _close"
   c__close :: CInt -> IO CInt

foreign import ccall unsafe "io.h _get_osfhandle"
   _get_osfhandle :: CInt -> IO CInt

runWithPipes createProc prog opts = do
    (rfd1, wfd1) <- createPipeFd -- we read on rfd1
    (rfd2, wfd2) <- createPipeFd -- we write on wfd2
    -- The child is passed OS HANDLEs (not CRT fds) on Windows.
    wh_client    <- _get_osfhandle wfd1
    rh_client    <- _get_osfhandle rfd2
    let args = show wh_client : show rh_client : opts
    ph <- createProc (proc prog args)
    rh <- mkHandle rfd1
    wh <- mkHandle wfd2
    return (ph, rh, wh)
      where mkHandle :: CInt -> IO Handle
            mkHandle fd = (fdToHandle fd) `Ex.onException` (c__close fd)
#else
runWithPipes createProc prog opts = do
    (rfd1, wfd1) <- Posix.createPipe -- we read on rfd1
    (rfd2, wfd2) <- Posix.createPipe -- we write on wfd2
    -- Don't leak our ends of the pipes into the child process.
    setFdOption rfd1 CloseOnExec True
    setFdOption wfd2 CloseOnExec True
    let args = show wfd1 : show rfd2 : opts
    ph <- createProc (proc prog args)
    closeFd wfd1
    closeFd rfd2
    rh <- fdToHandle rfd1
    wh <- fdToHandle wfd2
    return (ph, rh, wh)
#endif

-- -----------------------------------------------------------------------------
{- Note [External GHCi pointers]
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   We have the following ways to reference things in GHCi:

   HValue
   ------
   HValue is a direct reference to a value in the local heap. Obviously
   we cannot use this to refer to things in the external process.

   RemoteRef
   ---------
   RemoteRef is a StablePtr to a heap-resident value. When
   -fexternal-interpreter is used, this value resides in the external
   process's heap. RemoteRefs are mostly used to send pointers in
   messages between GHC and iserv.

   A RemoteRef must be explicitly freed when no longer required, using
   freeHValueRefs, or by attaching a finalizer with mkForeignHValue.

   To get from a RemoteRef to an HValue you can use 'wormholeRef', which
   fails with an error message if -fexternal-interpreter is in use.

   ForeignRef
   ----------
   A ForeignRef is a RemoteRef with a finalizer that will free the
   'RemoteRef' when it is garbage collected. We mostly use ForeignHValue
   on the GHC side.

   The finalizer adds the RemoteRef to the iservPendingFrees list in the
   IServ record. The next call to interpCmd will free any RemoteRefs in
   the list. It was done this way rather than calling interpCmd directly,
   because I didn't want to have arbitrary threads calling interpCmd.
In principle it would probably be ok, but it seems less hairy this way.
-}

-- | Creates a 'ForeignRef' that will automatically release the
-- 'RemoteRef' when it is no longer referenced.
mkFinalizedHValue :: Interp -> RemoteRef a -> IO (ForeignRef a)
mkFinalizedHValue interp rref = do
   let hvref = toHValueRef rref

   -- The free action runs from the GC finalizer thread, so it only queues
   -- the ref on iservPendingFrees; see Note [External GHCi pointers].
   free <- case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
      InternalInterp             -> return (freeRemoteRef hvref)
#endif
      ExternalInterp _ (IServ i) -> return $ modifyMVar_ i $ \state ->
        case state of
          -- Interpreter not running (never started, or shut down again):
          -- nothing to free remotely.
          IServPending {}   -> pure state
          IServRunning inst -> do
            let !inst' = inst {iservPendingFrees = hvref:iservPendingFrees inst}
            pure (IServRunning inst')

   mkForeignRef rref free

freeHValueRefs :: Interp -> [HValueRef] -> IO ()
freeHValueRefs _ [] = return ()
freeHValueRefs interp refs = interpCmd interp (FreeHValueRefs refs)

-- | Convert a 'ForeignRef' to the value it references directly. This
-- only works when the interpreter is running in the same process as
-- the compiler, so it fails when @-fexternal-interpreter@ is on.
wormhole :: Interp -> ForeignRef a -> IO a
wormhole interp r = wormholeRef interp (unsafeForeignRefToRemoteRef r)

-- | Convert an 'RemoteRef' to the value it references directly. This
-- only works when the interpreter is running in the same process as
-- the compiler, so it fails when @-fexternal-interpreter@ is on.
wormholeRef :: Interp -> RemoteRef a -> IO a
wormholeRef interp _r = case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
  InternalInterp    -> localRef _r
#endif
  ExternalInterp {} -> throwIO (InstallationError
    "this operation requires -fno-external-interpreter")

-- -----------------------------------------------------------------------------
-- Misc utils

fromEvalResult :: EvalResult a -> IO a
fromEvalResult (EvalException e) = throwIO (fromSerializableException e)
fromEvalResult (EvalSuccess a)   = return a

getModBreaks :: HomeModInfo -> ModBreaks
getModBreaks hmi
  | Just linkable <- hm_linkable hmi,
    [cbc] <- mapMaybe onlyBCOs $ linkableUnlinked linkable
  = fromMaybe emptyModBreaks (bc_breaks cbc)
  | otherwise
  = emptyModBreaks -- probably object code
  where
    -- The linkable may have 'DotO's as well; only consider BCOs. See #20570.
    onlyBCOs :: Unlinked -> Maybe CompiledByteCode
    onlyBCOs (BCOs cbc _) = Just cbc
    onlyBCOs _            = Nothing

-- | Interpreter uses Profiling way
interpreterProfiled :: Interp -> Bool
interpreterProfiled interp = case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
  InternalInterp     -> hostIsProfiled
#endif
  ExternalInterp c _ -> iservConfProfiled c

-- | Interpreter uses Dynamic way
interpreterDynamic :: Interp -> Bool
interpreterDynamic interp = case interpInstance interp of
#if defined(HAVE_INTERNAL_INTERPRETER)
  InternalInterp     -> hostIsDynamic
#endif
  ExternalInterp c _ -> iservConfDynamic c
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Runtime/Interpreter/Types.hs0000644000000000000000000000432414472400113023225 0ustar0000000000000000{-# LANGUAGE CPP #-}

-- | Types used by the runtime interpreter
module GHC.Runtime.Interpreter.Types
   ( Interp(..)
   , InterpInstance(..)
   , IServ(..)
   , IServInstance(..)
   , IServConfig(..)
   , IServState(..)
   )
where

import GHC.Prelude
import GHC.Linker.Types

import GHCi.RemoteTypes
import GHCi.Message ( Pipe )
import GHC.Types.Unique.FM
import GHC.Data.FastString ( FastString )
import Foreign

import Control.Concurrent
import System.Process ( ProcessHandle, CreateProcess )

-- | Interpreter
data Interp = Interp
  { interpInstance :: !InterpInstance
      -- ^ Interpreter instance (internal, external)

  , interpLoader   :: !Loader
      -- ^ Interpreter loader
  }

data InterpInstance
   = ExternalInterp !IServConfig !IServ -- ^ External interpreter
#if defined(HAVE_INTERNAL_INTERPRETER)
   | InternalInterp                     -- ^ Internal interpreter
#endif

-- | External interpreter
--
-- The external interpreter is spawned lazily (on first use) to avoid slowing
-- down sessions that don't require it. The contents of the MVar reflects the
-- state of the interpreter (running or not).
newtype IServ = IServ (MVar IServState)

-- | State of an external interpreter
data IServState
   = IServPending                 -- ^ Not spawned yet
   | IServRunning !IServInstance  -- ^ Running

-- | Configuration needed to spawn an external interpreter
data IServConfig = IServConfig
  { iservConfProgram  :: !String   -- ^ External program to run
  , iservConfOpts     :: ![String] -- ^ Command-line options
  , iservConfProfiled :: !Bool     -- ^ Use Profiling way
  , iservConfDynamic  :: !Bool     -- ^ Use Dynamic way
  , iservConfHook     :: !(Maybe (CreateProcess -> IO ProcessHandle)) -- ^ Hook
  , iservConfTrace    :: IO ()     -- ^ Trace action executed after spawn
  }

-- | External interpreter instance
data IServInstance = IServInstance
  { iservPipe              :: !Pipe
  , iservProcess           :: !ProcessHandle
  , iservLookupSymbolCache :: !(UniqFM FastString (Ptr ()))
  , iservPendingFrees      :: ![HValueRef]
      -- ^ Values that need to be freed before the next command is sent.
      -- Threads can append values to this list asynchronously (by modifying the
      -- IServ state MVar).
  }
ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Serialized.hs0000644000000000000000000001477114470055371022153 0ustar0000000000000000{-# LANGUAGE GADTs #-}
{-# LANGUAGE RankNTypes, ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-name-shadowing #-}
--
-- (c) The University of Glasgow 2002-2006
--

-- Serialized values
module GHC.Serialized (
    -- * Main Serialized data type
    Serialized(..),

    -- * Going into and out of 'Serialized'
    toSerialized, fromSerialized,

    -- * Handy serialization functions
    serializeWithData, deserializeWithData,
  ) where

import Prelude -- See note [Why do we import Prelude here?]
import Data.Bits
import Data.Word ( Word8 )
import Data.Data

-- | Represents a serialized value of a particular type. Attempts can be made to deserialize it at certain types
data Serialized = Serialized TypeRep [Word8]

-- | Put a Typeable value that we are able to actually turn into bytes into a 'Serialized' value ready for deserialization later
toSerialized :: forall a. Typeable a => (a -> [Word8]) -> a -> Serialized
toSerialized serialize what = Serialized (typeOf what) (serialize what)

-- | If the 'Serialized' value contains something of the given type, then use the specified deserializer to return @Just@ that.
-- Otherwise return @Nothing@.
fromSerialized :: forall a. Typeable a => ([Word8] -> a) -> Serialized -> Maybe a
fromSerialized deserialize (Serialized the_type bytes)
  | the_type == rep = Just (deserialize bytes)
  | otherwise       = Nothing
  where rep = typeRep (Proxy :: Proxy a)

-- | Use a 'Data' instance to implement a serialization scheme dual to that of 'deserializeWithData'
serializeWithData :: Data a => a -> [Word8]
serializeWithData what = serializeWithData' what []

serializeWithData' :: Data a => a -> [Word8] -> [Word8]
serializeWithData' what = fst $ gfoldl (\(before, a_to_b) a -> (before .
                                                                  serializeWithData' a, a_to_b a))
                                       (\x -> (serializeConstr (constrRep (toConstr what)), x))
                                       what

-- | Use a 'Data' instance to implement a deserialization scheme dual to that of 'serializeWithData'
deserializeWithData :: Data a => [Word8] -> a
deserializeWithData = snd . deserializeWithData'

deserializeWithData' :: forall a. Data a => [Word8] -> ([Word8], a)
deserializeWithData' bytes = deserializeConstr bytes $ \constr_rep bytes ->
    gunfold (\(bytes, b_to_r) -> let (bytes', b) = deserializeWithData' bytes
                                 in (bytes', b_to_r b))
            (\x -> (bytes, x))
            (repConstr (dataTypeOf (undefined :: a)) constr_rep)

-- | Encode a constructor representation as a one-byte tag plus payload.
serializeConstr :: ConstrRep -> [Word8] -> [Word8]
serializeConstr (AlgConstr ix)   = serializeWord8 1 . serializeInt ix
serializeConstr (IntConstr i)    = serializeWord8 2 . serializeInteger i
serializeConstr (FloatConstr r)  = serializeWord8 3 . serializeRational r
serializeConstr (CharConstr c)   = serializeWord8 4 . serializeChar c

-- | Inverse of 'serializeConstr': dispatch on the one-byte tag.
deserializeConstr :: [Word8] -> (ConstrRep -> [Word8] -> a) -> a
deserializeConstr bytes k = deserializeWord8 bytes $ \constr_ix bytes ->
    case constr_ix of
        1 -> deserializeInt      bytes $ \ix -> k (AlgConstr ix)
        2 -> deserializeInteger  bytes $ \i  -> k (IntConstr i)
        3 -> deserializeRational bytes $ \r  -> k (FloatConstr r)
        4 -> deserializeChar     bytes $ \c  -> k (CharConstr c)
        x -> error $ "deserializeConstr: unrecognised serialized constructor type " ++ show x ++ " in context " ++ show bytes

-- | Little-endian, fixed-width encoding: one byte per 8 bits of the type.
serializeFixedWidthNum :: forall a. (Integral a, FiniteBits a) => a -> [Word8] -> [Word8]
serializeFixedWidthNum what = go (finiteBitSize what) what
  where
    go :: Int -> a -> [Word8] -> [Word8]
    go size current rest
      | size <= 0 = rest
      | otherwise = fromIntegral (current .&. 255) : go (size - 8) (current `shiftR` 8) rest

deserializeFixedWidthNum :: forall a b.
    (Integral a, FiniteBits a) => [Word8] -> (a -> [Word8] -> b) -> b
deserializeFixedWidthNum bytes k = go (finiteBitSize (undefined :: a)) bytes k
  where
    go :: Int -> [Word8] -> (a -> [Word8] -> b) -> b
    go size bytes k
      | size <= 0 = k 0 bytes
      | otherwise = case bytes of
          (byte:bytes) -> go (size - 8) bytes (\x -> k ((x `shiftL` 8) .|. fromIntegral byte))
          []           -> error "deserializeFixedWidthNum: unexpected end of stream"

-- | Serialize via 'fromEnum' (used for 'Char' in strings).
serializeEnum :: (Enum a) => a -> [Word8] -> [Word8]
serializeEnum = serializeInt . fromEnum

deserializeEnum :: Enum a => [Word8] -> (a -> [Word8] -> b) -> b
deserializeEnum bytes k = deserializeInt bytes (k . toEnum)

serializeWord8 :: Word8 -> [Word8] -> [Word8]
serializeWord8 x = (x:)

deserializeWord8 :: [Word8] -> (Word8 -> [Word8] -> a) -> a
deserializeWord8 (byte:bytes) k = k byte bytes
deserializeWord8 []           _ = error "deserializeWord8: unexpected end of stream"

serializeInt :: Int -> [Word8] -> [Word8]
serializeInt = serializeFixedWidthNum

deserializeInt :: [Word8] -> (Int -> [Word8] -> a) -> a
deserializeInt = deserializeFixedWidthNum

-- | Rationals are serialized via their 'show'n 'Rational' form.
serializeRational :: (Real a) => a -> [Word8] -> [Word8]
serializeRational = serializeString . show . toRational

deserializeRational :: (Fractional a) => [Word8] -> (a -> [Word8] -> b) -> b
deserializeRational bytes k = deserializeString bytes (k . fromRational . read)

-- | Integers are unbounded, so they go through 'show' rather than a
-- fixed-width encoding.
serializeInteger :: Integer -> [Word8] -> [Word8]
serializeInteger = serializeString . show

deserializeInteger :: [Word8] -> (Integer -> [Word8] -> a) -> a
deserializeInteger bytes k = deserializeString bytes (k . read)

serializeChar :: Char -> [Word8] -> [Word8]
serializeChar = serializeString . show

deserializeChar :: [Word8] -> (Char -> [Word8] -> a) -> a
deserializeChar bytes k = deserializeString bytes (k . read)

serializeString :: String -> [Word8] -> [Word8]
serializeString = serializeList serializeEnum

deserializeString :: [Word8] -> (String -> [Word8] -> a) -> a
deserializeString = deserializeList deserializeEnum

-- | Length-prefixed list encoding.
serializeList :: (a -> [Word8] -> [Word8]) -> [a] -> [Word8] -> [Word8]
serializeList serialize_element xs = serializeInt (length xs) . foldr (.) id (map serialize_element xs)

deserializeList :: forall a b. (forall c. [Word8] -> (a -> [Word8] -> c) -> c)
                -> [Word8] -> ([a] -> [Word8] -> b) -> b
deserializeList deserialize_element bytes k = deserializeInt bytes $ \len bytes -> go len bytes k
  where
    go :: Int -> [Word8] -> ([a] -> [Word8] -> b) -> b
    go len bytes k
      | len <= 0  = k [] bytes
      | otherwise = deserialize_element bytes (\elt bytes -> go (len - 1) bytes (k . (elt:)))
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Settings.hs0000644000000000000000000002322514472400112017773 0ustar0000000000000000
-- | Run-time settings
module GHC.Settings
  ( Settings (..)
  , ToolSettings (..)
  , FileSettings (..)
  , GhcNameVersion (..)
  , Platform (..)
  , PlatformMisc (..)
-- * Accessors , dynLibSuffix , sProgramName , sProjectVersion , sGhcUsagePath , sGhciUsagePath , sToolDir , sTopDir , sGlobalPackageDatabasePath , sLdSupportsCompactUnwind , sLdSupportsBuildId , sLdSupportsFilelist , sLdIsGnuLd , sGccSupportsNoPie , sUseInplaceMinGW , sArSupportsDashL , sPgm_L , sPgm_P , sPgm_F , sPgm_c , sPgm_cxx , sPgm_a , sPgm_l , sPgm_lm , sPgm_dll , sPgm_T , sPgm_windres , sPgm_libtool , sPgm_ar , sPgm_otool , sPgm_install_name_tool , sPgm_ranlib , sPgm_lo , sPgm_lc , sPgm_lcc , sPgm_i , sOpt_L , sOpt_P , sOpt_P_fingerprint , sOpt_F , sOpt_c , sOpt_cxx , sOpt_a , sOpt_l , sOpt_lm , sOpt_windres , sOpt_lo , sOpt_lc , sOpt_lcc , sOpt_i , sExtraGccViaCFlags , sTargetPlatformString , sGhcWithInterpreter , sLibFFI ) where import GHC.Prelude import GHC.Utils.CliOption import GHC.Utils.Fingerprint import GHC.Platform data Settings = Settings { sGhcNameVersion :: {-# UNPACk #-} !GhcNameVersion , sFileSettings :: {-# UNPACK #-} !FileSettings , sTargetPlatform :: Platform -- Filled in by SysTools , sToolSettings :: {-# UNPACK #-} !ToolSettings , sPlatformMisc :: {-# UNPACK #-} !PlatformMisc -- You shouldn't need to look things up in rawSettings directly. -- They should have their own fields instead. , sRawSettings :: [(String, String)] } -- | Settings for other executables GHC calls. -- -- Probably should further split down by phase, or split between -- platform-specific and platform-agnostic. 
data ToolSettings = ToolSettings { toolSettings_ldSupportsCompactUnwind :: Bool , toolSettings_ldSupportsBuildId :: Bool , toolSettings_ldSupportsFilelist :: Bool , toolSettings_ldIsGnuLd :: Bool , toolSettings_ccSupportsNoPie :: Bool , toolSettings_useInplaceMinGW :: Bool , toolSettings_arSupportsDashL :: Bool -- commands for particular phases , toolSettings_pgm_L :: String , toolSettings_pgm_P :: (String, [Option]) , toolSettings_pgm_F :: String , toolSettings_pgm_c :: String , toolSettings_pgm_cxx :: String , toolSettings_pgm_a :: (String, [Option]) , toolSettings_pgm_l :: (String, [Option]) , toolSettings_pgm_lm :: Maybe (String, [Option]) -- ^ N.B. On Windows we don't have a linker which supports object -- merging, hence the 'Maybe'. See Note [Object merging] in -- "GHC.Driver.Pipeline.Execute" for details. , toolSettings_pgm_dll :: (String, [Option]) , toolSettings_pgm_T :: String , toolSettings_pgm_windres :: String , toolSettings_pgm_libtool :: String , toolSettings_pgm_ar :: String , toolSettings_pgm_otool :: String , toolSettings_pgm_install_name_tool :: String , toolSettings_pgm_ranlib :: String , -- | LLVM: opt llvm optimiser toolSettings_pgm_lo :: (String, [Option]) , -- | LLVM: llc static compiler toolSettings_pgm_lc :: (String, [Option]) , -- | LLVM: c compiler toolSettings_pgm_lcc :: (String, [Option]) , toolSettings_pgm_i :: String -- options for particular phases , toolSettings_opt_L :: [String] , toolSettings_opt_P :: [String] , -- | cached Fingerprint of sOpt_P -- See Note [Repeated -optP hashing] toolSettings_opt_P_fingerprint :: Fingerprint , toolSettings_opt_F :: [String] , toolSettings_opt_c :: [String] , toolSettings_opt_cxx :: [String] , toolSettings_opt_a :: [String] , toolSettings_opt_l :: [String] , toolSettings_opt_lm :: [String] , toolSettings_opt_windres :: [String] , -- | LLVM: llvm optimiser toolSettings_opt_lo :: [String] , -- | LLVM: llc static compiler toolSettings_opt_lc :: [String] , -- | LLVM: c compiler toolSettings_opt_lcc 
:: [String] , -- | iserv options toolSettings_opt_i :: [String] , toolSettings_extraGccViaCFlags :: [String] } -- | Paths to various files and directories used by GHC, including those that -- provide more settings. data FileSettings = FileSettings { fileSettings_ghcUsagePath :: FilePath -- ditto , fileSettings_ghciUsagePath :: FilePath -- ditto , fileSettings_toolDir :: Maybe FilePath -- ditto , fileSettings_topDir :: FilePath -- ditto , fileSettings_globalPackageDatabase :: FilePath } -- | Settings for what GHC this is. data GhcNameVersion = GhcNameVersion { ghcNameVersion_programName :: String , ghcNameVersion_projectVersion :: String } -- | Dynamic library suffix dynLibSuffix :: GhcNameVersion -> String dynLibSuffix (GhcNameVersion name ver) = '-':name ++ ver ----------------------------------------------------------------------------- -- Accessessors from 'Settings' sProgramName :: Settings -> String sProgramName = ghcNameVersion_programName . sGhcNameVersion sProjectVersion :: Settings -> String sProjectVersion = ghcNameVersion_projectVersion . sGhcNameVersion sGhcUsagePath :: Settings -> FilePath sGhcUsagePath = fileSettings_ghcUsagePath . sFileSettings sGhciUsagePath :: Settings -> FilePath sGhciUsagePath = fileSettings_ghciUsagePath . sFileSettings sToolDir :: Settings -> Maybe FilePath sToolDir = fileSettings_toolDir . sFileSettings sTopDir :: Settings -> FilePath sTopDir = fileSettings_topDir . sFileSettings sGlobalPackageDatabasePath :: Settings -> FilePath sGlobalPackageDatabasePath = fileSettings_globalPackageDatabase . sFileSettings sLdSupportsCompactUnwind :: Settings -> Bool sLdSupportsCompactUnwind = toolSettings_ldSupportsCompactUnwind . sToolSettings sLdSupportsBuildId :: Settings -> Bool sLdSupportsBuildId = toolSettings_ldSupportsBuildId . sToolSettings sLdSupportsFilelist :: Settings -> Bool sLdSupportsFilelist = toolSettings_ldSupportsFilelist . sToolSettings sLdIsGnuLd :: Settings -> Bool sLdIsGnuLd = toolSettings_ldIsGnuLd . 
sToolSettings sGccSupportsNoPie :: Settings -> Bool sGccSupportsNoPie = toolSettings_ccSupportsNoPie . sToolSettings sUseInplaceMinGW :: Settings -> Bool sUseInplaceMinGW = toolSettings_useInplaceMinGW . sToolSettings sArSupportsDashL :: Settings -> Bool sArSupportsDashL = toolSettings_arSupportsDashL . sToolSettings sPgm_L :: Settings -> String sPgm_L = toolSettings_pgm_L . sToolSettings sPgm_P :: Settings -> (String, [Option]) sPgm_P = toolSettings_pgm_P . sToolSettings sPgm_F :: Settings -> String sPgm_F = toolSettings_pgm_F . sToolSettings sPgm_c :: Settings -> String sPgm_c = toolSettings_pgm_c . sToolSettings sPgm_cxx :: Settings -> String sPgm_cxx = toolSettings_pgm_cxx . sToolSettings sPgm_a :: Settings -> (String, [Option]) sPgm_a = toolSettings_pgm_a . sToolSettings sPgm_l :: Settings -> (String, [Option]) sPgm_l = toolSettings_pgm_l . sToolSettings sPgm_lm :: Settings -> Maybe (String, [Option]) sPgm_lm = toolSettings_pgm_lm . sToolSettings sPgm_dll :: Settings -> (String, [Option]) sPgm_dll = toolSettings_pgm_dll . sToolSettings sPgm_T :: Settings -> String sPgm_T = toolSettings_pgm_T . sToolSettings sPgm_windres :: Settings -> String sPgm_windres = toolSettings_pgm_windres . sToolSettings sPgm_libtool :: Settings -> String sPgm_libtool = toolSettings_pgm_libtool . sToolSettings sPgm_ar :: Settings -> String sPgm_ar = toolSettings_pgm_ar . sToolSettings sPgm_otool :: Settings -> String sPgm_otool = toolSettings_pgm_otool . sToolSettings sPgm_install_name_tool :: Settings -> String sPgm_install_name_tool = toolSettings_pgm_install_name_tool . sToolSettings sPgm_ranlib :: Settings -> String sPgm_ranlib = toolSettings_pgm_ranlib . sToolSettings sPgm_lo :: Settings -> (String, [Option]) sPgm_lo = toolSettings_pgm_lo . sToolSettings sPgm_lc :: Settings -> (String, [Option]) sPgm_lc = toolSettings_pgm_lc . sToolSettings sPgm_lcc :: Settings -> (String, [Option]) sPgm_lcc = toolSettings_pgm_lcc . 
sToolSettings sPgm_i :: Settings -> String sPgm_i = toolSettings_pgm_i . sToolSettings sOpt_L :: Settings -> [String] sOpt_L = toolSettings_opt_L . sToolSettings sOpt_P :: Settings -> [String] sOpt_P = toolSettings_opt_P . sToolSettings sOpt_P_fingerprint :: Settings -> Fingerprint sOpt_P_fingerprint = toolSettings_opt_P_fingerprint . sToolSettings sOpt_F :: Settings -> [String] sOpt_F = toolSettings_opt_F . sToolSettings sOpt_c :: Settings -> [String] sOpt_c = toolSettings_opt_c . sToolSettings sOpt_cxx :: Settings -> [String] sOpt_cxx = toolSettings_opt_cxx . sToolSettings sOpt_a :: Settings -> [String] sOpt_a = toolSettings_opt_a . sToolSettings sOpt_l :: Settings -> [String] sOpt_l = toolSettings_opt_l . sToolSettings sOpt_lm :: Settings -> [String] sOpt_lm = toolSettings_opt_lm . sToolSettings sOpt_windres :: Settings -> [String] sOpt_windres = toolSettings_opt_windres . sToolSettings sOpt_lo :: Settings -> [String] sOpt_lo = toolSettings_opt_lo . sToolSettings sOpt_lc :: Settings -> [String] sOpt_lc = toolSettings_opt_lc . sToolSettings sOpt_lcc :: Settings -> [String] sOpt_lcc = toolSettings_opt_lcc . sToolSettings sOpt_i :: Settings -> [String] sOpt_i = toolSettings_opt_i . sToolSettings sExtraGccViaCFlags :: Settings -> [String] sExtraGccViaCFlags = toolSettings_extraGccViaCFlags . sToolSettings sTargetPlatformString :: Settings -> String sTargetPlatformString = platformMisc_targetPlatformString . sPlatformMisc sGhcWithInterpreter :: Settings -> Bool sGhcWithInterpreter = platformMisc_ghcWithInterpreter . sPlatformMisc sLibFFI :: Settings -> Bool sLibFFI = platformMisc_libFFI . 
sPlatformMisc ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/GHC/Settings/Config.hs0000644000000000000000000000112114472400113024757 0ustar0000000000000000module GHC.Settings.Config ( module GHC.Version , cBuildPlatformString , cHostPlatformString , cProjectName , cBooterVersion , cStage ) where import GHC.Prelude import GHC.Version cBuildPlatformString :: String cBuildPlatformString = "x86_64-apple-darwin" cHostPlatformString :: String cHostPlatformString = "x86_64-apple-darwin" cProjectName :: String cProjectName = "The Glorious Glasgow Haskell Compilation System" cBooterVersion :: String cBooterVersion = "9.2.8" cStage :: String cStage = show (1 :: Int) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Settings/Constants.hs0000644000000000000000000000200414472400113021740 0ustar0000000000000000-- | Compile-time settings module GHC.Settings.Constants where import GHC.Prelude import GHC.Settings.Config hiVersion :: Integer hiVersion = read (cProjectVersionInt ++ cProjectPatchLevel) :: Integer -- All pretty arbitrary: mAX_TUPLE_SIZE :: Int mAX_TUPLE_SIZE = 64 -- Should really match the number -- of decls in GHC.Tuple mAX_CTUPLE_SIZE :: Int -- Constraint tuples mAX_CTUPLE_SIZE = 64 -- Should match the number of decls in GHC.Classes mAX_SUM_SIZE :: Int mAX_SUM_SIZE = 64 -- | Default maximum depth for both class instance search and type family -- reduction. See also #5395. mAX_REDUCTION_DEPTH :: Int mAX_REDUCTION_DEPTH = 200 -- | Default maximum constraint-solver iterations -- Typically there should be very few mAX_SOLVER_ITERATIONS :: Int mAX_SOLVER_ITERATIONS = 4 wORD64_SIZE :: Int wORD64_SIZE = 8 -- Size of float in bytes. fLOAT_SIZE :: Int fLOAT_SIZE = 4 -- Size of double in bytes. 
dOUBLE_SIZE :: Int dOUBLE_SIZE = 8 tARGET_MAX_CHAR :: Int tARGET_MAX_CHAR = 0x10ffff ghc-lib-parser-9.4.7.20230826/compiler/GHC/Stg/InferTags/TagSig.hs0000644000000000000000000000476214472400113021776 0ustar0000000000000000{-# LANGUAGE TypeFamilies, DataKinds, GADTs, FlexibleInstances #-} {-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} -- We export this type from this module instead of GHC.Stg.InferTags.Types -- because it's used by more than the analysis itself. For example in interface -- files where we record a tag signature for bindings. -- By putting the sig into it's own module we can avoid module loops. module GHC.Stg.InferTags.TagSig where import GHC.Prelude import GHC.Types.Var import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Utils.Panic.Plain import Data.Coerce data TagInfo = TagDunno -- We don't know anything about the tag. | TagTuple [TagInfo] -- Represents a function/thunk which when evaluated -- will return a Unboxed tuple whos components have -- the given TagInfos. | TagProper -- Heap pointer to properly-tagged value | TagTagged -- Bottom of the domain. deriving (Eq) instance Outputable TagInfo where ppr TagTagged = text "TagTagged" ppr TagDunno = text "TagDunno" ppr TagProper = text "TagProper" ppr (TagTuple tis) = text "TagTuple" <> brackets (pprWithCommas ppr tis) instance Binary TagInfo where put_ bh TagDunno = putByte bh 1 put_ bh (TagTuple flds) = putByte bh 2 >> put_ bh flds put_ bh TagProper = putByte bh 3 put_ bh TagTagged = putByte bh 4 get bh = do tag <- getByte bh case tag of 1 -> return TagDunno 2 -> TagTuple <$> get bh 3 -> return TagProper 4 -> return TagTagged _ -> panic ("get TagInfo " ++ show tag) newtype TagSig -- The signature for each binding, this is a newtype as we might -- want to track more information in the future. 
= TagSig TagInfo deriving (Eq) instance Outputable TagSig where ppr (TagSig ti) = char '<' <> ppr ti <> char '>' instance OutputableBndr (Id,TagSig) where pprInfixOcc = ppr pprPrefixOcc = ppr instance Binary TagSig where put_ bh (TagSig sig) = put_ bh sig get bh = pure TagSig <*> get bh isTaggedSig :: TagSig -> Bool isTaggedSig (TagSig TagProper) = True isTaggedSig (TagSig TagTagged) = True isTaggedSig _ = False seqTagSig :: TagSig -> () seqTagSig = coerce seqTagInfo seqTagInfo :: TagInfo -> () seqTagInfo TagTagged = () seqTagInfo TagDunno = () seqTagInfo TagProper = () seqTagInfo (TagTuple tis) = foldl' (\_unit sig -> seqTagSig (coerce sig)) () tisghc-lib-parser-9.4.7.20230826/compiler/GHC/Stg/Syntax.hs0000644000000000000000000007660014472400113020224 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE UndecidableInstances #-} {- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 Shared term graph (STG) syntax for spineless-tagless code generation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This data type represents programs just before code generation (conversion to @Cmm@): basically, what we have is a stylised form of Core syntax, the style being one that happens to be ideally suited to spineless tagless code generation. 
-} module GHC.Stg.Syntax ( StgArg(..), GenStgTopBinding(..), GenStgBinding(..), GenStgExpr(..), GenStgRhs(..), GenStgAlt(..), AltType(..), StgPass(..), BinderP, XRhsClosure, XLet, XLetNoEscape, NoExtFieldSilent, noExtFieldSilent, OutputablePass, UpdateFlag(..), isUpdatable, ConstructorNumber(..), -- a set of synonyms for the vanilla parameterisation StgTopBinding, StgBinding, StgExpr, StgRhs, StgAlt, -- a set of synonyms for the code gen parameterisation CgStgTopBinding, CgStgBinding, CgStgExpr, CgStgRhs, CgStgAlt, -- Same for taggedness TgStgTopBinding, TgStgBinding, TgStgExpr, TgStgRhs, TgStgAlt, -- a set of synonyms for the lambda lifting parameterisation LlStgTopBinding, LlStgBinding, LlStgExpr, LlStgRhs, LlStgAlt, -- a set of synonyms to distinguish in- and out variants InStgArg, InStgTopBinding, InStgBinding, InStgExpr, InStgRhs, InStgAlt, OutStgArg, OutStgTopBinding, OutStgBinding, OutStgExpr, OutStgRhs, OutStgAlt, -- StgOp StgOp(..), -- utils stgRhsArity, freeVarsOfRhs, isDllConApp, stgArgType, stgCaseBndrInScope, -- ppr StgPprOpts(..), panicStgPprOpts, shortStgPprOpts, pprStgArg, pprStgExpr, pprStgRhs, pprStgBinding, pprGenStgTopBinding, pprStgTopBinding, pprGenStgTopBindings, pprStgTopBindings ) where import GHC.Prelude import GHC.Core ( AltCon ) import GHC.Types.CostCentre ( CostCentreStack ) import Data.ByteString ( ByteString ) import Data.Data ( Data ) import Data.List ( intersperse ) import GHC.Core.DataCon import GHC.Types.ForeignCall ( ForeignCall ) import GHC.Types.Id import GHC.Types.Name ( isDynLinkName ) import GHC.Types.Tickish ( StgTickish ) import GHC.Types.Var.Set import GHC.Types.Literal ( Literal, literalType ) import GHC.Unit.Module ( Module ) import GHC.Utils.Outputable import GHC.Platform import GHC.Core.Ppr( {- instances -} ) import GHC.Builtin.PrimOps ( PrimOp, PrimCall ) import GHC.Core.TyCon ( PrimRep(..), TyCon ) import GHC.Core.Type ( Type ) import GHC.Types.RepType ( typePrimRep1, typePrimRep ) import GHC.Utils.Panic.Plain {- 
************************************************************************ * * GenStgBinding * * ************************************************************************ As usual, expressions are interesting; other things are boring. Here are the boring things (except note the @GenStgRhs@), parameterised with respect to binder and occurrence information (just as in @GHC.Core@): -} -- | A top-level binding. data GenStgTopBinding pass -- See Note [Core top-level string literals] = StgTopLifted (GenStgBinding pass) | StgTopStringLit Id ByteString data GenStgBinding pass = StgNonRec (BinderP pass) (GenStgRhs pass) | StgRec [(BinderP pass, GenStgRhs pass)] {- ************************************************************************ * * StgArg * * ************************************************************************ -} data StgArg = StgVarArg Id | StgLitArg Literal -- | Does this constructor application refer to anything in a different -- *Windows* DLL? -- If so, we can't allocate it statically isDllConApp :: Platform -> Bool -- is Opt_ExternalDynamicRefs enabled? -> Module -> DataCon -> [StgArg] -> Bool isDllConApp platform ext_dyn_refs this_mod con args | not ext_dyn_refs = False | platformOS platform == OSMinGW32 = isDynLinkName platform this_mod (dataConName con) || any is_dll_arg args | otherwise = False where -- NB: typePrimRep1 is legit because any free variables won't have -- unlifted type (there are no unlifted things at top level) is_dll_arg :: StgArg -> Bool is_dll_arg (StgVarArg v) = isAddrRep (typePrimRep1 (idType v)) && isDynLinkName platform this_mod (idName v) is_dll_arg _ = False -- True of machine addresses; these are the things that don't work across DLLs. -- The key point here is that VoidRep comes out False, so that a top level -- nullary GADT constructor is False for isDllConApp -- -- data T a where -- T1 :: T Int -- -- gives -- -- T1 :: forall a. 
(a~Int) -> T a -- -- and hence the top-level binding -- -- $WT1 :: T Int -- $WT1 = T1 Int (Coercion (Refl Int)) -- -- The coercion argument here gets VoidRep isAddrRep :: PrimRep -> Bool isAddrRep AddrRep = True isAddrRep LiftedRep = True isAddrRep UnliftedRep = True isAddrRep _ = False -- | Type of an @StgArg@ -- -- Very half baked because we have lost the type arguments. stgArgType :: StgArg -> Type stgArgType (StgVarArg v) = idType v stgArgType (StgLitArg lit) = literalType lit -- | Given an alt type and whether the program is unarised, return whether the -- case binder is in scope. -- -- Case binders of unboxed tuple or unboxed sum type always dead after the -- unariser has run. See Note [Post-unarisation invariants]. stgCaseBndrInScope :: AltType -> Bool {- ^ unarised? -} -> Bool stgCaseBndrInScope alt_ty unarised = case alt_ty of AlgAlt _ -> True PrimAlt _ -> True MultiValAlt _ -> not unarised PolyAlt -> True {- ************************************************************************ * * STG expressions * * ************************************************************************ The @GenStgExpr@ data type is parameterised on binder and occurrence info, as before. ************************************************************************ * * GenStgExpr * * ************************************************************************ An application is of a function to a list of atoms (not expressions). Operationally, we want to push the arguments on the stack and call the function. (If the arguments were expressions, we would have to build their closures first.) There is no constructor for a lone variable; it would appear as @StgApp var []@. 
-} data GenStgExpr pass = StgApp Id -- function [StgArg] -- arguments; may be empty {- ************************************************************************ * * StgConApp and StgPrimApp --- saturated applications * * ************************************************************************ There are specialised forms of application, for constructors, primitives, and literals. -} | StgLit Literal -- StgConApp is vital for returning unboxed tuples or sums -- which can't be let-bound | StgConApp DataCon ConstructorNumber [StgArg] -- Saturated. (After Unarisation, [NonVoid StgArg]) [Type] -- See Note [Types in StgConApp] in GHC.Stg.Unarise | StgOpApp StgOp -- Primitive op or foreign call [StgArg] -- Saturated. Type -- Result type -- We need to know this so that we can -- assign result registers {- ************************************************************************ * * GenStgExpr: case-expressions * * ************************************************************************ This has the same boxed/unboxed business as Core case expressions. -} | StgCase (GenStgExpr pass) -- the thing to examine (BinderP pass) -- binds the result of evaluating the scrutinee AltType [GenStgAlt pass] -- The DEFAULT case is always *first* -- if it is there at all {- ************************************************************************ * * GenStgExpr: let(rec)-expressions * * ************************************************************************ The various forms of let(rec)-expression encode most of the interesting things we want to do. - let-closure x = [free-vars] [args] expr in e is equivalent to let x = (\free-vars -> \args -> expr) free-vars @args@ may be empty (and is for most closures). It isn't under circumstances like this: let x = (\y -> y+z) This gets mangled to let-closure x = [z] [y] (y+z) The idea is that we compile code for @(y+z)@ in an environment in which @z@ is bound to an offset from Node, and `y` is bound to an offset from the stack pointer. 
(A let-closure is an @StgLet@ with a @StgRhsClosure@ RHS.) - let-constructor x = Constructor [args] in e (A let-constructor is an @StgLet@ with a @StgRhsCon@ RHS.) - Letrec-expressions are essentially the same deal as let-closure/ let-constructor, so we use a common structure and distinguish between them with an @is_recursive@ boolean flag. - let-unboxed u = in e All the stuff on the RHS must be fully evaluated. No function calls either! (We've backed away from this toward case-expressions with suitably-magical alts ...) - Advanced stuff here! Not to start with, but makes pattern matching generate more efficient code. let-escapes-not fail = expr in e' Here the idea is that @e'@ guarantees not to put @fail@ in a data structure, or pass it to another function. All @e'@ will ever do is tail-call @fail@. Rather than build a closure for @fail@, all we need do is to record the stack level at the moment of the @let-escapes-not@; then entering @fail@ is just a matter of adjusting the stack pointer back down to that point and entering the code for it. Another example: f x y = let z = huge-expression in if y==1 then z else if y==2 then z else 1 (A let-escapes-not is an @StgLetNoEscape@.) - We may eventually want: let-literal x = Literal in e And so the code for let(rec)-things: -} | StgLet (XLet pass) (GenStgBinding pass) -- right hand sides (see below) (GenStgExpr pass) -- body | StgLetNoEscape (XLetNoEscape pass) (GenStgBinding pass) -- right hand sides (see below) (GenStgExpr pass) -- body {- ************************************************************************* * * GenStgExpr: hpc, scc and other debug annotations * * ************************************************************************* Finally for @hpc@ expressions we introduce a new STG construct. 
-} | StgTick StgTickish (GenStgExpr pass) -- sub expression -- END of GenStgExpr {- ************************************************************************ * * STG right-hand sides * * ************************************************************************ Here's the rest of the interesting stuff for @StgLet@s; the first flavour is for closures: -} data GenStgRhs pass = StgRhsClosure (XRhsClosure pass) -- ^ Extension point for non-global free var -- list just before 'CodeGen'. CostCentreStack -- ^ CCS to be attached (default is CurrentCCS) !UpdateFlag -- ^ 'ReEntrant' | 'Updatable' | 'SingleEntry' [BinderP pass] -- ^ arguments; if empty, then not a function; -- as above, order is important. (GenStgExpr pass) -- ^ body {- An example may be in order. Consider: let t = \x -> \y -> ... x ... y ... p ... q in e Pulling out the free vars and stylising somewhat, we get the equivalent: let t = (\[p,q] -> \[x,y] -> ... x ... y ... p ...q) p q Stg-operationally, the @[x,y]@ are on the stack, the @[p,q]@ are offsets from @Node@ into the closure, and the code ptr for the closure will be exactly that in parentheses above. The second flavour of right-hand-side is for constructors (simple but important): -} | StgRhsCon CostCentreStack -- CCS to be attached (default is CurrentCCS). -- Top-level (static) ones will end up with -- DontCareCCS, because we don't count static -- data in heap profiles, and we don't set CCCS -- from static closure. DataCon -- Constructor. Never an unboxed tuple or sum, as those -- are not allocated. ConstructorNumber [StgTickish] [StgArg] -- Args -- | Like 'GHC.Hs.Extension.NoExtField', but with an 'Outputable' instance that -- returns 'empty'. data NoExtFieldSilent = NoExtFieldSilent deriving (Data, Eq, Ord) instance Outputable NoExtFieldSilent where ppr _ = empty -- | Used when constructing a term with an unused extension point that should -- not appear in pretty-printed output at all. 
noExtFieldSilent :: NoExtFieldSilent noExtFieldSilent = NoExtFieldSilent -- TODO: Maybe move this to GHC.Hs.Extension? I'm not sure about the -- implications on build time... stgRhsArity :: StgRhs -> Int stgRhsArity (StgRhsClosure _ _ _ bndrs _) = assert (all isId bndrs) $ length bndrs -- The arity never includes type parameters, but they should have gone by now stgRhsArity (StgRhsCon {}) = 0 freeVarsOfRhs :: (XRhsClosure pass ~ DIdSet) => GenStgRhs pass -> DIdSet freeVarsOfRhs (StgRhsCon _ _ _ _ args) = mkDVarSet [ id | StgVarArg id <- args ] freeVarsOfRhs (StgRhsClosure fvs _ _ _ _) = fvs {- ************************************************************************ * * STG case alternatives * * ************************************************************************ Very like in Core syntax (except no type-world stuff). The type constructor is guaranteed not to be abstract; that is, we can see its representation. This is important because the code generator uses it to determine return conventions etc. But it's not trivial where there's a module loop involved, because some versions of a type constructor might not have all the constructors visible. So mkStgAlgAlts (in CoreToStg) ensures that it gets the TyCon from the constructors or literals (which are guaranteed to have the Real McCoy) rather than from the scrutinee type. -} data GenStgAlt pass = GenStgAlt { alt_con :: !AltCon -- alts: data constructor, , alt_bndrs :: ![BinderP pass] -- constructor's parameters, , alt_rhs :: !(GenStgExpr pass) -- right-hand side. 
} data AltType = PolyAlt -- Polymorphic (a boxed type variable, lifted or unlifted) | MultiValAlt Int -- Multi value of this arity (unboxed tuple or sum) -- the arity could indeed be 1 for unary unboxed tuple -- or enum-like unboxed sums | AlgAlt TyCon -- Algebraic data type; the AltCons will be DataAlts | PrimAlt PrimRep -- Primitive data type; the AltCons (if any) will be LitAlts {- ************************************************************************ * * The Plain STG parameterisation * * ************************************************************************ Note [STG Extension points] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We now make use of extension points in STG for different passes which want to associate information with AST nodes. Currently the pipeline is roughly: CoreToStg: Core -> Stg StgSimpl: Stg -> Stg CodeGen: Stg -> Cmm As part of StgSimpl we run late lambda lifting (Ll). Late lambda lift: Stg -> FvStg -> LlStg -> Stg CodeGen: As part of CodeGen we run tag inference. Tag Inference: Stg -> Stg 'InferTaggedBinders` -> Stg And at a last step we add the free Variables: Stg -> CgStg Which finally CgStg being used to generate Cmm. 
-} type StgTopBinding = GenStgTopBinding 'Vanilla type StgBinding = GenStgBinding 'Vanilla type StgExpr = GenStgExpr 'Vanilla type StgRhs = GenStgRhs 'Vanilla type StgAlt = GenStgAlt 'Vanilla type LlStgTopBinding = GenStgTopBinding 'LiftLams type LlStgBinding = GenStgBinding 'LiftLams type LlStgExpr = GenStgExpr 'LiftLams type LlStgRhs = GenStgRhs 'LiftLams type LlStgAlt = GenStgAlt 'LiftLams type CgStgTopBinding = GenStgTopBinding 'CodeGen type CgStgBinding = GenStgBinding 'CodeGen type CgStgExpr = GenStgExpr 'CodeGen type CgStgRhs = GenStgRhs 'CodeGen type CgStgAlt = GenStgAlt 'CodeGen type TgStgTopBinding = GenStgTopBinding 'CodeGen type TgStgBinding = GenStgBinding 'CodeGen type TgStgExpr = GenStgExpr 'CodeGen type TgStgRhs = GenStgRhs 'CodeGen type TgStgAlt = GenStgAlt 'CodeGen {- Many passes apply a substitution, and it's very handy to have type synonyms to remind us whether or not the substitution has been applied. See GHC.Core for precedence in Core land -} type InStgTopBinding = StgTopBinding type InStgBinding = StgBinding type InStgArg = StgArg type InStgExpr = StgExpr type InStgRhs = StgRhs type InStgAlt = StgAlt type OutStgTopBinding = StgTopBinding type OutStgBinding = StgBinding type OutStgArg = StgArg type OutStgExpr = StgExpr type OutStgRhs = StgRhs type OutStgAlt = StgAlt -- | When `-fdistinct-constructor-tables` is turned on then -- each usage of a constructor is given an unique number and -- an info table is generated for each different constructor. data ConstructorNumber = NoNumber | Numbered Int instance Outputable ConstructorNumber where ppr NoNumber = empty ppr (Numbered n) = text "#" <> ppr n {- Note Stg Passes ~~~~~~~~~~~~~~~ Here is a short summary of the STG pipeline and where we use the different StgPass data type indexes: 1. CoreToStg.Prep performs several transformations that prepare the desugared and simplified core to be converted to STG. 
One of these transformations is making it so that value lambdas only exist as the RHS of a binding. See Note [CorePrep Overview]. 2. CoreToStg converts the prepared core to STG, specifically GenStg* parameterised by 'Vanilla. See the GHC.CoreToStg Module. 3. Stg.Pipeline does a number of passes on the generated STG. One of these is the lambda-lifting pass, which internally uses the 'LiftLams parameterisation to store information for deciding whether or not to lift each binding. See Note [Late lambda lifting in STG]. 4. Tag inference takes in 'Vanilla and produces 'InferTagged STG, while using the InferTaggedBinders annotated AST internally. See Note [Tag Inference]. 5. Stg.FVs annotates closures with their free variables. To store these annotations we use the 'CodeGen parameterisation. See the GHC.Stg.FVs module. 6. The Module Stg.StgToCmm generates Cmm from the CodeGen annotated STG. -} -- | Used as a data type index for the stgSyn AST data StgPass = Vanilla | LiftLams -- ^ Use internally by the lambda lifting pass | InferTaggedBinders -- ^ Tag inference information on binders. 
-- See Note [Tag inference passes] in GHC.Stg.InferTags | InferTagged -- ^ Tag inference information put on relevant StgApp nodes -- See Note [Tag inference passes] in GHC.Stg.InferTags | CodeGen type family BinderP (pass :: StgPass) type instance BinderP 'Vanilla = Id type instance BinderP 'CodeGen = Id type instance BinderP 'InferTagged = Id type family XRhsClosure (pass :: StgPass) type instance XRhsClosure 'Vanilla = NoExtFieldSilent type instance XRhsClosure 'InferTagged = NoExtFieldSilent -- | Code gen needs to track non-global free vars type instance XRhsClosure 'CodeGen = DIdSet type family XLet (pass :: StgPass) type instance XLet 'Vanilla = NoExtFieldSilent type instance XLet 'InferTagged = NoExtFieldSilent type instance XLet 'CodeGen = NoExtFieldSilent type family XLetNoEscape (pass :: StgPass) type instance XLetNoEscape 'Vanilla = NoExtFieldSilent type instance XLetNoEscape 'InferTagged = NoExtFieldSilent type instance XLetNoEscape 'CodeGen = NoExtFieldSilent {- ************************************************************************ * * UpdateFlag * * ************************************************************************ This is also used in @LambdaFormInfo@ in the @ClosureInfo@ module. A @ReEntrant@ closure may be entered multiple times, but should not be updated or blackholed. An @Updatable@ closure should be updated after evaluation (and may be blackholed during evaluation). A @SingleEntry@ closure will only be entered once, and so need not be updated but may safely be blackholed. 
-} data UpdateFlag = ReEntrant | Updatable | SingleEntry instance Outputable UpdateFlag where ppr u = char $ case u of ReEntrant -> 'r' Updatable -> 'u' SingleEntry -> 's' isUpdatable :: UpdateFlag -> Bool isUpdatable ReEntrant = False isUpdatable SingleEntry = False isUpdatable Updatable = True {- ************************************************************************ * * StgOp * * ************************************************************************ An StgOp allows us to group together PrimOps and ForeignCalls. It's quite useful to move these around together, notably in StgOpApp and COpStmt. -} data StgOp = StgPrimOp PrimOp | StgPrimCallOp PrimCall | StgFCallOp ForeignCall Type -- The Type, which is obtained from the foreign import declaration -- itself, is needed by the stg-to-cmm pass to determine the offset to -- apply to unlifted boxed arguments in GHC.StgToCmm.Foreign. See Note -- [Unlifted boxed arguments to foreign calls] {- ************************************************************************ * * Pretty-printing * * ************************************************************************ Robin Popplestone asked for semi-colon separators on STG binds; here's hoping he likes terminators instead... Ditto for case alternatives. 
-} type OutputablePass pass = ( Outputable (XLet pass) , Outputable (XLetNoEscape pass) , Outputable (XRhsClosure pass) , OutputableBndr (BinderP pass) ) -- | STG pretty-printing options data StgPprOpts = StgPprOpts { stgSccEnabled :: !Bool -- ^ Enable cost-centres } -- | STG pretty-printing options used for panic messages panicStgPprOpts :: StgPprOpts panicStgPprOpts = StgPprOpts { stgSccEnabled = True } -- | STG pretty-printing options used for short messages shortStgPprOpts :: StgPprOpts shortStgPprOpts = StgPprOpts { stgSccEnabled = False } pprGenStgTopBinding :: OutputablePass pass => StgPprOpts -> GenStgTopBinding pass -> SDoc pprGenStgTopBinding opts b = case b of StgTopStringLit bndr str -> hang (hsep [pprBndr LetBind bndr, equals]) 4 (pprHsBytes str <> semi) StgTopLifted bind -> pprGenStgBinding opts bind pprGenStgBinding :: OutputablePass pass => StgPprOpts -> GenStgBinding pass -> SDoc pprGenStgBinding opts b = case b of StgNonRec bndr rhs -> hang (hsep [pprBndr LetBind bndr, equals]) 4 (pprStgRhs opts rhs <> semi) StgRec pairs -> vcat [ text "Rec {" , vcat (intersperse blankLine (map ppr_bind pairs)) , text "end Rec }" ] where ppr_bind (bndr, expr) = hang (hsep [pprBndr LetBind bndr, equals]) 4 (pprStgRhs opts expr <> semi) instance OutputablePass pass => Outputable (GenStgBinding pass) where ppr = pprGenStgBinding panicStgPprOpts pprGenStgTopBindings :: (OutputablePass pass) => StgPprOpts -> [GenStgTopBinding pass] -> SDoc pprGenStgTopBindings opts binds = vcat $ intersperse blankLine (map (pprGenStgTopBinding opts) binds) pprStgBinding :: OutputablePass pass => StgPprOpts -> GenStgBinding pass -> SDoc pprStgBinding = pprGenStgBinding pprStgTopBinding :: OutputablePass pass => StgPprOpts -> GenStgTopBinding pass -> SDoc pprStgTopBinding = pprGenStgTopBinding pprStgTopBindings :: OutputablePass pass => StgPprOpts -> [GenStgTopBinding pass] -> SDoc pprStgTopBindings = pprGenStgTopBindings pprIdWithRep :: Id -> SDoc pprIdWithRep v = ppr v <> pprTypeRep 
(idType v) pprTypeRep :: Type -> SDoc pprTypeRep ty = ppUnlessOption sdocSuppressStgReps $ char ':' <> case typePrimRep ty of [r] -> ppr r r -> ppr r instance Outputable StgArg where ppr = pprStgArg pprStgArg :: StgArg -> SDoc pprStgArg (StgVarArg var) = pprIdWithRep var pprStgArg (StgLitArg con) = ppr con <> pprTypeRep (literalType con) instance OutputablePass pass => Outputable (GenStgExpr pass) where ppr = pprStgExpr panicStgPprOpts pprStgExpr :: OutputablePass pass => StgPprOpts -> GenStgExpr pass -> SDoc pprStgExpr opts e = case e of -- special case StgLit lit -> ppr lit -- general case StgApp func args | null args , Just sig <- idTagSig_maybe func -> ppr func <> ppr sig | otherwise -> hang (ppr func) 4 (interppSP args) -- TODO: Print taggedness StgConApp con n args _ -> hsep [ ppr con, ppr n, brackets (interppSP args) ] StgOpApp op args _ -> hsep [ pprStgOp op, brackets (interppSP args)] -- special case: let v = -- in -- let ... -- in -- ... -- -- Very special! Suspicious! (SLPJ) {- StgLet srt (StgNonRec bndr (StgRhsClosure cc bi free_vars upd_flag args rhs)) expr@(StgLet _ _)) -> ($$) (hang (hcat [text "let { ", ppr bndr, text " = ", ppr cc, pp_binder_info bi, text " [", whenPprDebug (interppSP free_vars), text "] \\", ppr upd_flag, text " [", interppSP args, char ']']) 8 (sep [hsep [ppr rhs, text "} in"]])) (ppr expr) -} -- special case: let ... in let ... 
StgLet ext bind expr@StgLet{} -> ($$) (sep [hang (text "let" <+> ppr ext <+> text "{") 2 (hsep [pprGenStgBinding opts bind, text "} in"])]) (pprStgExpr opts expr) -- general case StgLet ext bind expr -> sep [ hang (text "let" <+> ppr ext <+> text "{") 2 (pprGenStgBinding opts bind) , hang (text "} in ") 2 (pprStgExpr opts expr) ] StgLetNoEscape ext bind expr -> sep [ hang (text "let-no-escape" <+> ppr ext <+> text "{") 2 (pprGenStgBinding opts bind) , hang (text "} in ") 2 (pprStgExpr opts expr) ] StgTick _tickish expr -> sdocOption sdocSuppressTicks $ \case True -> pprStgExpr opts expr False -> pprStgExpr opts expr -- XXX sep [ ppr tickish, pprStgExpr opts expr ] -- Don't indent for a single case alternative. StgCase expr bndr alt_type [alt] -> sep [ sep [ text "case" , nest 4 (hsep [ pprStgExpr opts expr , whenPprDebug (dcolon <+> ppr alt_type) ]) , text "of" , pprBndr CaseBind bndr , char '{' ] , pprStgAlt opts False alt , char '}' ] StgCase expr bndr alt_type alts -> sep [ sep [ text "case" , nest 4 (hsep [ pprStgExpr opts expr , whenPprDebug (dcolon <+> ppr alt_type) ]) , text "of" , pprBndr CaseBind bndr, char '{' ] , nest 2 (vcat (map (pprStgAlt opts True) alts)) , char '}' ] pprStgAlt :: OutputablePass pass => StgPprOpts -> Bool -> GenStgAlt pass -> SDoc pprStgAlt opts indent GenStgAlt{alt_con, alt_bndrs, alt_rhs} | indent = hang altPattern 4 (pprStgExpr opts alt_rhs <> semi) | otherwise = sep [altPattern, pprStgExpr opts alt_rhs <> semi] where altPattern = hsep [ ppr alt_con , sep (map (pprBndr CasePatBind) alt_bndrs) , text "->" ] pprStgOp :: StgOp -> SDoc pprStgOp (StgPrimOp op) = ppr op pprStgOp (StgPrimCallOp op)= ppr op pprStgOp (StgFCallOp op _) = ppr op instance Outputable StgOp where ppr = pprStgOp instance Outputable AltType where ppr PolyAlt = text "Polymorphic" ppr (MultiValAlt n) = text "MultiAlt" <+> ppr n ppr (AlgAlt tc) = text "Alg" <+> ppr tc ppr (PrimAlt tc) = text "Prim" <+> ppr tc pprStgRhs :: OutputablePass pass => StgPprOpts -> 
GenStgRhs pass -> SDoc pprStgRhs opts rhs = case rhs of StgRhsClosure ext cc upd_flag args body -> hang (hsep [ if stgSccEnabled opts then ppr cc else empty , ppUnlessOption sdocSuppressStgExts (ppr ext) , char '\\' <> ppr upd_flag, brackets (interppSP args) ]) 4 (pprStgExpr opts body) StgRhsCon cc con mid _ticks args -> hcat [ ppr cc, space , case mid of NoNumber -> empty Numbered n -> hcat [ppr n, space] -- The bang indicates this is an StgRhsCon instead of an StgConApp. , ppr con, text "! ", brackets (sep (map pprStgArg args))] instance OutputablePass pass => Outputable (GenStgRhs pass) where ppr = pprStgRhs panicStgPprOpts ghc-lib-parser-9.4.7.20230826/compiler/GHC/StgToCmm/Config.hs0000644000000000000000000001426614472400113021103 0ustar0000000000000000-- | The stg to cmm code generator configuration module GHC.StgToCmm.Config ( StgToCmmConfig(..) , stgToCmmPlatform ) where import GHC.Platform.Profile import GHC.Platform import GHC.Unit.Module import GHC.Utils.Outputable import GHC.Utils.TmpFs import GHC.Prelude -- This config is static and contains information only passed *downwards* by StgToCmm.Monad data StgToCmmConfig = StgToCmmConfig ----------------------------- General Settings -------------------------------- { stgToCmmProfile :: !Profile -- ^ Current profile , stgToCmmThisModule :: Module -- ^ The module being compiled. This field kept lazy for -- Cmm/Parser.y which preloads it with a panic , stgToCmmTmpDir :: !TempDir -- ^ Temp Dir for files used in compilation , stgToCmmContext :: !SDocContext -- ^ Context for StgToCmm phase , stgToCmmDebugLevel :: !Int -- ^ The verbosity of debug messages , stgToCmmBinBlobThresh :: !(Maybe Word) -- ^ Threshold at which Binary literals (e.g. strings) -- are either dumped to a file and a CmmFileEmbed literal -- is emitted (over threshold), or become a CmmString -- Literal (under or at threshold). 
CmmFileEmbed is only supported -- with the NCG, thus a Just means two things: We have a threshold, -- and will be using the NCG. Conversely, a Nothing implies we are not -- using NCG and disables CmmFileEmbed. See Note -- [Embedding large binary blobs] in GHC.CmmToAsm.Ppr, and -- @cgTopBinding@ in GHC.StgToCmm. , stgToCmmMaxInlAllocSize :: !Int -- ^ Max size, in bytes, of inline array allocations. ------------------------------ Ticky Options ---------------------------------- , stgToCmmDoTicky :: !Bool -- ^ Ticky profiling enabled (cf @-ticky@) , stgToCmmTickyAllocd :: !Bool -- ^ True indicates ticky prof traces allocs of each named -- thing in addition to allocs _by_ that thing , stgToCmmTickyLNE :: !Bool -- ^ True indicates ticky uses name-specific counters for -- join-points (let-no-escape) , stgToCmmTickyDynThunk :: !Bool -- ^ True indicates ticky uses name-specific counters for -- dynamic thunks , stgToCmmTickyTag :: !Bool -- ^ True indicates ticky will count number of avoided tag checks by tag inference. 
---------------------------------- Flags -------------------------------------- , stgToCmmLoopification :: !Bool -- ^ Loopification enabled (cf @-floopification@) , stgToCmmAlignCheck :: !Bool -- ^ Insert alignment check (cf @-falignment-sanitisation@) , stgToCmmOptHpc :: !Bool -- ^ perform code generation for code coverage , stgToCmmFastPAPCalls :: !Bool -- ^ , stgToCmmSCCProfiling :: !Bool -- ^ Check if cost-centre profiling is enabled , stgToCmmEagerBlackHole :: !Bool -- ^ , stgToCmmInfoTableMap :: !Bool -- ^ true means generate C Stub for IPE map, See note [Mapping -- Info Tables to Source Positions] , stgToCmmOmitYields :: !Bool -- ^ true means omit heap checks when no allocation is performed , stgToCmmOmitIfPragmas :: !Bool -- ^ true means don't generate interface programs (implied by -O0) , stgToCmmPIC :: !Bool -- ^ true if @-fPIC@ , stgToCmmPIE :: !Bool -- ^ true if @-fPIE@ , stgToCmmExtDynRefs :: !Bool -- ^ true if @-fexternal-dynamic-refs@, meaning generate -- code for linking against dynamic libraries , stgToCmmDoBoundsCheck :: !Bool -- ^ decides whether to check array bounds in StgToCmm.Prim -- or not , stgToCmmDoTagCheck :: !Bool -- ^ Verify tag inference predictions. ------------------------------ Backend Flags ---------------------------------- , stgToCmmAllowBigArith :: !Bool -- ^ Allowed to emit larger than native size arithmetic (only LLVM and C backends) , stgToCmmAllowQuotRemInstr :: !Bool -- ^ Allowed to generate QuotRem instructions , stgToCmmAllowQuotRem2 :: !Bool -- ^ Allowed to generate QuotRem , stgToCmmAllowExtendedAddSubInstrs :: !Bool -- ^ Allowed to generate AddWordC, SubWordC, Add2, etc. , stgToCmmAllowIntMul2Instr :: !Bool -- ^ Allowed to generate IntMul2 instruction , stgToCmmAllowFabsInstrs :: !Bool -- ^ Allowed to generate Fabs instructions , stgToCmmTickyAP :: !Bool -- ^ Disable use of precomputed standard thunks. 
------------------------------ SIMD flags ------------------------------------ -- Each of these flags checks vector compatibility with the backend requested -- during compilation. In essence, this means checking for @-fllvm@ which is -- the only backend that currently allows SIMD instructions, see -- Ghc.StgToCmm.Prim.checkVecCompatibility for these flags only call site. , stgToCmmVecInstrsErr :: Maybe String -- ^ Error (if any) to raise when vector instructions are -- used, see @StgToCmm.Prim.checkVecCompatibility@ , stgToCmmAvx :: !Bool -- ^ check for Advanced Vector Extensions , stgToCmmAvx2 :: !Bool -- ^ check for Advanced Vector Extensions 2 , stgToCmmAvx512f :: !Bool -- ^ check for Advanced Vector 512-bit Extensions } stgToCmmPlatform :: StgToCmmConfig -> Platform stgToCmmPlatform = profilePlatform . stgToCmmProfile ghc-lib-parser-9.4.7.20230826/compiler/GHC/StgToCmm/Types.hs0000644000000000000000000002216114472400113020773 0ustar0000000000000000 module GHC.StgToCmm.Types ( CmmCgInfos (..) , LambdaFormInfo (..) , ModuleLFInfos , StandardFormInfo (..) , DoSCCProfiling , DoExtDynRefs ) where import GHC.Prelude import GHC.Core.DataCon import GHC.Runtime.Heap.Layout import GHC.Types.Basic import GHC.Types.ForeignStubs import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Utils.Outputable {- Note [Conveying CAF-info and LFInfo between modules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some information about an Id is generated in the code generator, and is not available earlier. Namely: * CAF info. Code motion in Cmm or earlier phases may move references around so we compute information about which bits of code refer to which CAF late in the Cmm pipeline. * LambdaFormInfo. This records the details of a closure representation, including - the final arity (for functions) - whether it is a data constructor, and if so its tag Collectively we call this CgInfo (see GHC.StgToCmm.Types). It's very useful for importing modules to have this information. 
We can always make a conservative assumption, but that is bad: e.g. * For CAF info, if we know nothing we have to assume it is a CAF which bloats the SRTs of the importing module. Conservative assumption here is made when creating new Ids. * For data constructors, we really like having well-tagged pointers. See #14677, #16559, #15155, and wiki: commentary/rts/haskell-execution/pointer-tagging Conservative assumption here is made when we import an Id without a LambdaFormInfo in the interface, in GHC.StgToCmm.Closure.mkLFImported. So we arrange to always serialise this information into the interface file. The moving parts are: * We record the CgInfo in the IdInfo of the Id. * GHC.Driver.Pipeline: the call to updateModDetailsIdInfos augments the ModDetails constructed at the end of the Core pipeline, with CgInfo gleaned from the back end. The hard work is done in GHC.Iface.UpdateIdInfos. * For ModIface we generate the final ModIface with CgInfo in GHC.Iface.Make.mkFullIface. * We don't absolutely guarantee to serialise the CgInfo: we won't if you have -fomit-interface-pragmas or -fno-code; and we won't read it in if you have -fignore-interface-pragmas. (We could revisit this decision.) Note [Imported unlifted nullary datacon wrappers must have correct LFInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described in `Note [Conveying CAF-info and LFInfo between modules]`, imported unlifted nullary datacons must have their LambdaFormInfo set to reflect the fact that they are evaluated . This is necessary as otherwise references to them may be passed untagged to code that expects tagged references. What may be less obvious is that this must be done for not only datacon workers but also *wrappers*. 
The reason is found in this program from #23146: module B where type NP :: [UnliftedType] -> UnliftedType data NP xs where UNil :: NP '[] module A where import B fieldsSam :: NP xs -> NP xs -> Bool fieldsSam UNil UNil = True x = fieldsSam UNil UNil Due to its GADT nature, `B.UNil` produces a trivial wrapper $WUNil :: NP '[] $WUNil = UNil @'[] @~() which is referenced in the RHS of `A.x`. If we fail to give `$WUNil` the correct `LFCon 0` `LambdaFormInfo` then we will end up passing an untagged pointer to `fieldsSam`. This is problematic as `fieldsSam` may take advantage of the unlifted nature of its arguments by omitting handling of the zero tag when scrutinising them. The fix is straightforward: extend the logic in `mkLFImported` to cover (nullary) datacon wrappers as well as workers. This is safe because we know that the wrapper of a nullary datacon will be in WHNF, even if it includes equalities evidence (since such equalities are not runtime relevant). This fixed #23146. See also Note [The LFInfo of Imported Ids] -} -- | Codegen-generated Id infos, to be passed to downstream via interfaces. -- -- This stuff is for optimization purposes only, they're not compulsory. -- -- * When CafInfo of an imported Id is not known it's safe to treat it as CAFFY. -- * When LambdaFormInfo of an imported Id is not known it's safe to treat it as -- `LFUnknown True` (which just says "it could be anything" and we do slow -- entry). -- -- See also Note [Conveying CAF-info and LFInfo between modules] above. -- data CmmCgInfos = CmmCgInfos { cgNonCafs :: !NonCaffySet -- ^ Exported Non-CAFFY closures in the current module. Everything else is -- either not exported of CAFFY. , cgLFInfos :: !ModuleLFInfos -- ^ LambdaFormInfos of exported closures in the current module. 
, cgIPEStub :: !CStub -- ^ The C stub which is used for IPE information } -------------------------------------------------------------------------------- -- LambdaFormInfo -------------------------------------------------------------------------------- -- | Maps names in the current module to their LambdaFormInfos type ModuleLFInfos = NameEnv LambdaFormInfo -- | Information about an identifier, from the code generator's point of view. -- Every identifier is bound to a LambdaFormInfo in the environment, which gives -- the code generator enough info to be able to tail call or return that -- identifier. data LambdaFormInfo = LFReEntrant -- Reentrant closure (a function) !TopLevelFlag -- True if top level !RepArity -- Arity. Invariant: always > 0 !Bool -- True <=> no fvs !ArgDescr -- Argument descriptor (should really be in ClosureInfo) | LFThunk -- Thunk (zero arity) !TopLevelFlag !Bool -- True <=> no free vars !Bool -- True <=> updatable (i.e., *not* single-entry) !StandardFormInfo !Bool -- True <=> *might* be a function type | LFCon -- A saturated data constructor application !DataCon -- The constructor | LFUnknown -- Used for function arguments and imported things. -- We know nothing about this closure. -- Treat like updatable "LFThunk"... 
-- Imported things which we *do* know something about use -- one of the other LF constructors (eg LFReEntrant for -- known functions) !Bool -- True <=> *might* be a function type -- The False case is good when we want to enter it, -- because then we know the entry code will do -- For a function, the entry code is the fast entry point | LFUnlifted -- A value of unboxed type; -- always a value, needs evaluation | LFLetNoEscape -- See LetNoEscape module for precise description instance Outputable LambdaFormInfo where ppr (LFReEntrant top rep fvs argdesc) = text "LFReEntrant" <> brackets (ppr top <+> ppr rep <+> pprFvs fvs <+> ppr argdesc) ppr (LFThunk top hasfv updateable sfi m_function) = text "LFThunk" <> brackets (ppr top <+> pprFvs hasfv <+> pprUpdateable updateable <+> ppr sfi <+> pprFuncFlag m_function) ppr (LFCon con) = text "LFCon" <> brackets (ppr con) ppr (LFUnknown m_func) = text "LFUnknown" <> brackets (pprFuncFlag m_func) ppr LFUnlifted = text "LFUnlifted" ppr LFLetNoEscape = text "LFLetNoEscape" pprFvs :: Bool -> SDoc pprFvs True = text "no-fvs" pprFvs False = text "fvs" pprFuncFlag :: Bool -> SDoc pprFuncFlag True = text "mFunc" pprFuncFlag False = text "value" pprUpdateable :: Bool -> SDoc pprUpdateable True = text "updateable" pprUpdateable False = text "oneshot" -------------------------------------------------------------------------------- -- | StandardFormInfo tells whether this thunk has one of a small number of -- standard forms data StandardFormInfo = NonStandardThunk -- The usual case: not of the standard forms | SelectorThunk -- A SelectorThunk is of form -- case x of -- con a1,..,an -> ak -- and the constructor is from a single-constr type. !WordOff -- 0-origin offset of ak within the "goods" of -- constructor (Recall that the a1,...,an may be laid -- out in the heap in a non-obvious order.) | ApThunk -- An ApThunk is of form -- x1 ... xn -- The code for the thunk just pushes x2..xn on the stack and enters x1. 
-- There are a few of these (for 1 <= n <= MAX_SPEC_AP_SIZE) pre-compiled -- in the RTS to save space. !RepArity -- Arity, n deriving (Eq) instance Outputable StandardFormInfo where ppr NonStandardThunk = text "RegThunk" ppr (SelectorThunk w) = text "SelThunk:" <> ppr w ppr (ApThunk n) = text "ApThunk:" <> ppr n -------------------------------------------------------------------------------- -- Gaining sight in a sea of blindness -------------------------------------------------------------------------------- type DoSCCProfiling = Bool type DoExtDynRefs = Bool ghc-lib-parser-9.4.7.20230826/compiler/GHC/SysTools/BaseDir.hs0000644000000000000000000002071414472400113021304 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} {- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow 2001-2017 -- -- Finding the compiler's base directory. -- ----------------------------------------------------------------------------- -} module GHC.SysTools.BaseDir ( expandTopDir, expandToolDir , findTopDir, findToolDir , tryFindTopDir ) where import GHC.Prelude -- See Note [Base Dir] for why some of this logic is shared with ghc-pkg. import GHC.BaseDir import GHC.Utils.Panic import System.Environment (lookupEnv) import System.FilePath -- Windows #if defined(mingw32_HOST_OS) import System.Directory (doesDirectoryExist) #endif {- Note [topdir: How GHC finds its files] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC needs various support files (library packages, RTS etc), plus various auxiliary programs (cp, gcc, etc). It starts by finding topdir, the root of GHC's support files On Unix: - ghc always has a shell wrapper that passes a -B option On Windows: - ghc never has a shell wrapper. - we can find the location of the ghc binary, which is $topdir//.exe where may be "ghc", "ghc-stage2", or similar - we strip off the "/.exe" to leave $topdir. from topdir we can find package.conf, ghc-asm, etc. 
Note [tooldir: How GHC finds mingw on Windows] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GHC has some custom logic on Windows for finding the mingw toolchain and perl. Depending on whether GHC is built with the make build system or Hadrian, and on whether we're running a bindist, we might find the mingw toolchain either under $topdir/../{mingw, perl}/ or $topdir/../../{mingw, perl}/. This story is long and with lots of twist and turns.. But lets talk about how the build system finds and wires through the toolchain information. 1) It all starts in configure.ac which has two modes it operates on: a) The default is where `EnableDistroToolchain` is false. This indicates that we want to use the in-tree bundled toolchains. In this mode we will download and unpack some custom toolchains into the `inplace/mingw` folder and everything is pointed to that folder. b) The second path is when `EnableDistroToolchain` is true. This makes the toolchain behave a lot like Linux, in that the environment is queried for information on the tools we require. From configure.ac we export the standard variables to set the paths to the tools for the build system to use. 2) After we have the path to the tools we have to generate the right paths to store in the settings file for ghc to use. This is done in aclocal.m4. Again we have two modes of operation: a) If not `EnableDistroToolchain` the paths are rewritten to paths using a variable `$tooldir` as we need an absolute path. $tooldir is filled in by the `expandToolDir` function in this module at GHC startup. b) When `EnableDistroToolchain` then instead of filling in a absolute path we fill in just the program name. The assumption here is that at runtime the environment GHC is operating on will be the same as the one configure was run in. This means we expect `gcc, ld, as` etc to be on the PATH. From `aclocal.m4` we export a couple of variables starting with `Settings` which will be used to generate the settings file. 
3) The next step is to generate the settings file, this is where things diverge based on the build system. Both Make and Hadrian handle this differently: make) Make deals with this rather simply. As an output of configure.ac `config.mk.in` is processed and `config.mk` generated which has the values we set in `aclocal.m4`. This allows the rest of the build system to have access to these and other values determined by configure. Based on this file, `rts/include/ghc.mk` when ran will produce the settings file by echoing the values into a the final file. Coincidentally this is also where `ghcplatform.h` and `ghcversion.h` generated which contains information about the build platform and sets CPP for use by the entire build. hadrian) For hadrian the file `cfg/system.config.in` is preprocessed by configure and the output written to `system.config`. This serves the same purpose as `config.mk` but it rewrites the values that were exported. As an example `SettingsCCompilerCommand` is rewritten to `settings-c-compiler-command`. Next up is `src/Oracles/Settings.hs` which makes from some Haskell ADT to the settings `keys` in the `system.config`. As an example, `settings-c-compiler-command` is mapped to `SettingsFileSetting_CCompilerCommand`. The last part of this is the `generateSettings` in `src/Rules/Generate.hs` which produces the desired settings file out of Hadrian. This is the equivalent to `rts/include/ghc.mk`. -- So why do we have these? On Windows there's no such thing as a platform compiler and as such we need to provide GCC and binutils. The easiest way is to bundle these with the compiler and wire them up. This gives you a relocatable binball. This works fine for most users. However mingw-w64 have a different requirement. They require all packages in the repo to be compiled using the same version of the compiler. 
So it means when they are rebuilding the world to add support for GCC X, they expect all packages to have been compiled with GCC X which is a problem since we ship an older GCC version. GHC is a package in mingw-w64 because there are Haskell packages in the repository which of course requires a Haskell compiler. To help them we provide the override which allows GHC to instead of using an inplace compiler to play nice with the system compiler instead. -} -- | Expand occurrences of the @$tooldir@ interpolation in a string -- on Windows, leave the string untouched otherwise. expandToolDir :: Bool -- ^ whether we are use the ambiant mingw toolchain -> Maybe FilePath -- ^ tooldir -> String -> String #if defined(mingw32_HOST_OS) expandToolDir False (Just tool_dir) s = expandPathVar "tooldir" tool_dir s expandToolDir False Nothing _ = panic "Could not determine $tooldir" expandToolDir True _ s = s #else expandToolDir _ _ s = s #endif -- | Returns a Unix-format path pointing to TopDir. findTopDir :: Maybe String -- Maybe TopDir path (without the '-B' prefix). -> IO String -- TopDir (in Unix format '/' separated) findTopDir m_minusb = do maybe_exec_dir <- tryFindTopDir m_minusb case maybe_exec_dir of -- "Just" on Windows, "Nothing" on unix Nothing -> throwGhcExceptionIO $ InstallationError "missing -B option" Just dir -> return dir tryFindTopDir :: Maybe String -- ^ Maybe TopDir path (without the '-B' prefix). -> IO (Maybe String) -- ^ TopDir (in Unix format '/' separated) tryFindTopDir (Just minusb) = return $ Just $ normalise minusb tryFindTopDir Nothing = do -- The _GHC_TOP_DIR environment variable can be used to specify -- the top dir when the -B argument is not specified. It is not -- intended for use by users, it was added specifically for the -- purpose of running GHC within GHCi. 
maybe_env_top_dir <- lookupEnv "_GHC_TOP_DIR" case maybe_env_top_dir of Just env_top_dir -> return $ Just env_top_dir -- Try directory of executable Nothing -> getBaseDir -- See Note [tooldir: How GHC finds mingw on Windows] -- Returns @Nothing@ when not on Windows. -- When called on Windows, it either throws an error when the -- tooldir can't be located, or returns @Just tooldirpath@. -- If the distro toolchain is being used we treat Windows the same as Linux findToolDir :: Bool -- ^ whether we are use the ambiant mingw toolchain -> FilePath -- ^ topdir -> IO (Maybe FilePath) #if defined(mingw32_HOST_OS) findToolDir False top_dir = go 0 (top_dir "..") [] where maxDepth = 3 go :: Int -> FilePath -> [FilePath] -> IO (Maybe FilePath) go k path tried | k == maxDepth = throwGhcExceptionIO $ InstallationError $ "could not detect mingw toolchain in the following paths: " ++ show tried | otherwise = do let try = path "mingw" let tried' = tried ++ [try] oneLevel <- doesDirectoryExist try if oneLevel then return (Just path) else go (k+1) (path "..") tried' findToolDir True _ = return Nothing #else findToolDir _ _ = return Nothing #endif ghc-lib-parser-9.4.7.20230826/compiler/GHC/SysTools/Terminal.hs0000644000000000000000000000654614472400113021555 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} module GHC.SysTools.Terminal (stderrSupportsAnsiColors) where import GHC.Prelude #if defined(MIN_VERSION_terminfo) import GHC.IO (catchException) import Data.Maybe (fromMaybe) import System.Console.Terminfo (SetupTermError, Terminal, getCapability, setupTermFromEnv, termColors) import System.Posix (queryTerminal, stdError) #elif defined(mingw32_HOST_OS) import GHC.IO (catchException) import GHC.Utils.Exception (try) -- import Data.Bits ((.|.), (.&.)) import Foreign (Ptr, peek, with) import qualified Graphics.Win32 as Win32 import qualified System.Win32 as Win32 #endif import System.IO.Unsafe #if defined(mingw32_HOST_OS) && !defined(WINAPI) # if 
defined(i386_HOST_ARCH) # define WINAPI stdcall # elif defined(x86_64_HOST_ARCH) # define WINAPI ccall # else # error unknown architecture # endif #endif -- | Does the controlling terminal support ANSI color sequences? -- This memoized to avoid thread-safety issues in ncurses (see #17922). stderrSupportsAnsiColors :: Bool stderrSupportsAnsiColors = unsafePerformIO stderrSupportsAnsiColors' {-# NOINLINE stderrSupportsAnsiColors #-} -- | Check if ANSI escape sequences can be used to control color in stderr. stderrSupportsAnsiColors' :: IO Bool stderrSupportsAnsiColors' = do #if defined(MIN_VERSION_terminfo) stderr_available <- queryTerminal stdError if stderr_available then fmap termSupportsColors setupTermFromEnv `catchException` \ (_ :: SetupTermError) -> pure False else pure False where termSupportsColors :: Terminal -> Bool termSupportsColors term = fromMaybe 0 (getCapability term termColors) > 0 #elif defined(mingw32_HOST_OS) h <- Win32.getStdHandle Win32.sTD_ERROR_HANDLE `catchException` \ (_ :: IOError) -> pure Win32.nullHANDLE if h == Win32.nullHANDLE then pure False else do eMode <- try (getConsoleMode h) case eMode of Left (_ :: IOError) -> Win32.isMinTTYHandle h -- Check if the we're in a MinTTY terminal -- (e.g., Cygwin or MSYS2) Right mode | modeHasVTP mode -> pure True | otherwise -> enableVTP h mode where enableVTP :: Win32.HANDLE -> Win32.DWORD -> IO Bool enableVTP h mode = do setConsoleMode h (modeAddVTP mode) modeHasVTP <$> getConsoleMode h `catchException` \ (_ :: IOError) -> pure False modeHasVTP :: Win32.DWORD -> Bool modeHasVTP mode = mode .&. eNABLE_VIRTUAL_TERMINAL_PROCESSING /= 0 modeAddVTP :: Win32.DWORD -> Win32.DWORD modeAddVTP mode = mode .|. 
eNABLE_VIRTUAL_TERMINAL_PROCESSING eNABLE_VIRTUAL_TERMINAL_PROCESSING :: Win32.DWORD eNABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 getConsoleMode :: Win32.HANDLE -> IO Win32.DWORD getConsoleMode h = with 64 $ \ mode -> do Win32.failIfFalse_ "GetConsoleMode" (c_GetConsoleMode h mode) peek mode setConsoleMode :: Win32.HANDLE -> Win32.DWORD -> IO () setConsoleMode h mode = do Win32.failIfFalse_ "SetConsoleMode" (c_SetConsoleMode h mode) foreign import WINAPI unsafe "windows.h GetConsoleMode" c_GetConsoleMode :: Win32.HANDLE -> Ptr Win32.DWORD -> IO Win32.BOOL foreign import WINAPI unsafe "windows.h SetConsoleMode" c_SetConsoleMode :: Win32.HANDLE -> Win32.DWORD -> IO Win32.BOOL #else pure False #endif ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Errors/Hole/FitTypes.hs0000644000000000000000000001335514472400113022457 0ustar0000000000000000{-# LANGUAGE ExistentialQuantification #-} module GHC.Tc.Errors.Hole.FitTypes ( TypedHole (..), HoleFit (..), HoleFitCandidate (..), CandPlugin, FitPlugin, HoleFitPlugin (..), HoleFitPluginR (..), hfIsLcl, pprHoleFitCand ) where import GHC.Prelude import GHC.Tc.Types import GHC.Tc.Types.Constraint import GHC.Tc.Utils.TcType import GHC.Types.Name.Reader import GHC.Hs.Doc import GHC.Types.Id import GHC.Utils.Outputable import GHC.Types.Name import GHC.Data.Bag import Data.Function ( on ) data TypedHole = TypedHole { th_relevant_cts :: Bag CtEvidence -- ^ Any relevant Cts to the hole , th_implics :: [Implication] -- ^ The nested implications of the hole with the -- innermost implication first. , th_hole :: Maybe Hole -- ^ The hole itself, if available. } instance Outputable TypedHole where ppr (TypedHole { th_relevant_cts = rels , th_implics = implics , th_hole = hole }) = hang (text "TypedHole") 2 (ppr rels $+$ ppr implics $+$ ppr hole) -- | HoleFitCandidates are passed to hole fit plugins and then -- checked whether they fit a given typed-hole. data HoleFitCandidate = IdHFCand Id -- An id, like locals. 
| NameHFCand Name -- A name, like built-in syntax. | GreHFCand GlobalRdrElt -- A global, like imported ids. instance Eq HoleFitCandidate where IdHFCand i1 == IdHFCand i2 = i1 == i2 NameHFCand n1 == NameHFCand n2 = n1 == n2 GreHFCand gre1 == GreHFCand gre2 = gre_name gre1 == gre_name gre2 _ == _ = False instance Outputable HoleFitCandidate where ppr = pprHoleFitCand pprHoleFitCand :: HoleFitCandidate -> SDoc pprHoleFitCand (IdHFCand cid) = text "Id HFC: " <> ppr cid pprHoleFitCand (NameHFCand cname) = text "Name HFC: " <> ppr cname pprHoleFitCand (GreHFCand cgre) = text "Gre HFC: " <> ppr cgre instance NamedThing HoleFitCandidate where getName hfc = case hfc of IdHFCand cid -> idName cid NameHFCand cname -> cname GreHFCand cgre -> greMangledName cgre getOccName hfc = case hfc of IdHFCand cid -> occName cid NameHFCand cname -> occName cname GreHFCand cgre -> occName (greMangledName cgre) instance HasOccName HoleFitCandidate where occName = getOccName instance Ord HoleFitCandidate where compare = compare `on` getName -- | HoleFit is the type we use for valid hole fits. It contains the -- element that was checked, the Id of that element as found by `tcLookup`, -- and the refinement level of the fit, which is the number of extra argument -- holes that this fit uses (e.g. if hfRefLvl is 2, the fit is for `Id _ _`). data HoleFit = HoleFit { hfId :: Id -- ^ The elements id in the TcM , hfCand :: HoleFitCandidate -- ^ The candidate that was checked. , hfType :: TcType -- ^ The type of the id, possibly zonked. , hfRefLvl :: Int -- ^ The number of holes in this fit. , hfWrap :: [TcType] -- ^ The wrapper for the match. , hfMatches :: [TcType] -- ^ What the refinement variables got matched with, if anything , hfDoc :: Maybe [HsDocString] -- ^ Documentation of this HoleFit, if available. } | RawHoleFit SDoc -- ^ A fit that is just displayed as is. Here so thatHoleFitPlugins -- can inject any fit they want. -- We define an Eq and Ord instance to be able to build a graph. 
instance Eq HoleFit where (==) = (==) `on` hfId instance Outputable HoleFit where ppr (RawHoleFit sd) = sd ppr (HoleFit _ cand ty _ _ mtchs _) = hang (name <+> holes) 2 (text "where" <+> name <+> dcolon <+> (ppr ty)) where name = ppr $ getName cand holes = sep $ map (parens . (text "_" <+> dcolon <+>) . ppr) mtchs -- We compare HoleFits by their name instead of their Id, since we don't -- want our tests to be affected by the non-determinism of `nonDetCmpVar`, -- which is used to compare Ids. When comparing, we want HoleFits with a lower -- refinement level to come first. instance Ord HoleFit where compare (RawHoleFit _) (RawHoleFit _) = EQ compare (RawHoleFit _) _ = LT compare _ (RawHoleFit _) = GT compare a@(HoleFit {}) b@(HoleFit {}) = cmp a b where cmp = if hfRefLvl a == hfRefLvl b then compare `on` (getName . hfCand) else compare `on` hfRefLvl hfIsLcl :: HoleFit -> Bool hfIsLcl hf@(HoleFit {}) = case hfCand hf of IdHFCand _ -> True NameHFCand _ -> False GreHFCand gre -> gre_lcl gre hfIsLcl _ = False -- | A plugin for modifying the candidate hole fits *before* they're checked. type CandPlugin = TypedHole -> [HoleFitCandidate] -> TcM [HoleFitCandidate] -- | A plugin for modifying hole fits *after* they've been found. type FitPlugin = TypedHole -> [HoleFit] -> TcM [HoleFit] -- | A HoleFitPlugin is a pair of candidate and fit plugins. data HoleFitPlugin = HoleFitPlugin { candPlugin :: CandPlugin , fitPlugin :: FitPlugin } -- | HoleFitPluginR adds a TcRef to hole fit plugins so that plugins can -- track internal state. Note the existential quantification, ensuring that -- the state cannot be modified from outside the plugin. data HoleFitPluginR = forall s. 
HoleFitPluginR { hfPluginInit :: TcM (TcRef s) -- ^ Initializes the TcRef to be passed to the plugin , hfPluginRun :: TcRef s -> HoleFitPlugin -- ^ The function defining the plugin itself , hfPluginStop :: TcRef s -> TcM () -- ^ Cleanup of state, guaranteed to be called even on error } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Errors/Ppr.hs0000644000000000000000000040741714472400113020570 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} {-# OPTIONS_GHC -fno-warn-orphans #-} -- instance Diagnostic TcRnMessage module GHC.Tc.Errors.Ppr ( pprTypeDoesNotHaveFixedRuntimeRep , pprScopeError -- , tidySkolemInfo , tidySkolemInfoAnon -- , withHsDocContext , pprHsDocContext , inHsDocContext ) where import GHC.Prelude import GHC.Builtin.Names import GHC.Core.Coercion import GHC.Core.Unify ( tcMatchTys ) import GHC.Core.TyCon import GHC.Core.Class import GHC.Core.DataCon import GHC.Core.Coercion.Axiom (coAxiomTyCon, coAxiomSingleBranch) import GHC.Core.ConLike import GHC.Core.FamInstEnv (famInstAxiom) import GHC.Core.InstEnv import GHC.Core.TyCo.Rep (Type(..)) import GHC.Core.TyCo.Ppr (pprWithExplicitKindsWhen, pprSourceTyCon, pprTyVars, pprWithTYPE) import GHC.Core.PatSyn ( patSynName, pprPatSynType ) import GHC.Core.Predicate import GHC.Core.Type import GHC.Driver.Flags import GHC.Hs import GHC.Tc.Errors.Types import GHC.Tc.Types.Constraint import {-# SOURCE #-} GHC.Tc.Types (getLclEnvLoc) import GHC.Tc.Types.Origin import GHC.Tc.Types.Rank (Rank(..)) import GHC.Tc.Utils.TcType import GHC.Types.Error import GHC.Types.FieldLabel (flIsOverloaded) import GHC.Types.Hint (UntickedPromotedThing(..), pprUntickedConstructor, isBareSymbol) import GHC.Types.Hint.Ppr () -- Outputable GhcHint import GHC.Types.Basic import GHC.Types.Id import GHC.Types.Name import GHC.Types.Name.Reader ( GreName(..), pprNameProvenance , RdrName, rdrNameOcc, greMangledName ) import GHC.Types.Name.Set 
import GHC.Types.SrcLoc import GHC.Types.TyThing import GHC.Types.Unique.Set ( nonDetEltsUniqSet ) import GHC.Types.Var import GHC.Types.Var.Set import GHC.Types.Var.Env import GHC.Unit.State (pprWithUnitState, UnitState) import GHC.Unit.Module import GHC.Data.Bag import GHC.Data.FastString import GHC.Data.List.SetOps ( nubOrdBy ) import GHC.Data.Maybe import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import qualified GHC.LanguageExtensions as LangExt import Data.List.NonEmpty (NonEmpty(..)) import qualified Data.List.NonEmpty as NE import Data.Function (on) import Data.List ( groupBy, sortBy, tails , partition, unfoldr ) import Data.Ord ( comparing ) import Data.Bifunctor import GHC.Types.Name.Env instance Diagnostic TcRnMessage where diagnosticMessage = \case TcRnUnknownMessage m -> diagnosticMessage m TcRnMessageWithInfo unit_state msg_with_info -> case msg_with_info of TcRnMessageDetailed err_info msg -> messageWithInfoDiagnosticMessage unit_state err_info (diagnosticMessage msg) TcRnSolverReport msgs _ _ -> mkDecorated $ map pprSolverReportWithCtxt msgs TcRnRedundantConstraints redundants (info, show_info) -> mkSimpleDecorated $ text "Redundant constraint" <> plural redundants <> colon <+> pprEvVarTheta redundants $$ if show_info then text "In" <+> ppr info else empty TcRnInaccessibleCode implic contras -> mkSimpleDecorated $ hang (text "Inaccessible code in") 2 (ppr (ic_info implic)) $$ vcat (map pprSolverReportWithCtxt (NE.toList contras)) TcRnTypeDoesNotHaveFixedRuntimeRep ty prov (ErrInfo extra supplementary) -> mkDecorated [pprTypeDoesNotHaveFixedRuntimeRep ty prov, extra, supplementary] TcRnImplicitLift id_or_name ErrInfo{..} -> mkDecorated $ ( text "The variable" <+> quotes (ppr id_or_name) <+> text "is implicitly lifted in the TH quotation" ) : [errInfoContext, errInfoSupplementary] TcRnUnusedPatternBinds bind -> mkDecorated [hang (text "This pattern-binding binds no variables:") 2 (ppr bind)] TcRnDodgyImports name -> mkDecorated 
[dodgy_msg (text "import") name (dodgy_msg_insert name :: IE GhcPs)] TcRnDodgyExports name -> mkDecorated [dodgy_msg (text "export") name (dodgy_msg_insert name :: IE GhcRn)] TcRnMissingImportList ie -> mkDecorated [ text "The import item" <+> quotes (ppr ie) <+> text "does not have an explicit import list" ] TcRnUnsafeDueToPlugin -> mkDecorated [text "Use of plugins makes the module unsafe"] TcRnModMissingRealSrcSpan mod -> mkDecorated [text "Module does not have a RealSrcSpan:" <+> ppr mod] TcRnIdNotExportedFromModuleSig name mod -> mkDecorated [ text "The identifier" <+> ppr (occName name) <+> text "does not exist in the signature for" <+> ppr mod ] TcRnIdNotExportedFromLocalSig name -> mkDecorated [ text "The identifier" <+> ppr (occName name) <+> text "does not exist in the local signature." ] TcRnShadowedName occ provenance -> let shadowed_locs = case provenance of ShadowedNameProvenanceLocal n -> [text "bound at" <+> ppr n] ShadowedNameProvenanceGlobal gres -> map pprNameProvenance gres in mkSimpleDecorated $ sep [text "This binding for" <+> quotes (ppr occ) <+> text "shadows the existing binding" <> plural shadowed_locs, nest 2 (vcat shadowed_locs)] TcRnDuplicateWarningDecls d rdr_name -> mkSimpleDecorated $ vcat [text "Multiple warning declarations for" <+> quotes (ppr rdr_name), text "also at " <+> ppr (getLocA d)] TcRnSimplifierTooManyIterations simples limit wc -> mkSimpleDecorated $ hang (text "solveWanteds: too many iterations" <+> parens (text "limit =" <+> ppr limit)) 2 (vcat [ text "Unsolved:" <+> ppr wc , text "Simples:" <+> ppr simples ]) TcRnIllegalPatSynDecl rdrname -> mkSimpleDecorated $ hang (text "Illegal pattern synonym declaration for" <+> quotes (ppr rdrname)) 2 (text "Pattern synonym declarations are only valid at top level") TcRnLinearPatSyn ty -> mkSimpleDecorated $ hang (text "Pattern synonyms do not support linear fields (GHC #18806):") 2 (ppr ty) TcRnEmptyRecordUpdate -> mkSimpleDecorated $ text "Empty record update" 
TcRnIllegalFieldPunning fld -> mkSimpleDecorated $ text "Illegal use of punning for field" <+> quotes (ppr fld) TcRnIllegalWildcardsInRecord fld_part -> mkSimpleDecorated $ text "Illegal `..' in record" <+> pprRecordFieldPart fld_part TcRnIllegalWildcardInType mb_name bad mb_ctxt -> mkSimpleDecorated $ vcat [ main_msg, context_msg ] where main_msg :: SDoc main_msg = case bad of WildcardNotLastInConstraint -> hang notAllowed 2 constraint_hint_msg ExtraConstraintWildcardNotAllowed allow_sole -> case allow_sole of SoleExtraConstraintWildcardNotAllowed -> notAllowed SoleExtraConstraintWildcardAllowed -> hang notAllowed 2 sole_msg WildcardsNotAllowedAtAll -> notAllowed context_msg :: SDoc context_msg = case mb_ctxt of Just ctxt -> nest 2 (text "in" <+> pprHsDocContext ctxt) _ -> empty notAllowed, what, wildcard, how :: SDoc notAllowed = what <+> quotes wildcard <+> how wildcard = case mb_name of Nothing -> pprAnonWildCard Just name -> ppr name what | Just _ <- mb_name = text "Named wildcard" | ExtraConstraintWildcardNotAllowed {} <- bad = text "Extra-constraint wildcard" | otherwise = text "Wildcard" how = case bad of WildcardNotLastInConstraint -> text "not allowed in a constraint" _ -> text "not allowed" constraint_hint_msg :: SDoc constraint_hint_msg | Just _ <- mb_name = vcat [ text "Extra-constraint wildcards must be anonymous" , nest 2 (text "e.g f :: (Eq a, _) => blah") ] | otherwise = vcat [ text "except as the last top-level constraint of a type signature" , nest 2 (text "e.g f :: (Eq a, _) => blah") ] sole_msg :: SDoc sole_msg = vcat [ text "except as the sole constraint" , nest 2 (text "e.g., deriving instance _ => Eq (Foo a)") ] TcRnDuplicateFieldName fld_part dups -> mkSimpleDecorated $ hsep [text "duplicate field name", quotes (ppr (NE.head dups)), text "in record", pprRecordFieldPart fld_part] TcRnIllegalViewPattern pat -> mkSimpleDecorated $ vcat [text "Illegal view pattern: " <+> ppr pat] TcRnCharLiteralOutOfRange c -> mkSimpleDecorated $ text 
"character literal out of range: '\\" <> char c <> char '\'' TcRnIllegalWildcardsInConstructor con -> mkSimpleDecorated $ vcat [ text "Illegal `..' notation for constructor" <+> quotes (ppr con) , nest 2 (text "The constructor has no labelled fields") ] TcRnIgnoringAnnotations anns -> mkSimpleDecorated $ text "Ignoring ANN annotation" <> plural anns <> comma <+> text "because this is a stage-1 compiler without -fexternal-interpreter or doesn't support GHCi" TcRnAnnotationInSafeHaskell -> mkSimpleDecorated $ vcat [ text "Annotations are not compatible with Safe Haskell." , text "See https://gitlab.haskell.org/ghc/ghc/issues/10826" ] TcRnInvalidTypeApplication fun_ty hs_ty -> mkSimpleDecorated $ text "Cannot apply expression of type" <+> quotes (ppr fun_ty) $$ text "to a visible type argument" <+> quotes (ppr hs_ty) TcRnTagToEnumMissingValArg -> mkSimpleDecorated $ text "tagToEnum# must appear applied to one value argument" TcRnTagToEnumUnspecifiedResTy ty -> mkSimpleDecorated $ hang (text "Bad call to tagToEnum# at type" <+> ppr ty) 2 (vcat [ text "Specify the type by giving a type signature" , text "e.g. (tagToEnum# x) :: Bool" ]) TcRnTagToEnumResTyNotAnEnum ty -> mkSimpleDecorated $ hang (text "Bad call to tagToEnum# at type" <+> ppr ty) 2 (text "Result type must be an enumeration type") TcRnArrowIfThenElsePredDependsOnResultTy -> mkSimpleDecorated $ text "Predicate type of `ifThenElse' depends on result type" TcRnIllegalHsBootFileDecl -> mkSimpleDecorated $ text "Illegal declarations in an hs-boot file" TcRnRecursivePatternSynonym binds -> mkSimpleDecorated $ hang (text "Recursive pattern synonym definition with following bindings:") 2 (vcat $ map pprLBind . 
bagToList $ binds) where pprLoc loc = parens (text "defined at" <+> ppr loc) pprLBind :: CollectPass GhcRn => GenLocated (SrcSpanAnn' a) (HsBindLR GhcRn idR) -> SDoc pprLBind (L loc bind) = pprWithCommas ppr (collectHsBindBinders CollNoDictBinders bind) <+> pprLoc (locA loc) TcRnPartialTypeSigTyVarMismatch n1 n2 fn_name hs_ty -> mkSimpleDecorated $ hang (text "Couldn't match" <+> quotes (ppr n1) <+> text "with" <+> quotes (ppr n2)) 2 (hang (text "both bound by the partial type signature:") 2 (ppr fn_name <+> dcolon <+> ppr hs_ty)) TcRnPartialTypeSigBadQuantifier n fn_name m_unif_ty hs_ty -> mkSimpleDecorated $ hang (text "Can't quantify over" <+> quotes (ppr n)) 2 (vcat [ hang (text "bound by the partial type signature:") 2 (ppr fn_name <+> dcolon <+> ppr hs_ty) , extra ]) where extra | Just rhs_ty <- m_unif_ty = sep [ quotes (ppr n), text "should really be", quotes (ppr rhs_ty) ] | otherwise = empty TcRnMissingSignature what _ _ -> mkSimpleDecorated $ case what of MissingPatSynSig p -> hang (text "Pattern synonym with no type signature:") 2 (text "pattern" <+> pprPrefixName (patSynName p) <+> dcolon <+> pprPatSynType p) MissingTopLevelBindingSig name ty -> hang (text "Top-level binding with no type signature:") 2 (pprPrefixName name <+> dcolon <+> pprSigmaType ty) MissingTyConKindSig tc cusks_enabled -> hang msg 2 (text "type" <+> pprPrefixName (tyConName tc) <+> dcolon <+> pprKind (tyConKind tc)) where msg | cusks_enabled = text "Top-level type constructor with no standalone kind signature or CUSK:" | otherwise = text "Top-level type constructor with no standalone kind signature:" TcRnPolymorphicBinderMissingSig n ty -> mkSimpleDecorated $ sep [ text "Polymorphic local binding with no type signature:" , nest 2 $ pprPrefixName n <+> dcolon <+> ppr ty ] TcRnOverloadedSig sig -> mkSimpleDecorated $ hang (text "Overloaded signature conflicts with monomorphism restriction") 2 (ppr sig) TcRnTupleConstraintInst _ -> mkSimpleDecorated $ text "You can't specify an 
instance for a tuple constraint" TcRnAbstractClassInst clas -> mkSimpleDecorated $ text "Cannot define instance for abstract class" <+> quotes (ppr (className clas)) TcRnNoClassInstHead tau -> mkSimpleDecorated $ hang (text "Instance head is not headed by a class:") 2 (pprType tau) TcRnUserTypeError ty -> mkSimpleDecorated (pprUserTypeErrorTy ty) TcRnConstraintInKind ty -> mkSimpleDecorated $ text "Illegal constraint in a kind:" <+> pprType ty TcRnUnboxedTupleOrSumTypeFuncArg tuple_or_sum ty -> mkSimpleDecorated $ sep [ text "Illegal unboxed" <+> what <+> text "type as function argument:" , pprType ty ] where what = case tuple_or_sum of UnboxedTupleType -> text "tuple" UnboxedSumType -> text "sum" TcRnLinearFuncInKind ty -> mkSimpleDecorated $ text "Illegal linear function in a kind:" <+> pprType ty TcRnForAllEscapeError ty kind -> mkSimpleDecorated $ vcat [ hang (text "Quantified type's kind mentions quantified type variable") 2 (text "type:" <+> quotes (ppr ty)) , hang (text "where the body of the forall has this kind:") 2 (quotes (pprKind kind)) ] TcRnVDQInTermType ty -> mkSimpleDecorated $ vcat [ hang (text "Illegal visible, dependent quantification" <+> text "in the type of a term:") 2 (pprType ty) , text "(GHC does not yet support this)" ] TcRnBadQuantPredHead ty -> mkSimpleDecorated $ hang (text "Quantified predicate must have a class or type variable head:") 2 (pprType ty) TcRnIllegalTupleConstraint ty -> mkSimpleDecorated $ text "Illegal tuple constraint:" <+> pprType ty TcRnNonTypeVarArgInConstraint ty -> mkSimpleDecorated $ hang (text "Non type-variable argument") 2 (text "in the constraint:" <+> pprType ty) TcRnIllegalImplicitParam ty -> mkSimpleDecorated $ text "Illegal implicit parameter" <+> quotes (pprType ty) TcRnIllegalConstraintSynonymOfKind kind -> mkSimpleDecorated $ text "Illegal constraint synonym of kind:" <+> quotes (pprKind kind) TcRnIllegalClassInst tcf -> mkSimpleDecorated $ vcat [ text "Illegal instance for a" <+> ppr tcf , text "A 
class instance must be for a class" ] TcRnOversaturatedVisibleKindArg ty -> mkSimpleDecorated $ text "Illegal oversaturated visible kind argument:" <+> quotes (char '@' <> pprParendType ty) TcRnBadAssociatedType clas tc -> mkSimpleDecorated $ hsep [ text "Class", quotes (ppr clas) , text "does not have an associated type", quotes (ppr tc) ] TcRnForAllRankErr rank ty -> let herald = case tcSplitForAllTyVars ty of ([], _) -> text "Illegal qualified type:" _ -> text "Illegal polymorphic type:" extra = case rank of MonoTypeConstraint -> text "A constraint must be a monotype" _ -> empty in mkSimpleDecorated $ vcat [hang herald 2 (pprType ty), extra] TcRnMonomorphicBindings bindings -> let pp_bndrs = pprBindings bindings in mkSimpleDecorated $ sep [ text "The Monomorphism Restriction applies to the binding" <> plural bindings , text "for" <+> pp_bndrs ] TcRnOrphanInstance inst -> mkSimpleDecorated $ hsep [ text "Orphan instance:" , pprInstanceHdr inst ] TcRnFunDepConflict unit_state sorted -> let herald = text "Functional dependencies conflict between instance declarations:" in mkSimpleDecorated $ pprWithUnitState unit_state $ (hang herald 2 (pprInstances $ NE.toList sorted)) TcRnDupInstanceDecls unit_state sorted -> let herald = text "Duplicate instance declarations:" in mkSimpleDecorated $ pprWithUnitState unit_state $ (hang herald 2 (pprInstances $ NE.toList sorted)) TcRnConflictingFamInstDecls sortedNE -> let sorted = NE.toList sortedNE in mkSimpleDecorated $ hang (text "Conflicting family instance declarations:") 2 (vcat [ pprCoAxBranchUser (coAxiomTyCon ax) (coAxiomSingleBranch ax) | fi <- sorted , let ax = famInstAxiom fi ]) TcRnFamInstNotInjective rea fam_tc (eqn1 NE.:| rest_eqns) -> let (herald, show_kinds) = case rea of InjErrRhsBareTyVar tys -> (injectivityErrorHerald $$ text "RHS of injective type family equation is a bare" <+> text "type variable" $$ text "but these LHS type and kind patterns are not bare" <+> text "variables:" <+> pprQuotedList tys, False) 
InjErrRhsCannotBeATypeFam -> (injectivityErrorHerald $$ text "RHS of injective type family equation cannot" <+> text "be a type family:", False) InjErrRhsOverlap -> (text "Type family equation right-hand sides overlap; this violates" $$ text "the family's injectivity annotation:", False) InjErrCannotInferFromRhs tvs has_kinds _ -> let show_kinds = has_kinds == YesHasKinds what = if show_kinds then text "Type/kind" else text "Type" body = sep [ what <+> text "variable" <> pluralVarSet tvs <+> pprVarSet tvs (pprQuotedList . scopedSort) , text "cannot be inferred from the right-hand side." ] in (injectivityErrorHerald $$ body $$ text "In the type family equation:", show_kinds) in mkSimpleDecorated $ pprWithExplicitKindsWhen show_kinds $ hang herald 2 (vcat (map (pprCoAxBranchUser fam_tc) (eqn1 : rest_eqns))) TcRnBangOnUnliftedType ty -> mkSimpleDecorated $ text "Strictness flag has no effect on unlifted type" <+> quotes (ppr ty) TcRnMultipleDefaultDeclarations dup_things -> mkSimpleDecorated $ hang (text "Multiple default declarations") 2 (vcat (map pp dup_things)) where pp :: LDefaultDecl GhcRn -> SDoc pp (L locn (DefaultDecl _ _)) = text "here was another default declaration" <+> ppr (locA locn) TcRnBadDefaultType ty deflt_clss -> mkSimpleDecorated $ hang (text "The default type" <+> quotes (ppr ty) <+> text "is not an instance of") 2 (foldr1 (\a b -> a <+> text "or" <+> b) (map (quotes. ppr) deflt_clss)) TcRnPatSynBundledWithNonDataCon -> mkSimpleDecorated $ text "Pattern synonyms can be bundled only with datatypes." 
TcRnPatSynBundledWithWrongType expected_res_ty res_ty -> mkSimpleDecorated $ text "Pattern synonyms can only be bundled with matching type constructors" $$ text "Couldn't match expected type of" <+> quotes (ppr expected_res_ty) <+> text "with actual type of" <+> quotes (ppr res_ty) TcRnDupeModuleExport mod -> mkSimpleDecorated $ hsep [ text "Duplicate" , quotes (text "Module" <+> ppr mod) , text "in export list" ] TcRnExportedModNotImported mod -> mkSimpleDecorated $ formatExportItemError (text "module" <+> ppr mod) "is not imported" TcRnNullExportedModule mod -> mkSimpleDecorated $ formatExportItemError (text "module" <+> ppr mod) "exports nothing" TcRnMissingExportList mod -> mkSimpleDecorated $ formatExportItemError (text "module" <+> ppr mod) "is missing an export list" TcRnExportHiddenComponents export_item -> mkSimpleDecorated $ formatExportItemError (ppr export_item) "attempts to export constructors or class methods that are not visible here" TcRnDuplicateExport child ie1 ie2 -> mkSimpleDecorated $ hsep [ quotes (ppr child) , text "is exported by", quotes (ppr ie1) , text "and", quotes (ppr ie2) ] TcRnExportedParentChildMismatch parent_name ty_thing child parent_names -> mkSimpleDecorated $ text "The type constructor" <+> quotes (ppr parent_name) <+> text "is not the parent of the" <+> text what_is <+> quotes thing <> char '.' $$ text (capitalise what_is) <> text "s can only be exported with their parent type constructor." 
$$ (case parents of [] -> empty [_] -> text "Parent:" _ -> text "Parents:") <+> fsep (punctuate comma parents) where pp_category :: TyThing -> String pp_category (AnId i) | isRecordSelector i = "record selector" pp_category i = tyThingCategory i what_is = pp_category ty_thing thing = ppr child parents = map ppr parent_names TcRnConflictingExports occ child1 gre1 ie1 child2 gre2 ie2 -> mkSimpleDecorated $ vcat [ text "Conflicting exports for" <+> quotes (ppr occ) <> colon , ppr_export child1 gre1 ie1 , ppr_export child2 gre2 ie2 ] where ppr_export child gre ie = nest 3 (hang (quotes (ppr ie) <+> text "exports" <+> quotes (ppr_name child)) 2 (pprNameProvenance gre)) -- DuplicateRecordFields means that nameOccName might be a -- mangled $sel-prefixed thing, in which case show the correct OccName -- alone (but otherwise show the Name so it will have a module -- qualifier) ppr_name (FieldGreName fl) | flIsOverloaded fl = ppr fl | otherwise = ppr (flSelector fl) ppr_name (NormalGreName name) = ppr name TcRnAmbiguousField rupd parent_type -> mkSimpleDecorated $ vcat [ text "The record update" <+> ppr rupd <+> text "with type" <+> ppr parent_type <+> text "is ambiguous." , text "This will not be supported by -XDuplicateRecordFields in future releases of GHC." 
] TcRnMissingFields con fields -> mkSimpleDecorated $ vcat [header, nest 2 rest] where rest | null fields = empty | otherwise = vcat (fmap pprField fields) header = text "Fields of" <+> quotes (ppr con) <+> text "not initialised" <> if null fields then empty else colon TcRnFieldUpdateInvalidType prs -> mkSimpleDecorated $ hang (text "Record update for insufficiently polymorphic field" <> plural prs <> colon) 2 (vcat [ ppr f <+> dcolon <+> ppr ty | (f,ty) <- prs ]) TcRnNoConstructorHasAllFields conflictingFields -> mkSimpleDecorated $ hang (text "No constructor has all these fields:") 2 (pprQuotedList conflictingFields) TcRnMixedSelectors data_name data_sels pat_name pat_syn_sels -> mkSimpleDecorated $ text "Cannot use a mixture of pattern synonym and record selectors" $$ text "Record selectors defined by" <+> quotes (ppr data_name) <> colon <+> pprWithCommas ppr data_sels $$ text "Pattern synonym selectors defined by" <+> quotes (ppr pat_name) <> colon <+> pprWithCommas ppr pat_syn_sels TcRnMissingStrictFields con fields -> mkSimpleDecorated $ vcat [header, nest 2 rest] where rest | null fields = empty -- Happens for non-record constructors -- with strict fields | otherwise = vcat (fmap pprField fields) header = text "Constructor" <+> quotes (ppr con) <+> text "does not have the required strict field(s)" <> if null fields then empty else colon TcRnNoPossibleParentForFields rbinds -> mkSimpleDecorated $ hang (text "No type has all these fields:") 2 (pprQuotedList fields) where fields = map (hfbLHS . 
unLoc) rbinds TcRnBadOverloadedRecordUpdate _rbinds -> mkSimpleDecorated $ text "Record update is ambiguous, and requires a type signature" TcRnStaticFormNotClosed name reason -> mkSimpleDecorated $ quotes (ppr name) <+> text "is used in a static form but it is not closed" <+> text "because it" $$ sep (causes reason) where causes :: NotClosedReason -> [SDoc] causes NotLetBoundReason = [text "is not let-bound."] causes (NotTypeClosed vs) = [ text "has a non-closed type because it contains the" , text "type variables:" <+> pprVarSet vs (hsep . punctuate comma . map (quotes . ppr)) ] causes (NotClosed n reason) = let msg = text "uses" <+> quotes (ppr n) <+> text "which" in case reason of NotClosed _ _ -> msg : causes reason _ -> let (xs0, xs1) = splitAt 1 $ causes reason in fmap (msg <+>) xs0 ++ xs1 TcRnUselessTypeable -> mkSimpleDecorated $ text "Deriving" <+> quotes (ppr typeableClassName) <+> text "has no effect: all types now auto-derive Typeable" TcRnDerivingDefaults cls -> mkSimpleDecorated $ sep [ text "Both DeriveAnyClass and" <+> text "GeneralizedNewtypeDeriving are enabled" , text "Defaulting to the DeriveAnyClass strategy" <+> text "for instantiating" <+> ppr cls ] TcRnNonUnaryTypeclassConstraint ct -> mkSimpleDecorated $ quotes (ppr ct) <+> text "is not a unary constraint, as expected by a deriving clause" TcRnPartialTypeSignatures _ theta -> mkSimpleDecorated $ text "Found type wildcard" <+> quotes (char '_') <+> text "standing for" <+> quotes (pprTheta theta) TcRnCannotDeriveInstance cls cls_tys mb_strat newtype_deriving reason -> mkSimpleDecorated $ derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving True reason TcRnLazyGADTPattern -> mkSimpleDecorated $ hang (text "An existential or GADT data constructor cannot be used") 2 (text "inside a lazy (~) pattern") TcRnArrowProcGADTPattern -> mkSimpleDecorated $ text "Proc patterns cannot use existential or GADT data constructors" TcRnSpecialClassInst cls because_safeHaskell -> mkSimpleDecorated $ 
text "Class" <+> quotes (ppr $ className cls) <+> text "does not support user-specified instances" <> safeHaskell_msg where safeHaskell_msg | because_safeHaskell = text " when Safe Haskell is enabled." | otherwise = dot TcRnForallIdentifier rdr_name -> mkSimpleDecorated $ fsep [ text "The use of" <+> quotes (ppr rdr_name) <+> text "as an identifier", text "will become an error in a future GHC release." ] TcRnTypeEqualityOutOfScope -> mkDecorated [ text "The" <+> quotes (text "~") <+> text "operator is out of scope." $$ text "Assuming it to stand for an equality constraint." , text "NB:" <+> (quotes (text "~") <+> text "used to be built-in syntax but now is a regular type operator" $$ text "exported from Data.Type.Equality and Prelude.") $$ text "If you are using a custom Prelude, consider re-exporting it." , text "This will become an error in a future GHC release." ] TcRnTypeEqualityRequiresOperators -> mkSimpleDecorated $ fsep [ text "The use of" <+> quotes (text "~") <+> text "without TypeOperators", text "will become an error in a future GHC release." ] TcRnIllegalTypeOperator overall_ty op -> mkSimpleDecorated $ text "Illegal operator" <+> quotes (ppr op) <+> text "in type" <+> quotes (ppr overall_ty) TcRnGADTMonoLocalBinds -> mkSimpleDecorated $ fsep [ text "Pattern matching on GADTs without MonoLocalBinds" , text "is fragile." ] TcRnIncorrectNameSpace name _ -> mkSimpleDecorated $ msg where msg -- We are in a type-level namespace, -- and the name is incorrectly at the term-level. | isValNameSpace ns = text "The" <+> what <+> text "does not live in the type-level namespace" -- We are in a term-level namespace, -- and the name is incorrectly at the type-level. 
| otherwise = text "Illegal term-level use of the" <+> what ns = nameNameSpace name what = pprNameSpace ns <+> quotes (ppr name) TcRnNotInScope err name imp_errs _ -> mkSimpleDecorated $ pprScopeError name err $$ vcat (map ppr imp_errs) TcRnUntickedPromotedThing thing -> mkSimpleDecorated $ text "Unticked promoted" <+> what where what :: SDoc what = case thing of UntickedExplicitList -> text "list" <> dot UntickedConstructor fixity nm -> let con = pprUntickedConstructor fixity nm bare_sym = isBareSymbol fixity nm in text "constructor:" <+> con <> if bare_sym then empty else dot TcRnIllegalBuiltinSyntax what rdr_name -> mkSimpleDecorated $ hsep [text "Illegal", what, text "of built-in syntax:", ppr rdr_name] TcRnWarnDefaulting tidy_wanteds tidy_tv default_ty -> mkSimpleDecorated $ hang (hsep $ [ text "Defaulting" ] ++ (case tidy_tv of Nothing -> [] Just tv -> [text "the type variable" , quotes (ppr tv)]) ++ [ text "to type" , quotes (ppr default_ty) , text "in the following constraint" <> plural tidy_wanteds ]) 2 (pprWithArising tidy_wanteds) TcRnForeignImportPrimExtNotSet _decl -> mkSimpleDecorated $ text "`foreign import prim' requires GHCForeignImportPrim." TcRnForeignImportPrimSafeAnn _decl -> mkSimpleDecorated $ text "The safe/unsafe annotation should not be used with `foreign import prim'." 
TcRnForeignFunctionImportAsValue _decl -> mkSimpleDecorated $ text "`value' imports cannot have function types" TcRnFunPtrImportWithoutAmpersand _decl -> mkSimpleDecorated $ text "possible missing & in foreign import of FunPtr" TcRnIllegalForeignDeclBackend _decl _backend expectedBknds -> mkSimpleDecorated $ text "Illegal foreign declaration:" <+> case expectedBknds of COrAsmOrLlvm -> text "requires unregisterised, llvm (-fllvm) or native code generation (-fasm)" COrAsmOrLlvmOrInterp -> text "requires interpreted, unregisterised, llvm or native code generation" TcRnUnsupportedCallConv _decl unsupportedCC -> mkSimpleDecorated $ case unsupportedCC of StdCallConvUnsupported -> text "the 'stdcall' calling convention is unsupported on this platform," $$ text "treating as ccall" PrimCallConvUnsupported -> text "The `prim' calling convention can only be used with `foreign import'" JavaScriptCallConvUnsupported -> text "The `javascript' calling convention is unsupported on this platform" TcRnIllegalForeignType mArgOrResult reason -> mkSimpleDecorated $ hang msg 2 extra where arg_or_res = case mArgOrResult of Nothing -> empty Just Arg -> text "argument" Just Result -> text "result" msg = hsep [ text "Unacceptable", arg_or_res , text "type in foreign declaration:"] extra = case reason of TypeCannotBeMarshaled ty why -> let innerMsg = quotes (ppr ty) <+> text "cannot be marshalled in a foreign call" in case why of NotADataType -> quotes (ppr ty) <+> text "is not a data type" NewtypeDataConNotInScope Nothing -> hang innerMsg 2 $ text "because its data constructor is not in scope" NewtypeDataConNotInScope (Just tc) -> hang innerMsg 2 $ text "because the data constructor for" <+> quotes (ppr tc) <+> text "is not in scope" UnliftedFFITypesNeeded -> innerMsg $$ text "UnliftedFFITypes is required to marshal unlifted types" NotABoxedMarshalableTyCon -> innerMsg ForeignLabelNotAPtr -> innerMsg $$ text "A foreign-imported address (via &foo) must have type (Ptr a) or (FunPtr a)" 
NotSimpleUnliftedType -> innerMsg $$ text "foreign import prim only accepts simple unlifted types" ForeignDynNotPtr expected ty -> vcat [ text "Expected: Ptr/FunPtr" <+> pprParendType expected <> comma, text " Actual:" <+> ppr ty ] SafeHaskellMustBeInIO -> text "Safe Haskell is on, all FFI imports must be in the IO monad" IOResultExpected -> text "IO result type expected" UnexpectedNestedForall -> text "Unexpected nested forall" LinearTypesNotAllowed -> text "Linear types are not supported in FFI declarations, see #18472" OneArgExpected -> text "One argument expected" AtLeastOneArgExpected -> text "At least one argument expected" TcRnInvalidCIdentifier target -> mkSimpleDecorated $ sep [quotes (ppr target) <+> text "is not a valid C identifier"] TcRnCannotDefaultConcrete frr -> mkSimpleDecorated $ ppr (frr_context frr) $$ text "cannot be assigned a fixed runtime representation," <+> text "not even by defaulting." diagnosticReason = \case TcRnUnknownMessage m -> diagnosticReason m TcRnMessageWithInfo _ msg_with_info -> case msg_with_info of TcRnMessageDetailed _ m -> diagnosticReason m TcRnSolverReport _ reason _ -> reason -- Error, or a Warning if we are deferring type errors TcRnRedundantConstraints {} -> WarningWithFlag Opt_WarnRedundantConstraints TcRnInaccessibleCode {} -> WarningWithFlag Opt_WarnInaccessibleCode TcRnTypeDoesNotHaveFixedRuntimeRep{} -> ErrorWithoutFlag TcRnImplicitLift{} -> WarningWithFlag Opt_WarnImplicitLift TcRnUnusedPatternBinds{} -> WarningWithFlag Opt_WarnUnusedPatternBinds TcRnDodgyImports{} -> WarningWithFlag Opt_WarnDodgyImports TcRnDodgyExports{} -> WarningWithFlag Opt_WarnDodgyExports TcRnMissingImportList{} -> WarningWithFlag Opt_WarnMissingImportList TcRnUnsafeDueToPlugin{} -> WarningWithoutFlag TcRnModMissingRealSrcSpan{} -> ErrorWithoutFlag TcRnIdNotExportedFromModuleSig{} -> ErrorWithoutFlag TcRnIdNotExportedFromLocalSig{} -> ErrorWithoutFlag TcRnShadowedName{} -> WarningWithFlag Opt_WarnNameShadowing TcRnDuplicateWarningDecls{} 
-> ErrorWithoutFlag TcRnSimplifierTooManyIterations{} -> ErrorWithoutFlag TcRnIllegalPatSynDecl{} -> ErrorWithoutFlag TcRnLinearPatSyn{} -> ErrorWithoutFlag TcRnEmptyRecordUpdate -> ErrorWithoutFlag TcRnIllegalFieldPunning{} -> ErrorWithoutFlag TcRnIllegalWildcardsInRecord{} -> ErrorWithoutFlag TcRnIllegalWildcardInType{} -> ErrorWithoutFlag TcRnDuplicateFieldName{} -> ErrorWithoutFlag TcRnIllegalViewPattern{} -> ErrorWithoutFlag TcRnCharLiteralOutOfRange{} -> ErrorWithoutFlag TcRnIllegalWildcardsInConstructor{} -> ErrorWithoutFlag TcRnIgnoringAnnotations{} -> WarningWithoutFlag TcRnAnnotationInSafeHaskell -> ErrorWithoutFlag TcRnInvalidTypeApplication{} -> ErrorWithoutFlag TcRnTagToEnumMissingValArg -> ErrorWithoutFlag TcRnTagToEnumUnspecifiedResTy{} -> ErrorWithoutFlag TcRnTagToEnumResTyNotAnEnum{} -> ErrorWithoutFlag TcRnArrowIfThenElsePredDependsOnResultTy -> ErrorWithoutFlag TcRnIllegalHsBootFileDecl -> ErrorWithoutFlag TcRnRecursivePatternSynonym{} -> ErrorWithoutFlag TcRnPartialTypeSigTyVarMismatch{} -> ErrorWithoutFlag TcRnPartialTypeSigBadQuantifier{} -> ErrorWithoutFlag TcRnMissingSignature what exported overridden -> WarningWithFlag $ missingSignatureWarningFlag what exported overridden TcRnPolymorphicBinderMissingSig{} -> WarningWithFlag Opt_WarnMissingLocalSignatures TcRnOverloadedSig{} -> ErrorWithoutFlag TcRnTupleConstraintInst{} -> ErrorWithoutFlag TcRnAbstractClassInst{} -> ErrorWithoutFlag TcRnNoClassInstHead{} -> ErrorWithoutFlag TcRnUserTypeError{} -> ErrorWithoutFlag TcRnConstraintInKind{} -> ErrorWithoutFlag TcRnUnboxedTupleOrSumTypeFuncArg{} -> ErrorWithoutFlag TcRnLinearFuncInKind{} -> ErrorWithoutFlag TcRnForAllEscapeError{} -> ErrorWithoutFlag TcRnVDQInTermType{} -> ErrorWithoutFlag TcRnBadQuantPredHead{} -> ErrorWithoutFlag TcRnIllegalTupleConstraint{} -> ErrorWithoutFlag TcRnNonTypeVarArgInConstraint{} -> ErrorWithoutFlag TcRnIllegalImplicitParam{} -> ErrorWithoutFlag TcRnIllegalConstraintSynonymOfKind{} -> ErrorWithoutFlag 
TcRnIllegalClassInst{} -> ErrorWithoutFlag TcRnOversaturatedVisibleKindArg{} -> ErrorWithoutFlag TcRnBadAssociatedType{} -> ErrorWithoutFlag TcRnForAllRankErr{} -> ErrorWithoutFlag TcRnMonomorphicBindings{} -> WarningWithFlag Opt_WarnMonomorphism TcRnOrphanInstance{} -> WarningWithFlag Opt_WarnOrphans TcRnFunDepConflict{} -> ErrorWithoutFlag TcRnDupInstanceDecls{} -> ErrorWithoutFlag TcRnConflictingFamInstDecls{} -> ErrorWithoutFlag TcRnFamInstNotInjective{} -> ErrorWithoutFlag TcRnBangOnUnliftedType{} -> WarningWithFlag Opt_WarnRedundantStrictnessFlags TcRnMultipleDefaultDeclarations{} -> ErrorWithoutFlag TcRnBadDefaultType{} -> ErrorWithoutFlag TcRnPatSynBundledWithNonDataCon{} -> ErrorWithoutFlag TcRnPatSynBundledWithWrongType{} -> ErrorWithoutFlag TcRnDupeModuleExport{} -> WarningWithFlag Opt_WarnDuplicateExports TcRnExportedModNotImported{} -> ErrorWithoutFlag TcRnNullExportedModule{} -> WarningWithFlag Opt_WarnDodgyExports TcRnMissingExportList{} -> WarningWithFlag Opt_WarnMissingExportList TcRnExportHiddenComponents{} -> ErrorWithoutFlag TcRnDuplicateExport{} -> WarningWithFlag Opt_WarnDuplicateExports TcRnExportedParentChildMismatch{} -> ErrorWithoutFlag TcRnConflictingExports{} -> ErrorWithoutFlag TcRnAmbiguousField{} -> WarningWithFlag Opt_WarnAmbiguousFields TcRnMissingFields{} -> WarningWithFlag Opt_WarnMissingFields TcRnFieldUpdateInvalidType{} -> ErrorWithoutFlag TcRnNoConstructorHasAllFields{} -> ErrorWithoutFlag TcRnMixedSelectors{} -> ErrorWithoutFlag TcRnMissingStrictFields{} -> ErrorWithoutFlag TcRnNoPossibleParentForFields{} -> ErrorWithoutFlag TcRnBadOverloadedRecordUpdate{} -> ErrorWithoutFlag TcRnStaticFormNotClosed{} -> ErrorWithoutFlag TcRnUselessTypeable -> WarningWithFlag Opt_WarnDerivingTypeable TcRnDerivingDefaults{} -> WarningWithFlag Opt_WarnDerivingDefaults TcRnNonUnaryTypeclassConstraint{} -> ErrorWithoutFlag TcRnPartialTypeSignatures{} -> WarningWithFlag Opt_WarnPartialTypeSignatures TcRnCannotDeriveInstance _ _ _ _ rea -> case rea 
of DerivErrNotWellKinded{} -> ErrorWithoutFlag DerivErrSafeHaskellGenericInst -> ErrorWithoutFlag DerivErrDerivingViaWrongKind{} -> ErrorWithoutFlag DerivErrNoEtaReduce{} -> ErrorWithoutFlag DerivErrBootFileFound -> ErrorWithoutFlag DerivErrDataConsNotAllInScope{} -> ErrorWithoutFlag DerivErrGNDUsedOnData -> ErrorWithoutFlag DerivErrNullaryClasses -> ErrorWithoutFlag DerivErrLastArgMustBeApp -> ErrorWithoutFlag DerivErrNoFamilyInstance{} -> ErrorWithoutFlag DerivErrNotStockDeriveable{} -> ErrorWithoutFlag DerivErrHasAssociatedDatatypes{} -> ErrorWithoutFlag DerivErrNewtypeNonDeriveableClass -> ErrorWithoutFlag DerivErrCannotEtaReduceEnough{} -> ErrorWithoutFlag DerivErrOnlyAnyClassDeriveable{} -> ErrorWithoutFlag DerivErrNotDeriveable{} -> ErrorWithoutFlag DerivErrNotAClass{} -> ErrorWithoutFlag DerivErrNoConstructors{} -> ErrorWithoutFlag DerivErrLangExtRequired{} -> ErrorWithoutFlag DerivErrDunnoHowToDeriveForType{} -> ErrorWithoutFlag DerivErrMustBeEnumType{} -> ErrorWithoutFlag DerivErrMustHaveExactlyOneConstructor{} -> ErrorWithoutFlag DerivErrMustHaveSomeParameters{} -> ErrorWithoutFlag DerivErrMustNotHaveClassContext{} -> ErrorWithoutFlag DerivErrBadConstructor{} -> ErrorWithoutFlag DerivErrGenerics{} -> ErrorWithoutFlag DerivErrEnumOrProduct{} -> ErrorWithoutFlag TcRnLazyGADTPattern -> ErrorWithoutFlag TcRnArrowProcGADTPattern -> ErrorWithoutFlag TcRnSpecialClassInst {} -> ErrorWithoutFlag TcRnForallIdentifier {} -> WarningWithFlag Opt_WarnForallIdentifier TcRnTypeEqualityOutOfScope -> WarningWithFlag Opt_WarnTypeEqualityOutOfScope TcRnTypeEqualityRequiresOperators -> WarningWithFlag Opt_WarnTypeEqualityRequiresOperators TcRnIllegalTypeOperator {} -> ErrorWithoutFlag TcRnGADTMonoLocalBinds {} -> WarningWithFlag Opt_WarnGADTMonoLocalBinds TcRnIncorrectNameSpace {} -> ErrorWithoutFlag TcRnNotInScope {} -> ErrorWithoutFlag TcRnUntickedPromotedThing {} -> WarningWithFlag Opt_WarnUntickedPromotedConstructors TcRnIllegalBuiltinSyntax {} -> ErrorWithoutFlag 
    -- Tail of 'diagnosticReason': severities for FFI-related messages.
    TcRnWarnDefaulting {}
      -> WarningWithFlag Opt_WarnTypeDefaults
    TcRnForeignImportPrimExtNotSet{}
      -> ErrorWithoutFlag
    TcRnForeignImportPrimSafeAnn{}
      -> ErrorWithoutFlag
    TcRnForeignFunctionImportAsValue{}
      -> ErrorWithoutFlag
    TcRnFunPtrImportWithoutAmpersand{}
      -> WarningWithFlag Opt_WarnDodgyForeignImports
    TcRnIllegalForeignDeclBackend{}
      -> ErrorWithoutFlag
    TcRnUnsupportedCallConv _ unsupportedCC
      -- Only the 'stdcall' convention is merely warned about; every
      -- other unsupported calling convention is a hard error.
      -> case unsupportedCC of
           StdCallConvUnsupported -> WarningWithFlag Opt_WarnUnsupportedCallingConventions
           _ -> ErrorWithoutFlag
    TcRnIllegalForeignType{}
      -> ErrorWithoutFlag
    TcRnInvalidCIdentifier{}
      -> ErrorWithoutFlag
    TcRnCannotDefaultConcrete{}
      -> ErrorWithoutFlag

  -- | Hints to attach to each 'TcRnMessage': suggested language
  -- extensions, renamings, signatures, etc.  One alternative per
  -- message constructor; most messages carry no hints ('noHints').
  diagnosticHints = \case
    -- Wrapped messages delegate to the hints of the payload.
    TcRnUnknownMessage m
      -> diagnosticHints m
    TcRnMessageWithInfo _ msg_with_info
      -> case msg_with_info of
           TcRnMessageDetailed _ m -> diagnosticHints m
    -- Solver reports carry their hints explicitly.
    TcRnSolverReport _ _ hints
      -> hints
    TcRnRedundantConstraints{} -> noHints
    TcRnInaccessibleCode{} -> noHints
    TcRnTypeDoesNotHaveFixedRuntimeRep{} -> noHints
    TcRnImplicitLift{} -> noHints
    TcRnUnusedPatternBinds{} -> noHints
    TcRnDodgyImports{} -> noHints
    TcRnDodgyExports{} -> noHints
    TcRnMissingImportList{} -> noHints
    TcRnUnsafeDueToPlugin{} -> noHints
    TcRnModMissingRealSrcSpan{} -> noHints
    TcRnIdNotExportedFromModuleSig name mod
      -> [SuggestAddToHSigExportList name $ Just mod]
    TcRnIdNotExportedFromLocalSig name
      -> [SuggestAddToHSigExportList name Nothing]
    TcRnShadowedName{} -> noHints
    TcRnDuplicateWarningDecls{} -> noHints
    TcRnSimplifierTooManyIterations{} -> [SuggestIncreaseSimplifierIterations]
    TcRnIllegalPatSynDecl{} -> noHints
    TcRnLinearPatSyn{} -> noHints
    TcRnEmptyRecordUpdate{} -> noHints
    TcRnIllegalFieldPunning{} -> [suggestExtension LangExt.NamedFieldPuns]
    TcRnIllegalWildcardsInRecord{} -> [suggestExtension LangExt.RecordWildCards]
    TcRnIllegalWildcardInType{} -> noHints
    TcRnDuplicateFieldName{} -> noHints
    TcRnIllegalViewPattern{} -> [suggestExtension LangExt.ViewPatterns]
    TcRnCharLiteralOutOfRange{} -> noHints
    TcRnIllegalWildcardsInConstructor{} -> noHints
    TcRnIgnoringAnnotations{} -> noHints
    TcRnAnnotationInSafeHaskell -> noHints
    TcRnInvalidTypeApplication{} -> noHints
    TcRnTagToEnumMissingValArg -> noHints
    TcRnTagToEnumUnspecifiedResTy{} -> noHints
    TcRnTagToEnumResTyNotAnEnum{} -> noHints
    TcRnArrowIfThenElsePredDependsOnResultTy -> noHints
    TcRnIllegalHsBootFileDecl -> noHints
    TcRnRecursivePatternSynonym{} -> noHints
    TcRnPartialTypeSigTyVarMismatch{} -> noHints
    TcRnPartialTypeSigBadQuantifier{} -> noHints
    TcRnMissingSignature {} -> noHints
    TcRnPolymorphicBinderMissingSig{} -> noHints
    TcRnOverloadedSig{} -> noHints
    TcRnTupleConstraintInst{} -> noHints
    TcRnAbstractClassInst{} -> noHints
    TcRnNoClassInstHead{} -> noHints
    TcRnUserTypeError{} -> noHints
    TcRnConstraintInKind{} -> noHints
    TcRnUnboxedTupleOrSumTypeFuncArg tuple_or_sum _
      -- Suggest UnboxedTuples or UnboxedSums, matching the offender.
      -> [suggestExtension $ unboxedTupleOrSumExtension tuple_or_sum]
    TcRnLinearFuncInKind{} -> noHints
    TcRnForAllEscapeError{} -> noHints
    TcRnVDQInTermType{} -> noHints
    TcRnBadQuantPredHead{} -> noHints
    TcRnIllegalTupleConstraint{} -> [suggestExtension LangExt.ConstraintKinds]
    TcRnNonTypeVarArgInConstraint{} -> [suggestExtension LangExt.FlexibleContexts]
    TcRnIllegalImplicitParam{} -> noHints
    TcRnIllegalConstraintSynonymOfKind{} -> [suggestExtension LangExt.ConstraintKinds]
    TcRnIllegalClassInst{} -> noHints
    TcRnOversaturatedVisibleKindArg{} -> noHints
    TcRnBadAssociatedType{} -> noHints
    TcRnForAllRankErr rank _
      -- The extension to suggest depends on which rank restriction
      -- was violated.
      -> case rank of
           LimitedRank{}      -> [suggestExtension LangExt.RankNTypes]
           MonoTypeRankZero   -> [suggestExtension LangExt.RankNTypes]
           MonoTypeTyConArg   -> [suggestExtension LangExt.ImpredicativeTypes]
           MonoTypeSynArg     -> [suggestExtension LangExt.LiberalTypeSynonyms]
           MonoTypeConstraint -> [suggestExtension LangExt.QuantifiedConstraints]
           _                  -> noHints
    TcRnMonomorphicBindings bindings
      -- Only suggest adding type signatures when we actually have
      -- some binder names to point at.
      -> case bindings of
           []     -> noHints
           (x:xs) -> [SuggestAddTypeSignatures $ NamedBindings (x NE.:| xs)]
    TcRnOrphanInstance{} -> [SuggestFixOrphanInstance]
    TcRnFunDepConflict{} -> noHints
    TcRnDupInstanceDecls{} -> noHints
    TcRnConflictingFamInstDecls{} -> noHints
    TcRnFamInstNotInjective rea _ _
      -> case rea of
           InjErrRhsBareTyVar{}      -> noHints
           InjErrRhsCannotBeATypeFam -> noHints
           InjErrRhsOverlap          -> noHints
           InjErrCannotInferFromRhs _ _ suggestUndInst
             | YesSuggestUndecidableInstaces <- suggestUndInst
             -> [suggestExtension LangExt.UndecidableInstances]
             | otherwise
             -> noHints
    TcRnBangOnUnliftedType{} -> noHints
    TcRnMultipleDefaultDeclarations{} -> noHints
    TcRnBadDefaultType{} -> noHints
    TcRnPatSynBundledWithNonDataCon{} -> noHints
    TcRnPatSynBundledWithWrongType{} -> noHints
    TcRnDupeModuleExport{} -> noHints
    TcRnExportedModNotImported{} -> noHints
    TcRnNullExportedModule{} -> noHints
    TcRnMissingExportList{} -> noHints
    TcRnExportHiddenComponents{} -> noHints
    TcRnDuplicateExport{} -> noHints
    TcRnExportedParentChildMismatch{} -> noHints
    TcRnConflictingExports{} -> noHints
    TcRnAmbiguousField{} -> noHints
    TcRnMissingFields{} -> noHints
    TcRnFieldUpdateInvalidType{} -> noHints
    TcRnNoConstructorHasAllFields{} -> noHints
    TcRnMixedSelectors{} -> noHints
    TcRnMissingStrictFields{} -> noHints
    TcRnNoPossibleParentForFields{} -> noHints
    TcRnBadOverloadedRecordUpdate{} -> noHints
    TcRnStaticFormNotClosed{} -> noHints
    TcRnUselessTypeable -> noHints
    TcRnDerivingDefaults{} -> [useDerivingStrategies]
    TcRnNonUnaryTypeclassConstraint{} -> noHints
    TcRnPartialTypeSignatures suggestParSig _
      -> case suggestParSig of
           YesSuggestPartialTypeSignatures
             -> let info = text "to use the inferred type"
                in [suggestExtensionWithInfo info LangExt.PartialTypeSignatures]
           NoSuggestPartialTypeSignatures
             -> noHints
    -- Deriving failures get their hints from a dedicated dispatcher.
    TcRnCannotDeriveInstance cls _ _ newtype_deriving rea
      -> deriveInstanceErrReasonHints cls newtype_deriving rea
    TcRnLazyGADTPattern -> noHints
    TcRnArrowProcGADTPattern -> noHints
    TcRnSpecialClassInst {} -> noHints
    TcRnForallIdentifier {} -> [SuggestRenameForall]
    TcRnTypeEqualityOutOfScope -> noHints
    TcRnTypeEqualityRequiresOperators -> [suggestExtension LangExt.TypeOperators]
    TcRnIllegalTypeOperator {} -> [suggestExtension LangExt.TypeOperators]
    TcRnGADTMonoLocalBinds {} -> [suggestAnyExtension [LangExt.GADTs, LangExt.TypeFamilies]]
    TcRnIncorrectNameSpace nm is_th_use
      -- Inside Template Haskell, suggest the appropriate quote/tick.
      | is_th_use -> [SuggestAppropriateTHTick $ nameNameSpace nm]
      | otherwise -> noHints
    -- Combine the scope-error-specific hints with any hints stored
    -- in the message itself.
    TcRnNotInScope err _ _ hints
      -> scopeErrorHints err ++ hints
    TcRnUntickedPromotedThing thing
      -> [SuggestAddTick thing]
    TcRnIllegalBuiltinSyntax {} -> noHints
    TcRnWarnDefaulting {} -> noHints
    TcRnForeignImportPrimExtNotSet{} -> [suggestExtension LangExt.GHCForeignImportPrim]
    TcRnForeignImportPrimSafeAnn{} -> noHints
    TcRnForeignFunctionImportAsValue{} -> noHints
    TcRnFunPtrImportWithoutAmpersand{} -> noHints
    TcRnIllegalForeignDeclBackend{} -> noHints
    TcRnUnsupportedCallConv{} -> noHints
    TcRnIllegalForeignType _ reason
      -> case reason of
           TypeCannotBeMarshaled _ why
             | NewtypeDataConNotInScope{} <- why -> [SuggestImportingDataCon]
             | UnliftedFFITypesNeeded <- why -> [suggestExtension LangExt.UnliftedFFITypes]
           _ -> noHints
    TcRnInvalidCIdentifier{} -> noHints
    TcRnCannotDefaultConcrete{} -> [SuggestAddTypeSignatures UnnamedBinding]

-- | Hints for a failed @deriving@ attempt, keyed on the reason the
-- derivation was rejected.  Mostly suggests enabling an extension
-- (DeriveAnyClass, GeneralizedNewtypeDeriving, ...).
deriveInstanceErrReasonHints :: Class
                             -> UsingGeneralizedNewtypeDeriving
                             -> DeriveInstanceErrReason
                             -> [GhcHint]
deriveInstanceErrReasonHints cls newtype_deriving = \case
  DerivErrNotWellKinded _ _ n_args_to_keep
    -- PolyKinds only helps for Generic1-style derivation.
    | cls `hasKey` gen1ClassKey && n_args_to_keep >= 0
    -> [suggestExtension LangExt.PolyKinds]
    | otherwise
    -> noHints
  DerivErrSafeHaskellGenericInst -> noHints
  DerivErrDerivingViaWrongKind{} -> noHints
  DerivErrNoEtaReduce{} -> noHints
  DerivErrBootFileFound -> noHints
  DerivErrDataConsNotAllInScope{} -> noHints
  DerivErrGNDUsedOnData -> noHints
  DerivErrNullaryClasses -> noHints
  DerivErrLastArgMustBeApp -> noHints
  DerivErrNoFamilyInstance{} -> noHints
  DerivErrNotStockDeriveable deriveAnyClassEnabled
    -- Don't suggest DeriveAnyClass if it is already on.
    | deriveAnyClassEnabled == NoDeriveAnyClassEnabled
    -> [suggestExtension LangExt.DeriveAnyClass]
    | otherwise
    -> noHints
  DerivErrHasAssociatedDatatypes{} -> noHints
  -- Continuation of 'deriveInstanceErrReasonHints'.
  DerivErrNewtypeNonDeriveableClass
    | newtype_deriving == NoGeneralizedNewtypeDeriving
    -> [useGND]
    | otherwise
    -> noHints
  DerivErrCannotEtaReduceEnough{}
    | newtype_deriving == NoGeneralizedNewtypeDeriving
    -> [useGND]
    | otherwise
    -> noHints
  DerivErrOnlyAnyClassDeriveable _ deriveAnyClassEnabled
    | deriveAnyClassEnabled == NoDeriveAnyClassEnabled
    -> [suggestExtension LangExt.DeriveAnyClass]
    | otherwise
    -> noHints
  DerivErrNotDeriveable deriveAnyClassEnabled
    | deriveAnyClassEnabled == NoDeriveAnyClassEnabled
    -> [suggestExtension LangExt.DeriveAnyClass]
    | otherwise
    -> noHints
  DerivErrNotAClass{} -> noHints
  DerivErrNoConstructors{}
    -> let info = text "to enable deriving for empty data types"
       in [useExtensionInOrderTo info LangExt.EmptyDataDeriving]
  DerivErrLangExtRequired{}
    -- This is a slightly weird corner case of GHC: we are failing
    -- to derive a typeclass instance because a particular 'Extension'
    -- is not enabled (and so we report in the main error), but here
    -- we don't want to /repeat/ to enable the extension in the hint.
    -> noHints
  DerivErrDunnoHowToDeriveForType{} -> noHints
  DerivErrMustBeEnumType rep_tc
    -- We want to suggest GND only if this /is/ a newtype.
    | newtype_deriving == NoGeneralizedNewtypeDeriving && isNewTyCon rep_tc
    -> [useGND]
    | otherwise
    -> noHints
  DerivErrMustHaveExactlyOneConstructor{} -> noHints
  DerivErrMustHaveSomeParameters{} -> noHints
  DerivErrMustNotHaveClassContext{} -> noHints
  DerivErrBadConstructor wcard _
    -> case wcard of
         Nothing             -> noHints
         Just YesHasWildcard -> [SuggestFillInWildcardConstraint]
         Just NoHasWildcard  -> [SuggestAddStandaloneDerivation]
  DerivErrGenerics{} -> noHints
  DerivErrEnumOrProduct{} -> noHints

-- | Decorate an important 'DecoratedSDoc' with the stored error
-- context and supplementary text, rendering everything with the
-- given 'UnitState' so unit names print readably.
messageWithInfoDiagnosticMessage :: UnitState
                                 -> ErrInfo
                                 -> DecoratedSDoc
                                 -> DecoratedSDoc
messageWithInfoDiagnosticMessage unit_state ErrInfo{..} important =
  let err_info' = map (pprWithUnitState unit_state) [errInfoContext, errInfoSupplementary]
  in (mapDecoratedSDoc (pprWithUnitState unit_state) important) `unionDecoratedSDoc`
     mkDecorated err_info'

-- | Shared message for "dodgy" import/export items: the item implies
-- children (constructors or class methods) that do not exist.
dodgy_msg :: (Outputable a, Outputable b) => SDoc -> a -> b -> SDoc
dodgy_msg kind tc ie
  = sep [ text "The" <+> kind <+> text "item"
                     <+> quotes (ppr ie)
                <+> text "suggests that",
          quotes (ppr tc) <+> text "has (in-scope) constructors or class methods,",
          text "but it has none" ]

dodgy_msg_insert :: forall p .
                    IdP (GhcPass p) -> IE (GhcPass p)
-- Build the @T(..)@-style import/export item used in dodgy-item messages.
dodgy_msg_insert tc = IEThingAll noAnn ii
  where
    ii :: LIEWrappedName (IdP (GhcPass p))
    ii = noLocA (IEName $ noLocA tc)

-- | Header line for a representation-polymorphism error, followed by
-- the offending type rendered by 'format_frr_err'.
pprTypeDoesNotHaveFixedRuntimeRep :: Type -> FixedRuntimeRepProvenance -> SDoc
pprTypeDoesNotHaveFixedRuntimeRep ty prov =
  let what = pprFixedRuntimeRepProvenance prov
  in text "The" <+> what <+> text "does not have a fixed runtime representation:"
  $$ format_frr_err ty

format_frr_err :: Type -- ^ the type which doesn't have a fixed runtime representation
               -> SDoc
-- Bullet line "ty :: kind", tidied so both print with consistent names.
format_frr_err ty
  = (bullet <+> ppr tidy_ty <+> dcolon <+> ppr tidy_ki)
  where
    (tidy_env, tidy_ty) = tidyOpenType emptyTidyEnv ty
    tidy_ki             = tidyType tidy_env (tcTypeKind ty)

-- | Render one record field as "name :: type".
pprField :: (FieldLabelString, TcType) -> SDoc
pprField (f,ty) = ppr f <+> dcolon <+> ppr ty

-- | The noun used for each kind of record-field occurrence.
pprRecordFieldPart :: RecordFieldPart -> SDoc
pprRecordFieldPart = \case
  RecordFieldConstructor{} -> text "construction"
  RecordFieldPattern{}     -> text "pattern"
  RecordFieldUpdate        -> text "update"

-- | Comma-separated, quoted list of binder names.
pprBindings :: [Name] -> SDoc
pprBindings = pprWithCommas (quotes . ppr)

-- | Common first line for injectivity-violation messages.
injectivityErrorHerald :: SDoc
injectivityErrorHerald =
  text "Type family equation violates the family's injectivity annotation."

-- | "The export item 'thing' <reason>".
formatExportItemError :: SDoc -> String -> SDoc
formatExportItemError exportedThing reason =
  hsep [ text "The export item"
       , quotes exportedThing
       , text reason ]

-- | What warning flag is associated with the given missing signature?
-- Maps a missing-signature report to its controlling warning flag.
-- The Bool is whether the "exported" variant of the flag has been
-- overridden; when an exported binder is not overridden we use the
-- stricter -Wmissing-exported-* flag.
missingSignatureWarningFlag :: MissingSignature -> Exported -> Bool -> WarningFlag
missingSignatureWarningFlag (MissingTopLevelBindingSig {}) exported overridden
  | IsExported <- exported
  , not overridden
  = Opt_WarnMissingExportedSignatures
  | otherwise
  = Opt_WarnMissingSignatures
missingSignatureWarningFlag (MissingPatSynSig {}) exported overridden
  | IsExported <- exported
  , not overridden
  = Opt_WarnMissingExportedPatternSynonymSignatures
  | otherwise
  = Opt_WarnMissingPatternSynonymSignatures
missingSignatureWarningFlag (MissingTyConKindSig {}) _ _
  = Opt_WarnMissingKindSignatures

-- | Hint: enable DerivingStrategies to pick a different strategy.
useDerivingStrategies :: GhcHint
useDerivingStrategies =
  useExtensionInOrderTo (text "to pick a different strategy") LangExt.DerivingStrategies

-- | Hint: enable GeneralizedNewtypeDeriving.
useGND :: GhcHint
useGND = let info = text "for GHC's" <+> text "newtype-deriving extension"
         in suggestExtensionWithInfo info LangExt.GeneralizedNewtypeDeriving

cannotMakeDerivedInstanceHerald :: Class
                                -> [Type]
                                -> Maybe (DerivStrategy GhcTc)
                                -> UsingGeneralizedNewtypeDeriving
                                -> Bool -- ^ If False, only prints the why.
                                -> SDoc
                                -> SDoc
-- Wraps a deriving-failure explanation ("why") with the standard
-- "Can't make a derived instance of ..." herald, unless the caller
-- asked for the bare explanation only.
cannotMakeDerivedInstanceHerald cls cls_args mb_strat newtype_deriving pprHerald why =
  if pprHerald
     then sep [(hang (text "Can't make a derived instance of")
                   2 (quotes (ppr pred) <+> via_mechanism)
                $$ nest 2 extra) <> colon,
               nest 2 why]
     else why
  where
    strat_used = isJust mb_strat
    -- Mention GND only when no explicit strategy was given but GND
    -- was in play.
    extra | not strat_used, (newtype_deriving == YesGeneralizedNewtypeDeriving)
          = text "(even with cunning GeneralizedNewtypeDeriving)"
          | otherwise = empty
    pred = mkClassPred cls cls_args
    via_mechanism | strat_used
                  , Just strat <- mb_strat
                  = text "with the" <+> (derivStrategyName strat) <+> text "strategy"
                  | otherwise
                  = empty

-- | "Constructor 'con' <msg>".
badCon :: DataCon -> SDoc -> SDoc
badCon con msg = text "Constructor" <+> quotes (ppr con) <+> msg

-- | Render a 'DeriveInstanceErrReason' as the body of a deriving
-- failure, usually wrapped by 'cannotMakeDerivedInstanceHerald'.
derivErrDiagnosticMessage :: Class
                          -> [Type]
                          -> Maybe (DerivStrategy GhcTc)
                          -> UsingGeneralizedNewtypeDeriving
                          -> Bool -- If True, includes the herald \"can't make a derived..\"
                          -> DeriveInstanceErrReason
                          -> SDoc
derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving pprHerald = \case
  DerivErrNotWellKinded tc cls_kind _
    -> sep [ hang (text "Cannot derive well-kinded instance of form"
                     <+> quotes (pprClassPred cls cls_tys
                                   <+> parens (ppr tc <+> text "...")))
                2 empty
           , nest 2 (text "Class" <+> quotes (ppr cls)
                 <+> text "expects an argument of kind"
                 <+> quotes (pprKind cls_kind))
           ]
  DerivErrSafeHaskellGenericInst
    ->     text "Generic instances can only be derived in"
       <+> text "Safe Haskell using the stock strategy."
  DerivErrDerivingViaWrongKind cls_kind via_ty via_kind
    -> hang (text "Cannot derive instance via" <+> quotes (pprType via_ty))
          2 (text "Class" <+> quotes (ppr cls)
                 <+> text "expects an argument of kind"
                 <+> quotes (pprKind cls_kind) <> char ','
         $+$ text "but" <+> quotes (pprType via_ty)
                 <+> text "has kind" <+> quotes (pprKind via_kind))
  DerivErrNoEtaReduce inst_ty
    -> sep [text "Cannot eta-reduce to an instance of form",
            nest 2 (text "instance (...) =>"
                      <+> pprClassPred cls (cls_tys ++ [inst_ty]))]
  DerivErrBootFileFound
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "Cannot derive instances in hs-boot files"
          $+$ text "Write an instance declaration instead")
  DerivErrDataConsNotAllInScope tc
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (hang (text "The data constructors of" <+> quotes (ppr tc)
                  <+> text "are not all in scope")
             2 (text "so you cannot derive an instance for it"))
  DerivErrGNDUsedOnData
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "GeneralizedNewtypeDeriving cannot be used on non-newtypes")
  DerivErrNullaryClasses
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "Cannot derive instances for nullary classes")
  DerivErrLastArgMustBeApp
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         ( text "The last argument of the instance must be a"
           <+> text "data or newtype application")
  DerivErrNoFamilyInstance tc tc_args
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "No family instance for" <+> quotes (pprTypeApp tc tc_args))
  DerivErrNotStockDeriveable _
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (quotes (ppr cls) <+> text "is not a stock derivable class (Eq, Show, etc.)")
  DerivErrHasAssociatedDatatypes hasAdfs at_last_cls_tv_in_kinds at_without_last_cls_tv
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         $ vcat [ ppWhen (hasAdfs == YesHasAdfs) adfs_msg
                , case at_without_last_cls_tv of
                    YesAssociatedTyNotParamOverLastTyVar tc -> at_without_last_cls_tv_msg tc
                    NoAssociatedTyNotParamOverLastTyVar     -> empty
                , case at_last_cls_tv_in_kinds of
                    YesAssocTyLastVarInKind tc -> at_last_cls_tv_in_kinds_msg tc
                    NoAssocTyLastVarInKind     -> empty
                ]
       where
         adfs_msg = text "the class has associated data types"
         at_without_last_cls_tv_msg at_tc =
           hang (text "the associated type" <+> quotes (ppr at_tc)
                   <+> text "is not parameterized over the last type variable")
              2 (text "of the class" <+> quotes (ppr cls))
         at_last_cls_tv_in_kinds_msg at_tc =
           hang (text "the associated type" <+> quotes (ppr at_tc)
                   <+> text "contains the last type variable")
              2 (text "of the class" <+> quotes (ppr cls)
                   <+> text "in a kind, which is not (yet) allowed")
  -- GND on a non-deriveable class reuses the stock-deriveable message.
  DerivErrNewtypeNonDeriveableClass
    -> derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving pprHerald
         (DerivErrNotStockDeriveable NoDeriveAnyClassEnabled)
  DerivErrCannotEtaReduceEnough eta_ok
    -> let cant_derive_err = ppUnless eta_ok eta_msg
           eta_msg = text "cannot eta-reduce the representation type enough"
       in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
            cant_derive_err
  DerivErrOnlyAnyClassDeriveable tc _
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (quotes (ppr tc) <+> text "is a type class,"
            <+> text "and can only have a derived instance"
            $+$ text "if DeriveAnyClass is enabled")
  DerivErrNotDeriveable _
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         empty
  DerivErrNotAClass predType
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (quotes (ppr predType) <+> text "is not a class")
  DerivErrNoConstructors rep_tc
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (quotes (pprSourceTyCon rep_tc) <+> text "must have at least one data constructor")
  DerivErrLangExtRequired ext
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "You need " <> ppr ext <+> text "to derive an instance for this class")
  DerivErrDunnoHowToDeriveForType ty
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (hang (text "Don't know how to derive" <+> quotes (ppr cls))
             2 (text "for type" <+> quotes (ppr ty)))
  DerivErrMustBeEnumType rep_tc
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (sep [ quotes (pprSourceTyCon rep_tc) <+> text "must be an enumeration type"
              , text "(an enumeration consists of one or more nullary, non-GADT constructors)" ])
  DerivErrMustHaveExactlyOneConstructor rep_tc
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (quotes (pprSourceTyCon rep_tc) <+> text "must have precisely one constructor")
  DerivErrMustHaveSomeParameters rep_tc
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "Data type" <+> quotes (ppr rep_tc) <+> text "must have some type parameters")
  DerivErrMustNotHaveClassContext rep_tc bad_stupid_theta
    -> cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
         (text "Data type" <+> quotes (ppr rep_tc)
            <+> text "must not have a class context:" <+> pprTheta bad_stupid_theta)
  DerivErrBadConstructor _ reasons
    -> let why = vcat $ map renderReason reasons
       in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald why
       where
         renderReason = \case
           DerivErrBadConExistential con
             -> badCon con $ text "must be truly polymorphic in the last argument of the data type"
           DerivErrBadConCovariant con
             -> badCon con $ text "must not use the type variable in a function argument"
           DerivErrBadConFunTypes con
             -> badCon con $ text "must not contain function types"
           DerivErrBadConWrongArg con
             -> badCon con $ text "must use the type variable only as the last argument of a data type"
           DerivErrBadConIsGADT con
             -> badCon con $ text "is a GADT"
           DerivErrBadConHasExistentials con
             -> badCon con $ text "has existential type variables in its type"
           DerivErrBadConHasConstraints con
             -> badCon con $ text "has constraints in its type"
           DerivErrBadConHasHigherRankType con
             -> badCon con $ text "has a higher-rank type"
  DerivErrGenerics reasons
    -> let why = vcat $ map renderReason reasons
       in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald why
       where
         renderReason = \case
           DerivErrGenericsMustNotHaveDatatypeContext tc_name
             -> ppr tc_name <+> text "must not have a datatype context"
           DerivErrGenericsMustNotHaveExoticArgs dc
             -> ppr dc <+> text "must not have exotic unlifted or polymorphic arguments"
           DerivErrGenericsMustBeVanillaDataCon dc
             -> ppr dc <+> text "must be a vanilla data constructor"
           DerivErrGenericsMustHaveSomeTypeParams rep_tc
             ->     text "Data type" <+> quotes (ppr rep_tc)
                <+> text "must have some type parameters"
           DerivErrGenericsMustNotHaveExistentials con
             -> badCon con $ text "must not have existential arguments"
           DerivErrGenericsWrongArgKind con
             -> badCon con $
                   text "applies a type to an argument involving the last parameter"
                $$ text "but the applied type is not of kind * -> *"
  -- Combine the two sub-reasons without repeating the herald for each.
  DerivErrEnumOrProduct this that
    -> let ppr1 = derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving False this
           ppr2 = derivErrDiagnosticMessage cls cls_tys mb_strat newtype_deriving False that
       in cannotMakeDerivedInstanceHerald cls cls_tys mb_strat newtype_deriving pprHerald
            (ppr1 $$ text " or" $$ ppr2)

{- *********************************************************************
*                                                                      *
            Outputable SolverReportErrCtxt (for debugging)
*                                                                      *
**********************************************************************-}

-- Debug-only rendering of the solver-report context record.
instance Outputable SolverReportErrCtxt where
  ppr (CEC { cec_binds              = bvar
           , cec_defer_type_errors  = dte
           , cec_expr_holes         = eh
           , cec_type_holes         = th
           , cec_out_of_scope_holes = osh
           , cec_warn_redundant     = wr
           , cec_expand_syns        = es
           , cec_suppress           = sup })
    = text "CEC" <+> braces (vcat
        [ text "cec_binds" <+> equals <+> ppr bvar
        , text "cec_defer_type_errors" <+> equals <+> ppr dte
        , text "cec_expr_holes" <+> equals <+> ppr eh
        , text "cec_type_holes" <+> equals <+> ppr th
        , text "cec_out_of_scope_holes" <+> equals <+> ppr osh
        , text "cec_warn_redundant" <+> equals <+> ppr wr
        , text "cec_expand_syns" <+> equals <+> ppr es
        , text "cec_suppress" <+> equals <+> ppr sup ])

{-
********************************************************************* * * Outputting TcSolverReportMsg errors * * **********************************************************************-} -- | Pretty-print a 'SolverReportWithCtxt', containing a 'TcSolverReportMsg' -- with its enclosing 'SolverReportErrCtxt'. pprSolverReportWithCtxt :: SolverReportWithCtxt -> SDoc pprSolverReportWithCtxt (SolverReportWithCtxt { reportContext = ctxt, reportContent = msg }) = pprTcSolverReportMsg ctxt msg -- | Pretty-print a 'TcSolverReportMsg', with its enclosing 'SolverReportErrCtxt'. pprTcSolverReportMsg :: SolverReportErrCtxt -> TcSolverReportMsg -> SDoc pprTcSolverReportMsg ctxt (TcReportWithInfo msg (info :| infos)) = vcat ( pprTcSolverReportMsg ctxt msg : pprTcSolverReportInfo ctxt info : map (pprTcSolverReportInfo ctxt) infos ) pprTcSolverReportMsg _ (BadTelescope telescope skols) = hang (text "These kind and type variables:" <+> ppr telescope $$ text "are out of dependency order. Perhaps try this ordering:") 2 (pprTyVars sorted_tvs) where sorted_tvs = scopedSort skols pprTcSolverReportMsg _ (UserTypeError ty) = pprUserTypeErrorTy ty pprTcSolverReportMsg ctxt (ReportHoleError hole err) = pprHoleError ctxt hole err pprTcSolverReportMsg _ (CannotUnifyWithPolytype item tv1 ty2) = vcat [ (if isSkolemTyVar tv1 then text "Cannot equate type variable" else text "Cannot instantiate unification variable") <+> quotes (ppr tv1) , hang (text "with a" <+> what <+> text "involving polytypes:") 2 (ppr ty2) ] where what = text $ levelString $ ctLocTypeOrKind_maybe (errorItemCtLoc item) `orElse` TypeLevel pprTcSolverReportMsg _ (Mismatch { mismatch_ea = add_ea , mismatch_item = item , mismatch_ty1 = ty1 , mismatch_ty2 = ty2 }) = addArising (errorItemOrigin item) msg where msg | (isLiftedRuntimeRep ty1 && isUnliftedRuntimeRep ty2) || (isLiftedRuntimeRep ty2 && isUnliftedRuntimeRep ty1) || (isLiftedLevity ty1 && isUnliftedLevity ty2) || (isLiftedLevity ty2 && isUnliftedLevity ty1) = text 
"Couldn't match a lifted type with an unlifted type" | isAtomicTy ty1 || isAtomicTy ty2 = -- Print with quotes sep [ text herald1 <+> quotes (ppr ty1) , nest padding $ text herald2 <+> quotes (ppr ty2) ] | otherwise = -- Print with vertical layout vcat [ text herald1 <> colon <+> ppr ty1 , nest padding $ text herald2 <> colon <+> ppr ty2 ] herald1 = conc [ "Couldn't match" , if is_repr then "representation of" else "" , if add_ea then "expected" else "" , what ] herald2 = conc [ "with" , if is_repr then "that of" else "" , if add_ea then ("actual " ++ what) else "" ] padding = length herald1 - length herald2 is_repr = case errorItemEqRel item of { ReprEq -> True; NomEq -> False } what = levelString (ctLocTypeOrKind_maybe (errorItemCtLoc item) `orElse` TypeLevel) conc :: [String] -> String conc = foldr1 add_space add_space :: String -> String -> String add_space s1 s2 | null s1 = s2 | null s2 = s1 | otherwise = s1 ++ (' ' : s2) pprTcSolverReportMsg _ (KindMismatch { kmismatch_what = thing , kmismatch_expected = exp , kmismatch_actual = act }) = hang (text "Expected" <+> kind_desc <> comma) 2 (text "but" <+> quotes (ppr thing) <+> text "has kind" <+> quotes (ppr act)) where kind_desc | tcIsConstraintKind exp = text "a constraint" | Just arg <- kindRep_maybe exp -- TYPE t0 , tcIsTyVarTy arg = sdocOption sdocPrintExplicitRuntimeReps $ \case True -> text "kind" <+> quotes (ppr exp) False -> text "a type" | otherwise = text "kind" <+> quotes (ppr exp) pprTcSolverReportMsg ctxt (TypeEqMismatch { teq_mismatch_ppr_explicit_kinds = ppr_explicit_kinds , teq_mismatch_item = item , teq_mismatch_ty1 = ty1 , teq_mismatch_ty2 = ty2 , teq_mismatch_expected = exp , teq_mismatch_actual = act , teq_mismatch_what = mb_thing }) = addArising orig $ pprWithExplicitKindsWhen ppr_explicit_kinds msg where msg | isUnliftedTypeKind act, isLiftedTypeKind exp = sep [ text "Expecting a lifted type, but" , thing_msg mb_thing (text "an") (text "unlifted") ] | isLiftedTypeKind act, 
isUnliftedTypeKind exp = sep [ text "Expecting an unlifted type, but" , thing_msg mb_thing (text "a") (text "lifted") ] | tcIsLiftedTypeKind exp = maybe_num_args_msg $$ sep [ text "Expected a type, but" , case mb_thing of Nothing -> text "found something with kind" Just thing -> quotes (ppr thing) <+> text "has kind" , quotes (pprWithTYPE act) ] | Just nargs_msg <- num_args_msg , Right ea_msg <- mk_ea_msg ctxt (Just item) level orig = nargs_msg $$ pprTcSolverReportMsg ctxt ea_msg | -- pprTrace "check" (ppr ea_looks_same $$ ppr exp $$ ppr act $$ ppr ty1 $$ ppr ty2) $ ea_looks_same ty1 ty2 exp act , Right ea_msg <- mk_ea_msg ctxt (Just item) level orig = pprTcSolverReportMsg ctxt ea_msg -- The mismatched types are /inside/ exp and act | let mismatch_err = Mismatch False item ty1 ty2 errs = case mk_ea_msg ctxt Nothing level orig of Left ea_info -> [ mkTcReportWithInfo mismatch_err ea_info ] Right ea_err -> [ mismatch_err, ea_err ] = vcat $ map (pprTcSolverReportMsg ctxt) errs ct_loc = errorItemCtLoc item orig = errorItemOrigin item level = ctLocTypeOrKind_maybe ct_loc `orElse` TypeLevel thing_msg (Just thing) _ levity = quotes (ppr thing) <+> text "is" <+> levity thing_msg Nothing an levity = text "got" <+> an <+> levity <+> text "type" num_args_msg = case level of KindLevel | not (isMetaTyVarTy exp) && not (isMetaTyVarTy act) -- if one is a meta-tyvar, then it's possible that the user -- has asked for something impredicative, and we couldn't unify. -- Don't bother with counting arguments. 
-> let n_act = count_args act n_exp = count_args exp in case n_act - n_exp of n | n > 0 -- we don't know how many args there are, so don't -- recommend removing args that aren't , Just thing <- mb_thing -> Just $ pprTcSolverReportMsg ctxt (ExpectingMoreArguments n thing) _ -> Nothing _ -> Nothing maybe_num_args_msg = num_args_msg `orElse` empty count_args ty = count isVisibleBinder $ fst $ splitPiTys ty pprTcSolverReportMsg _ (FixedRuntimeRepError frr_origs) = vcat (map make_msg frr_origs) where -- Assemble the error message: pair up each origin with the corresponding type, e.g. -- • FixedRuntimeRep origin msg 1 ... -- a :: TYPE r1 -- • FixedRuntimeRep origin msg 2 ... -- b :: TYPE r2 make_msg :: FixedRuntimeRepErrorInfo -> SDoc make_msg (FRR_Info { frr_info_origin = FixedRuntimeRepOrigin { frr_type = ty , frr_context = frr_ctxt } , frr_info_not_concrete = mb_not_conc }) = -- Add bullet points if there is more than one error. (if length frr_origs > 1 then (bullet <+>) else id) $ vcat [ sep [ pprFixedRuntimeRepContext frr_ctxt , text "does not have a fixed runtime representation." ] , type_printout ty , case mb_not_conc of Nothing -> empty Just (conc_tv, not_conc) -> unsolved_concrete_eq_explanation conc_tv not_conc ] -- Don't print out the type (only the kind), if the type includes -- a confusing cast, unless the user passed -fprint-explicit-coercions. -- -- Example: -- -- In T20363, we have a representation-polymorphism error with a type -- of the form -- -- ( (# #) |> co ) :: TYPE NilRep -- -- where NilRep is a nullary type family application which reduces to TupleRep '[]. -- We prefer avoiding showing the cast to the user, but we also don't want to -- print the confusing: -- -- (# #) :: TYPE NilRep -- -- So in this case we simply don't print the type, only the kind. confusing_cast :: Type -> Bool confusing_cast ty = case ty of CastTy inner_ty _ -- A confusing cast is one that is responsible -- for a representation-polymorphism error. 
-> isConcrete (typeKind inner_ty) _ -> False type_printout :: Type -> SDoc type_printout ty = sdocOption sdocPrintExplicitCoercions $ \ show_coercions -> if confusing_cast ty && not show_coercions then vcat [ text "Its kind is:" , nest 2 $ pprWithTYPE (typeKind ty) , text "(Use -fprint-explicit-coercions to see the full type.)" ] else vcat [ text "Its type is:" , nest 2 $ ppr ty <+> dcolon <+> pprWithTYPE (typeKind ty) ] unsolved_concrete_eq_explanation :: TcTyVar -> Type -> SDoc unsolved_concrete_eq_explanation tv not_conc = text "Cannot unify" <+> quotes (ppr not_conc) <+> text "with the type variable" <+> quotes (ppr tv) $$ text "because it is not a concrete" <+> what <> dot where ki = tyVarKind tv what :: SDoc what | isRuntimeRepTy ki = quotes (text "RuntimeRep") | isLevityTy ki = quotes (text "Levity") | otherwise = text "type" pprTcSolverReportMsg _ (SkolemEscape item implic esc_skols) = let esc_doc = sep [ text "because" <+> what <+> text "variable" <> plural esc_skols <+> pprQuotedList esc_skols , text "would escape" <+> if isSingleton esc_skols then text "its scope" else text "their scope" ] in vcat [ nest 2 $ esc_doc , sep [ (if isSingleton esc_skols then text "This (rigid, skolem)" <+> what <+> text "variable is" else text "These (rigid, skolem)" <+> what <+> text "variables are") <+> text "bound by" , nest 2 $ ppr (ic_info implic) , nest 2 $ text "at" <+> ppr (getLclEnvLoc (ic_env implic)) ] ] where what = text $ levelString $ ctLocTypeOrKind_maybe (errorItemCtLoc item) `orElse` TypeLevel pprTcSolverReportMsg _ (UntouchableVariable tv implic) | Implic { ic_given = given, ic_info = skol_info } <- implic = sep [ quotes (ppr tv) <+> text "is untouchable" , nest 2 $ text "inside the constraints:" <+> pprEvVarTheta given , nest 2 $ text "bound by" <+> ppr skol_info , nest 2 $ text "at" <+> ppr (getLclEnvLoc (ic_env implic)) ] pprTcSolverReportMsg _ (BlockedEquality item) = vcat [ hang (text "Cannot use equality for substitution:") 2 (ppr (errorItemPred 
item)) , text "Doing so would be ill-kinded." ] pprTcSolverReportMsg _ (ExpectingMoreArguments n thing) = text "Expecting" <+> speakN (abs n) <+> more <+> quotes (ppr thing) where more | n == 1 = text "more argument to" | otherwise = text "more arguments to" -- n > 1 pprTcSolverReportMsg ctxt (UnboundImplicitParams (item :| items)) = let givens = getUserGivens ctxt in if null givens then addArising (errorItemOrigin item) $ sep [ text "Unbound implicit parameter" <> plural preds , nest 2 (pprParendTheta preds) ] else pprTcSolverReportMsg ctxt (CouldNotDeduce givens (item :| items) Nothing) where preds = map errorItemPred (item : items) pprTcSolverReportMsg ctxt (CouldNotDeduce useful_givens (item :| others) mb_extra) = main_msg $$ case supplementary of Left infos -> vcat (map (pprTcSolverReportInfo ctxt) infos) Right other_msg -> pprTcSolverReportMsg ctxt other_msg where main_msg | null useful_givens = addArising orig (no_instance_msg <+> missing) | otherwise = vcat (addArising orig (no_deduce_msg <+> missing) : pp_givens useful_givens) supplementary = case mb_extra of Nothing -> Left [] Just (CND_Extra level ty1 ty2) -> mk_supplementary_ea_msg ctxt level ty1 ty2 orig orig = errorItemOrigin item wanteds = map errorItemPred (item:others) no_instance_msg = case wanteds of [wanted] | Just (tc, _) <- splitTyConApp_maybe wanted -- Don't say "no instance" for a constraint such as "c" for a type variable c. , isClassTyCon tc -> text "No instance for" _ -> text "Could not solve:" no_deduce_msg = case wanteds of [_wanted] -> text "Could not deduce" _ -> text "Could not deduce:" missing = case wanteds of [wanted] -> pprParendType wanted _ -> pprTheta wanteds pprTcSolverReportMsg ctxt (AmbiguityPreventsSolvingCt item ambigs) = pprTcSolverReportInfo ctxt (Ambiguity True ambigs) <+> pprArising (errorItemOrigin item) $$ text "prevents the constraint" <+> quotes (pprParendType $ errorItemPred item) <+> text "from being solved." 
pprTcSolverReportMsg ctxt@(CEC {cec_encl = implics})
  (CannotResolveInstance item unifiers candidates imp_errs suggs binds) =
  vcat
    [ pprTcSolverReportMsg ctxt no_inst_msg
    , nest 2 extra_note
    , mb_patsyn_prov `orElse` empty
    , ppWhen (has_ambigs && not (null unifiers && null useful_givens))
      (vcat [ ppUnless lead_with_ambig $
                pprTcSolverReportInfo ctxt (Ambiguity False (ambig_kvs, ambig_tvs))
            , pprRelevantBindings binds
            , potential_msg ])
    , ppWhen (isNothing mb_patsyn_prov) $
            -- Don't suggest fixes for the provided context of a pattern
            -- synonym; the right fix is to bind more in the pattern
      show_fixes (ctxtFixes has_ambigs pred implics ++ drv_fixes)
    , ppWhen (not (null candidates))
      (hang (text "There are instances for similar types:")
          2 (vcat (map ppr candidates)))
            -- See Note [Report candidate instances]
    , vcat $ map ppr imp_errs
    , vcat $ map ppr suggs
    ]
  where
    orig        = errorItemOrigin item
    pred        = errorItemPred   item
    (clas, tys) = getClassPredTys pred

    -- See Note [Highlighting ambiguous type variables]
    (ambig_kvs, ambig_tvs) = ambigTkvsOfTy pred
    ambigs     = ambig_kvs ++ ambig_tvs
    has_ambigs = not (null ambigs)

    -- useful_givens are the enclosing implications with non-empty givens,
    -- modulo the horrid discardProvCtxtGivens
    useful_givens = discardProvCtxtGivens orig (getUserGivensFromImplics implics)

    lead_with_ambig = not (null ambigs)
                   && not (any isRuntimeUnkSkol ambigs)
                   && not (null unifiers)
                   && null useful_givens

    no_inst_msg :: TcSolverReportMsg
    no_inst_msg
      | lead_with_ambig
      = AmbiguityPreventsSolvingCt item (ambig_kvs, ambig_tvs)
      | otherwise
      = CouldNotDeduce useful_givens (item :| []) Nothing

    -- Report "potential instances" only when the constraint arises
    -- directly from the user's use of an overloaded function
    want_potential (TypeEqOrigin {}) = False
    want_potential _                 = True

    potential_msg
      = ppWhen (not (null unifiers) && want_potential orig) $
          potential_hdr $$
          potentialInstancesErrMsg (PotentialInstances { matches = [], unifiers })

    potential_hdr
      = ppWhen lead_with_ambig $
          text "Probable fix: use a type annotation to specify what"
          <+> pprQuotedList ambig_tvs <+> text "should be."

    mb_patsyn_prov :: Maybe SDoc
    mb_patsyn_prov
      | not lead_with_ambig
      , ProvCtxtOrigin PSB{ psb_def = L _ pat } <- orig
      = Just (vcat [ text "In other words, a successful match on the pattern"
                   , nest 2 $ ppr pat
                   , text "does not provide the constraint" <+> pprParendType pred ])
      | otherwise
      = Nothing

    extra_note
      | any isFunTy (filterOutInvisibleTypes (classTyCon clas) tys)
      = text "(maybe you haven't applied a function to enough arguments?)"

      | className clas == typeableClassName  -- Avoid mysterious "No instance for (Typeable T)
      , [_,ty] <- tys                        -- Look for (Typeable (k->*) (T k))
      , Just (tc,_) <- tcSplitTyConApp_maybe ty
      , not (isTypeFamilyTyCon tc)
      = hang (text "GHC can't yet do polykinded")
           2 (text "Typeable" <+>
              parens (ppr ty <+> dcolon <+> ppr (tcTypeKind ty)))

      | otherwise
      = empty

    drv_fixes = case orig of
        DerivClauseOrigin                  -> [drv_fix False]
        StandAloneDerivOrigin              -> [drv_fix True]
        DerivOriginDC _ _       standalone -> [drv_fix standalone]
        DerivOriginCoerce _ _ _ standalone -> [drv_fix standalone]
        _                                  -> []

    drv_fix standalone_wildcard
      | standalone_wildcard
      = text "fill in the wildcard constraint yourself"
      | otherwise
      = hang (text "use a standalone 'deriving instance' declaration,")
           2 (text "so you can specify the instance context yourself")

pprTcSolverReportMsg (CEC {cec_encl = implics})
  (OverlappingInstances item matches unifiers) =
  vcat
    [ addArising orig $
        (text "Overlapping instances for"
         <+> pprType (mkClassPred clas tys))
    , ppUnless (null matching_givens) $
        sep [ text "Matching givens (or their superclasses):"
            , nest 2 (vcat matching_givens) ]
    , potentialInstancesErrMsg
        (PotentialInstances { matches, unifiers })
    , ppWhen (null matching_givens && isSingleton matches && null unifiers) $
      -- Intuitively, some given matched the wanted in their
      -- flattened or rewritten (from given equalities) form
      -- but the matcher can't figure that out because the
      -- constraints are non-flat and non-rewritten so we
      -- simply report back the whole given
      -- context. Accelerate Smart.hs showed this problem.
        sep [ text "There exists a (perhaps superclass) match:"
            , nest 2 (vcat (pp_givens useful_givens)) ]
    , ppWhen (isSingleton matches) $
      parens (vcat [ ppUnless (null tyCoVars) $
                       text "The choice depends on the instantiation of" <+>
                         quotes (pprWithCommas ppr tyCoVars)
                   , ppUnless (null famTyCons) $
                       if (null tyCoVars)
                         then
                           text "The choice depends on the result of evaluating" <+>
                             quotes (pprWithCommas ppr famTyCons)
                         else
                           text "and the result of evaluating" <+>
                             quotes (pprWithCommas ppr famTyCons)
                   , ppWhen (null (matching_givens)) $
                     vcat [ text "To pick the first instance above, use IncoherentInstances"
                          , text "when compiling the other instance declarations" ]
                   ])]
  where
    orig            = errorItemOrigin item
    pred            = errorItemPred   item
    (clas, tys)     = getClassPredTys pred
    tyCoVars        = tyCoVarsOfTypesList tys
    famTyCons       = filter isFamilyTyCon $
                      concatMap (nonDetEltsUniqSet . tyConsOfType) tys
    useful_givens   = discardProvCtxtGivens orig (getUserGivensFromImplics implics)
    matching_givens = mapMaybe matchable useful_givens

    matchable implic@(Implic { ic_given = evvars, ic_info = skol_info })
      = case ev_vars_matching of
          [] -> Nothing
          _  -> Just $ hang (pprTheta ev_vars_matching)
                          2 (sep [ text "bound by" <+> ppr skol_info
                                 , text "at" <+>
                                   ppr (getLclEnvLoc (ic_env implic)) ])
      where
        ev_vars_matching = [ pred
                           | ev_var <- evvars
                           , let pred = evVarPred ev_var
                           , any can_match (pred : transSuperClasses pred) ]
        can_match pred
          = case getClassPredTys_maybe pred of
              Just (clas', tys') -> clas' == clas
                                    && isJust (tcMatchTys tys tys')
              Nothing            -> False

pprTcSolverReportMsg _ (UnsafeOverlap item matches unsafe_overlapped) =
  vcat [ addArising orig (text "Unsafe overlapping instances for"
                          <+> pprType (mkClassPred clas tys))
       , sep [ text "The matching instance is:"
             , nest 2 (pprInstance $ head matches) ]
       , vcat [ text "It is compiled in a Safe module and as such can only"
              , text "overlap instances from the same module, however it"
              , text "overlaps the following instances from different" <+>
                text "modules:"
              , nest 2 (vcat [pprInstances $ unsafe_overlapped])
              ]
       ]
  where
    orig        = errorItemOrigin item
    pred        = errorItemPred   item
    (clas, tys) = getClassPredTys pred

{- *********************************************************************
*                                                                      *
                  Displaying potential instances
*                                                                      *
**********************************************************************-}

-- | Directly display the given matching and unifying instances,
-- with a header for each: `Matching instances`/`Potentially matching instances`.
pprPotentialInstances :: (ClsInst -> SDoc) -> PotentialInstances -> SDoc
pprPotentialInstances ppr_inst (PotentialInstances { matches, unifiers }) =
  vcat
    [ ppWhen (not $ null matches) $
        text "Matching instance" <> plural matches <> colon $$
        nest 2 (vcat (map ppr_inst matches))
    , ppWhen (not $ null unifiers) $
        (text "Potentially matching instance" <> plural unifiers <> colon) $$
        nest 2 (vcat (map ppr_inst unifiers))
    ]

-- | Display a summary of available instances, omitting those involving
-- out-of-scope types, in order to explain why we couldn't solve a particular
-- constraint, e.g. due to instance overlap or out-of-scope types.
--
-- To directly display a collection of matching/unifying instances,
-- use 'pprPotentialInstances'.
potentialInstancesErrMsg :: PotentialInstances -> SDoc
-- See Note [Displaying potential instances]
potentialInstancesErrMsg potentials =
  sdocOption sdocPrintPotentialInstances $ \print_insts ->
  getPprStyle $ \sty ->
    potentials_msg_with_options potentials print_insts sty

-- | Display a summary of available instances, omitting out-of-scope ones.
--
-- Use 'potentialInstancesErrMsg' to automatically set the pretty-printing
-- options.
potentials_msg_with_options :: PotentialInstances
                            -> Bool -- ^ Whether to print /all/ potential instances
                            -> PprStyle
                            -> SDoc
potentials_msg_with_options
  (PotentialInstances { matches, unifiers })
  show_all_potentials sty
  | null matches && null unifiers
  = empty

  | null show_these_matches && null show_these_unifiers
  = vcat [ not_in_scope_msg empty
         , flag_hint ]

  | otherwise
  = vcat [ pprPotentialInstances
             pprInstance -- print instance + location info
             (PotentialInstances
                { matches  = show_these_matches
                , unifiers = show_these_unifiers })
         , overlapping_but_not_more_specific_msg sorted_matches
         , nest 2 $ vcat
             [ ppWhen (n_in_scope_hidden > 0) $
                 text "...plus"
                 <+> speakNOf n_in_scope_hidden (text "other")
             , ppWhen (not_in_scopes > 0) $
                 not_in_scope_msg (text "...plus")
             , flag_hint ] ]
  where
    n_show_matches, n_show_unifiers :: Int
    n_show_matches  = 3
    n_show_unifiers = 2

    (in_scope_matches,  not_in_scope_matches)  = partition inst_in_scope matches
    (in_scope_unifiers, not_in_scope_unifiers) = partition inst_in_scope unifiers
    sorted_matches  = sortBy fuzzyClsInstCmp in_scope_matches
    sorted_unifiers = sortBy fuzzyClsInstCmp in_scope_unifiers
    (show_these_matches, show_these_unifiers)
       | show_all_potentials = (sorted_matches, sorted_unifiers)
       | otherwise           = (take n_show_matches  sorted_matches
                               ,take n_show_unifiers sorted_unifiers)
    n_in_scope_hidden
      = length sorted_matches + length sorted_unifiers
      - length show_these_matches - length show_these_unifiers

    -- "in scope" means that all the type constructors
    -- are lexically in scope; these instances are likely
    -- to be more useful
    inst_in_scope :: ClsInst -> Bool
    inst_in_scope cls_inst = nameSetAll name_in_scope $
                             orphNamesOfTypes (is_tys cls_inst)

    name_in_scope name
      | pretendNameIsInScope name
      = True -- E.g. (->); see Note [pretendNameIsInScope] in GHC.Builtin.Names
      | Just mod <- nameModule_maybe name
      = qual_in_scope (qualName sty mod (nameOccName name))
      | otherwise
      = True

    qual_in_scope :: QualifyName -> Bool
    qual_in_scope NameUnqual    = True
    qual_in_scope (NameQual {}) = True
    qual_in_scope _             = False

    not_in_scopes :: Int
    not_in_scopes = length not_in_scope_matches + length not_in_scope_unifiers

    not_in_scope_msg herald =
      hang (herald <+> speakNOf not_in_scopes (text "instance")
                   <+> text "involving out-of-scope types")
        2 (ppWhen show_all_potentials $
            pprPotentialInstances
              pprInstanceHdr -- only print the header, not the instance location info
              (PotentialInstances
                 { matches  = not_in_scope_matches
                 , unifiers = not_in_scope_unifiers }))

    flag_hint = ppUnless (show_all_potentials
                          || (equalLength show_these_matches matches
                              && equalLength show_these_unifiers unifiers)) $
                text "(use -fprint-potential-instances to see them all)"

-- | Compute a message informing the user of any instances that are overlapped
-- but were not discarded because the instance overlapping them wasn't
-- strictly more specific.
overlapping_but_not_more_specific_msg :: [ClsInst] -> SDoc
overlapping_but_not_more_specific_msg insts
  -- Only print one example of "overlapping but not strictly more specific",
  -- to avoid information overload.
  | overlap : _ <- overlapping_but_not_more_specific
  = overlap_header $$ ppr_overlapping overlap
  | otherwise
  = empty
  where
    overlap_header :: SDoc
    overlap_header
      | [_] <- overlapping_but_not_more_specific
      = text "An overlapping instance can only be chosen when it is strictly more specific."
      | otherwise
      = text "Overlapping instances can only be chosen when they are strictly more specific."

    overlapping_but_not_more_specific :: [(ClsInst, ClsInst)]
    overlapping_but_not_more_specific
      = nubOrdBy (comparing (is_dfun . fst))
          [ (overlapper, overlappee)
          | these <- groupBy ((==) `on` is_cls_nm) insts
          -- Take all pairs of distinct instances...
          , one:others <- tails these -- if `these = [inst_1, inst_2, ...]`
          , other <- others           -- then we get pairs `(one, other) = (inst_i, inst_j)` with `i < j`
          -- ... such that one instance in the pair overlaps the other...
          , let mb_overlapping
                  | hasOverlappingFlag (overlapMode $ is_flag one)
                  || hasOverlappableFlag (overlapMode $ is_flag other)
                  = [(one, other)]
                  | hasOverlappingFlag (overlapMode $ is_flag other)
                  || hasOverlappableFlag (overlapMode $ is_flag one)
                  = [(other, one)]
                  | otherwise
                  = []
          , (overlapper, overlappee) <- mb_overlapping
          -- ... but the overlapper is not more specific than the overlappee.
          , not (overlapper `more_specific_than` overlappee)
          ]

    more_specific_than :: ClsInst -> ClsInst -> Bool
    is1 `more_specific_than` is2
      = isJust (tcMatchTys (is_tys is1) (is_tys is2))

    ppr_overlapping :: (ClsInst, ClsInst) -> SDoc
    ppr_overlapping (overlapper, overlappee)
      = text "The first instance that follows overlaps the second, but is not more specific than it:"
      $$ nest 2 (vcat $ map pprInstanceHdr [overlapper, overlappee])

{- Note [Displaying potential instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When showing a list of instances for
  - overlapping instances (show ones that match)
  - no such instance (show ones that could match)
we want to give it a bit of structure.  Here's the plan

* Say that an instance is "in scope" if all of the
  type constructors it mentions are lexically in scope.
  These are the ones most likely to be useful to the programmer.

* Show at most n_show in-scope instances,
  and summarise the rest ("plus N others")

* Summarise the not-in-scope instances ("plus 4 not in scope")

* Add the flag -fprint-potential-instances which replaces the
  summary with the full list
-}

{- *********************************************************************
*                                                                      *
            Outputting TcSolverReportInfo
*                                                                      *
**********************************************************************-}

-- | Pretty-print an informational message, to accompany a 'TcSolverReportMsg'.
pprTcSolverReportInfo :: SolverReportErrCtxt -> TcSolverReportInfo -> SDoc
pprTcSolverReportInfo _ (Ambiguity prepend_msg (ambig_kvs, ambig_tvs)) = msg
  where
    msg |  any isRuntimeUnkSkol ambig_kvs  -- See Note [Runtime skolems]
        || any isRuntimeUnkSkol ambig_tvs
        = vcat [ text "Cannot resolve unknown runtime type"
                 <> plural ambig_tvs <+> pprQuotedList ambig_tvs
               , text "Use :print or :force to determine these types" ]

        | not (null ambig_tvs)
        = pp_ambig (text "type") ambig_tvs

        | otherwise
        = pp_ambig (text "kind") ambig_kvs

    pp_ambig what tkvs
      | prepend_msg -- "Ambiguous type variable 't0'"
      = text "Ambiguous" <+> what <+> text "variable"
        <> plural tkvs <+> pprQuotedList tkvs

      | otherwise -- "The type variable 't0' is ambiguous"
      = text "The" <+> what <+> text "variable" <> plural tkvs
        <+> pprQuotedList tkvs <+> isOrAre tkvs <+> text "ambiguous"

pprTcSolverReportInfo ctxt (TyVarInfo tv ) =
  case tcTyVarDetails tv of
    SkolemTv sk_info _ _ -> pprSkols ctxt [(getSkolemInfo sk_info, [tv])]
    RuntimeUnk {}        -> quotes (ppr tv) <+> text "is an interactive-debugger skolem"
    MetaTv {}            -> empty

pprTcSolverReportInfo _ (NonInjectiveTyFam tc) =
  text "NB:" <+> quotes (ppr tc)
  <+> text "is a non-injective type family"

pprTcSolverReportInfo _ (ReportCoercibleMsg msg) =
  pprCoercibleMsg msg

pprTcSolverReportInfo _ (ExpectedActual { ea_expected = exp, ea_actual = act }) =
  vcat
    [ text "Expected:" <+> ppr exp
    , text " Actual:" <+> ppr act ]

pprTcSolverReportInfo _
  (ExpectedActualAfterTySynExpansion
    { ea_expanded_expected = exp
    , ea_expanded_actual   = act } )
  = vcat
      [ text "Type synonyms expanded:"
      , text "Expected type:" <+> ppr exp
      , text " Actual type:" <+> ppr act ]

pprTcSolverReportInfo ctxt (WhenMatching cty1 cty2 sub_o mb_sub_t_or_k) =
  sdocOption sdocPrintExplicitCoercions $ \ printExplicitCoercions ->
    if printExplicitCoercions
       || not (cty1 `pickyEqType` cty2)
      then vcat [ hang (text "When matching" <+> sub_whats)
                     2 (vcat [ ppr cty1 <+> dcolon <+>
                               ppr (tcTypeKind cty1)
                             , ppr cty2 <+> dcolon <+>
                               ppr (tcTypeKind cty2) ])
                , supplementary ]
      else text "When matching the kind of" <+> quotes (ppr cty1)
  where
    sub_t_or_k = mb_sub_t_or_k `orElse` TypeLevel
    sub_whats  = text (levelString sub_t_or_k) <> char 's'
    supplementary =
      case mk_supplementary_ea_msg ctxt sub_t_or_k cty1 cty2 sub_o of
        Left infos -> vcat $ map (pprTcSolverReportInfo ctxt) infos
        Right msg  -> pprTcSolverReportMsg ctxt msg

pprTcSolverReportInfo _ (SameOcc same_pkg n1 n2) =
  text "NB:" <+> (ppr_from same_pkg n1 $$ ppr_from same_pkg n2)
  where
    ppr_from same_pkg nm
      | isGoodSrcSpan loc
      = hang (quotes (ppr nm) <+> text "is defined at")
           2 (ppr loc)
      | otherwise  -- Imported things have an UnhelpfulSrcSpan
      = hang (quotes (ppr nm))
           2 (sep [ text "is defined in" <+> quotes (ppr (moduleName mod))
                  , ppUnless (same_pkg || pkg == mainUnit) $
                    nest 4 $ text "in package" <+> quotes (ppr pkg) ])
      where
        pkg = moduleUnit mod
        mod = nameModule nm
        loc = nameSrcSpan nm

pprTcSolverReportInfo ctxt (OccursCheckInterestingTyVars (tv :| tvs)) =
  hang (text "Type variable kinds:") 2 $
  vcat (map (tyvar_binding . tidyTyCoVarOcc (cec_tidy ctxt))
            (tv:tvs))
  where
    tyvar_binding tyvar = ppr tyvar <+> dcolon <+> ppr (tyVarKind tyvar)

pprCoercibleMsg :: CoercibleMsg -> SDoc
pprCoercibleMsg (UnknownRoles ty) =
  hang (text "NB: We cannot know what roles the parameters to" <+>
          quotes (ppr ty) <+> text "have;")
       2 (text "we must assume that the role is nominal")
pprCoercibleMsg (TyConIsAbstract tc) =
  hsep [ text "NB: The type constructor"
       , quotes (pprSourceTyCon tc)
       , text "is abstract" ]
pprCoercibleMsg (OutOfScopeNewtypeConstructor tc dc) =
  hang (text "The data constructor" <+> quotes (ppr $ dataConName dc))
    2 (sep [ text "of newtype" <+> quotes (pprSourceTyCon tc)
           , text "is not in scope" ])

{- *********************************************************************
*                                                                      *
                  Outputting HoleError messages
*                                                                      *
**********************************************************************-}

pprHoleError :: SolverReportErrCtxt -> Hole -> HoleError -> SDoc
pprHoleError _ (Hole { hole_ty, hole_occ = occ }) (OutOfScopeHole imp_errs)
  = out_of_scope_msg $$ vcat (map ppr imp_errs)
  where
    herald | isDataOcc occ = text "Data constructor not in scope:"
           | otherwise     = text "Variable not in scope:"
    out_of_scope_msg -- Print v :: ty only if the type has structure
      | boring_type = hang herald 2 (ppr occ)
      | otherwise   = hang herald 2 (pp_occ_with_type occ hole_ty)
    boring_type = isTyVarTy hole_ty
pprHoleError ctxt (Hole { hole_ty, hole_occ}) (HoleError sort other_tvs hole_skol_info) =
  vcat [ hole_msg
       , tyvars_msg
       , case sort of { ExprHole {} -> expr_hole_hint; _ -> type_hole_hint } ]
  where
    hole_msg = case sort of
      ExprHole {} ->
        hang (text "Found hole:")
          2 (pp_occ_with_type hole_occ hole_ty)
      TypeHole ->
        hang (text "Found type wildcard" <+> quotes (ppr hole_occ))
          2 (text "standing for" <+> quotes pp_hole_type_with_kind)
      ConstraintHole ->
        hang (text "Found extra-constraints wildcard standing for")
          2 (quotes $ pprType hole_ty)  -- always kind constraint

    hole_kind = tcTypeKind hole_ty

    pp_hole_type_with_kind
      | isLiftedTypeKind hole_kind
        || isCoVarType hole_ty  -- Don't print the kind of unlifted
                                -- equalities (#15039)
      = pprType hole_ty
      | otherwise
      = pprType hole_ty <+> dcolon <+> pprKind hole_kind

    tyvars = tyCoVarsOfTypeList hole_ty
    tyvars_msg = ppUnless (null tyvars) $
                 text "Where:" <+> (vcat (map loc_msg other_tvs)
                                    $$ pprSkols ctxt hole_skol_info)
                       -- Coercion variables can be free in the
                       -- hole, via kind casts

    expr_hole_hint                       -- Give hint for, say,   f x = _x
      | lengthFS (occNameFS hole_occ) > 1  -- Don't give this hint for plain "_"
      = text "Or perhaps" <+> quotes (ppr hole_occ)
        <+> text "is mis-spelled, or not in scope"
      | otherwise
      = empty

    type_hole_hint
      | ErrorWithoutFlag <- cec_type_holes ctxt
      = text "To use the inferred type, enable PartialTypeSignatures"
      | otherwise
      = empty

    loc_msg tv
      | isTyVar tv
      = case tcTyVarDetails tv of
          MetaTv {} -> quotes (ppr tv) <+> text "is an ambiguous type variable"
          _         -> empty  -- Skolems dealt with already
      | otherwise  -- A coercion variable can be free in the hole type
      = ppWhenOption sdocPrintExplicitCoercions $
          quotes (ppr tv) <+> text "is a coercion variable"

pp_occ_with_type :: OccName -> Type -> SDoc
pp_occ_with_type occ hole_ty = hang (pprPrefixOcc occ) 2 (dcolon <+> pprType hole_ty)

{- *********************************************************************
*                                                                      *
                  Outputting ScopeError messages
*                                                                      *
**********************************************************************-}

pprScopeError :: RdrName -> NotInScopeError -> SDoc
pprScopeError rdr_name scope_err =
  case scope_err of
    NotInScope {} ->
      hang (text "Not in scope:")
        2 (what <+> quotes (ppr rdr_name))
    NoExactName name ->
      text "The Name" <+> quotes (ppr name) <+> text "is not in scope."
    SameName gres ->
      assertPpr (length gres >= 2)
                (text "pprScopeError SameName: fewer than 2 elements" $$ nest 2 (ppr gres))
      $ hang (text "Same Name in multiple name-spaces:")
           2 (vcat (map pp_one sorted_names))
      where
        sorted_names = sortBy (leftmost_smallest `on` nameSrcSpan)
                              (map greMangledName gres)
        pp_one name
          = hang (pprNameSpace (occNameSpace (getOccName name))
                  <+> quotes (ppr name) <> comma)
               2 (text "declared at:" <+> ppr (nameSrcLoc name))
    MissingBinding thing _ ->
      sep [ text "The" <+> thing
            <+> text "for" <+> quotes (ppr rdr_name)
          , nest 2 $ text "lacks an accompanying binding" ]
    NoTopLevelBinding ->
      hang (text "No top-level binding for")
        2 (what <+> quotes (ppr rdr_name) <+> text "in this module")
    UnknownSubordinate doc ->
      quotes (ppr rdr_name) <+> text "is not a (visible)" <+> doc
  where
    what = pprNonVarNameSpace (occNameSpace (rdrNameOcc rdr_name))

scopeErrorHints :: NotInScopeError -> [GhcHint]
scopeErrorHints scope_err =
  case scope_err of
    NotInScope             -> noHints
    NoExactName {}         -> [SuggestDumpSlices]
    SameName {}            -> [SuggestDumpSlices]
    MissingBinding _ hints -> hints
    NoTopLevelBinding      -> noHints
    UnknownSubordinate {}  -> noHints

{- *********************************************************************
*                                                                      *
                  Outputting ImportError messages
*                                                                      *
**********************************************************************-}

instance Outputable ImportError where
  ppr (MissingModule mod_name) =
    hsep
      [ text "NB: no module named"
      , quotes (ppr mod_name)
      , text "is imported."
      ]
  ppr (ModulesDoNotExport mods occ_name)
    | mod NE.:| [] <- mods
    = hsep
        [ text "NB: the module"
        , quotes (ppr mod)
        , text "does not export"
        , quotes (ppr occ_name) <> dot ]
    | otherwise
    = hsep
        [ text "NB: neither"
        , quotedListWithNor (map ppr $ NE.toList mods)
        , text "export"
        , quotes (ppr occ_name) <> dot ]

{- *********************************************************************
*                                                                      *
             Suggested fixes for implication constraints
*                                                                      *
**********************************************************************-}

-- TODO: these functions should use GhcHint instead.

show_fixes :: [SDoc] -> SDoc
show_fixes []     = empty
show_fixes (f:fs) = sep [ text "Possible fix:"
                        , nest 2 (vcat (f : map (text "or" <+>) fs)) ]

ctxtFixes :: Bool -> PredType -> [Implication] -> [SDoc]
ctxtFixes has_ambig_tvs pred implics
  | not has_ambig_tvs
  , isTyVarClassPred pred
  , (skol:skols) <- usefulContext implics pred
  , let what | null skols
             , SigSkol (PatSynCtxt {}) _ _ <- skol
             = text "\"required\""
             | otherwise
             = empty
  = [sep [ text "add" <+> pprParendType pred
           <+> text "to the" <+> what <+> text "context of"
         , nest 2 $ ppr_skol skol $$
                    vcat [ text "or" <+> ppr_skol skol
                         | skol <- skols ] ] ]
  | otherwise = []
  where
    ppr_skol (PatSkol (RealDataCon dc) _) = text "the data constructor" <+> quotes (ppr dc)
    ppr_skol (PatSkol (PatSynCon ps)   _) = text "the pattern synonym"  <+> quotes (ppr ps)
    ppr_skol skol_info = ppr skol_info

usefulContext :: [Implication] -> PredType -> [SkolemInfoAnon]
-- usefulContext picks out the implications whose context
-- the programmer might plausibly augment to solve 'pred'
usefulContext implics pred
  = go implics
  where
    pred_tvs = tyCoVarsOfType pred
    go [] = []
    go (ic : ics)
       | implausible ic = rest
       | otherwise      = ic_info ic : rest
       where
          -- Stop when the context binds a variable free in the predicate
          rest | any (`elemVarSet` pred_tvs) (ic_skols ic) = []
               | otherwise                                 = go ics

    implausible ic
      | null (ic_skols ic)            = True
      | implausible_info (ic_info ic) = True
      | otherwise                     = False

    implausible_info (SigSkol (InfSigCtxt {}) _ _) = True
    implausible_info _                             = False
    -- Do not suggest adding constraints to an *inferred* type signature

pp_givens :: [Implication] -> [SDoc]
pp_givens givens
   = case givens of
       []     -> []
       (g:gs) ->      ppr_given (text "from the context:") g
               : map (ppr_given (text "or from:")) gs
   where
     ppr_given herald implic@(Implic { ic_given = gs, ic_info = skol_info })
         = hang (herald <+> pprEvVarTheta (mkMinimalBySCs evVarPred gs))
           -- See Note [Suppress redundant givens during error reporting]
           -- for why we use mkMinimalBySCs above.
              2 (sep [ text "bound by" <+> ppr skol_info
                     , text "at" <+> ppr (getLclEnvLoc (ic_env implic)) ])

{- *********************************************************************
*                                                                      *
                       CtOrigin information
*                                                                      *
**********************************************************************-}

levelString :: TypeOrKind -> String
levelString TypeLevel = "type"
levelString KindLevel = "kind"

pprArising :: CtOrigin -> SDoc
-- Used for the main, top-level error message
-- We've done special processing for TypeEq, KindEq, givens
pprArising (TypeEqOrigin {})         = empty
pprArising (KindEqOrigin {})         = empty
pprArising (AmbiguityCheckOrigin {}) = empty  -- the "In the ambiguity check" context
                                              -- is sufficient; this would just be
                                              -- repetitive
pprArising orig | isGivenOrigin orig = empty
                | otherwise          = pprCtOrigin orig

-- Add the "arising from..." part to a message
addArising :: CtOrigin -> SDoc -> SDoc
addArising orig msg = hang msg 2 (pprArising orig)

pprWithArising :: [Ct] -> SDoc
-- Print something like
--   (Eq a) arising from a use of x at y
--   (Show a) arising from a use of p at q
-- Also return a location for the error message
-- Works for Wanted/Derived only
pprWithArising []
  = panic "pprWithArising"
pprWithArising (ct:cts)
  | null cts
  = addArising (ctLocOrigin loc) (pprTheta [ctPred ct])
  | otherwise
  = vcat (map ppr_one (ct:cts))
  where
    loc = ctLoc ct
    ppr_one ct' = hang (parens (pprType (ctPred ct')))
                     2 (pprCtLoc (ctLoc ct'))

{- *********************************************************************
*                                                                      *
                           SkolemInfo
*                                                                      *
**********************************************************************-}

tidySkolemInfo :: TidyEnv -> SkolemInfo -> SkolemInfo
tidySkolemInfo env (SkolemInfo u sk_anon) = SkolemInfo u (tidySkolemInfoAnon env sk_anon)

----------------
tidySkolemInfoAnon :: TidyEnv -> SkolemInfoAnon -> SkolemInfoAnon
tidySkolemInfoAnon env (DerivSkol ty)         = DerivSkol (tidyType env ty)
tidySkolemInfoAnon env (SigSkol cx ty tv_prs) = tidySigSkol env cx ty tv_prs
tidySkolemInfoAnon env (InferSkol ids)        = InferSkol (mapSnd (tidyType env) ids)
tidySkolemInfoAnon env (UnifyForAllSkol ty)   = UnifyForAllSkol (tidyType env ty)
tidySkolemInfoAnon _   info                   = info

tidySigSkol :: TidyEnv -> UserTypeCtxt
            -> TcType -> [(Name,TcTyVar)] -> SkolemInfoAnon
-- We need to take special care when tidying SigSkol
-- See Note [SigSkol SkolemInfo] in "GHC.Tc.Types.Origin"
tidySigSkol env cx ty tv_prs
  = SigSkol cx (tidy_ty env ty) tv_prs'
  where
    tv_prs'  = mapSnd (tidyTyCoVarOcc env) tv_prs
    inst_env = mkNameEnv tv_prs'

    tidy_ty env (ForAllTy (Bndr tv vis) ty)
      = ForAllTy (Bndr tv' vis) (tidy_ty env' ty)
      where
        (env', tv') = tidy_tv_bndr env tv

    tidy_ty env ty@(FunTy InvisArg w arg res) -- Look under  c => t
      = ty { ft_mult = tidy_ty env w,
             ft_arg  = tidyType env arg,
             ft_res  = tidy_ty env res }

    tidy_ty env ty = tidyType env ty

    tidy_tv_bndr :: TidyEnv -> TyCoVar -> (TidyEnv, TyCoVar)
    tidy_tv_bndr env@(occ_env, subst) tv
      | Just tv' <- lookupNameEnv inst_env (tyVarName tv)
      = ((occ_env, extendVarEnv subst tv tv'), tv')

      | otherwise
      = tidyVarBndr env tv

pprSkols :: SolverReportErrCtxt -> [(SkolemInfoAnon, [TcTyVar])] -> SDoc
pprSkols ctxt zonked_ty_vars
  = let tidy_ty_vars = map (bimap (tidySkolemInfoAnon (cec_tidy ctxt)) id) zonked_ty_vars
    in vcat (map pp_one tidy_ty_vars)
  where
    no_msg = text "No skolem info - we could not find the origin of the following variables" <+> ppr zonked_ty_vars
       $$ text "This should not happen, please report it as a bug following the instructions at:"
       $$ text "https://gitlab.haskell.org/ghc/ghc/wikis/report-a-bug"

    pp_one (UnkSkol cs, tvs)
      = vcat [ hang (pprQuotedList tvs)
                 2 (is_or_are tvs "a" "(rigid, skolem)")
             , nest 2 (text "of unknown origin")
             , nest 2 (text "bound at" <+> ppr (skolsSpan tvs))
             , no_msg
             , prettyCallStackDoc cs
             ]
    pp_one (RuntimeUnkSkol, tvs)
      = hang (pprQuotedList tvs)
           2 (is_or_are tvs "an" "unknown runtime")
    pp_one (skol_info, tvs)
      = vcat [ hang (pprQuotedList tvs)
                 2 (is_or_are tvs "a" "rigid" <+> text "bound by")
             , nest 2 (pprSkolInfo skol_info)
             , nest 2 (text "at" <+> ppr (skolsSpan tvs)) ]

    is_or_are [_] article adjective = text "is" <+> text article <+> text adjective
                                      <+> text "type variable"
    is_or_are _   _       adjective = text "are" <+> text adjective
                                      <+> text "type variables"

skolsSpan :: [TcTyVar] -> SrcSpan
skolsSpan skol_tvs = foldr1 combineSrcSpans (map getSrcSpan skol_tvs)

{- *********************************************************************
*                                                                      *
                Utilities for expected/actual messages
*                                                                      *
**********************************************************************-}

mk_supplementary_ea_msg :: SolverReportErrCtxt -> TypeOrKind
                        -> Type -> Type -> CtOrigin
                        -> Either [TcSolverReportInfo] TcSolverReportMsg
mk_supplementary_ea_msg ctxt level ty1 ty2 orig
  | TypeEqOrigin { uo_expected = exp, uo_actual = act } <- orig
  , not (ea_looks_same ty1 ty2 exp act)
  = mk_ea_msg ctxt Nothing level orig
  | otherwise
  = Left []

ea_looks_same :: Type -> Type -> Type -> Type -> Bool
-- True if the faulting types (ty1, ty2) look the same as
-- the expected/actual types (exp, act).
-- If so, we don't want to redundantly report the latter
ea_looks_same ty1 ty2 exp act
  = (act `looks_same` ty1 && exp `looks_same` ty2)
    || (exp `looks_same` ty1 && act `looks_same` ty2)
  where
    looks_same t1 t2 = t1 `pickyEqType` t2
                    || t1 `eqType` liftedTypeKind && t2 `eqType` liftedTypeKind
      -- pickyEqType is sensitive to synonyms, so only replies True
      -- when the types really look the same.  However,
      -- (TYPE 'LiftedRep) and Type both print the same way.

mk_ea_msg :: SolverReportErrCtxt -> Maybe ErrorItem -> TypeOrKind
          -> CtOrigin -> Either [TcSolverReportInfo] TcSolverReportMsg
-- Constructs a "Couldn't match" message
-- The (Maybe ErrorItem) says whether this is the main top-level message (Just)
-- or a supplementary message (Nothing)
mk_ea_msg ctxt at_top level
  (TypeEqOrigin { uo_actual = act, uo_expected = exp, uo_thing = mb_thing })
  | Just thing <- mb_thing
  , KindLevel <- level
  = Right $ KindMismatch { kmismatch_what     = thing
                         , kmismatch_expected = exp
                         , kmismatch_actual   = act }
  | Just item <- at_top
  , let mismatch = Mismatch
          { mismatch_ea   = True
          , mismatch_item = item
          , mismatch_ty1  = exp
          , mismatch_ty2  = act }
  = Right $
      if expanded_syns
      then mkTcReportWithInfo mismatch [ea_expanded]
      else mismatch
  | otherwise
  = Left $
      if expanded_syns
      then [ea,ea_expanded]
      else [ea]
  where
    ea = ExpectedActual { ea_expected = exp, ea_actual = act }
    ea_expanded =
      ExpectedActualAfterTySynExpansion
        { ea_expanded_expected = expTy1
        , ea_expanded_actual   = expTy2 }
    expanded_syns = cec_expand_syns ctxt
                 && not (expTy1 `pickyEqType` exp && expTy2 `pickyEqType` act)
    (expTy1, expTy2) = expandSynonymsToMatch exp act
mk_ea_msg _ _ _ _ = Left []

{- Note [Expanding type synonyms to make types similar]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In type error messages, if -fprint-expanded-types is used,
we want to expand type synonyms to make expected and found types as similar as possible, but we shouldn't expand types too much to make type messages even more verbose and harder to understand. The whole point here is to make the difference in expected and found types clearer. `expandSynonymsToMatch` does this, it takes two types, and expands type synonyms only as much as necessary. Given two types t1 and t2: * If they're already same, it just returns the types. * If they're in form `C1 t1_1 .. t1_n` and `C2 t2_1 .. t2_m` (C1 and C2 are type constructors), it expands C1 and C2 if they're different type synonyms. Then it recursively does the same thing on expanded types. If C1 and C2 are same, then it applies the same procedure to arguments of C1 and arguments of C2 to make them as similar as possible. Most important thing here is to keep number of synonym expansions at minimum. For example, if t1 is `T (T3, T5, Int)` and t2 is `T (T5, T3, Bool)` where T5 = T4, T4 = T3, ..., T1 = X, it returns `T (T3, T3, Int)` and `T (T3, T3, Bool)`. * Otherwise types don't have same shapes and so the difference is clearly visible. It doesn't do any expansions and show these types. Note that we only expand top-layer type synonyms. Only when top-layer constructors are the same we start expanding inner type synonyms. Suppose top-layer type synonyms of t1 and t2 can expand N and M times, respectively. If their type-synonym-expanded forms will meet at some point (i.e. will have same shapes according to `sameShapes` function), it's possible to find where they meet in O(N+M) top-layer type synonym expansions and O(min(N,M)) comparisons. We first collect all the top-layer expansions of t1 and t2 in two lists, then drop the prefix of the longer list so that they have same lengths. Then we search through both lists in parallel, and return the first pair of types that have same shapes. Inner types of these two types with same shapes are then expanded using the same algorithm. 
In case they don't meet, we return the last pair of types in the lists, which has top-layer type synonyms completely expanded. (in this case the inner types are not expanded at all, as the current form already shows the type error) -} -- | Expand type synonyms in given types only enough to make them as similar as -- possible. Returned types are the same in terms of used type synonyms. -- -- To expand all synonyms, see 'Type.expandTypeSynonyms'. -- -- See `ExpandSynsFail` tests in tests testsuite/tests/typecheck/should_fail for -- some examples of how this should work. expandSynonymsToMatch :: Type -> Type -> (Type, Type) expandSynonymsToMatch ty1 ty2 = (ty1_ret, ty2_ret) where (ty1_ret, ty2_ret) = go ty1 ty2 -- Returns (type synonym expanded version of first type, -- type synonym expanded version of second type) go :: Type -> Type -> (Type, Type) go t1 t2 | t1 `pickyEqType` t2 = -- Types are same, nothing to do (t1, t2) go (TyConApp tc1 tys1) (TyConApp tc2 tys2) | tc1 == tc2 , tys1 `equalLength` tys2 = -- Type constructors are same. They may be synonyms, but we don't -- expand further. The lengths of tys1 and tys2 must be equal; -- for example, with type S a = a, we don't want -- to zip (S Monad Int) and (S Bool). let (tys1', tys2') = unzip (zipWithEqual "expandSynonymsToMatch" go tys1 tys2) in (TyConApp tc1 tys1', TyConApp tc2 tys2') go (AppTy t1_1 t1_2) (AppTy t2_1 t2_2) = let (t1_1', t2_1') = go t1_1 t2_1 (t1_2', t2_2') = go t1_2 t2_2 in (mkAppTy t1_1' t1_2', mkAppTy t2_1' t2_2') go ty1@(FunTy _ w1 t1_1 t1_2) ty2@(FunTy _ w2 t2_1 t2_2) | w1 `eqType` w2 = let (t1_1', t2_1') = go t1_1 t2_1 (t1_2', t2_2') = go t1_2 t2_2 in ( ty1 { ft_arg = t1_1', ft_res = t1_2' } , ty2 { ft_arg = t2_1', ft_res = t2_2' }) go (ForAllTy b1 t1) (ForAllTy b2 t2) = -- NOTE: We may have a bug here, but we just can't reproduce it easily. -- See D1016 comments for details and our attempts at producing a test -- case. Short version: We probably need RnEnv2 to really get this right. 
let (t1', t2') = go t1 t2 in (ForAllTy b1 t1', ForAllTy b2 t2') go (CastTy ty1 _) ty2 = go ty1 ty2 go ty1 (CastTy ty2 _) = go ty1 ty2 go t1 t2 = -- See Note [Expanding type synonyms to make types similar] for how this -- works let t1_exp_tys = t1 : tyExpansions t1 t2_exp_tys = t2 : tyExpansions t2 t1_exps = length t1_exp_tys t2_exps = length t2_exp_tys dif = abs (t1_exps - t2_exps) in followExpansions $ zipEqual "expandSynonymsToMatch.go" (if t1_exps > t2_exps then drop dif t1_exp_tys else t1_exp_tys) (if t2_exps > t1_exps then drop dif t2_exp_tys else t2_exp_tys) -- Expand the top layer type synonyms repeatedly, collect expansions in a -- list. The list does not include the original type. -- -- Example, if you have: -- -- type T10 = T9 -- type T9 = T8 -- ... -- type T0 = Int -- -- `tyExpansions T10` returns [T9, T8, T7, ... Int] -- -- This only expands the top layer, so if you have: -- -- type M a = Maybe a -- -- `tyExpansions (M T10)` returns [Maybe T10] (T10 is not expanded) tyExpansions :: Type -> [Type] tyExpansions = unfoldr (\t -> (\x -> (x, x)) `fmap` tcView t) -- Drop the type pairs until types in a pair look alike (i.e. the outer -- constructors are the same). 
followExpansions :: [(Type, Type)] -> (Type, Type) followExpansions [] = pprPanic "followExpansions" empty followExpansions [(t1, t2)] | sameShapes t1 t2 = go t1 t2 -- expand subtrees | otherwise = (t1, t2) -- the difference is already visible followExpansions ((t1, t2) : tss) -- Traverse subtrees when the outer shapes are the same | sameShapes t1 t2 = go t1 t2 -- Otherwise follow the expansions until they look alike | otherwise = followExpansions tss sameShapes :: Type -> Type -> Bool sameShapes AppTy{} AppTy{} = True sameShapes (TyConApp tc1 _) (TyConApp tc2 _) = tc1 == tc2 sameShapes (FunTy {}) (FunTy {}) = True sameShapes (ForAllTy {}) (ForAllTy {}) = True sameShapes (CastTy ty1 _) ty2 = sameShapes ty1 ty2 sameShapes ty1 (CastTy ty2 _) = sameShapes ty1 ty2 sameShapes _ _ = False {- ************************************************************************ * * \subsection{Contexts for renaming errors} * * ************************************************************************ -} withHsDocContext :: HsDocContext -> SDoc -> SDoc withHsDocContext ctxt doc = doc $$ inHsDocContext ctxt inHsDocContext :: HsDocContext -> SDoc inHsDocContext ctxt = text "In" <+> pprHsDocContext ctxt pprHsDocContext :: HsDocContext -> SDoc pprHsDocContext (GenericCtx doc) = doc pprHsDocContext (TypeSigCtx doc) = text "the type signature for" <+> doc pprHsDocContext (StandaloneKindSigCtx doc) = text "the standalone kind signature for" <+> doc pprHsDocContext PatCtx = text "a pattern type-signature" pprHsDocContext SpecInstSigCtx = text "a SPECIALISE instance pragma" pprHsDocContext DefaultDeclCtx = text "a `default' declaration" pprHsDocContext DerivDeclCtx = text "a deriving declaration" pprHsDocContext (RuleCtx name) = text "the rewrite rule" <+> doubleQuotes (ftext name) pprHsDocContext (TyDataCtx tycon) = text "the data type declaration for" <+> quotes (ppr tycon) pprHsDocContext (FamPatCtx tycon) = text "a type pattern of family instance for" <+> quotes (ppr tycon) pprHsDocContext 
(TySynCtx name) = text "the declaration for type synonym" <+> quotes (ppr name) pprHsDocContext (TyFamilyCtx name) = text "the declaration for type family" <+> quotes (ppr name) pprHsDocContext (ClassDeclCtx name) = text "the declaration for class" <+> quotes (ppr name) pprHsDocContext ExprWithTySigCtx = text "an expression type signature" pprHsDocContext TypBrCtx = text "a Template-Haskell quoted type" pprHsDocContext HsTypeCtx = text "a type argument" pprHsDocContext HsTypePatCtx = text "a type argument in a pattern" pprHsDocContext GHCiCtx = text "GHCi input" pprHsDocContext (SpliceTypeCtx hs_ty) = text "the spliced type" <+> quotes (ppr hs_ty) pprHsDocContext ClassInstanceCtx = text "GHC.Tc.Gen.Splice.reifyInstances" pprHsDocContext (ForeignDeclCtx name) = text "the foreign declaration for" <+> quotes (ppr name) pprHsDocContext (ConDeclCtx [name]) = text "the definition of data constructor" <+> quotes (ppr name) pprHsDocContext (ConDeclCtx names) = text "the definition of data constructors" <+> interpp'SP names ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Errors/Types.hs0000644000000000000000000030304614472400113021124 0ustar0000000000000000{-# LANGUAGE GADTs #-} module GHC.Tc.Errors.Types ( -- * Main types TcRnMessage(..) , TcRnMessageDetailed(..) , ErrInfo(..) , FixedRuntimeRepProvenance(..) , pprFixedRuntimeRepProvenance , ShadowedNameProvenance(..) , RecordFieldPart(..) , InjectivityErrReason(..) , HasKinds(..) , hasKinds , SuggestUndecidableInstances(..) , suggestUndecidableInstances , NotClosedReason(..) , SuggestPartialTypeSignatures(..) , suggestPartialTypeSignatures , DeriveInstanceErrReason(..) , UsingGeneralizedNewtypeDeriving(..) , usingGeneralizedNewtypeDeriving , DeriveAnyClassEnabled(..) , deriveAnyClassEnabled , DeriveInstanceBadConstructor(..) , HasWildcard(..) , hasWildcard , BadAnonWildcardContext(..) , SoleExtraConstraintWildcardAllowed(..) , DeriveGenericsErrReason(..) , HasAssociatedDataFamInsts(..) 
, hasAssociatedDataFamInsts , AssociatedTyLastVarInKind(..) , associatedTyLastVarInKind , AssociatedTyNotParamOverLastTyVar(..) , associatedTyNotParamOverLastTyVar , MissingSignature(..) , Exported(..) , HsDocContext(..) , FixedRuntimeRepErrorInfo(..) , ErrorItem(..), errorItemOrigin, errorItemEqRel, errorItemPred, errorItemCtLoc , SolverReport(..), SolverReportSupplementary(..) , SolverReportWithCtxt(..) , SolverReportErrCtxt(..) , getUserGivens, discardProvCtxtGivens , TcSolverReportMsg(..), TcSolverReportInfo(..) , CND_Extra(..) , mkTcReportWithInfo , FitsMbSuppressed(..) , ValidHoleFits(..), noValidHoleFits , HoleFitDispConfig(..) , RelevantBindings(..), pprRelevantBindings , NotInScopeError(..), mkTcRnNotInScope , ImportError(..) , HoleError(..) , CoercibleMsg(..) , PotentialInstances(..) , UnsupportedCallConvention(..) , ExpectedBackends(..) , ArgOrResult(..) ) where import GHC.Prelude import GHC.Hs import {-# SOURCE #-} GHC.Tc.Types (TcIdSigInfo) import {-# SOURCE #-} GHC.Tc.Errors.Hole.FitTypes (HoleFit) import GHC.Tc.Types.Constraint import GHC.Tc.Types.Evidence (EvBindsVar) import GHC.Tc.Types.Origin ( CtOrigin (ProvCtxtOrigin), SkolemInfoAnon (SigSkol) , UserTypeCtxt (PatSynCtxt), TyVarBndrs, TypedThing , FixedRuntimeRepOrigin(..) 
) import GHC.Tc.Types.Rank (Rank) import GHC.Tc.Utils.TcType (IllegalForeignTypeReason, TcType) import GHC.Types.Error import GHC.Types.Hint (UntickedPromotedThing(..)) import GHC.Types.FieldLabel (FieldLabelString) import GHC.Types.ForeignCall (CLabelString) import GHC.Types.Name (Name, OccName, getSrcLoc) import GHC.Types.Name.Reader import GHC.Types.SrcLoc import GHC.Types.TyThing (TyThing) import GHC.Types.Var (Id, TyCoVar, TyVar, TcTyVar) import GHC.Types.Var.Env (TidyEnv) import GHC.Types.Var.Set (TyVarSet, VarSet) import GHC.Unit.Types (Module) import GHC.Utils.Outputable import GHC.Core.Class (Class) import GHC.Core.Coercion.Axiom (CoAxBranch) import GHC.Core.ConLike (ConLike) import GHC.Core.DataCon (DataCon) import GHC.Core.FamInstEnv (FamInst) import GHC.Core.InstEnv (ClsInst) import GHC.Core.PatSyn (PatSyn) import GHC.Core.Predicate (EqRel, predTypeEqRel) import GHC.Core.TyCon (TyCon, TyConFlavour) import GHC.Core.Type (Kind, Type, ThetaType, PredType) import GHC.Driver.Backend (Backend) import GHC.Unit.State (UnitState) import GHC.Unit.Module.Name (ModuleName) import GHC.Types.Basic import GHC.Utils.Misc (filterOut) import qualified GHC.LanguageExtensions as LangExt import GHC.Data.FastString (FastString) import qualified Data.List.NonEmpty as NE import Data.Typeable hiding (TyCon) import qualified Data.Semigroup as Semigroup {- Note [Migrating TcM Messages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As part of #18516, we are slowly migrating the diagnostic messages emitted and reported in the TcM from SDoc to TcRnMessage. Historically, GHC emitted some diagnostics in 3 pieces, i.e. there were lots of error-reporting functions that accepted 3 SDocs an input: one for the important part of the message, one for the context and one for any supplementary information. 
Consider the following: • Couldn't match expected type ‘Int’ with actual type ‘Char’ • In the expression: x4 In a stmt of a 'do' block: return (x2, x4) In the expression: Under the hood, the reporting functions in Tc.Utils.Monad were emitting "Couldn't match" as the important part, "In the expression" as the context and "In a stmt..In the expression" as the supplementary, with the context and supplementary usually smashed together so that the final message would be composed only by two SDoc (which would then be bulletted like in the example). In order for us to smooth out the migration to the new diagnostic infrastructure, we introduce the 'ErrInfo' and 'TcRnMessageDetailed' types, which serve exactly the purpose of bridging the two worlds together without breaking the external API or the existing format of messages reported by GHC. Using 'ErrInfo' and 'TcRnMessageDetailed' also allows us to move away from the SDoc-ridden diagnostic API inside Tc.Utils.Monad, enabling further refactorings. In the future, once the conversion will be complete and we will successfully eradicate any use of SDoc in the diagnostic reporting of GHC, we can surely revisit the usage and existence of these two types, which for now remain a "necessary evil". -} -- The majority of TcRn messages come with extra context about the error, -- and this newtype captures it. See Note [Migrating TcM Messages]. data ErrInfo = ErrInfo { errInfoContext :: !SDoc -- ^ Extra context associated to the error. , errInfoSupplementary :: !SDoc -- ^ Extra supplementary info associated to the error. } -- | 'TcRnMessageDetailed' is an \"internal\" type (used only inside -- 'GHC.Tc.Utils.Monad' that wraps a 'TcRnMessage' while also providing -- any extra info needed to correctly pretty-print this diagnostic later on. data TcRnMessageDetailed = TcRnMessageDetailed !ErrInfo -- ^ Extra info associated with the message !TcRnMessage -- | An error which might arise during typechecking/renaming. 
data TcRnMessage where {-| Simply wraps a generic 'Diagnostic' message @a@. It can be used by plugins to provide custom diagnostic messages originated during typechecking/renaming. -} TcRnUnknownMessage :: (Diagnostic a, Typeable a) => a -> TcRnMessage {-| TcRnMessageWithInfo is a constructor which is used when extra information is needed to be provided in order to qualify a diagnostic and where it was originated (and why). It carries an extra 'UnitState' which can be used to pretty-print some names and it wraps a 'TcRnMessageDetailed', which includes any extra context associated with this diagnostic. -} TcRnMessageWithInfo :: !UnitState -- ^ The 'UnitState' will allow us to pretty-print -- some diagnostics with more detail. -> !TcRnMessageDetailed -> TcRnMessage {-| TcRnSolverReport is the constructor used to report unsolved constraints after constraint solving, as well as other errors such as hole fit errors. See the documentation of the 'TcSolverReportMsg' datatype for an overview of the different errors. -} TcRnSolverReport :: [SolverReportWithCtxt] -> DiagnosticReason -> [GhcHint] -> TcRnMessage -- TODO: split up TcRnSolverReport into several components, -- so that we can compute the reason and hints, as opposed -- to having to pass them here. {-| TcRnRedundantConstraints is a warning that is emitted when a binding has a user-written type signature which contains superfluous constraints. Example: f :: (Eq a, Ord a) => a -> a -> a f x y = (x < y) || x == y -- `Eq a` is superfluous: the `Ord a` constraint suffices. Test cases: T9939, T10632, T18036a, T20602, PluralS, T19296. -} TcRnRedundantConstraints :: [Id] -> (SkolemInfoAnon, Bool) -- ^ The contextual skolem info. -- The boolean controls whether we -- want to show it in the user message. -- (Nice to keep track of the info in either case, -- for other users of the GHC API.) 
-> TcRnMessage {-| TcRnInaccessibleCode is a warning that is emitted when the RHS of a pattern match is inaccessible, because the constraint solver has detected a contradiction. Example: data B a where { MkTrue :: B True; MkFalse :: B False } foo :: B False -> Bool foo MkFalse = False foo MkTrue = True -- Inaccessible: requires True ~ False Test cases: T7293, T7294, T15558, T17646, T18572, T18610, tcfail167. -} TcRnInaccessibleCode :: Implication -- ^ The implication containing a contradiction. -> NE.NonEmpty SolverReportWithCtxt -- ^ The contradiction(s). -> TcRnMessage {-| A type which was expected to have a fixed runtime representation does not have a fixed runtime representation. Example: data D (a :: TYPE r) = MkD a Test cases: T11724, T18534, RepPolyPatSynArg, RepPolyPatSynUnliftedNewtype, RepPolyPatSynRes, T20423 -} TcRnTypeDoesNotHaveFixedRuntimeRep :: !Type -> !FixedRuntimeRepProvenance -> !ErrInfo -- Extra info accumulated in the TcM monad -> TcRnMessage {-| TcRnImplicitLift is a warning (controlled with -Wimplicit-lift) that occurs when a Template Haskell quote implicitly uses 'lift'. Example: warning1 :: Lift t => t -> Q Exp warning1 x = [| x |] Test cases: th/T17804 -} TcRnImplicitLift :: Outputable var => var -> !ErrInfo -> TcRnMessage {-| TcRnUnusedPatternBinds is a warning (controlled with -Wunused-pattern-binds) that occurs if a pattern binding binds no variables at all, unless it is a lone wild-card pattern, or a banged pattern. Example: Just _ = rhs3 -- Warning: unused pattern binding (_, _) = rhs4 -- Warning: unused pattern binding _ = rhs3 -- No warning: lone wild-card pattern !() = rhs4 -- No warning: banged pattern; behaves like seq Test cases: rename/{T13646,T17c,T17e,T7085} -} TcRnUnusedPatternBinds :: HsBind GhcRn -> TcRnMessage {-| TcRnDodgyImports is a warning (controlled with -Wdodgy-imports) that occurs when a datatype 'T' is imported with all constructors, i.e. 'T(..)', but has been exported abstractly, i.e. 'T'. 
Test cases: rename/should_compile/T7167 -} TcRnDodgyImports :: RdrName -> TcRnMessage {-| TcRnDodgyExports is a warning (controlled by -Wdodgy-exports) that occurs when a datatype 'T' is exported with all constructors, i.e. 'T(..)', but is it just a type synonym or a type/data family. Example: module Foo ( T(..) -- Warning: T is a type synonym , A(..) -- Warning: A is a type family , C(..) -- Warning: C is a data family ) where type T = Int type family A :: * -> * data family C :: * -> * Test cases: warnings/should_compile/DodgyExports01 -} TcRnDodgyExports :: Name -> TcRnMessage {-| TcRnMissingImportList is a warning (controlled by -Wmissing-import-lists) that occurs when an import declaration does not explicitly list all the names brought into scope. Test cases: rename/should_compile/T4489 -} TcRnMissingImportList :: IE GhcPs -> TcRnMessage {-| When a module marked trustworthy or unsafe (using -XTrustworthy or -XUnsafe) is compiled with a plugin, the TcRnUnsafeDueToPlugin warning (controlled by -Wunsafe) is used as the reason the module was inferred to be unsafe. This warning is not raised if the -fplugin-trustworthy flag is passed. Test cases: plugins/T19926 -} TcRnUnsafeDueToPlugin :: TcRnMessage {-| TcRnModMissingRealSrcSpan is an error that occurrs when compiling a module that lacks an associated 'RealSrcSpan'. Test cases: None -} TcRnModMissingRealSrcSpan :: Module -> TcRnMessage {-| TcRnIdNotExportedFromModuleSig is an error pertaining to backpack that occurs when an identifier required by a signature is not exported by the module or signature that is being used as a substitution for that signature. Example(s): None Test cases: backpack/should_fail/bkpfail36 -} TcRnIdNotExportedFromModuleSig :: Name -> Module -> TcRnMessage {-| TcRnIdNotExportedFromLocalSig is an error pertaining to backpack that occurs when an identifier which is necessary for implementing a module signature is not exported from that signature. 
Example(s): None Test cases: backpack/should_fail/bkpfail30 backpack/should_fail/bkpfail31 backpack/should_fail/bkpfail34 -} TcRnIdNotExportedFromLocalSig :: Name -> TcRnMessage {-| TcRnShadowedName is a warning (controlled by -Wname-shadowing) that occurs whenever an inner-scope value has the same name as an outer-scope value, i.e. the inner value shadows the outer one. This can catch typographical errors that turn into hard-to-find bugs. The warning is suppressed for names beginning with an underscore. Examples(s): f = ... let f = id in ... f ... -- NOT OK, 'f' is shadowed f x = do { _ignore <- this; _ignore <- that; return (the other) } -- suppressed via underscore Test cases: typecheck/should_compile/T10971a rename/should_compile/rn039 rename/should_compile/rn064 rename/should_compile/T1972 rename/should_fail/T2723 rename/should_compile/T3262 driver/werror -} TcRnShadowedName :: OccName -> ShadowedNameProvenance -> TcRnMessage {-| TcRnDuplicateWarningDecls is an error that occurs whenever a warning is declared twice. Examples(s): None. Test cases: None. -} TcRnDuplicateWarningDecls :: !(LocatedN RdrName) -> !RdrName -> TcRnMessage {-| TcRnDuplicateWarningDecls is an error that occurs whenever the constraint solver in the simplifier hits the iterations' limit. Examples(s): None. Test cases: None. -} TcRnSimplifierTooManyIterations :: Cts -> !IntWithInf -- ^ The limit. -> WantedConstraints -> TcRnMessage {-| TcRnIllegalPatSynDecl is an error that occurs whenever there is an illegal pattern synonym declaration. Examples(s): varWithLocalPatSyn x = case x of P -> () where pattern P = () -- not valid, it can't be local, it must be defined at top-level. Test cases: patsyn/should_fail/local -} TcRnIllegalPatSynDecl :: !(LIdP GhcPs) -> TcRnMessage {-| TcRnLinearPatSyn is an error that occurs whenever a pattern synonym signature uses a field that is not unrestricted. 
Example(s): None Test cases: linear/should_fail/LinearPatSyn2 -} TcRnLinearPatSyn :: !Type -> TcRnMessage {-| TcRnEmptyRecordUpdate is an error that occurs whenever a record is updated without specifying any field. Examples(s): $(deriveJSON defaultOptions{} ''Bad) -- not ok, no fields selected for update of defaultOptions Test cases: th/T12788 -} TcRnEmptyRecordUpdate :: TcRnMessage {-| TcRnIllegalFieldPunning is an error that occurs whenever field punning is used without the 'NamedFieldPuns' extension enabled. Examples(s): data Foo = Foo { a :: Int } foo :: Foo -> Int foo Foo{a} = a -- Not ok, punning used without extension. Test cases: parser/should_fail/RecordDotSyntaxFail12 -} TcRnIllegalFieldPunning :: !(Located RdrName) -> TcRnMessage {-| TcRnIllegalWildcardsInRecord is an error that occurs whenever wildcards (..) are used in a record without the relevant extension being enabled. Examples(s): data Foo = Foo { a :: Int } foo :: Foo -> Int foo Foo{..} = a -- Not ok, wildcards used without extension. Test cases: parser/should_fail/RecordWildCardsFail -} TcRnIllegalWildcardsInRecord :: !RecordFieldPart -> TcRnMessage {-| TcRnIllegalWildcardInType is an error that occurs when a wildcard appears in a type in a location in which wildcards aren't allowed. 
Examples: Type synonyms: type T = _ Class declarations and instances: class C _ instance C _ Standalone kind signatures: type D :: _ data D Test cases: ExtraConstraintsWildcardInTypeSplice2 ExtraConstraintsWildcardInTypeSpliceUsed ExtraConstraintsWildcardNotLast ExtraConstraintsWildcardTwice NestedExtraConstraintsWildcard NestedNamedExtraConstraintsWildcard PartialClassMethodSignature PartialClassMethodSignature2 T12039 T13324_fail1 UnnamedConstraintWildcard1 UnnamedConstraintWildcard2 WildcardInADT1 WildcardInADT2 WildcardInADT3 WildcardInADTContext1 WildcardInDefault WildcardInDefaultSignature WildcardInDeriving WildcardInForeignExport WildcardInForeignImport WildcardInGADT1 WildcardInGADT2 WildcardInInstanceHead WildcardInInstanceSig WildcardInNewtype WildcardInPatSynSig WildcardInStandaloneDeriving WildcardInTypeFamilyInstanceRHS WildcardInTypeSynonymRHS saks_fail003 T15433a -} TcRnIllegalWildcardInType :: Maybe Name -- ^ the wildcard name, or 'Nothing' for an anonymous wildcard -> !BadAnonWildcardContext -> !(Maybe HsDocContext) -> TcRnMessage {-| TcRnDuplicateFieldName is an error that occurs whenever there are duplicate field names in a record. Examples(s): None. Test cases: None. -} TcRnDuplicateFieldName :: !RecordFieldPart -> NE.NonEmpty RdrName -> TcRnMessage {-| TcRnIllegalViewPattern is an error that occurs whenever the ViewPatterns syntax is used but the ViewPatterns language extension is not enabled. Examples(s): data Foo = Foo { a :: Int } foo :: Foo -> Int foo (a -> l) = l -- not OK, the 'ViewPattern' extension is not enabled. Test cases: parser/should_fail/ViewPatternsFail -} TcRnIllegalViewPattern :: !(Pat GhcPs) -> TcRnMessage {-| TcRnCharLiteralOutOfRange is an error that occurs whenever a character is out of range. Examples(s): None Test cases: None -} TcRnCharLiteralOutOfRange :: !Char -> TcRnMessage {-| TcRnIllegalWildcardsInConstructor is an error that occurs whenever the record wildcards '..' 
are used inside a constructor without labeled fields. Examples(s): None Test cases: None -} TcRnIllegalWildcardsInConstructor :: !Name -> TcRnMessage {-| TcRnIgnoringAnnotations is a warning that occurs when the source code contains annotation pragmas but the platform in use does not support an external interpreter such as GHCi and therefore the annotations are ignored. Example(s): None Test cases: None -} TcRnIgnoringAnnotations :: [LAnnDecl GhcRn] -> TcRnMessage {-| TcRnAnnotationInSafeHaskell is an error that occurs if annotation pragmas are used in conjunction with Safe Haskell. Example(s): None Test cases: annotations/should_fail/T10826 -} TcRnAnnotationInSafeHaskell :: TcRnMessage {-| TcRnInvalidTypeApplication is an error that occurs when a visible type application is used with an expression that does not accept "specified" type arguments. Example(s): foo :: forall {a}. a -> a foo x = x bar :: () bar = let x = foo @Int 42 in () Test cases: overloadedrecflds/should_fail/overloadedlabelsfail03 typecheck/should_fail/ExplicitSpecificity1 typecheck/should_fail/ExplicitSpecificity10 typecheck/should_fail/ExplicitSpecificity2 typecheck/should_fail/T17173 typecheck/should_fail/VtaFail -} TcRnInvalidTypeApplication :: Type -> LHsWcType GhcRn -> TcRnMessage {-| TcRnTagToEnumMissingValArg is an error that occurs when the 'tagToEnum#' function is not applied to a single value argument. Example(s): tagToEnum# 1 2 Test cases: None -} TcRnTagToEnumMissingValArg :: TcRnMessage {-| TcRnTagToEnumUnspecifiedResTy is an error that occurs when the 'tagToEnum#' function is not given a concrete result type. Example(s): foo :: forall a. a foo = tagToEnum# 0# Test cases: typecheck/should_fail/tcfail164 -} TcRnTagToEnumUnspecifiedResTy :: Type -> TcRnMessage {-| TcRnTagToEnumResTyNotAnEnum is an error that occurs when the 'tagToEnum#' function is given a result type that is not an enumeration type. 
Example(s): foo :: Int -- not an enumeration TyCon foo = tagToEnum# 0# Test cases: typecheck/should_fail/tcfail164 -} TcRnTagToEnumResTyNotAnEnum :: Type -> TcRnMessage {-| TcRnArrowIfThenElsePredDependsOnResultTy is an error that occurs when the predicate type of an ifThenElse expression in arrow notation depends on the type of the result. Example(s): None Test cases: None -} TcRnArrowIfThenElsePredDependsOnResultTy :: TcRnMessage {-| TcRnIllegalHsBootFileDecl is an error that occurs when an hs-boot file contains declarations that are not allowed, such as bindings. Example(s): None Test cases: None -} TcRnIllegalHsBootFileDecl :: TcRnMessage {-| TcRnRecursivePatternSynonym is an error that occurs when a pattern synonym is defined in terms of itself, either directly or indirectly. Example(s): pattern A = B pattern B = A Test cases: patsyn/should_fail/T16900 -} TcRnRecursivePatternSynonym :: LHsBinds GhcRn -> TcRnMessage {-| TcRnPartialTypeSigTyVarMismatch is an error that occurs when a partial type signature attempts to unify two different types. Example(s): f :: a -> b -> _ f x y = [x, y] Test cases: partial-sigs/should_fail/T14449 -} TcRnPartialTypeSigTyVarMismatch :: Name -- ^ first type variable -> Name -- ^ second type variable -> Name -- ^ function name -> LHsSigWcType GhcRn -> TcRnMessage {-| TcRnPartialTypeSigBadQuantifier is an error that occurs when a type variable being quantified over in the partial type signature of a function gets unified with a type that is free in that function's context. Example(s): foo :: Num a => a -> a foo xxx = g xxx where g :: forall b. 
Num b => _ -> b g y = xxx + y Test cases: partial-sig/should_fail/T14479 -} TcRnPartialTypeSigBadQuantifier :: Name -- ^ user-written name of type variable being quantified -> Name -- ^ function name -> Maybe Type -- ^ type the variable unified with, if known -> LHsSigWcType GhcRn -- ^ partial type signature -> TcRnMessage {-| TcRnMissingSignature is a warning that occurs when a top-level binding or a pattern synonym does not have a type signature. Controlled by the flags: -Wmissing-signatures -Wmissing-exported-signatures -Wmissing-pattern-synonym-signatures -Wmissing-exported-pattern-synonym-signatures -Wmissing-kind-signatures Test cases: T11077 (top-level bindings) T12484 (pattern synonyms) T19564 (kind signatures) -} TcRnMissingSignature :: MissingSignature -> Exported -> Bool -- ^ True: -Wmissing-signatures overrides -Wmissing-exported-signatures, -- or -Wmissing-pattern-synonym-signatures overrides -Wmissing-exported-pattern-synonym-signatures -> TcRnMessage {-| TcRnPolymorphicBinderMissingSig is a warning controlled by -Wmissing-local-signatures that occurs when a local polymorphic binding lacks a type signature. Example(s): id a = a Test cases: warnings/should_compile/T12574 -} TcRnPolymorphicBinderMissingSig :: Name -> Type -> TcRnMessage {-| TcRnOverloadedSig is an error that occurs when a binding group conflicts with the monomorphism restriction. Example(s): data T a = T a mono = ... where x :: Applicative f => f a T x = ... Test cases: typecheck/should_compile/T11339 -} TcRnOverloadedSig :: TcIdSigInfo -> TcRnMessage {-| TcRnTupleConstraintInst is an error that occurs whenever an instance for a tuple constraint is specified. Examples(s): class C m a class D m a f :: (forall a. Eq a => (C m a, D m a)) => m a f = undefined Test cases: quantified-constraints/T15334 -} TcRnTupleConstraintInst :: !Class -> TcRnMessage {-| TcRnAbstractClassInst is an error that occurs whenever an instance of an abstract class is specified. 
Examples(s): -- A.hs-boot module A where class C a -- B.hs module B where import {-# SOURCE #-} A instance C Int where -- A.hs module A where import B class C a where f :: a -- Main.hs import A main = print (f :: Int) Test cases: typecheck/should_fail/T13068 -} TcRnAbstractClassInst :: !Class -> TcRnMessage {-| TcRnNoClassInstHead is an error that occurs whenever an instance head is not headed by a class. Examples(s): instance c Test cases: typecheck/rename/T5513 typecheck/rename/T16385 -} TcRnNoClassInstHead :: !Type -> TcRnMessage {-| TcRnUserTypeError is an error that occurs due to a user's custom type error, which can be triggered by adding a `TypeError` constraint in a type signature or typeclass instance. Examples(s): f :: TypeError (Text "This is a type error") f = undefined Test cases: typecheck/should_fail/CustomTypeErrors02 typecheck/should_fail/CustomTypeErrors03 -} TcRnUserTypeError :: !Type -> TcRnMessage {-| TcRnConstraintInKind is an error that occurs whenever a constraint is specified in a kind. Examples(s): data Q :: Eq a => Type where {} Test cases: dependent/should_fail/T13895 polykinds/T16263 saks/should_fail/saks_fail004 typecheck/should_fail/T16059a typecheck/should_fail/T18714 -} TcRnConstraintInKind :: !Type -> TcRnMessage {-| TcRnUnboxedTupleTypeFuncArg is an error that occurs whenever an unboxed tuple or unboxed sum type is specified as a function argument, when the appropriate extension (`-XUnboxedTuples` or `-XUnboxedSums`) isn't enabled. Examples(s): -- T15073.hs import T15073a newtype Foo a = MkFoo a deriving P -- T15073a.hs class P a where p :: a -> (# a #) Test cases: deriving/should_fail/T15073.hs deriving/should_fail/T15073a.hs typecheck/should_fail/T16059d -} TcRnUnboxedTupleOrSumTypeFuncArg :: UnboxedTupleOrSum -- ^ whether this is an unboxed tuple or an unboxed sum -> !Type -> TcRnMessage {-| TcRnLinearFuncInKind is an error that occurs whenever a linear function is specified in a kind. 
Examples(s): data A :: * %1 -> * Test cases: linear/should_fail/LinearKind linear/should_fail/LinearKind2 linear/should_fail/LinearKind3 -} TcRnLinearFuncInKind :: !Type -> TcRnMessage {-| TcRnForAllEscapeError is an error that occurs whenever a quantified type's kind mentions quantified type variable. Examples(s): type T :: TYPE (BoxedRep l) data T = MkT Test cases: unlifted-datatypes/should_fail/UnlDataNullaryPoly -} TcRnForAllEscapeError :: !Type -> !Kind -> TcRnMessage {-| TcRnVDQInTermType is an error that occurs whenever a visible dependent quantification is specified in the type of a term. Examples(s): a = (undefined :: forall k -> k -> Type) @Int Test cases: dependent/should_fail/T15859 dependent/should_fail/T16326_Fail1 dependent/should_fail/T16326_Fail2 dependent/should_fail/T16326_Fail3 dependent/should_fail/T16326_Fail4 dependent/should_fail/T16326_Fail5 dependent/should_fail/T16326_Fail6 dependent/should_fail/T16326_Fail7 dependent/should_fail/T16326_Fail8 dependent/should_fail/T16326_Fail9 dependent/should_fail/T16326_Fail10 dependent/should_fail/T16326_Fail11 dependent/should_fail/T16326_Fail12 dependent/should_fail/T17687 dependent/should_fail/T18271 -} TcRnVDQInTermType :: !Type -> TcRnMessage {-| TcRnBadQuantPredHead is an error that occurs whenever a quantified predicate lacks a class or type variable head. Examples(s): class (forall a. A t a => A t [a]) => B t where type A t a :: Constraint Test cases: quantified-constraints/T16474 -} TcRnBadQuantPredHead :: !Type -> TcRnMessage {-| TcRnIllegalTupleConstraint is an error that occurs whenever an illegal tuple constraint is specified. Examples(s): g :: ((Show a, Num a), Eq a) => a -> a g = undefined Test cases: typecheck/should_fail/tcfail209a -} TcRnIllegalTupleConstraint :: !Type -> TcRnMessage {-| TcRnNonTypeVarArgInConstraint is an error that occurs whenever a non type-variable argument is specified in a constraint. 
Examples(s): data T instance Eq Int => Eq T Test cases: ghci/scripts/T13202 ghci/scripts/T13202a polykinds/T12055a typecheck/should_fail/T10351 typecheck/should_fail/T19187 typecheck/should_fail/T6022 typecheck/should_fail/T8883 -} TcRnNonTypeVarArgInConstraint :: !Type -> TcRnMessage {-| TcRnIllegalImplicitParam is an error that occurs whenever an illegal implicit parameter is specified. Examples(s): type Bla = ?x::Int data T = T instance Bla => Eq T Test cases: polykinds/T11466 typecheck/should_fail/T8912 typecheck/should_fail/tcfail041 typecheck/should_fail/tcfail211 typecheck/should_fail/tcrun045 -} TcRnIllegalImplicitParam :: !Type -> TcRnMessage {-| TcRnIllegalConstraintSynonymOfKind is an error that occurs whenever an illegal constraint synonym of kind is specified. Examples(s): type Showish = Show f :: (Showish a) => a -> a f = undefined Test cases: typecheck/should_fail/tcfail209 -} TcRnIllegalConstraintSynonymOfKind :: !Type -> TcRnMessage {-| TcRnIllegalClassInst is an error that occurs whenever a class instance is specified for a non-class. Examples(s): type C1 a = (Show (a -> Bool)) instance C1 Int where Test cases: polykinds/T13267 -} TcRnIllegalClassInst :: !TyConFlavour -> TcRnMessage {-| TcRnOversaturatedVisibleKindArg is an error that occurs whenever an illegal oversaturated visible kind argument is specified. Examples(s): type family F2 :: forall (a :: Type). Type where F2 @a = Maybe a Test cases: typecheck/should_fail/T15793 typecheck/should_fail/T16255 -} TcRnOversaturatedVisibleKindArg :: !Type -> TcRnMessage {-| TcRnBadAssociatedType is an error that occurs whenever a class doesn't have an associated type. 
Examples(s): $(do d <- instanceD (cxt []) (conT ''Eq `appT` conT ''Foo) [tySynInstD $ tySynEqn Nothing (conT ''Rep `appT` conT ''Foo) (conT ''Maybe)] return [d]) ======> instance Eq Foo where type Rep Foo = Maybe Test cases: th/T12387a -} TcRnBadAssociatedType :: {-Class-} !Name -> {-TyCon-} !Name -> TcRnMessage {-| TcRnForAllRankErr is an error that occurs whenever an illegal ranked type is specified. Examples(s): foo :: (a,b) -> (a~b => t) -> (a,b) foo p x = p Test cases: - ghci/should_run/T15806 - indexed-types/should_fail/SimpleFail15 - typecheck/should_fail/T11355 - typecheck/should_fail/T12083a - typecheck/should_fail/T12083b - typecheck/should_fail/T16059c - typecheck/should_fail/T16059e - typecheck/should_fail/T17213 - typecheck/should_fail/T18939_Fail - typecheck/should_fail/T2538 - typecheck/should_fail/T5957 - typecheck/should_fail/T7019 - typecheck/should_fail/T7019a - typecheck/should_fail/T7809 - typecheck/should_fail/T9196 - typecheck/should_fail/tcfail127 - typecheck/should_fail/tcfail184 - typecheck/should_fail/tcfail196 - typecheck/should_fail/tcfail197 -} TcRnForAllRankErr :: !Rank -> !Type -> TcRnMessage {-| TcRnMonomorphicBindings is a warning (controlled by -Wmonomorphism-restriction) that arise when the monomorphism restriction applies to the given bindings. Examples(s): {-# OPTIONS_GHC -Wmonomorphism-restriction #-} bar = 10 foo :: Int foo = bar main :: IO () main = print foo The example above emits the warning (for 'bar'), because without monomorphism restriction the inferred type for 'bar' is 'bar :: Num p => p'. This warning tells us that /if/ we were to enable '-XMonomorphismRestriction' we would make 'bar' less polymorphic, as its type would become 'bar :: Int', so GHC warns us about that. Test cases: typecheck/should_compile/T13785 -} TcRnMonomorphicBindings :: [Name] -> TcRnMessage {-| TcRnOrphanInstance is a warning (controlled by -Wwarn-orphans) that arises when a typeclass instance is an \"orphan\", i.e. 
if it appears in a module in which neither the class nor the type being instanced are declared in the same module. Examples(s): None Test cases: warnings/should_compile/T9178 typecheck/should_compile/T4912 -} TcRnOrphanInstance :: ClsInst -> TcRnMessage {-| TcRnFunDepConflict is an error that occurs when there are functional dependencies conflicts between instance declarations. Examples(s): None Test cases: typecheck/should_fail/T2307 typecheck/should_fail/tcfail096 typecheck/should_fail/tcfail202 -} TcRnFunDepConflict :: !UnitState -> NE.NonEmpty ClsInst -> TcRnMessage {-| TcRnDupInstanceDecls is an error that occurs when there are duplicate instance declarations. Examples(s): class Foo a where foo :: a -> Int instance Foo Int where foo = id instance Foo Int where foo = const 42 Test cases: cabal/T12733/T12733 typecheck/should_fail/tcfail035 typecheck/should_fail/tcfail023 backpack/should_fail/bkpfail18 typecheck/should_fail/TcNullaryTCFail typecheck/should_fail/tcfail036 typecheck/should_fail/tcfail073 module/mod51 module/mod52 module/mod44 -} TcRnDupInstanceDecls :: !UnitState -> NE.NonEmpty ClsInst -> TcRnMessage {-| TcRnConflictingFamInstDecls is an error that occurs when there are conflicting family instance declarations. Examples(s): None. 
Test cases: indexed-types/should_fail/ExplicitForAllFams4b indexed-types/should_fail/NoGood indexed-types/should_fail/Over indexed-types/should_fail/OverDirectThisMod indexed-types/should_fail/OverIndirectThisMod indexed-types/should_fail/SimpleFail11a indexed-types/should_fail/SimpleFail11b indexed-types/should_fail/SimpleFail11c indexed-types/should_fail/SimpleFail11d indexed-types/should_fail/SimpleFail2a indexed-types/should_fail/SimpleFail2b indexed-types/should_fail/T13092/T13092 indexed-types/should_fail/T13092c/T13092c indexed-types/should_fail/T14179 indexed-types/should_fail/T2334A indexed-types/should_fail/T2677 indexed-types/should_fail/T3330b indexed-types/should_fail/T4246 indexed-types/should_fail/T7102a indexed-types/should_fail/T9371 polykinds/T7524 typecheck/should_fail/UnliftedNewtypesOverlap -} TcRnConflictingFamInstDecls :: NE.NonEmpty FamInst -> TcRnMessage TcRnFamInstNotInjective :: InjectivityErrReason -> TyCon -> NE.NonEmpty CoAxBranch -> TcRnMessage {-| TcRnBangOnUnliftedType is a warning (controlled by -Wredundant-strictness-flags) that occurs when a strictness annotation is applied to an unlifted type. Example(s): data T = MkT !Int# -- Strictness flag has no effect on unlifted types Test cases: typecheck/should_compile/T20187a typecheck/should_compile/T20187b -} TcRnBangOnUnliftedType :: !Type -> TcRnMessage {-| TcRnMultipleDefaultDeclarations is an error that occurs when a module has more than one default declaration. Example: default (Integer, Int) default (Double, Float) -- 2nd default declaration not allowed Test cases: module/mod58 -} TcRnMultipleDefaultDeclarations :: [LDefaultDecl GhcRn] -> TcRnMessage {-| TcRnBadDefaultType is an error that occurs when a type used in a default declaration does not have an instance for any of the applicable classes. 
Example(s): data Foo default (Foo) Test cases: typecheck/should_fail/T11974b -} TcRnBadDefaultType :: Type -> [Class] -> TcRnMessage {-| TcRnPatSynBundledWithNonDataCon is an error that occurs when a module's export list bundles a pattern synonym with a type that is not a proper `data` or `newtype` construction. Example(s): module Foo (MyClass(.., P)) where pattern P = Nothing class MyClass a where foo :: a -> Int Test cases: patsyn/should_fail/export-class -} TcRnPatSynBundledWithNonDataCon :: TcRnMessage {-| TcRnPatSynBundledWithWrongType is an error that occurs when the export list of a module has a pattern synonym bundled with a type that does not match the type of the pattern synonym. Example(s): module Foo (R(P,x)) where data Q = Q Int data R = R pattern P{x} = Q x Test cases: patsyn/should_fail/export-ps-rec-sel patsyn/should_fail/export-type-synonym patsyn/should_fail/export-type -} TcRnPatSynBundledWithWrongType :: Type -> Type -> TcRnMessage {-| TcRnDupeModuleExport is a warning controlled by @-Wduplicate-exports@ that occurs when a module appears more than once in an export list. Example(s): module Foo (module Bar, module Bar) import Bar Test cases: None -} TcRnDupeModuleExport :: ModuleName -> TcRnMessage {-| TcRnExportedModNotImported is an error that occurs when an export list contains a module that is not imported. Example(s): None Test cases: module/mod135 module/mod8 rename/should_fail/rnfail028 backpack/should_fail/bkpfail48 -} TcRnExportedModNotImported :: ModuleName -> TcRnMessage {-| TcRnNullExportedModule is a warning controlled by -Wdodgy-exports that occurs when an export list contains a module that has no exports. Example(s): module Foo (module Bar) where import Bar () Test cases: None -} TcRnNullExportedModule :: ModuleName -> TcRnMessage {-| TcRnMissingExportList is a warning controlled by -Wmissing-export-lists that occurs when a module does not have an explicit export list. 
Example(s): None Test cases: typecheck/should_fail/MissingExportList03 -} TcRnMissingExportList :: ModuleName -> TcRnMessage {-| TcRnExportHiddenComponents is an error that occurs when an export contains constructor or class methods that are not visible. Example(s): None Test cases: None -} TcRnExportHiddenComponents :: IE GhcPs -> TcRnMessage {-| TcRnDuplicateExport is a warning (controlled by -Wduplicate-exports) that occurs when an identifier appears in an export list more than once. Example(s): None Test cases: module/MultiExport module/mod128 module/mod14 module/mod5 overloadedrecflds/should_fail/DuplicateExports patsyn/should_compile/T11959 -} TcRnDuplicateExport :: GreName -> IE GhcPs -> IE GhcPs -> TcRnMessage {-| TcRnExportedParentChildMismatch is an error that occurs when an export is bundled with a parent that it does not belong to Example(s): module Foo (T(a)) where data T a = True Test cases: module/T11970 module/T11970B module/mod17 module/mod3 overloadedrecflds/should_fail/NoParent -} TcRnExportedParentChildMismatch :: Name -> TyThing -> GreName -> [Name] -> TcRnMessage {-| TcRnConflictingExports is an error that occurs when different identifiers that have the same name are being exported by a module. 
Example(s): module Foo (Bar.f, module Baz) where import qualified Bar (f) import Baz (f) Test cases: module/mod131 module/mod142 module/mod143 module/mod144 module/mod145 module/mod146 module/mod150 module/mod155 overloadedrecflds/should_fail/T14953 overloadedrecflds/should_fail/overloadedrecfldsfail10 rename/should_fail/rnfail029 rename/should_fail/rnfail040 typecheck/should_fail/T16453E2 typecheck/should_fail/tcfail025 typecheck/should_fail/tcfail026 -} TcRnConflictingExports :: OccName -- ^ Occurrence name shared by both exports -> GreName -- ^ Name of first export -> GlobalRdrElt -- ^ Provenance for definition site of first export -> IE GhcPs -- ^ Export decl of first export -> GreName -- ^ Name of second export -> GlobalRdrElt -- ^ Provenance for definition site of second export -> IE GhcPs -- ^ Export decl of second export -> TcRnMessage {-| TcRnAmbiguousField is a warning controlled by -Wambiguous-fields occurring when a record update's type cannot be precisely determined. This will not be supported by -XDuplicateRecordFields in future releases. Example(s): data Person = MkPerson { personId :: Int, name :: String } data Address = MkAddress { personId :: Int, address :: String } bad1 x = x { personId = 4 } :: Person -- ambiguous bad2 (x :: Person) = x { personId = 4 } -- ambiguous good x = (x :: Person) { personId = 4 } -- not ambiguous Test cases: overloadedrecflds/should_fail/overloadedrecfldsfail06 -} TcRnAmbiguousField :: HsExpr GhcRn -- ^ Field update -> TyCon -- ^ Record type -> TcRnMessage {-| TcRnMissingFields is a warning controlled by -Wmissing-fields occurring when the initialisation of a record is missing one or more (lazy) fields. 
Example(s): data Rec = Rec { a :: Int, b :: String, c :: Bool } x = Rec { a = 1, b = "two" } -- missing field 'c' Test cases: deSugar/should_compile/T13870 deSugar/should_compile/ds041 patsyn/should_compile/T11283 rename/should_compile/T5334 rename/should_compile/T12229 rename/should_compile/T5892a warnings/should_fail/WerrorFail2 -} TcRnMissingFields :: ConLike -> [(FieldLabelString, TcType)] -> TcRnMessage {-| TcRnFieldUpdateInvalidType is an error occurring when an updated field's type mentions something that is outside the universally quantified variables of the data constructor, such as an existentially quantified type. Example(s): data X = forall a. MkX { f :: a } x = (MkX ()) { f = False } Test cases: patsyn/should_fail/records-exquant typecheck/should_fail/T3323 -} TcRnFieldUpdateInvalidType :: [(FieldLabelString,TcType)] -> TcRnMessage {-| TcRnNoConstructorHasAllFields is an error that occurs when a record update has fields that no single constructor encompasses. Example(s): data Foo = A { x :: Bool } | B { y :: Int } foo = (A False) { x = True, y = 5 } Test cases: overloadedrecflds/should_fail/overloadedrecfldsfail08 patsyn/should_fail/mixed-pat-syn-record-sels typecheck/should_fail/T7989 -} TcRnNoConstructorHasAllFields :: [FieldLabelString] -> TcRnMessage {- TcRnMixedSelectors is an error for when a mixture of pattern synonym and record selectors are used in the same record update block. Example(s): data Rec = Rec { foo :: Int, bar :: String } pattern Pat { f1, f2 } = Rec { foo = f1, bar = f2 } illegal :: Rec -> Rec illegal r = r { f1 = 1, bar = "two" } Test cases: patsyn/should_fail/records-mixing-fields -} TcRnMixedSelectors :: Name -- ^ Record -> [Id] -- ^ Record selectors -> Name -- ^ Pattern synonym -> [Id] -- ^ Pattern selectors -> TcRnMessage {- TcRnMissingStrictFields is an error occurring when a record field marked as strict is omitted when constructing said record. 
Example(s): data R = R { strictField :: !Bool, nonStrict :: Int } x = R { nonStrict = 1 } Test cases: typecheck/should_fail/T18869 typecheck/should_fail/tcfail085 typecheck/should_fail/tcfail112 -} TcRnMissingStrictFields :: ConLike -> [(FieldLabelString, TcType)] -> TcRnMessage {- TcRnNoPossibleParentForFields is an error thrown when the fields used in a record update block do not all belong to any one type. Example(s): data R1 = R1 { x :: Int, y :: Int } data R2 = R2 { y :: Int, z :: Int } update r = r { x = 1, y = 2, z = 3 } Test cases: overloadedrecflds/should_fail/overloadedrecfldsfail01 overloadedrecflds/should_fail/overloadedrecfldsfail14 -} TcRnNoPossibleParentForFields :: [LHsRecUpdField GhcRn] -> TcRnMessage {- TcRnBadOverloadedRecordUpdate is an error for a record update that cannot be pinned down to any one constructor and thus must be given a type signature. Example(s): data R1 = R1 { x :: Int } data R2 = R2 { x :: Int } update r = r { x = 1 } -- needs a type signature Test cases: overloadedrecflds/should_fail/overloadedrecfldsfail01 -} TcRnBadOverloadedRecordUpdate :: [LHsRecUpdField GhcRn] -> TcRnMessage {- TcRnStaticFormNotClosed is an error pertaining to terms that are marked static using the -XStaticPointers extension but which are not closed terms. Example(s): f x = static x Test cases: rename/should_fail/RnStaticPointersFail01 rename/should_fail/RnStaticPointersFail03 -} TcRnStaticFormNotClosed :: Name -> NotClosedReason -> TcRnMessage {-| TcRnSpecialClassInst is an error that occurs when a user attempts to define an instance for a built-in typeclass such as 'Coercible', 'Typeable', or 'KnownNat', outside of a signature file. 
Test cases: deriving/should_fail/T9687 deriving/should_fail/T14916 polykinds/T8132 typecheck/should_fail/TcCoercibleFail2 typecheck/should_fail/T12837 typecheck/should_fail/T14390 -} TcRnSpecialClassInst :: !Class -> !Bool -- ^ Whether the error is due to Safe Haskell being enabled -> TcRnMessage {-| TcRnUselessTypeable is a warning (controlled by -Wderiving-typeable) that occurs when trying to derive an instance of the 'Typeable' class. Deriving 'Typeable' is no longer necessary (hence the \"useless\") as all types automatically derive 'Typeable' in modern GHC versions. Example(s): None. Test cases: warnings/should_compile/DerivingTypeable -} TcRnUselessTypeable :: TcRnMessage {-| TcRnDerivingDefaults is a warning (controlled by -Wderiving-defaults) that occurs when both 'DeriveAnyClass' and 'GeneralizedNewtypeDeriving' are enabled, and therefore GHC defaults to 'DeriveAnyClass', which might not be what the user wants. Example(s): None. Test cases: typecheck/should_compile/T15839a deriving/should_compile/T16179 -} TcRnDerivingDefaults :: !Class -> TcRnMessage {-| TcRnNonUnaryTypeclassConstraint is an error that occurs when GHC encounters a non-unary constraint when trying to derive a typeclass. Example(s): class A deriving instance A data B deriving A -- We cannot derive A, is not unary (i.e. 'class A a'). Test cases: deriving/should_fail/T7959 deriving/should_fail/drvfail005 deriving/should_fail/drvfail009 deriving/should_fail/drvfail006 -} TcRnNonUnaryTypeclassConstraint :: !(LHsSigType GhcRn) -> TcRnMessage {-| TcRnPartialTypeSignatures is a warning (controlled by -Wpartial-type-signatures) that occurs when a wildcard '_' is found in place of a type in a signature or a type class derivation Example(s): foo :: _ -> Int foo = ... 
deriving instance _ => Eq (Foo a) Test cases: dependent/should_compile/T11241 dependent/should_compile/T15076 dependent/should_compile/T14880-2 typecheck/should_compile/T17024 typecheck/should_compile/T10072 partial-sigs/should_fail/TidyClash2 partial-sigs/should_fail/Defaulting1MROff partial-sigs/should_fail/WildcardsInPatternAndExprSig partial-sigs/should_fail/T10615 partial-sigs/should_fail/T14584a partial-sigs/should_fail/TidyClash partial-sigs/should_fail/T11122 partial-sigs/should_fail/T14584 partial-sigs/should_fail/T10045 partial-sigs/should_fail/PartialTypeSignaturesDisabled partial-sigs/should_fail/T10999 partial-sigs/should_fail/ExtraConstraintsWildcardInExpressionSignature partial-sigs/should_fail/ExtraConstraintsWildcardInPatternSplice partial-sigs/should_fail/WildcardInstantiations partial-sigs/should_run/T15415 partial-sigs/should_compile/T10463 partial-sigs/should_compile/T15039a partial-sigs/should_compile/T16728b partial-sigs/should_compile/T15039c partial-sigs/should_compile/T10438 partial-sigs/should_compile/SplicesUsed partial-sigs/should_compile/T18008 partial-sigs/should_compile/ExprSigLocal partial-sigs/should_compile/T11339a partial-sigs/should_compile/T11670 partial-sigs/should_compile/WarningWildcardInstantiations partial-sigs/should_compile/T16728 partial-sigs/should_compile/T12033 partial-sigs/should_compile/T15039b partial-sigs/should_compile/T10403 partial-sigs/should_compile/T11192 partial-sigs/should_compile/T16728a partial-sigs/should_compile/TypedSplice partial-sigs/should_compile/T15039d partial-sigs/should_compile/T11016 partial-sigs/should_compile/T13324_compile2 linear/should_fail/LinearPartialSig polykinds/T14265 polykinds/T14172 -} TcRnPartialTypeSignatures :: !SuggestPartialTypeSignatures -> !ThetaType -> TcRnMessage {-| TcRnCannotDeriveInstance is an error that occurs every time a typeclass instance can't be derived. The 'DeriveInstanceErrReason' will contain the specific reason this error arose. Example(s): None. 
Test cases: generics/T10604/T10604_no_PolyKinds deriving/should_fail/drvfail009 deriving/should_fail/drvfail-functor2 deriving/should_fail/T10598_fail3 deriving/should_fail/deriving-via-fail2 deriving/should_fail/deriving-via-fail deriving/should_fail/T16181 -} TcRnCannotDeriveInstance :: !Class -- ^ The typeclass we are trying to derive -- an instance for -> [Type] -- ^ The typeclass arguments, if any. -> !(Maybe (DerivStrategy GhcTc)) -- ^ The derivation strategy, if any. -> !UsingGeneralizedNewtypeDeriving -- ^ Is '-XGeneralizedNewtypeDeriving' enabled? -> !DeriveInstanceErrReason -- ^ The specific reason why we couldn't derive -- an instance for the class. -> TcRnMessage {-| TcRnLazyGADTPattern is an error that occurs when a user writes a nested GADT pattern match inside a lazy (~) pattern. Test case: gadt/lazypat -} TcRnLazyGADTPattern :: TcRnMessage {-| TcRnArrowProcGADTPattern is an error that occurs when a user writes a GADT pattern inside arrow proc notation. Test case: arrows/should_fail/arrowfail004. -} TcRnArrowProcGADTPattern :: TcRnMessage {-| TcRnForallIdentifier is a warning (controlled with -Wforall-identifier) that occurs when a definition uses 'forall' as an identifier. Example: forall x = () g forall = () Test cases: T20609 T20609a T20609b T20609c T20609d -} TcRnForallIdentifier :: RdrName -> TcRnMessage {-| TcRnTypeEqualityOutOfScope is a warning (controlled by -Wtype-equality-out-of-scope) that occurs when the type equality (a ~ b) is not in scope. Test case: T18862b -} TcRnTypeEqualityOutOfScope :: TcRnMessage {-| TcRnTypeEqualityRequiresOperators is a warning (controlled by -Wtype-equality-requires-operators) that occurs when the type equality (a ~ b) is used without the TypeOperators extension. 
Example: {-# LANGUAGE NoTypeOperators #-} f :: (a ~ b) => a -> b Test case: T18862a -} TcRnTypeEqualityRequiresOperators :: TcRnMessage {-| TcRnIllegalTypeOperator is an error that occurs when a type operator is used without the TypeOperators extension. Example: {-# LANGUAGE NoTypeOperators #-} f :: Vec a n -> Vec a m -> Vec a (n + m) Test case: T12811 -} TcRnIllegalTypeOperator :: !SDoc -> !RdrName -> TcRnMessage {-| TcRnGADTMonoLocalBinds is a warning controlled by -Wgadt-mono-local-binds that occurs when pattern matching on a GADT when -XMonoLocalBinds is off. Example(s): None Test cases: T20485, T20485a -} TcRnGADTMonoLocalBinds :: TcRnMessage {-| The TcRnNotInScope constructor is used for various not-in-scope errors. See 'NotInScopeError' for more details. -} TcRnNotInScope :: NotInScopeError -- ^ what the problem is -> RdrName -- ^ the name that is not in scope -> [ImportError] -- ^ import errors that are relevant -> [GhcHint] -- ^ hints, e.g. enable DataKinds to refer to a promoted data constructor -> TcRnMessage {-| TcRnUntickedPromotedThing is a warning (controlled with -Wunticked-promoted-constructors) that is triggered by an unticked occurrence of a promoted data constructor. Examples: data A = MkA type family F (a :: A) where { F MkA = Bool } type B = [ Int, Bool ] Test cases: T9778, T19984. -} TcRnUntickedPromotedThing :: UntickedPromotedThing -> TcRnMessage {-| TcRnIllegalBuiltinSyntax is an error that occurs when built-in syntax appears in an unexpected location, e.g. as a data constructor or in a fixity declaration. Examples: infixl 5 : data P = (,) Test cases: rnfail042, T14907b, T15124, T15233. -} TcRnIllegalBuiltinSyntax :: SDoc -- ^ what kind of thing this is (a binding, fixity declaration, ...) -> RdrName -> TcRnMessage -- TODO: remove the SDoc argument. {-| TcRnWarnDefaulting is a warning (controlled by -Wtype-defaults) that is triggered whenever a Wanted typeclass constraint is solving through the defaulting of a type variable. 
Example: one = show 1 -- We get Wanteds Show a0, Num a0, and default a0 to Integer. Test cases: none (which are really specific to defaulting), but see e.g. tcfail204. -} TcRnWarnDefaulting :: [Ct] -- ^ Wanted constraints in which defaulting occurred -> Maybe TyVar -- ^ The type variable being defaulted -> Type -- ^ The default type -> TcRnMessage {-| TcRnIncorrectNameSpace is an error that occurs when a 'Name' is used in the incorrect 'NameSpace', e.g. a type constructor or class used in a term, or a term variable used in a type. Example: f x = Int Test cases: T18740a, T20884. -} TcRnIncorrectNameSpace :: Name -> Bool -- ^ whether the error is happening -- in a Template Haskell tick -- (so we should give a Template Haskell hint) -> TcRnMessage {- TcRnForeignImportPrimExtNotSet is an error occurring when a foreign import is declared using the @prim@ calling convention without having turned on the -XGHCForeignImportPrim extension. Example(s): foreign import prim "foo" foo :: ByteArray# -> (# Int#, Int# #) Test cases: ffi/should_fail/T20116 -} TcRnForeignImportPrimExtNotSet :: ForeignImport -> TcRnMessage {- TcRnForeignImportPrimSafeAnn is an error declaring that the safe/unsafe annotation should not be used with @prim@ foreign imports. Example(s): foreign import prim unsafe "my_primop_cmm" :: ... Test cases: None -} TcRnForeignImportPrimSafeAnn :: ForeignImport -> TcRnMessage {- TcRnForeignFunctionImportAsValue is an error explaining that foreign @value@ imports cannot have function types. Example(s): foreign import capi "math.h value sqrt" f :: CInt -> CInt Test cases: ffi/should_fail/capi_value_function -} TcRnForeignFunctionImportAsValue :: ForeignImport -> TcRnMessage {- TcRnFunPtrImportWithoutAmpersand is a warning controlled by @-Wdodgy-foreign-imports@ that informs the user of a possible missing @&@ in the declaration of a foreign import with a 'FunPtr' return type. 
Example(s): foreign import ccall "f" f :: FunPtr (Int -> IO ()) Test cases: ffi/should_compile/T1357 -} TcRnFunPtrImportWithoutAmpersand :: ForeignImport -> TcRnMessage {- TcRnIllegalForeignDeclBackend is an error occurring when a foreign import declaration is not compatible with the code generation backend being used. Example(s): None Test cases: None -} TcRnIllegalForeignDeclBackend :: Either ForeignExport ForeignImport -> Backend -> ExpectedBackends -> TcRnMessage {- TcRnUnsupportedCallConv informs the user that the calling convention specified for a foreign export declaration is not compatible with the target platform. It is a warning controlled by @-Wunsupported-calling-conventions@ in the case of @stdcall@ but is otherwise considered an error. Example(s): None Test cases: None -} TcRnUnsupportedCallConv :: Either ForeignExport ForeignImport -> UnsupportedCallConvention -> TcRnMessage {- TcRnIllegalForeignType is an error for when a type appears in a foreign function signature that is not compatible with the FFI. Example(s): None Test cases: ffi/should_fail/T3066 ffi/should_fail/ccfail004 ffi/should_fail/T10461 ffi/should_fail/T7506 ffi/should_fail/T5664 safeHaskell/ghci/p6 safeHaskell/safeLanguage/SafeLang08 ffi/should_fail/T16702 linear/should_fail/LinearFFI ffi/should_fail/T7243 -} TcRnIllegalForeignType :: !(Maybe ArgOrResult) -> !IllegalForeignTypeReason -> TcRnMessage {- TcRnInvalidCIdentifier indicates a C identifier that is not valid. Example(s): foreign import prim safe "not valid" cmm_test2 :: Int# -> Int# Test cases: th/T10638 -} TcRnInvalidCIdentifier :: !CLabelString -> TcRnMessage {- TcRnCannotDefaultConcrete is an error occurring when a concrete type variable cannot be defaulted. 
Test cases: T23153 -} TcRnCannotDefaultConcrete :: !FixedRuntimeRepOrigin -> TcRnMessage -- | Specifies which backend code generators where expected for an FFI declaration data ExpectedBackends = COrAsmOrLlvm -- ^ C, Asm, or LLVM | COrAsmOrLlvmOrInterp -- ^ C, Asm, LLVM, or interpreted deriving Eq -- | Specifies which calling convention is unsupported on the current platform data UnsupportedCallConvention = StdCallConvUnsupported | PrimCallConvUnsupported | JavaScriptCallConvUnsupported deriving Eq -- | Whether the error pertains to a function argument or a result. data ArgOrResult = Arg | Result -- | Which parts of a record field are affected by a particular error or warning. data RecordFieldPart = RecordFieldConstructor !Name | RecordFieldPattern !Name | RecordFieldUpdate -- | Where a shadowed name comes from data ShadowedNameProvenance = ShadowedNameProvenanceLocal !SrcLoc -- ^ The shadowed name is local to the module | ShadowedNameProvenanceGlobal [GlobalRdrElt] -- ^ The shadowed name is global, typically imported from elsewhere. -- | In what context did we require a type to have a fixed runtime representation? -- -- Used by 'GHC.Tc.Utils.TcMType.checkTypeHasFixedRuntimeRep' for throwing -- representation polymorphism errors when validity checking. -- -- See Note [Representation polymorphism checking] in GHC.Tc.Utils.Concrete data FixedRuntimeRepProvenance -- | Data constructor fields must have a fixed runtime representation. -- -- Tests: T11734, T18534. = FixedRuntimeRepDataConField -- | Pattern synonym signature arguments must have a fixed runtime representation. -- -- Test: RepPolyPatSynArg. | FixedRuntimeRepPatSynSigArg -- | Pattern synonym signature scrutinee must have a fixed runtime representation. -- -- Test: RepPolyPatSynRes. 
| FixedRuntimeRepPatSynSigRes pprFixedRuntimeRepProvenance :: FixedRuntimeRepProvenance -> SDoc pprFixedRuntimeRepProvenance FixedRuntimeRepDataConField = text "data constructor field" pprFixedRuntimeRepProvenance FixedRuntimeRepPatSynSigArg = text "pattern synonym argument" pprFixedRuntimeRepProvenance FixedRuntimeRepPatSynSigRes = text "pattern synonym scrutinee" -- | Why the particular injectivity error arose together with more information, -- if any. data InjectivityErrReason = InjErrRhsBareTyVar [Type] | InjErrRhsCannotBeATypeFam | InjErrRhsOverlap | InjErrCannotInferFromRhs !TyVarSet !HasKinds !SuggestUndecidableInstances data HasKinds = YesHasKinds | NoHasKinds deriving (Show, Eq) hasKinds :: Bool -> HasKinds hasKinds True = YesHasKinds hasKinds False = NoHasKinds data SuggestUndecidableInstances = YesSuggestUndecidableInstaces | NoSuggestUndecidableInstaces deriving (Show, Eq) suggestUndecidableInstances :: Bool -> SuggestUndecidableInstances suggestUndecidableInstances True = YesSuggestUndecidableInstaces suggestUndecidableInstances False = NoSuggestUndecidableInstaces -- | A data type to describe why a variable is not closed. 
-- See Note [Not-closed error messages] in GHC.Tc.Gen.Expr data NotClosedReason = NotLetBoundReason | NotTypeClosed VarSet | NotClosed Name NotClosedReason data SuggestPartialTypeSignatures = YesSuggestPartialTypeSignatures | NoSuggestPartialTypeSignatures deriving (Show, Eq) suggestPartialTypeSignatures :: Bool -> SuggestPartialTypeSignatures suggestPartialTypeSignatures True = YesSuggestPartialTypeSignatures suggestPartialTypeSignatures False = NoSuggestPartialTypeSignatures data UsingGeneralizedNewtypeDeriving = YesGeneralizedNewtypeDeriving | NoGeneralizedNewtypeDeriving deriving Eq usingGeneralizedNewtypeDeriving :: Bool -> UsingGeneralizedNewtypeDeriving usingGeneralizedNewtypeDeriving True = YesGeneralizedNewtypeDeriving usingGeneralizedNewtypeDeriving False = NoGeneralizedNewtypeDeriving data DeriveAnyClassEnabled = YesDeriveAnyClassEnabled | NoDeriveAnyClassEnabled deriving Eq deriveAnyClassEnabled :: Bool -> DeriveAnyClassEnabled deriveAnyClassEnabled True = YesDeriveAnyClassEnabled deriveAnyClassEnabled False = NoDeriveAnyClassEnabled -- | Why a particular typeclass instance couldn't be derived. data DeriveInstanceErrReason = -- | The typeclass instance is not well-kinded. DerivErrNotWellKinded !TyCon -- ^ The type constructor that occurs in -- the typeclass instance declaration. !Kind -- ^ The typeclass kind. !Int -- ^ The number of typeclass arguments that GHC -- kept. See Note [tc_args and tycon arity] in -- GHC.Tc.Deriv. -- | Generic instances can only be derived using the stock strategy -- in Safe Haskell. 
| DerivErrSafeHaskellGenericInst | DerivErrDerivingViaWrongKind !Kind !Type !Kind | DerivErrNoEtaReduce !Type -- ^ The instance type -- | We cannot derive instances in boot files | DerivErrBootFileFound | DerivErrDataConsNotAllInScope !TyCon -- | We cannot use GND on non-newtype types | DerivErrGNDUsedOnData -- | We cannot derive instances of nullary classes | DerivErrNullaryClasses -- | Last arg must be newtype or data application | DerivErrLastArgMustBeApp | DerivErrNoFamilyInstance !TyCon [Type] | DerivErrNotStockDeriveable !DeriveAnyClassEnabled | DerivErrHasAssociatedDatatypes !HasAssociatedDataFamInsts !AssociatedTyLastVarInKind !AssociatedTyNotParamOverLastTyVar | DerivErrNewtypeNonDeriveableClass | DerivErrCannotEtaReduceEnough !Bool -- Is eta-reduction OK? | DerivErrOnlyAnyClassDeriveable !TyCon -- ^ Type constructor for which the instance -- is requested !DeriveAnyClassEnabled -- ^ Whether or not -XDeriveAnyClass is enabled -- already. -- | Stock deriving won't work, but perhas DeriveAnyClass will. | DerivErrNotDeriveable !DeriveAnyClassEnabled -- | The given 'PredType' is not a class. | DerivErrNotAClass !PredType -- | The given (representation of the) 'TyCon' has no -- data constructors. | DerivErrNoConstructors !TyCon | DerivErrLangExtRequired !LangExt.Extension -- | GHC simply doesn't how to how derive the input 'Class' for the given -- 'Type'. | DerivErrDunnoHowToDeriveForType !Type -- | The given 'TyCon' must be an enumeration. -- See Note [Enumeration types] in GHC.Core.TyCon | DerivErrMustBeEnumType !TyCon -- | The given 'TyCon' must have /precisely/ one constructor. | DerivErrMustHaveExactlyOneConstructor !TyCon -- | The given data type must have some parameters. | DerivErrMustHaveSomeParameters !TyCon -- | The given data type must not have a class context. | DerivErrMustNotHaveClassContext !TyCon !ThetaType -- | We couldn't derive an instance for a particular data constructor -- for a variety of reasons. 
| DerivErrBadConstructor !(Maybe HasWildcard) [DeriveInstanceBadConstructor] -- | We couldn't derive a 'Generic' instance for the given type for a -- variety of reasons | DerivErrGenerics [DeriveGenericsErrReason] -- | We couldn't derive an instance either because the type was not an -- enum type or because it did have more than one constructor. | DerivErrEnumOrProduct !DeriveInstanceErrReason !DeriveInstanceErrReason data DeriveInstanceBadConstructor = -- | The given 'DataCon' must be truly polymorphic in the -- last argument of the data type. DerivErrBadConExistential !DataCon -- | The given 'DataCon' must not use the type variable in a function argument" | DerivErrBadConCovariant !DataCon -- | The given 'DataCon' must not contain function types | DerivErrBadConFunTypes !DataCon -- | The given 'DataCon' must use the type variable only -- as the last argument of a data type | DerivErrBadConWrongArg !DataCon -- | The given 'DataCon' is a GADT so we cannot directly -- derive an istance for it. | DerivErrBadConIsGADT !DataCon -- | The given 'DataCon' has existentials type vars in its type. | DerivErrBadConHasExistentials !DataCon -- | The given 'DataCon' has constraints in its type. | DerivErrBadConHasConstraints !DataCon -- | The given 'DataCon' has a higher-rank type. | DerivErrBadConHasHigherRankType !DataCon data DeriveGenericsErrReason = -- | The type must not have some datatype context. DerivErrGenericsMustNotHaveDatatypeContext !TyCon -- | The data constructor must not have exotic unlifted -- or polymorphic arguments. | DerivErrGenericsMustNotHaveExoticArgs !DataCon -- | The data constructor must be a vanilla constructor. | DerivErrGenericsMustBeVanillaDataCon !DataCon -- | The type must have some type parameters. -- check (d) from Note [Requirements for deriving Generic and Rep] -- in GHC.Tc.Deriv.Generics. | DerivErrGenericsMustHaveSomeTypeParams !TyCon -- | The data constructor must not have existential arguments. 
| DerivErrGenericsMustNotHaveExistentials !DataCon -- | The derivation applies a type to an argument involving -- the last parameter but the applied type is not of kind * -> *. | DerivErrGenericsWrongArgKind !DataCon data HasWildcard = YesHasWildcard | NoHasWildcard deriving Eq hasWildcard :: Bool -> HasWildcard hasWildcard True = YesHasWildcard hasWildcard False = NoHasWildcard -- | A context in which we don't allow anonymous wildcards. data BadAnonWildcardContext = WildcardNotLastInConstraint | ExtraConstraintWildcardNotAllowed SoleExtraConstraintWildcardAllowed | WildcardsNotAllowedAtAll -- | Whether a sole extra-constraint wildcard is allowed, -- e.g. @_ => ..@ as opposed to @( .., _ ) => ..@. data SoleExtraConstraintWildcardAllowed = SoleExtraConstraintWildcardNotAllowed | SoleExtraConstraintWildcardAllowed -- | A type representing whether or not the input type has associated data family instances. data HasAssociatedDataFamInsts = YesHasAdfs | NoHasAdfs deriving Eq hasAssociatedDataFamInsts :: Bool -> HasAssociatedDataFamInsts hasAssociatedDataFamInsts True = YesHasAdfs hasAssociatedDataFamInsts False = NoHasAdfs -- | If 'YesAssocTyLastVarInKind', the associated type of a typeclass -- contains the last type variable of the class in a kind, which is not (yet) allowed -- by GHC. 
data AssociatedTyLastVarInKind = YesAssocTyLastVarInKind !TyCon -- ^ The associated type family of the class | NoAssocTyLastVarInKind deriving Eq associatedTyLastVarInKind :: Maybe TyCon -> AssociatedTyLastVarInKind associatedTyLastVarInKind (Just tc) = YesAssocTyLastVarInKind tc associatedTyLastVarInKind Nothing = NoAssocTyLastVarInKind -- | If 'NoAssociatedTyNotParamOverLastTyVar', the associated type of a -- typeclass is not parameterized over the last type variable of the class data AssociatedTyNotParamOverLastTyVar = YesAssociatedTyNotParamOverLastTyVar !TyCon -- ^ The associated type family of the class | NoAssociatedTyNotParamOverLastTyVar deriving Eq associatedTyNotParamOverLastTyVar :: Maybe TyCon -> AssociatedTyNotParamOverLastTyVar associatedTyNotParamOverLastTyVar (Just tc) = YesAssociatedTyNotParamOverLastTyVar tc associatedTyNotParamOverLastTyVar Nothing = NoAssociatedTyNotParamOverLastTyVar -- | What kind of thing is missing a type signature? -- -- Used for reporting @"missing signature"@ warnings, see -- 'tcRnMissingSignature'. data MissingSignature = MissingTopLevelBindingSig Name Type | MissingPatSynSig PatSyn | MissingTyConKindSig TyCon Bool -- ^ whether -XCUSKs is enabled -- | Is the object we are dealing with exported or not? -- -- Used for reporting @"missing signature"@ warnings, see -- 'TcRnMissingSignature'. data Exported = IsNotExported | IsExported instance Outputable Exported where ppr IsNotExported = text "IsNotExported" ppr IsExported = text "IsExported" -------------------------------------------------------------------------------- -- -- Errors used in GHC.Tc.Errors -- -------------------------------------------------------------------------------- {- Note [Error report] ~~~~~~~~~~~~~~~~~~~~~~ The idea is that error msgs are divided into three parts: the main msg, the context block ("In the second argument of ..."), and the relevant bindings block, which are displayed in that order, with a mark to divide them. 
The the main msg ('report_important') varies depending on the error in question, but context and relevant bindings are always the same, which should simplify visual parsing. See 'GHC.Tc.Errors.Types.SolverReport' and 'GHC.Tc.Errors.mkErrorReport'. -} -- | A collection of main error messages and supplementary information. -- -- In practice, we will: -- - display the important messages first, -- - then the error context (e.g. by way of a call to 'GHC.Tc.Errors.mkErrorReport'), -- - then the supplementary information (e.g. relevant bindings, valid hole fits), -- - then the hints ("Possible fix: ..."). -- -- So this is mostly just a way of making sure that the error context appears -- early on rather than at the end of the message. -- -- See Note [Error report] for details. data SolverReport = SolverReport { sr_important_msgs :: [SolverReportWithCtxt] , sr_supplementary :: [SolverReportSupplementary] , sr_hints :: [GhcHint] } -- | Additional information to print in a 'SolverReport', after the -- important messages and after the error context. -- -- See Note [Error report]. data SolverReportSupplementary = SupplementaryBindings RelevantBindings | SupplementaryHoleFits ValidHoleFits | SupplementaryCts [(PredType, RealSrcSpan)] -- | A 'TcSolverReportMsg', together with context (e.g. enclosing implication constraints) -- that are needed in order to report it. data SolverReportWithCtxt = SolverReportWithCtxt { reportContext :: SolverReportErrCtxt -- ^ Context for what we wish to report. -- This can change as we enter implications, so is -- stored alongside the content. , reportContent :: TcSolverReportMsg -- ^ The content of the message to report. 
} instance Semigroup SolverReport where SolverReport main1 supp1 hints1 <> SolverReport main2 supp2 hints2 = SolverReport (main1 ++ main2) (supp1 ++ supp2) (hints1 ++ hints2) instance Monoid SolverReport where mempty = SolverReport [] [] [] mappend = (Semigroup.<>) -- | Context needed when reporting a 'TcSolverReportMsg', such as -- the enclosing implication constraints or whether we are deferring type errors. data SolverReportErrCtxt = CEC { cec_encl :: [Implication] -- ^ Enclosing implications -- (innermost first) -- ic_skols and givens are tidied, rest are not , cec_tidy :: TidyEnv , cec_binds :: EvBindsVar -- ^ We make some errors (depending on cec_defer) -- into warnings, and emit evidence bindings -- into 'cec_binds' for unsolved constraints , cec_defer_type_errors :: DiagnosticReason -- ^ Whether to defer type errors until runtime -- We might throw a warning on an error when encountering a hole, -- depending on the type of hole (expression hole, type hole, out of scope hole). -- We store the reasons for reporting a diagnostic for each type of hole. , cec_expr_holes :: DiagnosticReason -- ^ Reason for reporting holes in expressions. , cec_type_holes :: DiagnosticReason -- ^ Reason for reporting holes in types. , cec_out_of_scope_holes :: DiagnosticReason -- ^ Reason for reporting out of scope holes. 
, cec_warn_redundant :: Bool -- ^ True <=> -Wredundant-constraints , cec_expand_syns :: Bool -- ^ True <=> -fprint-expanded-synonyms , cec_suppress :: Bool -- ^ True <=> More important errors have occurred, -- so create bindings if need be, but -- don't issue any more errors/warnings -- See Note [Suppressing error messages] } getUserGivens :: SolverReportErrCtxt -> [UserGiven] -- One item for each enclosing implication getUserGivens (CEC {cec_encl = implics}) = getUserGivensFromImplics implics ---------------------------------------------------------------------------- -- -- ErrorItem -- ---------------------------------------------------------------------------- -- | A predicate with its arising location; used to encapsulate a constraint -- that will give rise to a diagnostic. data ErrorItem -- We could perhaps use Ct here (and indeed used to do exactly that), but -- having a separate type gives to denote errors-in-formation gives us -- a nice place to do pre-processing, such as calculating ei_suppress. -- Perhaps some day, an ErrorItem could eventually evolve to contain -- the error text (or some representation of it), so we can then have all -- the errors together when deciding which to report. 
= EI { ei_pred :: PredType -- report about this -- The ei_pred field will never be an unboxed equality with -- a (casted) tyvar on the right; this is guaranteed by the solver , ei_evdest :: Maybe TcEvDest -- for Wanteds, where to put evidence , ei_flavour :: CtFlavour , ei_loc :: CtLoc , ei_m_reason :: Maybe CtIrredReason -- if this ErrorItem was made from a -- CtIrred, this stores the reason , ei_suppress :: Bool -- Suppress because of Note [Wanteds rewrite Wanteds] -- in GHC.Tc.Constraint } instance Outputable ErrorItem where ppr (EI { ei_pred = pred , ei_evdest = m_evdest , ei_flavour = flav , ei_suppress = supp }) = pp_supp <+> ppr flav <+> pp_dest m_evdest <+> ppr pred where pp_dest Nothing = empty pp_dest (Just ev) = ppr ev <+> dcolon pp_supp = if supp then text "suppress:" else empty errorItemOrigin :: ErrorItem -> CtOrigin errorItemOrigin = ctLocOrigin . ei_loc errorItemEqRel :: ErrorItem -> EqRel errorItemEqRel = predTypeEqRel . ei_pred errorItemCtLoc :: ErrorItem -> CtLoc errorItemCtLoc = ei_loc errorItemPred :: ErrorItem -> PredType errorItemPred = ei_pred {- Note [discardProvCtxtGivens] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In most situations we call all enclosing implications "useful". There is one exception, and that is when the constraint that causes the error is from the "provided" context of a pattern synonym declaration: pattern Pat :: (Num a, Eq a) => Show a => a -> Maybe a -- required => provided => type pattern Pat x <- (Just x, 4) When checking the pattern RHS we must check that it does actually bind all the claimed "provided" constraints; in this case, does the pattern (Just x, 4) bind the (Show a) constraint. Answer: no! But the implication we generate for this will look like forall a. (Num a, Eq a) => [W] Show a because when checking the pattern we must make the required constraints available, since they are needed to match the pattern (in this case the literal '4' needs (Num a, Eq a)). 
BUT we don't want to suggest adding (Show a) to the "required" constraints of the pattern synonym, thus: pattern Pat :: (Num a, Eq a, Show a) => Show a => a -> Maybe a It would then typecheck but it's silly. We want the /pattern/ to bind the alleged "provided" constraints, Show a. So we suppress that Implication in discardProvCtxtGivens. It's painfully ad-hoc but the truth is that adding it to the "required" constraints would work. Suppressing it solves two problems. First, we never tell the user that we could not deduce a "provided" constraint from the "required" context. Second, we never give a possible fix that suggests to add a "provided" constraint to the "required" context. For example, without this distinction the above code gives a bad error message (showing both problems): error: Could not deduce (Show a) ... from the context: (Eq a) ... Possible fix: add (Show a) to the context of the signature for pattern synonym `Pat' ... -} discardProvCtxtGivens :: CtOrigin -> [UserGiven] -> [UserGiven] discardProvCtxtGivens orig givens -- See Note [discardProvCtxtGivens] | ProvCtxtOrigin (PSB {psb_id = L _ name}) <- orig = filterOut (discard name) givens | otherwise = givens where discard n (Implic { ic_info = SigSkol (PatSynCtxt n') _ _ }) = n == n' discard _ _ = False -- | An error reported after constraint solving. -- This is usually, some sort of unsolved constraint error, -- but we try to be specific about the precise problem we encountered. data TcSolverReportMsg -- NB: this datatype is only a first step in refactoring GHC.Tc.Errors -- to use the diagnostic infrastructure (TcRnMessage etc). -- If you see possible improvements, please go right ahead! -- | Wrap a message with additional information. -- -- Prefer using the 'mkTcReportWithInfo' smart constructor = TcReportWithInfo TcSolverReportMsg (NE.NonEmpty TcSolverReportInfo) -- | Quantified variables appear out of dependency order. -- -- Example: -- -- forall (a :: k) k. ... 
-- -- Test cases: BadTelescope2, T16418, T16247, T16726, T18451. | BadTelescope TyVarBndrs [TyCoVar] -- | We came across a custom type error and we have decided to report it. -- -- Example: -- -- type family F a where -- F a = TypeError (Text "error") -- -- err :: F () -- err = () -- -- Test cases: CustomTypeErrors0{1,2,3,4,5}, T12104. | UserTypeError Type -- | We want to report an out of scope variable or a typed hole. -- See 'HoleError'. | ReportHoleError Hole HoleError -- | A type equality between a type variable and a polytype. -- -- Test cases: T12427a, T2846b, T10194, ... | CannotUnifyWithPolytype ErrorItem TyVar Type -- | Couldn't unify two types or kinds. -- -- Example: -- -- 3 + 3# -- can't match a lifted type with an unlifted type -- -- Test cases: T1396, T8263, ... | Mismatch { mismatch_ea :: Bool -- ^ Should this be phrased in terms of expected vs actual? , mismatch_item :: ErrorItem -- ^ The constraint in which the mismatch originated. , mismatch_ty1 :: Type -- ^ First type (the expected type if if mismatch_ea is True) , mismatch_ty2 :: Type -- ^ Second type (the actual type if mismatch_ea is True) } -- | A type has an unexpected kind. -- -- Test cases: T2994, T7609, ... | KindMismatch { kmismatch_what :: TypedThing -- ^ What thing is 'kmismatch_actual' the kind of? , kmismatch_expected :: Type , kmismatch_actual :: Type } -- TODO: combine 'Mismatch' and 'KindMismatch' messages. -- | A mismatch between two types, which arose from a type equality. -- -- Test cases: T1470, tcfail212. | TypeEqMismatch { teq_mismatch_ppr_explicit_kinds :: Bool , teq_mismatch_item :: ErrorItem , teq_mismatch_ty1 :: Type , teq_mismatch_ty2 :: Type , teq_mismatch_expected :: Type -- ^ The overall expected type , teq_mismatch_actual :: Type -- ^ The overall actual type , teq_mismatch_what :: Maybe TypedThing -- ^ What thing is 'teq_mismatch_actual' the kind of? } -- TODO: combine 'Mismatch' and 'TypeEqMismatch' messages. 
-- | A violation of the representation-polymorphism invariants. -- -- See 'FixedRuntimeRepErrorInfo' and 'FixedRuntimeRepContext' for more information. | FixedRuntimeRepError [FixedRuntimeRepErrorInfo] -- | A skolem type variable escapes its scope. -- -- Example: -- -- data Ex where { MkEx :: a -> MkEx } -- foo (MkEx x) = x -- -- Test cases: TypeSkolEscape, T11142. | SkolemEscape ErrorItem Implication [TyVar] -- | Trying to unify an untouchable variable, e.g. a variable from an outer scope. -- -- Test case: Simple14 | UntouchableVariable TyVar Implication -- | An equality between two types is blocked on a kind equality -- beteen their kinds. -- -- Test cases: none. | BlockedEquality ErrorItem -- | Something was not applied to sufficiently many arguments. -- -- Example: -- -- instance Eq Maybe where {..} -- -- Test case: T11563. | ExpectingMoreArguments Int TypedThing -- | Trying to use an unbound implicit parameter. -- -- Example: -- -- foo :: Int -- foo = ?param -- -- Test case: tcfail130. | UnboundImplicitParams (NE.NonEmpty ErrorItem) -- | Couldn't solve some Wanted constraints using the Givens. -- This is the most commonly used constructor, used for generic -- @"No instance for ..."@ and @"Could not deduce ... from"@ messages. | CouldNotDeduce { cnd_user_givens :: [Implication] -- | The Wanted constraints we couldn't solve. -- -- N.B.: the 'ErrorItem' at the head of the list has been tidied, -- perhaps not the others. , cnd_wanted :: NE.NonEmpty ErrorItem -- | Some additional info consumed by 'mk_supplementary_ea_msg'. , cnd_extra :: Maybe CND_Extra } -- | A constraint couldn't be solved because it contains -- ambiguous type variables. -- -- Example: -- -- class C a b where -- f :: (a,b) -- -- x = fst f -- -- -- Test case: T4921. 
| AmbiguityPreventsSolvingCt ErrorItem -- ^ always a class constraint ([TyVar], [TyVar]) -- ^ ambiguous kind and type variables, respectively -- | Could not solve a constraint; there were several unifying candidate instances -- but no matching instances. This is used to report as much useful information -- as possible about why we couldn't choose any instance, e.g. because of -- ambiguous type variables. | CannotResolveInstance { cannotResolve_item :: ErrorItem , cannotResolve_unifiers :: [ClsInst] , cannotResolve_candidates :: [ClsInst] , cannotResolve_importErrors :: [ImportError] , cannotResolve_suggestions :: [GhcHint] , cannotResolve_relevant_bindings :: RelevantBindings } -- TODO: remove the fields of type [GhcHint] and RelevantBindings, -- in order to handle them uniformly with other diagnostic messages. -- | Could not solve a constraint using available instances -- because the instances overlap. -- -- Test cases: tcfail118, tcfail121, tcfail218. | OverlappingInstances { overlappingInstances_item :: ErrorItem , overlappingInstances_matches :: [ClsInst] , overlappingInstances_unifiers :: [ClsInst] } -- | Could not solve a constraint from instances because -- instances declared in a Safe module cannot overlap instances -- from other modules (with -XSafeHaskell). -- -- Test cases: SH_Overlap{1,2,5,6,7,11}. | UnsafeOverlap { unsafeOverlap_item :: ErrorItem , unsafeOverlap_matches :: [ClsInst] , unsafeOverlapped :: [ClsInst] } -- | Additional information to be given in a 'CouldNotDeduce' message, -- which is then passed on to 'mk_supplementary_ea_msg'. data CND_Extra = CND_Extra TypeOrKind Type Type -- | Additional information that can be appended to an existing 'TcSolverReportMsg'. data TcSolverReportInfo -- NB: this datatype is only a first step in refactoring GHC.Tc.Errors -- to use the diagnostic infrastructure (TcRnMessage etc). -- It would be better for these constructors to not be so closely tied -- to the constructors of 'TcSolverReportMsg'. 
-- If you see possible improvements, please go right ahead! -- | Some type variables remained ambiguous: print them to the user. = Ambiguity { lead_with_ambig_msg :: Bool -- ^ True <=> start the message with "Ambiguous type variable ..." -- False <=> create a message of the form "The type variable is ambiguous." , ambig_tyvars :: ([TyVar], [TyVar]) -- ^ Ambiguous kind and type variables, respectively. -- Guaranteed to not both be empty. } -- | Specify some information about a type variable, -- e.g. its 'SkolemInfo'. | TyVarInfo TyVar -- | Remind the user that a particular type family is not injective. | NonInjectiveTyFam TyCon -- | Explain why we couldn't coerce between two types. See 'CoercibleMsg'. | ReportCoercibleMsg CoercibleMsg -- | Display the expected and actual types. | ExpectedActual { ea_expected, ea_actual :: Type } -- | Display the expected and actual types, after expanding type synonyms. | ExpectedActualAfterTySynExpansion { ea_expanded_expected, ea_expanded_actual :: Type } -- | Explain how a kind equality originated. | WhenMatching TcType TcType CtOrigin (Maybe TypeOrKind) -- | Add some information to disambiguate errors in which -- two 'Names' would otherwise appear to be identical. -- -- See Note [Disambiguating (X ~ X) errors]. | SameOcc { sameOcc_same_pkg :: Bool -- ^ Whether the two 'Name's also came from the same package. , sameOcc_lhs :: Name , sameOcc_rhs :: Name } -- | Report some type variables that might be participating in an occurs-check failure. | OccursCheckInterestingTyVars (NE.NonEmpty TyVar) -- | Some form of @"not in scope"@ error. See also the 'OutOfScopeHole' -- constructor of 'HoleError'. data NotInScopeError -- | A run-of-the-mill @"not in scope"@ error. = NotInScope -- | An exact 'Name' was not in scope. -- -- This usually indicates a problem with a Template Haskell splice. -- -- Test cases: T5971, T18263. | NoExactName Name -- The same exact 'Name' occurs in multiple name-spaces. 
-- -- This usually indicates a problem with a Template Haskell splice. -- -- Test case: T7241. | SameName [GlobalRdrElt] -- ^ always at least 2 elements -- A type signature, fixity declaration, pragma, standalone kind signature... -- is missing an associated binding. | MissingBinding SDoc [GhcHint] -- TODO: remove the SDoc argument. -- | Couldn't find a top-level binding. -- -- Happens when specifying an annotation for something that -- is not in scope. -- -- Test cases: annfail01, annfail02, annfail11. | NoTopLevelBinding -- | A class doesnt have a method with this name, -- or, a class doesn't have an associated type with this name, -- or, a record doesn't have a record field with this name. | UnknownSubordinate SDoc -- | Create a @"not in scope"@ error message for the given 'RdrName'. mkTcRnNotInScope :: RdrName -> NotInScopeError -> TcRnMessage mkTcRnNotInScope rdr err = TcRnNotInScope err rdr [] noHints -- | Configuration for pretty-printing valid hole fits. data HoleFitDispConfig = HFDC { showWrap, showWrapVars, showType, showProv, showMatches :: Bool } -- | Report an error involving a 'Hole'. -- -- This could be an out of scope data constructor or variable, -- a typed hole, or a wildcard in a type. data HoleError -- | Report an out-of-scope data constructor or variable -- masquerading as an expression hole. -- -- See Note [Insoluble holes] in GHC.Tc.Types.Constraint. -- See 'NotInScopeError' for other not-in-scope errors. -- -- Test cases: T9177a. = OutOfScopeHole [ImportError] -- | Report a typed hole, or wildcard, with additional information. | HoleError HoleSort [TcTyVar] -- Other type variables which get computed on the way. [(SkolemInfoAnon, [TcTyVar])] -- Zonked and grouped skolems for the type of the hole. -- | A message that aims to explain why two types couldn't be seen -- to be representationally equal. data CoercibleMsg -- | Not knowing the role of a type constructor prevents us from -- concluding that two types are representationally equal. 
-- -- Example: -- -- foo :: Applicative m => m (Sum Int) -- foo = coerce (pure $ 1 :: Int) -- -- We don't know what role `m` has, so we can't coerce `m Int` to `m (Sum Int)`. -- -- Test cases: T8984, TcCoercibleFail. = UnknownRoles Type -- | The fact that a 'TyCon' is abstract prevents us from decomposing -- a 'TyConApp' and deducing that two types are representationally equal. -- -- Test cases: none. | TyConIsAbstract TyCon -- | We can't unwrap a newtype whose constructor is not in scope. -- -- Example: -- -- import Data.Ord (Down) -- NB: not importing the constructor -- foo :: Int -> Down Int -- foo = coerce -- -- Test cases: TcCoercibleFail. | OutOfScopeNewtypeConstructor TyCon DataCon -- | Explain a problem with an import. data ImportError -- | Couldn't find a module with the requested name. = MissingModule ModuleName -- | The imported modules don't export what we're looking for. | ModulesDoNotExport (NE.NonEmpty Module) OccName -- | This datatype collates instances that match or unifier, -- in order to report an error message for an unsolved typeclass constraint. data PotentialInstances = PotentialInstances { matches :: [ClsInst] , unifiers :: [ClsInst] } -- | Append additional information to a `TcSolverReportMsg`. mkTcReportWithInfo :: TcSolverReportMsg -> [TcSolverReportInfo] -> TcSolverReportMsg mkTcReportWithInfo msg [] = msg mkTcReportWithInfo (TcReportWithInfo msg (prev NE.:| prevs)) infos = TcReportWithInfo msg (prev NE.:| prevs ++ infos) mkTcReportWithInfo msg (info : infos) = TcReportWithInfo msg (info NE.:| infos) -- | A collection of valid hole fits or refinement fits, -- in which some fits might have been suppressed. data FitsMbSuppressed = Fits { fits :: [HoleFit] , fitsSuppressed :: Bool -- ^ Whether we have suppressed any fits because there were too many. } -- | A collection of hole fits and refinement fits. 
data ValidHoleFits = ValidHoleFits { holeFits :: FitsMbSuppressed , refinementFits :: FitsMbSuppressed } noValidHoleFits :: ValidHoleFits noValidHoleFits = ValidHoleFits (Fits [] False) (Fits [] False) data RelevantBindings = RelevantBindings { relevantBindingNamesAndTys :: [(Name, Type)] , ranOutOfFuel :: Bool -- ^ Whether we ran out of fuel generating the bindings. } -- | Display some relevant bindings. pprRelevantBindings :: RelevantBindings -> SDoc -- This function should be in "GHC.Tc.Errors.Ppr", -- but's it's here for the moment as it's needed in "GHC.Tc.Errors". pprRelevantBindings (RelevantBindings bds ran_out_of_fuel) = ppUnless (null bds) $ hang (text "Relevant bindings include") 2 (vcat (map ppr_binding bds) $$ ppWhen ran_out_of_fuel discardMsg) where ppr_binding (nm, tidy_ty) = sep [ pprPrefixOcc nm <+> dcolon <+> ppr tidy_ty , nest 2 (parens (text "bound at" <+> ppr (getSrcLoc nm)))] discardMsg :: SDoc discardMsg = text "(Some bindings suppressed;" <+> text "use -fmax-relevant-binds=N or -fno-max-relevant-binds)" -- | Stores the information to be reported in a representation-polymorphism -- error message. data FixedRuntimeRepErrorInfo = FRR_Info { frr_info_origin :: FixedRuntimeRepOrigin -- ^ What is the original type we checked for -- representation-polymorphism, and what specific -- check did we perform? , frr_info_not_concrete :: Maybe (TcTyVar, TcType) -- ^ Which non-concrete type did we try to -- unify this concrete type variable with? } {- ************************************************************************ * * \subsection{Contexts for renaming errors} * * ************************************************************************ -} -- AZ:TODO: Change these all to be Name instead of RdrName. -- Merge TcType.UserTypeContext in to it. 
data HsDocContext = TypeSigCtx SDoc | StandaloneKindSigCtx SDoc | PatCtx | SpecInstSigCtx | DefaultDeclCtx | ForeignDeclCtx (LocatedN RdrName) | DerivDeclCtx | RuleCtx FastString | TyDataCtx (LocatedN RdrName) | TySynCtx (LocatedN RdrName) | TyFamilyCtx (LocatedN RdrName) | FamPatCtx (LocatedN RdrName) -- The patterns of a type/data family instance | ConDeclCtx [LocatedN Name] | ClassDeclCtx (LocatedN RdrName) | ExprWithTySigCtx | TypBrCtx | HsTypeCtx | HsTypePatCtx | GHCiCtx | SpliceTypeCtx (LHsType GhcPs) | ClassInstanceCtx | GenericCtx SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Solver/InertSet.hs0000644000000000000000000021501214472400113021546 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE TypeApplications #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Tc.Solver.InertSet ( -- * The work list WorkList(..), isEmptyWorkList, emptyWorkList, extendWorkListNonEq, extendWorkListCt, extendWorkListCts, extendWorkListEq, appendWorkList, extendWorkListImplic, workListSize, selectWorkItem, -- * The inert set InertSet(..), InertCans(..), InertEqs, emptyInert, addInertItem, matchableGivens, mightEqualLater, prohibitedSuperClassSolve, -- * Inert equalities foldTyEqs, delEq, findEq, partitionInertEqs, partitionFunEqs, -- * Kick-out kickOutRewritableLHS, -- * Cycle breaker vars CycleBreakerVarStack, pushCycleBreakerVarStack, insertCycleBreakerBinding, forAllCycleBreakerBindings_ ) where import GHC.Prelude import GHC.Tc.Solver.Types import GHC.Tc.Types.Constraint import GHC.Tc.Types.Origin import GHC.Tc.Utils.TcType import GHC.Types.Var import GHC.Types.Var.Env import GHC.Core.Reduction import GHC.Core.Predicate import GHC.Core.TyCo.FVs import qualified GHC.Core.TyCo.Rep as Rep import GHC.Core.TyCon import GHC.Core.Unify import GHC.Data.Bag import GHC.Utils.Misc ( partitionWith ) import GHC.Utils.Outputable import GHC.Utils.Panic import Data.List ( partition ) import Data.List.NonEmpty ( NonEmpty(..), 
(<|) ) import qualified Data.List.NonEmpty as NE import GHC.Utils.Panic.Plain import GHC.Data.Maybe import Control.Monad ( forM_ ) {- ************************************************************************ * * * Worklists * * Canonical and non-canonical constraints that the simplifier has to * * work on. Including their simplification depths. * * * * * ************************************************************************ Note [WorkList priorities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A WorkList contains canonical and non-canonical items (of all flavours). Notice that each Ct now has a simplification depth. We may consider using this depth for prioritization as well in the future. As a simple form of priority queue, our worklist separates out * equalities (wl_eqs); see Note [Prioritise equalities] * all the rest (wl_rest) Note [Prioritise equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's very important to process equalities /first/: * (Efficiency) The general reason to do so is that if we process a class constraint first, we may end up putting it into the inert set and then kicking it out later. That's extra work compared to just doing the equality first. * (Avoiding fundep iteration) As #14723 showed, it's possible to get non-termination if we - Emit the fundep equalities for a class constraint, generating some fresh unification variables. - That leads to some unification - Which kicks out the class constraint - Which isn't solved (because there are still some more equalities in the work-list), but generates yet more fundeps Solution: prioritise equalities over class constraints * (Class equalities) We need to prioritise equalities even if they are hidden inside a class constraint; see Note [Prioritise class equalities] * (Kick-out) We want to apply this priority scheme to kicked-out constraints too (see the call to extendWorkListCt in kick_out_rewritable E.g. 
a CIrredCan can be a hetero-kinded (t1 ~ t2), which may become homo-kinded when kicked out, and hence we want to prioritise it. Note [Prioritise class equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We prioritise equalities in the solver (see selectWorkItem). But class constraints like (a ~ b) and (a ~~ b) are actually equalities too; see Note [The equality types story] in GHC.Builtin.Types.Prim. Failing to prioritise these is inefficient (more kick-outs etc). But, worse, it can prevent us spotting a "recursive knot" among Wanted constraints. See comment:10 of #12734 for a worked-out example. So we arrange to put these particular class constraints in the wl_eqs. NB: since we do not currently apply the substitution to the inert_solved_dicts, the knot-tying still seems a bit fragile. But this makes it better. Note [Residual implications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The wl_implics in the WorkList are the residual implication constraints that are generated while solving or canonicalising the current worklist. Specifically, when canonicalising (forall a. t1 ~ forall a. t2) from which we get the implication (forall a. 
t1 ~ t2) See GHC.Tc.Solver.Monad.deferTcSForAllEq -} -- See Note [WorkList priorities] data WorkList = WL { wl_eqs :: [Ct] -- CEqCan, CDictCan, CIrredCan -- Given and Wanted -- Contains both equality constraints and their -- class-level variants (a~b) and (a~~b); -- See Note [Prioritise equalities] -- See Note [Prioritise class equalities] , wl_rest :: [Ct] , wl_implics :: Bag Implication -- See Note [Residual implications] } appendWorkList :: WorkList -> WorkList -> WorkList appendWorkList (WL { wl_eqs = eqs1, wl_rest = rest1 , wl_implics = implics1 }) (WL { wl_eqs = eqs2, wl_rest = rest2 , wl_implics = implics2 }) = WL { wl_eqs = eqs1 ++ eqs2 , wl_rest = rest1 ++ rest2 , wl_implics = implics1 `unionBags` implics2 } workListSize :: WorkList -> Int workListSize (WL { wl_eqs = eqs, wl_rest = rest }) = length eqs + length rest extendWorkListEq :: Ct -> WorkList -> WorkList extendWorkListEq ct wl = wl { wl_eqs = ct : wl_eqs wl } extendWorkListNonEq :: Ct -> WorkList -> WorkList -- Extension by non equality extendWorkListNonEq ct wl = wl { wl_rest = ct : wl_rest wl } extendWorkListImplic :: Implication -> WorkList -> WorkList extendWorkListImplic implic wl = wl { wl_implics = implic `consBag` wl_implics wl } extendWorkListCt :: Ct -> WorkList -> WorkList -- Agnostic extendWorkListCt ct wl = case classifyPredType (ctPred ct) of EqPred {} -> extendWorkListEq ct wl ClassPred cls _ -- See Note [Prioritise class equalities] | isEqPredClass cls -> extendWorkListEq ct wl _ -> extendWorkListNonEq ct wl extendWorkListCts :: [Ct] -> WorkList -> WorkList -- Agnostic extendWorkListCts cts wl = foldr extendWorkListCt wl cts isEmptyWorkList :: WorkList -> Bool isEmptyWorkList (WL { wl_eqs = eqs, wl_rest = rest, wl_implics = implics }) = null eqs && null rest && isEmptyBag implics emptyWorkList :: WorkList emptyWorkList = WL { wl_eqs = [], wl_rest = [], wl_implics = emptyBag } selectWorkItem :: WorkList -> Maybe (Ct, WorkList) -- See Note [Prioritise equalities] selectWorkItem wl@(WL 
{ wl_eqs = eqs, wl_rest = rest }) | ct:cts <- eqs = Just (ct, wl { wl_eqs = cts }) | ct:cts <- rest = Just (ct, wl { wl_rest = cts }) | otherwise = Nothing -- Pretty printing instance Outputable WorkList where ppr (WL { wl_eqs = eqs, wl_rest = rest, wl_implics = implics }) = text "WL" <+> (braces $ vcat [ ppUnless (null eqs) $ text "Eqs =" <+> vcat (map ppr eqs) , ppUnless (null rest) $ text "Non-eqs =" <+> vcat (map ppr rest) , ppUnless (isEmptyBag implics) $ ifPprDebug (text "Implics =" <+> vcat (map ppr (bagToList implics))) (text "(Implics omitted)") ]) {- ********************************************************************* * * InertSet: the inert set * * * * ********************************************************************* -} type CycleBreakerVarStack = NonEmpty [(TcTyVar, TcType)] -- ^ a stack of (CycleBreakerTv, original family applications) lists -- first element in the stack corresponds to current implication; -- later elements correspond to outer implications -- used to undo the cycle-breaking needed to handle -- Note [Type equality cycles] in GHC.Tc.Solver.Canonical -- Why store the outer implications? For the use in mightEqualLater (only) data InertSet = IS { inert_cans :: InertCans -- Canonical Given, Wanted -- Sometimes called "the inert set" , inert_cycle_breakers :: CycleBreakerVarStack , inert_famapp_cache :: FunEqMap Reduction -- Just a hash-cons cache for use when reducing family applications -- only -- -- If F tys :-> (co, rhs, flav), -- then co :: F tys ~N rhs -- all evidence is from instances or Givens; no coercion holes here -- (We have no way of "kicking out" from the cache, so putting -- wanteds here means we can end up solving a Wanted with itself. Bad) , inert_solved_dicts :: DictMap CtEvidence -- All Wanteds, of form ev :: C t1 .. 
tn -- See Note [Solved dictionaries] -- and Note [Do not add superclasses of solved dictionaries] } instance Outputable InertSet where ppr (IS { inert_cans = ics , inert_solved_dicts = solved_dicts }) = vcat [ ppr ics , ppUnless (null dicts) $ text "Solved dicts =" <+> vcat (map ppr dicts) ] where dicts = bagToList (dictsToBag solved_dicts) emptyInertCans :: InertCans emptyInertCans = IC { inert_eqs = emptyDVarEnv , inert_given_eq_lvl = topTcLevel , inert_given_eqs = False , inert_dicts = emptyDictMap , inert_safehask = emptyDictMap , inert_funeqs = emptyFunEqs , inert_insts = [] , inert_irreds = emptyCts } emptyInert :: InertSet emptyInert = IS { inert_cans = emptyInertCans , inert_cycle_breakers = [] :| [] , inert_famapp_cache = emptyFunEqs , inert_solved_dicts = emptyDictMap } {- Note [Solved dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we apply a top-level instance declaration, we add the "solved" dictionary to the inert_solved_dicts. In general, we use it to avoid creating a new EvVar when we have a new goal that we have solved in the past. But in particular, we can use it to create *recursive* dictionaries. The simplest, degenerate case is instance C [a] => C [a] where ... If we have [W] d1 :: C [x] then we can apply the instance to get d1 = $dfCList d [W] d2 :: C [x] Now 'd1' goes in inert_solved_dicts, and we can solve d2 directly from d1. d1 = $dfCList d d2 = d1 See Note [Example of recursive dictionaries] VERY IMPORTANT INVARIANT: (Solved Dictionary Invariant) Every member of the inert_solved_dicts is the result of applying an instance declaration that "takes a step" An instance "takes a step" if it has the form dfunDList d1 d2 = MkD (...) (...) (...) That is, the dfun is lazy in its arguments, and guarantees to immediately return a dictionary constructor. NB: all dictionary data constructors are lazy in their arguments. 
This property is crucial to ensure that all dictionaries are non-bottom, which in turn ensures that the whole "recursive dictionary" idea works at all, even if we get something like rec { d = dfunDList d dx } See Note [Recursive superclasses] in GHC.Tc.TyCl.Instance. Reason: - All instances, except two exceptions listed below, "take a step" in the above sense - Exception 1: local quantified constraints have no such guarantee; indeed, adding a "solved dictionary" when appling a quantified constraint led to the ability to define unsafeCoerce in #17267. - Exception 2: the magic built-in instance for (~) has no such guarantee. It behaves as if we had class (a ~# b) => (a ~ b) where {} instance (a ~# b) => (a ~ b) where {} The "dfun" for the instance is strict in the coercion. Anyway there's no point in recording a "solved dict" for (t1 ~ t2); it's not going to allow a recursive dictionary to be constructed. Ditto (~~) and Coercible. THEREFORE we only add a "solved dictionary" - when applying an instance declaration - subject to Exceptions 1 and 2 above In implementation terms - GHC.Tc.Solver.Monad.addSolvedDict adds a new solved dictionary, conditional on the kind of instance - It is only called when applying an instance decl, in GHC.Tc.Solver.Interact.doTopReactDict - ClsInst.InstanceWhat says what kind of instance was used to solve the constraint. In particular * LocalInstance identifies quantified constraints * BuiltinEqInstance identifies the strange built-in instances for equality. - ClsInst.instanceReturnsDictCon says which kind of instance guarantees to return a dictionary constructor Other notes about solved dictionaries * See also Note [Do not add superclasses of solved dictionaries] * The inert_solved_dicts field is not rewritten by equalities, so it may get out of date. * The inert_solved_dicts are all Wanteds, never givens * We only cache dictionaries from top-level instances, not from local quantified constraints. 
Reason: if we cached the latter we'd need to purge the cache when bringing new quantified constraints into scope, because quantified constraints "shadow" top-level instances. Note [Do not add superclasses of solved dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Every member of inert_solved_dicts is the result of applying a dictionary function, NOT of applying superclass selection to anything. Consider class Ord a => C a where instance Ord [a] => C [a] where ... Suppose we are trying to solve [G] d1 : Ord a [W] d2 : C [a] Then we'll use the instance decl to give [G] d1 : Ord a Solved: d2 : C [a] = $dfCList d3 [W] d3 : Ord [a] We must not add d4 : Ord [a] to the 'solved' set (by taking the superclass of d2), otherwise we'll use it to solve d3, without ever using d1, which would be a catastrophe. Solution: when extending the solved dictionaries, do not add superclasses. That's why each element of the inert_solved_dicts is the result of applying a dictionary function. Note [Example of recursive dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --- Example 1 data D r = ZeroD | SuccD (r (D r)); instance (Eq (r (D r))) => Eq (D r) where ZeroD == ZeroD = True (SuccD a) == (SuccD b) = a == b _ == _ = False; equalDC :: D [] -> D [] -> Bool; equalDC = (==); We need to prove (Eq (D [])). 
Here's how we go: [W] d1 : Eq (D []) By instance decl of Eq (D r): [W] d2 : Eq [D []] where d1 = dfEqD d2 By instance decl of Eq [a]: [W] d3 : Eq (D []) where d2 = dfEqList d3 d1 = dfEqD d2 Now this wanted can interact with our "solved" d1 to get: d3 = d1 -- Example 2: This code arises in the context of "Scrap Your Boilerplate with Class" class Sat a class Data ctx a instance Sat (ctx Char) => Data ctx Char -- dfunData1 instance (Sat (ctx [a]), Data ctx a) => Data ctx [a] -- dfunData2 class Data Maybe a => Foo a instance Foo t => Sat (Maybe t) -- dfunSat instance Data Maybe a => Foo a -- dfunFoo1 instance Foo a => Foo [a] -- dfunFoo2 instance Foo [Char] -- dfunFoo3 Consider generating the superclasses of the instance declaration instance Foo a => Foo [a] So our problem is this [G] d0 : Foo t [W] d1 : Data Maybe [t] -- Desired superclass We may add the given in the inert set, along with its superclasses Inert: [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 WorkList [W] d1 : Data Maybe [t] Solve d1 using instance dfunData2; d1 := dfunData2 d2 d3 Inert: [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] WorkList: [W] d2 : Sat (Maybe [t]) [W] d3 : Data Maybe t Now, we may simplify d2 using dfunSat; d2 := dfunSat d4 Inert: [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] d2 : Sat (Maybe [t]) WorkList: [W] d3 : Data Maybe t [W] d4 : Foo [t] Now, we can just solve d3 from d01; d3 := d01 Inert [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] d2 : Sat (Maybe [t]) WorkList [W] d4 : Foo [t] Now, solve d4 using dfunFoo2; d4 := dfunFoo2 d5 Inert [G] d0 : Foo t [G] d01 : Data Maybe t -- Superclass of d0 Solved: d1 : Data Maybe [t] d2 : Sat (Maybe [t]) d4 : Foo [t] WorkList: [W] d5 : Foo t Now, d5 can be solved! 
d5 := d0 Result d1 := dfunData2 d2 d3 d2 := dfunSat d4 d3 := d01 d4 := dfunFoo2 d5 d5 := d0 -} {- ********************************************************************* * * InertCans: the canonical inerts * * * * ********************************************************************* -} {- Note [Tracking Given equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For reasons described in (UNTOUCHABLE) in GHC.Tc.Utils.Unify Note [Unification preconditions], we can't unify alpha[2] ~ Int under a level-4 implication if there are any Given equalities bound by the implications at level 3 of 4. To that end, the InertCans tracks inert_given_eq_lvl :: TcLevel -- The TcLevel of the innermost implication that has a Given -- equality of the sort that make a unification variable untouchable -- (see Note [Unification preconditions] in GHC.Tc.Utils.Unify). We update inert_given_eq_lvl whenever we add a Given to the inert set, in updateGivenEqs. Then a unification variable alpha[n] is untouchable iff n < inert_given_eq_lvl that is, if the unification variable was born outside an enclosing Given equality. Exactly which constraints should trigger (UNTOUCHABLE), and hence should update inert_given_eq_lvl? * We do /not/ need to worry about let-bound skolems, such ast forall[2] a. a ~ [b] => blah See Note [Let-bound skolems] * Consider an implication forall[2]. beta[1] => alpha[1] ~ Int where beta is a unification variable that has already been unified to () in an outer scope. Then alpha[1] is perfectly touchable and we can unify alpha := Int. So when deciding whether the givens contain an equality, we should canonicalise first, rather than just looking at the /original/ givens (#8644). * However, we must take account of *potential* equalities. Consider the same example again, but this time we have /not/ yet unified beta: forall[2] beta[1] => ...blah... 
Because beta might turn into an equality, updateGivenEqs conservatively treats it as a potential equality, and updates inert_give_eq_lvl * What about something like forall[2] a b. a ~ F b => [W] alpha[1] ~ X y z? That Given cannot affect the Wanted, because the Given is entirely *local*: it mentions only skolems bound in the very same implication. Such equalities need not make alpha untouchable. (Test case typecheck/should_compile/LocalGivenEqs has a real-life motivating example, with some detailed commentary.) Hence the 'mentionsOuterVar' test in updateGivenEqs. However, solely to support better error messages (see Note [HasGivenEqs] in GHC.Tc.Types.Constraint) we also track these "local" equalities in the boolean inert_given_eqs field. This field is used only to set the ic_given_eqs field to LocalGivenEqs; see the function getHasGivenEqs. Here is a simpler case that triggers this behaviour: data T where MkT :: F a ~ G b => a -> b -> T f (MkT _ _) = True Because of this behaviour around local equality givens, we can infer the type of f. This is typecheck/should_compile/LocalGivenEqs2. * We need not look at the equality relation involved (nominal vs representational), because representational equalities can still imply nominal ones. For example, if (G a ~R G b) and G's argument's role is nominal, then we can deduce a ~N b. Note [Let-bound skolems] ~~~~~~~~~~~~~~~~~~~~~~~~ If * the inert set contains a canonical Given CEqCan (a ~ ty) and * 'a' is a skolem bound in this very implication, then: a) The Given is pretty much a let-binding, like f :: (a ~ b->c) => a -> a Here the equality constraint is like saying let a = b->c in ... It is not adding any new, local equality information, and hence can be ignored by has_given_eqs b) 'a' will have been completely substituted out in the inert set, so we can safely discard it. For an example, see #9211. 
See also GHC.Tc.Utils.Unify Note [Deeper level on the left] for how we ensure that the right variable is on the left of the equality when both are tyvars. You might wonder whether the skolem really needs to be bound "in the very same implication" as the equality constraint. Consider this (c.f. #15009): data S a where MkS :: (a ~ Int) => S a g :: forall a. S a -> a -> blah g x y = let h = \z. ( z :: Int , case x of MkS -> [y,z]) in ... From the type signature for `g`, we get `y::a` . Then when we encounter the `\z`, we'll assign `z :: alpha[1]`, say. Next, from the body of the lambda we'll get [W] alpha[1] ~ Int -- From z::Int [W] forall[2]. (a ~ Int) => [W] alpha[1] ~ a -- From [y,z] Now, unify alpha := a. Now we are stuck with an unsolved alpha~Int! So we must treat alpha as untouchable under the forall[2] implication. Note [Detailed InertCans Invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The InertCans represents a collection of constraints with the following properties: * All canonical * No two dictionaries with the same head * No two CIrreds with the same type * Family equations inert wrt top-level family axioms * Dictionaries have no matching top-level instance * Given family or dictionary constraints don't mention touchable unification variables * Non-CEqCan constraints are fully rewritten with respect to the CEqCan equalities (modulo eqCanRewrite of course; eg a wanted cannot rewrite a given) * CEqCan equalities: see Note [inert_eqs: the inert equalities] Also see documentation in Constraint.Ct for a list of invariants Note [inert_eqs: the inert equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Definition [Can-rewrite relation] A "can-rewrite" relation between flavours, written f1 >= f2, is a binary relation with the following properties (R1) >= is transitive (R2) If f1 >= f, and f2 >= f, then either f1 >= f2 or f2 >= f1 (See Note [Why R2?].) Lemma (L0). If f1 >= f then f1 >= f1 Proof. 
By property (R2), with f1=f2 Definition [Generalised substitution] A "generalised substitution" S is a set of triples (lhs -f-> t), where lhs is a type variable or an exactly-saturated type family application (that is, lhs is a CanEqLHS) t is a type f is a flavour such that (WF1) if (lhs1 -f1-> t1) in S (lhs2 -f2-> t2) in S then (f1 >= f2) implies that lhs1 does not appear within lhs2 (WF2) if (lhs -f-> t) is in S, then t /= lhs Definition [Applying a generalised substitution] If S is a generalised substitution S(f,t0) = t, if (t0 -fs-> t) in S, and fs >= f = apply S to components of t0, otherwise See also Note [Flavours with roles]. Theorem: S(f,t0) is well defined as a function. Proof: Suppose (lhs -f1-> t1) and (lhs -f2-> t2) are both in S, and f1 >= f and f2 >= f Then by (R2) f1 >= f2 or f2 >= f1, which contradicts (WF1) Notation: repeated application. S^0(f,t) = t S^(n+1)(f,t) = S(f, S^n(t)) Definition: terminating generalised substitution A generalised substitution S is *terminating* iff (IG1) there is an n such that for every f,t, S^n(f,t) = S^(n+1)(f,t) By (IG1) we define S*(f,t) to be the result of exahaustively applying S(f,_) to t. ----------------------------------------------------------------------------- Our main invariant: the CEqCans in inert_eqs should be a terminating generalised substitution ----------------------------------------------------------------------------- Note that termination is not the same as idempotence. To apply S to a type, you may have to apply it recursively. But termination does guarantee that this recursive use will terminate. Note [Why R2?] ~~~~~~~~~~~~~~ R2 states that, if we have f1 >= f and f2 >= f, then either f1 >= f2 or f2 >= f1. If we do not have R2, we will easily fall into a loop. To see why, imagine we have f1 >= f, f2 >= f, and that's it. Then, let our inert set S = {a -f1-> b, b -f2-> a}. Computing S(f,a) does not terminate. 
And yet, we have a hard time noticing an occurs-check problem when building S, as the two equalities cannot rewrite one another. R2 actually restricts our ability to accept user-written programs. See Note [Avoiding rewriting cycles] in GHC.Tc.Types.Constraint for an example. Note [Rewritable] ~~~~~~~~~~~~~~~~~ This Note defines what it means for a type variable or type family application (that is, a CanEqLHS) to be rewritable in a type. This definition is used by the anyRewritableXXX family of functions and is meant to model the actual behaviour in GHC.Tc.Solver.Rewrite. Ignoring roles (for now): A CanEqLHS lhs is *rewritable* in a type t if the lhs tree appears as a subtree within t without traversing any of the following components of t: * coercions (whether they appear in casts CastTy or as arguments CoercionTy) * kinds of variable occurrences The check for rewritability *does* look in kinds of the bound variable of a ForAllTy. Goal: If lhs is not rewritable in t, then t is a fixpoint of the generalised substitution containing only {lhs -f*-> t'}, where f* is a flavour such that f* >= f for all f. The reason for this definition is that the rewriter does not rewrite in coercions or variables' kinds. In turn, the rewriter does not need to rewrite there because those places are never used for controlling the behaviour of the solver: these places are not used in matching instances or in decomposing equalities. There is one exception to the claim that non-rewritable parts of the tree do not affect the solver: we sometimes do an occurs-check to decide e.g. how to orient an equality. (See the comments on GHC.Tc.Solver.Canonical.canEqTyVarFunEq.) Accordingly, the presence of a variable in a kind or coercion just might influence the solver. Here is an example: type family Const x y where Const x y = x AxConst :: forall x y. 
Const x y ~# x alpha :: Const Type Nat [W] alpha ~ Int |> (sym (AxConst Type alpha) ;; AxConst Type alpha ;; sym (AxConst Type Nat)) The cast is clearly ludicrous (it ties together a cast and its symmetric version), but we can't quite rule it out. (See (EQ1) from Note [Respecting definitional equality] in GHC.Core.TyCo.Rep to see why we need the Const Type Nat bit.) And yet this cast will (quite rightly) prevent alpha from unifying with the RHS. I (Richard E) don't have an example of where this problem can arise from a Haskell program, but we don't have an air-tight argument for why the definition of *rewritable* given here is correct. Taking roles into account: we must consider a rewrite at a given role. That is, a rewrite arises from some equality, and that equality has a role associated with it. As we traverse a type, we track what role we are allowed to rewrite with. For example, suppose we have an inert [G] b ~R# Int. Then b is rewritable in Maybe b but not in F b, where F is a type function. This role-aware logic is present in both the anyRewritableXXX functions and in the rewriter. See also Note [anyRewritableTyVar must be role-aware] in GHC.Tc.Utils.TcType. Note [Extending the inert equalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Main Theorem [Stability under extension] Suppose we have a "work item" lhs -fw-> t and a terminating generalised substitution S, THEN the extended substitution T = S+(lhs -fw-> t) is a terminating generalised substitution PROVIDED (T1) S(fw,lhs) = lhs -- LHS of work-item is a fixpoint of S(fw,_) (T2) S(fw,t) = t -- RHS of work-item is a fixpoint of S(fw,_) (T3) lhs not in t -- No occurs check in the work item -- If lhs is a type family application, we require only that -- lhs is not *rewritable* in t. See Note [Rewritable] and -- Note [CEqCan occurs check] in GHC.Tc.Types.Constraint. AND, for every (lhs1 -fs-> s) in S: (K0) not (fw >= fs) Reason: suppose we kick out (lhs1 -fs-> s), and add (lhs -fw-> t) to the inert set. 
The latter can't rewrite the former, so the kick-out achieved nothing -- From here, we can assume fw >= fs OR (K4) lhs1 is a tyvar AND fs >= fw OR { (K1) lhs is not rewritable in lhs1. See Note [Rewritable]. Reason: if fw >= fs, WF1 says we can't have both lhs0 -fw-> t and F lhs0 -fs-> s AND (K2): guarantees termination of the new substitution { (K2a) not (fs >= fs) OR (K2b) lhs not in s } AND (K3) See Note [K3: completeness of solving] { (K3a) If the role of fs is nominal: s /= lhs (K3b) If the role of fs is representational: s is not of form (lhs t1 .. tn) } } Conditions (T1-T3) are established by the canonicaliser Conditions (K1-K3) are established by GHC.Tc.Solver.Monad.kickOutRewritable The idea is that * T1 and T2 are guaranteed by exhaustively rewriting the work-item with S(fw,_). * T3 is guaranteed by an occurs-check on the work item. This is done during canonicalisation, in checkTypeEq; invariant (TyEq:OC) of CEqCan. See also Note [CEqCan occurs check] in GHC.Tc.Types.Constraint. * (K1-3) are the "kick-out" criteria. (As stated, they are really the "keep" criteria.) If the current inert S contains a triple that does not satisfy (K1-3), then we remove it from S by "kicking it out", and re-processing it. * Note that kicking out is a Bad Thing, because it means we have to re-process a constraint. The less we kick out, the better. TODO: Make sure that kicking out really *is* a Bad Thing. We've assumed this but haven't done the empirical study to check. * Assume we have G>=G, G>=W and that's all. Then, when performing a unification we add a new given a -G-> ty. But doing so does NOT require us to kick out an inert wanted that mentions a, because of (K2a). This is a common case, hence good not to kick out. See also (K2a) below. * Lemma (L1): The conditions of the Main Theorem imply that there is no (lhs -fs-> t) in S, s.t. (fs >= fw). Proof. Suppose the contrary (fs >= fw). Then because of (T1), S(fw,lhs)=lhs. But since fs>=fw, S(fw,lhs) = t, hence t=lhs. 
But now we have (lhs -fs-> lhs) in S, which contradicts (WF2). * The extended substitution satisfies (WF1) and (WF2) - (K1) plus (L1) guarantee that the extended substitution satisfies (WF1). - (T3) guarantees (WF2). * (K2) and (K4) are about termination. Intuitively, any infinite chain S^0(f,t), S^1(f,t), S^2(f,t).... must pass through the new work item infinitely often, since the substitution without the work item is terminating; and must pass through at least one of the triples in S infinitely often. - (K2a): if not(fs>=fs) then there is no f that fs can rewrite (fs>=f) (this is Lemma (L0)), and hence this triple never plays a role in application S(f,t). It is always safe to extend S with such a triple. (NB: we could strengten K1) in this way too, but see K3. - (K2b): if lhs not in s, we have no further opportunity to apply the work item - (K4): See Note [K4] * Lemma (L3). Suppose we have f* such that, for all f, f* >= f. Then if we are adding lhs -fw-> t (where T1, T2, and T3 hold), we will keep a -f*-> s. Proof. K4 holds; thus, we keep. Key lemma to make it watertight. Under the conditions of the Main Theorem, forall f st fw >= f, a is not in S^k(f,t), for any k Also, consider roles more carefully. See Note [Flavours with roles] Note [K4] ~~~~~~~~~ K4 is a "keep" condition of Note [Extending the inert equalities]. Here is the scenario: * We are considering adding (lhs -fw-> t) to the inert set S. * S already has (lhs1 -fs-> s). * We know S(fw, lhs) = lhs, S(fw, t) = t, and lhs is not rewritable in t. See Note [Rewritable]. These are (T1), (T2), and (T3). * We further know fw >= fs. (If not, then we short-circuit via (K0).) K4 says that we may keep lhs1 -fs-> s in S if: lhs1 is a tyvar AND fs >= fw Why K4 guarantees termination: * If fs >= fw, we know a is not rewritable in t, because of (T2). * We further know lhs /= a, because of (T1). 
* Accordingly, a use of the new inert item lhs -fw-> t cannot create the conditions for a use of a -fs-> s (precisely because t does not mention a), and hence, the extended substitution (with lhs -fw-> t in it) is a terminating generalised substitution. Recall that the termination generalised substitution includes only mappings that pass an occurs check. This is (T3). At one point, we worried that the argument here would fail if s mentioned a, but (T3) rules out this possibility. Put another way: the terminating generalised substitution considers only the inert_eqs, not other parts of the inert set (such as the irreds). Can we liberalise K4? No. Why we cannot drop the (fs >= fw) condition: * Suppose not (fs >= fw). It might be the case that t mentions a, and this can cause a loop. Example: Work: [G] b ~ a Inert: [W] a ~ b (where G >= G, G >= W, and W >= W) If we don't kick out the inert, then we get a loop on e.g. [W] a ~ Int. * Note that the above example is different if the inert is a Given G, because (T1) won't hold. Why we cannot drop the tyvar condition: * Presume fs >= fw. Thus, F tys is not rewritable in t, because of (T2). * Can the use of lhs -fw-> t create the conditions for a use of F tys -fs-> s? Yes! This can happen if t appears within tys. Here is an example: Work: [G] a ~ Int Inert: [G] F Int ~ F a Now, if we have [W] F a ~ Bool, we will rewrite ad infinitum on the left-hand side. The key reason why K2b works in the tyvar case is that tyvars are atomic: if the right-hand side of an equality does not mention a variable a, then it cannot allow an equality with an LHS of a to fire. This is not the case for type family applications. Bottom line: K4 can keep only inerts with tyvars on the left. Put differently, K4 will never prevent an inert with a type family on the left from being kicked out. Consequence: We never kick out a Given/Nominal equality with a tyvar on the left. This is Lemma (L3) of Note [Extending the inert equalities]. 
It is good because it means we can effectively model the mutable filling of metavariables with Given/Nominal equalities. That is: it should be the case that we could rewrite our solver never to fill in a metavariable; instead, it would "solve" a wanted like alpha ~ Int by turning it into a Given, allowing it to be used in rewriting. We would want the solver to behave the same whether it uses metavariables or Givens. And (L3) says that no Given/Nominals over tyvars are ever kicked out, just like we never unfill a metavariable. Nice. Getting this wrong (that is, allowing K4 to apply to situations with the type family on the left) led to #19042. (At that point, K4 was known as K2b.) Originally, this condition was part of K2, but #17672 suggests it should be a top-level K condition. Note [K3: completeness of solving] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (K3) is not necessary for the extended substitution to be terminating. In fact K1 could be made stronger by saying ... then (not (fw >= fs) or not (fs >= fs)) But it's not enough for S to be terminating; we also want completeness. That is, we want to be able to solve all soluble wanted equalities. Suppose we have work-item b -G-> a inert-item a -W-> b Assuming (G >= W) but not (W >= W), this fulfills all the conditions, so we could extend the inerts, thus: inert-items b -G-> a a -W-> b But if we kicked-out the inert item, we'd get work-item a -W-> b inert-item b -G-> a Then rewrite the work-item gives us (a -W-> a), which is soluble via Refl. So we add one more clause to the kick-out criteria Another way to understand (K3) is that we treat an inert item a -f-> b in the same way as b -f-> a So if we kick out one, we should kick out the other. The orientation is somewhat accidental. When considering roles, we also need the second clause (K3b). Consider work-item c -G/N-> a inert-item a -W/R-> b c The work-item doesn't get rewritten by the inert, because (>=) doesn't hold. 
But we don't kick out the inert item because not (W/R >= W/R). So we just
add the work item. But then, consider if we hit the following:

  work-item    b -G/N-> Id
  inert-items  a -W/R-> b c
               c -G/N-> a
where
  newtype Id x = Id x

For similar reasons, if we only had (K3a), we wouldn't kick the
representational inert out. And then, we'd miss solving the inert, which
now reduced to reflexivity.

The solution here is to kick out representational inerts whenever the
lhs of a work item is "exposed", where exposed means being at the
head of the top-level application chain (lhs t1 .. tn). See
is_can_eq_lhs_head. This is encoded in (K3b).

Beware: if we make this test succeed too often, we kick out too much, and
the solver might loop. Consider (#14363)

  work item:  [G] a ~R f b
  inert item: [G] b ~R f a

In GHC 8.2 the completeness tests were more aggressive, and kicked out the
inert item; but no rewriting happened and there was an infinite loop. All
we need is to have the tyvar at the head.

Note [Flavours with roles]
~~~~~~~~~~~~~~~~~~~~~~~~~~
The system described in Note [inert_eqs: the inert equalities] discusses an
abstract set of flavours. In GHC, flavours have two components: the flavour
proper, taken from {Wanted, Given} and the equality relation (often called
role), taken from {NomEq, ReprEq}. When substituting w.r.t. the inert set,
as described in Note [inert_eqs: the inert equalities], we must be careful
to respect all components of a flavour. For example, if we have

  inert set: a -G/R-> Int
             b -G/R-> Bool

  type role T nominal representational

and we wish to compute S(W/R, T a b), the correct answer is T a Bool, NOT
T Int Bool. The reason is that T's first parameter has a nominal role, and
thus rewriting a to Int in T a b is wrong. Indeed, this non-congruence of
substitution means that the proof in Note [inert_eqs: the inert equalities]
may need to be revisited, but we don't think that the end conclusion is
wrong.
-}

-- | The canonical ("inert") constraints currently held by the solver.
-- See Note [Detailed InertCans Invariants] for more
data InertCans
  = IC { inert_eqs :: InertEqs
              -- See Note [inert_eqs: the inert equalities]
              -- All CEqCans with a TyVarLHS; index is the LHS tyvar
              -- Domain = skolems and untouchables; a touchable would be unified

       , inert_funeqs :: FunEqMap EqualCtList
              -- All CEqCans with a TyFamLHS; index is the whole family head type.
              -- LHS is fully rewritten (modulo eqCanRewrite constraints)
              --     wrt inert_eqs
              -- Can include both [G] and [W]

       , inert_dicts :: DictMap Ct
              -- Dictionaries only
              -- All fully rewritten (modulo flavour constraints)
              --     wrt inert_eqs

       , inert_insts :: [QCInst]
              -- Quantified constraints held as Given local instances

       , inert_safehask :: DictMap Ct
              -- Failed dictionary resolution due to Safe Haskell overlapping
              -- instances restriction. We keep this separate from inert_dicts
              -- as it doesn't cause compilation failure, just safe inference
              -- failure.
              --
              -- ^ See Note [Safe Haskell Overlapping Instances Implementation]
              -- in GHC.Tc.Solver

       , inert_irreds :: Cts
              -- Irreducible predicates that cannot be made canonical,
              --      and which don't interact with others (e.g. (c a))
              -- and insoluble predicates (e.g. Int ~ Bool, or a ~ [a])

       , inert_given_eq_lvl :: TcLevel
              -- The TcLevel of the innermost implication that has a Given
              -- equality of the sort that make a unification variable untouchable
              -- (see Note [Unification preconditions] in GHC.Tc.Utils.Unify).
              -- See Note [Tracking Given equalities]

       , inert_given_eqs :: Bool
              -- True <=> The inert Givens *at this level* (tcl_tclvl)
              --          could include at least one equality /other than/ a
              --          let-bound skolem equality.
              -- Reason: report these givens when reporting a failed equality
              -- See Note [Tracking Given equalities]
       }

-- | The inert tyvar equalities, indexed by their LHS type variable.
type InertEqs = DTyVarEnv EqualCtList

instance Outputable InertCans where
  ppr (IC { inert_eqs = eqs
          , inert_funeqs = funeqs
          , inert_dicts = dicts
          , inert_safehask = safehask
          , inert_irreds = irreds
          , inert_given_eq_lvl = ge_lvl
          , inert_given_eqs = given_eqs
          , inert_insts = insts })

    = braces $ vcat
      [ ppUnless (isEmptyDVarEnv eqs) $
        text "Equalities:"
          <+> pprCts (foldDVarEnv folder emptyCts eqs)
      , ppUnless (isEmptyTcAppMap funeqs) $
        text "Type-function equalities ="
          <+> pprCts (foldFunEqs folder funeqs emptyCts)
      , ppUnless (isEmptyTcAppMap dicts) $
        text "Dictionaries ="
          <+> pprCts (dictsToBag dicts)
      , ppUnless (isEmptyTcAppMap safehask) $
        text "Safe Haskell unsafe overlap ="
          <+> pprCts (dictsToBag safehask)
      , ppUnless (isEmptyCts irreds) $
        text "Irreds ="
          <+> pprCts irreds
      , ppUnless (null insts) $
        text "Given instances ="
          <+> vcat (map ppr insts)
      , text "Innermost given equalities ="
          <+> ppr ge_lvl
      , text "Given eqs at this level ="
          <+> ppr given_eqs
      ]
    where
      -- Flatten each EqualCtList into the accumulating bag
      folder eqs rest = listToBag eqs `andCts` rest

{- *********************************************************************
*                                                                      *
                   Inert equalities
*                                                                      *
********************************************************************* -}

-- | Add a tyvar equality, merging into any existing EqualCtList
-- for the same LHS tyvar.
addTyEq :: InertEqs -> TcTyVar -> Ct -> InertEqs
addTyEq old_eqs tv ct
  = extendDVarEnv_C add_eq old_eqs tv [ct]
  where
    add_eq old_eqs _ = addToEqualCtList ct old_eqs

-- | Add a type-family equality, merging into any existing EqualCtList
-- for the same family head.
addCanFunEq :: FunEqMap EqualCtList -> TyCon -> [TcType] -> Ct
            -> FunEqMap EqualCtList
addCanFunEq old_eqs fun_tc fun_args ct
  = alterTcApp old_eqs fun_tc fun_args upd
  where
    upd (Just old_equal_ct_list) = Just $ addToEqualCtList ct old_equal_ct_list
    upd Nothing                  = Just [ct]

-- | Fold over every individual Ct in the inert tyvar equalities.
foldTyEqs :: (Ct -> b -> b) -> InertEqs -> b -> b
foldTyEqs k eqs z
  = foldDVarEnv (\cts z -> foldr k z cts) z eqs

-- | All inert equalities whose LHS is the given tyvar.
findTyEqs :: InertCans -> TyVar -> [Ct]
findTyEqs icans tv = concat @Maybe (lookupDVarEnv (inert_eqs icans) tv)

-- | Delete the equality lhs ~ rhs from the inert set;
-- panics (via isThisOne) if a non-CEqCan is found under the index.
delEq :: InertCans -> CanEqLHS -> TcType -> InertCans
delEq ic lhs rhs = case lhs of
    TyVarLHS tv
      -> ic { inert_eqs = alterDVarEnv upd (inert_eqs ic) tv }
    TyFamLHS tf args
      -> ic { inert_funeqs = alterTcApp (inert_funeqs ic) tf args upd }
  where
    isThisOne :: Ct -> Bool
    isThisOne (CEqCan { cc_rhs = t1 }) = tcEqTypeNoKindCheck rhs t1
    isThisOne other = pprPanic "delEq" (ppr lhs $$ ppr ic $$ ppr other)

    upd :: Maybe EqualCtList -> Maybe EqualCtList
    upd (Just eq_ct_list) = filterEqualCtList (not . isThisOne) eq_ct_list
    upd Nothing           = Nothing

-- | All inert equalities with the given LHS (tyvar or family head).
findEq :: InertCans -> CanEqLHS -> [Ct]
findEq icans (TyVarLHS tv) = findTyEqs icans tv
findEq icans (TyFamLHS fun_tc fun_args)
  = concat @Maybe (findFunEq (inert_funeqs icans) fun_tc fun_args)

{-# INLINE partition_eqs_container #-}
-- Shared worker for partitionInertEqs/partitionFunEqs: splits out the
-- constraints satisfying pred, rebuilding the container with the rest.
partition_eqs_container
  :: forall container
   . container    -- empty container
  -> (forall b. (EqualCtList -> b -> b) -> b -> container -> b) -- folder
  -> (container -> CanEqLHS -> EqualCtList -> container)  -- extender
  -> (Ct -> Bool)
  -> container
  -> ([Ct], container)
partition_eqs_container empty_container fold_container extend_container pred orig_inerts
  = fold_container folder ([], empty_container) orig_inerts
  where
    folder :: EqualCtList -> ([Ct], container) -> ([Ct], container)
    folder eqs (acc_true, acc_false)
      = (eqs_true ++ acc_true, acc_false')
      where
        (eqs_true, eqs_false) = partition pred eqs

        -- Only re-index the leftovers when there are any; the index key is
        -- taken from the first leftover constraint's LHS
        acc_false'
          | CEqCan { cc_lhs = lhs } : _ <- eqs_false
          = extend_container acc_false lhs eqs_false
          | otherwise
          = acc_false

partitionInertEqs :: (Ct -> Bool)   -- Ct will always be a CEqCan with a TyVarLHS
                  -> InertEqs
                  -> ([Ct], InertEqs)
partitionInertEqs = partition_eqs_container emptyDVarEnv foldDVarEnv extendInertEqs

-- precondition: CanEqLHS is a TyVarLHS
extendInertEqs :: InertEqs -> CanEqLHS -> EqualCtList -> InertEqs
extendInertEqs eqs (TyVarLHS tv) new_eqs = extendDVarEnv eqs tv new_eqs
extendInertEqs _ other _ = pprPanic "extendInertEqs" (ppr other)

partitionFunEqs :: (Ct -> Bool)    -- Ct will always be a CEqCan with a TyFamLHS
                -> FunEqMap EqualCtList
                -> ([Ct], FunEqMap EqualCtList)
partitionFunEqs = partition_eqs_container emptyFunEqs
                    (\ f z eqs -> foldFunEqs f eqs z)
                    extendFunEqs

-- precondition: CanEqLHS is a TyFamLHS
extendFunEqs :: FunEqMap EqualCtList -> CanEqLHS -> EqualCtList
             -> FunEqMap EqualCtList
extendFunEqs eqs (TyFamLHS tf args) new_eqs = insertTcApp eqs tf args new_eqs
extendFunEqs _ other _ = pprPanic "extendFunEqs" (ppr other)

{- *********************************************************************
*                                                                      *
                   Adding to and removing from the inert set
*                                                                      *
*                                                                      *
********************************************************************* -}

-- | Add a canonical constraint to the appropriate field of the InertCans,
-- updating the Given-equality tracking where relevant.
addInertItem :: TcLevel -> InertCans -> Ct -> InertCans
addInertItem tc_lvl
             ics@(IC { inert_funeqs = funeqs, inert_eqs = eqs })
             item@(CEqCan { cc_lhs = lhs })
  = updateGivenEqs tc_lvl item $
    case lhs of
       TyFamLHS tc tys -> ics { inert_funeqs = addCanFunEq funeqs tc tys item }
       TyVarLHS tv     -> ics { inert_eqs    = addTyEq eqs tv item }

addInertItem tc_lvl ics@(IC { inert_irreds = irreds }) item@(CIrredCan {})
  = updateGivenEqs tc_lvl item $   -- An Irred might turn out to be an
                                   -- equality, so we play safe
    ics { inert_irreds = irreds `snocBag` item }

addInertItem _ ics item@(CDictCan { cc_class = cls, cc_tyargs = tys })
  = ics { inert_dicts = addDict (inert_dicts ics) cls tys item }

addInertItem _ _ item
  = pprPanic "upd_inert set: can't happen! Inserting " $
    ppr item   -- Can't be CNonCanonical because they only land in inert_irreds

updateGivenEqs :: TcLevel -> Ct -> InertCans -> InertCans
-- Set the inert_given_eq_level to the current level (tclvl)
-- if the constraint is a given equality that should prevent
-- filling in an outer unification variable.
-- See Note [Tracking Given equalities]
updateGivenEqs tclvl ct inerts@(IC { inert_given_eq_lvl = ge_lvl })
  | not (isGivenCt ct) = inerts
  | not_equality ct    = inerts -- See Note [Let-bound skolems]
  | otherwise          = inerts { inert_given_eq_lvl = ge_lvl'
                                , inert_given_eqs    = True }
  where
    ge_lvl' | mentionsOuterVar tclvl (ctEvidence ct)
              -- Includes things like (c a), which *might* be an equality
            = tclvl
            | otherwise
            = ge_lvl

    not_equality :: Ct -> Bool
    -- True <=> definitely not an equality of any kind
    --          except for a let-bound skolem, which doesn't count
    --          See Note [Let-bound skolems]
    -- NB: no need to spot the boxed CDictCan (a ~ b) because its
    --     superclass (a ~# b) will be a CEqCan
    not_equality (CEqCan { cc_lhs = TyVarLHS tv }) = not (isOuterTyVar tclvl tv)
    not_equality (CDictCan {})                     = True
    not_equality _                                 = False

-- | Kick out of the inert set every constraint that could be rewritten
-- by a new equality with the given flavour/role and LHS, returning the
-- kicked-out work alongside the remaining inerts.
kickOutRewritableLHS :: CtFlavourRole  -- Flavour/role of the equality that
                                       -- is being added to the inert set
                     -> CanEqLHS       -- The new equality is lhs ~ ty
                     -> InertCans
                     -> (WorkList, InertCans)
-- See Note [kickOutRewritable]
kickOutRewritableLHS new_fr new_lhs
                     ics@(IC { inert_eqs    = tv_eqs
                             , inert_dicts  = dictmap
                             , inert_funeqs = funeqmap
                             , inert_irreds = irreds
                             , inert_insts  = old_insts })
  = (kicked_out, inert_cans_in)
  where
    -- inert_safehask stays unchanged; is that right?
    inert_cans_in = ics { inert_eqs    = tv_eqs_in
                        , inert_dicts  = dicts_in
                        , inert_funeqs = feqs_in
                        , inert_irreds = irs_in
                        , inert_insts  = insts_in }

    kicked_out :: WorkList
    -- NB: use extendWorkList to ensure that kicked-out equalities get priority
    -- See Note [Prioritise equalities] (Kick-out).
    -- The irreds may include non-canonical (hetero-kinded) equality
    -- constraints, which perhaps may have become soluble after new_lhs
    -- is substituted; ditto the dictionaries, which may include (a~b)
    -- or (a~~b) constraints.
    kicked_out = foldr extendWorkListCt
                       (emptyWorkList { wl_eqs = tv_eqs_out ++ feqs_out })
                       ((dicts_out `andCts` irs_out) `extendCtsList` insts_out)

    (tv_eqs_out, tv_eqs_in) = partitionInertEqs kick_out_eq tv_eqs
    (feqs_out, feqs_in)     = partitionFunEqs kick_out_eq funeqmap
    (dicts_out, dicts_in)   = partitionDicts kick_out_ct dictmap
    (irs_out, irs_in)       = partitionBag kick_out_ct irreds
      -- Kick out even insolubles: See Note [Rewrite insolubles]
      -- Of course we must kick out irreducibles like (c a), in case
      -- we can rewrite 'c' to something more useful

    -- Kick-out for inert instances
    -- See Note [Quantified constraints] in GHC.Tc.Solver.Canonical
    insts_out :: [Ct]
    insts_in  :: [QCInst]
    (insts_out, insts_in)
      | fr_may_rewrite (Given, NomEq)  -- All the insts are Givens
      = partitionWith kick_out_qci old_insts
      | otherwise
      = ([], old_insts)
    kick_out_qci qci
      | let ev = qci_ev qci
      , fr_can_rewrite_ty NomEq (ctEvPred (qci_ev qci))
      = Left (mkNonCanonical ev)
      | otherwise
      = Right qci

    (_, new_role) = new_fr

    -- Can the new tyvar equality rewrite anything inside ty, at this role?
    fr_tv_can_rewrite_ty :: TyVar -> EqRel -> Type -> Bool
    fr_tv_can_rewrite_ty new_tv role ty
      = anyRewritableTyVar role can_rewrite ty
      where
        can_rewrite :: EqRel -> TyVar -> Bool
        can_rewrite old_role tv
          = new_role `eqCanRewrite` old_role && tv == new_tv

    -- Can the new type-family equality rewrite anything inside ty?
    fr_tf_can_rewrite_ty :: TyCon -> [TcType] -> EqRel -> Type -> Bool
    fr_tf_can_rewrite_ty new_tf new_tf_args role ty
      = anyRewritableTyFamApp role can_rewrite ty
      where
        can_rewrite :: EqRel -> TyCon -> [TcType] -> Bool
        can_rewrite old_role old_tf old_tf_args
          = new_role `eqCanRewrite` old_role
            && tcEqTyConApps new_tf new_tf_args old_tf old_tf_args
            -- it's possible for old_tf_args to have too many. This is fine;
            -- we'll only check what we need to.

    {-# INLINE fr_can_rewrite_ty #-}  -- perform the check here only once
    fr_can_rewrite_ty :: EqRel -> Type -> Bool
    fr_can_rewrite_ty = case new_lhs of
      TyVarLHS new_tv             -> fr_tv_can_rewrite_ty new_tv
      TyFamLHS new_tf new_tf_args -> fr_tf_can_rewrite_ty new_tf new_tf_args

    fr_may_rewrite :: CtFlavourRole -> Bool
    fr_may_rewrite fs = new_fr `eqCanRewriteFR` fs
        -- Can the new item rewrite the inert item?

    {-# INLINE kick_out_ct #-}  -- perform case on new_lhs here only once
    kick_out_ct :: Ct -> Bool
    -- Kick it out if the new CEqCan can rewrite the inert one
    -- See Note [kickOutRewritable]
    kick_out_ct = case new_lhs of
      TyVarLHS new_tv
        -> \ct -> let fs@(_, role) = ctFlavourRole ct in
                  fr_may_rewrite fs
                  && fr_tv_can_rewrite_ty new_tv role (ctPred ct)
      TyFamLHS new_tf new_tf_args
        -> \ct -> let fs@(_, role) = ctFlavourRole ct in
                  fr_may_rewrite fs
                  && fr_tf_can_rewrite_ty new_tf new_tf_args role (ctPred ct)

    -- Implements criteria K1-K3 in Note [Extending the inert equalities]
    kick_out_eq :: Ct -> Bool
    kick_out_eq (CEqCan { cc_lhs = lhs, cc_rhs = rhs_ty
                        , cc_ev = ev, cc_eq_rel = eq_rel })
      | not (fr_may_rewrite fs)
      = False -- (K0) Keep it in the inert set if the new thing can't rewrite it

      -- Below here (fr_may_rewrite fs) is True

      | TyVarLHS _ <- lhs
      , fs `eqCanRewriteFR` new_fr
      = False  -- (K4) Keep it in the inert set if the LHS is a tyvar and
               -- it can rewrite the work item. See Note [K4]

      | fr_can_rewrite_ty eq_rel (canEqLHSType lhs)
      = True   -- (K1)
         -- The above check redundantly checks the role & flavour,
         -- but it's very convenient

      | kick_out_for_inertness    = True -- (K2)
      | kick_out_for_completeness = True -- (K3)
      | otherwise                 = False

      where
        fs = (ctEvFlavour ev, eq_rel)
        kick_out_for_inertness
          =    (fs `eqCanRewriteFR` fs)         -- (K2a)
            && fr_can_rewrite_ty eq_rel rhs_ty  -- (K2b)

        kick_out_for_completeness  -- (K3) and Note [K3: completeness of solving]
          = case eq_rel of
              NomEq  -> rhs_ty `eqType` canEqLHSType new_lhs -- (K3a)
              ReprEq -> is_can_eq_lhs_head new_lhs rhs_ty    -- (K3b)

    kick_out_eq ct = pprPanic "kick_out_eq" (ppr ct)

    -- Is the given CanEqLHS at the head of the top-level application
    -- chain of the type?  Used for (K3b).
    is_can_eq_lhs_head (TyVarLHS tv) = go
      where
        go (Rep.TyVarTy tv')   = tv == tv'
        go (Rep.AppTy fun _)   = go fun
        go (Rep.CastTy ty _)   = go ty
        go (Rep.TyConApp {})   = False
        go (Rep.LitTy {})      = False
        go (Rep.ForAllTy {})   = False
        go (Rep.FunTy {})      = False
        go (Rep.CoercionTy {}) = False
    is_can_eq_lhs_head (TyFamLHS fun_tc fun_args) = go
      where
        go (Rep.TyVarTy {})       = False
        go (Rep.AppTy {})         = False  -- no TyConApp to the left of an AppTy
        go (Rep.CastTy ty _)      = go ty
        go (Rep.TyConApp tc args) = tcEqTyConApps fun_tc fun_args tc args
        go (Rep.LitTy {})         = False
        go (Rep.ForAllTy {})      = False
        go (Rep.FunTy {})         = False
        go (Rep.CoercionTy {})    = False

{- Note [kickOutRewritable]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
See also Note [inert_eqs: the inert equalities].

When we add a new inert equality (lhs ~N ty) to the inert set,
we must kick out any inert items that could be rewritten by the
new equality, to maintain the inert-set invariants.

  - We want to kick out an existing inert constraint if
    a) the new constraint can rewrite the inert one
    b) 'lhs' is free in the inert constraint (so that it *will*
       rewrite it if we kick it out).

    For (b) we use anyRewritableCanLHS, which examines the types /and
    kinds/ that are directly visible in the type. Hence
    we will have exposed all the rewriting we care about to make the
    most precise kinds visible for matching classes etc.
No need to kick out constraints that mention type variables whose kinds contain this LHS! - We don't kick out constraints from inert_solved_dicts, and inert_solved_funeqs optimistically. But when we lookup we have to take the substitution into account NB: we could in principle avoid kick-out: a) When unifying a meta-tyvar from an outer level, because then the entire implication will be iterated; see Note [The Unification Level Flag] in GHC.Tc.Solver.Monad. b) For Givens, after a unification. By (GivenInv) in GHC.Tc.Utils.TcType Note [TcLevel invariants], a Given can't include a meta-tyvar from its own level, so it falls under (a). Of course, we must still kick out Givens when adding a new non-unification Given. But kicking out more vigorously may lead to earlier unification and fewer iterations, so we don't take advantage of these possibilities. Note [Rewrite insolubles] ~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have an insoluble alpha ~ [alpha], which is insoluble because an occurs check. And then we unify alpha := [Int]. Then we really want to rewrite the insoluble to [Int] ~ [[Int]]. Now it can be decomposed. Otherwise we end up with a "Can't match [Int] ~ [[Int]]" which is true, but a bit confusing because the outer type constructors match. Hence: * In the main simplifier loops in GHC.Tc.Solver (solveWanteds, simpl_loop), we feed the insolubles in solveSimpleWanteds, so that they get rewritten (albeit not solved). * We kick insolubles out of the inert set, if they can be rewritten (see GHC.Tc.Solver.Monad.kick_out_rewritable) * We rewrite those insolubles in GHC.Tc.Solver.Canonical. See Note [Make sure that insolubles are fully rewritten] in GHC.Tc.Solver.Canonical. 
-}

{- *********************************************************************
*                                                                      *
                 Queries
*                                                                      *
*                                                                      *
********************************************************************* -}

-- | Does the constraint's predicate mention a type variable from an
-- outer (shallower) level than tclvl?
mentionsOuterVar :: TcLevel -> CtEvidence -> Bool
mentionsOuterVar tclvl ev
  = anyFreeVarsOfType (isOuterTyVar tclvl) $
    ctEvPred ev

isOuterTyVar :: TcLevel -> TyCoVar -> Bool
-- True of a type variable that comes from a
-- shallower level than the ambient level (tclvl)
isOuterTyVar tclvl tv
  | isTyVar tv = assertPpr (not (isTouchableMetaTyVar tclvl tv)) (ppr tv <+> ppr tclvl) $
                 tclvl `strictlyDeeperThan` tcTyVarLevel tv
    -- ASSERT: we are dealing with Givens here, and invariant (GivenInv) from
    -- Note [TcLevel invariants] in GHC.Tc.Utils.TcType ensures that there can't
    -- be a touchable meta tyvar.  If this wasn't true, you might worry that,
    -- at level 3, a meta-tv alpha[3] gets unified with skolem b[2], and thereby
    -- becomes "outer" even though its level number says it isn't.
  | otherwise = False  -- Coercion variables; doesn't much matter

-- | Returns Given constraints that might,
-- potentially, match the given pred. This is used when checking to see if a
-- Given might overlap with an instance. See Note [Instance and Given overlap]
-- in "GHC.Tc.Solver.Interact"
matchableGivens :: CtLoc -> PredType -> InertSet -> Cts
matchableGivens loc_w pred_w inerts@(IS { inert_cans = inert_cans })
  = filterBag matchable_given all_relevant_givens
  where
    -- just look in class constraints and irreds. matchableGivens does get called
    -- for ~R constraints, but we don't need to look through equalities, because
    -- canonical equalities are used for rewriting. We'll only get caught by
    -- non-canonical -- that is, irreducible -- equalities.
    all_relevant_givens :: Cts
    all_relevant_givens
      | Just (clas, _) <- getClassPredTys_maybe pred_w
      = findDictsByClass (inert_dicts inert_cans) clas
        `unionBags` inert_irreds inert_cans
      | otherwise
      = inert_irreds inert_cans

    matchable_given :: Ct -> Bool
    matchable_given ct
      | CtGiven { ctev_loc = loc_g, ctev_pred = pred_g } <- ctEvidence ct
      = mightEqualLater inerts pred_g loc_g pred_w loc_w
      | otherwise
      = False

mightEqualLater :: InertSet -> TcPredType -> CtLoc -> TcPredType -> CtLoc -> Bool
-- See Note [What might equal later?]
-- Used to implement logic in Note [Instance and Given overlap] in GHC.Tc.Solver.Interact
mightEqualLater inert_set given_pred given_loc wanted_pred wanted_loc
  | prohibitedSuperClassSolve given_loc wanted_loc
  = False

  | otherwise
  = case tcUnifyTysFG bind_fun [flattened_given] [flattened_wanted] of
      SurelyApart              -> False  -- types that are surely apart do not equal later
      MaybeApart MARInfinite _ -> False  -- see Example 7 in the Note.
      _                        -> True
  where
    in_scope = mkInScopeSet $ tyCoVarsOfTypes [given_pred, wanted_pred]

    -- NB: flatten both at the same time, so that we can share mappings
    -- from type family applications to variables, and also to guarantee
    -- that the fresh variables are really fresh between the given and
    -- the wanted. Flattening both at the same time is needed to get
    -- Example 10 from the Note.
    ([flattened_given, flattened_wanted], var_mapping)
      = flattenTysX in_scope [given_pred, wanted_pred]

    bind_fun :: BindFun
    bind_fun tv rhs_ty
      | isMetaTyVar tv
      , can_unify tv (metaTyVarInfo tv) rhs_ty
        -- this checks for CycleBreakerTvs and TyVarTvs; forgetting
        -- the latter was #19106.
      = BindMe   -- See Examples 4, 5, and 6 from the Note

      | Just (_fam_tc, fam_args) <- lookupVarEnv var_mapping tv
      , anyFreeVarsOfTypes mentions_meta_ty_var fam_args
      = BindMe

      | otherwise
      = Apart

    -- True for TauTv and TyVarTv (and RuntimeUnkTv) meta-tyvars
    -- (as they can be unified)
    -- and also for CycleBreakerTvs that mention meta-tyvars
    mentions_meta_ty_var :: TyVar -> Bool
    mentions_meta_ty_var tv
      | isMetaTyVar tv
      = case metaTyVarInfo tv of
          -- See Examples 8 and 9 in the Note
          CycleBreakerTv -> anyFreeVarsOfType mentions_meta_ty_var
                              (lookupCycleBreakerVar tv inert_set)
          _              -> True
      | otherwise
      = False

    -- like startSolvingByUnification, but allows cbv variables to unify
    can_unify :: TcTyVar -> MetaInfo -> Type -> Bool
    can_unify _lhs_tv TyVarTv rhs_ty  -- see Example 3 from the Note
      | Just rhs_tv <- tcGetTyVar_maybe rhs_ty
      = case tcTyVarDetails rhs_tv of
          MetaTv { mtv_info = TyVarTv } -> True
          MetaTv {}                     -> False  -- could unify with anything
          SkolemTv {}                   -> True
          RuntimeUnk                    -> True
      | otherwise  -- not a var on the RHS
      = False
    can_unify lhs_tv _other _rhs_ty = mentions_meta_ty_var lhs_tv

prohibitedSuperClassSolve :: CtLoc -> CtLoc -> Bool
-- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance
prohibitedSuperClassSolve from_loc solve_loc
  | InstSCOrigin _ given_size <- ctLocOrigin from_loc
  , ScOrigin wanted_size <- ctLocOrigin solve_loc
  = given_size >= wanted_size
  | otherwise
  = False

{- Note [What might equal later?]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We must determine whether a Given might later equal a Wanted. We
definitely need to account for the possibility that any metavariable
might be arbitrarily instantiated. Yet we do *not* want
to allow skolems to be instantiated, as we've already rewritten
with respect to any Givens. (We're solving a Wanted here, and so
all Givens have already been processed.)

This is best understood by example.

1. C alpha ~? C Int

   That given certainly might match later.

2. C a ~? C Int

   No.
   No new givens are going to arise that will get the `a` to rewrite to Int.

3. C alpha[tv] ~? C Int

   That alpha[tv] is a TyVarTv, unifiable only with other type variables.
   It cannot equal later.

4. C (F alpha) ~? C Int

   Sure -- that can equal later, if we learn something useful about alpha.

5. C (F alpha[tv]) ~? C Int

   This, too, might equal later. Perhaps we have [G] F b ~ Int elsewhere.
   Or maybe we have C (F alpha[tv] beta[tv]), these unify with each other,
   and F x x = Int. Remember: returning True doesn't commit ourselves to
   anything.

6. C (F a) ~? C a

   No, this won't match later. If we could rewrite (F a) or a, we would
   have by now. But see also Red Herring below.

7. C (Maybe alpha) ~? C alpha

   We say this cannot equal later, because it would require
   alpha := Maybe (Maybe (Maybe ...)). While such a type can be contrived,
   we choose not to worry about it. See Note [Infinitary substitution in
   lookup] in GHC.Core.InstEnv. Getting this wrong led to #19107, tested in
   typecheck/should_compile/T19107.

8. C cbv ~? C Int where cbv = F a

   The cbv is a cycle-breaker var which stands for F a. See
   Note [Type equality cycles] in GHC.Tc.Solver.Canonical.
   This is just like case 6, and we say "no". Saying "no" here is
   essential in getting the parser to type-check, with its use of
   DisambECP.

9. C cbv ~? C Int where cbv = F alpha

   Here, we might indeed equal later. Distinguishing between this case and
   Example 8 is why we need the InertSet in mightEqualLater.

10. C (F alpha, Int) ~? C (Bool, F alpha)

   This cannot equal later, because F a would have to equal both Bool and
   Int.

To deal with type family applications, we use the Core flattener. See
Note [Flattening type-family applications when matching instances] in
GHC.Core.Unify. The Core flattener replaces all type family applications
with fresh variables. The next question: should we allow these fresh
variables in the domain of a unifying substitution?
A type family application that mentions only skolems (example 6) is settled: any skolems would have been rewritten w.r.t. Givens by now. These type family applications match only themselves. A type family application that mentions metavariables, on the other hand, can match anything. So, if the original type family application contains a metavariable, we use BindMe to tell the unifier to allow it in the substitution. On the other hand, a type family application with only skolems is considered rigid. This treatment fixes #18910 and is tested in typecheck/should_compile/InstanceGivenOverlap{,2} Red Herring ~~~~~~~~~~~ In #21208, we have this scenario: instance forall b. C b [G] C a[sk] [W] C (F a[sk]) What should we do with that wanted? According to the logic above, the Given cannot match later (this is example 6), and so we use the global instance. But wait, you say: What if we learn later (say by a future type instance F a = a) that F a unifies with a? That looks like the Given might really match later! This mechanism described in this Note is *not* about this kind of situation, however. It is all asking whether a Given might match the Wanted *in this run of the solver*. It is *not* about whether a variable might be instantiated so that the Given matches, or whether a type instance introduced in a downstream module might make the Given match. The reason we care about what might match later is only about avoiding order-dependence. That is, we don't want to commit to a course of action that depends on seeing constraints in a certain order. But an instantiation of a variable and a later type instance don't introduce order dependency in this way, and so mightMatchLater is right to ignore these possibilities. Here is an example, with no type families, that is perhaps clearer: instance forall b. C (Maybe b) [G] C (Maybe Int) [W] C (Maybe a) What to do? We *might* say that the Given could match later and should thus block us from using the global instance. 
But we don't do this. Instead, we rely on class coherence to say that
choosing the global instance is just fine, even if later we call a
function with (a := Int). After all, in this run of the solver,
[G] C (Maybe Int) will definitely never match [W] C (Maybe a). (Recall
that we process Givens before Wanteds, so there is no [G] a ~ Int
hanging about unseen.)

Interestingly, in the first case (from #21208), the behavior changed
between GHC 8.10.7 and GHC 9.2, with the latter behaving correctly and
the former reporting overlapping instances.

Test case: typecheck/should_compile/T21208.

-}

{- *********************************************************************
*                                                                      *
    Cycle breakers
*                                                                      *
********************************************************************* -}

-- | Return the type family application a CycleBreakerTv maps to.
lookupCycleBreakerVar :: TcTyVar    -- ^ cbv, must be a CycleBreakerTv
                      -> InertSet
                      -> TcType    -- ^ type family application the cbv maps to
lookupCycleBreakerVar cbv (IS { inert_cycle_breakers = cbvs_stack })
  -- This function looks at every environment in the stack. This is necessary
  -- to avoid #20231. This function (and its one usage site) is the only reason
  -- that we store a stack instead of just the top environment.
  | Just tyfam_app <- assert (isCycleBreakerTyVar cbv) $
                      firstJusts (NE.map (lookup cbv) cbvs_stack)
  = tyfam_app
  | otherwise
  = pprPanic "lookupCycleBreakerVar found an unbound cycle breaker"
             (ppr cbv $$ ppr cbvs_stack)

-- | Push a fresh environment onto the cycle-breaker var stack. Useful
-- when entering a nested implication.
pushCycleBreakerVarStack :: CycleBreakerVarStack -> CycleBreakerVarStack
pushCycleBreakerVarStack = ([] <|)

-- | Add a new cycle-breaker binding to the top environment on the stack.
insertCycleBreakerBinding :: TcTyVar   -- ^ cbv, must be a CycleBreakerTv
                          -> TcType    -- ^ cbv's expansion
insertCycleBreakerBinding :: TcTyVar -- ^ cbv, must be a CycleBreakerTv -> TcType -- ^ cbv's expansion -> CycleBreakerVarStack -> CycleBreakerVarStack insertCycleBreakerBinding cbv expansion (top_env :| rest_envs) = assert (isCycleBreakerTyVar cbv) $ ((cbv, expansion) : top_env) :| rest_envs -- | Perform a monadic operation on all pairs in the top environment -- in the stack. forAllCycleBreakerBindings_ :: Monad m => CycleBreakerVarStack -> (TcTyVar -> TcType -> m ()) -> m () forAllCycleBreakerBindings_ (top_env :| _rest_envs) action = forM_ top_env (uncurry action) {-# INLINABLE forAllCycleBreakerBindings_ #-} -- to allow SPECIALISE later ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Solver/Types.hs0000644000000000000000000002634014472400113021121 0ustar0000000000000000{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GADTs #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | Utility types used within the constraint solver module GHC.Tc.Solver.Types ( -- Inert CDictCans DictMap, emptyDictMap, findDictsByClass, addDict, addDictsByClass, delDict, foldDicts, filterDicts, findDict, dictsToBag, partitionDicts, FunEqMap, emptyFunEqs, foldFunEqs, findFunEq, insertFunEq, findFunEqsByTyCon, TcAppMap, emptyTcAppMap, isEmptyTcAppMap, insertTcApp, alterTcApp, filterTcAppMap, tcAppMapToBag, foldTcAppMap, EqualCtList, filterEqualCtList, addToEqualCtList ) where import GHC.Prelude import GHC.Tc.Types.Constraint import GHC.Tc.Types.Origin import GHC.Tc.Utils.TcType import GHC.Core.Class import GHC.Core.Map.Type import GHC.Core.Predicate import GHC.Core.TyCon import GHC.Core.TyCon.Env import GHC.Data.Bag import GHC.Data.Maybe import GHC.Data.TrieMap import GHC.Utils.Constants import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain {- ********************************************************************* * * TcAppMap * * ************************************************************************ Note [Use loose types in inert set] 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Whenever we are looking up an inert dictionary (CDictCan) or function equality (CEqCan), we use a TcAppMap, which uses the Unique of the class/type family tycon and then a trie which maps the arguments. This trie does *not* need to match the kinds of the arguments; this Note explains why. Consider the types ty0 = (T ty1 ty2 ty3 ty4) and ty0' = (T ty1' ty2' ty3' ty4'), where ty4 and ty4' have different kinds. Let's further assume that both types ty0 and ty0' are well-typed. Because the kind of T is closed, it must be that one of the ty1..ty3 does not match ty1'..ty3' (and that the kind of the fourth argument to T is dependent on whichever one changed). Since we are matching all arguments, during the inert-set lookup, we know that ty1..ty3 do indeed match ty1'..ty3'. Therefore, the kind of ty4 and ty4' must match, too -- without ever looking at it. Accordingly, we use LooseTypeMap, which skips the kind check when looking up a type. I (Richard E) believe this is just an optimization, and that looking at kinds would be harmless. -} type TcAppMap a = DTyConEnv (ListMap LooseTypeMap a) -- Indexed by tycon then the arg types, using "loose" matching, where -- we don't require kind equality. This allows, for example, (a |> co) -- to match (a). 
-- See Note [Use loose types in inert set] -- Used for types and classes; hence UniqDFM -- See Note [foldTM determinism] in GHC.Data.TrieMap for why we use DTyConEnv here isEmptyTcAppMap :: TcAppMap a -> Bool isEmptyTcAppMap m = isEmptyDTyConEnv m emptyTcAppMap :: TcAppMap a emptyTcAppMap = emptyDTyConEnv findTcApp :: TcAppMap a -> TyCon -> [Type] -> Maybe a findTcApp m tc tys = do { tys_map <- lookupDTyConEnv m tc ; lookupTM tys tys_map } delTcApp :: TcAppMap a -> TyCon -> [Type] -> TcAppMap a delTcApp m tc tys = adjustDTyConEnv (deleteTM tys) m tc insertTcApp :: TcAppMap a -> TyCon -> [Type] -> a -> TcAppMap a insertTcApp m tc tys ct = alterDTyConEnv alter_tm m tc where alter_tm mb_tm = Just (insertTM tys ct (mb_tm `orElse` emptyTM)) alterTcApp :: forall a. TcAppMap a -> TyCon -> [Type] -> XT a -> TcAppMap a alterTcApp m tc tys upd = alterDTyConEnv alter_tm m tc where alter_tm :: Maybe (ListMap LooseTypeMap a) -> Maybe (ListMap LooseTypeMap a) alter_tm m_elt = Just (alterTM tys upd (m_elt `orElse` emptyTM)) filterTcAppMap :: forall a. 
(a -> Bool) -> TcAppMap a -> TcAppMap a filterTcAppMap f m = mapMaybeDTyConEnv one_tycon m where one_tycon :: ListMap LooseTypeMap a -> Maybe (ListMap LooseTypeMap a) one_tycon tm | isEmptyTM filtered_tm = Nothing | otherwise = Just filtered_tm where filtered_tm = filterTM f tm tcAppMapToBag :: TcAppMap a -> Bag a tcAppMapToBag m = foldTcAppMap consBag m emptyBag foldTcAppMap :: (a -> b -> b) -> TcAppMap a -> b -> b foldTcAppMap k m z = foldDTyConEnv (foldTM k) z m {- ********************************************************************* * * DictMap * * ********************************************************************* -} type DictMap a = TcAppMap a emptyDictMap :: DictMap a emptyDictMap = emptyTcAppMap findDict :: DictMap a -> CtLoc -> Class -> [Type] -> Maybe a findDict m loc cls tys | hasIPSuperClasses cls tys -- See Note [Tuples hiding implicit parameters] = Nothing | Just {} <- isCallStackPred cls tys , isPushCallStackOrigin (ctLocOrigin loc) = Nothing -- See Note [Solving CallStack constraints] | otherwise = findTcApp m (classTyCon cls) tys findDictsByClass :: DictMap a -> Class -> Bag a findDictsByClass m cls | Just tm <- lookupDTyConEnv m (classTyCon cls) = foldTM consBag tm emptyBag | otherwise = emptyBag delDict :: DictMap a -> Class -> [Type] -> DictMap a delDict m cls tys = delTcApp m (classTyCon cls) tys addDict :: DictMap a -> Class -> [Type] -> a -> DictMap a addDict m cls tys item = insertTcApp m (classTyCon cls) tys item addDictsByClass :: DictMap Ct -> Class -> Bag Ct -> DictMap Ct addDictsByClass m cls items = extendDTyConEnv m (classTyCon cls) (foldr add emptyTM items) where add ct@(CDictCan { cc_tyargs = tys }) tm = insertTM tys ct tm add ct _ = pprPanic "addDictsByClass" (ppr ct) filterDicts :: (Ct -> Bool) -> DictMap Ct -> DictMap Ct filterDicts f m = filterTcAppMap f m partitionDicts :: (Ct -> Bool) -> DictMap Ct -> (Bag Ct, DictMap Ct) partitionDicts f m = foldTcAppMap k m (emptyBag, emptyDictMap) where k ct (yeses, noes) | f ct = (ct 
`consBag` yeses, noes) | otherwise = (yeses, add ct noes) add ct@(CDictCan { cc_class = cls, cc_tyargs = tys }) m = addDict m cls tys ct add ct _ = pprPanic "partitionDicts" (ppr ct) dictsToBag :: DictMap a -> Bag a dictsToBag = tcAppMapToBag foldDicts :: (a -> b -> b) -> DictMap a -> b -> b foldDicts = foldTcAppMap {- Note [Tuples hiding implicit parameters] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f,g :: (?x::Int, C a) => a -> a f v = let ?x = 4 in g v The call to 'g' gives rise to a Wanted constraint (?x::Int, C a). We must /not/ solve this from the Given (?x::Int, C a), because of the intervening binding for (?x::Int). #14218. We deal with this by arranging that we always fail when looking up a tuple constraint that hides an implicit parameter. Note that this applies * both to the inert_dicts (lookupInertDict) * and to the solved_dicts (looukpSolvedDict) An alternative would be not to extend these sets with such tuple constraints, but it seemed more direct to deal with the lookup. Note [Solving CallStack constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [Overview of implicit CallStacks] in GHc.Tc.Types.Evidence. Suppose f :: HasCallStack => blah. Then * Each call to 'f' gives rise to [W] s1 :: IP "callStack" CallStack -- CtOrigin = OccurrenceOf f with a CtOrigin that says "OccurrenceOf f". Remember that HasCallStack is just shorthand for IP "callStack" CallStack See Note [Overview of implicit CallStacks] in GHC.Tc.Types.Evidence * We cannonicalise such constraints, in GHC.Tc.Solver.Canonical.canClassNC, by pushing the call-site info on the stack, and changing the CtOrigin to record that has been done. Bind: s1 = pushCallStack s2 [W] s2 :: IP "callStack" CallStack -- CtOrigin = IPOccOrigin * Then, and only then, we can solve the constraint from an enclosing Given. So we must be careful /not/ to solve 's1' from the Givens. Again, we ensure this by arranging that findDict always misses when looking up such constraints. 
-} {- ********************************************************************* * * FunEqMap * * ********************************************************************* -} type FunEqMap a = TcAppMap a -- A map whose key is a (TyCon, [Type]) pair emptyFunEqs :: TcAppMap a emptyFunEqs = emptyTcAppMap findFunEq :: FunEqMap a -> TyCon -> [Type] -> Maybe a findFunEq m tc tys = findTcApp m tc tys findFunEqsByTyCon :: FunEqMap a -> TyCon -> [a] -- Get inert function equation constraints that have the given tycon -- in their head. Not that the constraints remain in the inert set. -- We use this to check for wanted interactions with built-in type-function -- constructors. findFunEqsByTyCon m tc | Just tm <- lookupDTyConEnv m tc = foldTM (:) tm [] | otherwise = [] foldFunEqs :: (a -> b -> b) -> FunEqMap a -> b -> b foldFunEqs = foldTcAppMap insertFunEq :: FunEqMap a -> TyCon -> [Type] -> a -> FunEqMap a insertFunEq m tc tys val = insertTcApp m tc tys val {- ********************************************************************* * * EqualCtList * * ********************************************************************* -} {- Note [EqualCtList invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * All are equalities * All these equalities have the same LHS * No element of the list can rewrite any other Accordingly, this list is either empty, contains one element, or contains a Given representational equality and a Wanted nominal one. 
-} type EqualCtList = [Ct] -- See Note [EqualCtList invariants] addToEqualCtList :: Ct -> EqualCtList -> EqualCtList -- See Note [EqualCtList invariants] addToEqualCtList ct old_eqs | debugIsOn = case ct of CEqCan { cc_lhs = TyVarLHS tv } -> assert (all (shares_lhs tv) old_eqs) $ assertPpr (null bad_prs) (vcat [ text "bad_prs" <+> ppr bad_prs , text "ct:old_eqs" <+> ppr (ct : old_eqs) ]) $ (ct : old_eqs) _ -> pprPanic "addToEqualCtList not CEqCan" (ppr ct) | otherwise = ct : old_eqs where shares_lhs tv (CEqCan { cc_lhs = TyVarLHS old_tv }) = tv == old_tv shares_lhs _ _ = False bad_prs = filter is_bad_pair (distinctPairs (ct : old_eqs)) is_bad_pair (ct1,ct2) = ctFlavourRole ct1 `eqCanRewriteFR` ctFlavourRole ct2 distinctPairs :: [a] -> [(a,a)] -- distinctPairs [x1,...xn] is the list of all pairs [ ...(xi, xj)...] -- where i /= j -- NB: does not return pairs (xi,xi), which would be stupid in the -- context of addToEqualCtList (#22645) distinctPairs [] = [] distinctPairs (x:xs) = concatMap (\y -> [(x,y),(y,x)]) xs ++ distinctPairs xs -- returns Nothing when the new list is empty, to keep the environments smaller filterEqualCtList :: (Ct -> Bool) -> EqualCtList -> Maybe EqualCtList filterEqualCtList pred cts | null new_list = Nothing | otherwise = Just new_list where new_list = filter pred cts ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types.hs0000644000000000000000000022564414472400113017657 0ustar0000000000000000 {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE PatternSynonyms #-} {- (c) The University of Glasgow 2006-2012 (c) The GRASP Project, Glasgow University, 1992-2002 -} -- | Various types used during typechecking. -- -- Please see "GHC.Tc.Utils.Monad" as well for operations on these types. You probably -- want to import it, instead of this module. -- -- All the monads exported here are built on top of the same IOEnv monad. 
The -- monad functions like a Reader monad in the way it passes the environment -- around. This is done to allow the environment to be manipulated in a stack -- like fashion when entering expressions... etc. -- -- For state that is global and should be returned at the end (e.g not part -- of the stack mechanism), you should use a TcRef (= IORef) to store them. module GHC.Tc.Types( TcRnIf, TcRn, TcM, RnM, IfM, IfL, IfG, -- The monad is opaque outside this module TcRef, -- The environment types Env(..), TcGblEnv(..), TcLclEnv(..), setLclEnvTcLevel, getLclEnvTcLevel, setLclEnvLoc, getLclEnvLoc, IfGblEnv(..), IfLclEnv(..), tcVisibleOrphanMods, RewriteEnv(..), -- Frontend types (shouldn't really be here) FrontendResult(..), -- Renamer types ErrCtxt, RecFieldEnv, pushErrCtxt, pushErrCtxtSameOrigin, ImportAvails(..), emptyImportAvails, plusImportAvails, WhereFrom(..), mkModDeps, -- Typechecker types TcTypeEnv, TcBinderStack, TcBinder(..), TcTyThing(..), tcTyThingTyCon_maybe, PromotionErr(..), IdBindingInfo(..), ClosedTypeId, RhsNames, IsGroupClosed(..), SelfBootInfo(..), bootExports, tcTyThingCategory, pprTcTyThingCategory, peCategory, pprPECategory, CompleteMatch, CompleteMatches, -- Template Haskell ThStage(..), SpliceType(..), PendingStuff(..), topStage, topAnnStage, topSpliceStage, ThLevel, impLevel, outerLevel, thLevel, ForeignSrcLang(..), THDocs, DocLoc(..), ThBindEnv, -- Arrows ArrowCtxt(..), -- TcSigInfo TcSigFun, TcSigInfo(..), TcIdSigInfo(..), TcIdSigInst(..), TcPatSynInfo(..), isPartialSig, hasCompleteSig, -- Misc other types TcId, TcIdSet, NameShape(..), removeBindingShadowing, getPlatform, -- Constraint solver plugins TcPlugin(..), TcPluginSolveResult(TcPluginContradiction, TcPluginOk, ..), TcPluginRewriteResult(..), TcPluginSolver, TcPluginRewriter, TcPluginM(runTcPluginM), unsafeTcPluginTcM, -- Defaulting plugin DefaultingPlugin(..), DefaultingProposal(..), FillDefaulting, DefaultingPluginResult, -- Role annotations RoleAnnotEnv, emptyRoleAnnotEnv, 
mkRoleAnnotEnv, lookupRoleAnnot, getRoleAnnots, -- Linting lintGblEnv, -- Diagnostics TcRnMessage ) where import GHC.Prelude import GHC.Platform import GHC.Driver.Env import GHC.Driver.Session import {-# SOURCE #-} GHC.Driver.Hooks import GHC.Hs import GHC.Tc.Utils.TcType import GHC.Tc.Types.Constraint import GHC.Tc.Types.Origin import GHC.Tc.Types.Evidence import {-# SOURCE #-} GHC.Tc.Errors.Hole.FitTypes ( HoleFitPlugin ) import GHC.Tc.Errors.Types import GHC.Core.Reduction ( Reduction(..) ) import GHC.Core.Type import GHC.Core.TyCon ( TyCon, tyConKind ) import GHC.Core.PatSyn ( PatSyn ) import GHC.Core.Lint ( lintAxioms ) import GHC.Core.UsageEnv import GHC.Core.InstEnv import GHC.Core.FamInstEnv import GHC.Core.Predicate import GHC.Types.Id ( idType, idName ) import GHC.Types.FieldLabel ( FieldLabel ) import GHC.Types.Fixity.Env import GHC.Types.Annotations import GHC.Types.CompleteMatch import GHC.Types.Name.Reader import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Types.Avail import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.TypeEnv import GHC.Types.TyThing import GHC.Types.SourceFile import GHC.Types.SrcLoc import GHC.Types.Var.Set import GHC.Types.Unique.FM import GHC.Types.Basic import GHC.Types.CostCentre.State import GHC.Types.HpcInfo import GHC.Data.IOEnv import GHC.Data.Bag import GHC.Data.List.SetOps import GHC.Unit import GHC.Unit.Module.Warnings import GHC.Unit.Module.Deps import GHC.Unit.Module.ModDetails import GHC.Utils.Error import GHC.Utils.Outputable import GHC.Utils.Fingerprint import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Logger import GHC.Builtin.Names ( isUnboundName ) import Data.Set ( Set ) import qualified Data.Set as S import Data.Map ( Map ) import Data.Dynamic ( Dynamic ) import Data.Typeable ( TypeRep ) import Data.Maybe ( mapMaybe ) import GHCi.Message import GHCi.RemoteTypes import qualified Language.Haskell.TH as TH import GHC.Driver.Env.KnotVars import 
GHC.Linker.Types -- | A 'NameShape' is a substitution on 'Name's that can be used -- to refine the identities of a hole while we are renaming interfaces -- (see "GHC.Iface.Rename"). Specifically, a 'NameShape' for -- 'ns_module_name' @A@, defines a mapping from @{A.T}@ -- (for some 'OccName' @T@) to some arbitrary other 'Name'. -- -- The most intruiging thing about a 'NameShape', however, is -- how it's constructed. A 'NameShape' is *implied* by the -- exported 'AvailInfo's of the implementor of an interface: -- if an implementor of signature @\@ exports @M.T@, you implicitly -- define a substitution from @{H.T}@ to @M.T@. So a 'NameShape' -- is computed from the list of 'AvailInfo's that are exported -- by the implementation of a module, or successively merged -- together by the export lists of signatures which are joining -- together. -- -- It's not the most obvious way to go about doing this, but it -- does seem to work! -- -- NB: Can't boot this and put it in NameShape because then we -- start pulling in too many DynFlags things. data NameShape = NameShape { ns_mod_name :: ModuleName, ns_exports :: [AvailInfo], ns_map :: OccEnv Name } {- ************************************************************************ * * Standard monad definition for TcRn All the combinators for the monad can be found in GHC.Tc.Utils.Monad * * ************************************************************************ The monad itself has to be defined here, because it is mentioned by ErrCtxt -} type TcRnIf a b = IOEnv (Env a b) type TcRn = TcRnIf TcGblEnv TcLclEnv -- Type inference type IfM lcl = TcRnIf IfGblEnv lcl -- Iface stuff type IfG = IfM () -- Top level type IfL = IfM IfLclEnv -- Nested -- TcRn is the type-checking and renaming monad: the main monad that -- most type-checking takes place in. 
The global environment is -- 'TcGblEnv', which tracks all of the top-level type-checking -- information we've accumulated while checking a module, while the -- local environment is 'TcLclEnv', which tracks local information as -- we move inside expressions. -- | Historical "renaming monad" (now it's just 'TcRn'). type RnM = TcRn -- | Historical "type-checking monad" (now it's just 'TcRn'). type TcM = TcRn -- We 'stack' these envs through the Reader like monad infrastructure -- as we move into an expression (although the change is focused in -- the lcl type). data Env gbl lcl = Env { env_top :: !HscEnv, -- Top-level stuff that never changes -- Includes all info about imported things -- BangPattern is to fix leak, see #15111 env_um :: {-# UNPACK #-} !Char, -- Mask for Uniques env_gbl :: gbl, -- Info about things defined at the top level -- of the module being compiled env_lcl :: lcl -- Nested stuff; changes as we go into } instance ContainsDynFlags (Env gbl lcl) where extractDynFlags env = hsc_dflags (env_top env) instance ContainsHooks (Env gbl lcl) where extractHooks env = hsc_hooks (env_top env) instance ContainsLogger (Env gbl lcl) where extractLogger env = hsc_logger (env_top env) instance ContainsModule gbl => ContainsModule (Env gbl lcl) where extractModule env = extractModule (env_gbl env) {- ************************************************************************ * * * RewriteEnv * The rewriting environment * * ************************************************************************ -} -- | A 'RewriteEnv' carries the necessary context for performing rewrites -- (i.e. type family reductions and following filled-in metavariables) -- in the solver. data RewriteEnv = RE { re_loc :: !CtLoc -- ^ In which context are we rewriting? -- -- Type-checking plugins might want to use this location information -- when emitting new Wanted constraints when rewriting type family -- applications. 
This ensures that such Wanted constraints will, -- when unsolved, give rise to error messages with the -- correct source location. -- Within GHC, we use this field to keep track of reduction depth. -- See Note [Rewriter CtLoc] in GHC.Tc.Solver.Rewrite. , re_flavour :: !CtFlavour , re_eq_rel :: !EqRel -- ^ At what role are we rewriting? -- -- See Note [Rewriter EqRels] in GHC.Tc.Solver.Rewrite , re_rewriters :: !(TcRef RewriterSet) -- ^ See Note [Wanteds rewrite Wanteds] } -- RewriteEnv is mostly used in @GHC.Tc.Solver.Rewrite@, but it is defined -- here so that it can also be passed to rewriting plugins. -- See the 'tcPluginRewrite' field of 'TcPlugin'. {- ************************************************************************ * * The interface environments Used when dealing with IfaceDecls * * ************************************************************************ -} data IfGblEnv = IfGblEnv { -- Some information about where this environment came from; -- useful for debugging. if_doc :: SDoc, -- The type environment for the module being compiled, -- in case the interface refers back to it via a reference that -- was originally a hi-boot file. -- We need the module name so we can test when it's appropriate -- to look in this env. -- See Note [Tying the knot] in GHC.IfaceToCore if_rec_types :: (KnotVars (IfG TypeEnv)) -- Allows a read effect, so it can be in a mutable -- variable; c.f. 
handling the external package type env -- Nothing => interactive stuff, no loops possible } data IfLclEnv = IfLclEnv { -- The module for the current IfaceDecl -- So if we see f = \x -> x -- it means M.f = \x -> x, where M is the if_mod -- NB: This is a semantic module, see -- Note [Identity versus semantic module] if_mod :: !Module, -- Whether or not the IfaceDecl came from a boot -- file or not; we'll use this to choose between -- NoUnfolding and BootUnfolding if_boot :: IsBootInterface, -- The field is used only for error reporting -- if (say) there's a Lint error in it if_loc :: SDoc, -- Where the interface came from: -- .hi file, or GHCi state, or ext core -- plus which bit is currently being examined if_nsubst :: Maybe NameShape, -- This field is used to make sure "implicit" declarations -- (anything that cannot be exported in mi_exports) get -- wired up correctly in typecheckIfacesForMerging. Most -- of the time it's @Nothing@. See Note [Resolving never-exported Names] -- in GHC.IfaceToCore. if_implicits_env :: Maybe TypeEnv, if_tv_env :: FastStringEnv TyVar, -- Nested tyvar bindings if_id_env :: FastStringEnv Id -- Nested id binding } {- ************************************************************************ * * Global typechecker environment * * ************************************************************************ -} -- | 'FrontendResult' describes the result of running the frontend of a Haskell -- module. Currently one always gets a 'FrontendTypecheck', since running the -- frontend involves typechecking a program. hs-sig merges are not handled here. -- -- This data type really should be in GHC.Driver.Env, but it needs -- to have a TcGblEnv which is only defined here. 
data FrontendResult = FrontendTypecheck TcGblEnv -- Note [Identity versus semantic module] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- When typechecking an hsig file, it is convenient to keep track -- of two different "this module" identifiers: -- -- - The IDENTITY module is simply thisPackage + the module -- name; i.e. it uniquely *identifies* the interface file -- we're compiling. For example, p[A=]:A is an -- identity module identifying the requirement named A -- from library p. -- -- - The SEMANTIC module, which is the actual module that -- this signature is intended to represent (e.g. if -- we have a identity module p[A=base:Data.IORef]:A, -- then the semantic module is base:Data.IORef) -- -- Which one should you use? -- -- - In the desugarer and later phases of compilation, -- identity and semantic modules coincide, since we never compile -- signatures (we just generate blank object files for -- hsig files.) -- -- A corrolary of this is that the following invariant holds at any point -- past desugaring, -- -- if I have a Module, this_mod, in hand representing the module -- currently being compiled, -- then moduleUnit this_mod == thisPackage dflags -- -- - For any code involving Names, we want semantic modules. -- See lookupIfaceTop in GHC.Iface.Env, mkIface and addFingerprints -- in GHC.Iface.{Make,Recomp}, and tcLookupGlobal in GHC.Tc.Utils.Env -- -- - When reading interfaces, we want the identity module to -- identify the specific interface we want (such interfaces -- should never be loaded into the EPS). However, if a -- hole module is requested, we look for A.hi -- in the home library we are compiling. (See GHC.Iface.Load.) -- Similarly, in GHC.Rename.Names we check for self-imports using -- identity modules, to allow signatures to import their implementor. 
-- -- - For recompilation avoidance, you want the identity module, -- since that will actually say the specific interface you -- want to track (and recompile if it changes) -- | 'TcGblEnv' describes the top-level of the module at the -- point at which the typechecker is finished work. -- It is this structure that is handed on to the desugarer -- For state that needs to be updated during the typechecking -- phase and returned at end, use a 'TcRef' (= 'IORef'). data TcGblEnv = TcGblEnv { tcg_mod :: Module, -- ^ Module being compiled tcg_semantic_mod :: Module, -- ^ If a signature, the backing module -- See also Note [Identity versus semantic module] tcg_src :: HscSource, -- ^ What kind of module (regular Haskell, hs-boot, hsig) tcg_rdr_env :: GlobalRdrEnv, -- ^ Top level envt; used during renaming tcg_default :: Maybe [Type], -- ^ Types used for defaulting. @Nothing@ => no @default@ decl tcg_fix_env :: FixityEnv, -- ^ Just for things in this module tcg_field_env :: RecFieldEnv, -- ^ Just for things in this module -- See Note [The interactive package] in "GHC.Runtime.Context" tcg_type_env :: TypeEnv, -- ^ Global type env for the module we are compiling now. All -- TyCons and Classes (for this module) end up in here right away, -- along with their derived constructors, selectors. -- -- (Ids defined in this module start in the local envt, though they -- move to the global envt during zonking) -- -- NB: for what "things in this module" means, see -- Note [The interactive package] in "GHC.Runtime.Context" tcg_type_env_var :: KnotVars (IORef TypeEnv), -- Used only to initialise the interface-file -- typechecker in initIfaceTcRn, so that it can see stuff -- bound in this module when dealing with hi-boot recursions -- Updated at intervals (e.g. after dealing with types and classes) tcg_inst_env :: !InstEnv, -- ^ Instance envt for all /home-package/ modules; -- Includes the dfuns in tcg_insts -- NB. 
BangPattern is to fix a leak, see #15111 tcg_fam_inst_env :: !FamInstEnv, -- ^ Ditto for family instances -- NB. BangPattern is to fix a leak, see #15111 tcg_ann_env :: AnnEnv, -- ^ And for annotations -- Now a bunch of things about this module that are simply -- accumulated, but never consulted until the end. -- Nevertheless, it's convenient to accumulate them along -- with the rest of the info from this module. tcg_exports :: [AvailInfo], -- ^ What is exported tcg_imports :: ImportAvails, -- ^ Information about what was imported from where, including -- things bound in this module. Also store Safe Haskell info -- here about transitive trusted package requirements. -- -- There are not many uses of this field, so you can grep for -- all them. -- -- The ImportAvails records information about the following -- things: -- -- 1. All of the modules you directly imported (tcRnImports) -- 2. The orphans (only!) of all imported modules in a GHCi -- session (runTcInteractive) -- 3. The module that instantiated a signature -- 4. Each of the signatures that merged in -- -- It is used in the following ways: -- - imp_orphs is used to determine what orphan modules should be -- visible in the context (tcVisibleOrphanMods) -- - imp_finsts is used to determine what family instances should -- be visible (tcExtendLocalFamInstEnv) -- - To resolve the meaning of the export list of a module -- (tcRnExports) -- - imp_mods is used to compute usage info (mkIfaceTc, deSugar) -- - imp_trust_own_pkg is used for Safe Haskell in interfaces -- (mkIfaceTc, as well as in "GHC.Driver.Main") -- - To create the Dependencies field in interface (mkDependencies) -- These three fields track unused bindings and imports -- See Note [Tracking unused binding and imports] tcg_dus :: DefUses, tcg_used_gres :: TcRef [GlobalRdrElt], tcg_keep :: TcRef NameSet, tcg_th_used :: TcRef Bool, -- ^ @True@ \<=> Template Haskell syntax used. 
-- -- We need this so that we can generate a dependency on the -- Template Haskell package, because the desugarer is going -- to emit loads of references to TH symbols. The reference -- is implicit rather than explicit, so we have to zap a -- mutable variable. tcg_th_splice_used :: TcRef Bool, -- ^ @True@ \<=> A Template Haskell splice was used. -- -- Splices disable recompilation avoidance (see #481) tcg_th_needed_deps :: TcRef ([Linkable], PkgsLoaded), -- ^ The set of runtime dependencies required by this module -- See Note [Object File Dependencies] tcg_dfun_n :: TcRef OccSet, -- ^ Allows us to choose unique DFun names. tcg_merged :: [(Module, Fingerprint)], -- ^ The requirements we merged with; we always have to recompile -- if any of these changed. -- The next fields accumulate the payload of the module -- The binds, rules and foreign-decl fields are collected -- initially in un-zonked form and are finally zonked in tcRnSrcDecls tcg_rn_exports :: Maybe [(LIE GhcRn, Avails)], -- Nothing <=> no explicit export list -- Is always Nothing if we don't want to retain renamed -- exports. -- If present contains each renamed export list item -- together with its exported names. tcg_rn_imports :: [LImportDecl GhcRn], -- Keep the renamed imports regardless. They are not -- voluminous and are needed if you want to report unused imports tcg_rn_decls :: Maybe (HsGroup GhcRn), -- ^ Renamed decls, maybe. @Nothing@ \<=> Don't retain renamed -- decls. tcg_dependent_files :: TcRef [FilePath], -- ^ dependencies from addDependentFile tcg_th_topdecls :: TcRef [LHsDecl GhcPs], -- ^ Top-level declarations from addTopDecls tcg_th_foreign_files :: TcRef [(ForeignSrcLang, FilePath)], -- ^ Foreign files emitted from TH. tcg_th_topnames :: TcRef NameSet, -- ^ Exact names bound in top-level declarations in tcg_th_topdecls tcg_th_modfinalizers :: TcRef [(TcLclEnv, ThModFinalizers)], -- ^ Template Haskell module finalizers. -- -- They can use particular local environments. 
tcg_th_coreplugins :: TcRef [String], -- ^ Core plugins added by Template Haskell code. tcg_th_state :: TcRef (Map TypeRep Dynamic), tcg_th_remote_state :: TcRef (Maybe (ForeignRef (IORef QState))), -- ^ Template Haskell state tcg_th_docs :: TcRef THDocs, -- ^ Docs added in Template Haskell via @putDoc@. tcg_ev_binds :: Bag EvBind, -- Top-level evidence bindings -- Things defined in this module, or (in GHCi) -- in the declarations for a single GHCi command. -- For the latter, see Note [The interactive package] in -- GHC.Runtime.Context tcg_tr_module :: Maybe Id, -- Id for $trModule :: GHC.Unit.Module -- for which every module has a top-level defn -- except in GHCi in which case we have Nothing tcg_binds :: LHsBinds GhcTc, -- Value bindings in this module tcg_sigs :: NameSet, -- ...Top-level names that *lack* a signature tcg_imp_specs :: [LTcSpecPrag], -- ...SPECIALISE prags for imported Ids tcg_warns :: (Warnings GhcRn), -- ...Warnings and deprecations tcg_anns :: [Annotation], -- ...Annotations tcg_tcs :: [TyCon], -- ...TyCons and Classes tcg_ksigs :: NameSet, -- ...Top-level TyCon names that *lack* a signature tcg_insts :: [ClsInst], -- ...Instances tcg_fam_insts :: [FamInst], -- ...Family instances tcg_rules :: [LRuleDecl GhcTc], -- ...Rules tcg_fords :: [LForeignDecl GhcTc], -- ...Foreign import & exports tcg_patsyns :: [PatSyn], -- ...Pattern synonyms tcg_doc_hdr :: Maybe (LHsDoc GhcRn), -- ^ Maybe Haddock header docs tcg_hpc :: !AnyHpcUsage, -- ^ @True@ if any part of the -- prog uses hpc instrumentation. -- NB. BangPattern is to fix a leak, see #15111 tcg_self_boot :: SelfBootInfo, -- ^ Whether this module has a -- corresponding hi-boot file tcg_main :: Maybe Name, -- ^ The Name of the main -- function, if this module is -- the main module. tcg_safe_infer :: TcRef Bool, -- ^ Has the typechecker inferred this module as -XSafe (Safe Haskell)? 
-- See Note [Safe Haskell Overlapping Instances Implementation], -- although this is used for more than just that failure case. tcg_safe_infer_reasons :: TcRef (Messages TcRnMessage), -- ^ Unreported reasons why tcg_safe_infer is False. -- INVARIANT: If this Messages is non-empty, then tcg_safe_infer is False. -- It may be that tcg_safe_infer is False but this is empty, if no reasons -- are supplied (#19714), or if those reasons have already been -- reported by GHC.Driver.Main.markUnsafeInfer tcg_tc_plugin_solvers :: [TcPluginSolver], -- ^ A list of user-defined type-checking plugins for constraint solving. tcg_tc_plugin_rewriters :: UniqFM TyCon [TcPluginRewriter], -- ^ A collection of all the user-defined type-checking plugins for rewriting -- type family applications, collated by their type family 'TyCon's. tcg_defaulting_plugins :: [FillDefaulting], -- ^ A list of user-defined plugins for type defaulting plugins. tcg_hf_plugins :: [HoleFitPlugin], -- ^ A list of user-defined plugins for hole fit suggestions. tcg_top_loc :: RealSrcSpan, -- ^ The RealSrcSpan this module came from tcg_static_wc :: TcRef WantedConstraints, -- ^ Wanted constraints of static forms. -- See Note [Constraints in static forms]. tcg_complete_matches :: !CompleteMatches, -- ^ Tracking indices for cost centre annotations tcg_cc_st :: TcRef CostCentreState, tcg_next_wrapper_num :: TcRef (ModuleEnv Int) -- ^ See Note [Generating fresh names for FFI wrappers] } -- NB: topModIdentity, not topModSemantic! -- Definition sites of orphan identities will be identity modules, not semantic -- modules. -- Note [Constraints in static forms] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- When a static form produces constraints like -- -- f :: StaticPtr (Bool -> String) -- f = static show -- -- we collect them in tcg_static_wc and resolve them at the end -- of type checking. They need to be resolved separately because -- we don't want to resolve them in the context of the enclosing -- expression. 
Consider -- -- g :: Show a => StaticPtr (a -> String) -- g = static show -- -- If the @Show a0@ constraint that the body of the static form produces was -- resolved in the context of the enclosing expression, then the body of the -- static form wouldn't be closed because the Show dictionary would come from -- g's context instead of coming from the top level. tcVisibleOrphanMods :: TcGblEnv -> ModuleSet tcVisibleOrphanMods tcg_env = mkModuleSet (tcg_mod tcg_env : imp_orphs (tcg_imports tcg_env)) instance ContainsModule TcGblEnv where extractModule env = tcg_semantic_mod env type RecFieldEnv = NameEnv [FieldLabel] -- Maps a constructor name *in this module* -- to the fields for that constructor. -- This is used when dealing with ".." notation in record -- construction and pattern matching. -- The FieldEnv deals *only* with constructors defined in *this* -- module. For imported modules, we get the same info from the -- TypeEnv data SelfBootInfo = NoSelfBoot -- No corresponding hi-boot file | SelfBoot { sb_mds :: ModDetails -- There was a hi-boot file, , sb_tcs :: NameSet } -- defining these TyCons, -- What is sb_tcs used for? See Note [Extra dependencies from .hs-boot files] -- in GHC.Rename.Module bootExports :: SelfBootInfo -> NameSet bootExports boot = case boot of NoSelfBoot -> emptyNameSet SelfBoot { sb_mds = mds} -> let exports = md_exports mds in availsToNameSet exports {- Note [Tracking unused binding and imports] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We gather three sorts of usage information * tcg_dus :: DefUses (defs/uses) Records what is defined in this module and what is used. 
Records *defined* Names (local, top-level) and *used* Names (local or imported) Used (a) to report "defined but not used" (see GHC.Rename.Names.reportUnusedNames) (b) to generate version-tracking usage info in interface files (see GHC.Iface.Make.mkUsedNames) This usage info is mainly gathered by the renamer's gathering of free-variables * tcg_used_gres :: TcRef [GlobalRdrElt] Records occurrences of imported entities. Used only to report unused import declarations Records each *occurrence* an *imported* (not locally-defined) entity. The occurrence is recorded by keeping a GlobalRdrElt for it. These is not the GRE that is in the GlobalRdrEnv; rather it is recorded *after* the filtering done by pickGREs. So it reflect /how that occurrence is in scope/. See Note [GRE filtering] in RdrName. * tcg_keep :: TcRef NameSet Records names of the type constructors, data constructors, and Ids that are used by the constraint solver. The typechecker may use find that some imported or locally-defined things are used, even though they do not appear to be mentioned in the source code: (a) The to/from functions for generic data types (b) Top-level variables appearing free in the RHS of an orphan rule (c) Top-level variables appearing free in a TH bracket See Note [Keeping things alive for Template Haskell] in GHC.Rename.Splice (d) The data constructor of a newtype that is used to solve a Coercible instance (e.g. #10347). Example module T10347 (N, mkN) where import Data.Coerce newtype N a = MkN Int mkN :: Int -> N a mkN = coerce Then we wish to record `MkN` as used, since it is (morally) used to perform the coercion in `mkN`. To do so, the Coercible solver updates tcg_keep's TcRef whenever it encounters a use of `coerce` that crosses newtype boundaries. (e) Record fields that are used to solve HasField constraints (see Note [Unused name reporting and HasField] in GHC.Tc.Instance.Class) The tcg_keep field is used in two distinct ways: * Desugar.addExportFlagsAndRules. 
Where things like (a-c) are locally defined, we should give them an Exported flag, so that the simplifier does not discard them as dead code, and so that they are exposed in the interface file (but not to export to the user). * GHC.Rename.Names.reportUnusedNames. Where newtype data constructors like (d) are imported, we don't want to report them as unused. ************************************************************************ * * The local typechecker environment * * ************************************************************************ Note [The Global-Env/Local-Env story] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During type checking, we keep in the tcg_type_env * All types and classes * All Ids derived from types and classes (constructors, selectors) At the end of type checking, we zonk the local bindings, and as we do so we add to the tcg_type_env * Locally defined top-level Ids Why? Because they are now Ids not TcIds. This final GlobalEnv is a) fed back (via the knot) to typechecking the unfoldings of interface signatures b) used in the ModDetails of this module -} data TcLclEnv -- Changes as we move inside an expression -- Discarded after typecheck/rename; not passed on to desugarer = TcLclEnv { tcl_loc :: RealSrcSpan, -- Source span tcl_ctxt :: [ErrCtxt], -- Error context, innermost on top tcl_in_gen_code :: Bool, -- See Note [Rebindable syntax and HsExpansion] tcl_tclvl :: TcLevel, tcl_th_ctxt :: ThStage, -- Template Haskell context tcl_th_bndrs :: ThBindEnv, -- and binder info -- The ThBindEnv records the TH binding level of in-scope Names -- defined in this module (not imported) -- We can't put this info in the TypeEnv because it's needed -- (and extended) in the renamer, for untyped splices tcl_arrow_ctxt :: ArrowCtxt, -- Arrow-notation context tcl_rdr :: LocalRdrEnv, -- Local name envt -- Maintained during renaming, of course, but also during -- type checking, solely so that when renaming a Template-Haskell -- splice we have the right environment for 
the renamer. -- -- Does *not* include global name envt; may shadow it -- Includes both ordinary variables and type variables; -- they are kept distinct because tyvar have a different -- occurrence constructor (Name.TvOcc) -- We still need the unsullied global name env so that -- we can look up record field names tcl_env :: TcTypeEnv, -- The local type environment: -- Ids and TyVars defined in this module tcl_usage :: TcRef UsageEnv, -- Required multiplicity of bindings is accumulated here. tcl_bndrs :: TcBinderStack, -- Used for reporting relevant bindings, -- and for tidying types tcl_lie :: TcRef WantedConstraints, -- Place to accumulate type constraints tcl_errs :: TcRef (Messages TcRnMessage) -- Place to accumulate diagnostics } setLclEnvTcLevel :: TcLclEnv -> TcLevel -> TcLclEnv setLclEnvTcLevel env lvl = env { tcl_tclvl = lvl } getLclEnvTcLevel :: TcLclEnv -> TcLevel getLclEnvTcLevel = tcl_tclvl setLclEnvLoc :: TcLclEnv -> RealSrcSpan -> TcLclEnv setLclEnvLoc env loc = env { tcl_loc = loc } getLclEnvLoc :: TcLclEnv -> RealSrcSpan getLclEnvLoc = tcl_loc type ErrCtxt = (Bool, TidyEnv -> TcM (TidyEnv, SDoc)) -- Monadic so that we have a chance -- to deal with bound type variables just before error -- message construction -- Bool: True <=> this is a landmark context; do not -- discard it when trimming for display -- These are here to avoid module loops: one might expect them -- in GHC.Tc.Types.Constraint, but they refer to ErrCtxt which refers to TcM. -- Easier to just keep these definitions here, alongside TcM. pushErrCtxt :: CtOrigin -> ErrCtxt -> CtLoc -> CtLoc pushErrCtxt o err loc@(CtLoc { ctl_env = lcl }) = loc { ctl_origin = o, ctl_env = lcl { tcl_ctxt = err : tcl_ctxt lcl } } pushErrCtxtSameOrigin :: ErrCtxt -> CtLoc -> CtLoc -- Just add information w/o updating the origin! 

pushErrCtxtSameOrigin err loc@(CtLoc { ctl_env = lcl })
  = loc { ctl_env = lcl { tcl_ctxt = err : tcl_ctxt lcl } }

-- | The local type environment: maps a 'Name' to what the typechecker
-- currently knows about it (a 'TcTyThing').
type TcTypeEnv = NameEnv TcTyThing

type ThBindEnv = NameEnv (TopLevelFlag, ThLevel)
   -- Domain = all Ids bound in this module (ie not imported)
   -- The TopLevelFlag tells if the binding is syntactically top level.
   -- We need to know this, because the cross-stage persistence story allows
   -- cross-stage at arbitrary types if the Id is bound at top level.
   --
   -- Nota bene: a ThLevel of 'outerLevel' is *not* the same as being
   -- bound at top level!  See Note [Template Haskell levels] in GHC.Tc.Gen.Splice

{- Note [Given Insts]
~~~~~~~~~~~~~~~~~~
Because of GADTs, we have to pass inwards the Insts provided by type signatures
and existential contexts. Consider
        data T a where { T1 :: b -> b -> T [b] }
        f :: Eq a => T a -> Bool
        f (T1 x y) = [x]==[y]

The constructor T1 binds an existential variable 'b', and we need Eq [b].
Well, we have it, because Eq a refines to Eq [b], but we can only spot that
if we pass it inwards.
-}

-- | Type alias for 'IORef'; the convention is we'll use this for mutable
-- bits of data in 'TcGblEnv' which are updated during typechecking and
-- returned at the end.
type TcRef a = IORef a

-- ToDo: when should I refer to it as a 'TcId' instead of an 'Id'?
type TcId    = Id
type TcIdSet = IdSet

---------------------------
-- The TcBinderStack
---------------------------

type TcBinderStack = [TcBinder]
   -- This is a stack of locally-bound ids and tyvars,
   --   innermost on top
   -- Used only in error reporting (relevantBindings in TcError),
   --   and in tidying
   -- We can't use the tcl_env type environment, because it doesn't
   --   keep track of the nesting order

-- | A locally-bound entity, remembered (in 'TcBinderStack') purely so that
-- error messages can report the relevant bindings in nesting order.
data TcBinder
  = TcIdBndr
       TcId
       TopLevelFlag    -- Tells whether the binding is syntactically top-level
                       -- (The monomorphic Ids for a recursive group count
                       --  as not-top-level for this purpose.)

  | TcIdBndr_ExpType  -- Variant that allows the type to be specified as
                      -- an ExpType
       Name
       ExpType
       TopLevelFlag

  | TcTvBndr          -- e.g.  case x of P (y::a) -> blah
       Name            -- We bind the lexical name "a" to the type of y,
       TyVar           -- which might be an utterly different (perhaps
                       -- existential) tyvar

instance Outputable TcBinder where
   ppr (TcIdBndr id top_lvl)           = ppr id <> brackets (ppr top_lvl)
   ppr (TcIdBndr_ExpType id _ top_lvl) = ppr id <> brackets (ppr top_lvl)
   ppr (TcTvBndr name tv)              = ppr name <+> ppr tv

instance HasOccName TcBinder where
    occName (TcIdBndr id _)             = occName (idName id)
    occName (TcIdBndr_ExpType name _ _) = occName name
    occName (TcTvBndr name _)           = occName name

-- fixes #12177
-- Builds up a list of bindings whose OccName has not been seen before
-- i.e., If    ys  = removeBindingShadowing xs
-- then
--  - ys is obtained from xs by deleting some elements
--  - ys has no duplicate OccNames
--  - The first duplicated OccName in xs is retained in ys
-- Overloaded so that it can be used for both GlobalRdrElt in typed-hole
-- substitutions and TcBinder when looking for relevant bindings.

-- | Keep only the first binding seen for each OccName, preserving the
-- original order (see the contract comment above).
removeBindingShadowing :: HasOccName a => [a] -> [a]
removeBindingShadowing bindings = reverse $ fst $ foldl
    (\(bindingAcc, seenNames) binding ->
        if occName binding `elemOccSet` seenNames -- if we've seen it
          then (bindingAcc, seenNames)            -- skip it
          else (binding:bindingAcc, extendOccSet seenNames (occName binding)))
    ([], emptyOccSet) bindings

-- | Get target platform
getPlatform :: TcM Platform
getPlatform = targetPlatform <$> getDynFlags

---------------------------
-- Template Haskell stages and levels
---------------------------

-- | Whether a splice is typed or untyped
data SpliceType = Typed | Untyped

data ThStage    -- See Note [Template Haskell state diagram]
                -- and Note [Template Haskell levels] in GHC.Tc.Gen.Splice
    -- Start at:   Comp
    -- At bracket: wrap current stage in Brack
    -- At splice:  currently Brack: return to previous stage
    --             currently Comp/Splice: compile and run
  = Splice SpliceType -- Inside a top-level splice
                      -- This code will be run *at compile time*;
                      --   the result replaces the splice
                      -- Binding level = 0

  | RunSplice (TcRef [ForeignRef (TH.Q ())])
      -- Set when running a splice, i.e. NOT when renaming or typechecking the
      -- Haskell code for the splice. See Note [RunSplice ThLevel].
      --
      -- Contains a list of mod finalizers collected while executing the splice.
      --
      -- 'addModFinalizer' inserts finalizers here, and from here they are taken
      -- to construct an @HsSpliced@ annotation for untyped splices. See Note
      -- [Delaying modFinalizers in untyped splices] in GHC.Rename.Splice.
      --
      -- For typed splices, the typechecker takes finalizers from here and
      -- inserts them in the list of finalizers in the global environment.
      --
      -- See Note [Collecting modFinalizers in typed splices] in "GHC.Tc.Gen.Splice".

  | Comp        -- Ordinary Haskell code
                -- Binding level = 1

  | Brack                       -- Inside brackets
      ThStage                   --   Enclosing stage
      PendingStuff

data PendingStuff
  = RnPendingUntyped              -- Renaming the inside of an *untyped* bracket
      (TcRef [PendingRnSplice])   -- Pending splices in here

  | RnPendingTyped                -- Renaming the inside of a *typed* bracket

  | TcPending                     -- Typechecking the inside of a typed bracket
      (TcRef [PendingTcSplice])   --   Accumulate pending splices here
      (TcRef WantedConstraints)   --     and type constraints here
      QuoteWrapper                -- A type variable and evidence variable
                                  -- for the overall monad of
                                  -- the bracket. Splices are checked
                                  -- against this monad. The evidence
                                  -- variable is used for desugaring
                                  -- `lift`.

topStage, topAnnStage, topSpliceStage :: ThStage
topStage       = Comp
topAnnStage    = Splice Untyped
topSpliceStage = Splice Untyped

instance Outputable ThStage where
   ppr (Splice _)    = text "Splice"
   ppr (RunSplice _) = text "RunSplice"
   ppr Comp          = text "Comp"
   ppr (Brack s _)   = text "Brack" <> parens (ppr s)

type ThLevel = Int
    -- NB: see Note [Template Haskell levels] in GHC.Tc.Gen.Splice
    -- Incremented when going inside a bracket,
    -- decremented when going inside a splice
    -- NB: ThLevel is one greater than the 'n' in Fig 2 of the
    --     original "Template meta-programming for Haskell" paper

impLevel, outerLevel :: ThLevel
impLevel = 0    -- Imported things; they can be used inside a top level splice
outerLevel = 1  -- Things defined outside brackets

-- | The binding level of a Template Haskell stage; partial: calling it on
-- 'RunSplice' is a panic (see Note [RunSplice ThLevel] below).
thLevel :: ThStage -> ThLevel
thLevel (Splice _)    = 0
thLevel Comp          = 1
thLevel (Brack s _)   = thLevel s + 1
thLevel (RunSplice _) = panic "thLevel: called when running a splice"
                        -- See Note [RunSplice ThLevel].

{- Note [RunSplice ThLevel]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The 'RunSplice' stage is set when executing a splice, and only when running a
splice. In particular it is not set when the splice is renamed or typechecked.

'RunSplice' is needed to provide a reference where 'addModFinalizer' can insert
the finalizer (see Note [Delaying modFinalizers in untyped splices]), and
'addModFinalizer' runs when doing Q things. Therefore, it doesn't make sense to
set 'RunSplice' when renaming or typechecking the splice, where 'Splice',
'Brack' or 'Comp' are used instead.
-}

---------------------------
-- Arrow-notation context
---------------------------

{- Note [Escaping the arrow scope]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In arrow notation, a variable bound by a proc (or enclosed let/kappa)
is not in scope to the left of an arrow tail (-<) or the head of (|..|).
For example

        proc x -> (e1 -< e2)

Here, x is not in scope in e1, but it is in scope in e2.  This can get
a bit complicated:

        let x = 3 in
        proc y -> (proc z -> e1) -< e2

Here, x and z are in scope in e1, but y is not.

We implement this by
recording the environment when passing a proc (using newArrowScope),
and returning to that (using escapeArrowScope) on the left of -< and the
head of (|..|).

All this can be dealt with by the *renamer*. But the type checker needs
to be involved too.  Example (arrowfail001)
  class Foo a where foo :: a -> ()
  data Bar = forall a. Foo a => Bar a
  get :: Bar -> ()
  get = proc x -> case x of Bar a -> foo -< a
Here the call of 'foo' gives rise to a (Foo a) constraint that should not
be captured by the pattern match on 'Bar'.  Rather it should join the
constraints from further out.  So we must capture the constraint bag
from further out in the ArrowCtxt that we push inwards.
-}

data ArrowCtxt   -- Note [Escaping the arrow scope]
  = NoArrowCtxt
  | ArrowCtxt LocalRdrEnv (TcRef WantedConstraints)

---------------------------
-- TcTyThing
---------------------------

-- | A typecheckable thing available in a local context.  Could be
-- 'AGlobal' 'TyThing', but also lexically scoped variables, etc.
-- See "GHC.Tc.Utils.Env" for how to retrieve a 'TyThing' given a 'Name'.

data TcTyThing
  = AGlobal TyThing             -- Used only in the return type of a lookup

  | ATcId           -- Ids defined in this module; may not be fully zonked
      { tct_id   :: TcId
      , tct_info :: IdBindingInfo   -- See Note [Meaning of IdBindingInfo]
      }

  | ATyVar  Name TcTyVar   -- See Note [Type variables in the type environment]

  | ATcTyCon TyCon   -- Used temporarily, during kind checking, for the
                     -- tycons and classes in this recursive group
                     -- The TyCon is always a TcTyCon.  Its kind
                     -- can be a mono-kind or a poly-kind; in TcTyClsDcls see
                     -- Note [Type checking recursive type and class declarations]

  | APromotionErr PromotionErr

-- | Matches on either a global 'TyCon' or a 'TcTyCon'.
tcTyThingTyCon_maybe :: TcTyThing -> Maybe TyCon
tcTyThingTyCon_maybe (AGlobal (ATyCon tc)) = Just tc
tcTyThingTyCon_maybe (ATcTyCon tc_tc)      = Just tc_tc
tcTyThingTyCon_maybe _                     = Nothing

-- | Reasons why a type-level name cannot be promoted; reported to the user.
data PromotionErr
  = TyConPE          -- TyCon used in a kind before we are ready
                     --     data T :: T -> * where ...
  | ClassPE          -- Ditto Class

  | FamDataConPE     -- Data constructor for a data family
                     -- See Note [AFamDataCon: not promoting data family constructors]
                     -- in GHC.Tc.Utils.Env.

  | ConstrainedDataConPE
      PredType       -- Data constructor with a non-equality context
                     -- See Note [Don't promote data constructors with
                     --           non-equality contexts] in GHC.Tc.Gen.HsType

  | PatSynPE         -- Pattern synonyms
                     -- See Note [Don't promote pattern synonyms] in GHC.Tc.Utils.Env

  | RecDataConPE     -- Data constructor in a recursive loop
                     -- See Note [Recursion and promoting data constructors] in GHC.Tc.TyCl

  | NoDataKindsDC    -- -XDataKinds not enabled (for a datacon)

instance Outputable TcTyThing where     -- Debugging only
   ppr (AGlobal g)      = ppr g
   ppr elt@(ATcId {})   = text "Identifier"
                          <> brackets (ppr (tct_id elt) <> dcolon
                                 <> ppr (varType (tct_id elt)) <> comma
                                 <+> ppr (tct_info elt))
   ppr (ATyVar n tv)    = text "Type variable" <+> quotes (ppr n) <+> equals
                          <+> ppr tv <+> dcolon <+> ppr (varType tv)
   ppr (ATcTyCon tc)    = text "ATcTyCon" <+> ppr tc <+> dcolon <+> ppr (tyConKind tc)
   ppr (APromotionErr err) = text "APromotionErr" <+> ppr err

-- | IdBindingInfo describes how an Id is bound.
--
-- It is used for the following purposes:
-- a) for static forms in 'GHC.Tc.Gen.Expr.checkClosedInStaticForm' and
-- b) to figure out when a nested binding can be generalised,
--    in 'GHC.Tc.Gen.Bind.decideGeneralisationPlan'.
--
data IdBindingInfo -- See Note [Meaning of IdBindingInfo]
    = NotLetBound
    | ClosedLet
    | NonClosedLet
         RhsNames        -- Used for (static e) checks only
         ClosedTypeId    -- Used for generalisation checks
                         -- and for (static e) checks

-- | IsGroupClosed describes a group of mutually-recursive bindings
data IsGroupClosed
  = IsGroupClosed
      (NameEnv RhsNames)  -- Free var info for the RHS of each binding in the group
                          -- Used only for (static e) checks

      ClosedTypeId        -- True <=> all the free vars of the group are
                          --          imported or ClosedLet or
                          --          NonClosedLet with ClosedTypeId=True.
                          --          In particular, no tyvars, no NotLetBound

type RhsNames = NameSet  -- Names of variables, mentioned on the RHS of
                         -- a definition, that are not Global or ClosedLet

type ClosedTypeId = Bool
  -- See Note [Meaning of IdBindingInfo]

{- Note [Meaning of IdBindingInfo]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NotLetBound means that
  the Id is not let-bound (e.g. it is bound in a
  lambda-abstraction or in a case pattern)

ClosedLet means that
 - The Id is let-bound,
 - Any free term variables are also Global or ClosedLet
 - Its type has no free variables (NB: a top-level binding subject
   to the MR might have free vars in its type)
 These ClosedLets can definitely be floated to top level; and we
 may need to do so for static forms.

 Property:   ClosedLet
         is equivalent to
             NonClosedLet emptyNameSet True

(NonClosedLet (fvs::RhsNames) (cl::ClosedTypeId)) means that
 - The Id is let-bound

 - The fvs::RhsNames contains the free names of the RHS,
   excluding Global and ClosedLet ones.

 - For the ClosedTypeId field see Note [Bindings with closed types]

For (static e) to be valid, we need for every 'x' free in 'e',
that x's binding is floatable to the top level.  Specifically:
   * x's RhsNames must be empty
   * x's type has no free variables
See Note [Grand plan for static forms] in "GHC.Iface.Tidy.StaticPtrTable".
This test is made in GHC.Tc.Gen.Expr.checkClosedInStaticForm.
Actually knowing x's RhsNames (rather than just its emptiness
or otherwise) is just so we can produce better error messages

Note [Bindings with closed types: ClosedTypeId]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider

  f x = let g ys = map not ys
        in ...

Can we generalise 'g' under the OutsideIn algorithm?  Yes,
because all g's free variables are top-level; that is they themselves
have no free type variables, and it is the type variables in the
environment that makes things tricky for OutsideIn generalisation.

Here's the invariant:
   If an Id has ClosedTypeId=True (in its IdBindingInfo), then
   the Id's type is /definitely/ closed (has no free type variables).
   Specifically,
       a) The Id's actual type is closed (has no free tyvars)
       b) Either the Id has a (closed) user-supplied type signature
          or all its free variables are Global/ClosedLet
             or NonClosedLet with ClosedTypeId=True.
          In particular, none are NotLetBound.

Why is (b) needed?   Consider
    \x. (x :: Int, let y = x+1 in ...)
Initially x::alpha.  If we happen to typecheck the 'let' before the
(x::Int), y's type will have a free tyvar; but if the other way round
it won't.  So we treat any let-bound variable with a free
non-let-bound variable as not ClosedTypeId, regardless of what the
free vars of its type actually are.

But if it has a signature, all is well:
   \x. ...(let { y::Int; y = x+1 } in
           let { v = y+2 } in ...)...
Here the signature on 'v' makes 'y' a ClosedTypeId, so we can
generalise 'v'.

Note that:
 * A top-level binding may not have ClosedTypeId=True, if it suffers
   from the MR

 * A nested binding may be closed (eg 'g' in the example we started
   with). Indeed, that's the point; whether a function is defined at
   top level or nested is orthogonal to the question of whether or
   not it is closed.

 * A binding may be non-closed because it mentions a lexically scoped
   *type variable*  Eg
        f :: forall a. blah
        f x = let g y = ...(y::a)...

Under OutsideIn we are free to generalise an Id all of whose free
variables have ClosedTypeId=True (or imported).  This is an extension
compared to the JFP paper on OutsideIn, which used "top-level" as a
proxy for "closed".  (It's not a good proxy anyway -- the MR can make
a top-level binding with a free type variable.)

Note [Type variables in the type environment]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The type environment has a binding for each lexically-scoped
type variable that is in scope.  For example

  f :: forall a.
a -> a f x = (x :: a) g1 :: [a] -> a g1 (ys :: [b]) = head ys :: b g2 :: [Int] -> Int g2 (ys :: [c]) = head ys :: c * The forall'd variable 'a' in the signature scopes over f's RHS. * The pattern-bound type variable 'b' in 'g1' scopes over g1's RHS; note that it is bound to a skolem 'a' which is not itself lexically in scope. * The pattern-bound type variable 'c' in 'g2' is bound to Int; that is, pattern-bound type variables can stand for arbitrary types. (see GHC proposal #128 "Allow ScopedTypeVariables to refer to types" https://github.com/ghc-proposals/ghc-proposals/pull/128, and the paper "Type variables in patterns", Haskell Symposium 2018. This is implemented by the constructor ATyVar Name TcTyVar in the type environment. * The Name is the name of the original, lexically scoped type variable * The TcTyVar is sometimes a skolem (like in 'f'), and sometimes a unification variable (like in 'g1', 'g2'). We never zonk the type environment so in the latter case it always stays as a unification variable, although that variable may be later unified with a type (such as Int in 'g2'). -} instance Outputable IdBindingInfo where ppr NotLetBound = text "NotLetBound" ppr ClosedLet = text "TopLevelLet" ppr (NonClosedLet fvs closed_type) = text "TopLevelLet" <+> ppr fvs <+> ppr closed_type instance Outputable PromotionErr where ppr ClassPE = text "ClassPE" ppr TyConPE = text "TyConPE" ppr PatSynPE = text "PatSynPE" ppr FamDataConPE = text "FamDataConPE" ppr (ConstrainedDataConPE pred) = text "ConstrainedDataConPE" <+> parens (ppr pred) ppr RecDataConPE = text "RecDataConPE" ppr NoDataKindsDC = text "NoDataKindsDC" -------------- pprTcTyThingCategory :: TcTyThing -> SDoc pprTcTyThingCategory = text . capitalise . 
tcTyThingCategory tcTyThingCategory :: TcTyThing -> String tcTyThingCategory (AGlobal thing) = tyThingCategory thing tcTyThingCategory (ATyVar {}) = "type variable" tcTyThingCategory (ATcId {}) = "local identifier" tcTyThingCategory (ATcTyCon {}) = "local tycon" tcTyThingCategory (APromotionErr pe) = peCategory pe -------------- pprPECategory :: PromotionErr -> SDoc pprPECategory = text . capitalise . peCategory peCategory :: PromotionErr -> String peCategory ClassPE = "class" peCategory TyConPE = "type constructor" peCategory PatSynPE = "pattern synonym" peCategory FamDataConPE = "data constructor" peCategory ConstrainedDataConPE{} = "data constructor" peCategory RecDataConPE = "data constructor" peCategory NoDataKindsDC = "data constructor" {- ************************************************************************ * * Operations over ImportAvails * * ************************************************************************ -} mkModDeps :: Set (UnitId, ModuleNameWithIsBoot) -> InstalledModuleEnv ModuleNameWithIsBoot mkModDeps deps = S.foldl' add emptyInstalledModuleEnv deps where add env (uid, elt) = extendInstalledModuleEnv env (mkModule uid (gwib_mod elt)) elt plusModDeps :: InstalledModuleEnv ModuleNameWithIsBoot -> InstalledModuleEnv ModuleNameWithIsBoot -> InstalledModuleEnv ModuleNameWithIsBoot plusModDeps = plusInstalledModuleEnv plus_mod_dep where plus_mod_dep r1@(GWIB { gwib_mod = m1, gwib_isBoot = boot1 }) r2@(GWIB {gwib_mod = m2, gwib_isBoot = boot2}) | assertPpr (m1 == m2) ((ppr m1 <+> ppr m2) $$ (ppr (boot1 == IsBoot) <+> ppr (boot2 == IsBoot))) boot1 == IsBoot = r2 | otherwise = r1 -- If either side can "see" a non-hi-boot interface, use that -- Reusing existing tuples saves 10% of allocations on test -- perf/compiler/MultiLayerModules emptyImportAvails :: ImportAvails emptyImportAvails = ImportAvails { imp_mods = emptyModuleEnv, imp_direct_dep_mods = emptyInstalledModuleEnv, imp_dep_direct_pkgs = S.empty, imp_sig_mods = [], imp_trust_pkgs = S.empty, 
imp_trust_own_pkg = False, imp_boot_mods = emptyInstalledModuleEnv, imp_orphs = [], imp_finsts = [] } -- | Union two ImportAvails -- -- This function is a key part of Import handling, basically -- for each import we create a separate ImportAvails structure -- and then union them all together with this function. plusImportAvails :: ImportAvails -> ImportAvails -> ImportAvails plusImportAvails (ImportAvails { imp_mods = mods1, imp_direct_dep_mods = ddmods1, imp_dep_direct_pkgs = ddpkgs1, imp_boot_mods = srs1, imp_sig_mods = sig_mods1, imp_trust_pkgs = tpkgs1, imp_trust_own_pkg = tself1, imp_orphs = orphs1, imp_finsts = finsts1 }) (ImportAvails { imp_mods = mods2, imp_direct_dep_mods = ddmods2, imp_dep_direct_pkgs = ddpkgs2, imp_boot_mods = srcs2, imp_sig_mods = sig_mods2, imp_trust_pkgs = tpkgs2, imp_trust_own_pkg = tself2, imp_orphs = orphs2, imp_finsts = finsts2 }) = ImportAvails { imp_mods = plusModuleEnv_C (++) mods1 mods2, imp_direct_dep_mods = ddmods1 `plusModDeps` ddmods2, imp_dep_direct_pkgs = ddpkgs1 `S.union` ddpkgs2, imp_trust_pkgs = tpkgs1 `S.union` tpkgs2, imp_trust_own_pkg = tself1 || tself2, imp_boot_mods = srs1 `plusModDeps` srcs2, imp_sig_mods = sig_mods1 `unionLists` sig_mods2, imp_orphs = orphs1 `unionLists` orphs2, imp_finsts = finsts1 `unionLists` finsts2 } {- ************************************************************************ * * \subsection{Where from} * * ************************************************************************ The @WhereFrom@ type controls where the renamer looks for an interface file -} data WhereFrom = ImportByUser IsBootInterface -- Ordinary user import (perhaps {-# SOURCE #-}) | ImportBySystem -- Non user import. 
| ImportByPlugin -- Importing a plugin; -- See Note [Care with plugin imports] in GHC.Iface.Load instance Outputable WhereFrom where ppr (ImportByUser IsBoot) = text "{- SOURCE -}" ppr (ImportByUser NotBoot) = empty ppr ImportBySystem = text "{- SYSTEM -}" ppr ImportByPlugin = text "{- PLUGIN -}" {- ********************************************************************* * * Type signatures * * ********************************************************************* -} -- These data types need to be here only because -- GHC.Tc.Solver uses them, and GHC.Tc.Solver is fairly -- low down in the module hierarchy type TcSigFun = Name -> Maybe TcSigInfo data TcSigInfo = TcIdSig TcIdSigInfo | TcPatSynSig TcPatSynInfo data TcIdSigInfo -- See Note [Complete and partial type signatures] = CompleteSig -- A complete signature with no wildcards, -- so the complete polymorphic type is known. { sig_bndr :: TcId -- The polymorphic Id with that type , sig_ctxt :: UserTypeCtxt -- In the case of type-class default methods, -- the Name in the FunSigCtxt is not the same -- as the TcId; the former is 'op', while the -- latter is '$dmop' or some such , sig_loc :: SrcSpan -- Location of the type signature } | PartialSig -- A partial type signature (i.e. includes one or more -- wildcards). In this case it doesn't make sense to give -- the polymorphic Id, because we are going to /infer/ its -- type, so we can't make the polymorphic Id ab-initio { psig_name :: Name -- Name of the function; used when report wildcards , psig_hs_ty :: LHsSigWcType GhcRn -- The original partial signature in -- HsSyn form , sig_ctxt :: UserTypeCtxt , sig_loc :: SrcSpan -- Location of the type signature } {- Note [Complete and partial type signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A type signature is partial when it contains one or more wildcards (= type holes). The wildcard can either be: * A (type) wildcard occurring in sig_theta or sig_tau. These are stored in sig_wcs. 
f :: Bool -> _ g :: Eq _a => _a -> _a -> Bool * Or an extra-constraints wildcard, stored in sig_cts: h :: (Num a, _) => a -> a A type signature is a complete type signature when there are no wildcards in the type signature, i.e. iff sig_wcs is empty and sig_extra_cts is Nothing. -} data TcIdSigInst = TISI { sig_inst_sig :: TcIdSigInfo , sig_inst_skols :: [(Name, InvisTVBinder)] -- Instantiated type and kind variables, TyVarTvs -- The Name is the Name that the renamer chose; -- but the TcTyVar may come from instantiating -- the type and hence have a different unique. -- No need to keep track of whether they are truly lexically -- scoped because the renamer has named them uniquely -- See Note [Binding scoped type variables] in GHC.Tc.Gen.Sig -- -- NB: The order of sig_inst_skols is irrelevant -- for a CompleteSig, but for a PartialSig see -- Note [Quantified variables in partial type signatures] , sig_inst_theta :: TcThetaType -- Instantiated theta. In the case of a -- PartialSig, sig_theta does not include -- the extra-constraints wildcard , sig_inst_tau :: TcSigmaType -- Instantiated tau -- See Note [sig_inst_tau may be polymorphic] -- Relevant for partial signature only , sig_inst_wcs :: [(Name, TcTyVar)] -- Like sig_inst_skols, but for /named/ wildcards (_a etc). -- The named wildcards scope over the binding, and hence -- their Names may appear in type signatures in the binding , sig_inst_wcx :: Maybe TcType -- Extra-constraints wildcard to fill in, if any -- If this exists, it is surely of the form (meta_tv |> co) -- (where the co might be reflexive). This is filled in -- only from the return value of GHC.Tc.Gen.HsType.tcAnonWildCardOcc } {- Note [sig_inst_tau may be polymorphic] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that "sig_inst_tau" might actually be a polymorphic type, if the original function had a signature like forall a. Eq a => forall b. Ord b => .... But that's ok: tcMatchesFun (called by tcRhs) can deal with that It happens, too! 
See Note [Polymorphic methods] in GHC.Tc.TyCl.Class. Note [Quantified variables in partial type signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f :: forall a b. _ -> a -> _ -> b f (x,y) p q = q Then we expect f's final type to be f :: forall {x,y}. forall a b. (x,y) -> a -> b -> b Note that x,y are Inferred, and can't be use for visible type application (VTA). But a,b are Specified, and remain Specified in the final type, so we can use VTA for them. (Exception: if it turns out that a's kind mentions b we need to reorder them with scopedSort.) The sig_inst_skols of the TISI from a partial signature records that original order, and is used to get the variables of f's final type in the correct order. Note [Wildcards in partial signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The wildcards in psig_wcs may stand for a type mentioning the universally-quantified tyvars of psig_ty E.g. f :: forall a. _ -> a f x = x We get sig_inst_skols = [a] sig_inst_tau = _22 -> a sig_inst_wcs = [_22] and _22 in the end is unified with the type 'a' Moreover the kind of a wildcard in sig_inst_wcs may mention the universally-quantified tyvars sig_inst_skols e.g. 
f :: t a -> t _ Here we get sig_inst_skols = [k:*, (t::k ->*), (a::k)] sig_inst_tau = t a -> t _22 sig_inst_wcs = [ _22::k ] -} data TcPatSynInfo = TPSI { patsig_name :: Name, patsig_implicit_bndrs :: [InvisTVBinder], -- Implicitly-bound kind vars (Inferred) and -- implicitly-bound type vars (Specified) -- See Note [The pattern-synonym signature splitting rule] in GHC.Tc.TyCl.PatSyn patsig_univ_bndrs :: [InvisTVBinder], -- Bound by explicit user forall patsig_req :: TcThetaType, patsig_ex_bndrs :: [InvisTVBinder], -- Bound by explicit user forall patsig_prov :: TcThetaType, patsig_body_ty :: TcSigmaType } instance Outputable TcSigInfo where ppr (TcIdSig idsi) = ppr idsi ppr (TcPatSynSig tpsi) = text "TcPatSynInfo" <+> ppr tpsi instance Outputable TcIdSigInfo where ppr (CompleteSig { sig_bndr = bndr }) = ppr bndr <+> dcolon <+> ppr (idType bndr) ppr (PartialSig { psig_name = name, psig_hs_ty = hs_ty }) = text "psig" <+> ppr name <+> dcolon <+> ppr hs_ty instance Outputable TcIdSigInst where ppr (TISI { sig_inst_sig = sig, sig_inst_skols = skols , sig_inst_theta = theta, sig_inst_tau = tau }) = hang (ppr sig) 2 (vcat [ ppr skols, ppr theta <+> darrow <+> ppr tau ]) instance Outputable TcPatSynInfo where ppr (TPSI{ patsig_name = name}) = ppr name isPartialSig :: TcIdSigInst -> Bool isPartialSig (TISI { sig_inst_sig = PartialSig {} }) = True isPartialSig _ = False -- | No signature or a partial signature hasCompleteSig :: TcSigFun -> Name -> Bool hasCompleteSig sig_fn name = case sig_fn name of Just (TcIdSig (CompleteSig {})) -> True _ -> False {- Constraint Solver Plugins ------------------------- -} -- | The @solve@ function of a type-checking plugin takes in Given -- and Wanted constraints, and should return a 'TcPluginSolveResult' -- indicating which Wanted constraints it could solve, or whether any are -- insoluble. 
type TcPluginSolver = EvBindsVar -> [Ct] -- ^ Givens -> [Ct] -- ^ Wanteds -> TcPluginM TcPluginSolveResult -- | For rewriting type family applications, a type-checking plugin provides -- a function of this type for each type family 'TyCon'. -- -- The function is provided with the current set of Given constraints, together -- with the arguments to the type family. -- The type family application will always be fully saturated. type TcPluginRewriter = RewriteEnv -- ^ Rewriter environment -> [Ct] -- ^ Givens -> [TcType] -- ^ type family arguments -> TcPluginM TcPluginRewriteResult -- | 'TcPluginM' is the monad in which type-checking plugins operate. newtype TcPluginM a = TcPluginM { runTcPluginM :: TcM a } deriving newtype (Functor, Applicative, Monad, MonadFail) -- | This function provides an escape for direct access to -- the 'TcM` monad. It should not be used lightly, and -- the provided 'TcPluginM' API should be favoured instead. unsafeTcPluginTcM :: TcM a -> TcPluginM a unsafeTcPluginTcM = TcPluginM data TcPlugin = forall s. TcPlugin { tcPluginInit :: TcPluginM s -- ^ Initialize plugin, when entering type-checker. , tcPluginSolve :: s -> TcPluginSolver -- ^ Solve some constraints. -- -- This function will be invoked at two points in the constraint solving -- process: once to simplify Given constraints, and once to solve -- Wanted constraints. In the first case (and only in the first case), -- no Wanted constraints will be passed to the plugin. -- -- The plugin can either return a contradiction, -- or specify that it has solved some constraints (with evidence), -- and possibly emit additional constraints. These returned constraints -- must be Givens in the first case, and Wanteds in the second. -- -- Use @ \\ _ _ _ _ _ -> pure $ TcPluginOK [] [] @ if your plugin -- does not provide this functionality. , tcPluginRewrite :: s -> UniqFM TyCon TcPluginRewriter -- ^ Rewrite saturated type family applications. 
-- -- The plugin is expected to supply a mapping from type family names to -- rewriting functions. For each type family 'TyCon', the plugin should -- provide a function which takes in the given constraints and arguments -- of a saturated type family application, and return a possible rewriting. -- See 'TcPluginRewriter' for the expected shape of such a function. -- -- Use @ \\ _ -> emptyUFM @ if your plugin does not provide this functionality. , tcPluginStop :: s -> TcPluginM () -- ^ Clean up after the plugin, when exiting the type-checker. } -- | The plugin found a contradiction. -- The returned constraints are removed from the inert set, -- and recorded as insoluble. -- -- The returned list of constraints should never be empty. pattern TcPluginContradiction :: [Ct] -> TcPluginSolveResult pattern TcPluginContradiction insols = TcPluginSolveResult { tcPluginInsolubleCts = insols , tcPluginSolvedCts = [] , tcPluginNewCts = [] } -- | The plugin has not found any contradictions, -- -- The first field is for constraints that were solved. -- The second field contains new work, that should be processed by -- the constraint solver. pattern TcPluginOk :: [(EvTerm, Ct)] -> [Ct] -> TcPluginSolveResult pattern TcPluginOk solved new = TcPluginSolveResult { tcPluginInsolubleCts = [] , tcPluginSolvedCts = solved , tcPluginNewCts = new } -- | Result of running a solver plugin. data TcPluginSolveResult = TcPluginSolveResult { -- | Insoluble constraints found by the plugin. -- -- These constraints will be added to the inert set, -- and reported as insoluble to the user. tcPluginInsolubleCts :: [Ct] -- | Solved constraints, together with their evidence. -- -- These are removed from the inert set, and the -- evidence for them is recorded. , tcPluginSolvedCts :: [(EvTerm, Ct)] -- | New constraints that the plugin wishes to emit. -- -- These will be added to the work list. 
, tcPluginNewCts :: [Ct] } data TcPluginRewriteResult = -- | The plugin does not rewrite the type family application. TcPluginNoRewrite -- | The plugin rewrites the type family application -- providing a rewriting together with evidence: a 'Reduction', -- which contains the rewritten type together with a 'Coercion' -- whose right-hand-side type is the rewritten type. -- -- The plugin can also emit additional Wanted constraints. | TcPluginRewriteTo { tcPluginReduction :: !Reduction , tcRewriterNewWanteds :: [Ct] } -- | A collection of candidate default types for a type variable. data DefaultingProposal = DefaultingProposal { deProposalTyVar :: TcTyVar -- ^ The type variable to default. , deProposalCandidates :: [Type] -- ^ Candidate types to default the type variable to. , deProposalCts :: [Ct] -- ^ The constraints against which defaults are checked. } instance Outputable DefaultingProposal where ppr p = text "DefaultingProposal" <+> ppr (deProposalTyVar p) <+> ppr (deProposalCandidates p) <+> ppr (deProposalCts p) type DefaultingPluginResult = [DefaultingProposal] type FillDefaulting = WantedConstraints -> TcPluginM DefaultingPluginResult -- | A plugin for controlling defaulting. data DefaultingPlugin = forall s. DefaultingPlugin { dePluginInit :: TcPluginM s -- ^ Initialize plugin, when entering type-checker. , dePluginRun :: s -> FillDefaulting -- ^ Default some types , dePluginStop :: s -> TcPluginM () -- ^ Clean up after the plugin, when exiting the type-checker. 
} {- ********************************************************************* * * Role annotations * * ********************************************************************* -} type RoleAnnotEnv = NameEnv (LRoleAnnotDecl GhcRn) mkRoleAnnotEnv :: [LRoleAnnotDecl GhcRn] -> RoleAnnotEnv mkRoleAnnotEnv role_annot_decls = mkNameEnv [ (name, ra_decl) | ra_decl <- role_annot_decls , let name = roleAnnotDeclName (unLoc ra_decl) , not (isUnboundName name) ] -- Some of the role annots will be unbound; -- we don't wish to include these emptyRoleAnnotEnv :: RoleAnnotEnv emptyRoleAnnotEnv = emptyNameEnv lookupRoleAnnot :: RoleAnnotEnv -> Name -> Maybe (LRoleAnnotDecl GhcRn) lookupRoleAnnot = lookupNameEnv getRoleAnnots :: [Name] -> RoleAnnotEnv -> [LRoleAnnotDecl GhcRn] getRoleAnnots bndrs role_env = mapMaybe (lookupRoleAnnot role_env) bndrs {- ********************************************************************* * * Linting a TcGblEnv * * ********************************************************************* -} -- | Check the 'TcGblEnv' for consistency. Currently, only checks -- axioms, but should check other aspects, too. lintGblEnv :: Logger -> DynFlags -> TcGblEnv -> TcM () lintGblEnv logger dflags tcg_env = liftIO $ lintAxioms logger dflags (text "TcGblEnv axioms") axioms where axioms = typeEnvCoAxioms (tcg_type_env tcg_env) -- | This is a mirror of Template Haskell's DocLoc, but the TH names are -- resolved to GHC names. data DocLoc = DeclDoc Name | ArgDoc Name Int | InstDoc Name | ModuleDoc deriving (Eq, Ord) -- | The current collection of docs that Template Haskell has built up via -- putDoc. 
type THDocs = Map DocLoc (HsDoc GhcRn) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types/Constraint.hs0000644000000000000000000027146314472400113022003 0ustar0000000000000000 {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE TypeApplications #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | This module defines types and simple operations over constraints, as used -- in the type-checker and constraint solver. module GHC.Tc.Types.Constraint ( -- QCInst QCInst(..), pendingScInst_maybe, -- Canonical constraints Xi, Ct(..), Cts, emptyCts, andCts, andManyCts, pprCts, singleCt, listToCts, ctsElts, consCts, snocCts, extendCtsList, isEmptyCts, isPendingScDict, pendingScDict_maybe, superClassesMightHelp, getPendingWantedScs, isWantedCt, isGivenCt, isUserTypeError, getUserTypeErrorMsg, ctEvidence, ctLoc, ctPred, ctFlavour, ctEqRel, ctOrigin, ctRewriters, ctEvId, wantedEvId_maybe, mkTcEqPredLikeEv, mkNonCanonical, mkNonCanonicalCt, mkGivens, mkIrredCt, ctEvPred, ctEvLoc, ctEvOrigin, ctEvEqRel, ctEvExpr, ctEvTerm, ctEvCoercion, ctEvEvId, ctEvRewriters, tyCoVarsOfCt, tyCoVarsOfCts, tyCoVarsOfCtList, tyCoVarsOfCtsList, CtIrredReason(..), isInsolubleReason, CheckTyEqResult, CheckTyEqProblem, cteProblem, cterClearOccursCheck, cteOK, cteImpredicative, cteTypeFamily, cteInsolubleOccurs, cteSolubleOccurs, cterSetOccursCheckSoluble, cterHasNoProblem, cterHasProblem, cterHasOnlyProblem, cterRemoveProblem, cterHasOccursCheck, cterFromKind, CanEqLHS(..), canEqLHS_maybe, canEqLHSKind, canEqLHSType, eqCanEqLHS, Hole(..), HoleSort(..), isOutOfScopeHole, DelayedError(..), NotConcreteError(..), NotConcreteReason(..), WantedConstraints(..), insolubleWC, emptyWC, isEmptyWC, isSolvedWC, andWC, unionsWC, mkSimpleWC, mkImplicWC, addInsols, dropMisleading, addSimples, addImplics, addHoles, addNotConcreteError, addDelayedErrors, tyCoVarsOfWC, tyCoVarsOfWCList, insolubleWantedCt, insolubleEqCt, insolubleCt, insolubleImplic, 
nonDefaultableTyVarsOfWC, Implication(..), implicationPrototype, checkTelescopeSkol, ImplicStatus(..), isInsolubleStatus, isSolvedStatus, UserGiven, getUserGivensFromImplics, HasGivenEqs(..), checkImplicationInvariants, SubGoalDepth, initialSubGoalDepth, maxSubGoalDepth, bumpSubGoalDepth, subGoalDepthExceeded, CtLoc(..), ctLocSpan, ctLocEnv, ctLocLevel, ctLocOrigin, ctLocTypeOrKind_maybe, ctLocDepth, bumpCtLocDepth, isGivenLoc, setCtLocOrigin, updateCtLocOrigin, setCtLocEnv, setCtLocSpan, pprCtLoc, -- CtEvidence CtEvidence(..), TcEvDest(..), mkKindLoc, toKindLoc, mkGivenLoc, isWanted, isGiven, ctEvRole, setCtEvPredType, setCtEvLoc, arisesFromGivens, tyCoVarsOfCtEvList, tyCoVarsOfCtEv, tyCoVarsOfCtEvsList, ctEvUnique, tcEvDestUnique, RewriterSet(..), emptyRewriterSet, isEmptyRewriterSet, -- exported concretely only for anyUnfilledCoercionHoles rewriterSetFromType, rewriterSetFromTypes, rewriterSetFromCo, addRewriterSet, wrapType, CtFlavour(..), ctEvFlavour, CtFlavourRole, ctEvFlavourRole, ctFlavourRole, eqCanRewrite, eqCanRewriteFR, -- Pretty printing pprEvVarTheta, pprEvVars, pprEvVarWithType, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Tc.Types ( TcLclEnv, setLclEnvTcLevel, getLclEnvTcLevel , setLclEnvLoc, getLclEnvLoc ) import GHC.Core.Predicate import GHC.Core.Type import GHC.Core.Coercion import GHC.Core.Class import GHC.Core.TyCon import GHC.Types.Name import GHC.Types.Var import GHC.Tc.Utils.TcType import GHC.Tc.Types.Evidence import GHC.Tc.Types.Origin import GHC.Core import GHC.Core.TyCo.Ppr import GHC.Utils.FV import GHC.Types.Var.Set import GHC.Driver.Session import GHC.Types.Basic import GHC.Types.Unique import GHC.Types.Unique.Set import GHC.Utils.Outputable import GHC.Types.SrcLoc import GHC.Data.Bag import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Constants (debugIsOn) import Data.Coerce import Data.Monoid ( Endo(..) 
) import qualified Data.Semigroup as S import Control.Monad ( msum, when ) import Data.Maybe ( mapMaybe ) import Data.List.NonEmpty ( NonEmpty ) -- these are for CheckTyEqResult import Data.Word ( Word8 ) import Data.List ( intersperse ) {- ************************************************************************ * * * Canonical constraints * * * * These are the constraints the low-level simplifier works with * * * ************************************************************************ Note [CEqCan occurs check] ~~~~~~~~~~~~~~~~~~~~~~~~~~ A CEqCan relates a CanEqLHS (a type variable or type family applications) on its left to an arbitrary type on its right. It is used for rewriting. Because it is used for rewriting, it would be disastrous if the RHS were to mention the LHS: this would cause a loop in rewriting. We thus perform an occurs-check. There is, of course, some subtlety: * For type variables, the occurs-check looks deeply. This is because a CEqCan over a meta-variable is also used to inform unification, in GHC.Tc.Solver.Interact.solveByUnification. If the LHS appears anywhere, at all, in the RHS, unification will create an infinite structure, which is bad. * For type family applications, the occurs-check is shallow; it looks only in places where we might rewrite. (Specifically, it does not look in kinds or coercions.) An occurrence of the LHS in, say, an RHS coercion is OK, as we do not rewrite in coercions. No loop to be found. You might also worry about the possibility that a type family application LHS doesn't exactly appear in the RHS, but something that reduces to the LHS does. Yet that can't happen: the RHS is already inert, with all type family redexes reduced. So a simple syntactic check is just fine. The occurs check is performed in GHC.Tc.Utils.Unify.checkTypeEq and forms condition T3 in Note [Extending the inert equalities] in GHC.Tc.Solver.InertSet. 
-} -- | A 'Xi'-type is one that has been fully rewritten with respect -- to the inert set; that is, it has been rewritten by the algorithm -- in GHC.Tc.Solver.Rewrite. (Historical note: 'Xi', for years and years, -- meant that a type was type-family-free. It does *not* mean this -- any more.) type Xi = TcType type Cts = Bag Ct data Ct -- Atomic canonical constraints = CDictCan { -- e.g. Num ty cc_ev :: CtEvidence, -- See Note [Ct/evidence invariant] cc_class :: Class, cc_tyargs :: [Xi], -- cc_tyargs are rewritten w.r.t. inerts, so Xi cc_pend_sc :: Bool, -- See Note [The superclass story] in GHC.Tc.Solver.Canonical -- True <=> (a) cc_class has superclasses -- (b) we have not (yet) added those -- superclasses as Givens cc_fundeps :: Bool -- See Note [Fundeps with instances] in GHC.Tc.Solver.Interact -- True <=> the class has fundeps, and we have not yet -- compared this constraint with the global -- instances for fundep improvement } | CIrredCan { -- These stand for yet-unusable predicates cc_ev :: CtEvidence, -- See Note [Ct/evidence invariant] cc_reason :: CtIrredReason -- For the might-be-soluble case, the ctev_pred of the evidence is -- of form (tv xi1 xi2 ... 
xin) with a tyvar at the head -- or (lhs1 ~ ty2) where the CEqCan kind invariant (TyEq:K) fails -- See Note [CIrredCan constraints] -- The definitely-insoluble case is for things like -- Int ~ Bool tycons don't match -- a ~ [a] occurs check } | CEqCan { -- CanEqLHS ~ rhs -- Invariants: -- * See Note [inert_eqs: the inert equalities] in GHC.Tc.Solver.InertSet -- * Many are checked in checkTypeEq in GHC.Tc.Utils.Unify -- * (TyEq:OC) lhs does not occur in rhs (occurs check) -- Note [CEqCan occurs check] -- * (TyEq:F) rhs has no foralls -- (this avoids substituting a forall for the tyvar in other types) -- * (TyEq:K) tcTypeKind lhs `tcEqKind` tcTypeKind rhs; Note [Ct kind invariant] -- * (TyEq:N) If the equality is representational, rhs has no top-level newtype -- See Note [No top-level newtypes on RHS of representational equalities] -- in GHC.Tc.Solver.Canonical. (Applies only when constructor of newtype is -- in scope.) -- * (TyEq:TV) If rhs (perhaps under a cast) is also CanEqLHS, then it is oriented -- to give best chance of -- unification happening; eg if rhs is touchable then lhs is too -- Note [TyVar/TyVar orientation] in GHC.Tc.Utils.Unify cc_ev :: CtEvidence, -- See Note [Ct/evidence invariant] cc_lhs :: CanEqLHS, cc_rhs :: Xi, -- See invariants above cc_eq_rel :: EqRel -- INVARIANT: cc_eq_rel = ctEvEqRel cc_ev } | CNonCanonical { -- See Note [NonCanonical Semantics] in GHC.Tc.Solver.Monad cc_ev :: CtEvidence } | CQuantCan QCInst -- A quantified constraint -- NB: I expect to make more of the cases in Ct -- look like this, with the payload in an -- auxiliary type ------------ -- | A 'CanEqLHS' is a type that can appear on the left of a canonical -- equality: a type variable or exactly-saturated type family application. 
data CanEqLHS = TyVarLHS TcTyVar | TyFamLHS TyCon -- ^ of the family [Xi] -- ^ exactly saturating the family instance Outputable CanEqLHS where ppr (TyVarLHS tv) = ppr tv ppr (TyFamLHS fam_tc fam_args) = ppr (mkTyConApp fam_tc fam_args) ------------ data QCInst -- A much simplified version of ClsInst -- See Note [Quantified constraints] in GHC.Tc.Solver.Canonical = QCI { qci_ev :: CtEvidence -- Always of type forall tvs. context => ty -- Always Given , qci_tvs :: [TcTyVar] -- The tvs , qci_pred :: TcPredType -- The ty , qci_pend_sc :: Bool -- Same as cc_pend_sc flag in CDictCan -- Invariant: True => qci_pred is a ClassPred } instance Outputable QCInst where ppr (QCI { qci_ev = ev }) = ppr ev ------------------------------------------------------------------------------ -- -- Holes and other delayed errors -- ------------------------------------------------------------------------------ -- | A delayed error, to be reported after constraint solving, in order to benefit -- from deferred unifications. data DelayedError = DE_Hole Hole -- ^ A hole (in a type or in a term). -- -- See Note [Holes]. | DE_NotConcrete NotConcreteError -- ^ A type could not be ensured to be concrete. -- -- See Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete. instance Outputable DelayedError where ppr (DE_Hole hole) = ppr hole ppr (DE_NotConcrete err) = ppr err -- | A hole stores the information needed to report diagnostics -- about holes in terms (unbound identifiers or underscores) or -- in types (also called wildcards, as used in partial type -- signatures). See Note [Holes]. data Hole = Hole { hole_sort :: HoleSort -- ^ What flavour of hole is this? , hole_occ :: OccName -- ^ The name of this hole , hole_ty :: TcType -- ^ Type to be printed to the user -- For expression holes: type of expr -- For type holes: the missing type , hole_loc :: CtLoc -- ^ Where hole was written } -- For the hole_loc, we usually only want the TcLclEnv stored within. 
-- Except when we rewrite, where we need a whole location. And this -- might get reported to the user if reducing type families in a -- hole type loops. -- | Used to indicate which sort of hole we have. data HoleSort = ExprHole HoleExprRef -- ^ Either an out-of-scope variable or a "true" hole in an -- expression (TypedHoles). -- The HoleExprRef says where to write the -- the erroring expression for -fdefer-type-errors. | TypeHole -- ^ A hole in a type (PartialTypeSignatures) | ConstraintHole -- ^ A hole in a constraint, like @f :: (_, Eq a) => ... -- Differentiated from TypeHole because a ConstraintHole -- is simplified differently. See -- Note [Do not simplify ConstraintHoles] in GHC.Tc.Solver. instance Outputable Hole where ppr (Hole { hole_sort = ExprHole ref , hole_occ = occ , hole_ty = ty }) = parens $ (braces $ ppr occ <> colon <> ppr ref) <+> dcolon <+> ppr ty ppr (Hole { hole_sort = _other , hole_occ = occ , hole_ty = ty }) = braces $ ppr occ <> colon <> ppr ty instance Outputable HoleSort where ppr (ExprHole ref) = text "ExprHole:" <+> ppr ref ppr TypeHole = text "TypeHole" ppr ConstraintHole = text "ConstraintHole" -- | Why did we require that a certain type be concrete? data NotConcreteError -- | Concreteness was required by a representation-polymorphism -- check. -- -- See Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete. = NCE_FRR { nce_loc :: CtLoc -- ^ Where did this check take place? , nce_frr_origin :: FixedRuntimeRepOrigin -- ^ Which representation-polymorphism check did we perform? , nce_reasons :: NonEmpty NotConcreteReason -- ^ Why did the check fail? } -- | Why did we decide that a type was not concrete? data NotConcreteReason -- | The type contains a 'TyConApp' of a non-concrete 'TyCon'. -- -- See Note [Concrete types] in GHC.Tc.Utils.Concrete. = NonConcreteTyCon TyCon [TcType] -- | The type contains a type variable that could not be made -- concrete (e.g. a skolem type variable). 
| NonConcretisableTyVar TyVar -- | The type contains a cast. | ContainsCast TcType TcCoercionN -- | The type contains a forall. | ContainsForall TyCoVarBinder TcType -- | The type contains a 'CoercionTy'. | ContainsCoercionTy TcCoercion instance Outputable NotConcreteError where ppr (NCE_FRR { nce_frr_origin = frr_orig }) = text "NCE_FRR" <+> parens (ppr (frr_type frr_orig)) ------------ -- | Used to indicate extra information about why a CIrredCan is irreducible data CtIrredReason = IrredShapeReason -- ^ this constraint has a non-canonical shape (e.g. @c Int@, for a variable @c@) | NonCanonicalReason CheckTyEqResult -- ^ an equality where some invariant other than (TyEq:H) of 'CEqCan' is not satisfied; -- the 'CheckTyEqResult' states exactly why | ReprEqReason -- ^ an equality that cannot be decomposed because it is representational. -- Example: @a b ~R# Int@. -- These might still be solved later. -- INVARIANT: The constraint is a representational equality constraint | ShapeMismatchReason -- ^ a nominal equality that relates two wholly different types, -- like @Int ~# Bool@ or @a b ~# 3@. -- INVARIANT: The constraint is a nominal equality constraint | AbstractTyConReason -- ^ an equality like @T a b c ~ Q d e@ where either @T@ or @Q@ -- is an abstract type constructor. See Note [Skolem abstract data] -- in GHC.Core.TyCon. -- INVARIANT: The constraint is an equality constraint between two TyConApps instance Outputable CtIrredReason where ppr IrredShapeReason = text "(irred)" ppr (NonCanonicalReason cter) = ppr cter ppr ReprEqReason = text "(repr)" ppr ShapeMismatchReason = text "(shape)" ppr AbstractTyConReason = text "(abstc)" -- | Are we sure that more solving will never solve this constraint? 
isInsolubleReason :: CtIrredReason -> Bool isInsolubleReason IrredShapeReason = False isInsolubleReason (NonCanonicalReason cter) = cterIsInsoluble cter isInsolubleReason ReprEqReason = False isInsolubleReason ShapeMismatchReason = True isInsolubleReason AbstractTyConReason = True ------------------------------------------------------------------------------ -- -- CheckTyEqResult, defined here because it is stored in a CtIrredReason -- ------------------------------------------------------------------------------ -- | A set of problems in checking the validity of a type equality. -- See 'checkTypeEq'. newtype CheckTyEqResult = CTER Word8 -- | No problems in checking the validity of a type equality. cteOK :: CheckTyEqResult cteOK = CTER zeroBits -- | Check whether a 'CheckTyEqResult' is marked successful. cterHasNoProblem :: CheckTyEqResult -> Bool cterHasNoProblem (CTER 0) = True cterHasNoProblem _ = False -- | An individual problem that might be logged in a 'CheckTyEqResult' newtype CheckTyEqProblem = CTEP Word8 cteImpredicative, cteTypeFamily, cteInsolubleOccurs, cteSolubleOccurs :: CheckTyEqProblem cteImpredicative = CTEP (bit 0) -- forall or (=>) encountered cteTypeFamily = CTEP (bit 1) -- type family encountered cteInsolubleOccurs = CTEP (bit 2) -- occurs-check cteSolubleOccurs = CTEP (bit 3) -- occurs-check under a type function or in a coercion -- must be one bit to the left of cteInsolubleOccurs -- See also Note [Insoluble occurs check] in GHC.Tc.Errors cteProblem :: CheckTyEqProblem -> CheckTyEqResult cteProblem (CTEP mask) = CTER mask occurs_mask :: Word8 occurs_mask = insoluble_mask .|. soluble_mask where CTEP insoluble_mask = cteInsolubleOccurs CTEP soluble_mask = cteSolubleOccurs -- | Check whether a 'CheckTyEqResult' has a 'CheckTyEqProblem' cterHasProblem :: CheckTyEqResult -> CheckTyEqProblem -> Bool CTER bits `cterHasProblem` CTEP mask = (bits .&. 
mask) /= 0 -- | Check whether a 'CheckTyEqResult' has one 'CheckTyEqProblem' and no other cterHasOnlyProblem :: CheckTyEqResult -> CheckTyEqProblem -> Bool CTER bits `cterHasOnlyProblem` CTEP mask = bits == mask cterRemoveProblem :: CheckTyEqResult -> CheckTyEqProblem -> CheckTyEqResult cterRemoveProblem (CTER bits) (CTEP mask) = CTER (bits .&. complement mask) cterHasOccursCheck :: CheckTyEqResult -> Bool cterHasOccursCheck (CTER bits) = (bits .&. occurs_mask) /= 0 cterClearOccursCheck :: CheckTyEqResult -> CheckTyEqResult cterClearOccursCheck (CTER bits) = CTER (bits .&. complement occurs_mask) -- | Mark a 'CheckTyEqResult' as not having an insoluble occurs-check: any occurs -- check under a type family or in a representation equality is soluble. cterSetOccursCheckSoluble :: CheckTyEqResult -> CheckTyEqResult cterSetOccursCheckSoluble (CTER bits) = CTER $ ((bits .&. insoluble_mask) `shift` 1) .|. (bits .&. complement insoluble_mask) where CTEP insoluble_mask = cteInsolubleOccurs -- | Retain only information about occurs-check failures, because only that -- matters after recurring into a kind. cterFromKind :: CheckTyEqResult -> CheckTyEqResult cterFromKind (CTER bits) = CTER (bits .&. occurs_mask) cterIsInsoluble :: CheckTyEqResult -> Bool cterIsInsoluble (CTER bits) = (bits .&. mask) /= 0 where mask = impredicative_mask .|. insoluble_occurs_mask CTEP impredicative_mask = cteImpredicative CTEP insoluble_occurs_mask = cteInsolubleOccurs instance Semigroup CheckTyEqResult where CTER bits1 <> CTER bits2 = CTER (bits1 .|. 
bits2) instance Monoid CheckTyEqResult where mempty = cteOK instance Outputable CheckTyEqResult where ppr cter | cterHasNoProblem cter = text "cteOK" | otherwise = parens $ fcat $ intersperse vbar $ set_bits where all_bits = [ (cteImpredicative, "cteImpredicative") , (cteTypeFamily, "cteTypeFamily") , (cteInsolubleOccurs, "cteInsolubleOccurs") , (cteSolubleOccurs, "cteSolubleOccurs") ] set_bits = [ text str | (bitmask, str) <- all_bits , cter `cterHasProblem` bitmask ] {- Note [CIrredCan constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CIrredCan constraints are used for constraints that are "stuck" - we can't solve them (yet) - we can't use them to solve other constraints - but they may become soluble if we substitute for some of the type variables in the constraint Example 1: (c Int), where c :: * -> Constraint. We can't do anything with this yet, but if later c := Num, *then* we can solve it Example 2: a ~ b, where a :: *, b :: k, where k is a kind variable We don't want to use this to substitute 'b' for 'a', in case 'k' is subsequently unified with (say) *->*, because then we'd have ill-kinded types floating about. Rather we want to defer using the equality altogether until 'k' get resolved. Note [Ct/evidence invariant] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If ct :: Ct, then extra fields of 'ct' cache precisely the ctev_pred field of (cc_ev ct), and is fully rewritten wrt the substitution. Eg for CDictCan, ctev_pred (cc_ev ct) = (cc_class ct) (cc_tyargs ct) This holds by construction; look at the unique place where CDictCan is built (in GHC.Tc.Solver.Canonical). Note [Ct kind invariant] ~~~~~~~~~~~~~~~~~~~~~~~~ CEqCan requires that the kind of the lhs matches the kind of the rhs. This is necessary because these constraints are used for substitutions during solving. If the kinds differed, then the substitution would take a well-kinded type to an ill-kinded one. Note [Holes] ~~~~~~~~~~~~ This Note explains how GHC tracks *holes*. 
A hole represents one of two conditions: - A missing bit of an expression. Example: foo x = x + _ - A missing bit of a type. Example: bar :: Int -> _ What these have in common is that both cause GHC to emit a diagnostic to the user describing the bit that is left out. When a hole is encountered, a new entry of type Hole is added to the ambient WantedConstraints. The type (hole_ty) of the hole is then simplified during solving (with respect to any Givens in surrounding implications). It is reported with all the other errors in GHC.Tc.Errors. For expression holes, the user has the option of deferring errors until runtime with -fdefer-type-errors. In this case, the hole actually has evidence: this evidence is an erroring expression that prints an error and crashes at runtime. The ExprHole variant of holes stores an IORef EvTerm that will contain this evidence; during constraint generation, this IORef was stored in the HsUnboundVar extension field by the type checker. The desugarer simply dereferences to get the CoreExpr. Prior to fixing #17812, we used to invent an Id to hold the erroring expression, and then bind it during type-checking. But this does not support representation-polymorphic out-of-scope identifiers. See typecheck/should_compile/T17812. We thus use the mutable-CoreExpr approach described above. You might think that the type in the HoleExprRef is the same as the type of the hole. However, because the hole type (hole_ty) is rewritten with respect to givens, this might not be the case. That is, the hole_ty is always (~) to the type of the HoleExprRef, but they might not be `eqType`. We need the type of the generated evidence to match what is expected in the context of the hole, and so we must store these types separately. Type-level holes have no evidence at all. 
-} mkNonCanonical :: CtEvidence -> Ct mkNonCanonical ev = CNonCanonical { cc_ev = ev } mkNonCanonicalCt :: Ct -> Ct mkNonCanonicalCt ct = CNonCanonical { cc_ev = cc_ev ct } mkIrredCt :: CtIrredReason -> CtEvidence -> Ct mkIrredCt reason ev = CIrredCan { cc_ev = ev, cc_reason = reason } mkGivens :: CtLoc -> [EvId] -> [Ct] mkGivens loc ev_ids = map mk ev_ids where mk ev_id = mkNonCanonical (CtGiven { ctev_evar = ev_id , ctev_pred = evVarPred ev_id , ctev_loc = loc }) ctEvidence :: Ct -> CtEvidence ctEvidence (CQuantCan (QCI { qci_ev = ev })) = ev ctEvidence ct = cc_ev ct ctLoc :: Ct -> CtLoc ctLoc = ctEvLoc . ctEvidence ctOrigin :: Ct -> CtOrigin ctOrigin = ctLocOrigin . ctLoc ctPred :: Ct -> PredType -- See Note [Ct/evidence invariant] ctPred ct = ctEvPred (ctEvidence ct) ctRewriters :: Ct -> RewriterSet ctRewriters = ctEvRewriters . ctEvidence ctEvId :: HasDebugCallStack => Ct -> EvVar -- The evidence Id for this Ct ctEvId ct = ctEvEvId (ctEvidence ct) -- | Returns the evidence 'Id' for the argument 'Ct' -- when this 'Ct' is a 'Wanted'. -- -- Returns 'Nothing' otherwise. wantedEvId_maybe :: Ct -> Maybe EvVar wantedEvId_maybe ct = case ctEvidence ct of ctev@(CtWanted {}) | otherwise -> Just $ ctEvEvId ctev CtGiven {} -> Nothing -- | Makes a new equality predicate with the same role as the given -- evidence. mkTcEqPredLikeEv :: CtEvidence -> TcType -> TcType -> TcType mkTcEqPredLikeEv ev = case predTypeEqRel pred of NomEq -> mkPrimEqPred ReprEq -> mkReprPrimEqPred where pred = ctEvPred ev -- | Get the flavour of the given 'Ct' ctFlavour :: Ct -> CtFlavour ctFlavour = ctEvFlavour . ctEvidence -- | Get the equality relation for the given 'Ct' ctEqRel :: Ct -> EqRel ctEqRel = ctEvEqRel . 
ctEvidence instance Outputable Ct where ppr ct = ppr (ctEvidence ct) <+> parens pp_sort where pp_sort = case ct of CEqCan {} -> text "CEqCan" CNonCanonical {} -> text "CNonCanonical" CDictCan { cc_pend_sc = psc, cc_fundeps = fds } | psc, fds -> text "CDictCan(psc,fds)" | psc, not fds -> text "CDictCan(psc)" | not psc, fds -> text "CDictCan(fds)" | otherwise -> text "CDictCan" CIrredCan { cc_reason = reason } -> text "CIrredCan" <> ppr reason CQuantCan (QCI { qci_pend_sc = pend_sc }) | pend_sc -> text "CQuantCan(psc)" | otherwise -> text "CQuantCan" ----------------------------------- -- | Is a type a canonical LHS? That is, is it a tyvar or an exactly-saturated -- type family application? -- Does not look through type synonyms. canEqLHS_maybe :: Xi -> Maybe CanEqLHS canEqLHS_maybe xi | Just tv <- tcGetTyVar_maybe xi = Just $ TyVarLHS tv | Just (tc, args) <- tcSplitTyConApp_maybe xi , isTypeFamilyTyCon tc , args `lengthIs` tyConArity tc = Just $ TyFamLHS tc args | otherwise = Nothing -- | Convert a 'CanEqLHS' back into a 'Type' canEqLHSType :: CanEqLHS -> TcType canEqLHSType (TyVarLHS tv) = mkTyVarTy tv canEqLHSType (TyFamLHS fam_tc fam_args) = mkTyConApp fam_tc fam_args -- | Retrieve the kind of a 'CanEqLHS' canEqLHSKind :: CanEqLHS -> TcKind canEqLHSKind (TyVarLHS tv) = tyVarKind tv canEqLHSKind (TyFamLHS fam_tc fam_args) = piResultTys (tyConKind fam_tc) fam_args -- | Are two 'CanEqLHS's equal? 
eqCanEqLHS :: CanEqLHS -> CanEqLHS -> Bool
eqCanEqLHS (TyVarLHS tv1) (TyVarLHS tv2) = tv1 == tv2
eqCanEqLHS (TyFamLHS fam_tc1 fam_args1) (TyFamLHS fam_tc2 fam_args2)
  = tcEqTyConApps fam_tc1 fam_args1 fam_tc2 fam_args2
eqCanEqLHS _ _ = False

{-
************************************************************************
*                                                                      *
            Simple functions over evidence variables
*                                                                      *
************************************************************************
-}

---------------- Getting free tyvars -------------------------

-- | Returns free variables of constraints as a non-deterministic set
tyCoVarsOfCt :: Ct -> TcTyCoVarSet
tyCoVarsOfCt = fvVarSet . tyCoFVsOfCt

-- | Returns free variables of constraints as a non-deterministic set
tyCoVarsOfCtEv :: CtEvidence -> TcTyCoVarSet
tyCoVarsOfCtEv = fvVarSet . tyCoFVsOfCtEv

-- | Returns free variables of constraints as a deterministically ordered
-- list. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoVarsOfCtList :: Ct -> [TcTyCoVar]
tyCoVarsOfCtList = fvVarList . tyCoFVsOfCt

-- | Returns free variables of constraints as a deterministically ordered
-- list. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoVarsOfCtEvList :: CtEvidence -> [TcTyCoVar]
tyCoVarsOfCtEvList = fvVarList . tyCoFVsOfType . ctEvPred

-- | Returns free variables of constraints as a composable FV computation.
-- See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfCt :: Ct -> FV
tyCoFVsOfCt ct = tyCoFVsOfType (ctPred ct)
  -- This must consult only the ctPred, so that it gets *tidied* fvs if the
  -- constraint has been tidied. Tidying a constraint does not tidy the
  -- fields of the Ct, only the predicate in the CtEvidence.

-- | Returns free variables of constraints as a composable FV computation.
-- See Note [Deterministic FV] in GHC.Utils.FV.
tyCoFVsOfCtEv :: CtEvidence -> FV
tyCoFVsOfCtEv ct = tyCoFVsOfType (ctEvPred ct)

-- | Returns free variables of a bag of constraints as a non-deterministic
-- set. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfCts :: Cts -> TcTyCoVarSet
tyCoVarsOfCts = fvVarSet . tyCoFVsOfCts

-- | Returns free variables of a bag of constraints as a deterministically
-- ordered list. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfCtsList :: Cts -> [TcTyCoVar]
tyCoVarsOfCtsList = fvVarList . tyCoFVsOfCts

-- | Returns free variables of a list of 'CtEvidence's as a deterministically
-- ordered list. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoVarsOfCtEvsList :: [CtEvidence] -> [TcTyCoVar]
tyCoVarsOfCtEvsList = fvVarList . tyCoFVsOfCtEvs

-- | Returns free variables of a bag of constraints as a composable FV
-- computation. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfCts :: Cts -> FV
tyCoFVsOfCts = foldr (unionFV . tyCoFVsOfCt) emptyFV

-- | Returns free variables of a list of 'CtEvidence's as a composable FV
-- computation. See Note [Deterministic FV] in GHC.Utils.FV.
tyCoFVsOfCtEvs :: [CtEvidence] -> FV
tyCoFVsOfCtEvs = foldr (unionFV . tyCoFVsOfCtEv) emptyFV

-- | Returns free variables of WantedConstraints as a non-deterministic
-- set. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfWC :: WantedConstraints -> TyCoVarSet
-- Only called on *zonked* things
tyCoVarsOfWC = fvVarSet . tyCoFVsOfWC

-- | Returns free variables of WantedConstraints as a deterministically
-- ordered list. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoVarsOfWCList :: WantedConstraints -> [TyCoVar]
-- Only called on *zonked* things
tyCoVarsOfWCList = fvVarList . tyCoFVsOfWC

-- | Returns free variables of WantedConstraints as a composable FV
-- computation. See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfWC :: WantedConstraints -> FV
-- Only called on *zonked* things
tyCoFVsOfWC (WC { wc_simple = simple, wc_impl = implic, wc_errors = errors })
  = tyCoFVsOfCts simple `unionFV`
    tyCoFVsOfBag tyCoFVsOfImplic implic `unionFV`
    tyCoFVsOfBag tyCoFVsOfDelayedError errors

-- | Returns free variables of Implication as a composable FV computation.
-- See Note [Deterministic FV] in "GHC.Utils.FV".
tyCoFVsOfImplic :: Implication -> FV
-- Only called on *zonked* things
tyCoFVsOfImplic (Implic { ic_skols = skols
                        , ic_given = givens
                        , ic_wanted = wanted })
  | isEmptyWC wanted
  = emptyFV
  | otherwise
  = tyCoFVsVarBndrs skols  $
    tyCoFVsVarBndrs givens $
    tyCoFVsOfWC wanted

tyCoFVsOfDelayedError :: DelayedError -> FV
tyCoFVsOfDelayedError (DE_Hole hole) = tyCoFVsOfHole hole
tyCoFVsOfDelayedError (DE_NotConcrete {}) = emptyFV

tyCoFVsOfHole :: Hole -> FV
tyCoFVsOfHole (Hole { hole_ty = ty }) = tyCoFVsOfType ty

tyCoFVsOfBag :: (a -> FV) -> Bag a -> FV
tyCoFVsOfBag tvs_of = foldr (unionFV . tvs_of) emptyFV

isGivenLoc :: CtLoc -> Bool
isGivenLoc loc = isGivenOrigin (ctLocOrigin loc)

{-
************************************************************************
*                                                                      *
                    CtEvidence
         The "flavor" of a canonical constraint
*                                                                      *
************************************************************************
-}

isWantedCt :: Ct -> Bool
isWantedCt = isWanted . ctEvidence

isGivenCt :: Ct -> Bool
isGivenCt = isGiven . ctEvidence

{- Note [Custom type errors in constraints]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When GHC reports a type-error about an unsolved-constraint, we check
to see if the constraint contains any custom-type errors, and if so
we report them.  Here are some examples of constraints containing type
errors:

TypeError msg           -- The actual constraint is a type error

TypeError msg ~ Int     -- Some type was supposed to be Int, but ended up
                        -- being a type error instead

Eq (TypeError msg)      -- A class constraint is stuck due to a type error

F (TypeError msg) ~ a   -- A type function failed to evaluate due to a type err

It is also possible to have constraints where the type error is nested deeper,
for example see #11990, and also:

Eq (F (TypeError msg))  -- Here the type error is nested under a type-function
                        -- call, which failed to evaluate because of it,
                        -- and so the `Eq` constraint was unsolved.
                        -- This may happen when one function calls another
                        -- and the called function produced a custom type error.
-}

-- | A constraint is considered to be a custom type error, if it contains
-- custom type errors anywhere in it.
-- See Note [Custom type errors in constraints]
getUserTypeErrorMsg :: PredType -> Maybe Type
getUserTypeErrorMsg pred = msum $ userTypeError_maybe pred
                                  : map getUserTypeErrorMsg (subTys pred)
  where
    -- Richard thinks this function is very broken. What is subTys
    -- supposed to be doing? Why are exactly-saturated tyconapps special?
    -- What stops this from accidentally ripping apart a call to TypeError?
    subTys t = case splitAppTys t of
                 (t,[]) ->
                   case splitTyConApp_maybe t of
                     Nothing     -> []
                     Just (_,ts) -> ts
                 (t,ts) -> t : ts

isUserTypeError :: PredType -> Bool
isUserTypeError pred = case getUserTypeErrorMsg pred of
                         Just _ -> True
                         _      -> False

isPendingScDict :: Ct -> Bool
isPendingScDict (CDictCan { cc_pend_sc = psc }) = psc
-- Says whether this is a CDictCan with cc_pend_sc is True;
-- i.e. pending un-expanded superclasses
isPendingScDict _ = False

pendingScDict_maybe :: Ct -> Maybe Ct
-- Says whether this is a CDictCan with cc_pend_sc is True,
-- AND if so flips the flag
pendingScDict_maybe ct@(CDictCan { cc_pend_sc = True })
  = Just (ct { cc_pend_sc = False })
pendingScDict_maybe _ = Nothing

pendingScInst_maybe :: QCInst -> Maybe QCInst
-- Same as isPendingScDict, but for QCInsts
pendingScInst_maybe qci@(QCI { qci_pend_sc = True })
  = Just (qci { qci_pend_sc = False })
pendingScInst_maybe _ = Nothing

superClassesMightHelp :: WantedConstraints -> Bool
-- ^ True if taking superclasses of givens, or of wanteds (to perhaps
-- expose more equalities or functional dependencies) might help to
-- solve this constraint.  See Note [When superclasses help]
superClassesMightHelp (WC { wc_simple = simples, wc_impl = implics })
  = anyBag might_help_ct simples || anyBag might_help_implic implics
  where
    might_help_implic ic
       | IC_Unsolved <- ic_status ic = superClassesMightHelp (ic_wanted ic)
       | otherwise                   = False

    might_help_ct ct = not (is_ip ct)

    is_ip (CDictCan { cc_class = cls }) = isIPClass cls
    is_ip _                             = False

getPendingWantedScs :: Cts -> ([Ct], Cts)
-- Split out the dictionary constraints with un-expanded superclasses,
-- flipping their cc_pend_sc flag as we go (via pendingScDict_maybe)
getPendingWantedScs simples
  = mapAccumBagL get [] simples
  where
    get acc ct | Just ct' <- pendingScDict_maybe ct
               = (ct':acc, ct')
               | otherwise
               = (acc, ct)

{- Note [When superclasses help]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First read Note [The superclass story] in GHC.Tc.Solver.Canonical.

We expand superclasses and iterate only if there is an unsolved wanted
for which expansion of superclasses (e.g. from given constraints)
might actually help. The function superClassesMightHelp tells if
doing this superclass expansion might help solve this constraint.
Note that

  * We look inside implications; maybe it'll help to expand the Givens
    at level 2 to help solve an unsolved Wanted buried inside an
    implication.  E.g.
        forall a. Ord a => forall b. [W] Eq a

  * We say "no" for implicit parameters.
    If we have [W] ?x::ty, expanding superclasses won't help:
    - Superclasses can't be implicit parameters
    - If we have a [G] ?x:ty2, then we'll have another unsolved
      [W] ty ~ ty2 (from the functional dependency)
      which will trigger superclass expansion.

    It's a bit of a special case, but it's easy to do.  The runtime cost
    is low because the unsolved set is usually empty anyway (errors
    aside), and the first non-implicit-parameter will terminate the search.

    The special case is worth it (#11480, comment:2) because it
    applies to CallStack constraints, which aren't type errors. If we have
       f :: (C a) => blah
       f x = ...undefined...
    we'll get a CallStack constraint.  If that's the only unsolved
    constraint it'll eventually be solved by defaulting.  So we don't
    want to emit warnings about hitting the simplifier's iteration
    limit.  A CallStack constraint really isn't an unsolved
    constraint; it can always be solved by defaulting.
-}

singleCt :: Ct -> Cts
singleCt = unitBag

andCts :: Cts -> Cts -> Cts
andCts = unionBags

listToCts :: [Ct] -> Cts
listToCts = listToBag

ctsElts :: Cts -> [Ct]
ctsElts = bagToList

consCts :: Ct -> Cts -> Cts
consCts = consBag

snocCts :: Cts -> Ct -> Cts
snocCts = snocBag

extendCtsList :: Cts -> [Ct] -> Cts
extendCtsList cts xs | null xs   = cts
                     | otherwise = cts `unionBags` listToBag xs

andManyCts :: [Cts] -> Cts
andManyCts = unionManyBags

emptyCts :: Cts
emptyCts = emptyBag

isEmptyCts :: Cts -> Bool
isEmptyCts = isEmptyBag

pprCts :: Cts -> SDoc
pprCts cts = vcat (map ppr (bagToList cts))

{-
************************************************************************
*                                                                      *
                Wanted constraints
*                                                                      *
************************************************************************
-}

data WantedConstraints
  = WC { wc_simple :: Cts              -- Unsolved constraints, all wanted
       , wc_impl   :: Bag Implication
       , wc_errors :: Bag DelayedError
       }

emptyWC :: WantedConstraints
emptyWC = WC { wc_simple = emptyBag
             , wc_impl   = emptyBag
             , wc_errors = emptyBag }

mkSimpleWC :: [CtEvidence] -> WantedConstraints
mkSimpleWC cts
  = emptyWC { wc_simple = listToBag (map mkNonCanonical cts) }

mkImplicWC :: Bag Implication -> WantedConstraints
mkImplicWC implic
  = emptyWC { wc_impl = implic }

isEmptyWC :: WantedConstraints -> Bool
isEmptyWC (WC { wc_simple = f, wc_impl = i, wc_errors = errors })
  = isEmptyBag f && isEmptyBag i && isEmptyBag errors

-- | Checks whether the given wanted constraints are solved, i.e.
-- that there are no simple constraints left and all the implications
-- are solved.
isSolvedWC :: WantedConstraints -> Bool
isSolvedWC WC {wc_simple = wc_simple, wc_impl = wc_impl, wc_errors = errors} =
  isEmptyBag wc_simple
  && allBag (isSolvedStatus . ic_status) wc_impl
  && isEmptyBag errors

andWC :: WantedConstraints -> WantedConstraints -> WantedConstraints
andWC (WC { wc_simple = f1, wc_impl = i1, wc_errors = e1 })
      (WC { wc_simple = f2, wc_impl = i2, wc_errors = e2 })
  = WC { wc_simple = f1 `unionBags` f2
       , wc_impl   = i1 `unionBags` i2
       , wc_errors = e1 `unionBags` e2 }

unionsWC :: [WantedConstraints] -> WantedConstraints
unionsWC = foldr andWC emptyWC

addSimples :: WantedConstraints -> Bag Ct -> WantedConstraints
addSimples wc cts
  = wc { wc_simple = wc_simple wc `unionBags` cts }
    -- Consider: Put the new constraints at the front, so they get solved first

addImplics :: WantedConstraints -> Bag Implication -> WantedConstraints
addImplics wc implic = wc { wc_impl = wc_impl wc `unionBags` implic }

addInsols :: WantedConstraints -> Bag Ct -> WantedConstraints
addInsols wc cts
  = wc { wc_simple = wc_simple wc `unionBags` cts }

addHoles :: WantedConstraints -> Bag Hole -> WantedConstraints
addHoles wc holes
  = wc { wc_errors = mapBag DE_Hole holes `unionBags` wc_errors wc }

addNotConcreteError :: WantedConstraints -> NotConcreteError -> WantedConstraints
addNotConcreteError wc err
  = wc { wc_errors = unitBag (DE_NotConcrete err) `unionBags` wc_errors wc }

addDelayedErrors :: WantedConstraints -> Bag DelayedError -> WantedConstraints
addDelayedErrors wc errs
  = wc { wc_errors = errs `unionBags` wc_errors wc }

dropMisleading :: WantedConstraints -> WantedConstraints
-- Drop misleading constraints; really just class constraints
-- See Note [Constraints and errors] in GHC.Tc.Utils.Monad
--   for why this function is so strange, treating the 'simples'
--   and the implications differently.  Sigh.
dropMisleading (WC { wc_simple = simples, wc_impl = implics, wc_errors = errors })
  = WC { wc_simple = filterBag insolubleWantedCt simples
       , wc_impl   = mapBag drop_implic implics
       , wc_errors = filterBag keep_delayed_error errors }
  where
    drop_implic implic
      = implic { ic_wanted = drop_wanted (ic_wanted implic) }
    drop_wanted (WC { wc_simple = simples, wc_impl = implics, wc_errors = errors })
      = WC { wc_simple = filterBag keep_ct simples
           , wc_impl   = mapBag drop_implic implics
           , wc_errors = filterBag keep_delayed_error errors }

    keep_ct ct = case classifyPredType (ctPred ct) of
                    ClassPred {} -> False
                    _ -> True

    keep_delayed_error (DE_Hole hole) = isOutOfScopeHole hole
    keep_delayed_error (DE_NotConcrete {}) = True

isSolvedStatus :: ImplicStatus -> Bool
isSolvedStatus (IC_Solved {}) = True
isSolvedStatus _              = False

isInsolubleStatus :: ImplicStatus -> Bool
isInsolubleStatus IC_Insoluble    = True
isInsolubleStatus IC_BadTelescope = True
isInsolubleStatus _               = False

insolubleImplic :: Implication -> Bool
insolubleImplic ic = isInsolubleStatus (ic_status ic)

-- | Gather all the type variables from 'WantedConstraints'
-- that it would be unhelpful to default. For the moment,
-- these are only 'ConcreteTv' metavariables participating
-- in a nominal equality whose other side is not concrete;
-- it's usually better to report those as errors instead of
-- defaulting.
nonDefaultableTyVarsOfWC :: WantedConstraints -> TyCoVarSet
-- Currently used in simplifyTop and in tcRule.
-- TODO: should we also use this in decideQuantifiedTyVars, kindGeneralize{All,Some}?
nonDefaultableTyVarsOfWC (WC { wc_simple = simples, wc_impl = implics, wc_errors = errs })
  =                concatMapBag non_defaultable_tvs_of_ct                 simples
    `unionVarSet`  concatMapBag (nonDefaultableTyVarsOfWC . ic_wanted)    implics
    `unionVarSet`  concatMapBag non_defaultable_tvs_of_err                errs
  where
    concatMapBag :: (a -> TyVarSet) -> Bag a -> TyCoVarSet
    concatMapBag f = foldr (\ r acc -> f r `unionVarSet` acc) emptyVarSet

    -- Don't default ConcreteTv metavariables involved
    -- in an equality with something non-concrete: it's usually
    -- better to report the unsolved Wanted.
    --
    -- Example: alpha[conc] ~# rr[sk].
    non_defaultable_tvs_of_ct :: Ct -> TyCoVarSet
    non_defaultable_tvs_of_ct ct =
      -- NB: using classifyPredType instead of inspecting the Ct
      -- so that we deal uniformly with CNonCanonical (which come up in tcRule),
      -- CEqCan (unsolved but potentially soluble, e.g. @alpha[conc] ~# RR@)
      -- and CIrredCan.
      case classifyPredType $ ctPred ct of
        EqPred NomEq lhs rhs
          | Just tv <- getTyVar_maybe lhs
          , isConcreteTyVar tv
          , not (isConcrete rhs)
          -> unitVarSet tv
          | Just tv <- getTyVar_maybe rhs
          , isConcreteTyVar tv
          , not (isConcrete lhs)
          -> unitVarSet tv
        _ -> emptyVarSet

    -- Make sure to apply the same logic as above to delayed errors.
    non_defaultable_tvs_of_err (DE_NotConcrete err)
      = case err of
          NCE_FRR { nce_frr_origin = frr } -> tyCoVarsOfType (frr_type frr)
    non_defaultable_tvs_of_err (DE_Hole {}) = emptyVarSet

insolubleWC :: WantedConstraints -> Bool
insolubleWC (WC { wc_impl = implics, wc_simple = simples, wc_errors = errors })
  =  anyBag insolubleWantedCt simples
  || anyBag insolubleImplic implics
  || anyBag is_insoluble errors
  where
    is_insoluble (DE_Hole hole) = isOutOfScopeHole hole -- See Note [Insoluble holes]
    is_insoluble (DE_NotConcrete {}) = True

insolubleWantedCt :: Ct -> Bool
-- Definitely insoluble, in particular /excluding/ type-hole constraints
-- Namely:
--   a) an insoluble constraint as per 'insolubleCt', i.e. either
--        - an insoluble equality constraint (e.g. Int ~ Bool), or
--        - a custom type error constraint, TypeError msg :: Constraint
--   b) that does not arise from a Given or a Wanted/Wanted fundep interaction
--
-- See Note [Given insolubles].
insolubleWantedCt ct = insolubleCt ct &&
                       not (arisesFromGivens ct) &&
                       not (isWantedWantedFunDepOrigin (ctOrigin ct))

insolubleEqCt :: Ct -> Bool
-- Returns True of /equality/ constraints
-- that are /definitely/ insoluble
-- It won't detect some definite errors like
--       F a ~ T (F a)
-- where F is a type family, which actually has an occurs check
--
-- The function is tuned for application /after/ constraint solving
--       i.e. assuming canonicalisation has been done
-- E.g.  It'll reply True  for     a ~ [a]
--               but False for   [a] ~ a
-- and
--                   True for  Int ~ F a Int
--               but False for  Maybe Int ~ F a Int Int
--               (where F is an arity-1 type function)
insolubleEqCt (CIrredCan { cc_reason = reason }) = isInsolubleReason reason
insolubleEqCt _                                  = False

-- | Returns True of equality constraints that are definitely insoluble,
-- as well as TypeError constraints.
-- Can return 'True' for Given constraints, unlike 'insolubleWantedCt'.
--
-- This function is critical for accurate pattern-match overlap warnings.
-- See Note [Pattern match warnings with insoluble Givens] in GHC.Tc.Solver
--
-- Note that this does not traverse through the constraint to find
-- nested custom type errors: it only detects @TypeError msg :: Constraint@,
-- and not e.g. @Eq (TypeError msg)@.
insolubleCt :: Ct -> Bool
insolubleCt ct
  | Just _ <- userTypeError_maybe (ctPred ct)
  -- Don't use 'isUserTypeErrorCt' here, as that function is too eager:
  -- the TypeError might appear inside a type family application
  -- which might later reduce, but we only want to return 'True'
  -- for constraints that are definitely insoluble.
  --
  -- Test case: T11503, with the 'Assert' type family:
  --
  -- > type Assert :: Bool -> Constraint -> Constraint
  -- > type family Assert check errMsg where
  -- >   Assert 'True  _errMsg = ()
  -- >   Assert _check errMsg  = errMsg
  = True
  | otherwise
  = insolubleEqCt ct

-- | Does this hole represent an "out of scope" error?
-- See Note [Insoluble holes]
isOutOfScopeHole :: Hole -> Bool
isOutOfScopeHole (Hole { hole_occ = occ }) = not (startsWithUnderscore occ)

instance Outputable WantedConstraints where
  ppr (WC {wc_simple = s, wc_impl = i, wc_errors = e})
   = text "WC" <+> braces (vcat
        [ ppr_bag (text "wc_simple") s
        , ppr_bag (text "wc_impl") i
        , ppr_bag (text "wc_errors") e ])

ppr_bag :: Outputable a => SDoc -> Bag a -> SDoc
ppr_bag doc bag
 | isEmptyBag bag = empty
 | otherwise      = hang (doc <+> equals)
                       2 (foldr (($$) . ppr) empty bag)

{- Note [Given insolubles]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider (#14325, comment:)
    class (a~b) => C a b

    foo :: C a c => a -> c
    foo x = x

    hm3 :: C (f b) b => b -> f b
    hm3 x = foo x

In the RHS of hm3, from the [G] C (f b) b we get the insoluble
[G] f b ~# b.  Then we also get an unsolved [W] C b (f b).
Residual implication looks like
    forall b. C (f b) b => [G] f b ~# b
                           [W] C b (f b)

We do /not/ want to set the implication status to IC_Insoluble,
because that'll suppress reports of [W] C b (f b).  But we
may not report the insoluble [G] f b ~# b either (see Note [Given errors]
in GHC.Tc.Errors), so we may fail to report anything at all!  Yikes.

Bottom line: insolubleWC (called in GHC.Tc.Solver.setImplicationStatus)
             should ignore givens even if they are insoluble.

Note [Insoluble holes]
~~~~~~~~~~~~~~~~~~~~~~
Hole constraints that ARE NOT treated as truly insoluble:
  a) type holes, arising from PartialTypeSignatures,
  b) "true" expression holes arising from TypedHoles

An "expression hole" or "type hole" isn't really an error
at all; it's a report saying "_ :: Int" here.  But an out-of-scope
variable masquerading as expression holes IS treated as truly
insoluble, so that it trumps other errors during error reporting.
Yuk!

************************************************************************
*                                                                      *
                Implication constraints
*                                                                      *
************************************************************************
-}

data Implication
  = Implic {   -- Invariants for a tree of implications:
               -- see TcType Note [TcLevel invariants]

      ic_tclvl :: TcLevel,       -- TcLevel of unification variables
                                 -- allocated /inside/ this implication

      ic_info  :: SkolemInfoAnon,    -- See Note [Skolems in an implication]
                                     -- See Note [Shadowing in a constraint]

      ic_skols :: [TcTyVar],     -- Introduced skolems; always skolem TcTyVars
                                 -- Their level numbers should be precisely ic_tclvl
                                 -- Their SkolemInfo should be precisely ic_info (almost)
                                 --       See Note [Implication invariants]

      ic_given  :: [EvVar],      -- Given evidence variables
                                 --   (order does not matter)
                                 -- See Invariant (GivenInv) in GHC.Tc.Utils.TcType

      ic_given_eqs :: HasGivenEqs,  -- Are there Given equalities here?

      ic_warn_inaccessible :: Bool,
                                 -- True  <=> -Winaccessible-code is enabled
                                 -- at construction. See
                                 -- Note [Avoid -Winaccessible-code when deriving]
                                 -- in GHC.Tc.TyCl.Instance

      ic_env   :: TcLclEnv,
                                 -- Records the TcLclEnv at the time of creation.
                                 --
                                 -- The TcLclEnv gives the source location
                                 -- and error context for the implication, and
                                 -- hence for all the given evidence variables.

      ic_wanted :: WantedConstraints,  -- The wanteds
                                       -- See Invariant (WantedInf) in GHC.Tc.Utils.TcType

      ic_binds  :: EvBindsVar,    -- Points to the place to fill in the
                                  -- abstraction and bindings.

      -- The ic_need fields keep track of which Given evidence
      -- is used by this implication or its children
      -- NB: including stuff used by nested implications that have since
      --     been discarded
      -- See Note [Needed evidence variables]
      ic_need_inner :: VarSet,    -- Includes all used Given evidence
      ic_need_outer :: VarSet,    -- Includes only the free Given evidence
                                  --  i.e. ic_need_inner after deleting
                                  --       (a) givens (b) binders of ic_binds

      ic_status :: ImplicStatus
    }

implicationPrototype :: Implication
implicationPrototype
   = Implic { -- These fields must be initialised
              ic_tclvl = panic "newImplic:tclvl"
            , ic_binds = panic "newImplic:binds"
            , ic_info  = panic "newImplic:info"
            , ic_env   = panic "newImplic:env"
            , ic_warn_inaccessible = panic "newImplic:warn_inaccessible"

              -- The rest have sensible default values
            , ic_skols      = []
            , ic_given      = []
            , ic_wanted     = emptyWC
            , ic_given_eqs  = MaybeGivenEqs
            , ic_status     = IC_Unsolved
            , ic_need_inner = emptyVarSet
            , ic_need_outer = emptyVarSet }

data ImplicStatus
  = IC_Solved     -- All wanteds in the tree are solved, all the way down
       { ics_dead :: [EvVar] }  -- Subset of ic_given that are not needed
         -- See Note [Tracking redundant constraints] in GHC.Tc.Solver

  | IC_Insoluble  -- At least one insoluble constraint in the tree

  | IC_BadTelescope  -- Solved, but the skolems in the telescope are out of
                     -- dependency order. See Note [Checking telescopes]

  | IC_Unsolved   -- Neither of the above; might go either way

data HasGivenEqs -- See Note [HasGivenEqs]
  = NoGivenEqs      -- Definitely no given equalities,
                    --   except by Note [Let-bound skolems] in GHC.Tc.Solver.InertSet
  | LocalGivenEqs   -- Might have Given equalities, but only ones that affect only
                    --   local skolems e.g. forall a b. (a ~ F b) => ...
  | MaybeGivenEqs   -- Might have any kind of Given equalities; no floating out
                    --   is possible.
  deriving Eq

type UserGiven = Implication

getUserGivensFromImplics :: [Implication] -> [UserGiven]
getUserGivensFromImplics implics
  = reverse (filterOut (null . ic_given) implics)

{- Note [HasGivenEqs]
~~~~~~~~~~~~~~~~~~~~~
The GivenEqs data type describes the Given constraints of an implication constraint:

* NoGivenEqs: definitely no Given equalities, except perhaps let-bound skolems
  which don't count: see Note [Let-bound skolems] in GHC.Tc.Solver.InertSet
  Examples: forall a. Eq a => ...
            forall a. (Show a, Num a) => ...
            forall a. a ~ Either Int Bool => ...  -- Let-bound skolem

* LocalGivenEqs: definitely no Given equalities that would affect principal
  types.  But may have equalities that affect only skolems of this implication
  (and hence do not affect principal types)
  Examples: forall a. F a ~ Int => ...
            forall a b. F a ~ G b => ...

* MaybeGivenEqs: may have Given equalities that would affect principal
  types
  Examples: forall. (a ~ b) => ...
            forall a. F a ~ b => ...
            forall a. c a => ...       -- The 'c' might be instantiated to (b ~)
            forall a. C a b => ....
               where class x~y => C a b
               so there is an equality in the superclass of a Given

The HasGivenEqs classifications affect two things:

* Suppressing redundant givens during error reporting; see GHC.Tc.Errors
  Note [Suppress redundant givens during error reporting]

* Floating in approximateWC.

Specifically, here's how it goes:

                 Stops floating    |   Suppresses Givens in errors
                 in approximateWC  |
                 -----------------------------------------------
 NoGivenEqs            NO          |         YES
 LocalGivenEqs         NO          |         NO
 MaybeGivenEqs         YES         |         NO
-}

instance Outputable Implication where
  ppr (Implic { ic_tclvl = tclvl, ic_skols = skols
              , ic_given = given, ic_given_eqs = given_eqs
              , ic_wanted = wanted, ic_status = status
              , ic_binds = binds
              , ic_need_inner = need_in, ic_need_outer = need_out
              , ic_info = info })
   = hang (text "Implic" <+> lbrace)
        2 (sep [ text "TcLevel =" <+> ppr tclvl
               , text "Skolems =" <+> pprTyVars skols
               , text "Given-eqs =" <+> ppr given_eqs
               , text "Status =" <+> ppr status
               , hang (text "Given =")  2 (pprEvVars given)
               , hang (text "Wanted =") 2 (ppr wanted)
               , text "Binds =" <+> ppr binds
               , whenPprDebug (text "Needed inner =" <+> ppr need_in)
               , whenPprDebug (text "Needed outer =" <+> ppr need_out)
               , pprSkolInfo info ] <+> rbrace)

instance Outputable ImplicStatus where
  ppr IC_Insoluble    = text "Insoluble"
  ppr IC_BadTelescope = text "Bad telescope"
  ppr IC_Unsolved     = text "Unsolved"
  ppr (IC_Solved { ics_dead = dead })
    = text "Solved" <+> (braces (text "Dead givens =" <+> ppr dead))
checkTelescopeSkol :: SkolemInfoAnon -> Bool
-- See Note [Checking telescopes]
checkTelescopeSkol (ForAllSkol {}) = True
checkTelescopeSkol _               = False

instance Outputable HasGivenEqs where
  ppr NoGivenEqs    = text "NoGivenEqs"
  ppr LocalGivenEqs = text "LocalGivenEqs"
  ppr MaybeGivenEqs = text "MaybeGivenEqs"

-- Used in GHC.Tc.Solver.Monad.getHasGivenEqs
instance Semigroup HasGivenEqs where
  NoGivenEqs <> other = other
  other <> NoGivenEqs = other
  MaybeGivenEqs <> _other = MaybeGivenEqs
  _other <> MaybeGivenEqs = MaybeGivenEqs
  LocalGivenEqs <> LocalGivenEqs = LocalGivenEqs

-- Used in GHC.Tc.Solver.Monad.getHasGivenEqs
instance Monoid HasGivenEqs where
  mempty = NoGivenEqs

{- Note [Checking telescopes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When kind-checking a /user-written/ type, we might have a "bad telescope"
like this one:
  data SameKind :: forall k. k -> k -> Type
  type Foo :: forall a k (b :: k). SameKind a b -> Type

The kind of 'a' mentions 'k' which is bound after 'a'.  Oops.

One approach to doing this would be to bring each of a, k, and b into
scope, one at a time, creating a separate implication constraint for
each one, and bumping the TcLevel. This would work, because the kind
of, say, a would be untouchable when k is in scope (and the constraint
couldn't float out because k blocks it). However, it leads to terrible
error messages, complaining about skolem escape. While it is indeed a
problem of skolem escape, we can do better.

Instead, our approach is to bring the block of variables into scope
all at once, creating one implication constraint for the lot:

* We make a single implication constraint when kind-checking
  the 'forall' in Foo's kind, something like
    forall a k (b::k). { wanted constraints }

* Having solved {wanted}, before discarding the now-solved implication,
  the constraint solver checks the dependency order of the skolem
  variables (ic_skols).  This is done in setImplicationStatus.

* This check is only necessary if the implication was born from a
  'forall' in a user-written signature (the HsForAllTy case in
  GHC.Tc.Gen.HsType.  If, say, it comes from checking a pattern match
  that binds existentials, where the type of the data constructor is
  known to be valid (in tcConPat), no need for the check.

  So the check is done /if and only if/ ic_info is ForAllSkol.

* If ic_info is (ForAllSkol dt dvs), the dvs::SDoc displays the
  original, user-written type variables.

* Be careful /NOT/ to discard an implication with a ForAllSkol
  ic_info, even if ic_wanted is empty.  We must give the
  constraint solver a chance to make that bad-telescope test!  Hence
  the extra guard in emitResidualTvConstraint; see #16247

* Don't mix up inferred and explicit variables in the same implication
  constraint.  E.g.
      foo :: forall a kx (b :: kx). SameKind a b
  We want an implication
      Implic { ic_skol = [(a::kx), kx, (b::kx)], ... }
  but GHC will attempt to quantify over kx, since it is free in (a::kx),
  and it's hopelessly confusing to report an error about quantified
  variables   kx (a::kx) kx (b::kx).
  Instead, the outer quantification over kx should be in a separate
  implication. TL;DR: an explicit forall should generate an implication
  quantified only over those explicitly quantified variables.

Note [Needed evidence variables]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ic_need_evs field holds the free vars of ic_binds, and all the
ic_binds in nested implications.

  * Main purpose: if one of the ic_givens is not mentioned in here, it
    is redundant.

  * solveImplication may drop an implication altogether if it has no
    remaining 'wanteds'. But we still track the free vars of its
    evidence binds, even though it has now disappeared.

Note [Shadowing in a constraint]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We assume NO SHADOWING in a constraint.  Specifically
 * The unification variables are all implicitly quantified at top
   level, and are all unique
 * The skolem variables bound in ic_skols are all fresh when the
   implication is created.
So we can safely substitute. For example, if we have
   forall a.  a~Int => ...(forall b. ...a...)...
we can push the (a~Int) constraint inwards in the "givens" without
worrying that 'b' might clash.

Note [Skolems in an implication]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The skolems in an implication are used:

* When considering floating a constraint outside the implication in
  GHC.Tc.Solver.floatEqualities or GHC.Tc.Solver.approximateImplications
  For this, we can treat ic_skols as a set.

* When checking that a /user-specified/ forall (ic_info = ForAllSkol tvs)
  has its variables in the correct order; see Note [Checking telescopes].
  Only for these implications does ic_skols need to be a list.

Nota bene: Although ic_skols is a list, it is not necessarily
in dependency order:
- In the ic_info=ForAllSkol case, the user might have written them
  in the wrong order
- In the case of a type signature like
      f :: [a] -> [b]
  the renamer gathers the implicit "outer" forall'd variables {a,b}, but
  does not know what order to put them in.  The type checker can sort them
  into dependency order, but only after solving all the kind constraints;
  and to do that it's convenient to create the Implication!

So we accept that ic_skols may be out of order.  Think of it as a set or
(in the case of ic_info=ForAllSkol, a list in user-specified, and possibly
wrong, order).

Note [Insoluble constraints]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some of the errors that we get during canonicalization are best
reported when all constraints have been simplified as much as
possible. For instance, assume that during simplification the
following constraints arise:

 [Wanted]   F alpha ~  uf1
 [Wanted]   beta ~ uf1 beta

When canonicalizing the wanted (beta ~ uf1 beta), if we eagerly fail
we will simply see a message:
    'Can't construct the infinite type  beta ~ uf1 beta'
and the user has no idea what the uf1 variable is.

Instead our plan is that we will NOT fail immediately, but:
    (1) Record the "frozen" error in the ic_insols field
    (2) Isolate the offending constraint from the rest of the inerts
    (3) Keep on simplifying/canonicalizing

At the end, we will hopefully have substituted uf1 := F alpha, and we
will be able to report a more informative error:
    'Can't construct the infinite type beta ~ F alpha beta'

************************************************************************
*                                                                      *
            Invariant checking (debug only)
*                                                                      *
************************************************************************

Note [Implication invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The skolems of an implication have the following invariants, which are checked
by checkImplicationInvariants:

a) They are all SkolemTv TcTyVars; no TyVars, no unification variables
b) Their TcLevel matches the ic_lvl for the implication
c) Their SkolemInfo matches the implication.

Actually (c) is not quite true.  Consider
   data T a = forall b. MkT a b
In tcConDecl for MkT we'll create an implication with ic_info of
DataConSkol; but the type variable 'a' will have a SkolemInfo of
TyConSkol.  So we allow the tyvar to have a SkolemInfo of TyConFlav if
the implication SkolemInfo is DataConSkol.
-}

checkImplicationInvariants, check_implic :: (HasCallStack, Applicative m) => Implication -> m ()
{-# INLINE checkImplicationInvariants #-}
-- Nothing => OK, Just doc => doc gives info
checkImplicationInvariants implic = when debugIsOn (check_implic implic)

check_implic implic@(Implic { ic_tclvl = lvl
                            , ic_info = skol_info
                            , ic_skols = skols })
  | null bads = pure ()
  | otherwise = massertPpr False (vcat [ text "checkImplicationInvariants failure"
                                       , nest 2 (vcat bads)
                                       , ppr implic ])
  where
    bads = mapMaybe check skols

    check :: TcTyVar -> Maybe SDoc
    check tv | not (isTcTyVar tv) = Just (ppr tv <+> text "is not a TcTyVar")
             | otherwise          = check_details tv (tcTyVarDetails tv)

    check_details :: TcTyVar -> TcTyVarDetails -> Maybe SDoc
    check_details tv (SkolemTv tv_skol_info tv_lvl _)
      | not (tv_lvl == lvl)
      = Just (vcat [ ppr tv <+> text "has level" <+> ppr tv_lvl
                   , text "ic_lvl" <+> ppr lvl ])
      | not (skol_info `checkSkolInfoAnon` skol_info_anon)
      = Just (vcat [ ppr tv <+> text "has skol info" <+> ppr skol_info_anon
                   , text "ic_info" <+> ppr skol_info ])
      | otherwise
      = Nothing
      where
        skol_info_anon = getSkolemInfo tv_skol_info
    check_details tv details = Just (ppr tv <+> text "is not a SkolemTv" <+> ppr details)

checkSkolInfoAnon :: SkolemInfoAnon   -- From the implication
                  -> SkolemInfoAnon   -- From the type variable
                  -> Bool             -- True <=> ok
-- Used only for debug-checking; checkImplicationInvariants
-- So it doesn't matter much if it's incomplete
checkSkolInfoAnon sk1 sk2 = go sk1 sk2
  where
    go (SigSkol c1 t1 s1)   (SigSkol c2 t2 s2)   = c1==c2 && t1 `tcEqType` t2 && s1==s2
    go (SigTypeSkol cx1)    (SigTypeSkol cx2)    = cx1==cx2
    go (ForAllSkol _)       (ForAllSkol _)       = True
    go (IPSkol ips1)        (IPSkol ips2)        = ips1 == ips2
    go (DerivSkol pred1)    (DerivSkol pred2)    = pred1 `tcEqType` pred2
    go (TyConSkol f1 n1)    (TyConSkol f2 n2)    = f1==f2 && n1==n2
    go (DataConSkol n1)     (DataConSkol n2)     = n1==n2
    go InstSkol             InstSkol             = True
    go FamInstSkol          FamInstSkol          = True
    go BracketSkol          BracketSkol          = True
    go (RuleSkol n1)        (RuleSkol n2)        = n1==n2
    go (PatSkol c1 _)       (PatSkol c2 _)       = getName c1 == getName c2
       -- Too tedious to compare the HsMatchContexts
    go (InferSkol ids1)     (InferSkol ids2)     = equalLength ids1 ids2 &&
                                                   and (zipWith eq_pr ids1 ids2)
    go (UnifyForAllSkol t1) (UnifyForAllSkol t2) = t1 `tcEqType` t2
    go ReifySkol            ReifySkol            = True
    go QuantCtxtSkol        QuantCtxtSkol        = True
    go RuntimeUnkSkol       RuntimeUnkSkol       = True
    go ArrowReboundIfSkol   ArrowReboundIfSkol   = True
    go (UnkSkol _)          (UnkSkol _)          = True

    -------- Three slightly strange special cases --------
    go (DataConSkol _)      (TyConSkol f _)      = h98_data_decl f
    -- In the H98 declaration  data T a = forall b. MkT a b
    -- in tcConDecl for MkT we'll have a SkolemInfo in the implication of
    -- DataConSkol, but the type variable 'a' will have a SkolemInfo of TyConSkol

    go (DataConSkol _)      FamInstSkol          = True
    -- In   data/newtype instance T a = MkT (a -> a),
    -- in tcConDecl for MkT we'll have a SkolemInfo in the implication of
    -- DataConSkol, but 'a' will have SkolemInfo of FamInstSkol

    go FamInstSkol          InstSkol             = True
    -- In instance C (T a) where { type F (T a) b = ... }
    -- we have 'a' with SkolemInfo InstSkol, but we make an implication with
    -- SkolemInfo of FamInstSkol.  Very like the ConDecl/TyConSkol case

    go (ForAllSkol _)       _                    = True
    -- Telescope tests: we need a ForAllSkol to force the telescope
    -- test, but the skolems might come from (say) a family instance decl
    --    type instance forall a. F [a] = a->a

    go (SigTypeSkol DerivClauseCtxt) (TyConSkol f _) = h98_data_decl f
    -- e.g.   newtype T a = MkT ... deriving blah
    -- We use the skolems from T (TyConSkol) when typechecking
    -- the deriving clauses (SigTypeSkol DerivClauseCtxt)

    go _ _ = False

    eq_pr :: (Name,TcType) -> (Name,TcType) -> Bool
    eq_pr (i1,_) (i2,_) = i1==i2 -- Types may be differently zonked

    h98_data_decl DataTypeFlavour = True
    h98_data_decl NewtypeFlavour  = True
    h98_data_decl _               = False

{- *********************************************************************
*                                                                      *
                  Pretty printing
*                                                                      *
********************************************************************* -}

pprEvVars :: [EvVar] -> SDoc    -- Print with their types
pprEvVars ev_vars = vcat (map pprEvVarWithType ev_vars)

pprEvVarTheta :: [EvVar] -> SDoc
pprEvVarTheta ev_vars = pprTheta (map evVarPred ev_vars)

pprEvVarWithType :: EvVar -> SDoc
pprEvVarWithType v = ppr v <+> dcolon <+> pprType (evVarPred v)

wrapType :: Type -> [TyVar] -> [PredType] -> Type
wrapType ty skols givens = mkSpecForAllTys skols $ mkPhiTy givens ty

{-
************************************************************************
*                                                                      *
            CtEvidence
*                                                                      *
************************************************************************

Note [CtEvidence invariants]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `ctev_pred` field of a `CtEvidence` is a just a cache for the type
of the evidence.  More precisely:

* For Givens, `ctev_pred` = `varType ctev_evar`
* For Wanteds, `ctev_pred` = `evDestType ctev_dest`

where

  evDestType :: TcEvDest -> TcType
  evDestType (EvVarDest evVar)       = varType evVar
  evDestType (HoleDest coercionHole) = varType (coHoleCoVar coercionHole)

The invariant is maintained by `setCtEvPredType`, the only function that
updates the `ctev_pred` field of a `CtEvidence`.

Why is the invariant important? Because when the evidence is a coercion, it may
be used in (CastTy ty co); and then we may call `typeKind` on that type (e.g.
in the kind-check of `eqType`); and expect to see a fully zonked kind. (This
came up in test T13333, in the MR that fixed #20641, namely !6942.)
Historical Note [Evidence field of CtEvidence] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the past we tried leaving the `ctev_evar`/`ctev_dest` field of a constraint untouched (and hence un-zonked) on the grounds that it is never looked at. But in fact it is: the evidence can become part of a type (via `CastTy ty kco`) and we may later ask the kind of that type and expect a zonked result. (For example, in the kind-check of `eqType`.) The safest thing is simply to keep `ctev_evar`/`ctev_dest` in sync with `ctev_pref`, as stated in `Note [CtEvidence invariants]`. Note [Bind new Givens immediately] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For Givens we make new EvVars and bind them immediately. Two main reasons: * Gain sharing. E.g. suppose we start with g :: C a b, where class D a => C a b class (E a, F a) => D a If we generate all g's superclasses as separate EvTerms we might get selD1 (selC1 g) :: E a selD2 (selC1 g) :: F a selC1 g :: D a which we could do more economically as: g1 :: D a = selC1 g g2 :: E a = selD1 g1 g3 :: F a = selD2 g1 * For *coercion* evidence we *must* bind each given: class (a~b) => C a b where .... f :: C a b => .... Then in f's Givens we have g:(C a b) and the superclass sc(g,0):a~b. But that superclass selector can't (yet) appear in a coercion (see evTermCoercion), so the easy thing is to bind it to an Id. So a Given has EvVar inside it rather than (as previously) an EvTerm. -} -- | A place for type-checking evidence to go after it is generated. -- -- - Wanted equalities use HoleDest, -- - other Wanteds use EvVarDest. data TcEvDest = EvVarDest EvVar -- ^ bind this var to the evidence -- EvVarDest is always used for non-type-equalities -- e.g. 
class constraints | HoleDest CoercionHole -- ^ fill in this hole with the evidence -- HoleDest is always used for type-equalities -- See Note [Coercion holes] in GHC.Core.TyCo.Rep data CtEvidence = CtGiven -- Truly given, not depending on subgoals { ctev_pred :: TcPredType -- See Note [Ct/evidence invariant] , ctev_evar :: EvVar -- See Note [CtEvidence invariants] , ctev_loc :: CtLoc } | CtWanted -- Wanted goal { ctev_pred :: TcPredType -- See Note [Ct/evidence invariant] , ctev_dest :: TcEvDest -- See Note [CtEvidence invariants] , ctev_loc :: CtLoc , ctev_rewriters :: RewriterSet } -- See Note [Wanteds rewrite Wanteds] ctEvPred :: CtEvidence -> TcPredType -- The predicate of a flavor ctEvPred = ctev_pred ctEvLoc :: CtEvidence -> CtLoc ctEvLoc = ctev_loc ctEvOrigin :: CtEvidence -> CtOrigin ctEvOrigin = ctLocOrigin . ctEvLoc -- | Get the equality relation relevant for a 'CtEvidence' ctEvEqRel :: CtEvidence -> EqRel ctEvEqRel = predTypeEqRel . ctEvPred -- | Get the role relevant for a 'CtEvidence' ctEvRole :: CtEvidence -> Role ctEvRole = eqRelRole . ctEvEqRel ctEvTerm :: CtEvidence -> EvTerm ctEvTerm ev = EvExpr (ctEvExpr ev) -- | Extract the set of rewriters from a 'CtEvidence' -- See Note [Wanteds rewrite Wanteds] -- If the provided CtEvidence is not for a Wanted, just -- return an empty set. 
ctEvRewriters :: CtEvidence -> RewriterSet
-- See Note [Wanteds rewrite Wanteds]
-- Only a CtWanted carries a RewriterSet; anything else yields the empty set.
ctEvRewriters ev = case ev of
  CtWanted { ctev_rewriters = rws } -> rws
  _other                            -> emptyRewriterSet

ctEvExpr :: HasDebugCallStack => CtEvidence -> EvExpr
-- A Wanted equality lives in a coercion hole, so its evidence is a Coercion;
-- every other constraint's evidence is an Id.
ctEvExpr ev
  | CtWanted { ctev_dest = HoleDest _ } <- ev = Coercion (ctEvCoercion ev)
  | otherwise                                 = evId (ctEvEvId ev)

ctEvCoercion :: HasDebugCallStack => CtEvidence -> TcCoercion
-- ctEvCoercion is only called on type equalities,
-- and Wanted equalities always have HoleDests
ctEvCoercion (CtGiven  { ctev_evar = given_id })        = mkTcCoVarCo given_id
ctEvCoercion (CtWanted { ctev_dest = HoleDest hole })   = mkHoleCo hole
ctEvCoercion ev                                         = pprPanic "ctEvCoercion" (ppr ev)

ctEvEvId :: CtEvidence -> EvVar
-- The evidence Id of a constraint; for a hole, its covar.
ctEvEvId ev = case ev of
  CtWanted { ctev_dest = EvVarDest var } -> var
  CtWanted { ctev_dest = HoleDest hole } -> coHoleCoVar hole
  CtGiven  { ctev_evar = var }           -> var

ctEvUnique :: CtEvidence -> Unique
ctEvUnique (CtGiven  { ctev_evar = var })  = varUnique var
ctEvUnique (CtWanted { ctev_dest = dest }) = tcEvDestUnique dest

tcEvDestUnique :: TcEvDest -> Unique
tcEvDestUnique dest = varUnique $
  case dest of
    EvVarDest var -> var
    HoleDest hole -> coHoleCoVar hole

setCtEvLoc :: CtEvidence -> CtLoc -> CtEvidence
setCtEvLoc ctev new_loc = ctev { ctev_loc = new_loc }

arisesFromGivens :: Ct -> Bool
-- True for a Given constraint, or one whose CtLoc is a Given location.
arisesFromGivens ct = isGivenCt ct || isGivenLoc (ctLoc ct)

-- | Set the type of CtEvidence.
--
-- This function ensures that the invariants on 'CtEvidence' hold, by updating
-- the evidence and the ctev_pred in sync with each other.
-- See Note [CtEvidence invariants].
setCtEvPredType :: HasDebugCallStack => CtEvidence -> Type -> CtEvidence
-- Update the cached predicate AND the evidence together, so that
-- Note [CtEvidence invariants] continues to hold.
setCtEvPredType (CtGiven { ctev_evar = var, ctev_loc = loc }) new_pred
  = CtGiven { ctev_pred = new_pred
            , ctev_evar = setVarType var new_pred
            , ctev_loc  = loc }
setCtEvPredType (CtWanted { ctev_dest = dest, ctev_loc = loc
                          , ctev_rewriters = rws }) new_pred
  = CtWanted { ctev_pred      = new_pred
             , ctev_dest      = new_dest
             , ctev_loc       = loc
             , ctev_rewriters = rws }
  where
    -- Re-type the destination so evidence and predicate stay in sync
    new_dest = case dest of
      EvVarDest var -> EvVarDest (setVarType var new_pred)
      HoleDest  h   -> HoleDest  (setCoHoleType h new_pred)

instance Outputable TcEvDest where
  ppr dest = case dest of
    HoleDest  h  -> text "hole" <> ppr h
    EvVarDest ev -> ppr ev

instance Outputable CtEvidence where
  ppr ev = ppr (ctEvFlavour ev)
           <+> evidence_doc
           <+> braces (ppr (ctl_depth (ctEvLoc ev)) <> rewriters_doc)
                   -- Show the sub-goal depth too
           <> dcolon <+> ppr (ctEvPred ev)
    where
      evidence_doc = case ev of
        CtGiven  { ctev_evar = v } -> ppr v
        CtWanted { ctev_dest = d } -> ppr d

      rws = ctEvRewriters ev
      rewriters_doc | isEmptyRewriterSet rws = empty
                    | otherwise              = semi <> ppr rws

isWanted :: CtEvidence -> Bool
isWanted ev = case ev of
  CtWanted {} -> True
  _           -> False

isGiven :: CtEvidence -> Bool
isGiven ev = case ev of
  CtGiven {} -> True
  _          -> False

{- *********************************************************************
*                                                                      *
                   RewriterSet
*                                                                      *
********************************************************************* -}

-- | Stores a set of CoercionHoles that have been used to rewrite a constraint.
-- See Note [Wanteds rewrite Wanteds].
newtype RewriterSet = RewriterSet (UniqSet CoercionHole) deriving newtype (Outputable, Semigroup, Monoid) emptyRewriterSet :: RewriterSet emptyRewriterSet = RewriterSet emptyUniqSet isEmptyRewriterSet :: RewriterSet -> Bool isEmptyRewriterSet (RewriterSet set) = isEmptyUniqSet set addRewriterSet :: RewriterSet -> CoercionHole -> RewriterSet addRewriterSet = coerce (addOneToUniqSet @CoercionHole) -- | Makes a 'RewriterSet' from all the coercion holes that occur in the -- given coercion. rewriterSetFromCo :: Coercion -> RewriterSet rewriterSetFromCo co = appEndo (rewriter_set_from_co co) emptyRewriterSet -- | Makes a 'RewriterSet' from all the coercion holes that occur in the -- given type. rewriterSetFromType :: Type -> RewriterSet rewriterSetFromType ty = appEndo (rewriter_set_from_ty ty) emptyRewriterSet -- | Makes a 'RewriterSet' from all the coercion holes that occur in the -- given types. rewriterSetFromTypes :: [Type] -> RewriterSet rewriterSetFromTypes tys = appEndo (rewriter_set_from_tys tys) emptyRewriterSet rewriter_set_from_ty :: Type -> Endo RewriterSet rewriter_set_from_tys :: [Type] -> Endo RewriterSet rewriter_set_from_co :: Coercion -> Endo RewriterSet (rewriter_set_from_ty, rewriter_set_from_tys, rewriter_set_from_co, _) = foldTyCo folder () where folder :: TyCoFolder () (Endo RewriterSet) folder = TyCoFolder { tcf_view = noView , tcf_tyvar = \ _ tv -> rewriter_set_from_ty (tyVarKind tv) , tcf_covar = \ _ cv -> rewriter_set_from_ty (varType cv) , tcf_hole = \ _ hole -> coerce (`addOneToUniqSet` hole) S.<> rewriter_set_from_ty (varType (coHoleCoVar hole)) , tcf_tycobinder = \ _ _ _ -> () } {- ************************************************************************ * * CtFlavour * * ************************************************************************ -} data CtFlavour = Given -- we have evidence | Wanted -- we want evidence deriving Eq instance Outputable CtFlavour where ppr Given = text "[G]" ppr Wanted = text "[W]" ctEvFlavour :: CtEvidence -> 
CtFlavour ctEvFlavour (CtWanted {}) = Wanted ctEvFlavour (CtGiven {}) = Given -- | Whether or not one 'Ct' can rewrite another is determined by its -- flavour and its equality relation. See also -- Note [Flavours with roles] in GHC.Tc.Solver.InertSet type CtFlavourRole = (CtFlavour, EqRel) -- | Extract the flavour, role, and boxity from a 'CtEvidence' ctEvFlavourRole :: CtEvidence -> CtFlavourRole ctEvFlavourRole ev = (ctEvFlavour ev, ctEvEqRel ev) -- | Extract the flavour and role from a 'Ct' ctFlavourRole :: Ct -> CtFlavourRole -- Uses short-cuts to role for special cases ctFlavourRole (CDictCan { cc_ev = ev }) = (ctEvFlavour ev, NomEq) ctFlavourRole (CEqCan { cc_ev = ev, cc_eq_rel = eq_rel }) = (ctEvFlavour ev, eq_rel) ctFlavourRole ct = ctEvFlavourRole (ctEvidence ct) {- Note [eqCanRewrite] ~~~~~~~~~~~~~~~~~~~~~~ (eqCanRewrite ct1 ct2) holds if the constraint ct1 (a CEqCan of form lhs ~ ty) can be used to rewrite ct2. It must satisfy the properties of a can-rewrite relation, see Definition [Can-rewrite relation] in GHC.Tc.Solver.Monad. With the solver handling Coercible constraints like equality constraints, the rewrite conditions must take role into account, never allowing a representational equality to rewrite a nominal one. Note [Wanteds rewrite Wanteds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Should one Wanted constraint be allowed to rewrite another? This example (along with #8450) suggests not: f :: a -> Bool f x = ( [x,'c'], [x,True] ) `seq` True Here we get [W] a ~ Char [W] a ~ Bool but we do not want to complain about Bool ~ Char! This example suggests yes (indexed-types/should_fail/T4093a): type family Foo a f :: (Foo e ~ Maybe e) => Foo e In the ambiguity check, we get [G] g1 :: Foo e ~ Maybe e [W] w1 :: Foo alpha ~ Foo e [W] w2 :: Foo alpha ~ Maybe alpha w1 gets rewritten by the Given to become [W] w3 :: Foo alpha ~ Maybe e Now, the only way to make progress is to allow Wanteds to rewrite Wanteds. 
Rewriting w3 with w2 gives us
  [W] w4 :: Maybe alpha ~ Maybe e
which will soon get us to alpha := e and thence to victory.

TL;DR we want equality saturation.

We thus want Wanteds to rewrite Wanteds in order to accept more programs,
but we don't want Wanteds to rewrite Wanteds because doing so can create
inscrutable error messages. We choose to allow the rewriting, but
every Wanted tracks the set of Wanteds it has been rewritten by. This is
called a RewriterSet, stored in the ctev_rewriters field of the CtWanted
constructor of CtEvidence.  (Only Wanteds have RewriterSets.)

Let's continue our first example above:

  inert: [W] w1 :: a ~ Char
  work:  [W] w2 :: a ~ Bool

Because Wanteds can rewrite Wanteds, w1 will rewrite w2, yielding

  inert: [W] w1 :: a ~ Char
         [W] w2 {w1}:: Char ~ Bool

The {w1} in the second line of output is the RewriterSet of w1.

A RewriterSet is just a set of unfilled CoercionHoles. This is sufficient
because only equalities (evidenced by coercion holes) are used for rewriting;
other (dictionary) constraints cannot ever rewrite. The rewriter (in
e.g. GHC.Tc.Solver.Rewrite.rewrite) tracks and returns a RewriterSet,
consisting of the evidence (a CoercionHole) for any Wanted equalities used in
rewriting.  Then rewriteEvidence and rewriteEqEvidence (in
GHC.Tc.Solver.Canonical) add this RewriterSet to the rewritten constraint's
rewriter set.

In error reporting, we simply suppress any errors that have been rewritten
by /unsolved/ wanteds. This suppression happens in GHC.Tc.Errors.mkErrorItem,
which uses GHC.Tc.Utils.anyUnfilledCoercionHoles to look through any filled
coercion holes. The idea is that we wish to report the "root cause" -- the
error that rewrote all the others.

Worry: It seems possible that *all* unsolved wanteds are rewritten by other
unsolved wanteds, so that e.g. w1 has w2 in its rewriter set, and w2 has w1 in
its rewriter set. We are unable to come up with an example of this in practice,
however, and so we believe this case cannot happen.
Note [Avoiding rewriting cycles] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [inert_eqs: the inert equalities] in GHC.Tc.Solver.InertSet describes the can-rewrite relation among CtFlavour/Role pairs, saying which constraints can rewrite which other constraints. It puts forth (R2): (R2) If f1 >= f, and f2 >= f, then either f1 >= f2 or f2 >= f1 The naive can-rewrite relation says that (Given, Representational) can rewrite (Wanted, Representational) and that (Wanted, Nominal) can rewrite (Wanted, Representational), but neither of (Given, Representational) and (Wanted, Nominal) can rewrite the other. This would violate (R2). See also Note [Why R2?] in GHC.Tc.Solver.InertSet. To keep R2, we do not allow (Wanted, Nominal) to rewrite (Wanted, Representational). This can, in theory, bite, in this scenario: type family F a data T a type role T nominal [G] F a ~N T a [W] F alpha ~N T alpha [W] F alpha ~R T a As written, this makes no progress, and GHC errors. But, if we allowed W/N to rewrite W/R, the first W could rewrite the second: [G] F a ~N T a [W] F alpha ~N T alpha [W] T alpha ~R T a Now we decompose the second W to get [W] alpha ~N a noting the role annotation on T. This causes (alpha := a), and then everything else unlocks. What to do? We could "decompose" nominal equalities into nominal-only ("NO") equalities and representational ones, where a NO equality rewrites only nominals. That is, when considering whether [W] F alpha ~N T alpha should rewrite [W] F alpha ~R T a, we could require splitting the first W into [W] F alpha ~NO T alpha, [W] F alpha ~R T alpha. Then, we use the R half of the split to rewrite the second W, and off we go. This splitting would allow the split-off R equality to be rewritten by other equalities, thus avoiding the problem in Note [Why R2?] in GHC.Tc.Solver.InertSet. However, note that I said that this bites in theory. That's because no known program actually gives rise to this scenario. 
A direct encoding ends up starting with [G] F a ~ T a [W] F alpha ~ T alpha [W] Coercible (F alpha) (T a) where ~ and Coercible denote lifted class constraints. The ~s quickly reduce to ~N: good. But the Coercible constraint gets rewritten to [W] Coercible (T alpha) (T a) by the first Wanted. This is because Coercible is a class, and arguments in class constraints use *nominal* rewriting, not the representational rewriting that is restricted due to (R2). Note that reordering the code doesn't help, because equalities (including lifted ones) are prioritized over Coercible. Thus, I (Richard E.) see no way to write a program that is rejected because of this infelicity. I have not proved it impossible, exactly, but my usual tricks have not yielded results. In the olden days, when we had Derived constraints, this Note was all about G/R and D/N both rewriting D/R. Back then, the code in typecheck/should_compile/T19665 really did get rejected. But now, according to the rewriting of the Coercible constraint, the program is accepted. -} eqCanRewrite :: EqRel -> EqRel -> Bool eqCanRewrite NomEq _ = True eqCanRewrite ReprEq ReprEq = True eqCanRewrite ReprEq NomEq = False eqCanRewriteFR :: CtFlavourRole -> CtFlavourRole -> Bool -- Can fr1 actually rewrite fr2? -- Very important function! -- See Note [eqCanRewrite] -- See Note [Wanteds rewrite Wanteds] -- See Note [Avoiding rewriting cycles] eqCanRewriteFR (Given, r1) (_, r2) = eqCanRewrite r1 r2 eqCanRewriteFR (Wanted, NomEq) (Wanted, ReprEq) = False eqCanRewriteFR (Wanted, r1) (Wanted, r2) = eqCanRewrite r1 r2 eqCanRewriteFR (Wanted, _) (Given, _) = False {- ************************************************************************ * * SubGoalDepth * * ************************************************************************ Note [SubGoalDepth] ~~~~~~~~~~~~~~~~~~~ The 'SubGoalDepth' takes care of stopping the constraint solver from looping. The counter starts at zero and increases. 
It includes dictionary constraints, equality simplification, and type
family reduction. (Why combine these? Because it's actually quite easy to
mistake one for another, in sufficiently involved scenarios, like
ConstraintKinds.)

The flag -freduction-depth=n fixes the maximum level.

* The counter includes the depth of type class instance declarations.  Example:
     [W] d{7} : Eq [Int]
  That is d's dictionary-constraint depth is 7.  If we use the instance
     $dfEqList :: Eq a => Eq [a]
  to simplify it, we get
     d{7} = $dfEqList d'{8}
  where d'{8} : Eq Int, and d' has depth 8.

  For civilised (decidable) instance declarations, each increase of
  depth removes a type constructor from the type, so the depth never
  gets big; i.e. is bounded by the structural depth of the type.

* The counter also increments when resolving
  equalities involving type functions. Example:
  Assume we have a wanted at depth 7:
    [W] d{7} : F () ~ a
  If there is a type function equation "F () = Int", this would be rewritten to
    [W] d{8} : Int ~ a
  and remembered as having depth 8.

  Again, without UndecidableInstances, this counter is bounded, but with it
  resolution can go on ad infinitum. Hence there is a maximum level.

* Lastly, every time an equality is rewritten, the counter increases. Again,
  rewriting an equality constraint normally makes progress, but it's possible
  the "progress" is just the reduction of an infinitely-reducing type family.
  Hence we need to track the rewrites.

When compiling a program requires a greater depth, then GHC recommends turning
off this check entirely by setting -freduction-depth=0. This is because the
exact number that works is highly variable, and is likely to change even between
minor releases. Because this check is solely to prevent infinite compilation
times, it seems safe to disable it when a user has ascertained that their program
doesn't loop at the type level.
-}

-- | See Note [SubGoalDepth]
newtype SubGoalDepth = SubGoalDepth Int
  deriving (Eq, Ord, Outputable)

-- | The depth every constraint starts out with: zero.
initialSubGoalDepth :: SubGoalDepth
initialSubGoalDepth = SubGoalDepth 0

-- | Record one more solver step on this constraint's depth counter.
bumpSubGoalDepth :: SubGoalDepth -> SubGoalDepth
bumpSubGoalDepth (SubGoalDepth n) = SubGoalDepth (n + 1)

-- | Take the deeper of two depths.
maxSubGoalDepth :: SubGoalDepth -> SubGoalDepth -> SubGoalDepth
maxSubGoalDepth (SubGoalDepth n) (SubGoalDepth m) = SubGoalDepth (n `max` m)

-- | True when the depth has gone past the limit set by -freduction-depth.
-- See Note [SubGoalDepth].
subGoalDepthExceeded :: DynFlags -> SubGoalDepth -> Bool
subGoalDepthExceeded dflags (SubGoalDepth d) = mkIntWithInf d > reductionDepth dflags

{-
************************************************************************
*                                                                      *
            CtLoc
*                                                                      *
************************************************************************

The 'CtLoc' gives information about where a constraint came from.
This is important for decent error message reporting because
dictionaries don't appear in the original source code.
-}

data CtLoc = CtLoc { ctl_origin :: CtOrigin
                   , ctl_env    :: TcLclEnv
                   , ctl_t_or_k :: Maybe TypeOrKind  -- OK if we're not sure
                   , ctl_depth  :: !SubGoalDepth }

  -- The TcLclEnv includes particularly
  --    source location:  tcl_loc   :: RealSrcSpan
  --    context:          tcl_ctxt  :: [ErrCtxt]
  --    binder stack:     tcl_bndrs :: TcBinderStack
  --    level:            tcl_tclvl :: TcLevel

mkKindLoc :: TcType -> TcType   -- original *types* being compared
          -> CtLoc -> CtLoc
mkKindLoc s1 s2 loc = setCtLocOrigin (toKindLoc loc)
                        (KindEqOrigin s1 s2 (ctLocOrigin loc)
                                      (ctLocTypeOrKind_maybe loc))

-- | Take a CtLoc and moves it to the kind level
toKindLoc :: CtLoc -> CtLoc
toKindLoc loc = loc { ctl_t_or_k = Just KindLevel }

-- | Build the CtLoc for a Given constraint at the supplied level.
mkGivenLoc :: TcLevel -> SkolemInfoAnon -> TcLclEnv -> CtLoc
mkGivenLoc tclvl skol_info env
  = CtLoc { ctl_origin = GivenOrigin skol_info
          , ctl_env    = setLclEnvTcLevel env tclvl
          , ctl_t_or_k = Nothing    -- this only matters for error msgs
          , ctl_depth  = initialSubGoalDepth }

ctLocEnv :: CtLoc -> TcLclEnv
ctLocEnv = ctl_env

ctLocLevel :: CtLoc -> TcLevel
ctLocLevel loc = getLclEnvTcLevel (ctLocEnv loc)
ctLocDepth :: CtLoc -> SubGoalDepth ctLocDepth = ctl_depth ctLocOrigin :: CtLoc -> CtOrigin ctLocOrigin = ctl_origin ctLocSpan :: CtLoc -> RealSrcSpan ctLocSpan (CtLoc { ctl_env = lcl}) = getLclEnvLoc lcl ctLocTypeOrKind_maybe :: CtLoc -> Maybe TypeOrKind ctLocTypeOrKind_maybe = ctl_t_or_k setCtLocSpan :: CtLoc -> RealSrcSpan -> CtLoc setCtLocSpan ctl@(CtLoc { ctl_env = lcl }) loc = setCtLocEnv ctl (setLclEnvLoc lcl loc) bumpCtLocDepth :: CtLoc -> CtLoc bumpCtLocDepth loc@(CtLoc { ctl_depth = d }) = loc { ctl_depth = bumpSubGoalDepth d } setCtLocOrigin :: CtLoc -> CtOrigin -> CtLoc setCtLocOrigin ctl orig = ctl { ctl_origin = orig } updateCtLocOrigin :: CtLoc -> (CtOrigin -> CtOrigin) -> CtLoc updateCtLocOrigin ctl@(CtLoc { ctl_origin = orig }) upd = ctl { ctl_origin = upd orig } setCtLocEnv :: CtLoc -> TcLclEnv -> CtLoc setCtLocEnv ctl env = ctl { ctl_env = env } pprCtLoc :: CtLoc -> SDoc -- "arising from ... at ..." -- Not an instance of Outputable because of the "arising from" prefix pprCtLoc (CtLoc { ctl_origin = o, ctl_env = lcl}) = sep [ pprCtOrigin o , text "at" <+> ppr (getLclEnvLoc lcl)] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types/Evidence.hs0000644000000000000000000012047314472400113021373 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE LambdaCase #-} module GHC.Tc.Types.Evidence ( -- * HsWrapper HsWrapper(..), (<.>), mkWpTyApps, mkWpEvApps, mkWpEvVarApps, mkWpTyLams, mkWpLams, mkWpLet, mkWpFun, mkWpCastN, mkWpCastR, collectHsWrapBinders, idHsWrapper, isIdHsWrapper, pprHsWrapper, hsWrapDictBinders, -- * Evidence bindings TcEvBinds(..), EvBindsVar(..), EvBindMap(..), emptyEvBindMap, extendEvBinds, lookupEvBind, evBindMapBinds, foldEvBindMap, nonDetStrictFoldEvBindMap, filterEvBindMap, isEmptyEvBindMap, evBindMapToVarSet, varSetMinusEvBindMap, EvBind(..), emptyTcEvBinds, isEmptyTcEvBinds, mkGivenEvBind, mkWantedEvBind, evBindVar, isCoEvBindsVar, -- * EvTerm (already a CoreExpr) EvTerm(..), 
EvExpr, evId, evCoercion, evCast, evDFunApp, evDataConApp, evSelector, mkEvCast, evVarsOfTerm, mkEvScSelectors, evTypeable, findNeededEvVars, evTermCoercion, evTermCoercion_maybe, EvCallStack(..), EvTypeable(..), -- * HoleExprRef HoleExprRef(..), -- * TcCoercion TcCoercion, TcCoercionR, TcCoercionN, TcCoercionP, CoercionHole, TcMCoercion, TcMCoercionN, TcMCoercionR, Role(..), LeftOrRight(..), pickLR, mkTcReflCo, mkTcNomReflCo, mkTcRepReflCo, mkTcTyConAppCo, mkTcAppCo, mkTcFunCo, mkTcAxInstCo, mkTcUnbranchedAxInstCo, mkTcForAllCo, mkTcForAllCos, mkTcSymCo, mkTcSymMCo, mkTcTransCo, mkTcNthCo, mkTcLRCo, mkTcSubCo, maybeTcSymCo, maybeTcSubCo, tcDowngradeRole, mkTcAxiomRuleCo, mkTcGReflRightCo, mkTcGReflRightMCo, mkTcGReflLeftCo, mkTcGReflLeftMCo, mkTcPhantomCo, mkTcCoherenceLeftCo, mkTcCoherenceRightCo, mkTcKindCo, tcCoercionKind, mkTcCoVarCo, mkTcFamilyTyConAppCo, isTcReflCo, isTcReflexiveCo, tcCoercionRole, unwrapIP, wrapIP, -- * QuoteWrapper QuoteWrapper(..), applyQuoteWrapper, quoteWrapperTyVarTy ) where import GHC.Prelude import GHC.Types.Unique.DFM import GHC.Types.Unique.FM import GHC.Types.Var import GHC.Core.Coercion.Axiom import GHC.Core.Coercion import GHC.Core.Ppr () -- Instance OutputableBndr TyVar import GHC.Tc.Utils.TcType import GHC.Core.Type import GHC.Core.TyCon import GHC.Core.DataCon ( DataCon, dataConWrapId ) import GHC.Builtin.Names import GHC.Types.Var.Env import GHC.Types.Var.Set import GHC.Core.Predicate import GHC.Data.Pair import GHC.Types.Basic import GHC.Core import GHC.Core.Class (Class, classSCSelId ) import GHC.Core.FVs ( exprSomeFreeVars ) import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Outputable import GHC.Data.Bag import GHC.Data.FastString import qualified Data.Data as Data import GHC.Types.SrcLoc import Data.IORef( IORef ) import GHC.Types.Unique.Set import GHC.Core.Multiplicity import qualified Data.Semigroup as S {- Note [TcCoercions] ~~~~~~~~~~~~~~~~~~ | TcCoercions are a hack used by the typechecker. 
Normally, Coercions have free variables of type (a ~# b): we call these CoVars. However, the type checker passes around equality evidence (boxed up) at type (a ~ b). An TcCoercion is simply a Coercion whose free variables have may be either boxed or unboxed. After we are done with typechecking the desugarer finds the boxed free variables, unboxes them, and creates a resulting real Coercion with kosher free variables. -} type TcCoercion = Coercion type TcCoercionN = CoercionN -- A Nominal coercion ~N type TcCoercionR = CoercionR -- A Representational coercion ~R type TcCoercionP = CoercionP -- a phantom coercion type TcMCoercion = MCoercion type TcMCoercionN = MCoercionN -- nominal type TcMCoercionR = MCoercionR -- representational mkTcReflCo :: Role -> TcType -> TcCoercion mkTcSymCo :: TcCoercion -> TcCoercion mkTcSymMCo :: TcMCoercion -> TcMCoercion mkTcTransCo :: TcCoercion -> TcCoercion -> TcCoercion mkTcNomReflCo :: TcType -> TcCoercionN mkTcRepReflCo :: TcType -> TcCoercionR mkTcTyConAppCo :: Role -> TyCon -> [TcCoercion] -> TcCoercion mkTcAppCo :: TcCoercion -> TcCoercionN -> TcCoercion mkTcFunCo :: Role -> TcCoercion -> TcCoercion -> TcCoercion -> TcCoercion mkTcAxInstCo :: Role -> CoAxiom br -> BranchIndex -> [TcType] -> [TcCoercion] -> TcCoercion mkTcUnbranchedAxInstCo :: CoAxiom Unbranched -> [TcType] -> [TcCoercion] -> TcCoercionR mkTcForAllCo :: TyVar -> TcCoercionN -> TcCoercion -> TcCoercion mkTcForAllCos :: [(TyVar, TcCoercionN)] -> TcCoercion -> TcCoercion mkTcNthCo :: Role -> Int -> TcCoercion -> TcCoercion mkTcLRCo :: LeftOrRight -> TcCoercion -> TcCoercion mkTcSubCo :: HasDebugCallStack => TcCoercionN -> TcCoercionR tcDowngradeRole :: Role -> Role -> TcCoercion -> TcCoercion mkTcAxiomRuleCo :: CoAxiomRule -> [TcCoercion] -> TcCoercionR mkTcGReflRightCo :: Role -> TcType -> TcCoercionN -> TcCoercion mkTcGReflRightMCo :: Role -> TcType -> TcMCoercionN -> TcCoercion mkTcGReflLeftCo :: Role -> TcType -> TcCoercionN -> TcCoercion mkTcGReflLeftMCo :: 
Role -> TcType -> TcMCoercionN -> TcCoercion mkTcCoherenceLeftCo :: Role -> TcType -> TcCoercionN -> TcCoercion -> TcCoercion mkTcCoherenceRightCo :: Role -> TcType -> TcCoercionN -> TcCoercion -> TcCoercion mkTcPhantomCo :: TcCoercionN -> TcType -> TcType -> TcCoercionP mkTcKindCo :: TcCoercion -> TcCoercionN mkTcCoVarCo :: CoVar -> TcCoercion mkTcFamilyTyConAppCo :: TyCon -> [TcCoercionN] -> TcCoercionN tcCoercionKind :: TcCoercion -> Pair TcType tcCoercionRole :: TcCoercion -> Role isTcReflCo :: TcCoercion -> Bool -- | This version does a slow check, calculating the related types and seeing -- if they are equal. isTcReflexiveCo :: TcCoercion -> Bool mkTcReflCo = mkReflCo mkTcSymCo = mkSymCo mkTcSymMCo = mkSymMCo mkTcTransCo = mkTransCo mkTcNomReflCo = mkNomReflCo mkTcRepReflCo = mkRepReflCo mkTcTyConAppCo = mkTyConAppCo mkTcAppCo = mkAppCo mkTcFunCo = mkFunCo mkTcAxInstCo = mkAxInstCo mkTcUnbranchedAxInstCo = mkUnbranchedAxInstCo Representational mkTcForAllCo = mkForAllCo mkTcForAllCos = mkForAllCos mkTcNthCo = mkNthCo mkTcLRCo = mkLRCo mkTcSubCo = mkSubCo tcDowngradeRole = downgradeRole mkTcAxiomRuleCo = mkAxiomRuleCo mkTcGReflRightCo = mkGReflRightCo mkTcGReflRightMCo = mkGReflRightMCo mkTcGReflLeftCo = mkGReflLeftCo mkTcGReflLeftMCo = mkGReflLeftMCo mkTcCoherenceLeftCo = mkCoherenceLeftCo mkTcCoherenceRightCo = mkCoherenceRightCo mkTcPhantomCo = mkPhantomCo mkTcKindCo = mkKindCo mkTcCoVarCo = mkCoVarCo mkTcFamilyTyConAppCo = mkFamilyTyConAppCo tcCoercionKind = coercionKind tcCoercionRole = coercionRole isTcReflCo = isReflCo isTcReflexiveCo = isReflexiveCo -- | If the EqRel is ReprEq, makes a SubCo; otherwise, does nothing. -- Note that the input coercion should always be nominal. 
maybeTcSubCo :: HasDebugCallStack => EqRel -> TcCoercionN -> TcCoercion maybeTcSubCo NomEq = id maybeTcSubCo ReprEq = mkTcSubCo -- | If a 'SwapFlag' is 'IsSwapped', flip the orientation of a coercion maybeTcSymCo :: SwapFlag -> TcCoercion -> TcCoercion maybeTcSymCo IsSwapped co = mkTcSymCo co maybeTcSymCo NotSwapped co = co {- %************************************************************************ %* * HsWrapper * * ************************************************************************ -} data HsWrapper = WpHole -- The identity coercion | WpCompose HsWrapper HsWrapper -- (wrap1 `WpCompose` wrap2)[e] = wrap1[ wrap2[ e ]] -- -- Hence (\a. []) `WpCompose` (\b. []) = (\a b. []) -- But ([] a) `WpCompose` ([] b) = ([] b a) | WpFun HsWrapper HsWrapper (Scaled TcTypeFRR) -- (WpFun wrap1 wrap2 (w, t1))[e] = \(x:_w t1). wrap2[ e wrap1[x] ] -- So note that if wrap1 :: exp_arg <= act_arg -- wrap2 :: act_res <= exp_res -- then WpFun wrap1 wrap2 : (act_arg -> arg_res) <= (exp_arg -> exp_res) -- This isn't the same as for mkFunCo, but it has to be this way -- because we can't use 'sym' to flip around these HsWrappers -- The TcType is the "from" type of the first wrapper -- -- Use 'mkWpFun' to construct such a wrapper. | WpCast TcCoercionR -- A cast: [] `cast` co -- Guaranteed not the identity coercion -- At role Representational -- Evidence abstraction and application -- (both dictionaries and coercions) | WpEvLam EvVar -- \d. [] the 'd' is an evidence variable | WpEvApp EvTerm -- [] d the 'd' is evidence for a constraint -- Kind and Type abstraction and application | WpTyLam TyVar -- \a. [] the 'a' is a type/kind variable (not coercion var) | WpTyApp KindOrType -- [] t the 't' is a type (not coercion) | WpLet TcEvBinds -- Non-empty (or possibly non-empty) evidence bindings, -- so that the identity coercion is always exactly WpHole | WpMultCoercion Coercion -- Require that a Coercion be reflexive; otherwise, -- error in the desugarer. 
See GHC.Tc.Utils.Unify -- Note [Wrapper returned from tcSubMult] deriving Data.Data -- | The Semigroup instance is a bit fishy, since @WpCompose@, as a data -- constructor, is "syntactic" and not associative. Concretely, if @a@, @b@, -- and @c@ aren't @WpHole@: -- -- > (a <> b) <> c ?= a <> (b <> c) -- -- ==> -- -- > (a `WpCompose` b) `WpCompose` c /= @ a `WpCompose` (b `WpCompose` c) -- -- However these two associations are are "semantically equal" in the sense -- that they produce equal functions when passed to -- @GHC.HsToCore.Binds.dsHsWrapper@. instance S.Semigroup HsWrapper where (<>) = (<.>) instance Monoid HsWrapper where mempty = WpHole (<.>) :: HsWrapper -> HsWrapper -> HsWrapper WpHole <.> c = c c <.> WpHole = c c1 <.> c2 = c1 `WpCompose` c2 -- | Smart constructor to create a 'WpFun' 'HsWrapper'. -- -- PRECONDITION: the "from" type of the first wrapper must have a syntactically -- fixed RuntimeRep (see Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete). mkWpFun :: HsWrapper -> HsWrapper -> Scaled TcTypeFRR -- ^ the "from" type of the first wrapper -- MUST have a fixed RuntimeRep -> TcType -- ^ either type of the second wrapper (used only when the -- second wrapper is the identity) -> HsWrapper -- NB: we can't check that the argument type has a fixed RuntimeRep with an assertion, -- because of [Wrinkle: Typed Template Haskell] in Note [hasFixedRuntimeRep] -- in GHC.Tc.Utils.Concrete. 
mkWpFun WpHole WpHole _ _ = WpHole mkWpFun WpHole (WpCast co2) (Scaled w t1) _ = WpCast (mkTcFunCo Representational (multToCo w) (mkTcRepReflCo t1) co2) mkWpFun (WpCast co1) WpHole (Scaled w _) t2 = WpCast (mkTcFunCo Representational (multToCo w) (mkTcSymCo co1) (mkTcRepReflCo t2)) mkWpFun (WpCast co1) (WpCast co2) (Scaled w _) _ = WpCast (mkTcFunCo Representational (multToCo w) (mkTcSymCo co1) co2) mkWpFun co1 co2 t1 _ = WpFun co1 co2 t1 mkWpCastR :: TcCoercionR -> HsWrapper mkWpCastR co | isTcReflCo co = WpHole | otherwise = assertPpr (tcCoercionRole co == Representational) (ppr co) $ WpCast co mkWpCastN :: TcCoercionN -> HsWrapper mkWpCastN co | isTcReflCo co = WpHole | otherwise = assertPpr (tcCoercionRole co == Nominal) (ppr co) $ WpCast (mkTcSubCo co) -- The mkTcSubCo converts Nominal to Representational mkWpTyApps :: [Type] -> HsWrapper mkWpTyApps tys = mk_co_app_fn WpTyApp tys mkWpEvApps :: [EvTerm] -> HsWrapper mkWpEvApps args = mk_co_app_fn WpEvApp args mkWpEvVarApps :: [EvVar] -> HsWrapper mkWpEvVarApps vs = mk_co_app_fn WpEvApp (map (EvExpr . evId) vs) mkWpTyLams :: [TyVar] -> HsWrapper mkWpTyLams ids = mk_co_lam_fn WpTyLam ids mkWpLams :: [Var] -> HsWrapper mkWpLams ids = mk_co_lam_fn WpEvLam ids mkWpLet :: TcEvBinds -> HsWrapper -- This no-op is a quite a common case mkWpLet (EvBinds b) | isEmptyBag b = WpHole mkWpLet ev_binds = WpLet ev_binds mk_co_lam_fn :: (a -> HsWrapper) -> [a] -> HsWrapper mk_co_lam_fn f as = foldr (\x wrap -> f x <.> wrap) WpHole as mk_co_app_fn :: (a -> HsWrapper) -> [a] -> HsWrapper -- For applications, the *first* argument must -- come *last* in the composition sequence mk_co_app_fn f as = foldr (\x wrap -> wrap <.> f x) WpHole as idHsWrapper :: HsWrapper idHsWrapper = WpHole isIdHsWrapper :: HsWrapper -> Bool isIdHsWrapper WpHole = True isIdHsWrapper _ = False hsWrapDictBinders :: HsWrapper -> Bag DictId -- ^ Identifies the /lambda-bound/ dictionaries of an 'HsWrapper'. 
This is used -- (only) to allow the pattern-match overlap checker to know what Given -- dictionaries are in scope. -- -- We specifically do not collect dictionaries bound in a 'WpLet'. These are -- either superclasses of lambda-bound ones, or (extremely numerous) results of -- binding Wanted dictionaries. We definitely don't want all those cluttering -- up the Given dictionaries for pattern-match overlap checking! hsWrapDictBinders wrap = go wrap where go (WpEvLam dict_id) = unitBag dict_id go (w1 `WpCompose` w2) = go w1 `unionBags` go w2 go (WpFun _ w _) = go w go WpHole = emptyBag go (WpCast {}) = emptyBag go (WpEvApp {}) = emptyBag go (WpTyLam {}) = emptyBag go (WpTyApp {}) = emptyBag go (WpLet {}) = emptyBag go (WpMultCoercion {}) = emptyBag collectHsWrapBinders :: HsWrapper -> ([Var], HsWrapper) -- Collect the outer lambda binders of a HsWrapper, -- stopping as soon as you get to a non-lambda binder collectHsWrapBinders wrap = go wrap [] where -- go w ws = collectHsWrapBinders (w <.> w1 <.> ... 
<.> wn) go :: HsWrapper -> [HsWrapper] -> ([Var], HsWrapper) go (WpEvLam v) wraps = add_lam v (gos wraps) go (WpTyLam v) wraps = add_lam v (gos wraps) go (WpCompose w1 w2) wraps = go w1 (w2:wraps) go wrap wraps = ([], foldl' (<.>) wrap wraps) gos [] = ([], WpHole) gos (w:ws) = go w ws add_lam v (vs,w) = (v:vs, w) {- ************************************************************************ * * Evidence bindings * * ************************************************************************ -} data TcEvBinds = TcEvBinds -- Mutable evidence bindings EvBindsVar -- Mutable because they are updated "later" -- when an implication constraint is solved | EvBinds -- Immutable after zonking (Bag EvBind) data EvBindsVar = EvBindsVar { ebv_uniq :: Unique, -- The Unique is for debug printing only ebv_binds :: IORef EvBindMap, -- The main payload: the value-level evidence bindings -- (dictionaries etc) -- Some Given, some Wanted ebv_tcvs :: IORef CoVarSet -- The free Given coercion vars needed by Wanted coercions that -- are solved by filling in their HoleDest in-place. Since they -- don't appear in ebv_binds, we keep track of their free -- variables so that we can report unused given constraints -- See Note [Tracking redundant constraints] in GHC.Tc.Solver } | CoEvBindsVar { -- See Note [Coercion evidence only] -- See above for comments on ebv_uniq, ebv_tcvs ebv_uniq :: Unique, ebv_tcvs :: IORef CoVarSet } instance Data.Data TcEvBinds where -- Placeholder; we can't travers into TcEvBinds toConstr _ = abstractConstr "TcEvBinds" gunfold _ _ = error "gunfold" dataTypeOf _ = Data.mkNoRepType "TcEvBinds" {- Note [Coercion evidence only] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Class constraints etc give rise to /term/ bindings for evidence, and we have nowhere to put term bindings in /types/. So in some places we use CoEvBindsVar (see newCoTcEvBinds) to signal that no term-level evidence bindings are allowed. 
Notebly (): - Places in types where we are solving kind constraints (all of which are equalities); see solveEqualities - When unifying forall-types -} isCoEvBindsVar :: EvBindsVar -> Bool isCoEvBindsVar (CoEvBindsVar {}) = True isCoEvBindsVar (EvBindsVar {}) = False ----------------- newtype EvBindMap = EvBindMap { ev_bind_varenv :: DVarEnv EvBind } -- Map from evidence variables to evidence terms -- We use @DVarEnv@ here to get deterministic ordering when we -- turn it into a Bag. -- If we don't do that, when we generate let bindings for -- dictionaries in dsTcEvBinds they will be generated in random -- order. -- -- For example: -- -- let $dEq = GHC.Classes.$fEqInt in -- let $$dNum = GHC.Num.$fNumInt in ... -- -- vs -- -- let $dNum = GHC.Num.$fNumInt in -- let $dEq = GHC.Classes.$fEqInt in ... -- -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why -- @UniqFM@ can lead to nondeterministic order. emptyEvBindMap :: EvBindMap emptyEvBindMap = EvBindMap { ev_bind_varenv = emptyDVarEnv } extendEvBinds :: EvBindMap -> EvBind -> EvBindMap extendEvBinds bs ev_bind = EvBindMap { ev_bind_varenv = extendDVarEnv (ev_bind_varenv bs) (eb_lhs ev_bind) ev_bind } isEmptyEvBindMap :: EvBindMap -> Bool isEmptyEvBindMap (EvBindMap m) = isEmptyDVarEnv m lookupEvBind :: EvBindMap -> EvVar -> Maybe EvBind lookupEvBind bs = lookupDVarEnv (ev_bind_varenv bs) evBindMapBinds :: EvBindMap -> Bag EvBind evBindMapBinds = foldEvBindMap consBag emptyBag foldEvBindMap :: (EvBind -> a -> a) -> a -> EvBindMap -> a foldEvBindMap k z bs = foldDVarEnv k z (ev_bind_varenv bs) -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. 
nonDetStrictFoldEvBindMap :: (EvBind -> a -> a) -> a -> EvBindMap -> a nonDetStrictFoldEvBindMap k z bs = nonDetStrictFoldDVarEnv k z (ev_bind_varenv bs) filterEvBindMap :: (EvBind -> Bool) -> EvBindMap -> EvBindMap filterEvBindMap k (EvBindMap { ev_bind_varenv = env }) = EvBindMap { ev_bind_varenv = filterDVarEnv k env } evBindMapToVarSet :: EvBindMap -> VarSet evBindMapToVarSet (EvBindMap dve) = unsafeUFMToUniqSet (mapUFM evBindVar (udfmToUfm dve)) varSetMinusEvBindMap :: VarSet -> EvBindMap -> VarSet varSetMinusEvBindMap vs (EvBindMap dve) = vs `uniqSetMinusUDFM` dve instance Outputable EvBindMap where ppr (EvBindMap m) = ppr m ----------------- -- All evidence is bound by EvBinds; no side effects data EvBind = EvBind { eb_lhs :: EvVar , eb_rhs :: EvTerm , eb_is_given :: Bool -- True <=> given -- See Note [Tracking redundant constraints] in GHC.Tc.Solver } evBindVar :: EvBind -> EvVar evBindVar = eb_lhs mkWantedEvBind :: EvVar -> EvTerm -> EvBind mkWantedEvBind ev tm = EvBind { eb_is_given = False, eb_lhs = ev, eb_rhs = tm } -- EvTypeable are never given, so we can work with EvExpr here instead of EvTerm mkGivenEvBind :: EvVar -> EvTerm -> EvBind mkGivenEvBind ev tm = EvBind { eb_is_given = True, eb_lhs = ev, eb_rhs = tm } -- An EvTerm is, conceptually, a CoreExpr that implements the constraint. -- Unfortunately, we cannot just do -- type EvTerm = CoreExpr -- Because of staging problems issues around EvTypeable data EvTerm = EvExpr EvExpr | EvTypeable Type EvTypeable -- Dictionary for (Typeable ty) | EvFun -- /\as \ds. 
let binds in v { et_tvs :: [TyVar] , et_given :: [EvVar] , et_binds :: TcEvBinds -- This field is why we need an EvFun -- constructor, and can't just use EvExpr , et_body :: EvVar } deriving Data.Data type EvExpr = CoreExpr -- An EvTerm is (usually) constructed by any of the constructors here -- and those more complicates ones who were moved to module GHC.Tc.Types.EvTerm -- | Any sort of evidence Id, including coercions evId :: EvId -> EvExpr evId = Var -- coercion bindings -- See Note [Coercion evidence terms] evCoercion :: TcCoercion -> EvTerm evCoercion co = EvExpr (Coercion co) -- | d |> co evCast :: EvExpr -> TcCoercion -> EvTerm evCast et tc | isReflCo tc = EvExpr et | otherwise = EvExpr (Cast et tc) -- Dictionary instance application evDFunApp :: DFunId -> [Type] -> [EvExpr] -> EvTerm evDFunApp df tys ets = EvExpr $ Var df `mkTyApps` tys `mkApps` ets evDataConApp :: DataCon -> [Type] -> [EvExpr] -> EvTerm evDataConApp dc tys ets = evDFunApp (dataConWrapId dc) tys ets -- Selector id plus the types at which it -- should be instantiated, used for HasField -- dictionaries; see Note [HasField instances] -- in TcInterface evSelector :: Id -> [Type] -> [EvExpr] -> EvExpr evSelector sel_id tys tms = Var sel_id `mkTyApps` tys `mkApps` tms -- Dictionary for (Typeable ty) evTypeable :: Type -> EvTypeable -> EvTerm evTypeable = EvTypeable -- | Instructions on how to make a 'Typeable' dictionary. -- See Note [Typeable evidence terms] data EvTypeable = EvTypeableTyCon TyCon [EvTerm] -- ^ Dictionary for @Typeable T@ where @T@ is a type constructor with all of -- its kind variables saturated. The @[EvTerm]@ is @Typeable@ evidence for -- the applied kinds.. | EvTypeableTyApp EvTerm EvTerm -- ^ Dictionary for @Typeable (s t)@, -- given a dictionaries for @s@ and @t@. | EvTypeableTrFun EvTerm EvTerm EvTerm -- ^ Dictionary for @Typeable (s % w -> t)@, -- given a dictionaries for @w@, @s@, and @t@. | EvTypeableTyLit EvTerm -- ^ Dictionary for a type literal, -- e.g. 
@Typeable "foo"@ or @Typeable 3@ -- The 'EvTerm' is evidence of, e.g., @KnownNat 3@ -- (see #10348) deriving Data.Data -- | Evidence for @CallStack@ implicit parameters. data EvCallStack -- See Note [Overview of implicit CallStacks] = EvCsEmpty | EvCsPushCall FastString -- Usually the name of the function being called -- but can also be "the literal 42" -- or "an if-then-else expression", etc RealSrcSpan -- Location of the call EvExpr -- Rest of the stack -- ^ @EvCsPushCall origin loc stk@ represents a call from @origin@, -- occurring at @loc@, in a calling context @stk@. deriving Data.Data {- ************************************************************************ * * Evidence for holes * * ************************************************************************ -} -- | Where to store evidence for expression holes -- See Note [Holes] in GHC.Tc.Types.Constraint data HoleExprRef = HER (IORef EvTerm) -- ^ where to write the erroring expression TcType -- ^ expected type of that expression Unique -- ^ for debug output only instance Outputable HoleExprRef where ppr (HER _ _ u) = ppr u instance Data.Data HoleExprRef where -- Placeholder; we can't traverse into HoleExprRef toConstr _ = abstractConstr "HoleExprRef" gunfold _ _ = error "gunfold" dataTypeOf _ = Data.mkNoRepType "HoleExprRef" {- Note [Typeable evidence terms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The EvTypeable data type looks isomorphic to Type, but the EvTerms inside can be EvIds. Eg f :: forall a. Typeable a => a -> TypeRep f x = typeRep (undefined :: Proxy [a]) Here for the (Typeable [a]) dictionary passed to typeRep we make evidence dl :: Typeable [a] = EvTypeable [a] (EvTypeableTyApp (EvTypeableTyCon []) (EvId d)) where d :: Typable a is the lambda-bound dictionary passed into f. 
Note [Coercion evidence terms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A "coercion evidence term" takes one of these forms co_tm ::= EvId v where v :: t1 ~# t2 | EvCoercion co | EvCast co_tm co We do quite often need to get a TcCoercion from an EvTerm; see 'evTermCoercion'. INVARIANT: The evidence for any constraint with type (t1 ~# t2) is a coercion evidence term. Consider for example [G] d :: F Int a If we have ax7 a :: F Int a ~ (a ~ Bool) then we do NOT generate the constraint [G] (d |> ax7 a) :: a ~ Bool because that does not satisfy the invariant (d is not a coercion variable). Instead we make a binding g1 :: a~Bool = g |> ax7 a and the constraint [G] g1 :: a~Bool See #7238 and Note [Bind new Givens immediately] in GHC.Tc.Types.Constraint Note [EvBinds/EvTerm] ~~~~~~~~~~~~~~~~~~~~~ How evidence is created and updated. Bindings for dictionaries, and coercions and implicit parameters are carried around in TcEvBinds which during constraint generation and simplification is always of the form (TcEvBinds ref). After constraint simplification is finished it will be transformed to t an (EvBinds ev_bag). Evidence for coercions *SHOULD* be filled in using the TcEvBinds However, all EvVars that correspond to *wanted* coercion terms in an EvBind must be mutable variables so that they can be readily inlined (by zonking) after constraint simplification is finished. Conclusion: a new wanted coercion variable should be made mutable. [Notice though that evidence variables that bind coercion terms from super classes will be "given" and hence rigid] Note [Overview of implicit CallStacks] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ (See https://gitlab.haskell.org/ghc/ghc/wikis/explicit-call-stack/implicit-locations) The goal of CallStack evidence terms is to reify locations in the program source as runtime values, without any support from the RTS. 
We accomplish this by assigning a special meaning to constraints of type GHC.Stack.Types.HasCallStack, an alias type HasCallStack = (?callStack :: CallStack) Implicit parameters of type GHC.Stack.Types.CallStack (the name is not important) are solved in three steps: 1. Explicit, user-written occurrences of `?stk :: CallStack` which have IPOccOrigin, are solved directly from the given IP, just like a regular IP; see GHC.Tc.Solver.Interact.interactDict. For example, the occurrence of `?stk` in error :: (?stk :: CallStack) => String -> a error s = raise (ErrorCall (s ++ prettyCallStack ?stk)) will be solved for the `?stk` in `error`s context as before. 2. In a function call, instead of simply passing the given IP, we first append the current call-site to it. For example, consider a call to the callstack-aware `error` above. foo :: (?stk :: CallStack) => a foo = error "undefined!" Here we want to take the given `?stk` and append the current call-site, before passing it to `error`. In essence, we want to rewrite `foo "undefined!"` to let ?stk = pushCallStack ?stk in foo "undefined!" We achieve this as follows: * At a call of foo :: (?stk :: CallStack) => blah we emit a Wanted [W] d1 : IP "stk" CallStack with CtOrigin = OccurrenceOf "foo" * We /solve/ this constraint, in GHC.Tc.Solver.Canonical.canClassNC by emitting a NEW Wanted [W] d2 :: IP "stk" CallStack with CtOrigin = IPOccOrigin and solve d1 = EvCsPushCall "foo" (EvId d1) * The new Wanted, for `d2` will be solved per rule (1), ie as a regular IP. 3. We use the predicate isPushCallStackOrigin to identify whether we want to do (1) solve directly, or (2) push and then solve directly. Key point (see #19918): the CtOrigin where we want to push an item on the call stack can include IfThenElseOrigin etc, when RebindableSyntax is involved. See the defn of fun_orig in GHC.Tc.Gen.App.tcInstFun; it is this CtOrigin that is pinned on the constraints generated by functions in the "expansion" for rebindable syntax. c.f. 
GHC.Rename.Expr Note [Handling overloaded and rebindable constructs] 4. We default any insoluble CallStacks to the empty CallStack. Suppose `undefined` did not request a CallStack, ie undefinedNoStk :: a undefinedNoStk = error "undefined!" Under the usual IP rules, the new wanted from rule (2) would be insoluble as there's no given IP from which to solve it, so we would get an "unbound implicit parameter" error. We don't ever want to emit an insoluble CallStack IP, so we add a defaulting pass to default any remaining wanted CallStacks to the empty CallStack with the evidence term EvCsEmpty (see GHC.Tc.Solver.simplifyTopWanteds and GHC.Tc.Solver.defaultCallStacks) This provides a lightweight mechanism for building up call-stacks explicitly, but is notably limited by the fact that the stack will stop at the first function whose type does not include a CallStack IP. For example, using the above definition of `undefined`: head :: [a] -> a head [] = undefined head (x:_) = x g = head [] the resulting CallStack will include the call to `undefined` in `head` and the call to `error` in `undefined`, but *not* the call to `head` in `g`, because `head` did not explicitly request a CallStack. Important Details: - GHC should NEVER report an insoluble CallStack constraint. - GHC should NEVER infer a CallStack constraint unless one was requested with a partial type signature (See GHC.Tc.Solver..pickQuantifiablePreds). - A CallStack (defined in GHC.Stack.Types) is a [(String, SrcLoc)], where the String is the name of the binder that is used at the SrcLoc. SrcLoc is also defined in GHC.Stack.Types and contains the package/module/file name, as well as the full source-span. Both CallStack and SrcLoc are kept abstract so only GHC can construct new values. - We will automatically solve any wanted CallStack regardless of the name of the IP, i.e. f = show (?stk :: CallStack) g = show (?loc :: CallStack) are both valid. 
However, we will only push new SrcLocs onto existing CallStacks when the IP names match, e.g. in head :: (?loc :: CallStack) => [a] -> a head [] = error (show (?stk :: CallStack)) the printed CallStack will NOT include head's call-site. This reflects the standard scoping rules of implicit-parameters. - An EvCallStack term desugars to a CoreExpr of type `IP "some str" CallStack`. The desugarer will need to unwrap the IP newtype before pushing a new call-site onto a given stack (See GHC.HsToCore.Binds.dsEvCallStack) - When we emit a new wanted CallStack from rule (2) we set its origin to `IPOccOrigin ip_name` instead of the original `OccurrenceOf func` (see GHC.Tc.Solver.Interact.interactDict). This is a bit shady, but is how we ensure that the new wanted is solved like a regular IP. -} mkEvCast :: EvExpr -> TcCoercion -> EvTerm mkEvCast ev lco | assertPpr (tcCoercionRole lco == Representational) (vcat [text "Coercion of wrong role passed to mkEvCast:", ppr ev, ppr lco]) $ isTcReflCo lco = EvExpr ev | otherwise = evCast ev lco mkEvScSelectors -- Assume class (..., D ty, ...) => C a b :: Class -> [TcType] -- C ty1 ty2 -> [(TcPredType, -- D ty[ty1/a,ty2/b] EvExpr) -- :: C ty1 ty2 -> D ty[ty1/a,ty2/b] ] mkEvScSelectors cls tys = zipWith mk_pr (immSuperClasses cls tys) [0..] 
where mk_pr pred i = (pred, Var sc_sel_id `mkTyApps` tys) where sc_sel_id = classSCSelId cls i -- Zero-indexed emptyTcEvBinds :: TcEvBinds emptyTcEvBinds = EvBinds emptyBag isEmptyTcEvBinds :: TcEvBinds -> Bool isEmptyTcEvBinds (EvBinds b) = isEmptyBag b isEmptyTcEvBinds (TcEvBinds {}) = panic "isEmptyTcEvBinds" evTermCoercion_maybe :: EvTerm -> Maybe TcCoercion -- Applied only to EvTerms of type (s~t) -- See Note [Coercion evidence terms] evTermCoercion_maybe ev_term | EvExpr e <- ev_term = go e | otherwise = Nothing where go :: EvExpr -> Maybe TcCoercion go (Var v) = return (mkCoVarCo v) go (Coercion co) = return co go (Cast tm co) = do { co' <- go tm ; return (mkCoCast co' co) } go _ = Nothing evTermCoercion :: EvTerm -> TcCoercion evTermCoercion tm = case evTermCoercion_maybe tm of Just co -> co Nothing -> pprPanic "evTermCoercion" (ppr tm) {- ********************************************************************* * * Free variables * * ********************************************************************* -} findNeededEvVars :: EvBindMap -> VarSet -> VarSet -- Find all the Given evidence needed by seeds, -- looking transitively through binds findNeededEvVars ev_binds seeds = transCloVarSet also_needs seeds where also_needs :: VarSet -> VarSet also_needs needs = nonDetStrictFoldUniqSet add emptyVarSet needs -- It's OK to use a non-deterministic fold here because we immediately -- forget about the ordering by creating a set add :: Var -> VarSet -> VarSet add v needs | Just ev_bind <- lookupEvBind ev_binds v , EvBind { eb_is_given = is_given, eb_rhs = rhs } <- ev_bind , is_given = evVarsOfTerm rhs `unionVarSet` needs | otherwise = needs evVarsOfTerm :: EvTerm -> VarSet evVarsOfTerm (EvExpr e) = exprSomeFreeVars isEvVar e evVarsOfTerm (EvTypeable _ ev) = evVarsOfTypeable ev evVarsOfTerm (EvFun {}) = emptyVarSet -- See Note [Free vars of EvFun] evVarsOfTerms :: [EvTerm] -> VarSet evVarsOfTerms = mapUnionVarSet evVarsOfTerm evVarsOfTypeable :: EvTypeable -> VarSet 
evVarsOfTypeable ev = case ev of EvTypeableTyCon _ e -> mapUnionVarSet evVarsOfTerm e EvTypeableTyApp e1 e2 -> evVarsOfTerms [e1,e2] EvTypeableTrFun em e1 e2 -> evVarsOfTerms [em,e1,e2] EvTypeableTyLit e -> evVarsOfTerm e {- Note [Free vars of EvFun] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Finding the free vars of an EvFun is made tricky by the fact the bindings et_binds may be a mutable variable. Fortunately, we can just squeeze by. Here's how. * evVarsOfTerm is used only by GHC.Tc.Solver.neededEvVars. * Each EvBindsVar in an et_binds field of an EvFun is /also/ in the ic_binds field of an Implication * So we can track usage via the processing for that implication, (see Note [Tracking redundant constraints] in GHC.Tc.Solver). We can ignore usage from the EvFun altogether. ************************************************************************ * * Pretty printing * * ************************************************************************ -} instance Outputable HsWrapper where ppr co_fn = pprHsWrapper co_fn (no_parens (text "<>")) pprHsWrapper :: HsWrapper -> (Bool -> SDoc) -> SDoc -- With -fprint-typechecker-elaboration, print the wrapper -- otherwise just print what's inside -- The pp_thing_inside function takes Bool to say whether -- it's in a position that needs parens for a non-atomic thing pprHsWrapper wrap pp_thing_inside = sdocOption sdocPrintTypecheckerElaboration $ \case True -> help pp_thing_inside wrap False False -> pp_thing_inside False where help :: (Bool -> SDoc) -> HsWrapper -> Bool -> SDoc -- True <=> appears in function application position -- False <=> appears as body of let or lambda help it WpHole = it help it (WpCompose f1 f2) = help (help it f2) f1 help it (WpFun f1 f2 (Scaled w t1)) = add_parens $ text "\\(x" <> dcolon <> brackets (ppr w) <> ppr t1 <> text ")." 
<+> help (\_ -> it True <+> help (\_ -> text "x") f1 True) f2 False help it (WpCast co) = add_parens $ sep [it False, nest 2 (text "|>" <+> pprParendCo co)] help it (WpEvApp id) = no_parens $ sep [it True, nest 2 (ppr id)] help it (WpTyApp ty) = no_parens $ sep [it True, text "@" <> pprParendType ty] help it (WpEvLam id) = add_parens $ sep [ text "\\" <> pprLamBndr id <> dot, it False] help it (WpTyLam tv) = add_parens $ sep [text "/\\" <> pprLamBndr tv <> dot, it False] help it (WpLet binds) = add_parens $ sep [text "let" <+> braces (ppr binds), it False] help it (WpMultCoercion co) = add_parens $ sep [it False, nest 2 (text "" <+> pprParendCo co)] pprLamBndr :: Id -> SDoc pprLamBndr v = pprBndr LambdaBind v add_parens, no_parens :: SDoc -> Bool -> SDoc add_parens d True = parens d add_parens d False = d no_parens d _ = d instance Outputable TcEvBinds where ppr (TcEvBinds v) = ppr v ppr (EvBinds bs) = text "EvBinds" <> braces (vcat (map ppr (bagToList bs))) instance Outputable EvBindsVar where ppr (EvBindsVar { ebv_uniq = u }) = text "EvBindsVar" <> angleBrackets (ppr u) ppr (CoEvBindsVar { ebv_uniq = u }) = text "CoEvBindsVar" <> angleBrackets (ppr u) instance Uniquable EvBindsVar where getUnique = ebv_uniq instance Outputable EvBind where ppr (EvBind { eb_lhs = v, eb_rhs = e, eb_is_given = is_given }) = sep [ pp_gw <+> ppr v , nest 2 $ equals <+> ppr e ] where pp_gw = brackets (if is_given then char 'G' else char 'W') -- We cheat a bit and pretend EqVars are CoVars for the purposes of pretty printing instance Outputable EvTerm where ppr (EvExpr e) = ppr e ppr (EvTypeable ty ev) = ppr ev <+> dcolon <+> text "Typeable" <+> ppr ty ppr (EvFun { et_tvs = tvs, et_given = gs, et_binds = bs, et_body = w }) = hang (text "\\" <+> sep (map pprLamBndr (tvs ++ gs)) <+> arrow) 2 (ppr bs $$ ppr w) -- Not very pretty instance Outputable EvCallStack where ppr EvCsEmpty = text "[]" ppr (EvCsPushCall orig loc tm) = ppr (orig,loc) <+> text ":" <+> ppr tm instance Outputable 
EvTypeable where ppr (EvTypeableTyCon ts _) = text "TyCon" <+> ppr ts ppr (EvTypeableTyApp t1 t2) = parens (ppr t1 <+> ppr t2) ppr (EvTypeableTrFun tm t1 t2) = parens (ppr t1 <+> mulArrow (ppr tm) <+> ppr t2) ppr (EvTypeableTyLit t1) = text "TyLit" <> ppr t1 ---------------------------------------------------------------------- -- Helper functions for dealing with IP newtype-dictionaries ---------------------------------------------------------------------- -- | Create a 'Coercion' that unwraps an implicit-parameter -- dictionary to expose the underlying value. -- We expect the 'Type' to have the form `IP sym ty`, -- and return a 'Coercion' `co :: IP sym ty ~ ty` unwrapIP :: Type -> CoercionR unwrapIP ty = case unwrapNewTyCon_maybe tc of Just (_,_,ax) -> mkUnbranchedAxInstCo Representational ax tys [] Nothing -> pprPanic "unwrapIP" $ text "The dictionary for" <+> quotes (ppr tc) <+> text "is not a newtype!" where (tc, tys) = splitTyConApp ty -- | Create a 'Coercion' that wraps a value in an implicit-parameter -- dictionary. See 'unwrapIP'. wrapIP :: Type -> CoercionR wrapIP ty = mkSymCo (unwrapIP ty) ---------------------------------------------------------------------- -- A datatype used to pass information when desugaring quotations ---------------------------------------------------------------------- -- We have to pass a `EvVar` and `Type` into `dsBracket` so that the -- correct evidence and types are applied to all the TH combinators. -- This data type bundles them up together with some convenience methods. -- -- The EvVar is evidence for `Quote m` -- The Type is a metavariable for `m` -- data QuoteWrapper = QuoteWrapper EvVar Type deriving Data.Data quoteWrapperTyVarTy :: QuoteWrapper -> Type quoteWrapperTyVarTy (QuoteWrapper _ t) = t -- | Convert the QuoteWrapper into a normal HsWrapper which can be used to -- apply its contents. 
applyQuoteWrapper :: QuoteWrapper -> HsWrapper applyQuoteWrapper (QuoteWrapper ev_var m_var) = mkWpEvVarApps [ev_var] <.> mkWpTyApps [m_var] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types/Origin.hs0000644000000000000000000016373014472400113021103 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE PolyKinds #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Describes the provenance of types as they flow through the type-checker. -- The datatypes here are mainly used for error message generation. module GHC.Tc.Types.Origin ( -- * UserTypeCtxt UserTypeCtxt(..), pprUserTypeCtxt, isSigMaybe, ReportRedundantConstraints(..), reportRedundantConstraints, redundantConstraintsSpan, -- * SkolemInfo SkolemInfo(..), SkolemInfoAnon(..), mkSkolemInfo, getSkolemInfo, pprSigSkolInfo, pprSkolInfo, unkSkol, unkSkolAnon, -- * CtOrigin CtOrigin(..), exprCtOrigin, lexprCtOrigin, matchesCtOrigin, grhssCtOrigin, isVisibleOrigin, toInvisibleOrigin, pprCtOrigin, isGivenOrigin, isWantedWantedFunDepOrigin, isWantedSuperclassOrigin, TypedThing(..), TyVarBndrs(..), -- * CtOrigin and CallStack isPushCallStackOrigin, callStackOriginFS, -- * FixedRuntimeRep origin FixedRuntimeRepOrigin(..), FixedRuntimeRepContext(..), pprFixedRuntimeRepContext, StmtOrigin(..), -- * Arrow command origin FRRArrowContext(..), pprFRRArrowContext, ExpectedFunTyOrigin(..), pprExpectedFunTyOrigin, pprExpectedFunTyHerald, ) where import GHC.Prelude import GHC.Tc.Utils.TcType import GHC.Hs import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.TyCon import GHC.Core.InstEnv import GHC.Core.PatSyn import GHC.Core.Multiplicity ( scaledThing ) import GHC.Unit.Module import GHC.Types.Id import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.Basic import GHC.Types.SrcLoc import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Stack import 
GHC.Utils.Monad import GHC.Types.Unique import GHC.Types.Unique.Supply {- ********************************************************************* * * UserTypeCtxt * * ********************************************************************* -} ------------------------------------- -- | UserTypeCtxt describes the origin of the polymorphic type -- in the places where we need an expression to have that type data UserTypeCtxt = FunSigCtxt -- Function type signature, when checking the type -- Also used for types in SPECIALISE pragmas Name -- Name of the function ReportRedundantConstraints -- This is usually 'WantRCC', but 'NoRCC' for -- * Record selectors (not important here) -- * Class and instance methods. Here the code may legitimately -- be more polymorphic than the signature generated from the -- class declaration | InfSigCtxt Name -- Inferred type for function | ExprSigCtxt -- Expression type signature ReportRedundantConstraints | KindSigCtxt -- Kind signature | StandaloneKindSigCtxt -- Standalone kind signature Name -- Name of the type/class | TypeAppCtxt -- Visible type application | ConArgCtxt Name -- Data constructor argument | TySynCtxt Name -- RHS of a type synonym decl | PatSynCtxt Name -- Type sig for a pattern synonym | PatSigCtxt -- Type sig in pattern -- eg f (x::t) = ... -- or (x::t, y) = e | RuleSigCtxt FastString Name -- LHS of a RULE forall -- RULE "foo" forall (x :: a -> a). f (Just x) = ... | ForSigCtxt Name -- Foreign import or export signature | DefaultDeclCtxt -- Types in a default declaration | InstDeclCtxt Bool -- An instance declaration -- True: stand-alone deriving -- False: vanilla instance declaration | SpecInstCtxt -- SPECIALISE instance pragma | GenSigCtxt -- Higher-rank or impredicative situations -- e.g. (f e) where f has a higher-rank type -- We might want to elaborate this | GhciCtxt Bool -- GHCi command :kind -- The Bool indicates if we are checking the outermost -- type application. 
-- See Note [Unsaturated type synonyms in GHCi] in -- GHC.Tc.Validity. | ClassSCCtxt Name -- Superclasses of a class | SigmaCtxt -- Theta part of a normal for-all type -- f :: => a -> a | DataTyCtxt Name -- The "stupid theta" part of a data decl -- data => T a = MkT a | DerivClauseCtxt -- A 'deriving' clause | TyVarBndrKindCtxt Name -- The kind of a type variable being bound | DataKindCtxt Name -- The kind of a data/newtype (instance) | TySynKindCtxt Name -- The kind of the RHS of a type synonym | TyFamResKindCtxt Name -- The result kind of a type family deriving( Eq ) -- Just for checkSkolInfoAnon -- | Report Redundant Constraints. data ReportRedundantConstraints = NoRRC -- ^ Don't report redundant constraints | WantRRC SrcSpan -- ^ Report redundant constraints, and here -- is the SrcSpan for the constraints -- E.g. f :: (Eq a, Ord b) => blah -- The span is for the (Eq a, Ord b) deriving( Eq ) -- Just for checkSkolInfoAnon reportRedundantConstraints :: ReportRedundantConstraints -> Bool reportRedundantConstraints NoRRC = False reportRedundantConstraints (WantRRC {}) = True redundantConstraintsSpan :: UserTypeCtxt -> SrcSpan redundantConstraintsSpan (FunSigCtxt _ (WantRRC span)) = span redundantConstraintsSpan (ExprSigCtxt (WantRRC span)) = span redundantConstraintsSpan _ = noSrcSpan {- -- Notes re TySynCtxt -- We allow type synonyms that aren't types; e.g. type List = [] -- -- If the RHS mentions tyvars that aren't in scope, we'll -- quantify over them: -- e.g. type T = a->a -- will become type T = forall a. a->a -- -- With gla-exts that's right, but for H98 we should complain. 
-} pprUserTypeCtxt :: UserTypeCtxt -> SDoc pprUserTypeCtxt (FunSigCtxt n _) = text "the type signature for" <+> quotes (ppr n) pprUserTypeCtxt (InfSigCtxt n) = text "the inferred type for" <+> quotes (ppr n) pprUserTypeCtxt (RuleSigCtxt _ n) = text "the type signature for" <+> quotes (ppr n) pprUserTypeCtxt (ExprSigCtxt _) = text "an expression type signature" pprUserTypeCtxt KindSigCtxt = text "a kind signature" pprUserTypeCtxt (StandaloneKindSigCtxt n) = text "a standalone kind signature for" <+> quotes (ppr n) pprUserTypeCtxt TypeAppCtxt = text "a type argument" pprUserTypeCtxt (ConArgCtxt c) = text "the type of the constructor" <+> quotes (ppr c) pprUserTypeCtxt (TySynCtxt c) = text "the RHS of the type synonym" <+> quotes (ppr c) pprUserTypeCtxt PatSigCtxt = text "a pattern type signature" pprUserTypeCtxt (ForSigCtxt n) = text "the foreign declaration for" <+> quotes (ppr n) pprUserTypeCtxt DefaultDeclCtxt = text "a type in a `default' declaration" pprUserTypeCtxt (InstDeclCtxt False) = text "an instance declaration" pprUserTypeCtxt (InstDeclCtxt True) = text "a stand-alone deriving instance declaration" pprUserTypeCtxt SpecInstCtxt = text "a SPECIALISE instance pragma" pprUserTypeCtxt GenSigCtxt = text "a type expected by the context" pprUserTypeCtxt (GhciCtxt {}) = text "a type in a GHCi command" pprUserTypeCtxt (ClassSCCtxt c) = text "the super-classes of class" <+> quotes (ppr c) pprUserTypeCtxt SigmaCtxt = text "the context of a polymorphic type" pprUserTypeCtxt (DataTyCtxt tc) = text "the context of the data type declaration for" <+> quotes (ppr tc) pprUserTypeCtxt (PatSynCtxt n) = text "the signature for pattern synonym" <+> quotes (ppr n) pprUserTypeCtxt (DerivClauseCtxt) = text "a `deriving' clause" pprUserTypeCtxt (TyVarBndrKindCtxt n) = text "the kind annotation on the type variable" <+> quotes (ppr n) pprUserTypeCtxt (DataKindCtxt n) = text "the kind annotation on the declaration for" <+> quotes (ppr n) pprUserTypeCtxt (TySynKindCtxt n) = text "the 
kind annotation on the declaration for" <+> quotes (ppr n) pprUserTypeCtxt (TyFamResKindCtxt n) = text "the result kind for" <+> quotes (ppr n) isSigMaybe :: UserTypeCtxt -> Maybe Name isSigMaybe (FunSigCtxt n _) = Just n isSigMaybe (ConArgCtxt n) = Just n isSigMaybe (ForSigCtxt n) = Just n isSigMaybe (PatSynCtxt n) = Just n isSigMaybe _ = Nothing {- ************************************************************************ * * SkolemInfo * * ************************************************************************ -} -- | 'SkolemInfo' stores the origin of a skolem type variable, -- so that we can display this information to the user in case of a type error. -- -- The 'Unique' field allows us to report all skolem type variables bound in the -- same place in a single report. data SkolemInfo = SkolemInfo Unique -- ^ used to common up skolem variables bound at the same location (only used in pprSkols) SkolemInfoAnon -- ^ the information about the origin of the skolem type variable instance Uniquable SkolemInfo where getUnique (SkolemInfo u _) = u -- | 'SkolemInfoAnon' stores the origin of a skolem type variable (e.g. bound by -- a user-written forall, the header of a data declaration, a deriving clause, ...). -- -- This information is displayed when reporting an error message, such as -- -- @"Couldn't match 'k' with 'l'"@ -- -- This allows us to explain where the type variable came from. 
-- -- When several skolem type variables are bound at once, prefer using 'SkolemInfo', -- which stores a 'Unique' which allows these type variables to be reported data SkolemInfoAnon = SigSkol -- A skolem that is created by instantiating -- a programmer-supplied type signature -- Location of the binding site is on the TyVar -- See Note [SigSkol SkolemInfo] UserTypeCtxt -- What sort of signature TcType -- Original type signature (before skolemisation) [(Name,TcTyVar)] -- Maps the original name of the skolemised tyvar -- to its instantiated version | SigTypeSkol UserTypeCtxt -- like SigSkol, but when we're kind-checking the *type* -- hence, we have less info | ForAllSkol -- Bound by a user-written "forall". TyVarBndrs -- Shows just the binders, used when reporting a bad telescope -- See Note [Checking telescopes] in GHC.Tc.Types.Constraint | DerivSkol Type -- Bound by a 'deriving' clause; -- the type is the instance we are trying to derive | InstSkol -- Bound at an instance decl | FamInstSkol -- Bound at a family instance decl | PatSkol -- An existential type variable bound by a pattern for ConLike -- a data constructor with an existential type. (HsMatchContext GhcTc) -- e.g. data T = forall a. Eq a => MkT a -- f (MkT x) = ... -- The pattern MkT x will allocate an existential type -- variable for 'a'. | IPSkol [HsIPName] -- Binding site of an implicit parameter | RuleSkol RuleName -- The LHS of a RULE | InferSkol [(Name,TcType)] -- We have inferred a type for these (mutually-recursivive) -- polymorphic Ids, and are now checking that their RHS -- constraints are satisfied. 
| BracketSkol -- Template Haskell bracket | UnifyForAllSkol -- We are unifying two for-all types TcType -- The instantiated type *inside* the forall | TyConSkol TyConFlavour Name -- bound in a type declaration of the given flavour | DataConSkol Name -- bound as an existential in a Haskell98 datacon decl or -- as any variable in a GADT datacon decl | ReifySkol -- Bound during Template Haskell reification | QuantCtxtSkol -- Quantified context, e.g. -- f :: forall c. (forall a. c a => c [a]) => blah | RuntimeUnkSkol -- Runtime skolem from the GHCi debugger #14628 | ArrowReboundIfSkol -- Bound by the expected type of the rebound arrow ifThenElse command. | UnkSkol CallStack -- | Use this when you can't specify a helpful origin for -- some skolem type variable. -- -- We're hoping to be able to get rid of this entirely, but for the moment -- it's still needed. unkSkol :: HasCallStack => SkolemInfo unkSkol = SkolemInfo (mkUniqueGrimily 0) unkSkolAnon unkSkolAnon :: HasCallStack => SkolemInfoAnon unkSkolAnon = UnkSkol callStack -- | Wrap up the origin of a skolem type variable with a new 'Unique', -- so that we can common up skolem type variables whose 'SkolemInfo' -- shares a certain 'Unique'. mkSkolemInfo :: MonadIO m => SkolemInfoAnon -> m SkolemInfo mkSkolemInfo sk_anon = do u <- liftIO $! uniqFromMask 's' return (SkolemInfo u sk_anon) getSkolemInfo :: SkolemInfo -> SkolemInfoAnon getSkolemInfo (SkolemInfo _ skol_anon) = skol_anon instance Outputable SkolemInfo where ppr (SkolemInfo _ sk_info ) = ppr sk_info instance Outputable SkolemInfoAnon where ppr = pprSkolInfo pprSkolInfo :: SkolemInfoAnon -> SDoc -- Complete the sentence "is a rigid type variable bound by..." 
pprSkolInfo (SigSkol cx ty _) = pprSigSkolInfo cx ty pprSkolInfo (SigTypeSkol cx) = pprUserTypeCtxt cx pprSkolInfo (ForAllSkol tvs) = text "an explicit forall" <+> ppr tvs pprSkolInfo (IPSkol ips) = text "the implicit-parameter binding" <> plural ips <+> text "for" <+> pprWithCommas ppr ips pprSkolInfo (DerivSkol pred) = text "the deriving clause for" <+> quotes (ppr pred) pprSkolInfo InstSkol = text "the instance declaration" pprSkolInfo FamInstSkol = text "a family instance declaration" pprSkolInfo BracketSkol = text "a Template Haskell bracket" pprSkolInfo (RuleSkol name) = text "the RULE" <+> pprRuleName name pprSkolInfo (PatSkol cl mc) = sep [ pprPatSkolInfo cl , text "in" <+> pprMatchContext mc ] pprSkolInfo (InferSkol ids) = hang (text "the inferred type" <> plural ids <+> text "of") 2 (vcat [ ppr name <+> dcolon <+> ppr ty | (name,ty) <- ids ]) pprSkolInfo (UnifyForAllSkol ty) = text "the type" <+> ppr ty pprSkolInfo (TyConSkol flav name) = text "the" <+> ppr flav <+> text "declaration for" <+> quotes (ppr name) pprSkolInfo (DataConSkol name) = text "the type signature for" <+> quotes (ppr name) pprSkolInfo ReifySkol = text "the type being reified" pprSkolInfo (QuantCtxtSkol {}) = text "a quantified context" pprSkolInfo RuntimeUnkSkol = text "Unknown type from GHCi runtime" pprSkolInfo ArrowReboundIfSkol = text "the expected type of a rebound if-then-else command" -- unkSkol -- For type variables the others are dealt with by pprSkolTvBinding. 
-- For Insts, these cases should not happen pprSkolInfo (UnkSkol cs) = text "UnkSkol (please report this as a bug)" $$ prettyCallStackDoc cs pprSigSkolInfo :: UserTypeCtxt -> TcType -> SDoc -- The type is already tidied pprSigSkolInfo ctxt ty = case ctxt of FunSigCtxt f _ -> vcat [ text "the type signature for:" , nest 2 (pprPrefixOcc f <+> dcolon <+> ppr ty) ] PatSynCtxt {} -> pprUserTypeCtxt ctxt -- See Note [Skolem info for pattern synonyms] _ -> vcat [ pprUserTypeCtxt ctxt <> colon , nest 2 (ppr ty) ] pprPatSkolInfo :: ConLike -> SDoc pprPatSkolInfo (RealDataCon dc) = sdocOption sdocLinearTypes (\show_linear_types -> sep [ text "a pattern with constructor:" , nest 2 $ ppr dc <+> dcolon <+> pprType (dataConDisplayType show_linear_types dc) <> comma ]) -- pprType prints forall's regardless of -fprint-explicit-foralls -- which is what we want here, since we might be saying -- type variable 't' is bound by ... pprPatSkolInfo (PatSynCon ps) = sep [ text "a pattern with pattern synonym:" , nest 2 $ ppr ps <+> dcolon <+> pprPatSynType ps <> comma ] {- Note [Skolem info for pattern synonyms] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For pattern synonym SkolemInfo we have SigSkol (PatSynCtxt p) ty _ but the type 'ty' is not very helpful. The full pattern-synonym type has the provided and required pieces, which it is inconvenient to record and display here. So we simply don't display the type at all, contenting ourselves with just the name of the pattern synonym, which is fine. We could do more, but it doesn't seem worth it. Note [SigSkol SkolemInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we skolemise a type f :: forall a. Eq a => forall b. b -> a Then we'll instantiate [a :-> a', b :-> b'], and with the instantiated a' -> b' -> a. But when, in an error message, we report that "b is a rigid type variable bound by the type signature for f", we want to show the foralls in the right place. So we proceed as follows: * In SigSkol we record - the original signature forall a. 
a -> forall b. b -> a - the instantiation mapping [a :-> a', b :-> b'] * Then when tidying in GHC.Tc.Utils.TcMType.tidySkolemInfo, we first tidy a' to whatever it tidies to, say a''; and then we walk over the type replacing the binder a by the tidied version a'', to give forall a''. Eq a'' => forall b''. b'' -> a'' We need to do this under (=>) arrows, to match what topSkolemise does. * Typically a'' will have a nice pretty name like "a", but the point is that the foral-bound variables of the signature we report line up with the instantiated skolems lying around in other types. ************************************************************************ * * CtOrigin * * ************************************************************************ -} -- | Some thing which has a type. -- -- This datatype is used when we want to report to the user -- that something has an unexpected type. data TypedThing = HsTypeRnThing (HsType GhcRn) | TypeThing Type | HsExprRnThing (HsExpr GhcRn) | NameThing Name -- | Some kind of type variable binder. -- -- Used for reporting errors, in 'SkolemInfo' and 'TcSolverReportMsg'. data TyVarBndrs = forall flag. OutputableBndrFlag flag 'Renamed => HsTyVarBndrsRn [HsTyVarBndr flag GhcRn] instance Outputable TypedThing where ppr (HsTypeRnThing ty) = ppr ty ppr (TypeThing ty) = ppr ty ppr (HsExprRnThing expr) = ppr expr ppr (NameThing name) = ppr name instance Outputable TyVarBndrs where ppr (HsTyVarBndrsRn bndrs) = fsep (map ppr bndrs) data CtOrigin = -- | A given constraint from a user-written type signature. The -- 'SkolemInfo' inside gives more information. GivenOrigin SkolemInfoAnon -- The following are other origins for given constraints that cannot produce -- new skolems -- hence no SkolemInfo. -- | 'InstSCOrigin' is used for a Given constraint obtained by superclass selection -- from the context of an instance declaration. E.g. -- instance @(Foo a, Bar a) => C [a]@ where ... 
-- When typechecking the instance decl itself, including producing evidence -- for the superclasses of @C@, the superclasses of @(Foo a)@ and @(Bar a)@ will -- have 'InstSCOrigin' origin. | InstSCOrigin ScDepth -- ^ The number of superclass selections necessary to -- get this constraint; see Note [Replacement vs keeping] -- and Note [Use only the best local instance], both in -- GHC.Tc.Solver.Interact TypeSize -- ^ If @(C ty1 .. tyn)@ is the largest class from -- which we made a superclass selection in the chain, -- then @TypeSize = sizeTypes [ty1, .., tyn]@ -- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance -- | 'OtherSCOrigin' is used for a Given constraint obtained by superclass -- selection from a constraint /other than/ the context of an instance -- declaration. (For the latter we use 'InstSCOrigin'.) E.g. -- f :: Foo a => blah -- f = e -- When typechecking body of 'f', the superclasses of the Given (Foo a) -- will have 'OtherSCOrigin'. -- Needed for Note [Replacement vs keeping] and -- Note [Use only the best local instance], both in GHC.Tc.Solver.Interact. | OtherSCOrigin ScDepth -- ^ The number of superclass selections necessary to -- get this constraint SkolemInfoAnon -- ^ Where the sub-class constraint arose from -- (used only for printing) -- All the others are for *wanted* constraints | OccurrenceOf Name -- Occurrence of an overloaded identifier | OccurrenceOfRecSel RdrName -- Occurrence of a record selector | AppOrigin -- An application of some kind | SpecPragOrigin UserTypeCtxt -- Specialisation pragma for -- function or instance | TypeEqOrigin { uo_actual :: TcType , uo_expected :: TcType , uo_thing :: Maybe TypedThing -- ^ The thing that has type "actual" , uo_visible :: Bool -- ^ Is at least one of the three elements above visible? -- (Errors from the polymorphic subsumption check are considered -- visible.) Only used for prioritizing error messages. 
} | KindEqOrigin TcType TcType -- A kind equality arising from unifying these two types CtOrigin -- originally arising from this (Maybe TypeOrKind) -- the level of the eq this arises from | IPOccOrigin HsIPName -- Occurrence of an implicit parameter | OverLabelOrigin FastString -- Occurrence of an overloaded label | LiteralOrigin (HsOverLit GhcRn) -- Occurrence of a literal | NegateOrigin -- Occurrence of syntactic negation | ArithSeqOrigin (ArithSeqInfo GhcRn) -- [x..], [x..y] etc | AssocFamPatOrigin -- When matching the patterns of an associated -- family instance with that of its parent class -- IMPORTANT: These constraints will never cause errors; -- See Note [Constraints to ignore] in GHC.Tc.Errors | SectionOrigin | HasFieldOrigin FastString | TupleOrigin -- (..,..) | ExprSigOrigin -- e :: ty | PatSigOrigin -- p :: ty | PatOrigin -- Instantiating a polytyped pattern at a constructor | ProvCtxtOrigin -- The "provided" context of a pattern synonym signature (PatSynBind GhcRn GhcRn) -- Information about the pattern synonym, in -- particular the name and the right-hand side | RecordUpdOrigin | ViewPatOrigin -- | 'ScOrigin' is used only for the Wanted constraints for the -- superclasses of an instance declaration. -- If the instance head is @C ty1 .. tyn@ -- then @TypeSize = sizeTypes [ty1, .., tyn]@ -- See Note [Solving superclass constraints] in GHC.Tc.TyCl.Instance | ScOrigin TypeSize | DerivClauseOrigin -- Typechecking a deriving clause (as opposed to -- standalone deriving). | DerivOriginDC DataCon Int Bool -- Checking constraints arising from this data con and field index. The -- Bool argument in DerivOriginDC and DerivOriginCoerce is True if -- standalong deriving (with a wildcard constraint) is being used. This -- is used to inform error messages on how to recommended fixes (e.g., if -- the argument is True, then don't recommend "use standalone deriving", -- but rather "fill in the wildcard constraint yourself"). 
-- See Note [Inferring the instance context] in GHC.Tc.Deriv.Infer | DerivOriginCoerce Id Type Type Bool -- DerivOriginCoerce id ty1 ty2: Trying to coerce class method `id` from -- `ty1` to `ty2`. | StandAloneDerivOrigin -- Typechecking stand-alone deriving. Useful for -- constraints coming from a wildcard constraint, -- e.g., deriving instance _ => Eq (Foo a) -- See Note [Inferring the instance context] -- in GHC.Tc.Deriv.Infer | DefaultOrigin -- Typechecking a default decl | DoOrigin -- Arising from a do expression | DoPatOrigin (LPat GhcRn) -- Arising from a failable pattern in -- a do expression | MCompOrigin -- Arising from a monad comprehension | MCompPatOrigin (LPat GhcRn) -- Arising from a failable pattern in a -- monad comprehension | ProcOrigin -- Arising from a proc expression | ArrowCmdOrigin -- Arising from an arrow command | AnnOrigin -- An annotation | FunDepOrigin1 -- A functional dependency from combining PredType CtOrigin RealSrcSpan -- This constraint arising from ... PredType CtOrigin RealSrcSpan -- and this constraint arising from ... | FunDepOrigin2 -- A functional dependency from combining PredType CtOrigin -- This constraint arising from ... PredType SrcSpan -- and this top-level instance -- We only need a CtOrigin on the first, because the location -- is pinned on the entire error message | InjTFOrigin1 -- injective type family equation combining PredType CtOrigin RealSrcSpan -- This constraint arising from ... PredType CtOrigin RealSrcSpan -- and this constraint arising from ... 
| ExprHoleOrigin (Maybe OccName) -- from an expression hole | TypeHoleOrigin OccName -- from a type hole (partial type signature) | PatCheckOrigin -- normalisation of a type during pattern-match checking | ListOrigin -- An overloaded list | IfThenElseOrigin -- An if-then-else expression | BracketOrigin -- An overloaded quotation bracket | StaticOrigin -- A static form | Shouldn'tHappenOrigin String -- the user should never see this one | GhcBug20076 -- see #20076 -- | Testing whether the constraint associated with an instance declaration -- in a signature file is satisfied upon instantiation. -- -- Test cases: backpack/should_fail/bkpfail{11,43}.bkp | InstProvidedOrigin Module -- ^ Module in which the instance was declared ClsInst -- ^ The declared typeclass instance | NonLinearPatternOrigin | UsageEnvironmentOf Name | CycleBreakerOrigin CtOrigin -- origin of the original constraint -- See Detail (7) of Note [Type equality cycles] in GHC.Tc.Solver.Canonical | FRROrigin FixedRuntimeRepOrigin | WantedSuperclassOrigin PredType CtOrigin -- From expanding out the superclasses of a Wanted; the PredType -- is the subclass predicate, and the origin -- of the original Wanted is the CtOrigin | InstanceSigOrigin -- from the sub-type check of an InstanceSig Name -- the method name Type -- the instance-sig type Type -- the instantiated type of the method | AmbiguityCheckOrigin UserTypeCtxt -- | The number of superclass selections needed to get this Given. -- If @d :: C ty@ has @ScDepth=2@, then the evidence @d@ will look -- like @sc_sel (sc_sel dg)@, where @dg@ is a Given. type ScDepth = Int -- An origin is visible if the place where the constraint arises is manifest -- in user code. Currently, all origins are visible except for invisible -- TypeEqOrigins. 
This is used when choosing which error of -- several to report isVisibleOrigin :: CtOrigin -> Bool isVisibleOrigin (TypeEqOrigin { uo_visible = vis }) = vis isVisibleOrigin (KindEqOrigin _ _ sub_orig _) = isVisibleOrigin sub_orig isVisibleOrigin _ = True -- Converts a visible origin to an invisible one, if possible. Currently, -- this works only for TypeEqOrigin toInvisibleOrigin :: CtOrigin -> CtOrigin toInvisibleOrigin orig@(TypeEqOrigin {}) = orig { uo_visible = False } toInvisibleOrigin orig = orig isGivenOrigin :: CtOrigin -> Bool isGivenOrigin (GivenOrigin {}) = True isGivenOrigin (InstSCOrigin {}) = True isGivenOrigin (OtherSCOrigin {}) = True isGivenOrigin (CycleBreakerOrigin o) = isGivenOrigin o isGivenOrigin _ = False -- See Note [Suppressing confusing errors] in GHC.Tc.Errors isWantedWantedFunDepOrigin :: CtOrigin -> Bool isWantedWantedFunDepOrigin (FunDepOrigin1 _ orig1 _ _ orig2 _) = not (isGivenOrigin orig1) && not (isGivenOrigin orig2) isWantedWantedFunDepOrigin (InjTFOrigin1 _ orig1 _ _ orig2 _) = not (isGivenOrigin orig1) && not (isGivenOrigin orig2) isWantedWantedFunDepOrigin _ = False -- | Did a constraint arise from expanding a Wanted constraint -- to look at superclasses? 
isWantedSuperclassOrigin :: CtOrigin -> Bool isWantedSuperclassOrigin (WantedSuperclassOrigin {}) = True isWantedSuperclassOrigin _ = False instance Outputable CtOrigin where ppr = pprCtOrigin ctoHerald :: SDoc ctoHerald = text "arising from" -- | Extract a suitable CtOrigin from a HsExpr lexprCtOrigin :: LHsExpr GhcRn -> CtOrigin lexprCtOrigin (L _ e) = exprCtOrigin e exprCtOrigin :: HsExpr GhcRn -> CtOrigin exprCtOrigin (HsVar _ (L _ name)) = OccurrenceOf name exprCtOrigin (HsGetField _ _ (L _ f)) = HasFieldOrigin (unLoc $ dfoLabel f) exprCtOrigin (HsUnboundVar {}) = Shouldn'tHappenOrigin "unbound variable" exprCtOrigin (HsRecSel _ f) = OccurrenceOfRecSel (unLoc $ foLabel f) exprCtOrigin (HsOverLabel _ l) = OverLabelOrigin l exprCtOrigin (ExplicitList {}) = ListOrigin exprCtOrigin (HsIPVar _ ip) = IPOccOrigin ip exprCtOrigin (HsOverLit _ lit) = LiteralOrigin lit exprCtOrigin (HsLit {}) = Shouldn'tHappenOrigin "concrete literal" exprCtOrigin (HsLam _ matches) = matchesCtOrigin matches exprCtOrigin (HsLamCase _ _ ms) = matchesCtOrigin ms exprCtOrigin (HsApp _ e1 _) = lexprCtOrigin e1 exprCtOrigin (HsAppType _ e1 _) = lexprCtOrigin e1 exprCtOrigin (OpApp _ _ op _) = lexprCtOrigin op exprCtOrigin (NegApp _ e _) = lexprCtOrigin e exprCtOrigin (HsPar _ _ e _) = lexprCtOrigin e exprCtOrigin (HsProjection _ _) = SectionOrigin exprCtOrigin (SectionL _ _ _) = SectionOrigin exprCtOrigin (SectionR _ _ _) = SectionOrigin exprCtOrigin (ExplicitTuple {}) = Shouldn'tHappenOrigin "explicit tuple" exprCtOrigin ExplicitSum{} = Shouldn'tHappenOrigin "explicit sum" exprCtOrigin (HsCase _ _ matches) = matchesCtOrigin matches exprCtOrigin (HsIf {}) = IfThenElseOrigin exprCtOrigin (HsMultiIf _ rhs) = lGRHSCtOrigin rhs exprCtOrigin (HsLet _ _ _ _ e) = lexprCtOrigin e exprCtOrigin (HsDo {}) = DoOrigin exprCtOrigin (RecordCon {}) = Shouldn'tHappenOrigin "record construction" exprCtOrigin (RecordUpd {}) = RecordUpdOrigin exprCtOrigin (ExprWithTySig {}) = ExprSigOrigin exprCtOrigin (ArithSeq 
{}) = Shouldn'tHappenOrigin "arithmetic sequence" exprCtOrigin (HsPragE _ _ e) = lexprCtOrigin e exprCtOrigin (HsTypedBracket {}) = Shouldn'tHappenOrigin "TH typed bracket" exprCtOrigin (HsUntypedBracket {}) = Shouldn'tHappenOrigin "TH untyped bracket" exprCtOrigin (HsSpliceE {}) = Shouldn'tHappenOrigin "TH splice" exprCtOrigin (HsProc {}) = Shouldn'tHappenOrigin "proc" exprCtOrigin (HsStatic {}) = Shouldn'tHappenOrigin "static expression" exprCtOrigin (XExpr (HsExpanded a _)) = exprCtOrigin a -- | Extract a suitable CtOrigin from a MatchGroup matchesCtOrigin :: MatchGroup GhcRn (LHsExpr GhcRn) -> CtOrigin matchesCtOrigin (MG { mg_alts = alts }) | L _ [L _ match] <- alts , Match { m_grhss = grhss } <- match = grhssCtOrigin grhss | otherwise = Shouldn'tHappenOrigin "multi-way match" -- | Extract a suitable CtOrigin from guarded RHSs grhssCtOrigin :: GRHSs GhcRn (LHsExpr GhcRn) -> CtOrigin grhssCtOrigin (GRHSs { grhssGRHSs = lgrhss }) = lGRHSCtOrigin lgrhss -- | Extract a suitable CtOrigin from a list of guarded RHSs lGRHSCtOrigin :: [LGRHS GhcRn (LHsExpr GhcRn)] -> CtOrigin lGRHSCtOrigin [L _ (GRHS _ _ (L _ e))] = exprCtOrigin e lGRHSCtOrigin _ = Shouldn'tHappenOrigin "multi-way GRHS" pprCtOrigin :: CtOrigin -> SDoc -- "arising from ..." 
pprCtOrigin (GivenOrigin sk) = ctoHerald <+> ppr sk pprCtOrigin (InstSCOrigin {}) = ctoHerald <+> pprSkolInfo InstSkol -- keep output in sync pprCtOrigin (OtherSCOrigin _ si) = ctoHerald <+> pprSkolInfo si pprCtOrigin (SpecPragOrigin ctxt) = case ctxt of FunSigCtxt n _ -> text "for" <+> quotes (ppr n) SpecInstCtxt -> text "a SPECIALISE INSTANCE pragma" _ -> text "a SPECIALISE pragma" -- Never happens I think pprCtOrigin (FunDepOrigin1 pred1 orig1 loc1 pred2 orig2 loc2) = hang (ctoHerald <+> text "a functional dependency between constraints:") 2 (vcat [ hang (quotes (ppr pred1)) 2 (pprCtOrigin orig1 <+> text "at" <+> ppr loc1) , hang (quotes (ppr pred2)) 2 (pprCtOrigin orig2 <+> text "at" <+> ppr loc2) ]) pprCtOrigin (FunDepOrigin2 pred1 orig1 pred2 loc2) = hang (ctoHerald <+> text "a functional dependency between:") 2 (vcat [ hang (text "constraint" <+> quotes (ppr pred1)) 2 (pprCtOrigin orig1 ) , hang (text "instance" <+> quotes (ppr pred2)) 2 (text "at" <+> ppr loc2) ]) pprCtOrigin (InjTFOrigin1 pred1 orig1 loc1 pred2 orig2 loc2) = hang (ctoHerald <+> text "reasoning about an injective type family using constraints:") 2 (vcat [ hang (quotes (ppr pred1)) 2 (pprCtOrigin orig1 <+> text "at" <+> ppr loc1) , hang (quotes (ppr pred2)) 2 (pprCtOrigin orig2 <+> text "at" <+> ppr loc2) ]) pprCtOrigin AssocFamPatOrigin = text "when matching a family LHS with its class instance head" pprCtOrigin (TypeEqOrigin { uo_actual = t1, uo_expected = t2, uo_visible = vis }) = hang (ctoHerald <+> text "a type equality" <> whenPprDebug (brackets (ppr vis))) 2 (sep [ppr t1, char '~', ppr t2]) pprCtOrigin (KindEqOrigin t1 t2 _ _) = hang (ctoHerald <+> text "a kind equality arising from") 2 (sep [ppr t1, char '~', ppr t2]) pprCtOrigin (DerivOriginDC dc n _) = hang (ctoHerald <+> text "the" <+> speakNth n <+> text "field of" <+> quotes (ppr dc)) 2 (parens (text "type" <+> quotes (ppr (scaledThing ty)))) where ty = dataConOrigArgTys dc !! 
(n-1) pprCtOrigin (DerivOriginCoerce meth ty1 ty2 _) = hang (ctoHerald <+> text "the coercion of the method" <+> quotes (ppr meth)) 2 (sep [ text "from type" <+> quotes (ppr ty1) , nest 2 $ text "to type" <+> quotes (ppr ty2) ]) pprCtOrigin (DoPatOrigin pat) = ctoHerald <+> text "a do statement" $$ text "with the failable pattern" <+> quotes (ppr pat) pprCtOrigin (MCompPatOrigin pat) = ctoHerald <+> hsep [ text "the failable pattern" , quotes (ppr pat) , text "in a statement in a monad comprehension" ] pprCtOrigin (Shouldn'tHappenOrigin note) = vcat [ text "<< This should not appear in error messages. If you see this" , text "in an error message, please report a bug mentioning" <+> quotes (text note) <+> text "at" , text "https://gitlab.haskell.org/ghc/ghc/wikis/report-a-bug >>" ] pprCtOrigin GhcBug20076 = vcat [ text "GHC Bug #20076 " , text "Assuming you have a partial type signature, you can avoid this error" , text "by either adding an extra-constraints wildcard (like `(..., _) => ...`," , text "with the underscore at the end of the constraint), or by avoiding the" , text "use of a simplifiable constraint in your partial type signature." 
] pprCtOrigin (ProvCtxtOrigin PSB{ psb_id = (L _ name) }) = hang (ctoHerald <+> text "the \"provided\" constraints claimed by") 2 (text "the signature of" <+> quotes (ppr name)) pprCtOrigin (InstProvidedOrigin mod cls_inst) = vcat [ text "arising when attempting to show that" , ppr cls_inst , text "is provided by" <+> quotes (ppr mod)] pprCtOrigin (CycleBreakerOrigin orig) = pprCtOrigin orig pprCtOrigin (FRROrigin {}) = ctoHerald <+> text "a representation-polymorphism check" pprCtOrigin (WantedSuperclassOrigin subclass_pred subclass_orig) = sep [ ctoHerald <+> text "a superclass required to satisfy" <+> quotes (ppr subclass_pred) <> comma , pprCtOrigin subclass_orig ] pprCtOrigin (InstanceSigOrigin method_name sig_type orig_method_type) = vcat [ ctoHerald <+> text "the check that an instance signature is more general" , text "than the type of the method (instantiated for this instance)" , hang (text "instance signature:") 2 (ppr method_name <+> dcolon <+> ppr sig_type) , hang (text "instantiated method type:") 2 (ppr orig_method_type) ] pprCtOrigin (AmbiguityCheckOrigin ctxt) = ctoHerald <+> text "a type ambiguity check for" $$ pprUserTypeCtxt ctxt pprCtOrigin simple_origin = ctoHerald <+> pprCtO simple_origin -- | Short one-liners pprCtO :: HasCallStack => CtOrigin -> SDoc pprCtO (OccurrenceOf name) = hsep [text "a use of", quotes (ppr name)] pprCtO (OccurrenceOfRecSel name) = hsep [text "a use of", quotes (ppr name)] pprCtO AppOrigin = text "an application" pprCtO (IPOccOrigin name) = hsep [text "a use of implicit parameter", quotes (ppr name)] pprCtO (OverLabelOrigin l) = hsep [text "the overloaded label" ,quotes (char '#' <> ppr l)] pprCtO RecordUpdOrigin = text "a record update" pprCtO ExprSigOrigin = text "an expression type signature" pprCtO PatSigOrigin = text "a pattern type signature" pprCtO PatOrigin = text "a pattern" pprCtO ViewPatOrigin = text "a view pattern" pprCtO (LiteralOrigin lit) = hsep [text "the literal", quotes (ppr lit)] pprCtO 
(ArithSeqOrigin seq) = hsep [text "the arithmetic sequence", quotes (ppr seq)] pprCtO SectionOrigin = text "an operator section" pprCtO (HasFieldOrigin f) = hsep [text "selecting the field", quotes (ppr f)] pprCtO AssocFamPatOrigin = text "the LHS of a family instance" pprCtO TupleOrigin = text "a tuple" pprCtO NegateOrigin = text "a use of syntactic negation" pprCtO (ScOrigin n) = text "the superclasses of an instance declaration" <> whenPprDebug (parens (ppr n)) pprCtO DerivClauseOrigin = text "the 'deriving' clause of a data type declaration" pprCtO StandAloneDerivOrigin = text "a 'deriving' declaration" pprCtO DefaultOrigin = text "a 'default' declaration" pprCtO DoOrigin = text "a do statement" pprCtO MCompOrigin = text "a statement in a monad comprehension" pprCtO ProcOrigin = text "a proc expression" pprCtO ArrowCmdOrigin = text "an arrow command" pprCtO AnnOrigin = text "an annotation" pprCtO (ExprHoleOrigin Nothing) = text "an expression hole" pprCtO (ExprHoleOrigin (Just occ)) = text "a use of" <+> quotes (ppr occ) pprCtO (TypeHoleOrigin occ) = text "a use of wildcard" <+> quotes (ppr occ) pprCtO PatCheckOrigin = text "a pattern-match completeness check" pprCtO ListOrigin = text "an overloaded list" pprCtO IfThenElseOrigin = text "an if-then-else expression" pprCtO StaticOrigin = text "a static form" pprCtO NonLinearPatternOrigin = text "a non-linear pattern" pprCtO (UsageEnvironmentOf x) = hsep [text "multiplicity of", quotes (ppr x)] pprCtO BracketOrigin = text "a quotation bracket" -- These ones are handled by pprCtOrigin, but we nevertheless sometimes -- get here via callStackOriginFS, when doing ambiguity checks -- A bit silly, but no great harm pprCtO (GivenOrigin {}) = text "a given constraint" pprCtO (InstSCOrigin {}) = text "the superclass of an instance constraint" pprCtO (OtherSCOrigin {}) = text "the superclass of a given constraint" pprCtO (SpecPragOrigin {}) = text "a SPECIALISE pragma" pprCtO (FunDepOrigin1 {}) = text "a functional 
dependency" pprCtO (FunDepOrigin2 {}) = text "a functional dependency" pprCtO (InjTFOrigin1 {}) = text "an injective type family" pprCtO (TypeEqOrigin {}) = text "a type equality" pprCtO (KindEqOrigin {}) = text "a kind equality" pprCtO (DerivOriginDC {}) = text "a deriving clause" pprCtO (DerivOriginCoerce {}) = text "a derived method" pprCtO (DoPatOrigin {}) = text "a do statement" pprCtO (MCompPatOrigin {}) = text "a monad comprehension pattern" pprCtO (Shouldn'tHappenOrigin note) = text note pprCtO (ProvCtxtOrigin {}) = text "a provided constraint" pprCtO (InstProvidedOrigin {}) = text "a provided constraint" pprCtO (CycleBreakerOrigin orig) = pprCtO orig pprCtO (FRROrigin {}) = text "a representation-polymorphism check" pprCtO GhcBug20076 = text "GHC Bug #20076" pprCtO (WantedSuperclassOrigin {}) = text "a superclass constraint" pprCtO (InstanceSigOrigin {}) = text "a type signature in an instance" pprCtO (AmbiguityCheckOrigin {}) = text "a type ambiguity check" {- ********************************************************************* * * CallStacks and CtOrigin See Note [Overview of implicit CallStacks] in GHC.Tc.Types.Evidence * * ********************************************************************* -} isPushCallStackOrigin :: CtOrigin -> Bool -- Do we want to solve this IP constraint directly (return False) -- or push the call site (return True) -- See Note [Overview of implicit CallStacks] in GHc.Tc.Types.Evidence isPushCallStackOrigin (IPOccOrigin {}) = False isPushCallStackOrigin _ = True callStackOriginFS :: CtOrigin -> FastString -- This is the string that appears in the CallStack callStackOriginFS (OccurrenceOf fun) = occNameFS (getOccName fun) callStackOriginFS orig = mkFastString (showSDocUnsafe (pprCtO orig)) {- ************************************************************************ * * Checking for representation polymorphism * * ************************************************************************ Note [Reporting representation-polymorphism 
errors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As explained in Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete, to check that (ty :: ki) has a fixed runtime representation, we emit an equality constraint of the form ki ~# concrete_tv where concrete_tv is a concrete metavariable. In this situation, we attach a 'FixedRuntimeRepOrigin' to both the equality and the concrete type variable. The 'FixedRuntimeRepOrigin' consists of two pieces of information: - the type 'ty' on which we performed the representation-polymorphism check, - a 'FixedRuntimeRepContext' which explains why we needed to perform a check (e.g. because 'ty' was the kind of a function argument, or of a bound variable in a lambda abstraction, ...). This information gets passed along as we make progress on solving the constraint, and if we end up with an unsolved constraint we can report an informative error message to the user using the 'FixedRuntimeRepOrigin'. The error reporting goes through two different paths: - constraints whose 'CtOrigin' contains a 'FixedRuntimeRepOrigin' are reported using 'mkFRRErr' in 'reportWanteds', - equality constraints in which one side is a concrete metavariable and the other side is not concrete are reported using 'mkTyVarEqErr'. In this case, we pass on the type variable and the non-concrete type for error reporting, using the 'frr_info_not_concrete' field. This is why we have the 'FixedRuntimeRepErrorInfo' datatype: so that we can optionally include this extra message about an unsolved equality between a concrete type variable and a non-concrete type. -} -- | The context for a representation-polymorphism check. -- -- For example, when typechecking @ \ (a :: k) -> ...@, -- we are checking the type @a@ because it's the type of -- a term variable bound in a lambda, so we use 'FRRBinder'. data FixedRuntimeRepOrigin = FixedRuntimeRepOrigin { frr_type :: Type -- ^ What type are we checking? -- For example, `a[tau]` in `a[tau] :: TYPE rr[tau]`. 
, frr_context :: FixedRuntimeRepContext -- ^ What context requires a fixed runtime representation? } -- | The context in which a representation-polymorphism check was performed. -- -- Does not include the type on which the check was performed; see -- 'FixedRuntimeRepOrigin' for that. data FixedRuntimeRepContext -- | Record fields in record updates must have a fixed runtime representation. -- -- Test case: RepPolyRecordUpdate. = FRRRecordUpdate !RdrName !(HsExpr GhcTc) -- | Variable binders must have a fixed runtime representation. -- -- Test cases: LevPolyLet, RepPolyPatBind. | FRRBinder !Name -- | Pattern binds must have a fixed runtime representation. -- -- Test case: RepPolyInferPatBind. | FRRPatBind -- | Pattern synonym arguments must have a fixed runtime representation. -- -- Test case: RepPolyInferPatSyn. | FRRPatSynArg -- | The type of the scrutinee in a case statement must have a -- fixed runtime representation. -- -- Test cases: RepPolyCase{1,2}. | FRRCase -- | An instantiation of a newtype/data constructor in which -- an argument type does not have a fixed runtime representation. -- -- The argument can either be an expression or a pattern. -- -- Test cases: -- Expression: UnliftedNewtypesLevityBinder. -- Pattern: T20363. | FRRDataConArg !ExprOrPat !DataCon !Int -- | An instantiation of an 'Id' with no binding (e.g. `coerce`, `unsafeCoerce#`) -- in which one of the remaining arguments types does not have a fixed runtime representation. -- -- Test cases: RepPolyWrappedVar, T14561, UnliftedNewtypesCoerceFail. | FRRNoBindingResArg !Id !Int -- | Arguments to unboxed tuples must have fixed runtime representations. -- -- Test case: RepPolyTuple. | FRRTupleArg !Int -- | Tuple sections must have a fixed runtime representation. -- -- Test case: RepPolyTupleSection. | FRRTupleSection !Int -- | Unboxed sums must have a fixed runtime representation. -- -- Test cases: RepPolySum. 
| FRRUnboxedSum -- | The body of a @do@ expression or a monad comprehension must -- have a fixed runtime representation. -- -- Test cases: RepPolyDoBody{1,2}, RepPolyMcBody. | FRRBodyStmt !StmtOrigin !Int -- | Arguments to a guard in a monad comprehesion must have -- a fixed runtime representation. -- -- Test case: RepPolyMcGuard. | FRRBodyStmtGuard -- | Arguments to `(>>=)` arising from a @do@ expression -- or a monad comprehension must have a fixed runtime representation. -- -- Test cases: RepPolyDoBind, RepPolyMcBind. | FRRBindStmt !StmtOrigin -- | A value bound by a pattern guard must have a fixed runtime representation. -- -- Test cases: none. | FRRBindStmtGuard -- | A representation-polymorphism check arising from arrow notation. -- -- See 'FRRArrowContext' for more details. | FRRArrow !FRRArrowContext -- | A representation-polymorphic check arising from a call -- to 'matchExpectedFunTys' or 'matchActualFunTySigma'. -- -- See 'ExpectedFunTyOrigin' for more details. | FRRExpectedFunTy !ExpectedFunTyOrigin !Int -- ^ argument position (1-indexed) -- | Print the context for a @FixedRuntimeRep@ representation-polymorphism check. -- -- Note that this function does not include the specific 'RuntimeRep' -- which is not fixed. That information is stored in 'FixedRuntimeRepOrigin' -- and is reported separately. 
pprFixedRuntimeRepContext :: FixedRuntimeRepContext -> SDoc pprFixedRuntimeRepContext (FRRRecordUpdate lbl _arg) = sep [ text "The record update at field" , quotes (ppr lbl) ] pprFixedRuntimeRepContext (FRRBinder binder) = sep [ text "The binder" , quotes (ppr binder) ] pprFixedRuntimeRepContext FRRPatBind = text "The pattern binding" pprFixedRuntimeRepContext FRRPatSynArg = text "The pattern synonym argument pattern" pprFixedRuntimeRepContext FRRCase = text "The scrutinee of the case statement" pprFixedRuntimeRepContext (FRRDataConArg expr_or_pat con i) = text "The" <+> what where arg, what :: SDoc arg = case expr_or_pat of Expression -> text "argument" Pattern -> text "pattern" what | isNewDataCon con = text "newtype constructor" <+> arg | otherwise = text "data constructor" <+> arg <+> text "in" <+> speakNth i <+> text "position" pprFixedRuntimeRepContext (FRRNoBindingResArg fn i) = vcat [ text "Unsaturated use of a representation-polymorphic primitive function." , text "The" <+> speakNth i <+> text "argument of" <+> quotes (ppr $ getName fn) ] pprFixedRuntimeRepContext (FRRTupleArg i) = text "The tuple argument in" <+> speakNth i <+> text "position" pprFixedRuntimeRepContext (FRRTupleSection i) = text "The" <+> speakNth i <+> text "component of the tuple section" pprFixedRuntimeRepContext FRRUnboxedSum = text "The unboxed sum" pprFixedRuntimeRepContext (FRRBodyStmt stmtOrig i) = vcat [ text "The" <+> speakNth i <+> text "argument to (>>)" <> comma , text "arising from the" <+> ppr stmtOrig <> comma ] pprFixedRuntimeRepContext FRRBodyStmtGuard = vcat [ text "The argument to" <+> quotes (text "guard") <> comma , text "arising from the" <+> ppr MonadComprehension <> comma ] pprFixedRuntimeRepContext (FRRBindStmt stmtOrig) = vcat [ text "The first argument to (>>=)" <> comma , text "arising from the" <+> ppr stmtOrig <> comma ] pprFixedRuntimeRepContext FRRBindStmtGuard = sep [ text "The body of the bind statement" ] pprFixedRuntimeRepContext (FRRArrow 
arrowContext) = pprFRRArrowContext arrowContext pprFixedRuntimeRepContext (FRRExpectedFunTy funTyOrig arg_pos) = pprExpectedFunTyOrigin funTyOrig arg_pos instance Outputable FixedRuntimeRepContext where ppr = pprFixedRuntimeRepContext -- | Are we in a @do@ expression or a monad comprehension? -- -- This datatype is only used to report this context to the user in error messages. data StmtOrigin = MonadComprehension | DoNotation instance Outputable StmtOrigin where ppr MonadComprehension = text "monad comprehension" ppr DoNotation = quotes ( text "do" ) <+> text "statement" {- ********************************************************************* * * FixedRuntimeRep: arrows * * ********************************************************************* -} -- | While typechecking arrow notation, in which context -- did a representation polymorphism check arise? -- -- See 'FixedRuntimeRepContext' for more general origins of -- representation polymorphism checks. data FRRArrowContext -- | The result of an arrow command does not have a fixed runtime representation. -- -- Test case: RepPolyArrowCmd. = ArrowCmdResTy !(HsCmd GhcRn) -- | The argument to an arrow in an arrow command application does not have -- a fixed runtime representation. -- -- Test cases: none. | ArrowCmdApp !(HsCmd GhcRn) !(HsExpr GhcRn) -- | A function in an arrow application does not have -- a fixed runtime representation. -- -- Test cases: none. | ArrowCmdArrApp !(HsExpr GhcRn) !(HsExpr GhcRn) !HsArrAppType -- | The scrutinee type in an arrow command case statement does not have a -- fixed runtime representation. -- -- Test cases: none. | ArrowCmdCase -- | The overall type of an arrow proc expression does not have -- a fixed runtime representation. -- -- Test case: RepPolyArrowFun. 
| ArrowFun !(HsExpr GhcRn) pprFRRArrowContext :: FRRArrowContext -> SDoc pprFRRArrowContext (ArrowCmdResTy cmd) = vcat [ hang (text "The arrow command") 2 (quotes (ppr cmd)) ] pprFRRArrowContext (ArrowCmdApp fun arg) = vcat [ text "The argument in the arrow command application of" , nest 2 (quotes (ppr fun)) , text "to" , nest 2 (quotes (ppr arg)) ] pprFRRArrowContext (ArrowCmdArrApp fun arg ho_app) = vcat [ text "The function in the" <+> pprHsArrType ho_app <+> text "of" , nest 2 (quotes (ppr fun)) , text "to" , nest 2 (quotes (ppr arg)) ] pprFRRArrowContext ArrowCmdCase = text "The scrutinee of the arrow case command" pprFRRArrowContext (ArrowFun fun) = vcat [ text "The return type of the arrow function" , nest 2 (quotes (ppr fun)) ] instance Outputable FRRArrowContext where ppr = pprFRRArrowContext {- ********************************************************************* * * FixedRuntimeRep: ExpectedFunTy origin * * ********************************************************************* -} -- | In what context are we calling 'matchExpectedFunTys' -- or 'matchActualFunTySigma'? -- -- Used for two things: -- -- 1. Reporting error messages which explain that a function has been -- given an unexpected number of arguments. -- Uses 'pprExpectedFunTyHerald'. -- See Note [Herald for matchExpectedFunTys] in GHC.Tc.Utils.Unify. -- -- 2. Reporting representation-polymorphism errors when a function argument -- doesn't have a fixed RuntimeRep as per Note [Fixed RuntimeRep] -- in GHC.Tc.Utils.Concrete. -- Uses 'pprExpectedFunTyOrigin'. -- See 'FixedRuntimeRepContext' for the situations in which -- representation-polymorphism checks are performed. data ExpectedFunTyOrigin -- | A rebindable syntax operator is expected to have a function type. 
-- -- Test cases for representation-polymorphism checks: -- RepPolyDoBind, RepPolyDoBody{1,2}, RepPolyMc{Bind,Body,Guard}, RepPolyNPlusK = ExpectedFunTySyntaxOp !CtOrigin !(HsExpr GhcRn) -- ^ rebindable syntax operator -- | A view pattern must have a function type. -- -- Test cases for representation-polymorphism checks: -- RepPolyBinder | ExpectedFunTyViewPat !(HsExpr GhcRn) -- ^ function used in the view pattern -- | Need to be able to extract an argument type from a function type. -- -- Test cases for representation-polymorphism checks: -- RepPolyApp | forall (p :: Pass) . (OutputableBndrId p) => ExpectedFunTyArg !TypedThing -- ^ function !(HsExpr (GhcPass p)) -- ^ argument -- | Ensure that a function defined by equations indeed has a function type -- with the appropriate number of arguments. -- -- Test cases for representation-polymorphism checks: -- RepPolyBinder, RepPolyRecordPattern, RepPolyWildcardPattern | ExpectedFunTyMatches !TypedThing -- ^ name of the function !(MatchGroup GhcRn (LHsExpr GhcRn)) -- ^ equations -- | Ensure that a lambda abstraction has a function type. -- -- Test cases for representation-polymorphism checks: -- RepPolyLambda | ExpectedFunTyLam !(MatchGroup GhcRn (LHsExpr GhcRn)) -- | Ensure that a lambda case expression has a function type. 
-- -- Test cases for representation-polymorphism checks: -- RepPolyMatch | ExpectedFunTyLamCase LamCaseVariant !(HsExpr GhcRn) -- ^ the entire lambda-case expression pprExpectedFunTyOrigin :: ExpectedFunTyOrigin -> Int -- ^ argument position (starting at 1) -> SDoc pprExpectedFunTyOrigin funTy_origin i = case funTy_origin of ExpectedFunTySyntaxOp orig op -> vcat [ sep [ the_arg_of , text "the rebindable syntax operator" , quotes (ppr op) ] , nest 2 (ppr orig) ] ExpectedFunTyViewPat expr -> vcat [ the_arg_of <+> text "the view pattern" , nest 2 (ppr expr) ] ExpectedFunTyArg fun arg -> sep [ text "The argument" , quotes (ppr arg) , text "of" , quotes (ppr fun) ] ExpectedFunTyMatches fun (MG { mg_alts = L _ alts }) | null alts -> the_arg_of <+> quotes (ppr fun) | otherwise -> text "The" <+> speakNth i <+> text "pattern in the equation" <> plural alts <+> text "for" <+> quotes (ppr fun) ExpectedFunTyLam {} -> binder_of $ text "lambda" ExpectedFunTyLamCase lc_variant _ -> binder_of $ lamCaseKeyword lc_variant where the_arg_of :: SDoc the_arg_of = text "The" <+> speakNth i <+> text "argument of" binder_of :: SDoc -> SDoc binder_of what = text "The binder of the" <+> what <+> text "expression" pprExpectedFunTyHerald :: ExpectedFunTyOrigin -> SDoc pprExpectedFunTyHerald (ExpectedFunTySyntaxOp {}) = text "This rebindable syntax expects a function with" pprExpectedFunTyHerald (ExpectedFunTyViewPat {}) = text "A view pattern expression expects" pprExpectedFunTyHerald (ExpectedFunTyArg fun _) = sep [ text "The function" <+> quotes (ppr fun) , text "is applied to" ] pprExpectedFunTyHerald (ExpectedFunTyMatches fun (MG { mg_alts = L _ alts })) = text "The equation" <> plural alts <+> text "for" <+> quotes (ppr fun) <+> hasOrHave alts pprExpectedFunTyHerald (ExpectedFunTyLam match) = sep [ text "The lambda expression" <+> quotes (pprSetDepth (PartWay 1) $ pprMatches match) -- The pprSetDepth makes the lambda abstraction print briefly , text "has" ] pprExpectedFunTyHerald 
(ExpectedFunTyLamCase _ expr) = sep [ text "The function" <+> quotes (ppr expr) , text "requires" ] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types/Rank.hs0000644000000000000000000000267714472400113020551 0ustar0000000000000000module GHC.Tc.Types.Rank (Rank(..)) where import GHC.Base (Bool) import GHC.Utils.Outputable (Outputable, (<+>), parens, ppr, text) {- Note [Higher rank types] ~~~~~~~~~~~~~~~~~~~~~~~~ Technically Int -> forall a. a->a is still a rank-1 type, but it's not Haskell 98 (#5957). So the validity checker allow a forall after an arrow only if we allow it before -- that is, with Rank2Types or RankNTypes -} data Rank = ArbitraryRank -- Any rank ok | LimitedRank -- Note [Higher rank types] Bool -- Forall ok at top Rank -- Use for function arguments -- Monotypes that could be a polytype through an extension | MonoTypeRankZero -- RankNTypes | MonoTypeTyConArg -- ImpredicativeTypes | MonoTypeSynArg -- LiberalTypeSynonyms | MonoTypeConstraint -- QuantifiedConstraints -- | MustBeMonoType -- Monotype regardless of flags instance Outputable Rank where ppr ArbitraryRank = text "ArbitraryRank" ppr (LimitedRank top_forall_ok r) = text "LimitedRank" <+> ppr top_forall_ok <+> parens (ppr r) ppr MonoTypeRankZero = text "MonoTypeRankZero" ppr MonoTypeTyConArg = text "MonoTypeTyConArg" ppr MonoTypeSynArg = text "MonoTypeSynArg" ppr MonoTypeConstraint = text "MonoTypeConstraint" ppr MustBeMonoType = text "MustBeMonoType" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Utils/TcType.hs0000644000000000000000000027711514472400113021063 0ustar0000000000000000 {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- | Types used in the typechecker -- -- This module provides the Type interface for front-end parts of the -- compiler. 
These parts -- -- * treat "source types" as opaque: -- newtypes, and predicates are meaningful. -- * look through usage types -- module GHC.Tc.Utils.TcType ( -------------------------------- -- Types TcType, TcSigmaType, TcTypeFRR, TcSigmaTypeFRR, TcRhoType, TcTauType, TcPredType, TcThetaType, TcTyVar, TcTyVarSet, TcDTyVarSet, TcTyCoVarSet, TcDTyCoVarSet, TcKind, TcCoVar, TcTyCoVar, TcTyVarBinder, TcInvisTVBinder, TcReqTVBinder, TcTyCon, MonoTcTyCon, PolyTcTyCon, TcTyConBinder, KnotTied, ExpType(..), InferResult(..), ExpTypeFRR, ExpSigmaType, ExpSigmaTypeFRR, ExpRhoType, mkCheckExpType, SyntaxOpType(..), synKnownType, mkSynFunTys, -- TcLevel TcLevel(..), topTcLevel, pushTcLevel, isTopTcLevel, strictlyDeeperThan, deeperThanOrSame, sameDepthAs, tcTypeLevel, tcTyVarLevel, maxTcLevel, -------------------------------- -- MetaDetails TcTyVarDetails(..), pprTcTyVarDetails, vanillaSkolemTvUnk, MetaDetails(Flexi, Indirect), MetaInfo(..), skolemSkolInfo, isImmutableTyVar, isSkolemTyVar, isMetaTyVar, isMetaTyVarTy, isTyVarTy, tcIsTcTyVar, isTyVarTyVar, isOverlappableTyVar, isTyConableTyVar, ConcreteTvOrigin(..), isConcreteTyVar_maybe, isConcreteTyVar, isConcreteTyVarTy, isConcreteTyVarTy_maybe, isAmbiguousTyVar, isCycleBreakerTyVar, metaTyVarRef, metaTyVarInfo, isFlexi, isIndirect, isRuntimeUnkSkol, metaTyVarTcLevel, setMetaTyVarTcLevel, metaTyVarTcLevel_maybe, isTouchableMetaTyVar, isPromotableMetaTyVar, findDupTyVarTvs, mkTyVarNamePairs, -------------------------------- -- Builders mkPhiTy, mkInfSigmaTy, mkSpecSigmaTy, mkSigmaTy, mkTcAppTy, mkTcAppTys, mkTcCastTy, -------------------------------- -- Splitters -- These are important because they do not look through newtypes getTyVar, tcSplitForAllTyVarBinder_maybe, tcSplitForAllTyVars, tcSplitForAllInvisTyVars, tcSplitSomeForAllTyVars, tcSplitForAllReqTVBinders, tcSplitForAllInvisTVBinders, tcSplitPiTys, tcSplitPiTy_maybe, tcSplitForAllTyVarBinders, tcSplitPhiTy, tcSplitPredFunTy_maybe, tcSplitFunTy_maybe, tcSplitFunTys, 
tcFunArgTy, tcFunResultTy, tcFunResultTyN, tcSplitFunTysN, tcSplitTyConApp, tcSplitTyConApp_maybe, tcTyConAppTyCon, tcTyConAppTyCon_maybe, tcTyConAppArgs, tcSplitAppTy_maybe, tcSplitAppTy, tcSplitAppTys, tcRepSplitAppTy_maybe, tcRepGetNumAppTys, tcGetCastedTyVar_maybe, tcGetTyVar_maybe, tcGetTyVar, tcSplitSigmaTy, tcSplitNestedSigmaTys, --------------------------------- -- Predicates. -- Again, newtypes are opaque eqType, eqTypes, nonDetCmpType, nonDetCmpTypes, eqTypeX, pickyEqType, tcEqType, tcEqKind, tcEqTypeNoKindCheck, tcEqTypeVis, tcEqTyConApps, isSigmaTy, isRhoTy, isRhoExpTy, isOverloadedTy, isFloatingPrimTy, isDoubleTy, isFloatTy, isIntTy, isWordTy, isStringTy, isIntegerTy, isNaturalTy, isBoolTy, isUnitTy, isCharTy, isTauTy, isTauTyCon, tcIsTyVarTy, tcIsForAllTy, isPredTy, isTyVarClassPred, checkValidClsArgs, hasTyVarHead, isRigidTy, --------------------------------- -- Misc type manipulators deNoteType, orphNamesOfType, orphNamesOfCo, orphNamesOfTypes, orphNamesOfCoCon, getDFunTyKey, evVarPred, ambigTkvsOfTy, --------------------------------- -- Predicate types mkMinimalBySCs, transSuperClasses, pickCapturedPreds, immSuperClasses, boxEqPred, isImprovementPred, -- * Finding type instances tcTyFamInsts, tcTyFamInstsAndVis, tcTyConAppTyFamInstsAndVis, isTyFamFree, -- * Finding "exact" (non-dead) type variables exactTyCoVarsOfType, exactTyCoVarsOfTypes, anyRewritableTyVar, anyRewritableTyFamApp, --------------------------------- -- Foreign import and export IllegalForeignTypeReason(..), TypeCannotBeMarshaledReason(..), isFFIArgumentTy, -- :: DynFlags -> Safety -> Type -> Bool isFFIImportResultTy, -- :: DynFlags -> Type -> Bool isFFIExportResultTy, -- :: Type -> Bool isFFIExternalTy, -- :: Type -> Bool isFFIDynTy, -- :: Type -> Type -> Bool isFFIPrimArgumentTy, -- :: DynFlags -> Type -> Bool isFFIPrimResultTy, -- :: DynFlags -> Type -> Bool isFFILabelTy, -- :: Type -> Bool isFunPtrTy, -- :: Type -> Bool tcSplitIOType_maybe, -- :: Type -> Maybe Type 
-------------------------------- -- Reexported from Kind Kind, tcTypeKind, liftedTypeKind, constraintKind, isLiftedTypeKind, isUnliftedTypeKind, classifiesTypeWithValues, -------------------------------- -- Reexported from Type Type, PredType, ThetaType, TyCoBinder, ArgFlag(..), AnonArgFlag(..), mkForAllTy, mkForAllTys, mkInvisForAllTys, mkTyCoInvForAllTys, mkSpecForAllTys, mkTyCoInvForAllTy, mkInfForAllTy, mkInfForAllTys, mkVisFunTy, mkVisFunTys, mkInvisFunTy, mkInvisFunTyMany, mkVisFunTyMany, mkVisFunTysMany, mkInvisFunTysMany, mkTyConApp, mkAppTy, mkAppTys, mkTyConTy, mkTyVarTy, mkTyVarTys, mkTyCoVarTy, mkTyCoVarTys, isClassPred, isEqPrimPred, isIPLikePred, isEqPred, isEqPredClass, mkClassPred, tcSplitDFunTy, tcSplitDFunHead, tcSplitMethodTy, isRuntimeRepVar, isFixedRuntimeRepKind, isVisibleBinder, isInvisibleBinder, -- Type substitutions TCvSubst(..), -- Representation visible to a few friends TvSubstEnv, emptyTCvSubst, mkEmptyTCvSubst, zipTvSubst, mkTvSubstPrs, notElemTCvSubst, unionTCvSubst, getTvSubstEnv, setTvSubstEnv, getTCvInScope, extendTCvInScope, extendTCvInScopeList, extendTCvInScopeSet, extendTvSubstAndInScope, Type.lookupTyVar, Type.extendTCvSubst, Type.substTyVarBndr, Type.extendTvSubst, isInScope, mkTCvSubst, mkTvSubst, zipTyEnv, zipCoEnv, Type.substTy, substTys, substScaledTys, substTyWith, substTyWithCoVars, substTyAddInScope, substTyUnchecked, substTysUnchecked, substScaledTyUnchecked, substThetaUnchecked, substTyWithUnchecked, substCoUnchecked, substCoWithUnchecked, substTheta, isUnliftedType, -- Source types are always lifted isUnboxedTupleType, -- Ditto isPrimitiveType, tcView, coreView, tyCoVarsOfType, tyCoVarsOfTypes, closeOverKinds, tyCoFVsOfType, tyCoFVsOfTypes, tyCoVarsOfTypeDSet, tyCoVarsOfTypesDSet, closeOverKindsDSet, tyCoVarsOfTypeList, tyCoVarsOfTypesList, noFreeVarsOfType, -------------------------------- pprKind, pprParendKind, pprSigmaType, pprType, pprParendType, pprTypeApp, pprTheta, pprParendTheta, pprThetaArrowTy, 
pprClassPred, pprTCvBndr, pprTCvBndrs, TypeSize, sizeType, sizeTypes, scopedSort, --------------------------------- -- argument visibility tcTyConVisibilities, isNextTyConArgVisible, isNextArgVisible ) where -- friends: import GHC.Prelude import GHC.Core.TyCo.Rep import GHC.Core.TyCo.Subst ( mkTvSubst, substTyWithCoVars ) import GHC.Core.TyCo.FVs import GHC.Core.TyCo.Ppr import GHC.Core.Class import GHC.Types.Var import GHC.Types.ForeignCall import GHC.Types.Var.Set import GHC.Core.Coercion import GHC.Core.Type as Type import GHC.Core.Predicate import GHC.Types.RepType import GHC.Core.TyCon import {-# SOURCE #-} GHC.Tc.Types.Origin ( SkolemInfo, unkSkol , FixedRuntimeRepOrigin, FixedRuntimeRepContext ) -- others: import GHC.Driver.Session import GHC.Core.FVs import GHC.Types.Name as Name -- We use this to make dictionaries for type literals. -- Perhaps there's a better way to do this? import GHC.Types.Name.Set import GHC.Types.Var.Env import GHC.Builtin.Names import GHC.Builtin.Types ( coercibleClass, eqClass, heqClass, unitTyCon, unitTyConKey , listTyCon, constraintKind ) import GHC.Types.Basic import GHC.Utils.Misc import GHC.Data.Maybe import GHC.Data.List.SetOps ( getNth, findDupsEq ) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Error( Validity'(..) ) import qualified GHC.LanguageExtensions as LangExt import Data.IORef import Data.List.NonEmpty( NonEmpty(..) ) import Data.List ( partition ) {- ************************************************************************ * * Types * * ************************************************************************ The type checker divides the generic Type world into the following more structured beasts: sigma ::= forall tyvars. phi -- A sigma type is a qualified type -- -- Note that even if 'tyvars' is empty, theta -- may not be: e.g. (?x::Int) => Int -- Note that 'sigma' is in prenex form: -- all the foralls are at the front. 
-- A 'phi' type has no foralls to the right of -- an arrow phi :: theta => rho rho ::= sigma -> rho | tau -- A 'tau' type has no quantification anywhere -- Note that the args of a type constructor must be taus tau ::= tyvar | tycon tau_1 .. tau_n | tau_1 tau_2 | tau_1 -> tau_2 -- In all cases, a (saturated) type synonym application is legal, -- provided it expands to the required form. Note [TcTyVars and TyVars in the typechecker] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The typechecker uses a lot of type variables with special properties, notably being a unification variable with a mutable reference. These use the 'TcTyVar' variant of Var.Var. Note, though, that a /bound/ type variable can (and probably should) be a TyVar. E.g forall a. a -> a Here 'a' is really just a deBruijn-number; it certainly does not have a significant TcLevel (as every TcTyVar does). So a forall-bound type variable should be TyVars; and hence a TyVar can appear free in a TcType. The type checker and constraint solver can also encounter /free/ type variables that use the 'TyVar' variant of Var.Var, for a couple of reasons: - When typechecking a class decl, say class C (a :: k) where foo :: T a -> Int We have first kind-check the header; fix k and (a:k) to be TyVars, bring 'k' and 'a' into scope, and kind check the signature for 'foo'. In doing so we call solveEqualities to solve any kind equalities in foo's signature. So the solver may see free occurrences of 'k'. See calls to tcExtendTyVarEnv for other places that ordinary TyVars are bought into scope, and hence may show up in the types and kinds generated by GHC.Tc.Gen.HsType. - The pattern-match overlap checker calls the constraint solver, long after TcTyVars have been zonked away It's convenient to simply treat these TyVars as skolem constants, which of course they are. We give them a level number of "outermost", so they behave as global constants. 
Specifically: * Var.tcTyVarDetails succeeds on a TyVar, returning vanillaSkolemTv, as well as on a TcTyVar. * tcIsTcTyVar returns True for both TyVar and TcTyVar variants of Var.Var. The "tc" prefix means "a type variable that can be encountered by the typechecker". This is a bit of a change from an earlier era when we remoselessly insisted on real TcTyVars in the type checker. But that seems unnecessary (for skolems, TyVars are fine) and it's now very hard to guarantee, with the advent of kind equalities. Note [Coercion variables in free variable lists] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are several places in the GHC codebase where functions like tyCoVarsOfType, tyCoVarsOfCt, et al. are used to compute the free type variables of a type. The "Co" part of these functions' names shouldn't be dismissed, as it is entirely possible that they will include coercion variables in addition to type variables! As a result, there are some places in GHC.Tc.Utils.TcType where we must take care to check that a variable is a _type_ variable (using isTyVar) before calling tcTyVarDetails--a partial function that is not defined for coercion variables--on the variable. Failing to do so led to GHC #12785. -} -- See Note [TcTyVars and TyVars in the typechecker] type TcCoVar = CoVar -- Used only during type inference type TcType = Type -- A TcType can have mutable type variables type TcTyCoVar = Var -- Either a TcTyVar or a CoVar -- | A type which has a syntactically fixed RuntimeRep as per -- Note [Fixed RuntimeRep] in GHC.Tc.Utils.Concrete. type TcTypeFRR = TcType -- TODO: consider making this a newtype. 
type TcTyVarBinder = TyVarBinder type TcInvisTVBinder = InvisTVBinder type TcReqTVBinder = ReqTVBinder -- See Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] type TcTyCon = TyCon type MonoTcTyCon = TcTyCon type PolyTcTyCon = TcTyCon type TcTyConBinder = TyConBinder -- With skolem TcTyVars -- These types do not have boxy type variables in them type TcPredType = PredType type TcThetaType = ThetaType type TcSigmaType = TcType -- | A 'TcSigmaTypeFRR' is a 'TcSigmaType' which has a syntactically -- fixed 'RuntimeRep' in the sense of Note [Fixed RuntimeRep] -- in GHC.Tc.Utils.Concrete. -- -- In particular, this means that: -- -- - 'GHC.Types.RepType.typePrimRep' does not panic, -- - 'GHC.Core.typeLevity_maybe' does not return 'Nothing'. -- -- This property is important in functions such as 'matchExpectedFunTys', where -- we want to provide argument types which have a known runtime representation. -- See Note [Return arguments with a fixed RuntimeRep. type TcSigmaTypeFRR = TcSigmaType -- TODO: consider making this a newtype. type TcRhoType = TcType -- Note [TcRhoType] type TcTauType = TcType type TcKind = Kind type TcTyVarSet = TyVarSet type TcTyCoVarSet = TyCoVarSet type TcDTyVarSet = DTyVarSet type TcDTyCoVarSet = DTyCoVarSet {- Note [TcTyCon, MonoTcTyCon, and PolyTcTyCon] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See Note [How TcTyCons work] in GHC.Tc.TyCl Invariants: * TcTyCon: a TyCon built with the TcTyCon constructor * TcTyConBinder: a TyConBinder with a TcTyVar inside (not a TyVar) * TcTyCons contain TcTyVars * MonoTcTyCon: - Flag tcTyConIsPoly = False - tyConScopedTyVars is important; maps a Name to a TyVarTv unification variable The order is important: Specified then Required variables. E.g. in data T a (b :: k) = ... the order will be [k, a, b]. NB: There are no Inferred binders in tyConScopedTyVars; 'a' may also be poly-kinded, but that kind variable will be added by generaliseTcTyCon, in the passage to a PolyTcTyCon. 
- tyConBinders are irrelevant; we just use tcTyConScopedTyVars Well not /quite/ irrelevant: its length gives the number of Required binders, and so allows up to distinguish between the Specified and Required elements of tyConScopedTyVars. * PolyTcTyCon: - Flag tcTyConIsPoly = True; this is used only to short-cut zonking - tyConBinders are still TcTyConBinders, but they are /skolem/ TcTyVars, with fixed kinds: no unification variables here tyConBinders includes the Inferred binders if any tyConBinders uses the Names from the original, renamed program. - tcTyConScopedTyVars is irrelevant: just use (binderVars tyConBinders) All the types have been swizzled back to use the original Names See Note [tyConBinders and lexical scoping] in GHC.Core.TyCon -} {- ********************************************************************* * * ExpType: an "expected type" in the type checker * * ********************************************************************* -} -- | An expected type to check against during type-checking. -- See Note [ExpType] in "GHC.Tc.Utils.TcMType", where you'll also find manipulators. data ExpType = Check TcType | Infer !InferResult data InferResult = IR { ir_uniq :: Unique -- ^ This 'Unique' is for debugging only , ir_lvl :: TcLevel -- ^ See Note [TcLevel of ExpType] in GHC.Tc.Utils.TcMType , ir_frr :: Maybe FixedRuntimeRepContext -- ^ See Note [FixedRuntimeRep context in ExpType] in GHC.Tc.Utils.TcMType , ir_ref :: IORef (Maybe TcType) } -- ^ The type that fills in this hole should be a @Type@, -- that is, its kind should be @TYPE rr@ for some @rr :: RuntimeRep@. -- -- Additionally, if the 'ir_frr' field is @Just frr_orig@ then -- @rr@ must be concrete, in the sense of Note [Concrete types] -- in GHC.Tc.Utils.Concrete. type ExpSigmaType = ExpType -- | An 'ExpType' which has a fixed RuntimeRep. -- -- For a 'Check' 'ExpType', the stored 'TcType' must have -- a fixed RuntimeRep. 
For an 'Infer' 'ExpType', the 'ir_frr' -- field must be of the form @Just frr_orig@. type ExpTypeFRR = ExpType -- | Like 'TcSigmaTypeFRR', but for an expected type. -- -- See 'ExpTypeFRR'. type ExpSigmaTypeFRR = ExpTypeFRR -- TODO: consider making this a newtype. type ExpRhoType = ExpType instance Outputable ExpType where ppr (Check ty) = text "Check" <> braces (ppr ty) ppr (Infer ir) = ppr ir instance Outputable InferResult where ppr (IR { ir_uniq = u, ir_lvl = lvl, ir_frr = mb_frr }) = text "Infer" <> mb_frr_text <> braces (ppr u <> comma <> ppr lvl) where mb_frr_text = case mb_frr of Just _ -> text "FRR" Nothing -> empty -- | Make an 'ExpType' suitable for checking. mkCheckExpType :: TcType -> ExpType mkCheckExpType = Check {- ********************************************************************* * * SyntaxOpType * * ********************************************************************* -} -- | What to expect for an argument to a rebindable-syntax operator. -- Quite like 'Type', but allows for holes to be filled in by tcSyntaxOp. -- The callback called from tcSyntaxOp gets a list of types; the meaning -- of these types is determined by a left-to-right depth-first traversal -- of the 'SyntaxOpType' tree. So if you pass in -- -- > SynAny `SynFun` (SynList `SynFun` SynType Int) `SynFun` SynAny -- -- you'll get three types back: one for the first 'SynAny', the /element/ -- type of the list, and one for the last 'SynAny'. You don't get anything -- for the 'SynType', because you've said positively that it should be an -- Int, and so it shall be. -- -- You'll also get three multiplicities back: one for each function arrow. See -- also Note [Linear types] in Multiplicity. -- -- This is defined here to avoid defining it in "GHC.Tc.Gen.Expr" boot file. data SyntaxOpType = SynAny -- ^ Any type | SynRho -- ^ A rho type, skolemised or instantiated as appropriate | SynList -- ^ A list type. 
You get back the element type of the list | SynFun SyntaxOpType SyntaxOpType -- ^ A function. | SynType ExpType -- ^ A known type. infixr 0 `SynFun` -- | Like 'SynType' but accepts a regular TcType synKnownType :: TcType -> SyntaxOpType synKnownType = SynType . mkCheckExpType -- | Like 'mkFunTys' but for 'SyntaxOpType' mkSynFunTys :: [SyntaxOpType] -> ExpType -> SyntaxOpType mkSynFunTys arg_tys res_ty = foldr SynFun (SynType res_ty) arg_tys {- Note [TcRhoType] ~~~~~~~~~~~~~~~~ A TcRhoType has no foralls or contexts at the top NO forall a. a -> Int NO Eq a => a -> a YES a -> a YES (forall a. a->a) -> Int YES Int -> forall a. a -> Int ************************************************************************ * * TyVarDetails, MetaDetails, MetaInfo * * ************************************************************************ TyVarDetails gives extra info about type variables, used during type checking. It's attached to mutable type variables only. It's knot-tied back to "GHC.Types.Var". There is no reason in principle why "GHC.Types.Var" shouldn't actually have the definition, but it "belongs" here. Note [TyVars and TcTyVars during type checking] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Var type has constructors TyVar and TcTyVar. They are used as follows: * TcTyVar: used /only/ during type checking. Should never appear afterwards. May contain a mutable field, in the MetaTv case. * TyVar: is never seen by the constraint solver, except locally inside a type like (forall a. [a] ->[a]), where 'a' is a TyVar. We instantiate these with TcTyVars before exposing the type to the constraint solver. I have swithered about the latter invariant, excluding TyVars from the constraint solver. It's not strictly essential, and indeed (historically but still there) Var.tcTyVarDetails returns vanillaSkolemTv for a TyVar. But ultimately I want to seeparate Type from TcType, and in that case we would need to enforce the separation. 
-} -- A TyVarDetails is inside a TyVar -- See Note [TyVars and TcTyVars] data TcTyVarDetails = SkolemTv -- A skolem SkolemInfo TcLevel -- Level of the implication that binds it -- See GHC.Tc.Utils.Unify Note [Deeper level on the left] for -- how this level number is used Bool -- True <=> this skolem type variable can be overlapped -- when looking up instances -- See Note [Binding when looking up instances] in GHC.Core.InstEnv | RuntimeUnk -- Stands for an as-yet-unknown type in the GHCi -- interactive context | MetaTv { mtv_info :: MetaInfo , mtv_ref :: IORef MetaDetails , mtv_tclvl :: TcLevel } -- See Note [TcLevel invariants] vanillaSkolemTvUnk :: HasCallStack => TcTyVarDetails vanillaSkolemTvUnk = SkolemTv unkSkol topTcLevel False instance Outputable TcTyVarDetails where ppr = pprTcTyVarDetails pprTcTyVarDetails :: TcTyVarDetails -> SDoc -- For debugging pprTcTyVarDetails (RuntimeUnk {}) = text "rt" pprTcTyVarDetails (SkolemTv _sk lvl True) = text "ssk" <> colon <> ppr lvl pprTcTyVarDetails (SkolemTv _sk lvl False) = text "sk" <> colon <> ppr lvl pprTcTyVarDetails (MetaTv { mtv_info = info, mtv_tclvl = tclvl }) = ppr info <> colon <> ppr tclvl ----------------------------- data MetaDetails = Flexi -- Flexi type variables unify to become Indirects | Indirect TcType -- | What restrictions are on this metavariable around unification? -- These are checked in GHC.Tc.Utils.Unify.startSolvingByUnification. data MetaInfo = TauTv -- ^ This MetaTv is an ordinary unification variable -- A TauTv is always filled in with a tau-type, which -- never contains any ForAlls. | TyVarTv -- ^ A variant of TauTv, except that it should not be -- unified with a type, only with a type variable -- See Note [TyVarTv] in GHC.Tc.Utils.TcMType | RuntimeUnkTv -- ^ A unification variable used in the GHCi debugger. 
-- It /is/ allowed to unify with a polytype, unlike TauTv | CycleBreakerTv -- Used to fix occurs-check problems in Givens -- See Note [Type equality cycles] in -- GHC.Tc.Solver.Canonical | ConcreteTv ConcreteTvOrigin -- ^ A unification variable that can only be unified -- with a concrete type, in the sense of -- Note [Concrete types] in GHC.Tc.Utils.Concrete. -- See Note [ConcreteTv] in GHC.Tc.Utils.Concrete. -- See also Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete -- for an overview of how this works in context. instance Outputable MetaDetails where ppr Flexi = text "Flexi" ppr (Indirect ty) = text "Indirect" <+> ppr ty instance Outputable MetaInfo where ppr TauTv = text "tau" ppr TyVarTv = text "tyv" ppr RuntimeUnkTv = text "rutv" ppr CycleBreakerTv = text "cbv" ppr (ConcreteTv {}) = text "conc" -- | What caused us to create a 'ConcreteTv' metavariable? -- See Note [ConcreteTv] in GHC.Tc.Utils.Concrete. data ConcreteTvOrigin -- | A 'ConcreteTv' used to enforce the representation-polymorphism invariants. -- -- See 'FixedRuntimeRepOrigin' for more information. = ConcreteFRR FixedRuntimeRepOrigin {- ********************************************************************* * * Untouchable type variables * * ********************************************************************* -} newtype TcLevel = TcLevel Int deriving( Eq, Ord ) -- See Note [TcLevel invariants] for what this Int is -- See also Note [TcLevel assignment] {- Note [TcLevel invariants] ~~~~~~~~~~~~~~~~~~~~~~~~~ * Each unification variable (MetaTv) and skolem (SkolemTv) and each Implication has a level number (of type TcLevel) * INVARIANTS. 
In a tree of Implications,

   (ImplicInv)  The level number (ic_tclvl) of an Implication is
                STRICTLY GREATER THAN that of its parent

   (SkolInv)    The level number of the skolems (ic_skols) of an
                Implication is equal to the level of the implication
                itself (ic_tclvl)

   (GivenInv)   The level number of a unification variable appearing
                in the 'ic_given' of an implication I should be
                STRICTLY LESS THAN the ic_tclvl of I
                See Note [GivenInv]

   (WantedInv)  The level number of a unification variable appearing
                in the 'ic_wanted' of an implication I should be
                LESS THAN OR EQUAL TO the ic_tclvl of I
                See Note [WantedInv]

The level of a MetaTyVar also governs its untouchability.  See
Note [Unification preconditions] in GHC.Tc.Utils.Unify.

Note [TcLevel assignment]
~~~~~~~~~~~~~~~~~~~~~~~~~
We arrange the TcLevels like this

   0   Top level
   1   First-level implication constraints
   2   Second-level implication constraints
   ...etc...

Note [GivenInv]
~~~~~~~~~~~~~~~
Invariant (GivenInv) is not essential, but it is easy to guarantee, and
it is a useful extra piece of structure.  It ensures that the Givens of
an implication don't change because of unifications /at the same level/
caused by Wanteds.  (Wanteds can also cause unifications at an outer
level, but that will iterate the entire implication; see
GHC.Tc.Solver.Monad Note [The Unification Level Flag].)

Givens can certainly contain meta-tyvars from /outer/ levels.  E.g.

   data T a where
     MkT :: Eq a => a -> T a

   f x = case x of MkT y -> y && True

Then we'll infer (x :: T alpha[1]).  The Givens from the implication
arising from the pattern match will look like this:

   forall[2] . Eq alpha[1] => (alpha[1] ~ Bool)

But if we unify alpha (which in this case we will), we'll iterate
the entire implication via Note [The Unification Level Flag] in
GHC.Tc.Solver.Monad.  That isn't true of unifications at the
/ambient/ level.
It would be entirely possible to weaken (GivenInv), to LESS THAN OR EQUAL TO, but we'd need to think carefully about - kick-out for Givens - GHC.Tc.Solver.Monad.isOuterTyVar But in fact (GivenInv) is automatically true, so we're adhering to it for now. See #18929. * If a tyvar tv has level n, then the levels of all variables free in tv's kind are <= n. Consequence: if tv is untouchable, so are all variables in tv's kind. Note [WantedInv] ~~~~~~~~~~~~~~~~ Why is WantedInv important? Consider this implication, where the constraint (C alpha[3]) disobeys WantedInv: forall[2] a. blah => (C alpha[3]) (forall[3] b. alpha[3] ~ b) We can unify alpha:=b in the inner implication, because 'alpha' is touchable; but then 'b' has excaped its scope into the outer implication. -} maxTcLevel :: TcLevel -> TcLevel -> TcLevel maxTcLevel (TcLevel a) (TcLevel b) = TcLevel (a `max` b) topTcLevel :: TcLevel -- See Note [TcLevel assignment] topTcLevel = TcLevel 0 -- 0 = outermost level isTopTcLevel :: TcLevel -> Bool isTopTcLevel (TcLevel 0) = True isTopTcLevel _ = False pushTcLevel :: TcLevel -> TcLevel -- See Note [TcLevel assignment] pushTcLevel (TcLevel us) = TcLevel (us + 1) strictlyDeeperThan :: TcLevel -> TcLevel -> Bool strictlyDeeperThan (TcLevel tv_tclvl) (TcLevel ctxt_tclvl) = tv_tclvl > ctxt_tclvl deeperThanOrSame :: TcLevel -> TcLevel -> Bool deeperThanOrSame (TcLevel tv_tclvl) (TcLevel ctxt_tclvl) = tv_tclvl >= ctxt_tclvl sameDepthAs :: TcLevel -> TcLevel -> Bool sameDepthAs (TcLevel ctxt_tclvl) (TcLevel tv_tclvl) = ctxt_tclvl == tv_tclvl -- NB: invariant ctxt_tclvl >= tv_tclvl -- So <= would be equivalent checkTcLevelInvariant :: TcLevel -> TcLevel -> Bool -- Checks (WantedInv) from Note [TcLevel invariants] checkTcLevelInvariant (TcLevel ctxt_tclvl) (TcLevel tv_tclvl) = ctxt_tclvl >= tv_tclvl -- Returns topTcLevel for non-TcTyVars tcTyVarLevel :: TcTyVar -> TcLevel tcTyVarLevel tv = case tcTyVarDetails tv of MetaTv { mtv_tclvl = tv_lvl } -> tv_lvl SkolemTv _ tv_lvl _ -> 
tv_lvl RuntimeUnk -> topTcLevel tcTypeLevel :: TcType -> TcLevel -- Max level of any free var of the type tcTypeLevel ty = nonDetStrictFoldDVarSet add topTcLevel (tyCoVarsOfTypeDSet ty) -- It's safe to use a non-deterministic fold because `maxTcLevel` is -- commutative. where add v lvl | isTcTyVar v = lvl `maxTcLevel` tcTyVarLevel v | otherwise = lvl instance Outputable TcLevel where ppr (TcLevel us) = ppr us {- ********************************************************************* * * Finding type family instances * * ************************************************************************ -} -- | Finds outermost type-family applications occurring in a type, -- after expanding synonyms. In the list (F, tys) that is returned -- we guarantee that tys matches F's arity. For example, given -- type family F a :: * -> * (arity 1) -- calling tcTyFamInsts on (Maybe (F Int Bool) will return -- (F, [Int]), not (F, [Int,Bool]) -- -- This is important for its use in deciding termination of type -- instances (see #11581). E.g. -- type instance G [Int] = ...(F Int \)... -- we don't need to take \ into account when asking if -- the calls on the RHS are smaller than the LHS tcTyFamInsts :: Type -> [(TyCon, [Type])] tcTyFamInsts = map (\(_,b,c) -> (b,c)) . tcTyFamInstsAndVis -- | Like 'tcTyFamInsts', except that the output records whether the -- type family and its arguments occur as an /invisible/ argument in -- some type application. This information is useful because it helps GHC know -- when to turn on @-fprint-explicit-kinds@ during error reporting so that -- users can actually see the type family being mentioned. 
-- -- As an example, consider: -- -- @ -- class C a -- data T (a :: k) -- type family F a :: k -- instance C (T @(F Int) (F Bool)) -- @ -- -- There are two occurrences of the type family `F` in that `C` instance, so -- @'tcTyFamInstsAndVis' (C (T \@(F Int) (F Bool)))@ will return: -- -- @ -- [ ('True', F, [Int]) -- , ('False', F, [Bool]) ] -- @ -- -- @F Int@ is paired with 'True' since it appears as an /invisible/ argument -- to @C@, whereas @F Bool@ is paired with 'False' since it appears an a -- /visible/ argument to @C@. -- -- See also @Note [Kind arguments in error messages]@ in "GHC.Tc.Errors". tcTyFamInstsAndVis :: Type -> [(Bool, TyCon, [Type])] tcTyFamInstsAndVis = tcTyFamInstsAndVisX False tcTyFamInstsAndVisX :: Bool -- ^ Is this an invisible argument to some type application? -> Type -> [(Bool, TyCon, [Type])] tcTyFamInstsAndVisX = go where go is_invis_arg ty | Just exp_ty <- tcView ty = go is_invis_arg exp_ty go _ (TyVarTy _) = [] go is_invis_arg (TyConApp tc tys) | isTypeFamilyTyCon tc = [(is_invis_arg, tc, take (tyConArity tc) tys)] | otherwise = tcTyConAppTyFamInstsAndVisX is_invis_arg tc tys go _ (LitTy {}) = [] go is_invis_arg (ForAllTy bndr ty) = go is_invis_arg (binderType bndr) ++ go is_invis_arg ty go is_invis_arg (FunTy _ w ty1 ty2) = go is_invis_arg w ++ go is_invis_arg ty1 ++ go is_invis_arg ty2 go is_invis_arg ty@(AppTy _ _) = let (ty_head, ty_args) = splitAppTys ty ty_arg_flags = appTyArgFlags ty_head ty_args in go is_invis_arg ty_head ++ concat (zipWith (\flag -> go (isInvisibleArgFlag flag)) ty_arg_flags ty_args) go is_invis_arg (CastTy ty _) = go is_invis_arg ty go _ (CoercionTy _) = [] -- don't count tyfams in coercions, -- as they never get normalized, -- anyway -- | In an application of a 'TyCon' to some arguments, find the outermost -- occurrences of type family applications within the arguments. This function -- will not consider the 'TyCon' itself when checking for type family -- applications. 
-- -- See 'tcTyFamInstsAndVis' for more details on how this works (as this -- function is called inside of 'tcTyFamInstsAndVis'). tcTyConAppTyFamInstsAndVis :: TyCon -> [Type] -> [(Bool, TyCon, [Type])] tcTyConAppTyFamInstsAndVis = tcTyConAppTyFamInstsAndVisX False tcTyConAppTyFamInstsAndVisX :: Bool -- ^ Is this an invisible argument to some type application? -> TyCon -> [Type] -> [(Bool, TyCon, [Type])] tcTyConAppTyFamInstsAndVisX is_invis_arg tc tys = let (invis_tys, vis_tys) = partitionInvisibleTypes tc tys in concat $ map (tcTyFamInstsAndVisX True) invis_tys ++ map (tcTyFamInstsAndVisX is_invis_arg) vis_tys isTyFamFree :: Type -> Bool -- ^ Check that a type does not contain any type family applications. isTyFamFree = null . tcTyFamInsts any_rewritable :: EqRel -- Ambient role -> (EqRel -> TcTyVar -> Bool) -- check tyvar -> (EqRel -> TyCon -> [TcType] -> Bool) -- check type family -> (TyCon -> Bool) -- expand type synonym? -> TcType -> Bool -- Checks every tyvar and tyconapp (not including FunTys) within a type, -- ORing the results of the predicates above together -- Do not look inside casts and coercions -- See Note [anyRewritableTyVar must be role-aware] -- -- This looks like it should use foldTyCo, but that function is -- role-agnostic, and this one must be role-aware. We could make -- foldTyCon role-aware, but that may slow down more common usages. -- -- See Note [Rewritable] in GHC.Tc.Solver.InertSet for a specification for this function. 
{-# INLINE any_rewritable #-} -- this allows specialization of predicates any_rewritable role tv_pred tc_pred should_expand = go role emptyVarSet where go_tv rl bvs tv | tv `elemVarSet` bvs = False | otherwise = tv_pred rl tv go rl bvs ty@(TyConApp tc tys) | isTypeSynonymTyCon tc , should_expand tc , Just ty' <- tcView ty -- should always match = go rl bvs ty' | tc_pred rl tc tys = True | otherwise = go_tc rl bvs tc tys go rl bvs (TyVarTy tv) = go_tv rl bvs tv go _ _ (LitTy {}) = False go rl bvs (AppTy fun arg) = go rl bvs fun || go NomEq bvs arg go rl bvs (FunTy _ w arg res) = go NomEq bvs arg_rep || go NomEq bvs res_rep || go rl bvs arg || go rl bvs res || go NomEq bvs w where arg_rep = getRuntimeRep arg -- forgetting these causes #17024 res_rep = getRuntimeRep res go rl bvs (ForAllTy tv ty) = go rl (bvs `extendVarSet` binderVar tv) ty go rl bvs (CastTy ty _) = go rl bvs ty go _ _ (CoercionTy _) = False go_tc NomEq bvs _ tys = any (go NomEq bvs) tys go_tc ReprEq bvs tc tys = any (go_arg bvs) (tyConRolesRepresentational tc `zip` tys) go_arg bvs (Nominal, ty) = go NomEq bvs ty go_arg bvs (Representational, ty) = go ReprEq bvs ty go_arg _ (Phantom, _) = False -- We never rewrite with phantoms anyRewritableTyVar :: EqRel -- Ambient role -> (EqRel -> TcTyVar -> Bool) -- check tyvar -> TcType -> Bool -- See Note [Rewritable] in GHC.Tc.Solver.InertSet for a specification for this function. 
anyRewritableTyVar role pred = any_rewritable role pred (\ _ _ _ -> False) -- no special check for tyconapps -- (this False is ORed with other results, so it -- really means "do nothing special"; the arguments -- are still inspected) (\ _ -> False) -- don't expand synonyms -- NB: No need to expand synonyms, because we can find -- all free variables of a synonym by looking at its -- arguments anyRewritableTyFamApp :: EqRel -- Ambient role -> (EqRel -> TyCon -> [TcType] -> Bool) -- check tyconapp -- should return True only for type family applications -> TcType -> Bool -- always ignores casts & coercions -- See Note [Rewritable] in GHC.Tc.Solver.InertSet for a specification for this function. anyRewritableTyFamApp role check_tyconapp = any_rewritable role (\ _ _ -> False) check_tyconapp (not . isFamFreeTyCon) {- Note [anyRewritableTyVar must be role-aware] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ anyRewritableTyVar is used during kick-out from the inert set, to decide if, given a new equality (a ~ ty), we should kick out a constraint C. Rather than gather free variables and see if 'a' is among them, we instead pass in a predicate; this is just efficiency. Moreover, consider work item: [G] a ~R f b inert item: [G] b ~R f a We use anyRewritableTyVar to decide whether to kick out the inert item, on the grounds that the work item might rewrite it. Well, 'a' is certainly free in [G] b ~R f a. But because the role of a type variable ('f' in this case) is nominal, the work item can't actually rewrite the inert item. Moreover, if we were to kick out the inert item the exact same situation would re-occur and we end up with an infinite loop in which each kicks out the other (#14363). 
-} {- ********************************************************************* * * The "exact" free variables of a type * * ********************************************************************* -} {- Note [Silly type synonym] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider type T a = Int What are the free tyvars of (T x)? Empty, of course! exactTyCoVarsOfType is used by the type checker to figure out exactly which type variables are mentioned in a type. It only matters occasionally -- see the calls to exactTyCoVarsOfType. We place this function here in GHC.Tc.Utils.TcType, not in GHC.Core.TyCo.FVs, because we want to "see" tcView (efficiency issue only). -} exactTyCoVarsOfType :: Type -> TyCoVarSet exactTyCoVarsOfTypes :: [Type] -> TyCoVarSet -- Find the free type variables (of any kind) -- but *expand* type synonyms. See Note [Silly type synonym] above. exactTyCoVarsOfType ty = runTyCoVars (exact_ty ty) exactTyCoVarsOfTypes tys = runTyCoVars (exact_tys tys) exact_ty :: Type -> Endo TyCoVarSet exact_tys :: [Type] -> Endo TyCoVarSet (exact_ty, exact_tys, _, _) = foldTyCo exactTcvFolder emptyVarSet exactTcvFolder :: TyCoFolder TyCoVarSet (Endo TyCoVarSet) exactTcvFolder = deepTcvFolder { tcf_view = tcView } -- This is the key line {- ************************************************************************ * * Predicates * * ************************************************************************ -} tcIsTcTyVar :: TcTyVar -> Bool -- See Note [TcTyVars and TyVars in the typechecker] tcIsTcTyVar tv = isTyVar tv isPromotableMetaTyVar :: TcTyVar -> Bool -- True is this is a meta-tyvar that can be -- promoted to an outer level isPromotableMetaTyVar tv | isTyVar tv -- See Note [Coercion variables in free variable lists] , MetaTv { mtv_info = info } <- tcTyVarDetails tv = isTouchableInfo info -- Can't promote cycle breakers | otherwise = False isTouchableMetaTyVar :: TcLevel -> TcTyVar -> Bool isTouchableMetaTyVar ctxt_tclvl tv | isTyVar tv -- See Note [Coercion variables in free 
variable lists] , MetaTv { mtv_tclvl = tv_tclvl, mtv_info = info } <- tcTyVarDetails tv , isTouchableInfo info = assertPpr (checkTcLevelInvariant ctxt_tclvl tv_tclvl) (ppr tv $$ ppr tv_tclvl $$ ppr ctxt_tclvl) $ tv_tclvl `sameDepthAs` ctxt_tclvl | otherwise = False isImmutableTyVar :: TyVar -> Bool isImmutableTyVar tv = isSkolemTyVar tv isTyConableTyVar, isSkolemTyVar, isOverlappableTyVar, isMetaTyVar, isAmbiguousTyVar, isCycleBreakerTyVar :: TcTyVar -> Bool isTyConableTyVar tv -- True of a meta-type variable that can be filled in -- with a type constructor application; in particular, -- not a TyVarTv | isTyVar tv -- See Note [Coercion variables in free variable lists] = case tcTyVarDetails tv of MetaTv { mtv_info = TyVarTv } -> False _ -> True | otherwise = True isSkolemTyVar tv = assertPpr (tcIsTcTyVar tv) (ppr tv) $ case tcTyVarDetails tv of MetaTv {} -> False _other -> True skolemSkolInfo :: TcTyVar -> SkolemInfo skolemSkolInfo tv = assert (isSkolemTyVar tv) $ case tcTyVarDetails tv of SkolemTv skol_info _ _ -> skol_info RuntimeUnk -> panic "RuntimeUnk" MetaTv {} -> panic "skolemSkolInfo" isOverlappableTyVar tv | isTyVar tv -- See Note [Coercion variables in free variable lists] = case tcTyVarDetails tv of SkolemTv _ _ overlappable -> overlappable _ -> False | otherwise = False isMetaTyVar tv | isTyVar tv -- See Note [Coercion variables in free variable lists] = case tcTyVarDetails tv of MetaTv {} -> True _ -> False | otherwise = False -- isAmbiguousTyVar is used only when reporting type errors -- It picks out variables that are unbound, namely meta -- type variables and the RuntimeUnk variables created by -- GHC.Runtime.Heap.Inspect.zonkRTTIType. 
These are "ambiguous" in -- the sense that they stand for an as-yet-unknown type isAmbiguousTyVar tv | isTyVar tv -- See Note [Coercion variables in free variable lists] = case tcTyVarDetails tv of MetaTv {} -> True RuntimeUnk {} -> True _ -> False | otherwise = False isCycleBreakerTyVar tv | isTyVar tv -- See Note [Coercion variables in free variable lists] , MetaTv { mtv_info = CycleBreakerTv } <- tcTyVarDetails tv = True | otherwise = False -- | Is this type variable a concrete type variable, i.e. -- it is a metavariable with 'ConcreteTv' 'MetaInfo'? -- -- Returns the 'ConcreteTvOrigin' stored in the type variable -- if so, or 'Nothing' otherwise. isConcreteTyVar_maybe :: TcTyVar -> Maybe ConcreteTvOrigin isConcreteTyVar_maybe tv | isTcTyVar tv , MetaTv { mtv_info = ConcreteTv conc_orig } <- tcTyVarDetails tv = Just conc_orig | otherwise = Nothing -- | Is this type variable a concrete type variable, i.e. -- it is a metavariable with 'ConcreteTv' 'MetaInfo'? isConcreteTyVar :: TcTyVar -> Bool isConcreteTyVar = isJust . isConcreteTyVar_maybe -- | Is this type concrete type variable, i.e. -- a metavariable with 'ConcreteTv' 'MetaInfo'? isConcreteTyVarTy :: TcType -> Bool isConcreteTyVarTy = isJust . isConcreteTyVarTy_maybe -- | Is this type a concrete type variable? If so, return -- the associated 'TcTyVar' and 'ConcreteTvOrigin'. 
isConcreteTyVarTy_maybe :: TcType -> Maybe (TcTyVar, ConcreteTvOrigin)
isConcreteTyVarTy_maybe (TyVarTy tv) = (tv, ) <$> isConcreteTyVar_maybe tv
isConcreteTyVarTy_maybe _            = Nothing

-- | Is this type a 'TyVarTy' whose variable is a meta type variable?
-- (No synonym expansion or zonking is performed.)
isMetaTyVarTy :: TcType -> Bool
isMetaTyVarTy (TyVarTy tv) = isMetaTyVar tv
isMetaTyVarTy _            = False

-- | Extract the 'MetaInfo' of a meta type variable.
-- Panics if the variable is not a 'MetaTv'.
metaTyVarInfo :: TcTyVar -> MetaInfo
metaTyVarInfo tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_info = info } -> info
      _ -> pprPanic "metaTyVarInfo" (ppr tv)

-- | Can a meta-tyvar with this 'MetaInfo' be unified?
-- Only 'CycleBreakerTv's are untouchable in this sense.
isTouchableInfo :: MetaInfo -> Bool
isTouchableInfo info
  | CycleBreakerTv <- info = False
  | otherwise              = True

-- | The 'TcLevel' of a meta type variable.
-- Panics on a non-'MetaTv'; see 'metaTyVarTcLevel_maybe' for a total version.
metaTyVarTcLevel :: TcTyVar -> TcLevel
metaTyVarTcLevel tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_tclvl = tclvl } -> tclvl
      _ -> pprPanic "metaTyVarTcLevel" (ppr tv)

-- | Like 'metaTyVarTcLevel', but returns 'Nothing' instead of
-- panicking when the variable is not a 'MetaTv'.
metaTyVarTcLevel_maybe :: TcTyVar -> Maybe TcLevel
metaTyVarTcLevel_maybe tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_tclvl = tclvl } -> Just tclvl
      _                            -> Nothing

-- | The mutable cell holding the 'MetaDetails' of a meta type variable.
-- Panics if the variable is not a 'MetaTv'.
metaTyVarRef :: TyVar -> IORef MetaDetails
metaTyVarRef tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_ref = ref } -> ref
      _ -> pprPanic "metaTyVarRef" (ppr tv)

-- | Set the 'TcLevel' of a meta type variable, returning the updated variable.
-- Panics if the variable is not a 'MetaTv'.
setMetaTyVarTcLevel :: TcTyVar -> TcLevel -> TcTyVar
setMetaTyVarTcLevel tv tclvl
  = case tcTyVarDetails tv of
      details@(MetaTv {}) -> setTcTyVarDetails tv (details { mtv_tclvl = tclvl })
      _ -> pprPanic "setMetaTyVarTcLevel" (ppr tv)
      -- Panic message previously said "metaTyVarTcLevel" (copy-paste slip);
      -- it now names the function that actually panics.

-- | Is this a 'TyVarTv' meta variable, i.e. one that may only be
-- unified with a type variable?  See Note [TyVarTv] in GHC.Tc.Utils.TcMType.
isTyVarTyVar :: Var -> Bool
isTyVarTyVar tv
  = case tcTyVarDetails tv of
      MetaTv { mtv_info = TyVarTv } -> True
      _                             -> False

isFlexi, isIndirect :: MetaDetails -> Bool
isFlexi Flexi = True
isFlexi _     = False

isIndirect (Indirect _) = True
isIndirect _            = False

isRuntimeUnkSkol :: TyVar -> Bool
-- Called only in GHC.Tc.Errors; see Note [Runtime skolems] there
isRuntimeUnkSkol x
  | RuntimeUnk <- tcTyVarDetails x = True
  | otherwise                      = False

mkTyVarNamePairs :: [TyVar] -> [(Name,TyVar)]
-- Just pair each TyVar with its own name
mkTyVarNamePairs tvs = [(tyVarName tv, tv) | tv <- tvs]

findDupTyVarTvs :: [(Name,TcTyVar)] -> [(Name,Name)]
-- If we have [...(x1,tv)...(x2,tv)...]
-- return (x1,x2) in the result list
findDupTyVarTvs prs
  = concatMap mk_result_prs $
    findDupsEq eq_snd prs
  where
    eq_snd (_,tv1) (_,tv2) = tv1 == tv2
    mk_result_prs ((n1,_) :| xs) = map (\(n2,_) -> (n1,n2)) xs

-- | Returns the (kind, type) variables in a type that are
-- as-yet-unknown: metavariables and RuntimeUnks
ambigTkvsOfTy :: TcType -> ([Var],[Var])
ambigTkvsOfTy ty
  = partition (`elemVarSet` dep_tkv_set) ambig_tkvs
  where
    tkvs        = tyCoVarsOfTypeList ty
    ambig_tkvs  = filter isAmbiguousTyVar tkvs
    dep_tkv_set = tyCoVarsOfTypes (map tyVarKind tkvs)

{- *********************************************************************
*                                                                      *
                 Tau, sigma and rho
*                                                                      *
********************************************************************* -}

mkSigmaTy :: [TyCoVarBinder] -> [PredType] -> Type -> Type
mkSigmaTy bndrs theta tau = mkForAllTys bndrs (mkPhiTy theta tau)

-- | Make a sigma ty where all type variables are 'Inferred'. That is,
-- they cannot be used with visible type application.
mkInfSigmaTy :: [TyCoVar] -> [PredType] -> Type -> Type
mkInfSigmaTy tyvars theta ty = mkSigmaTy (mkTyCoVarBinders Inferred tyvars) theta ty

-- | Make a sigma ty where all type variables are "specified". That is,
-- they can be used with visible type application
mkSpecSigmaTy :: [TyVar] -> [PredType] -> Type -> Type
mkSpecSigmaTy tyvars preds ty = mkSigmaTy (mkTyCoVarBinders Specified tyvars) preds ty

mkPhiTy :: [PredType] -> Type -> Type
mkPhiTy = mkInvisFunTysMany

---------------
getDFunTyKey :: Type -> OccName -- Get some string from a type, to be used to
                                -- construct a dictionary function name
getDFunTyKey ty | Just ty' <- coreView ty = getDFunTyKey ty'
getDFunTyKey (TyVarTy tv)    = getOccName tv
getDFunTyKey (TyConApp tc _) = getOccName tc
getDFunTyKey (LitTy x)       = getDFunTyLitKey x
getDFunTyKey (AppTy fun _)   = getDFunTyKey fun
getDFunTyKey (FunTy {})      = getOccName funTyCon
getDFunTyKey (ForAllTy _ t)  = getDFunTyKey t
getDFunTyKey (CastTy ty _)   = getDFunTyKey ty
getDFunTyKey t@(CoercionTy _) = pprPanic "getDFunTyKey" (ppr t)

getDFunTyLitKey :: TyLit -> OccName
getDFunTyLitKey (NumTyLit n)  = mkOccName Name.varName (show n)
getDFunTyLitKey (StrTyLit n)  = mkOccName Name.varName (show n) -- hm
getDFunTyLitKey (CharTyLit n) = mkOccName Name.varName (show n)

{- *********************************************************************
*                                                                      *
                 Building types
*                                                                      *
********************************************************************* -}

-- ToDo: I think we need Tc versions of these
-- Reason: mkCastTy checks isReflexiveCastTy, which checks
--         for equality; and that has a different answer
--         depending on whether or not Type = Constraint

mkTcAppTys :: Type -> [Type] -> Type
mkTcAppTys = mkAppTys

mkTcAppTy :: Type -> Type -> Type
mkTcAppTy = mkAppTy

mkTcCastTy :: Type -> Coercion -> Type
mkTcCastTy = mkCastTy -- Do we need a tc version of mkCastTy?
{- ************************************************************************
*                                                                      *
            Expanding and splitting
*                                                                      *
************************************************************************

These tcSplit functions are like their non-Tc analogues, but
        *) they do not look through newtypes

However, they are non-monadic and do not follow through mutable type
variables.  It's up to you to make sure this doesn't matter.
-}

-- | Splits a forall type into a list of 'TyBinder's and the inner type.
-- Always succeeds, even if it returns an empty list.
tcSplitPiTys :: Type -> ([TyBinder], Type)
tcSplitPiTys ty
  = assert (all isTyBinder (fst sty) ) sty
  where sty = splitPiTys ty

-- | Splits a type into a TyBinder and a body, if possible. Panics otherwise
tcSplitPiTy_maybe :: Type -> Maybe (TyBinder, Type)
tcSplitPiTy_maybe ty
  = assert (isMaybeTyBinder sty ) sty
  where
    sty = splitPiTy_maybe ty
    isMaybeTyBinder (Just (t,_)) = isTyBinder t
    isMaybeTyBinder _            = True

tcSplitForAllTyVarBinder_maybe :: Type -> Maybe (TyVarBinder, Type)
tcSplitForAllTyVarBinder_maybe ty
  | Just ty' <- tcView ty = tcSplitForAllTyVarBinder_maybe ty'
tcSplitForAllTyVarBinder_maybe (ForAllTy tv ty)
  = assert (isTyVarBinder tv ) Just (tv, ty)
tcSplitForAllTyVarBinder_maybe _ = Nothing

-- | Like 'tcSplitPiTys', but splits off only named binders,
-- returning just the tyvars.
tcSplitForAllTyVars :: Type -> ([TyVar], Type)
tcSplitForAllTyVars ty
  = assert (all isTyVar (fst sty) ) sty
  where sty = splitForAllTyCoVars ty

-- | Like 'tcSplitForAllTyVars', but only splits 'ForAllTy's with 'Invisible'
-- type variable binders.
tcSplitForAllInvisTyVars :: Type -> ([TyVar], Type)
tcSplitForAllInvisTyVars ty = tcSplitSomeForAllTyVars isInvisibleArgFlag ty

-- | Like 'tcSplitForAllTyVars', but only splits a 'ForAllTy' if @argf_pred argf@
-- is 'True', where @argf@ is the visibility of the @ForAllTy@'s binder and
-- @argf_pred@ is a predicate over visibilities provided as an argument to this
-- function.
tcSplitSomeForAllTyVars :: (ArgFlag -> Bool) -> Type -> ([TyVar], Type)
tcSplitSomeForAllTyVars argf_pred ty
  = split ty ty []
  where
    -- First argument is the original type, returned untouched if the
    -- outermost constructor is not a matching ForAllTy (after coreView)
    split _ (ForAllTy (Bndr tv argf) ty) tvs
      | argf_pred argf            = split ty ty (tv:tvs)
    split orig_ty ty tvs
      | Just ty' <- coreView ty   = split orig_ty ty' tvs
    split orig_ty _ tvs           = (reverse tvs, orig_ty)

-- | Like 'tcSplitForAllTyVars', but only splits 'ForAllTy's with 'Required' type
-- variable binders. All split tyvars are annotated with '()'.
tcSplitForAllReqTVBinders :: Type -> ([TcReqTVBinder], Type)
tcSplitForAllReqTVBinders ty
  = assert (all (isTyVar . binderVar) (fst sty) ) sty
  where sty = splitForAllReqTVBinders ty

-- | Like 'tcSplitForAllTyVars', but only splits 'ForAllTy's with 'Invisible' type
-- variable binders. All split tyvars are annotated with their 'Specificity'.
tcSplitForAllInvisTVBinders :: Type -> ([TcInvisTVBinder], Type)
tcSplitForAllInvisTVBinders ty
  = assert (all (isTyVar . binderVar) (fst sty) ) sty
  where sty = splitForAllInvisTVBinders ty

-- | Like 'tcSplitForAllTyVars', but splits off only named binders.
tcSplitForAllTyVarBinders :: Type -> ([TyVarBinder], Type)
tcSplitForAllTyVarBinders ty
  = assert (all isTyVarBinder (fst sty)) sty
  where sty = splitForAllTyCoVarBinders ty

-- | Is this a ForAllTy with a named binder?
tcIsForAllTy :: Type -> Bool
tcIsForAllTy ty | Just ty' <- tcView ty = tcIsForAllTy ty'
tcIsForAllTy (ForAllTy {}) = True
tcIsForAllTy _             = False

tcSplitPredFunTy_maybe :: Type -> Maybe (PredType, Type)
-- Split off the first predicate argument from a type
tcSplitPredFunTy_maybe ty
  | Just ty' <- tcView ty = tcSplitPredFunTy_maybe ty'
tcSplitPredFunTy_maybe (FunTy { ft_af = InvisArg
                              , ft_arg = arg, ft_res = res })
  = Just (arg, res)
tcSplitPredFunTy_maybe _
  = Nothing

tcSplitPhiTy :: Type -> (ThetaType, Type)
tcSplitPhiTy ty
  = split ty []
  where
    split ty ts
      = case tcSplitPredFunTy_maybe ty of
          Just (pred, ty) -> split ty (pred:ts)
          Nothing         -> (reverse ts, ty)

-- | Split a sigma type into its parts. This only splits /invisible/ type
-- variable binders, as these are the only forms of binder that the typechecker
-- will implicitly instantiate.
tcSplitSigmaTy :: Type -> ([TyVar], ThetaType, Type)
tcSplitSigmaTy ty = case tcSplitForAllInvisTyVars ty of
                      (tvs, rho) -> case tcSplitPhiTy rho of
                                      (theta, tau) -> (tvs, theta, tau)

-- | Split a sigma type into its parts, going underneath as many arrows
-- and foralls as possible. See Note [tcSplitNestedSigmaTys]
tcSplitNestedSigmaTys :: Type -> ([TyVar], ThetaType, Type)
-- See Note [tcSplitNestedSigmaTys]
-- NB: This is basically a pure version of deeplyInstantiate (from Unify) that
--     doesn't compute an HsWrapper.
tcSplitNestedSigmaTys ty
    -- If there's a forall, split it apart and try splitting the rho type
    -- underneath it.
  | (arg_tys, body_ty)   <- tcSplitFunTys ty
  , (tvs1, theta1, rho1) <- tcSplitSigmaTy body_ty
  , not (null tvs1 && null theta1)
  = let (tvs2, theta2, rho2) = tcSplitNestedSigmaTys rho1
    in (tvs1 ++ tvs2, theta1 ++ theta2, mkVisFunTys arg_tys rho2)
    -- If there's no forall, we're done.
  | otherwise = ([], [], ty)

{- Note [tcSplitNestedSigmaTys]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tcSplitNestedSigmaTys splits out all the /nested/ foralls and constraints,
including under function arrows.  E.g. given this type synonym:
  type Traversal s t a b = forall f. Applicative f => (a -> f b) -> s -> f t

then
  tcSplitNestedSigmaTys (forall s t a b. C s t a b => Int -> Traversal s t a b)

will return
  ( [s,t,a,b,f]
  , [C s t a b, Applicative f]
  , Int -> (a -> f b) -> s -> f t)@.

This function is used in these places:
* Improving error messages in GHC.Tc.Gen.Head.addFunResCtxt
* Validity checking for default methods: GHC.Tc.TyCl.checkValidClass
* A couple of calls in the GHCi debugger: GHC.Runtime.Heap.Inspect

In other words, just in validity checking and error messages; hence
no wrappers or evidence generation.

Notice that tcSplitNestedSigmaTys even looks under function arrows;
doing so is the Right Thing even with simple subsumption, not just with
deep subsumption.
-}

-----------------------
tcTyConAppTyCon :: Type -> TyCon
tcTyConAppTyCon ty
  = case tcTyConAppTyCon_maybe ty of
      Just tc -> tc
      Nothing -> pprPanic "tcTyConAppTyCon" (pprType ty)

-- | Like 'tcRepSplitTyConApp_maybe', but only returns the 'TyCon'.
tcTyConAppTyCon_maybe :: Type -> Maybe TyCon
tcTyConAppTyCon_maybe ty
  | Just ty' <- tcView ty = tcTyConAppTyCon_maybe ty'
tcTyConAppTyCon_maybe (TyConApp tc _)
  = Just tc
tcTyConAppTyCon_maybe (FunTy { ft_af = VisArg })
  = Just funTyCon  -- (=>) is /not/ a TyCon in its own right
                   -- C.f. tcRepSplitAppTy_maybe
tcTyConAppTyCon_maybe _
  = Nothing

tcTyConAppArgs :: Type -> [Type]
tcTyConAppArgs ty
  = case tcSplitTyConApp_maybe ty of
      Just (_, args) -> args
      Nothing        -> pprPanic "tcTyConAppArgs" (pprType ty)

tcSplitTyConApp :: Type -> (TyCon, [Type])
tcSplitTyConApp ty
  = case tcSplitTyConApp_maybe ty of
      Just stuff -> stuff
      Nothing    -> pprPanic "tcSplitTyConApp" (pprType ty)

-----------------------
tcSplitFunTys :: Type -> ([Scaled Type], Type)
tcSplitFunTys ty
  = case tcSplitFunTy_maybe ty of
      Nothing        -> ([], ty)
      Just (arg,res) -> (arg:args, res')
        where
          (args,res') = tcSplitFunTys res

tcSplitFunTy_maybe :: Type -> Maybe (Scaled Type, Type)
tcSplitFunTy_maybe ty
  | Just ty' <- tcView ty = tcSplitFunTy_maybe ty'
tcSplitFunTy_maybe (FunTy { ft_af = af, ft_mult = w, ft_arg = arg, ft_res = res })
  | VisArg <- af = Just (Scaled w arg, res)
tcSplitFunTy_maybe _ = Nothing
  -- Note the VisArg guard
  -- Consider     (?x::Int) => Bool
  -- We don't want to treat this as a function type!
  -- A concrete example is test tc230:
  --      f :: () -> (?p :: ()) => () -> ()
  --
  --      g = f () ()

tcSplitFunTysN :: Arity                 -- n: Number of desired args
               -> TcRhoType
               -> Either Arity          -- Number of missing arrows
                    ([Scaled TcSigmaType], -- Arg types (always N types)
                     TcSigmaType)       -- The rest of the type
-- ^ Split off exactly the specified number argument types
-- Returns
--  (Left m) if there are 'm' missing arrows in the type
--  (Right (tys,res)) if the type looks like t1 -> ... -> tn -> res
tcSplitFunTysN n ty
  | n == 0
  = Right ([], ty)
  | Just (arg,res) <- tcSplitFunTy_maybe ty
  = case tcSplitFunTysN (n-1) res of
      Left m            -> Left m
      Right (args,body) -> Right (arg:args, body)
  | otherwise
  = Left n

tcSplitFunTy :: Type -> (Scaled Type, Type)
tcSplitFunTy ty = expectJust "tcSplitFunTy" (tcSplitFunTy_maybe ty)

tcFunArgTy :: Type -> Scaled Type
tcFunArgTy ty = fst (tcSplitFunTy ty)

tcFunResultTy :: Type -> Type
tcFunResultTy ty = snd (tcSplitFunTy ty)

-- | Strips off n *visible* arguments and returns the resulting type
tcFunResultTyN :: HasDebugCallStack => Arity -> Type -> Type
tcFunResultTyN n ty
  | Right (_, res_ty) <- tcSplitFunTysN n ty
  = res_ty
  | otherwise
  = pprPanic "tcFunResultTyN" (ppr n <+> ppr ty)

-----------------------
tcSplitAppTy_maybe :: Type -> Maybe (Type, Type)
tcSplitAppTy_maybe ty | Just ty' <- tcView ty = tcSplitAppTy_maybe ty'
tcSplitAppTy_maybe ty = tcRepSplitAppTy_maybe ty

tcSplitAppTy :: Type -> (Type, Type)
tcSplitAppTy ty = case tcSplitAppTy_maybe ty of
                    Just stuff -> stuff
                    Nothing    -> pprPanic "tcSplitAppTy" (pprType ty)

tcSplitAppTys :: Type -> (Type, [Type])
tcSplitAppTys ty
  = go ty []
  where
    go ty args = case tcSplitAppTy_maybe ty of
                   Just (ty', arg) -> go ty' (arg:args)
                   Nothing         -> (ty,args)

-- | Returns the number of arguments in the given type, without
-- looking through synonyms. This is used only for error reporting.
-- We don't look through synonyms because of #11313.
tcRepGetNumAppTys :: Type -> Arity
tcRepGetNumAppTys = length . snd . repSplitAppTys

-----------------------
-- | If the type is a tyvar, possibly under a cast, returns it, along
-- with the coercion. Thus, the co is :: kind tv ~N kind type
tcGetCastedTyVar_maybe :: Type -> Maybe (TyVar, CoercionN)
tcGetCastedTyVar_maybe ty | Just ty' <- tcView ty = tcGetCastedTyVar_maybe ty'
tcGetCastedTyVar_maybe (CastTy (TyVarTy tv) co) = Just (tv, co)
tcGetCastedTyVar_maybe (TyVarTy tv)             = Just (tv, mkNomReflCo (tyVarKind tv))
tcGetCastedTyVar_maybe _                        = Nothing

tcGetTyVar_maybe :: Type -> Maybe TyVar
tcGetTyVar_maybe ty | Just ty' <- tcView ty = tcGetTyVar_maybe ty'
tcGetTyVar_maybe (TyVarTy tv) = Just tv
tcGetTyVar_maybe _            = Nothing

tcGetTyVar :: String -> Type -> TyVar
tcGetTyVar msg ty
  = case tcGetTyVar_maybe ty of
      Just tv -> tv
      Nothing -> pprPanic msg (ppr ty)

tcIsTyVarTy :: Type -> Bool
tcIsTyVarTy ty | Just ty' <- tcView ty = tcIsTyVarTy ty'
tcIsTyVarTy (CastTy ty _) = tcIsTyVarTy ty  -- look through casts, as
                                            -- this is only used for
                                            -- e.g., FlexibleContexts
tcIsTyVarTy (TyVarTy _)   = True
tcIsTyVarTy _             = False

-----------------------
tcSplitDFunTy :: Type -> ([TyVar], [Type], Class, [Type])
-- Split the type of a dictionary function
-- We don't use tcSplitSigmaTy,  because a DFun may (with NDP)
-- have non-Pred arguments, such as
--     df :: forall m. (forall b. Eq b => Eq (m b)) -> C m
--
-- Also NB splitFunTys, not tcSplitFunTys;
-- the latter  specifically stops at PredTy arguments,
-- and we don't want to do that here
tcSplitDFunTy ty
  = case tcSplitForAllInvisTyVars ty of { (tvs, rho)   ->
    case splitFunTys rho             of { (theta, tau) ->
    case tcSplitDFunHead tau         of { (clas, tys)  ->
    (tvs, map scaledThing theta, clas, tys) }}}

tcSplitDFunHead :: Type -> (Class, [Type])
tcSplitDFunHead = getClassPredTys

tcSplitMethodTy :: Type -> ([TyVar], PredType, Type)
-- A class method (selector) always has a type like
--   forall as. C as => blah
-- So if the class looks like
--   class C a where
--     op :: forall b. (Eq a, Ix b) => a -> b
-- the class method type looks like
--   op :: forall a. C a => forall b. (Eq a, Ix b) => a -> b
--
-- tcSplitMethodTy just peels off the outer forall and
-- that first predicate
tcSplitMethodTy ty
  | (sel_tyvars,sel_rho) <- tcSplitForAllInvisTyVars ty
  , Just (first_pred, local_meth_ty) <- tcSplitPredFunTy_maybe sel_rho
  = (sel_tyvars, first_pred, local_meth_ty)
  | otherwise
  = pprPanic "tcSplitMethodTy" (ppr ty)

{- *********************************************************************
*                                                                      *
            Type equalities
*                                                                      *
********************************************************************* -}

tcEqKind :: HasDebugCallStack => TcKind -> TcKind -> Bool
tcEqKind = tcEqType

tcEqType :: HasDebugCallStack => TcType -> TcType -> Bool
-- ^ tcEqType implements typechecker equality, as described in
-- @Note [Typechecker equality vs definitional equality]@.
tcEqType ty1 ty2
  =  tcEqTypeNoSyns ki1 ki2
  && tcEqTypeNoSyns ty1 ty2
  where
    ki1 = tcTypeKind ty1
    ki2 = tcTypeKind ty2

-- | Just like 'tcEqType', but will return True for types of different kinds
-- as long as their non-coercion structure is identical.
tcEqTypeNoKindCheck :: TcType -> TcType -> Bool
tcEqTypeNoKindCheck ty1 ty2
  = tcEqTypeNoSyns ty1 ty2

-- | Check whether two TyConApps are the same; if the number of arguments
-- are different, just checks the common prefix of arguments.
tcEqTyConApps :: TyCon -> [Type] -> TyCon -> [Type] -> Bool
tcEqTyConApps tc1 args1 tc2 args2
  = tc1 == tc2 &&
    and (zipWith tcEqTypeNoKindCheck args1 args2)
    -- No kind check necessary: if both arguments are well typed, then
    -- any difference in the kinds of later arguments would show up
    -- as differences in earlier (dependent) arguments

{-
Note [Specialising tc_eq_type]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The type equality predicates in TcType are hit pretty hard during
typechecking. Consequently we take pains to ensure that these paths are
compiled to efficient, minimally-allocating code.

To this end we place an INLINE on tc_eq_type, ensuring that it is inlined
into its publicly-visible interfaces (e.g. tcEqType). In addition to
eliminating some dynamic branches, this allows the simplifier to eliminate
the closure allocations that would otherwise be necessary to capture the
two boolean "mode" flags. This reduces allocations by a good fraction of a
percent when compiling Cabal.

See #19226.
-}

-- | Type equality comparing both visible and invisible arguments and
-- expanding type synonyms.
tcEqTypeNoSyns :: TcType -> TcType -> Bool
tcEqTypeNoSyns ta tb = tc_eq_type False False ta tb

-- | Like 'tcEqType', but returns True if the /visible/ part of the types
-- are equal, even if they are really unequal (in the invisible bits)
tcEqTypeVis :: TcType -> TcType -> Bool
tcEqTypeVis ty1 ty2 = tc_eq_type False True ty1 ty2

-- | Like 'pickyEqTypeVis', but returns a Bool for convenience
pickyEqType :: TcType -> TcType -> Bool
-- Check when two types _look_ the same, _including_ synonyms.
-- So (pickyEqType String [Char]) returns False
-- This ignores kinds and coercions, because this is used only for printing.
pickyEqType ty1 ty2 = tc_eq_type True False ty1 ty2

-- | Real worker for 'tcEqType'. No kind check!
tc_eq_type :: Bool          -- ^ True <=> do not expand type synonyms
           -> Bool          -- ^ True <=> compare visible args only
           -> Type -> Type
           -> Bool
-- Flags False, False is the usual setting for tc_eq_type
-- See Note [Computing equality on types] in Type
tc_eq_type keep_syns vis_only orig_ty1 orig_ty2
  = go orig_env orig_ty1 orig_ty2
  where
    go :: RnEnv2 -> Type -> Type -> Bool
    -- See Note [Comparing nullary type synonyms] in GHC.Core.Type.
    go _ (TyConApp tc1 []) (TyConApp tc2 [])
      | tc1 == tc2
      = True

    go env t1 t2 | not keep_syns, Just t1' <- tcView t1 = go env t1' t2
    go env t1 t2 | not keep_syns, Just t2' <- tcView t2 = go env t1 t2'

    go env (TyVarTy tv1) (TyVarTy tv2)
      = rnOccL env tv1 == rnOccR env tv2

    go _ (LitTy lit1) (LitTy lit2)
      = lit1 == lit2

    go env (ForAllTy (Bndr tv1 vis1) ty1)
           (ForAllTy (Bndr tv2 vis2) ty2)
      =  vis1 `sameVis` vis2
           -- See Note [ForAllTy and typechecker equality] in
           -- GHC.Tc.Solver.Canonical for why we use `sameVis` here
      && (vis_only || go env (varType tv1) (varType tv2))
      && go (rnBndr2 env tv1 tv2) ty1 ty2

    -- Make sure we handle all FunTy cases since falling through to the
    -- AppTy case means that tcRepSplitAppTy_maybe may see an unzonked
    -- kind variable, which causes things to blow up.
    -- See Note [Equality on FunTys] in GHC.Core.TyCo.Rep: we must check
    -- kinds here
    go env (FunTy _ w1 arg1 res1) (FunTy _ w2 arg2 res2)
      = kinds_eq && go env arg1 arg2 && go env res1 res2 && go env w1 w2
      where
        kinds_eq | vis_only  = True
                 | otherwise = go env (typeKind arg1) (typeKind arg2) &&
                               go env (typeKind res1) (typeKind res2)

      -- See Note [Equality on AppTys] in GHC.Core.Type
    go env (AppTy s1 t1) ty2
      | Just (s2, t2) <- tcRepSplitAppTy_maybe ty2
      = go env s1 s2 && go env t1 t2
    go env ty1 (AppTy s2 t2)
      | Just (s1, t1) <- tcRepSplitAppTy_maybe ty1
      = go env s1 s2 && go env t1 t2

    go env (TyConApp tc1 ts1) (TyConApp tc2 ts2)
      = tc1 == tc2 && gos env (tc_vis tc1) ts1 ts2

    go env (CastTy t1 _) t2                 = go env t1 t2
    go env t1 (CastTy t2 _)                 = go env t1 t2
    go _   (CoercionTy {}) (CoercionTy {})  = True

    go _ _ _ = False

    -- Compare argument lists pointwise, skipping arguments whose
    -- "ignore" flag (from tc_vis) is True
    gos _   _        []       []       = True
    gos env (ig:igs) (t1:ts1) (t2:ts2) = (ig || go env t1 t2)
                                      && gos env igs ts1 ts2
    gos _ _ _ _ = False

    tc_vis :: TyCon -> [Bool]  -- True for the fields we should ignore
    tc_vis tc | vis_only  = inviss ++ repeat False  -- Ignore invisibles
              | otherwise = repeat False            -- Ignore nothing
       -- The repeat False is necessary because tycons
       -- can legitimately be oversaturated
      where
        bndrs  = tyConBinders tc
        inviss = map isInvisibleTyConBinder bndrs

    orig_env = mkRnEnv2 $ mkInScopeSet $ tyCoVarsOfTypes [orig_ty1, orig_ty2]

{-# INLINE tc_eq_type #-} -- See Note [Specialising tc_eq_type].

{- Note [Typechecker equality vs definitional equality]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GHC has two notions of equality over Core types:

* Definitional equality, as implemented by GHC.Core.Type.eqType.
  See Note [Non-trivial definitional equality] in GHC.Core.TyCo.Rep.
* Typechecker equality, as implemented by tcEqType (in GHC.Tc.Utils.TcType).
  GHC.Tc.Solver.Canonical.canEqNC also respects typechecker equality.

Typechecker equality implies definitional equality: if two types are equal
according to typechecker equality, then they are also equal according to
definitional equality. The converse is not always true, as typechecker equality
is finer-grained than definitional equality in two places:

* Unlike definitional equality, which equates Type and Constraint, typechecker
  equality treats them as distinct types. See Note [Kind Constraint and kind Type]
  in GHC.Core.Type.
* Unlike definitional equality, which does not care about the ArgFlag of a
  ForAllTy, typechecker equality treats Required type variable binders as
  distinct from Invisible type variable binders.
  See Note [ForAllTy and typechecker equality] in GHC.Tc.Solver.Canonical.
-}

{- *********************************************************************
*                                                                      *
                       Predicate types
*                                                                      *
************************************************************************

Deconstructors and tests on predicate types

Note [Kind polymorphic type classes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    class C f where...   -- C :: forall k. k -> Constraint
    g :: forall (f::*). C f => f -> f

Here the (C f) in the signature is really (C * f), and we
don't want to complain that the * isn't a type variable!
-}

isTyVarClassPred :: PredType -> Bool
isTyVarClassPred ty = case getClassPredTys_maybe ty of
    Just (_, tys) -> all isTyVarTy tys
    _             -> False

-------------------------
checkValidClsArgs :: Bool -> Class -> [KindOrType] -> Bool
-- If the Bool is True (flexible contexts), return True (i.e. ok)
-- Otherwise, check that the type (not kind) args are all headed by a tyvar
--   E.g. (Eq a) accepted, (Eq (f a)) accepted, but (Eq Int) rejected
-- This function is here rather than in GHC.Tc.Validity because it is
-- called from GHC.Tc.Solver, which itself is imported by GHC.Tc.Validity
checkValidClsArgs flexible_contexts cls kts
  | flexible_contexts = True
  | otherwise         = all hasTyVarHead tys
  where
    tys = filterOutInvisibleTypes (classTyCon cls) kts

hasTyVarHead :: Type -> Bool
-- Returns true of (a t1 .. tn), where 'a' is a type variable
hasTyVarHead ty                 -- Haskell 98 allows predicates of form
  | tcIsTyVarTy ty = True       --      C (a ty1 .. tyn)
  | otherwise                   -- where a is a type variable
  = case tcSplitAppTy_maybe ty of
      Just (ty, _) -> hasTyVarHead ty
      Nothing      -> False

evVarPred :: EvVar -> PredType
evVarPred var = varType var
  -- Historical note: I used to have an ASSERT here,
  -- checking (isEvVarType (varType var)).  But with something like
  --   f :: c => _ -> _
  -- we end up with (c :: kappa), and (kappa ~ Constraint).  Until
  -- we solve and zonk (which there is no particular reason to do for
  -- partial signatures, (isEvVarType kappa) will return False. But
  -- nothing is wrong.  So I just removed the ASSERT.
---------------------------
boxEqPred :: EqRel -> Type -> Type -> Maybe (Class, [Type])
-- Given (t1 ~# t2) or (t1 ~R# t2) return the boxed version
--       (t1 ~ t2)  or (t1 `Coercible` t2)
boxEqPred eq_rel ty1 ty2
  = case eq_rel of
      NomEq  | homo_kind -> Just (eqClass,        [k1,     ty1, ty2])
             | otherwise -> Just (heqClass,       [k1, k2, ty1, ty2])
      ReprEq | homo_kind -> Just (coercibleClass, [k1,     ty1, ty2])
             | otherwise -> Nothing -- Sigh: we do not have heterogeneous Coercible
                                    --       so we can't abstract over it
                                    -- Nothing fundamental: we could add it
  where
    k1 = tcTypeKind ty1
    k2 = tcTypeKind ty2
    homo_kind = k1 `tcEqType` k2

pickCapturedPreds
  :: TyVarSet           -- Quantifying over these
  -> TcThetaType        -- Proposed constraints to quantify
  -> TcThetaType        -- A subset that we can actually quantify
-- A simpler version of pickQuantifiablePreds, used to winnow down
-- the inferred constraints of a group of bindings, into those for
-- one particular identifier
pickCapturedPreds qtvs theta
  = filter captured theta
  where
    captured pred = isIPLikePred pred || (tyCoVarsOfType pred `intersectsVarSet` qtvs)

-- Superclasses

type PredWithSCs a = (PredType, [PredType], a)

mkMinimalBySCs :: forall a. (a -> PredType) -> [a] -> [a]
-- Remove predicates that
--
--   - are the same as another predicate
--
--   - can be deduced from another by superclasses,
--
--   - are a reflexive equality (e.g  * ~ *)
--     (see Note [Remove redundant provided dicts] in GHC.Tc.TyCl.PatSyn)
--
-- The result is a subset of the input.
-- The 'a' is just paired up with the PredType;
--   typically it might be a dictionary Id
mkMinimalBySCs get_pred xs = go preds_with_scs []
  where
    preds_with_scs :: [PredWithSCs a]
    preds_with_scs = [ (pred, implicants pred, x)
                     | x <- xs
                     , let pred = get_pred x ]

    go :: [PredWithSCs a]   -- Work list
       -> [PredWithSCs a]   -- Accumulating result
       -> [a]
    go [] min_preds
      = reverse (map thdOf3 min_preds)
        -- The 'reverse' isn't strictly necessary, but it
        -- means that the results are returned in the same
        -- order as the input, which is generally saner
    go (work_item@(p,_,_) : work_list) min_preds
      | EqPred _ t1 t2 <- classifyPredType p
      , t1 `tcEqType` t2   -- See GHC.Tc.TyCl.PatSyn
                           -- Note [Remove redundant provided dicts]
      = go work_list min_preds
      | p `in_cloud` work_list || p `in_cloud` min_preds
        -- Why look at work-list too?  Suppose work_item is Eq a,
        -- and work-list contains Ord a
      = go work_list min_preds
      | otherwise
      = go work_list (work_item : min_preds)

    in_cloud :: PredType -> [PredWithSCs a] -> Bool
    in_cloud p ps = or [ p `tcEqType` p' | (_, scs, _) <- ps, p' <- scs ]

    implicants pred
      = pred : eq_extras pred ++ transSuperClasses pred

    -- Combine (a ~ b) and (b ~ a); no need to have both in one context
    -- These can arise when dealing with partial type signatures (e.g. T14715)
    eq_extras pred
      = case classifyPredType pred of
          EqPred r t1 t2               -> [mkPrimEqPredRole (eqRelRole r) t2 t1]
          ClassPred cls [k1,k2,t1,t2]
            | cls `hasKey` heqTyConKey -> [mkClassPred cls [k2, k1, t2, t1]]
          ClassPred cls [k,t1,t2]
            | cls `hasKey` eqTyConKey  -> [mkClassPred cls [k, t2, t1]]
          _ -> []

transSuperClasses :: PredType -> [PredType]
-- (transSuperClasses p) returns (p's superclasses) not including p
-- Stop if you encounter the same class again
-- See Note [Expanding superclasses]
transSuperClasses p
  = go emptyNameSet p
  where
    go :: NameSet -> PredType -> [PredType]
    go rec_clss p
      | ClassPred cls tys <- classifyPredType p
      , let cls_nm = className cls
      , not (cls_nm `elemNameSet` rec_clss)
      , let rec_clss' | isCTupleClass cls = rec_clss
                      | otherwise         = rec_clss `extendNameSet` cls_nm
      = [ p' | sc <- immSuperClasses cls tys
             , p' <- sc : go rec_clss' sc ]
      | otherwise
      = []

immSuperClasses :: Class -> [Type] -> [PredType]
immSuperClasses cls tys
  = substTheta (zipTvSubst tyvars tys) sc_theta
  where
    (tyvars,sc_theta,_,_) = classBigSig cls

isImprovementPred :: PredType -> Bool
-- Either it's an equality, or has some functional dependency
isImprovementPred ty
  = case classifyPredType ty of
      EqPred NomEq t1 t2 -> not (t1 `tcEqType` t2)
      EqPred ReprEq _ _  -> False
      ClassPred cls _    -> classHasFds cls
      IrredPred {}       -> True -- Might have equalities after reduction?
      ForAllPred {}      -> False

{- Note [Expanding superclasses]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we expand superclasses, we use the following algorithm:

transSuperClasses( C tys ) returns the transitive superclasses
of (C tys), not including C itself

For example
  class C a b => D a b
  class D b a => C a b

Then
  transSuperClasses( Ord ty )  = [Eq ty]
  transSuperClasses( C ta tb ) = [D tb ta, C tb ta]

Notice that in the recursive-superclass case we include C again at
the end of the chain.  One could exclude C in this case, but
the code is more awkward and there seems no good reason to do so.
(However C.f. GHC.Tc.Solver.Canonical.mk_strict_superclasses, which /does/
appear to do so.)

The algorithm is expand( so_far, pred ):

 1. If pred is not a class constraint, return empty set
       Otherwise pred = C ts
 2. If C is in so_far, return empty set (breaks loops)
 3. Find the immediate superclasses constraints of (C ts)
 4. For each such sc_pred, return (sc_pred : expand( so_far+C, D ss )

Notice that

 * With normal Haskell-98 classes, the loop-detector will never bite,
   so we'll get all the superclasses.

 * We need the loop-breaker in case we have UndecidableSuperClasses on

 * Since there is only a finite number of distinct classes, expansion
   must terminate.

 * The loop breaking is a bit conservative. Notably, a tuple class
   could contain many times without threatening termination:
      (Eq a, (Ord a, Ix a))
   And this is true of any class that we can statically guarantee
   as non-recursive (in some sense).  For now, we just make a special
   case for tuples.  Something better would be cool.

See also GHC.Tc.TyCl.Utils.checkClassCycles.

************************************************************************
*                                                                      *
            Classifying types
*                                                                      *
************************************************************************
-}

isSigmaTy :: TcType -> Bool
-- isSigmaTy returns true of any qualified type.  It doesn't
-- *necessarily* have any foralls.  E.g
--        f :: (?x::Int) => Int -> Int
isSigmaTy ty | Just ty' <- tcView ty = isSigmaTy ty'
isSigmaTy (ForAllTy {})                = True
isSigmaTy (FunTy { ft_af = InvisArg }) = True
isSigmaTy _                            = False

isRhoTy :: TcType -> Bool   -- True of TcRhoTypes; see Note [TcRhoType]
isRhoTy ty | Just ty' <- tcView ty = isRhoTy ty'
isRhoTy (ForAllTy {})                = False
isRhoTy (FunTy { ft_af = InvisArg }) = False
isRhoTy _                            = True

-- | Like 'isRhoTy', but also says 'True' for 'Infer' types
isRhoExpTy :: ExpType -> Bool
isRhoExpTy (Check ty) = isRhoTy ty
isRhoExpTy (Infer {}) = True

isOverloadedTy :: Type -> Bool
-- Yes for a type of a function that might require evidence-passing
-- Used only by bindLocalMethods
isOverloadedTy ty | Just ty' <- tcView ty = isOverloadedTy ty'
isOverloadedTy (ForAllTy _ ty)              = isOverloadedTy ty
isOverloadedTy (FunTy { ft_af = InvisArg }) = True
isOverloadedTy _                            = False

isFloatTy, isDoubleTy,
  isFloatPrimTy, isDoublePrimTy,
  isIntegerTy, isNaturalTy,
  isIntTy, isWordTy, isBoolTy,
  isUnitTy, isCharTy, isAnyTy :: Type -> Bool
isFloatTy      = is_tc floatTyConKey
isDoubleTy     = is_tc doubleTyConKey
isFloatPrimTy  = is_tc floatPrimTyConKey
isDoublePrimTy = is_tc doublePrimTyConKey
isIntegerTy    = is_tc integerTyConKey
isNaturalTy    = is_tc naturalTyConKey
isIntTy        = is_tc intTyConKey
isWordTy       = is_tc wordTyConKey
isBoolTy       = is_tc boolTyConKey
isUnitTy       = is_tc unitTyConKey
isCharTy       = is_tc charTyConKey
isAnyTy        = is_tc anyTyConKey

-- | Is the type inhabited by machine floating-point numbers?
--
-- Used to check that we don't use floating-point literal patterns
-- in Core.
--
-- See #9238 and Note [Rules for floating-point comparisons]
-- in GHC.Core.Opt.ConstantFold.
isFloatingPrimTy :: Type -> Bool
isFloatingPrimTy ty = isFloatPrimTy ty || isDoublePrimTy ty

-- | Is a type 'String'?
isStringTy :: Type -> Bool
isStringTy ty
  = case tcSplitTyConApp_maybe ty of
      Just (tc, [arg_ty]) -> tc == listTyCon && isCharTy arg_ty
      _                   -> False

is_tc :: Unique -> Type -> Bool
-- Newtypes are opaque to this
is_tc uniq ty = case tcSplitTyConApp_maybe ty of
                  Just (tc, _) -> uniq == getUnique tc
                  Nothing      -> False

isRigidTy :: TcType -> Bool
isRigidTy ty
  | Just (tc,_) <- tcSplitTyConApp_maybe ty = isGenerativeTyCon tc Nominal
  | Just {} <- tcSplitAppTy_maybe ty        = True
  | isForAllTy ty                           = True
  | otherwise                               = False

{-
************************************************************************
*                                                                      *
                Misc
*                                                                      *
************************************************************************

Note [Visible type application]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GHC implements a generalisation of the algorithm described in the
"Visible Type Application" paper (available from
http://www.cis.upenn.edu/~sweirich/publications.html). A key part
of that algorithm is to distinguish user-specified variables from inferred
variables. For example, the following should typecheck:

  f :: forall a b. a -> b -> b
  f = const id

  g = const id

  x = f @Int @Bool 5 False
  y = g 5 @Bool False

The idea is that we wish to allow visible type application when we are
instantiating a specified, fixed variable. In practice, specified, fixed
variables are either written in a type signature (or
annotation), OR are imported from another module. (We could do better here,
for example by doing SCC analysis on parts of a module and considering any
type from outside one's SCC to be fully specified, but this is very confusing to
users. The simple rule above is much more straightforward and predictable.)

So, both of f's quantified variables are specified and may be instantiated.
But g has no type signature, so only id's variable is specified (because id
is imported). We write the type of g as forall {a}. a -> forall b. b -> b.
Note that the a is in braces, meaning it cannot be instantiated with
visible type application.

Tracking specified vs. inferred variables is done conveniently by a field
in TyBinder.
-}

deNoteType :: Type -> Type
-- Remove all *outermost* type synonyms and other notes
deNoteType ty | Just ty' <- coreView ty = deNoteType ty'
deNoteType ty = ty

{- Find the free tycons and classes of a type.  This is used in the front
end of the compiler.
-}

{-
************************************************************************
*                                                                      *
                External types
*                                                                      *
************************************************************************

The compiler's foreign function interface supports the passing of a
restricted set of types as arguments and results (the restricting factor
being the )
-}

tcSplitIOType_maybe :: Type -> Maybe (TyCon, Type)
-- (tcSplitIOType_maybe t) returns Just (IO, t')
--              if t = IO t'
--              returns Nothing otherwise
tcSplitIOType_maybe ty
  = case tcSplitTyConApp_maybe ty of
      Just (io_tycon, [io_res_ty])
        | io_tycon `hasKey` ioTyConKey ->
          Just (io_tycon, io_res_ty)
      _ ->
          Nothing

-- | Reason why a type in an FFI signature is invalid
data IllegalForeignTypeReason
  = TypeCannotBeMarshaled !Type TypeCannotBeMarshaledReason
  | ForeignDynNotPtr
      !Type -- ^ Expected type
      !Type -- ^ Actual type
  | SafeHaskellMustBeInIO
  | IOResultExpected
  | UnexpectedNestedForall
  | LinearTypesNotAllowed
  | OneArgExpected
  | AtLeastOneArgExpected

-- | Reason why a type cannot be marshalled through the FFI.
data TypeCannotBeMarshaledReason
  = NotADataType
  | NewtypeDataConNotInScope !(Maybe TyCon)
  | UnliftedFFITypesNeeded
  | NotABoxedMarshalableTyCon
  | ForeignLabelNotAPtr
  | NotSimpleUnliftedType

isFFIArgumentTy :: DynFlags -> Safety -> Type -> Validity' IllegalForeignTypeReason
-- Checks for valid argument type for a 'foreign import'
isFFIArgumentTy dflags safety ty
   = checkRepTyCon (legalOutgoingTyCon dflags safety) ty

isFFIExternalTy :: Type -> Validity' IllegalForeignTypeReason
-- Types that are allowed as arguments of a 'foreign export'
isFFIExternalTy ty = checkRepTyCon legalFEArgTyCon ty

isFFIImportResultTy :: DynFlags -> Type -> Validity' IllegalForeignTypeReason
isFFIImportResultTy dflags ty
  = checkRepTyCon (legalFIResultTyCon dflags) ty

isFFIExportResultTy :: Type -> Validity' IllegalForeignTypeReason
isFFIExportResultTy ty = checkRepTyCon legalFEResultTyCon ty

isFFIDynTy :: Type -> Type -> Validity' IllegalForeignTypeReason
-- The type in a foreign import dynamic must be Ptr, FunPtr, or a newtype of
-- either, and the wrapped function type must be equal to the given type.
-- We assume that all types have been run through normaliseFfiType, so we don't
-- need to worry about expanding newtypes here.
isFFIDynTy expected ty
    -- Note [Foreign import dynamic]
    -- In the example below, expected would be 'CInt -> IO ()', while ty would
    -- be 'FunPtr (CDouble -> IO ())'.
    | Just (tc, [ty']) <- splitTyConApp_maybe ty
    , tyConUnique tc `elem` [ptrTyConKey, funPtrTyConKey]
    , eqType ty' expected
    = IsValid
    | otherwise
    = NotValid (ForeignDynNotPtr expected ty)

isFFILabelTy :: Type -> Validity' IllegalForeignTypeReason
-- The type of a foreign label must be Ptr, FunPtr, or a newtype of either.
isFFILabelTy ty = checkRepTyCon ok ty
  where
    ok tc | tc `hasKey` funPtrTyConKey || tc `hasKey` ptrTyConKey
          = IsValid
          | otherwise
          = NotValid ForeignLabelNotAPtr

isFFIPrimArgumentTy :: DynFlags -> Type -> Validity' IllegalForeignTypeReason
-- Checks for valid argument type for a 'foreign import prim'
-- Currently they must all be simple unlifted types, or the well-known type
-- Any, which can be used to pass the address to a Haskell object on the heap to
-- the foreign function.
isFFIPrimArgumentTy dflags ty
  | isAnyTy ty = IsValid
  | otherwise  = checkRepTyCon (legalFIPrimArgTyCon dflags) ty

isFFIPrimResultTy :: DynFlags -> Type -> Validity' IllegalForeignTypeReason
-- Checks for valid result type for a 'foreign import prim' Currently
-- it must be an unlifted type, including unboxed tuples, unboxed
-- sums, or the well-known type Any.
isFFIPrimResultTy dflags ty
  | isAnyTy ty = IsValid
  | otherwise  = checkRepTyCon (legalFIPrimResultTyCon dflags) ty

isFunPtrTy :: Type -> Bool
isFunPtrTy ty
  | Just (tc, [_]) <- splitTyConApp_maybe ty
  = tc `hasKey` funPtrTyConKey
  | otherwise
  = False

-- normaliseFfiType gets run before checkRepTyCon, so we don't
-- need to worry about looking through newtypes or type functions
-- here; that's already been taken care of.
checkRepTyCon
  :: (TyCon -> Validity' TypeCannotBeMarshaledReason)
  -> Type
  -> Validity' IllegalForeignTypeReason
checkRepTyCon check_tc ty
  = fmap (TypeCannotBeMarshaled ty) $ case splitTyConApp_maybe ty of
      Just (tc, tys)
        | isNewTyCon tc -> NotValid (mk_nt_reason tc tys)
        | otherwise     -> check_tc tc
      Nothing -> NotValid NotADataType
  where
    mk_nt_reason tc tys
      | null tys  = NewtypeDataConNotInScope Nothing
      | otherwise = NewtypeDataConNotInScope (Just tc)

{- Note [Foreign import dynamic]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A dynamic stub must be of the form 'FunPtr ft -> ft' where ft is any foreign
type.  Similarly, a wrapper stub must be of the form 'ft -> IO (FunPtr ft)'.

We use isFFIDynTy to check whether a signature is well-formed. For example,
given a (illegal) declaration like:

foreign import ccall "dynamic"
  foo :: FunPtr (CDouble -> IO ()) -> CInt -> IO ()

isFFIDynTy will compare the 'FunPtr' type 'CDouble -> IO ()' with the curried
result type 'CInt -> IO ()', and return False, as they are not equal.


----------------------------------------------
These chaps do the work; they are not exported
----------------------------------------------
-}

legalFEArgTyCon :: TyCon -> Validity' TypeCannotBeMarshaledReason
legalFEArgTyCon tc
  -- It's illegal to make foreign exports that take unboxed
  -- arguments.  The RTS API currently can't invoke such things.  --SDM 7/2000
  = boxedMarshalableTyCon tc

legalFIResultTyCon :: DynFlags -> TyCon -> Validity' TypeCannotBeMarshaledReason
legalFIResultTyCon dflags tc
  | tc == unitTyCon         = IsValid
  | otherwise               = marshalableTyCon dflags tc

legalFEResultTyCon :: TyCon -> Validity' TypeCannotBeMarshaledReason
legalFEResultTyCon tc
  | tc == unitTyCon         = IsValid
  | otherwise               = boxedMarshalableTyCon tc

legalOutgoingTyCon :: DynFlags -> Safety -> TyCon -> Validity' TypeCannotBeMarshaledReason
-- Checks validity of types going from Haskell -> external world
legalOutgoingTyCon dflags _ tc
  = marshalableTyCon dflags tc

-- Check for marshalability of a primitive type.
-- We exclude lifted types such as RealWorld and TYPE.
-- They can technically appear in types, e.g.
-- f :: RealWorld -> TYPE LiftedRep -> RealWorld
--   f x _ = x
-- but there are no values of type RealWorld or TYPE LiftedRep,
-- so it doesn't make sense to use them in FFI.
marshalablePrimTyCon :: TyCon -> Bool marshalablePrimTyCon tc = isPrimTyCon tc && not (isLiftedTypeKind (tyConResKind tc)) marshalableTyCon :: DynFlags -> TyCon -> Validity' TypeCannotBeMarshaledReason marshalableTyCon dflags tc | marshalablePrimTyCon tc , not (null (tyConPrimRep tc)) -- Note [Marshalling void] = validIfUnliftedFFITypes dflags | otherwise = boxedMarshalableTyCon tc boxedMarshalableTyCon :: TyCon -> Validity' TypeCannotBeMarshaledReason boxedMarshalableTyCon tc | getUnique tc `elem` [ intTyConKey, int8TyConKey, int16TyConKey , int32TyConKey, int64TyConKey , wordTyConKey, word8TyConKey, word16TyConKey , word32TyConKey, word64TyConKey , floatTyConKey, doubleTyConKey , ptrTyConKey, funPtrTyConKey , charTyConKey , stablePtrTyConKey , boolTyConKey ] = IsValid | otherwise = NotValid NotABoxedMarshalableTyCon legalFIPrimArgTyCon :: DynFlags -> TyCon -> Validity' TypeCannotBeMarshaledReason -- Check args of 'foreign import prim', only allow simple unlifted types. legalFIPrimArgTyCon dflags tc | marshalablePrimTyCon tc = validIfUnliftedFFITypes dflags | otherwise = NotValid NotSimpleUnliftedType legalFIPrimResultTyCon :: DynFlags -> TyCon -> Validity' TypeCannotBeMarshaledReason -- Check result type of 'foreign import prim'. Allow simple unlifted -- types and also unboxed tuple and sum result types. legalFIPrimResultTyCon dflags tc | marshalablePrimTyCon tc , not (null (tyConPrimRep tc)) -- Note [Marshalling void] = validIfUnliftedFFITypes dflags | isUnboxedTupleTyCon tc || isUnboxedSumTyCon tc = validIfUnliftedFFITypes dflags | otherwise = NotValid $ NotSimpleUnliftedType validIfUnliftedFFITypes :: DynFlags -> Validity' TypeCannotBeMarshaledReason validIfUnliftedFFITypes dflags | xopt LangExt.UnliftedFFITypes dflags = IsValid | otherwise = NotValid UnliftedFFITypesNeeded {- Note [Marshalling void] ~~~~~~~~~~~~~~~~~~~~~~~ We don't treat State# (whose PrimRep is VoidRep) as marshalable. 
In turn that means you can't write foreign import foo :: Int -> State# RealWorld Reason: the back end falls over with panic "primRepHint:VoidRep"; and there is no compelling reason to permit it -} {- ************************************************************************ * * The "Paterson size" of a type * * ************************************************************************ -} {- Note [Paterson conditions on PredTypes] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are considering whether *class* constraints terminate (see Note [Paterson conditions]). Precisely, the Paterson conditions would have us check that "the constraint has fewer constructors and variables (taken together and counting repetitions) than the head.". However, we can be a bit more refined by looking at which kind of constraint this actually is. There are two main tricks: 1. It seems like it should be OK not to count the tuple type constructor for a PredType like (Show a, Eq a) :: Constraint, since we don't count the "implicit" tuple in the ThetaType itself. In fact, the Paterson test just checks *each component* of the top level ThetaType against the size bound, one at a time. By analogy, it should be OK to return the size of the *largest* tuple component as the size of the whole tuple. 2. Once we get into an implicit parameter or equality we can't get back to a class constraint, so it's safe to say "size 0". See #4200. NB: we don't want to detect PredTypes in sizeType (and then call sizePred on them), or we might get an infinite loop if that PredType is irreducible. See #5581. 
-} type TypeSize = IntWithInf sizeType :: Type -> TypeSize -- Size of a type: the number of variables and constructors -- Ignore kinds altogether sizeType = go where go ty | Just exp_ty <- tcView ty = go exp_ty go (TyVarTy {}) = 1 go (TyConApp tc tys) | isTypeFamilyTyCon tc = infinity -- Type-family applications can -- expand to any arbitrary size | otherwise = sizeTypes (filterOutInvisibleTypes tc tys) + 1 -- Why filter out invisible args? I suppose any -- size ordering is sound, but why is this better? -- I came across this when investigating #14010. go (LitTy {}) = 1 go (FunTy _ w arg res) = go w + go arg + go res + 1 go (AppTy fun arg) = go fun + go arg go (ForAllTy (Bndr tv vis) ty) | isVisibleArgFlag vis = go (tyVarKind tv) + go ty + 1 | otherwise = go ty + 1 go (CastTy ty _) = go ty go (CoercionTy {}) = 0 sizeTypes :: [Type] -> TypeSize sizeTypes tys = sum (map sizeType tys) ----------------------------------------------------------------------------------- ----------------------------------------------------------------------------------- ----------------------- -- | For every arg a tycon can take, the returned list says True if the argument -- is taken visibly, and False otherwise. Ends with an infinite tail of Trues to -- allow for oversaturation. tcTyConVisibilities :: TyCon -> [Bool] tcTyConVisibilities tc = tc_binder_viss ++ tc_return_kind_viss ++ repeat True where tc_binder_viss = map isVisibleTyConBinder (tyConBinders tc) tc_return_kind_viss = map isVisibleBinder (fst $ tcSplitPiTys (tyConResKind tc)) -- | If the tycon is applied to the types, is the next argument visible? isNextTyConArgVisible :: TyCon -> [Type] -> Bool isNextTyConArgVisible tc tys = tcTyConVisibilities tc `getNth` length tys -- | Should this type be applied to a visible argument? 
isNextArgVisible :: TcType -> Bool isNextArgVisible ty | Just (bndr, _) <- tcSplitPiTy_maybe ty = isVisibleBinder bndr | otherwise = True -- this second case might happen if, say, we have an unzonked TauTv. -- But TauTvs can't range over types that take invisible arguments ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Annotations.hs0000644000000000000000000001177214472400113021601 0ustar0000000000000000-- | -- Support for source code annotation feature of GHC. That is the ANN pragma. -- -- (c) The University of Glasgow 2006 -- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -- {-# LANGUAGE DeriveFunctor #-} module GHC.Types.Annotations ( -- * Main Annotation data types Annotation(..), AnnPayload, AnnTarget(..), CoreAnnTarget, -- * AnnEnv for collecting and querying Annotations AnnEnv, mkAnnEnv, extendAnnEnvList, plusAnnEnv, emptyAnnEnv, findAnns, findAnnsByTypeRep, deserializeAnns ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Unit.Module ( Module ) import GHC.Unit.Module.Env import GHC.Types.Name.Env import GHC.Types.Name import GHC.Utils.Outputable import GHC.Serialized import Control.Monad import Data.Maybe import Data.Typeable import Data.Word ( Word8 ) -- | Represents an annotation after it has been sufficiently desugared from -- it's initial form of 'GHC.Hs.Decls.AnnDecl' data Annotation = Annotation { ann_target :: CoreAnnTarget, -- ^ The target of the annotation ann_value :: AnnPayload } type AnnPayload = Serialized -- ^ The "payload" of an annotation -- allows recovery of its value at a given type, -- and can be persisted to an interface file -- | An annotation target data AnnTarget name = NamedTarget name -- ^ We are annotating something with a name: -- a type or identifier | ModuleTarget Module -- ^ We are annotating a particular module deriving (Functor) -- | The kind of annotation target found in the middle end of the compiler type CoreAnnTarget = AnnTarget Name instance Outputable name => Outputable (AnnTarget name) where 
ppr (NamedTarget nm) = text "Named target" <+> ppr nm ppr (ModuleTarget mod) = text "Module target" <+> ppr mod instance Binary name => Binary (AnnTarget name) where put_ bh (NamedTarget a) = do putByte bh 0 put_ bh a put_ bh (ModuleTarget a) = do putByte bh 1 put_ bh a get bh = do h <- getByte bh case h of 0 -> liftM NamedTarget $ get bh _ -> liftM ModuleTarget $ get bh instance Outputable Annotation where ppr ann = ppr (ann_target ann) -- | A collection of annotations data AnnEnv = MkAnnEnv { ann_mod_env :: !(ModuleEnv [AnnPayload]) , ann_name_env :: !(NameEnv [AnnPayload]) } -- | An empty annotation environment. emptyAnnEnv :: AnnEnv emptyAnnEnv = MkAnnEnv emptyModuleEnv emptyNameEnv -- | Construct a new annotation environment that contains the list of -- annotations provided. mkAnnEnv :: [Annotation] -> AnnEnv mkAnnEnv = extendAnnEnvList emptyAnnEnv -- | Add the given annotation to the environment. extendAnnEnvList :: AnnEnv -> [Annotation] -> AnnEnv extendAnnEnvList env = foldl' extendAnnEnv env extendAnnEnv :: AnnEnv -> Annotation -> AnnEnv extendAnnEnv (MkAnnEnv mod_env name_env) (Annotation tgt payload) = case tgt of NamedTarget name -> MkAnnEnv mod_env (extendNameEnv_C (++) name_env name [payload]) ModuleTarget mod -> MkAnnEnv (extendModuleEnvWith (++) mod_env mod [payload]) name_env -- | Union two annotation environments. plusAnnEnv :: AnnEnv -> AnnEnv -> AnnEnv plusAnnEnv a b = MkAnnEnv { ann_mod_env = plusModuleEnv_C (++) (ann_mod_env a) (ann_mod_env b) , ann_name_env = plusNameEnv_C (++) (ann_name_env a) (ann_name_env b) } -- | Find the annotations attached to the given target as 'Typeable' -- values of your choice. If no deserializer is specified, -- only transient annotations will be returned. findAnns :: Typeable a => ([Word8] -> a) -> AnnEnv -> CoreAnnTarget -> [a] findAnns deserialize env = mapMaybe (fromSerialized deserialize) . findAnnPayloads env -- | Find the annotations attached to the given target as 'Typeable' -- values of your choice. 
If no deserializer is specified, -- only transient annotations will be returned. findAnnsByTypeRep :: AnnEnv -> CoreAnnTarget -> TypeRep -> [[Word8]] findAnnsByTypeRep env target tyrep = [ ws | Serialized tyrep' ws <- findAnnPayloads env target , tyrep' == tyrep ] -- | Find payloads for the given 'CoreAnnTarget' in an 'AnnEnv'. findAnnPayloads :: AnnEnv -> CoreAnnTarget -> [AnnPayload] findAnnPayloads env target = case target of ModuleTarget mod -> lookupWithDefaultModuleEnv (ann_mod_env env) [] mod NamedTarget name -> fromMaybe [] $ lookupNameEnv (ann_name_env env) name -- | Deserialize all annotations of a given type. This happens lazily, that is -- no deserialization will take place until the [a] is actually demanded and -- the [a] can also be empty (the UniqFM is not filtered). deserializeAnns :: Typeable a => ([Word8] -> a) -> AnnEnv -> (ModuleEnv [a], NameEnv [a]) deserializeAnns deserialize env = ( mapModuleEnv deserAnns (ann_mod_env env) , mapNameEnv deserAnns (ann_name_env env) ) where deserAnns = mapMaybe (fromSerialized deserialize) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Avail.hs0000644000000000000000000003360014472400113020332 0ustar0000000000000000 {-# LANGUAGE DeriveDataTypeable #-} -- -- (c) The University of Glasgow -- module GHC.Types.Avail ( Avails, AvailInfo(..), avail, availField, availTC, availsToNameSet, availsToNameSetWithSelectors, availsToNameEnv, availExportsDecl, availName, availGreName, availNames, availNonFldNames, availNamesWithSelectors, availFlds, availGreNames, availSubordinateGreNames, stableAvailCmp, plusAvail, trimAvail, filterAvail, filterAvails, nubAvails, GreName(..), greNameMangledName, greNamePrintableName, greNameSrcSpan, greNameFieldLabel, partitionGreNames, stableGreNameCmp, ) where import GHC.Prelude import GHC.Types.Name import GHC.Types.Name.Env import GHC.Types.Name.Set import GHC.Types.SrcLoc import GHC.Types.FieldLabel import GHC.Utils.Binary import GHC.Data.List.SetOps import GHC.Utils.Outputable import 
GHC.Utils.Panic import GHC.Utils.Misc import GHC.Utils.Constants (debugIsOn) import Data.Data ( Data ) import Data.Either ( partitionEithers ) import Data.List ( find ) import Data.Maybe -- ----------------------------------------------------------------------------- -- The AvailInfo type -- | Records what things are \"available\", i.e. in scope data AvailInfo -- | An ordinary identifier in scope, or a field label without a parent type -- (see Note [Representing pattern synonym fields in AvailInfo]). = Avail GreName -- | A type or class in scope -- -- The __AvailTC Invariant__: If the type or class is itself to be in scope, -- it must be /first/ in this list. Thus, typically: -- -- > AvailTC Eq [Eq, ==, \/=] | AvailTC Name -- ^ The name of the type or class [GreName] -- ^ The available pieces of type or class -- (see Note [Representing fields in AvailInfo]). deriving ( Eq -- ^ Used when deciding if the interface has changed , Data ) -- | A collection of 'AvailInfo' - several things that are \"available\" type Avails = [AvailInfo] {- Note [Representing fields in AvailInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also Note [FieldLabel] in GHC.Types.FieldLabel. When -XDuplicateRecordFields is disabled (the normal case), a datatype like data T = MkT { foo :: Int } gives rise to the AvailInfo AvailTC T [T, MkT, FieldLabel "foo" NoDuplicateRecordFields FieldSelectors foo] whereas if -XDuplicateRecordFields is enabled it gives AvailTC T [T, MkT, FieldLabel "foo" DuplicateRecordFields FieldSelectors $sel:foo:MkT] where the label foo does not match the selector name $sel:foo:MkT. The labels in a field list are not necessarily unique: data families allow the same parent (the family tycon) to have multiple distinct fields with the same label. 
For example, data family F a data instance F Int = MkFInt { foo :: Int } data instance F Bool = MkFBool { foo :: Bool} gives rise to AvailTC F [ F, MkFInt, MkFBool , FieldLabel "foo" DuplicateRecordFields FieldSelectors $sel:foo:MkFInt , FieldLabel "foo" DuplicateRecordFields FieldSelectors $sel:foo:MkFBool ] Moreover, note that the flHasDuplicateRecordFields or flFieldSelectors flags need not be the same for all the elements of the list. In the example above, this occurs if the two data instances are defined in different modules, with different states of the `-XDuplicateRecordFields` or `-XNoFieldSelectors` extensions. Thus it is possible to have AvailTC F [ F, MkFInt, MkFBool , FieldLabel "foo" DuplicateRecordFields FieldSelectors $sel:foo:MkFInt , FieldLabel "foo" NoDuplicateRecordFields FieldSelectors foo ] If the two data instances are defined in different modules, both without `-XDuplicateRecordFields` or `-XNoFieldSelectors`, it will be impossible to export them from the same module (even with `-XDuplicateRecordfields` enabled), because they would be represented identically. The workaround here is to enable `-XDuplicateRecordFields` or `-XNoFieldSelectors` on the defining modules. See also #13352. Note [Representing pattern synonym fields in AvailInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Record pattern synonym fields cannot be represented using AvailTC like fields of normal record types (see Note [Representing fields in AvailInfo]), because they do not always have a parent type constructor. So we represent them using the Avail constructor, with a NormalGreName that carries the underlying FieldLabel. 
Thus under -XDuplicateRecordFields -XPatternSynoynms, the declaration pattern MkFoo{f} = Bar f gives rise to the AvailInfo Avail (NormalGreName MkFoo) Avail (FieldGreName (FieldLabel "f" True $sel:f:MkFoo)) However, if `f` is bundled with a type constructor `T` by using `T(MkFoo,f)` in an export list, then whenever `f` is imported the parent will be `T`, represented as AvailTC T [ NormalGreName T , NormalGreName MkFoo , FieldGreName (FieldLabel "f" True $sel:f:MkFoo) ] See also Note [GreNames] in GHC.Types.Name.Reader. -} -- | Compare lexicographically stableAvailCmp :: AvailInfo -> AvailInfo -> Ordering stableAvailCmp (Avail c1) (Avail c2) = c1 `stableGreNameCmp` c2 stableAvailCmp (Avail {}) (AvailTC {}) = LT stableAvailCmp (AvailTC n ns) (AvailTC m ms) = (n `stableNameCmp` m) `thenCmp` (cmpList stableGreNameCmp ns ms) stableAvailCmp (AvailTC {}) (Avail {}) = GT stableGreNameCmp :: GreName -> GreName -> Ordering stableGreNameCmp (NormalGreName n1) (NormalGreName n2) = n1 `stableNameCmp` n2 stableGreNameCmp (NormalGreName {}) (FieldGreName {}) = LT stableGreNameCmp (FieldGreName f1) (FieldGreName f2) = flSelector f1 `stableNameCmp` flSelector f2 stableGreNameCmp (FieldGreName {}) (NormalGreName {}) = GT avail :: Name -> AvailInfo avail n = Avail (NormalGreName n) availField :: FieldLabel -> AvailInfo availField fl = Avail (FieldGreName fl) availTC :: Name -> [Name] -> [FieldLabel] -> AvailInfo availTC n ns fls = AvailTC n (map NormalGreName ns ++ map FieldGreName fls) -- ----------------------------------------------------------------------------- -- Operations on AvailInfo availsToNameSet :: [AvailInfo] -> NameSet availsToNameSet avails = foldr add emptyNameSet avails where add avail set = extendNameSetList set (availNames avail) availsToNameSetWithSelectors :: [AvailInfo] -> NameSet availsToNameSetWithSelectors avails = foldr add emptyNameSet avails where add avail set = extendNameSetList set (availNamesWithSelectors avail) availsToNameEnv :: [AvailInfo] -> 
NameEnv AvailInfo availsToNameEnv avails = foldr add emptyNameEnv avails where add avail env = extendNameEnvList env (zip (availNames avail) (repeat avail)) -- | Does this 'AvailInfo' export the parent decl? This depends on the -- invariant that the parent is first if it appears at all. availExportsDecl :: AvailInfo -> Bool availExportsDecl (AvailTC ty_name names) | n : _ <- names = NormalGreName ty_name == n | otherwise = False availExportsDecl _ = True -- | Just the main name made available, i.e. not the available pieces -- of type or class brought into scope by the 'AvailInfo' availName :: AvailInfo -> Name availName (Avail n) = greNameMangledName n availName (AvailTC n _) = n availGreName :: AvailInfo -> GreName availGreName (Avail c) = c availGreName (AvailTC n _) = NormalGreName n -- | All names made available by the availability information (excluding overloaded selectors) availNames :: AvailInfo -> [Name] availNames (Avail c) = childNonOverloadedNames c availNames (AvailTC _ cs) = concatMap childNonOverloadedNames cs childNonOverloadedNames :: GreName -> [Name] childNonOverloadedNames (NormalGreName n) = [n] childNonOverloadedNames (FieldGreName fl) = [ flSelector fl | not (flIsOverloaded fl) ] -- | All names made available by the availability information (including overloaded selectors) availNamesWithSelectors :: AvailInfo -> [Name] availNamesWithSelectors (Avail c) = [greNameMangledName c] availNamesWithSelectors (AvailTC _ cs) = map greNameMangledName cs -- | Names for non-fields made available by the availability information availNonFldNames :: AvailInfo -> [Name] availNonFldNames (Avail (NormalGreName n)) = [n] availNonFldNames (Avail (FieldGreName {})) = [] availNonFldNames (AvailTC _ ns) = mapMaybe f ns where f (NormalGreName n) = Just n f (FieldGreName {}) = Nothing -- | Fields made available by the availability information availFlds :: AvailInfo -> [FieldLabel] availFlds (Avail c) = maybeToList (greNameFieldLabel c) availFlds (AvailTC _ cs) = 
mapMaybe greNameFieldLabel cs -- | Names and fields made available by the availability information. availGreNames :: AvailInfo -> [GreName] availGreNames (Avail c) = [c] availGreNames (AvailTC _ cs) = cs -- | Names and fields made available by the availability information, other than -- the main decl itself. availSubordinateGreNames :: AvailInfo -> [GreName] availSubordinateGreNames (Avail {}) = [] availSubordinateGreNames avail@(AvailTC _ ns) | availExportsDecl avail = tail ns | otherwise = ns -- | Used where we may have an ordinary name or a record field label. -- See Note [GreNames] in GHC.Types.Name.Reader. data GreName = NormalGreName Name | FieldGreName FieldLabel deriving (Data, Eq) instance Outputable GreName where ppr (NormalGreName n) = ppr n ppr (FieldGreName fl) = ppr fl instance HasOccName GreName where occName (NormalGreName n) = occName n occName (FieldGreName fl) = occName fl -- | A 'Name' for internal use, but not for output to the user. For fields, the -- 'OccName' will be the selector. See Note [GreNames] in GHC.Types.Name.Reader. greNameMangledName :: GreName -> Name greNameMangledName (NormalGreName n) = n greNameMangledName (FieldGreName fl) = flSelector fl -- | A 'Name' suitable for output to the user. For fields, the 'OccName' will -- be the field label. See Note [GreNames] in GHC.Types.Name.Reader. greNamePrintableName :: GreName -> Name greNamePrintableName (NormalGreName n) = n greNamePrintableName (FieldGreName fl) = fieldLabelPrintableName fl greNameSrcSpan :: GreName -> SrcSpan greNameSrcSpan (NormalGreName n) = nameSrcSpan n greNameSrcSpan (FieldGreName fl) = nameSrcSpan (flSelector fl) greNameFieldLabel :: GreName -> Maybe FieldLabel greNameFieldLabel (NormalGreName {}) = Nothing greNameFieldLabel (FieldGreName fl) = Just fl partitionGreNames :: [GreName] -> ([Name], [FieldLabel]) partitionGreNames = partitionEithers . 
map to_either where to_either (NormalGreName n) = Left n to_either (FieldGreName fl) = Right fl -- ----------------------------------------------------------------------------- -- Utility plusAvail :: AvailInfo -> AvailInfo -> AvailInfo plusAvail a1 a2 | debugIsOn && availName a1 /= availName a2 = pprPanic "GHC.Rename.Env.plusAvail names differ" (hsep [ppr a1,ppr a2]) plusAvail a1@(Avail {}) (Avail {}) = a1 plusAvail (AvailTC _ []) a2@(AvailTC {}) = a2 plusAvail a1@(AvailTC {}) (AvailTC _ []) = a1 plusAvail (AvailTC n1 (s1:ss1)) (AvailTC n2 (s2:ss2)) = case (NormalGreName n1==s1, NormalGreName n2==s2) of -- Maintain invariant the parent is first (True,True) -> AvailTC n1 (s1 : (ss1 `unionLists` ss2)) (True,False) -> AvailTC n1 (s1 : (ss1 `unionLists` (s2:ss2))) (False,True) -> AvailTC n1 (s2 : ((s1:ss1) `unionLists` ss2)) (False,False) -> AvailTC n1 ((s1:ss1) `unionLists` (s2:ss2)) plusAvail a1 a2 = pprPanic "GHC.Rename.Env.plusAvail" (hsep [ppr a1,ppr a2]) -- | trims an 'AvailInfo' to keep only a single name trimAvail :: AvailInfo -> Name -> AvailInfo trimAvail avail@(Avail {}) _ = avail trimAvail avail@(AvailTC n ns) m = case find ((== m) . greNameMangledName) ns of Just c -> AvailTC n [c] Nothing -> pprPanic "trimAvail" (hsep [ppr avail, ppr m]) -- | filters 'AvailInfo's by the given predicate filterAvails :: (Name -> Bool) -> [AvailInfo] -> [AvailInfo] filterAvails keep avails = foldr (filterAvail keep) [] avails -- | filters an 'AvailInfo' by the given predicate filterAvail :: (Name -> Bool) -> AvailInfo -> [AvailInfo] -> [AvailInfo] filterAvail keep ie rest = case ie of Avail c | keep (greNameMangledName c) -> ie : rest | otherwise -> rest AvailTC tc cs -> let cs' = filter (keep . 
greNameMangledName) cs in if null cs' then rest else AvailTC tc cs' : rest -- | Combines 'AvailInfo's from the same family -- 'avails' may have several items with the same availName -- E.g import Ix( Ix(..), index ) -- will give Ix(Ix,index,range) and Ix(index) -- We want to combine these; addAvail does that nubAvails :: [AvailInfo] -> [AvailInfo] nubAvails avails = eltsDNameEnv (foldl' add emptyDNameEnv avails) where add env avail = extendDNameEnv_C plusAvail env (availName avail) avail -- ----------------------------------------------------------------------------- -- Printing instance Outputable AvailInfo where ppr = pprAvail pprAvail :: AvailInfo -> SDoc pprAvail (Avail n) = ppr n pprAvail (AvailTC n ns) = ppr n <> braces (fsep (punctuate comma (map ppr ns))) instance Binary AvailInfo where put_ bh (Avail aa) = do putByte bh 0 put_ bh aa put_ bh (AvailTC ab ac) = do putByte bh 1 put_ bh ab put_ bh ac get bh = do h <- getByte bh case h of 0 -> do aa <- get bh return (Avail aa) _ -> do ab <- get bh ac <- get bh return (AvailTC ab ac) instance Binary GreName where put_ bh (NormalGreName aa) = do putByte bh 0 put_ bh aa put_ bh (FieldGreName ab) = do putByte bh 1 put_ bh ab get bh = do h <- getByte bh case h of 0 -> do aa <- get bh return (NormalGreName aa) _ -> do ab <- get bh return (FieldGreName ab) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Basic.hs0000644000000000000000000021774314472400113020333 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1997-1998 \section[BasicTypes]{Miscellaneous types} This module defines a miscellaneously collection of very simple types that \begin{itemize} \item have no other obvious home \item don't depend on any other complicated types \item are used in more than one "part" of the compiler \end{itemize} -} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# OPTIONS_GHC 
-Wno-incomplete-record-updates #-} module GHC.Types.Basic ( LeftOrRight(..), pickLR, ConTag, ConTagZ, fIRST_TAG, Arity, RepArity, JoinArity, FullArgCount, Alignment, mkAlignment, alignmentOf, alignmentBytes, PromotionFlag(..), isPromoted, FunctionOrData(..), RecFlag(..), isRec, isNonRec, boolToRecFlag, Origin(..), isGenerated, RuleName, pprRuleName, TopLevelFlag(..), isTopLevel, isNotTopLevel, OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe, hasOverlappingFlag, hasOverlappableFlag, hasIncoherentFlag, Boxity(..), isBoxed, CbvMark(..), isMarkedCbv, PprPrec(..), topPrec, sigPrec, opPrec, funPrec, starPrec, appPrec, maybeParen, TupleSort(..), tupleSortBoxity, boxityTupleSort, tupleParens, UnboxedTupleOrSum(..), unboxedTupleOrSumExtension, sumParens, pprAlternative, -- ** The OneShotInfo type OneShotInfo(..), noOneShotInfo, hasNoOneShotInfo, isOneShotInfo, bestOneShot, worstOneShot, OccInfo(..), noOccInfo, seqOccInfo, zapFragileOcc, isOneOcc, isDeadOcc, isStrongLoopBreaker, isWeakLoopBreaker, isManyOccs, isNoOccInfo, strongLoopBreaker, weakLoopBreaker, InsideLam(..), BranchCount, oneBranch, InterestingCxt(..), TailCallInfo(..), tailCallInfo, zapOccTailCallInfo, isAlwaysTailCalled, EP(..), DefMethSpec(..), SwapFlag(..), flipSwap, unSwap, isSwapped, CompilerPhase(..), PhaseNum, beginPhase, nextPhase, laterPhase, Activation(..), isActive, competesWith, isNeverActive, isAlwaysActive, activeInFinalPhase, activateAfterInitial, activateDuringFinal, activeAfter, RuleMatchInfo(..), isConLike, isFunLike, InlineSpec(..), noUserInlineSpec, InlinePragma(..), defaultInlinePragma, alwaysInlinePragma, neverInlinePragma, dfunInlinePragma, isDefaultInlinePragma, isInlinePragma, isInlinablePragma, isNoInlinePragma, isOpaquePragma, isAnyInlinePragma, alwaysInlineConLikePragma, inlinePragmaSource, inlinePragmaName, inlineSpecSource, inlinePragmaSpec, inlinePragmaSat, inlinePragmaActivation, inlinePragmaRuleMatchInfo, setInlinePragmaActivation, setInlinePragmaRuleMatchInfo, pprInline, 
pprInlineDebug, SuccessFlag(..), succeeded, failed, successIf, IntWithInf, infinity, treatZeroAsInf, subWithInf, mkIntWithInf, intGtLimit, SpliceExplicitFlag(..), TypeOrKind(..), isTypeLevel, isKindLevel, Levity(..), mightBeLifted, mightBeUnlifted, ExprOrPat(..), NonStandardDefaultingStrategy(..), DefaultingStrategy(..), defaultNonStandardTyVars, ForeignSrcLang (..) ) where import GHC.Prelude import GHC.ForeignSrcLang import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Binary import GHC.Types.SourceText import qualified GHC.LanguageExtensions as LangExt import Data.Data import qualified Data.Semigroup as Semi {- ************************************************************************ * * Binary choice * * ************************************************************************ -} data LeftOrRight = CLeft | CRight deriving( Eq, Data ) pickLR :: LeftOrRight -> (a,a) -> a pickLR CLeft (l,_) = l pickLR CRight (_,r) = r instance Outputable LeftOrRight where ppr CLeft = text "Left" ppr CRight = text "Right" instance Binary LeftOrRight where put_ bh CLeft = putByte bh 0 put_ bh CRight = putByte bh 1 get bh = do { h <- getByte bh ; case h of 0 -> return CLeft _ -> return CRight } {- ************************************************************************ * * \subsection[Arity]{Arity} * * ************************************************************************ -} -- | The number of value arguments that can be applied to a value before it does -- "real work". So: -- fib 100 has arity 0 -- \x -> fib x has arity 1 -- See also Note [Definition of arity] in "GHC.Core.Opt.Arity" type Arity = Int -- | Representation Arity -- -- The number of represented arguments that can be applied to a value before it does -- "real work". So: -- fib 100 has representation arity 0 -- \x -> fib x has representation arity 1 -- \(# x, y #) -> fib (x + y) has representation arity 2 type RepArity = Int -- | The number of arguments that a join point takes. 
Unlike the arity of a -- function, this is a purely syntactic property and is fixed when the join -- point is created (or converted from a value). Both type and value arguments -- are counted. type JoinArity = Int -- | FullArgCount is the number of type or value arguments in an application, -- or the number of type or value binders in a lambda. Note: it includes -- both type and value arguments! type FullArgCount = Int {- ************************************************************************ * * Constructor tags * * ************************************************************************ -} -- | A *one-index* constructor tag -- -- Type of the tags associated with each constructor possibility or superclass -- selector type ConTag = Int -- | A *zero-indexed* constructor tag type ConTagZ = Int fIRST_TAG :: ConTag -- ^ Tags are allocated from here for real constructors -- or for superclass selectors fIRST_TAG = 1 {- ************************************************************************ * * \subsection[Alignment]{Alignment} * * ************************************************************************ -} -- | A power-of-two alignment newtype Alignment = Alignment { alignmentBytes :: Int } deriving (Eq, Ord) -- Builds an alignment, throws on non power of 2 input. This is not -- ideal, but convenient for internal use and better then silently -- passing incorrect data. mkAlignment :: Int -> Alignment mkAlignment n | n == 1 = Alignment 1 | n == 2 = Alignment 2 | n == 4 = Alignment 4 | n == 8 = Alignment 8 | n == 16 = Alignment 16 | n == 32 = Alignment 32 | n == 64 = Alignment 64 | n == 128 = Alignment 128 | n == 256 = Alignment 256 | n == 512 = Alignment 512 | otherwise = panic "mkAlignment: received either a non power of 2 argument or > 512" -- Calculates an alignment of a number. x is aligned at N bytes means -- the remainder from x / N is zero. Currently, interested in N <= 8, -- but can be expanded to N <= 16 or N <= 32 if used within SSE or AVX -- context. 
-- Compute the alignment of a value from its low-order bits (N <= 8 only;
-- widen the mask if SSE/AVX-sized alignments are ever needed).
alignmentOf :: Int -> Alignment
alignmentOf x = case x .&. 7 of
  0 -> Alignment 8
  4 -> Alignment 4
  2 -> Alignment 2
  _ -> Alignment 1
  -- NB: x ≡ 6 (mod 8) falls through to alignment 1 even though 2 would
  -- also be correct; under-approximating an alignment is always safe.

instance Outputable Alignment where
  ppr (Alignment m) = ppr m

instance OutputableP env Alignment where
  pdoc _ = ppr

{-
************************************************************************
*                                                                      *
                One-shot information
*                                                                      *
************************************************************************
-}

{- Note [OneShotInfo overview]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lambda-bound Ids (and only lambda-bound Ids) may be decorated with
one-shot info.  The idea is that if we see
    (\x{one-shot}. e)
it means that this lambda will only be applied once.  In particular
that means we can float redexes under the lambda without losing
work.  For example, consider
    let t = expensive in
    (\x{one-shot}. case t of { True -> ...; False -> ... })

Because it's a one-shot lambda, we can safely inline t, giving
    (\x{one_shot}. case expensive of { True -> ...; False -> ... })

Moving parts:

* Usage analysis, performed as part of demand-analysis, finds
  out whether functions call their argument once.  Consider
     f g x = Just (case g x of { ... })

  Here 'f' is lazy in 'g', but it guarantees to call it no
  more than once.  So g will get a C1(U) usage demand.

* Occurrence analysis propagates this usage information
  (in the demand signature of a function) to its calls.
  Example, given 'f' above
     f (\x.e) blah

  Since f's demand signature says it has a C1(U) usage demand on its
  first argument, the occurrence analyser sets the \x to be one-shot.
  This is done via the occ_one_shots field of OccEnv.

* Float-in and float-out take account of one-shot-ness

* Occurrence analysis doesn't set "inside-lam" for occurrences inside
  a one-shot lambda

Other notes

* A one-shot lambda can use its argument many times.  To elaborate
  the example above
    let t = expensive in
    (\x{one-shot}. case t of { True -> x+x; False -> x*x })

  Here the '\x' is one-shot, which justifies inlining 't',
  but x is used many times.  That's absolutely fine.

* It's entirely possible to have
    (\x{one-shot}. \y{many-shot}. e)

  For example
    let t = expensive
        g = \x -> let v = x+t in
            \y -> x + v
    in map (g 5) xs

  Here the `\x` is a one-shot binder: `g` is applied to one argument
  exactly once.  And because the `\x` is one-shot, it would be fine to
  float that `let t = expensive` binding inside the `\x`.

  But the `\y` is most definitely not one-shot!
-}

-- | If the 'Id' is a lambda-bound variable then it may have lambda-bound
-- variable info. Sometimes we know whether the lambda binding this variable
-- is a "one-shot" lambda; that is, whether it is applied at most once.
--
-- This information may be useful in optimisation, as computations may
-- safely be floated inside such a lambda without risk of duplicating
-- work.
--
-- See also Note [OneShotInfo overview] above.
data OneShotInfo
  = NoOneShotInfo -- ^ No information
  | OneShotLam    -- ^ The lambda is applied at most once.
  deriving (Eq)

-- | It is always safe to assume that an 'Id' has no lambda-bound variable information
noOneShotInfo :: OneShotInfo
noOneShotInfo = NoOneShotInfo

isOneShotInfo, hasNoOneShotInfo :: OneShotInfo -> Bool
isOneShotInfo OneShotLam = True
isOneShotInfo _          = False

hasNoOneShotInfo NoOneShotInfo = True
hasNoOneShotInfo _             = False

worstOneShot, bestOneShot :: OneShotInfo -> OneShotInfo -> OneShotInfo
-- 'worstOneShot' is the meet: one-shot only if both arguments are.
worstOneShot NoOneShotInfo _  = NoOneShotInfo
worstOneShot OneShotLam    os = os

-- 'bestOneShot' is the join: one-shot if either argument is.
bestOneShot NoOneShotInfo os = os
bestOneShot OneShotLam    _  = OneShotLam

pprOneShotInfo :: OneShotInfo -> SDoc
pprOneShotInfo NoOneShotInfo = empty
pprOneShotInfo OneShotLam    = text "OneShot"

instance Outputable OneShotInfo where
  ppr = pprOneShotInfo

{-
************************************************************************
*                                                                      *
                Swap flag
*                                                                      *
************************************************************************
-}

data SwapFlag
  = NotSwapped  -- Args are: actual,   expected
  | IsSwapped   -- Args are: expected, actual

instance Outputable SwapFlag where
  ppr IsSwapped  = text "Is-swapped"
  ppr NotSwapped = text "Not-swapped"

flipSwap :: SwapFlag -> SwapFlag
flipSwap IsSwapped  = NotSwapped
flipSwap NotSwapped = IsSwapped

isSwapped :: SwapFlag -> Bool
isSwapped IsSwapped  = True
isSwapped NotSwapped = False

-- | Apply a two-argument function with its arguments in the order the
-- flag says they actually arrived.
unSwap :: SwapFlag -> (a->a->b) -> a -> a -> b
unSwap NotSwapped f a b = f a b
unSwap IsSwapped  f a b = f b a

{- *********************************************************************
*                                                                      *
                Promotion flag
*                                                                      *
********************************************************************* -}

-- | Is a TyCon a promoted data constructor or just a normal type constructor?
data PromotionFlag
  = NotPromoted
  | IsPromoted
  deriving ( Eq, Data )

isPromoted :: PromotionFlag -> Bool
isPromoted IsPromoted  = True
isPromoted NotPromoted = False

instance Outputable PromotionFlag where
  ppr NotPromoted = text "NotPromoted"
  ppr IsPromoted  = text "IsPromoted"

instance Binary PromotionFlag where
  put_ bh NotPromoted = putByte bh 0
  put_ bh IsPromoted  = putByte bh 1

  get bh = do
    n <- getByte bh
    case n of
      0 -> return NotPromoted
      1 -> return IsPromoted
      -- Fixed: the message used to contain a stray closing parenthesis
      _ -> fail "Binary(IsPromoted): fail"

{-
************************************************************************
*                                                                      *
\subsection[FunctionOrData]{FunctionOrData}
*                                                                      *
************************************************************************
-}

data FunctionOrData = IsFunction | IsData
  deriving (Eq, Ord, Data)

instance Outputable FunctionOrData where
  ppr IsFunction = text "(function)"
  ppr IsData     = text "(data)"

instance Binary FunctionOrData where
  put_ bh IsFunction = putByte bh 0
  put_ bh IsData     = putByte bh 1
  get bh = do
    h <- getByte bh
    case h of
      0 -> return IsFunction
      1 -> return IsData
      _ -> panic "Binary FunctionOrData"

{-
************************************************************************
*                                                                      *
                Rules
*                                                                      *
************************************************************************
-}

type RuleName = FastString

pprRuleName :: RuleName -> SDoc
pprRuleName rn = doubleQuotes (ftext rn)

{-
************************************************************************
*                                                                      *
\subsection[Top-level/local]{Top-level/not-top level flag}
*                                                                      *
************************************************************************
-}

data TopLevelFlag
  = TopLevel
  | NotTopLevel
  deriving Data

isTopLevel, isNotTopLevel :: TopLevelFlag -> Bool

isNotTopLevel NotTopLevel = True
isNotTopLevel TopLevel    = False

isTopLevel TopLevel    = True
isTopLevel NotTopLevel = False

instance Outputable TopLevelFlag where
  -- Fixed: both equations printed `text ""` (the angle-bracketed strings
  -- were evidently stripped by text extraction), which made the two cases
  -- indistinguishable in debug output.  Restored the upstream strings.
  ppr TopLevel    = text "<TopLevel>"
  ppr NotTopLevel = text "<NotTopLevel>"

{-
************************************************************************
*                                                                      *
                Boxity flag
*                                                                      *
************************************************************************
-}

data Boxity
  = Boxed
  | Unboxed
  deriving ( Eq, Data )

isBoxed :: Boxity -> Bool
isBoxed Boxed   = True
isBoxed Unboxed = False

instance Outputable Boxity where
  ppr Boxed   = text "Boxed"
  ppr Unboxed = text "Unboxed"

instance Binary Boxity where -- implemented via isBoxed-isomorphism to Bool
  put_ bh = put_ bh . isBoxed
  get bh  = do
    b <- get bh
    pure $ if b then Boxed else Unboxed

{-
************************************************************************
*                                                                      *
                Call by value flag
*                                                                      *
************************************************************************
-}

-- | Should an argument be passed evaluated *and* tagged.
data CbvMark = MarkedCbv | NotMarkedCbv
  deriving Eq

instance Outputable CbvMark where
  ppr MarkedCbv    = text "!"
  ppr NotMarkedCbv = text "~"

instance Binary CbvMark where
  put_ bh NotMarkedCbv = putByte bh 0
  put_ bh MarkedCbv    = putByte bh 1
  get bh = do
    h <- getByte bh
    case h of
      0 -> return NotMarkedCbv
      1 -> return MarkedCbv
      _ -> panic "Invalid binary format"

isMarkedCbv :: CbvMark -> Bool
isMarkedCbv MarkedCbv    = True
isMarkedCbv NotMarkedCbv = False

{-
************************************************************************
*                                                                      *
                Recursive/Non-Recursive flag
*                                                                      *
************************************************************************
-}

-- | Recursivity Flag
data RecFlag = Recursive
             | NonRecursive
             deriving( Eq, Data )

isRec :: RecFlag -> Bool
isRec Recursive    = True
isRec NonRecursive = False

isNonRec :: RecFlag -> Bool
isNonRec Recursive    = False
isNonRec NonRecursive = True

boolToRecFlag :: Bool -> RecFlag
boolToRecFlag True  = Recursive
boolToRecFlag False = NonRecursive

instance Outputable RecFlag where
  ppr Recursive    = text "Recursive"
  ppr NonRecursive = text "NonRecursive"

instance Binary RecFlag where
  put_ bh Recursive    = putByte bh 0
  put_ bh NonRecursive = putByte bh 1
  get bh = do
    h <- getByte bh
    case h of
      0 -> return Recursive
      _ -> return NonRecursive  -- deliberately total: any other tag decodes
                                -- as NonRecursive

{-
************************************************************************
*                                                                      *
                Code origin
*                                                                      *
************************************************************************
-}

data Origin = FromSource
            | Generated
            deriving( Eq, Data )

isGenerated :: Origin -> Bool
isGenerated Generated  = True
isGenerated FromSource = False

instance Outputable Origin where
  ppr FromSource = text "FromSource"
  ppr Generated  = text "Generated"

{-
************************************************************************
*                                                                      *
                Instance overlap flag
*                                                                      *
************************************************************************
-}

-- | The semantics allowed for overlapping instances for a particular
-- instance. See Note [Safe Haskell isSafeOverlap] (in "GHC.Core.InstEnv") for an
-- explanation of the `isSafeOverlap` field.
--
-- - 'GHC.Parser.Annotation.AnnKeywordId' :
--     'GHC.Parser.Annotation.AnnOpen' @'\{-\# OVERLAPPABLE'@ or
--                                     @'\{-\# OVERLAPPING'@ or
--                                     @'\{-\# OVERLAPS'@ or
--                                     @'\{-\# INCOHERENT'@,
--     'GHC.Parser.Annotation.AnnClose' @`\#-\}`@,

-- For details on above see Note [exact print annotations] in "GHC.Parser.Annotation"
data OverlapFlag = OverlapFlag
  { overlapMode   :: OverlapMode
  , isSafeOverlap :: Bool
  } deriving (Eq, Data)

setOverlapModeMaybe :: OverlapFlag -> Maybe OverlapMode -> OverlapFlag
setOverlapModeMaybe f Nothing  = f
setOverlapModeMaybe f (Just m) = f { overlapMode = m }

hasIncoherentFlag :: OverlapMode -> Bool
hasIncoherentFlag (Incoherent _) = True
hasIncoherentFlag _              = False

hasOverlappableFlag :: OverlapMode -> Bool
hasOverlappableFlag (Overlappable _) = True
hasOverlappableFlag (Overlaps     _) = True
hasOverlappableFlag (Incoherent   _) = True
hasOverlappableFlag _                = False

hasOverlappingFlag :: OverlapMode -> Bool
hasOverlappingFlag (Overlapping _) = True
hasOverlappingFlag (Overlaps    _) = True
hasOverlappingFlag (Incoherent  _) = True
hasOverlappingFlag _               = False

data OverlapMode  -- See Note [Rules for instance lookup] in GHC.Core.InstEnv
  = NoOverlap SourceText
                  -- See Note [Pragma source text]
    -- ^ This instance must not overlap another `NoOverlap` instance.
    -- However, it may be overlapped by `Overlapping` instances,
    -- and it may overlap `Overlappable` instances.

  | Overlappable SourceText
                  -- See Note [Pragma source text]
    -- ^ Silently ignore this instance if you find a
    -- more specific one that matches the constraint
    -- you are trying to resolve
    --
    -- Example: constraint (Foo [Int])
    --   instance                      Foo [Int]
    --   instance {-# OVERLAPPABLE #-} Foo [a]
    --
    -- Since the second instance has the Overlappable flag,
    -- the first instance will be chosen (otherwise
    -- it's ambiguous which to choose)

  | Overlapping SourceText
                  -- See Note [Pragma source text]
    -- ^ Silently ignore any more general instances that may be
    --   used to solve the constraint.
    --
    -- Example: constraint (Foo [Int])
    --   instance {-# OVERLAPPING #-} Foo [Int]
    --   instance                     Foo [a]
    --
    -- Since the first instance has the Overlapping flag,
    -- the second---more general---instance will be ignored (otherwise
    -- it is ambiguous which to choose)

  | Overlaps SourceText
                  -- See Note [Pragma source text]
    -- ^ Equivalent to having both `Overlapping` and `Overlappable` flags.

  | Incoherent SourceText
                  -- See Note [Pragma source text]
    -- ^ Behave like Overlappable and Overlapping, and in addition pick
    -- an arbitrary one if there are multiple matching candidates, and
    -- don't worry about later instantiation
    --
    -- Example: constraint (Foo [b])
    --   instance {-# INCOHERENT #-} Foo [Int]
    --   instance                    Foo [a]
    -- Without the Incoherent flag, we'd complain that
    -- instantiating 'b' would change which instance
    -- was chosen. See also Note [Incoherent instances] in "GHC.Core.InstEnv"
  deriving (Eq, Data)

instance Outputable OverlapFlag where
  ppr flag = ppr (overlapMode flag) <+> pprSafeOverlap (isSafeOverlap flag)

instance Outputable OverlapMode where
  ppr (NoOverlap    _) = empty
  ppr (Overlappable _) = text "[overlappable]"
  ppr (Overlapping  _) = text "[overlapping]"
  ppr (Overlaps     _) = text "[overlap ok]"
  ppr (Incoherent   _) = text "[incoherent]"

instance Binary OverlapMode where
  put_ bh (NoOverlap    s) = putByte bh 0 >> put_ bh s
  put_ bh (Overlaps     s) = putByte bh 1 >> put_ bh s
  put_ bh (Incoherent   s) = putByte bh 2 >> put_ bh s
  put_ bh (Overlapping  s) = putByte bh 3 >> put_ bh s
  put_ bh (Overlappable s) = putByte bh 4 >> put_ bh s
  get bh = do
    h <- getByte bh
    case h of
      0 -> NoOverlap    <$> get bh
      1 -> Overlaps     <$> get bh
      2 -> Incoherent   <$> get bh
      3 -> Overlapping  <$> get bh
      4 -> Overlappable <$> get bh
      _ -> panic ("get OverlapMode" ++ show h)

instance Binary OverlapFlag where
  put_ bh flag = do
    put_ bh (overlapMode flag)
    put_ bh (isSafeOverlap flag)
  get bh = do
    mode <- get bh
    safe <- get bh
    return OverlapFlag { overlapMode = mode, isSafeOverlap = safe }

pprSafeOverlap :: Bool -> SDoc
pprSafeOverlap True  = text "[safe]"
pprSafeOverlap False = empty

{-
************************************************************************
*                                                                      *
                Precedence
*                                                                      *
************************************************************************
-}

-- | A general-purpose pretty-printing precedence type.
newtype PprPrec = PprPrec Int deriving (Eq, Ord, Show)
-- See Note [Precedence in types]

topPrec, sigPrec, funPrec, opPrec, starPrec, appPrec :: PprPrec
topPrec  = PprPrec 0 -- No parens
sigPrec  = PprPrec 1 -- Explicit type signatures
funPrec  = PprPrec 2 -- Function args; no parens for constructor apps
                     -- See [Type operator precedence] for why both
                     -- funPrec and opPrec exist.
opPrec   = PprPrec 2 -- Infix operator
starPrec = PprPrec 3 -- Star syntax for the type of types, i.e. the * in (* -> *)
                     -- See Note [Star kind precedence]
appPrec  = PprPrec 4 -- Constructor args; no parens for atomic

-- | Wrap parens around the doc iff the context binds at least as
-- tightly as the thing being printed.
maybeParen :: PprPrec -> PprPrec -> SDoc -> SDoc
maybeParen ctxt_prec inner_prec pretty
  | ctxt_prec < inner_prec = pretty
  | otherwise              = parens pretty

{- Note [Precedence in types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Many pretty-printing functions have type
    ppr_ty :: PprPrec -> Type -> SDoc

The PprPrec gives the binding strength of the context.  For example, in
   T ty1 ty2
we will pretty-print 'ty1' and 'ty2' with the call
  (ppr_ty appPrec ty)
to indicate that the context is that of an argument of a TyConApp.

We use this consistently for Type and HsType.

Note [Type operator precedence]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We don't keep the fixity of type operators in the operator. So the
pretty printer follows the following precedence order:

   TyConPrec         Type constructor application
   TyOpPrec/FunPrec  Operator application and function arrow

We have funPrec and opPrec to represent the precedence of function
arrow and type operators respectively, but currently we implement
funPrec == opPrec, so that we don't distinguish the two. Reason:
it's hard to parse a type like
    a ~ b => c * d -> e - f

By treating opPrec = funPrec we end up with more parens
    (a ~ b) => (c * d) -> (e - f)

But the two are different constructors of PprPrec so we could make
(->) bind more or less tightly if we wanted.

Note [Star kind precedence]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We parenthesize the (*) kind to avoid two issues:

1. Printing invalid or incorrect code.
   For example, instead of  type F @(*) x = x
       GHC used to print    type F @*   x = x
   However, (@*) is a type operator, not a kind application.

2. Printing kinds that are correct but hard to read.
   Should  Either * Int  be read as  Either (*) Int
                               or as  (*) Either Int  ?
   This depends on whether -XStarIsType is enabled, but it would be
   easier if we didn't have to check for the flag when reading the code.

At the same time, we cannot parenthesize (*) blindly.
Consider this Haskell98 kind:          ((* -> *) -> *) -> *
With parentheses, it is less readable: (((*) -> (*)) -> (*)) -> (*)

The solution is to assign a special precedence to (*), 'starPrec', which is
higher than 'funPrec' but lower than 'appPrec':

    F * * *   becomes  F (*) (*) (*)
    F A * B   becomes  F A (*) B
    Proxy *   becomes  Proxy (*)
    a * -> *  becomes  a (*) -> *
-}

{-
************************************************************************
*                                                                      *
                Tuples
*                                                                      *
************************************************************************
-}

data TupleSort
  = BoxedTuple
  | UnboxedTuple
  | ConstraintTuple
  deriving( Eq, Data )

instance Outputable TupleSort where
  ppr ts = text $
    case ts of
      BoxedTuple      -> "BoxedTuple"
      UnboxedTuple    -> "UnboxedTuple"
      ConstraintTuple -> "ConstraintTuple"

instance Binary TupleSort where
  put_ bh BoxedTuple      = putByte bh 0
  put_ bh UnboxedTuple    = putByte bh 1
  put_ bh ConstraintTuple = putByte bh 2
  get bh = do
    h <- getByte bh
    case h of
      0 -> return BoxedTuple
      1 -> return UnboxedTuple
      _ -> return ConstraintTuple  -- tag 2 (and, defensively, anything else)

tupleSortBoxity :: TupleSort -> Boxity
tupleSortBoxity BoxedTuple      = Boxed
tupleSortBoxity UnboxedTuple    = Unboxed
tupleSortBoxity ConstraintTuple = Boxed

boxityTupleSort :: Boxity -> TupleSort
boxityTupleSort Boxed   = BoxedTuple
boxityTupleSort Unboxed = UnboxedTuple

tupleParens :: TupleSort -> SDoc -> SDoc
tupleParens BoxedTuple   p = parens p
tupleParens UnboxedTuple p = text "(#" <+> p <+> text "#)"
tupleParens ConstraintTuple p -- In debug-style write (% Eq a, Ord b %)
  = ifPprDebug (text "(%" <+> p <+> text "%)")
               (parens p)

{-
************************************************************************
*                                                                      *
                Sums
*                                                                      *
************************************************************************
-}

sumParens :: SDoc -> SDoc
sumParens p = text "(#" <+> p <+> text "#)"

-- | Pretty print an alternative in an unboxed sum e.g. "| a | |".
pprAlternative :: (a -> SDoc) -- ^ The pretty printing function to use
               -> a           -- ^ The thing to be pretty printed
               -> ConTag      -- ^ Alternative (one-based)
               -> Arity       -- ^ Arity
               -> SDoc        -- ^ 'SDoc' where the alternative has been pretty
                              -- printed and finally packed into a paragraph.
pprAlternative pp x alt arity
  = fsep (replicate (alt - 1) vbar ++ [pp x] ++ replicate (arity - alt) vbar)

-- | Are we dealing with an unboxed tuple or an unboxed sum?
--
-- Used when validity checking, see 'check_ubx_tuple_or_sum'.
data UnboxedTupleOrSum
  = UnboxedTupleType
  | UnboxedSumType
  deriving Eq

instance Outputable UnboxedTupleOrSum where
  ppr UnboxedTupleType = text "UnboxedTupleType"
  ppr UnboxedSumType   = text "UnboxedSumType"

-- | The language extension that must be enabled to use the given
-- unboxed-tuple-or-sum form.
unboxedTupleOrSumExtension :: UnboxedTupleOrSum -> LangExt.Extension
unboxedTupleOrSumExtension UnboxedTupleType = LangExt.UnboxedTuples
unboxedTupleOrSumExtension UnboxedSumType   = LangExt.UnboxedSums

{-
************************************************************************
*                                                                      *
\subsection[Generic]{Generic flag}
*                                                                      *
************************************************************************

This is the "Embedding-Projection pair" datatype, it contains two pieces
of code (normally either RenamedExpr's or Id's).  If we have such a pair
(EP from to), the idea is that 'from' and 'to' represent functions of
type
        from :: T -> Tring
        to   :: Tring -> T

And we should have
        to (from x) = x

T and Tring are arbitrary, but typically T is the 'main' type while
Tring is the 'representation' type.  (This just helps us remember
whether to use 'from' or 'to').
-}

-- | Embedding Projection pair
data EP a = EP { fromEP :: a,   -- :: T -> Tring
                 toEP   :: a }  -- :: Tring -> T

{-
Embedding-projection pairs are used in several places:

First of all, each type constructor has an EP associated with it, the
code in EP converts (datatype T) from T to Tring and back again.

Secondly, when we are filling in Generic methods (in the typechecker,
tcMethodBinds), we are constructing bimaps by induction on the structure
of the type of the method signature.

************************************************************************
*                                                                      *
\subsection{Occurrence information}
*                                                                      *
************************************************************************

This data type is used exclusively by the simplifier, but it appears in
a SubstResult, which is currently defined in GHC.Types.Var.Env, which is
pretty near the base of the module hierarchy.  So it seemed simpler to
put the defn of OccInfo here, safely at the bottom
-}

-- | identifier Occurrence Information
data OccInfo
  = ManyOccs { occ_tail :: !TailCallInfo }
      -- ^ There are many occurrences, or unknown occurrences

  | IAmDead
      -- ^ Marks unused variables.  Sometimes useful for
      -- lambda and case-bound variables.

  | OneOcc { occ_in_lam  :: !InsideLam
           , occ_n_br    :: {-# UNPACK #-} !BranchCount
           , occ_int_cxt :: !InterestingCxt
           , occ_tail    :: !TailCallInfo }
      -- ^ Occurs exactly once (per branch), not inside a rule

  -- | This identifier breaks a loop of mutually recursive functions. The field
  -- marks whether it is only a loop breaker due to a reference in a rule
  | IAmALoopBreaker { occ_rules_only :: !RulesOnly
                    , occ_tail       :: !TailCallInfo }
                    -- Note [LoopBreaker OccInfo]

  deriving (Eq)

type RulesOnly = Bool

type BranchCount = Int
  -- For OneOcc, the BranchCount says how many syntactic occurrences there are
  -- At the moment we really only check for 1 or >1, but in principle
  -- we could pay attention to how *many* occurrences there are
  -- (notably in postInlineUnconditionally).
  -- But meanwhile, Ints are very efficiently represented.
oneBranch :: BranchCount
oneBranch = 1

{- Note [LoopBreaker OccInfo]
~~~~~~~~~~~~~~~~~~~~~~~~~~
   IAmALoopBreaker True  <=> A "weak" or rules-only loop breaker
                             Do not preInlineUnconditionally

   IAmALoopBreaker False <=> A "strong" loop breaker
                             Do not inline at all

See OccurAnal Note [Weak loop breakers]
-}

noOccInfo :: OccInfo
noOccInfo = ManyOccs { occ_tail = NoTailCallInfo }

isNoOccInfo :: OccInfo -> Bool
isNoOccInfo (ManyOccs { occ_tail = NoTailCallInfo }) = True
isNoOccInfo _                                        = False

isManyOccs :: OccInfo -> Bool
isManyOccs ManyOccs{} = True
isManyOccs _          = False

seqOccInfo :: OccInfo -> ()
seqOccInfo occ = occ `seq` ()

-----------------
-- | Interesting Context
data InterestingCxt
  = IsInteresting
      -- ^ Function: is applied
      --   Data value: scrutinised by a case with at least one non-DEFAULT branch
  | NotInteresting
  deriving (Eq)

-- | If there is any 'interesting' identifier occurrence, then the
-- aggregated occurrence info of that identifier is considered interesting.
instance Semi.Semigroup InterestingCxt where
  NotInteresting <> x = x
  IsInteresting  <> _ = IsInteresting

instance Monoid InterestingCxt where
  mempty  = NotInteresting
  mappend = (Semi.<>)

-----------------
-- | Inside Lambda
data InsideLam
  = IsInsideLam
      -- ^ Occurs inside a non-linear lambda
      -- Substituting a redex for this occurrence is
      -- dangerous because it might duplicate work.
  | NotInsideLam
  deriving (Eq)

-- | If any occurrence of an identifier is inside a lambda, then the
-- occurrence info of that identifier marks it as occurring inside a lambda
instance Semi.Semigroup InsideLam where
  NotInsideLam <> x = x
  IsInsideLam  <> _ = IsInsideLam

instance Monoid InsideLam where
  mempty  = NotInsideLam
  mappend = (Semi.<>)

-----------------
data TailCallInfo
  = AlwaysTailCalled JoinArity -- See Note [TailCallInfo]
  | NoTailCallInfo
  deriving (Eq)

tailCallInfo :: OccInfo -> TailCallInfo
tailCallInfo IAmDead = NoTailCallInfo
tailCallInfo other   = occ_tail other

zapOccTailCallInfo :: OccInfo -> OccInfo
zapOccTailCallInfo IAmDead = IAmDead
zapOccTailCallInfo occ     = occ { occ_tail = NoTailCallInfo }

isAlwaysTailCalled :: OccInfo -> Bool
isAlwaysTailCalled occ
  = case tailCallInfo occ of
      AlwaysTailCalled{} -> True
      NoTailCallInfo     -> False

instance Outputable TailCallInfo where
  ppr (AlwaysTailCalled ar) = sep [ text "Tail", int ar ]
  ppr _                     = empty

-----------------
strongLoopBreaker, weakLoopBreaker :: OccInfo
strongLoopBreaker = IAmALoopBreaker False NoTailCallInfo
weakLoopBreaker   = IAmALoopBreaker True  NoTailCallInfo

isWeakLoopBreaker :: OccInfo -> Bool
isWeakLoopBreaker (IAmALoopBreaker{}) = True
isWeakLoopBreaker _                   = False

isStrongLoopBreaker :: OccInfo -> Bool
isStrongLoopBreaker (IAmALoopBreaker { occ_rules_only = False }) = True
  -- Loop-breaker that breaks a non-rule cycle
isStrongLoopBreaker _ = False

isDeadOcc :: OccInfo -> Bool
isDeadOcc IAmDead = True
isDeadOcc _       = False

isOneOcc :: OccInfo -> Bool
isOneOcc (OneOcc {}) = True
isOneOcc _           = False

zapFragileOcc :: OccInfo -> OccInfo
-- Keep only the most robust data: deadness, loop-breaker-hood
zapFragileOcc (OneOcc {}) = noOccInfo
zapFragileOcc occ         = zapOccTailCallInfo occ

instance Outputable OccInfo where
  -- only used for debugging; never parsed.  KSW 1999-07
  ppr (ManyOccs tails) = pprShortTailCallInfo tails
  ppr IAmDead          = text "Dead"
  ppr (IAmALoopBreaker rule_only tails)
    = text "LoopBreaker" <> pp_ro <> pprShortTailCallInfo tails
    where
      pp_ro | rule_only = char '!'
            | otherwise = empty
  ppr (OneOcc inside_lam one_branch int_cxt tail_info)
    = text "Once" <> pp_lam inside_lam <> ppr one_branch
                  <> pp_args int_cxt <> pp_tail
    where
      pp_lam IsInsideLam     = char 'L'
      pp_lam NotInsideLam    = empty
      pp_args IsInteresting  = char '!'
      pp_args NotInteresting = empty
      pp_tail                = pprShortTailCallInfo tail_info

pprShortTailCallInfo :: TailCallInfo -> SDoc
pprShortTailCallInfo (AlwaysTailCalled ar) = char 'T' <> brackets (int ar)
pprShortTailCallInfo NoTailCallInfo        = empty

{- Note [TailCallInfo]
~~~~~~~~~~~~~~~~~~~
The occurrence analyser determines what can be made into a join point, but it
doesn't change the binder into a JoinId because then it would be inconsistent
with the occurrences. Thus it's left to the simplifier (or to simpleOptExpr) to
change the IdDetails.

The AlwaysTailCalled marker actually means slightly more than simply that the
function is always tail-called. See Note [Invariants on join points].

This info is quite fragile and should not be relied upon unless the occurrence
analyser has *just* run. Use 'Id.isJoinId_maybe' for the permanent state of
the join-point-hood of a binder; a join id itself will not be marked
AlwaysTailCalled.

Note that there is a 'TailCallInfo' on a 'ManyOccs' value. One might expect
that being tail-called would mean that the variable could only appear once per
branch (thus getting a `OneOcc { }` occurrence info), but a join
point can also be invoked from other join points, not just from case branches:

  let j1 x = ...
      j2 y = ... j1 z {- tail call -} ...
  in case w of
       A -> j1 v
       B -> j2 u
       C -> j2 q

Here both 'j1' and 'j2' will get marked AlwaysTailCalled, but j1 will get
ManyOccs and j2 will get `OneOcc { occ_n_br = 2 }`.

************************************************************************
*                                                                      *
                Default method specification
*                                                                      *
************************************************************************

The DefMethSpec enumeration just indicates what sort of default method
is used for a class. It is generated from source code, and present in
interface files; it is converted to Class.DefMethInfo before being put in
a Class object.
-}

-- | Default Method Specification
data DefMethSpec ty
  = VanillaDM     -- Default method given with polymorphic code
  | GenericDM ty  -- Default method given with code of this type

instance Outputable (DefMethSpec ty) where
  ppr VanillaDM      = text "{- Has default method -}"
  ppr (GenericDM {}) = text "{- Has generic default method -}"

{-
************************************************************************
*                                                                      *
\subsection{Success flag}
*                                                                      *
************************************************************************
-}

data SuccessFlag = Succeeded | Failed

instance Semigroup SuccessFlag where
  Failed <> _ = Failed
  _ <> Failed = Failed
  _ <> _      = Succeeded

instance Outputable SuccessFlag where
  ppr Succeeded = text "Succeeded"
  ppr Failed    = text "Failed"

successIf :: Bool -> SuccessFlag
successIf True  = Succeeded
successIf False = Failed

succeeded, failed :: SuccessFlag -> Bool
succeeded Succeeded = True
succeeded Failed    = False

failed Succeeded = False
failed Failed    = True

{-
************************************************************************
*                                                                      *
\subsection{Activation}
*                                                                      *
************************************************************************

When a rule or inlining is active

Note [Compiler phases]
~~~~~~~~~~~~~~~~~~~~~~
The CompilerPhase says which phase the simplifier is running in:

* InitialPhase: before all user-visible phases

* Phase 2,1,0: user-visible phases; the phase number controls rule
  ordering and inlining.

* FinalPhase: used for all subsequent simplifier runs. By delaying
  inlining of wrappers to FinalPhase we can ensure that RULEs have a
  good chance to fire. See Note [Wrapper activation] in
  GHC.Core.Opt.WorkWrap

  NB: FinalPhase is run repeatedly, not just once.

  NB: users don't have access to InitialPhase or FinalPhase.
  They write {-# INLINE[n] f #-}, meaning (Phase n)

The phase sequencing is done by GHC.Opt.Simplify.Driver
-}

-- | Phase Number
type PhaseNum = Int  -- Compilation phase
                     -- Phases decrease towards zero
                     -- Zero is the last phase

data CompilerPhase
  = InitialPhase    -- The first phase -- number = infinity!
  | Phase PhaseNum  -- User-specifiable phases
  | FinalPhase      -- The last phase  -- number = -infinity!
  deriving Eq

instance Outputable CompilerPhase where
  ppr (Phase n)    = int n
  ppr InitialPhase = text "InitialPhase"
  ppr FinalPhase   = text "FinalPhase"

-- See Note [Pragma source text]
data Activation
  = AlwaysActive
  | ActiveBefore SourceText PhaseNum -- Active only *strictly before* this phase
  | ActiveAfter  SourceText PhaseNum -- Active in this phase and later
  | FinalActive                      -- Active in final phase only
  | NeverActive
  deriving( Eq, Data )
    -- Eq used in comparing rules in GHC.Hs.Decls

beginPhase :: Activation -> CompilerPhase
-- First phase in which the Activation is active
-- or FinalPhase if it is never active
beginPhase AlwaysActive      = InitialPhase
beginPhase (ActiveBefore {}) = InitialPhase
beginPhase (ActiveAfter _ n) = Phase n
beginPhase FinalActive       = FinalPhase
beginPhase NeverActive       = FinalPhase

activeAfter :: CompilerPhase -> Activation
-- (activeAfter p) makes an Activation that is active in phase p and after
-- Invariant: beginPhase (activeAfter p) = p
activeAfter InitialPhase = AlwaysActive
activeAfter (Phase n)    = ActiveAfter NoSourceText n
activeAfter FinalPhase   = FinalActive

nextPhase :: CompilerPhase -> CompilerPhase
-- Tells you the next phase after this one
-- Currently we have just phases [2,1,0,FinalPhase,FinalPhase,...]
-- Where FinalPhase means GHC's internal simplification steps
-- after all rules have run
nextPhase InitialPhase = Phase 2
nextPhase (Phase 0)    = FinalPhase
nextPhase (Phase n)    = Phase (n-1)
nextPhase FinalPhase   = FinalPhase

laterPhase :: CompilerPhase -> CompilerPhase -> CompilerPhase
-- Returns the later of two phases
-- NB: phases count *down*, so the later phase has the *smaller* number
laterPhase (Phase n1)   (Phase n2)   = Phase (n1 `min` n2)
laterPhase InitialPhase p2           = p2
laterPhase FinalPhase   _            = FinalPhase
laterPhase p1           InitialPhase = p1
laterPhase _            FinalPhase   = FinalPhase

activateAfterInitial :: Activation
-- Active in the first phase after the initial phase
activateAfterInitial = activeAfter (nextPhase InitialPhase)

activateDuringFinal :: Activation
-- Active in the final simplification phase (which is repeated)
activateDuringFinal = FinalActive

isActive :: CompilerPhase -> Activation -> Bool
-- Is the given 'Activation' switched on in this 'CompilerPhase'?
isActive InitialPhase act = activeInInitialPhase act
isActive (Phase p)    act = activeInPhase p act
isActive FinalPhase   act = activeInFinalPhase act

activeInInitialPhase :: Activation -> Bool
activeInInitialPhase AlwaysActive      = True
activeInInitialPhase (ActiveBefore {}) = True
activeInInitialPhase _                 = False

activeInPhase :: PhaseNum -> Activation -> Bool
activeInPhase _ AlwaysActive       = True
activeInPhase _ NeverActive        = False
activeInPhase _ FinalActive        = False
activeInPhase p (ActiveAfter  _ n) = p <= n
activeInPhase p (ActiveBefore _ n) = p >  n

activeInFinalPhase :: Activation -> Bool
activeInFinalPhase AlwaysActive     = True
activeInFinalPhase FinalActive      = True
activeInFinalPhase (ActiveAfter {}) = True
activeInFinalPhase _                = False

isNeverActive, isAlwaysActive :: Activation -> Bool
isNeverActive NeverActive = True
isNeverActive _           = False

isAlwaysActive AlwaysActive = True
isAlwaysActive _            = False

competesWith :: Activation -> Activation -> Bool
-- See Note [Competing activations]
-- (NB: the note below; the reference previously said
--  "Note [Activation competition]", which does not exist.)
competesWith AlwaysActive       _                  = True
competesWith NeverActive        _                  = False
competesWith _                  NeverActive        = False

competesWith FinalActive        FinalActive        = True
competesWith FinalActive        _                  = False

competesWith (ActiveBefore {})  AlwaysActive       = True
competesWith (ActiveBefore {})  FinalActive        = False
competesWith (ActiveBefore {})  (ActiveBefore {})  = True
competesWith (ActiveBefore _ a) (ActiveAfter _ b)  = a < b

competesWith (ActiveAfter {})   AlwaysActive       = False
competesWith (ActiveAfter {})   FinalActive        = True
competesWith (ActiveAfter {})   (ActiveBefore {})  = False
competesWith (ActiveAfter _ a)  (ActiveAfter _ b)  = a >= b

{- Note [Competing activations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes a RULE and an inlining may compete, or two RULES.
See Note [Rules and inlining/other rules] in GHC.HsToCore.

We say that act1 "competes with" act2 iff
  act1 is active in the phase when act2 *becomes* active
NB: remember that phases count *down*: 2, 1, 0!

It's too conservative to ensure that the two are never simultaneously
active.  For example, a rule might be always active, and an inlining
might switch on in phase 2.  We could switch off the rule, but it does
no harm.
-}

{- *********************************************************************
*                                                                      *
             InlinePragma, InlineSpec, RuleMatchInfo
*                                                                      *
********************************************************************* -}

data InlinePragma            -- Note [InlinePragma]
  = InlinePragma
      { inl_src    :: SourceText -- Note [Pragma source text]
      , inl_inline :: InlineSpec -- See Note [inl_inline and inl_act]

      , inl_sat    :: Maybe Arity -- Just n <=> Inline only when applied to n
                                  --            explicit (non-type, non-dictionary) args
                                  --   That is, inl_sat describes the number of *source-code*
                                  --   arguments the thing must be applied to.  We add on the
                                  --   number of implicit, dictionary arguments when making
                                  --   the Unfolding, and don't look at inl_sat further

      , inl_act    :: Activation -- Says during which phases inlining is allowed
                                 -- See Note [inl_inline and inl_act]

      , inl_rule   :: RuleMatchInfo -- Should the function be treated like a constructor?
} deriving( Eq, Data )

-- | Rule Match Information
data RuleMatchInfo = ConLike -- See Note [CONLIKE pragma]
                   | FunLike
                   deriving( Eq, Data, Show )
  -- Show needed for GHC.Parser.Lexer

-- | Inline Specification
data InlineSpec   -- What the user's INLINE pragma looked like
  = Inline    SourceText -- User wrote INLINE
  | Inlinable SourceText -- User wrote INLINABLE
  | NoInline  SourceText -- User wrote NOINLINE
  | Opaque    SourceText -- User wrote OPAQUE
                         -- Each of the above keywords is accompanied with
                         -- a string of type SourceText written by the user
  | NoUserInlinePrag -- User did not write any of INLINE/INLINABLE/NOINLINE
                     -- e.g. in `defaultInlinePragma` or when created by CSE
  deriving( Eq, Data, Show )
  -- Show needed for GHC.Parser.Lexer

{- Note [InlinePragma]
~~~~~~~~~~~~~~~~~~~~~~
This data type mirrors what you can write in an INLINE or NOINLINE
pragma in the source program.

If you write nothing at all, you get defaultInlinePragma:
   inl_inline = NoUserInlinePrag
   inl_act    = AlwaysActive
   inl_rule   = FunLike

It's not possible to get that combination by *writing* something, so
if an Id has defaultInlinePragma it means the user didn't specify anything.

If inl_inline = Inline or Inlineable, then the Id should have an
InlineRule unfolding.

If you want to know where InlinePragmas take effect: Look in
GHC.HsToCore.Binds.makeCorePair

Note [inl_inline and inl_act]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* inl_inline says what the user wrote: did they say INLINE, NOINLINE,
  INLINABLE, OPAQUE, or nothing at all

* inl_act says in what phases the unfolding is active or inactive
  E.g  If you write INLINE[1]    then inl_act will be set to ActiveAfter 1
       If you write NOINLINE[1]  then inl_act will be set to ActiveBefore 1
       If you write NOINLINE[~1] then inl_act will be set to ActiveAfter 1
  So note that inl_act does not say what pragma you wrote: it just
  expresses its consequences

* inl_act just says when the unfolding is active; it doesn't say what
  to inline.  If you say INLINE f, then f's inl_act will be AlwaysActive,
  but in addition f will get a "stable unfolding" with UnfoldingGuidance
  that tells the inliner to be pretty eager about it.

Note [CONLIKE pragma]
~~~~~~~~~~~~~~~~~~~~~
The ConLike constructor of a RuleMatchInfo is aimed at the following.
Consider first
    {-# RULE "r/cons" forall a as. r (a:as) = f (a+1) #-}
    g b bs = let x = b:bs in ..x...x...(r x)...
Now, the rule applies to the (r x) term, because GHC "looks through"
the definition of 'x' to see that it is (b:bs).

Now consider
    {-# RULE "r/f" forall v. r (f v) = f (v+1) #-}
    g v = let x = f v in ..x...x...(r x)...
Normally the (r x) would *not* match the rule, because GHC would be
scared about duplicating the redex (f v), so it does not "look
through" the bindings.

However the CONLIKE modifier says to treat 'f' like a constructor in
this situation, and "look through" the unfolding for x.  So (r x)
fires, yielding (f (v+1)).

This is all controlled with a user-visible pragma:
     {-# NOINLINE CONLIKE [1] f #-}

The main effects of CONLIKE are:

    - The occurrence analyser (OccAnal) and simplifier (Simplify) treat
      CONLIKE thing like constructors, by ANF-ing them

    - New function GHC.Core.Utils.exprIsExpandable is like exprIsCheap, but
      additionally spots applications of CONLIKE functions

    - A CoreUnfolding has a field that caches exprIsExpandable

    - The rule matcher consults this field.  See
      Note [Expanding variables] in GHC.Core.Rules.

Note [OPAQUE pragma]
~~~~~~~~~~~~~~~~~~~~
Suppose a function `f` is marked {-# OPAQUE f #-}.  Then every call of `f`
should remain a call of `f` throughout optimisation; it should not be
turned into a call of a name-mangled variant of `f` (e.g by
worker/wrapper).

The motivation for the OPAQUE pragma is discussed in GHC proposal 0415:
https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0415-opaque-pragma.rst
Basically it boils down to the desire of GHC API users and GHC RULE writers
for calls to certain binders to be left completely untouched by GHC's
optimisations.

What this entails at the time of writing, is that for every binder annotated
with the OPAQUE pragma we:

* Do not do worker/wrapper via cast W/W:
  See the guard in GHC.Core.Opt.Simplify.tryCastWorkerWrapper

* Do not do any worker/wrapper after demand/CPR analysis. To that end add
  a guard in GHC.Core.Opt.WorkWrap.tryWW to disable worker/wrapper

* It is important that the demand signature and CPR signature do not lie,
  else clients of the function will believe that it has the CPR property
  etc. But it won't, because we've disabled worker/wrapper. To avoid the
  signatures lying:
  * Strip boxity information from the demand signature
    in GHC.Core.Opt.DmdAnal.finaliseArgBoxities
    See Note [The OPAQUE pragma and avoiding the reboxing of arguments]
  * Strip CPR information from the CPR signature
    in GHC.Core.Opt.CprAnal.cprAnalBind
    See Note [The OPAQUE pragma and avoiding the reboxing of results]

* Do not create specialised versions of the function in
  * Specialise:  see GHC.Core.Opt.Specialise.specCalls
  * SpecConstr:  see GHC.Core.Opt.SpecConstr.specialise
  Both are accomplished easily: these passes already skip NOINLINE
  functions with NeverActive activation, and an OPAQUE function is also
  NeverActive.

At the moment of writing, the major difference between the NOINLINE pragma
and the OPAQUE pragma is that binders annotated with the NOINLINE pragma
_are_ W/W transformed (see also Note [Worker/wrapper for NOINLINE functions])
where binders annotated with the OPAQUE pragma are _not_ W/W transformed.

Future "name-mangling" optimisations should respect the OPAQUE pragma and
update the list of moving parts referenced in this note.
-} isConLike :: RuleMatchInfo -> Bool isConLike ConLike = True isConLike _ = False isFunLike :: RuleMatchInfo -> Bool isFunLike FunLike = True isFunLike _ = False noUserInlineSpec :: InlineSpec -> Bool noUserInlineSpec NoUserInlinePrag = True noUserInlineSpec _ = False defaultInlinePragma, alwaysInlinePragma, neverInlinePragma, dfunInlinePragma :: InlinePragma defaultInlinePragma = InlinePragma { inl_src = SourceText "{-# INLINE" , inl_act = AlwaysActive , inl_rule = FunLike , inl_inline = NoUserInlinePrag , inl_sat = Nothing } alwaysInlinePragma = defaultInlinePragma { inl_inline = Inline (inlinePragmaSource defaultInlinePragma) } neverInlinePragma = defaultInlinePragma { inl_act = NeverActive } alwaysInlineConLikePragma :: InlinePragma alwaysInlineConLikePragma = alwaysInlinePragma { inl_rule = ConLike } inlinePragmaSpec :: InlinePragma -> InlineSpec inlinePragmaSpec = inl_inline inlinePragmaSource :: InlinePragma -> SourceText inlinePragmaSource prag = case inl_inline prag of Inline x -> x Inlinable y -> y NoInline z -> z Opaque q -> q NoUserInlinePrag -> NoSourceText inlineSpecSource :: InlineSpec -> SourceText inlineSpecSource spec = case spec of Inline x -> x Inlinable y -> y NoInline z -> z Opaque q -> q NoUserInlinePrag -> NoSourceText -- A DFun has an always-active inline activation so that -- exprIsConApp_maybe can "see" its unfolding -- (However, its actual Unfolding is a DFunUnfolding, which is -- never inlined other than via exprIsConApp_maybe.) 
dfunInlinePragma
  = defaultInlinePragma { inl_act  = AlwaysActive
                        , inl_rule = ConLike }

-- | True iff the pragma carries no user-specified information at all:
-- no inline spec, always active, FunLike.
isDefaultInlinePragma :: InlinePragma -> Bool
isDefaultInlinePragma (InlinePragma { inl_act    = activation
                                    , inl_rule   = match_info
                                    , inl_inline = inline })
  = noUserInlineSpec inline
    && isAlwaysActive activation
    && isFunLike match_info

isInlinePragma :: InlinePragma -> Bool
isInlinePragma prag
  | Inline _ <- inl_inline prag = True
  | otherwise                   = False

isInlinablePragma :: InlinePragma -> Bool
isInlinablePragma prag
  | Inlinable _ <- inl_inline prag = True
  | otherwise                      = False

isNoInlinePragma :: InlinePragma -> Bool
isNoInlinePragma prag
  | NoInline _ <- inl_inline prag = True
  | otherwise                     = False

-- | True for INLINE or INLINABLE pragmas
isAnyInlinePragma :: InlinePragma -> Bool
isAnyInlinePragma prag
  = case inl_inline prag of
      Inline    _ -> True
      Inlinable _ -> True
      _           -> False

isOpaquePragma :: InlinePragma -> Bool
isOpaquePragma prag
  | Opaque _ <- inl_inline prag = True
  | otherwise                   = False

inlinePragmaSat :: InlinePragma -> Maybe Arity
inlinePragmaSat = inl_sat

inlinePragmaActivation :: InlinePragma -> Activation
inlinePragmaActivation = inl_act

inlinePragmaRuleMatchInfo :: InlinePragma -> RuleMatchInfo
inlinePragmaRuleMatchInfo = inl_rule

setInlinePragmaActivation :: InlinePragma -> Activation -> InlinePragma
setInlinePragmaActivation prag new_act = prag { inl_act = new_act }

setInlinePragmaRuleMatchInfo :: InlinePragma -> RuleMatchInfo -> InlinePragma
setInlinePragmaRuleMatchInfo prag new_info = prag { inl_rule = new_info }

instance Outputable Activation where
  ppr AlwaysActive       = empty
  ppr NeverActive        = brackets (text "~")
  ppr (ActiveBefore _ n) = brackets (char '~' <> int n)
  ppr (ActiveAfter  _ n) = brackets (int n)
  ppr FinalActive        = text "[final]"

instance Binary Activation where
  put_ bh NeverActive  = putByte bh 0
  put_ bh FinalActive  = putByte bh 1
  put_ bh AlwaysActive = putByte bh 2
  put_ bh (ActiveBefore src n) = do
    putByte bh 3
    put_ bh src
    put_ bh n
  put_ bh (ActiveAfter src n) = do
    putByte bh 4
    put_ bh src
    put_ bh n

  get bh = do
    tag <- getByte bh
    case tag of
      0 -> return NeverActive
      1 -> return FinalActive
      2 -> return AlwaysActive
      3 -> do src <- get bh
              n   <- get bh
              return (ActiveBefore src n)
      _ -> do src <- get bh
              n   <- get bh
              return (ActiveAfter src n)

instance Outputable RuleMatchInfo where
  ppr ConLike = text "CONLIKE"
  ppr FunLike = text "FUNLIKE"

instance Binary RuleMatchInfo where
  put_ bh FunLike = putByte bh 0
  put_ bh ConLike = putByte bh 1

  get bh = do
    tag <- getByte bh
    return (if tag == 1 then ConLike else FunLike)

instance Outputable InlineSpec where
  ppr (Inline    src) = text "INLINE"    <+> pprWithSourceText src empty
  ppr (NoInline  src) = text "NOINLINE"  <+> pprWithSourceText src empty
  ppr (Inlinable src) = text "INLINABLE" <+> pprWithSourceText src empty
  ppr (Opaque    src) = text "OPAQUE"    <+> pprWithSourceText src empty
  ppr NoUserInlinePrag = empty

instance Binary InlineSpec where
  put_ bh NoUserInlinePrag = putByte bh 0
  put_ bh (Inline s)    = do putByte bh 1
                             put_ bh s
  put_ bh (Inlinable s) = do putByte bh 2
                             put_ bh s
  put_ bh (NoInline s)  = do putByte bh 3
                             put_ bh s
  put_ bh (Opaque s)    = do putByte bh 4
                             put_ bh s

  get bh = do
    tag <- getByte bh
    case tag of
      0 -> return NoUserInlinePrag
      1 -> Inline    <$> get bh
      2 -> Inlinable <$> get bh
      3 -> NoInline  <$> get bh
      _ -> Opaque    <$> get bh

instance Outputable InlinePragma where
  ppr = pprInline

instance Binary InlinePragma where
  put_ bh (InlinePragma s a b c d) = do
    put_ bh s
    put_ bh a
    put_ bh b
    put_ bh c
    put_ bh d

  get bh = do
    s <- get bh
    a <- get bh
    b <- get bh
    c <- get bh
    d <- get bh
    return (InlinePragma s a b c d)

-- | Outputs string for pragma name for any of INLINE/INLINABLE/NOINLINE. This
-- differs from the Outputable instance for the InlineSpec type where the
-- pragma name string as well as the accompanying SourceText (if any) is
-- printed.
inlinePragmaName :: InlineSpec -> SDoc
inlinePragmaName (Inline    _)   = text "INLINE"
inlinePragmaName (Inlinable _)   = text "INLINABLE"
inlinePragmaName (NoInline  _)   = text "NOINLINE"
inlinePragmaName (Opaque    _)   = text "OPAQUE"
inlinePragmaName NoUserInlinePrag = empty

pprInline :: InlinePragma -> SDoc
pprInline = pprInline' True

pprInlineDebug :: InlinePragma -> SDoc
pprInlineDebug = pprInline' False

pprInline' :: Bool           -- True <=> do not display the inl_inline field
           -> InlinePragma
           -> SDoc
pprInline' emptyInline (InlinePragma { inl_inline = inline
                                     , inl_act    = activation
                                     , inl_rule   = info
                                     , inl_sat    = mb_arity })
  = pp_inl inline <> pp_act inline activation <+> pp_sat <+> pp_info
  where
    -- The pragma keyword itself, suppressed when emptyInline is set
    pp_inl spec
      | emptyInline = empty
      | otherwise   = inlinePragmaName spec

    -- Omit the activation when it is the default for that pragma kind
    pp_act Inline   {} AlwaysActive = empty
    pp_act NoInline {} NeverActive  = empty
    pp_act Opaque   {} NeverActive  = empty
    pp_act _           act          = ppr act

    pp_sat = case mb_arity of
               Just ar -> parens (text "sat-args=" <> int ar)
               Nothing -> empty

    pp_info = if isFunLike info then empty else ppr info

{- ************************************************************************
*                                                                         *
                        IntWithInf
*                                                                         *
************************************************************************

Represents an integer or positive infinity
-}

-- | An integer or infinity
data IntWithInf = Int {-# UNPACK #-} !Int
                | Infinity
                deriving Eq

-- | A representation of infinity
infinity :: IntWithInf
infinity = Infinity

instance Ord IntWithInf where
  compare Infinity Infinity = EQ
  compare Infinity (Int _)  = GT
  compare (Int _)  Infinity = LT
  compare (Int a)  (Int b)  = a `compare` b

instance Outputable IntWithInf where
  ppr Infinity = char '∞'
  ppr (Int n)  = int n

instance Num IntWithInf where
  (+) = plusWithInf
  (*) = mulWithInf

  abs Infinity = Infinity
  abs (Int n)  = Int (abs n)

  signum Infinity = Int 1
  signum (Int n)  = Int (signum n)

  fromInteger = Int . fromInteger

  (-) = panic "subtracting IntWithInfs"

intGtLimit :: Int -> IntWithInf -> Bool
intGtLimit _ Infinity = False
intGtLimit n (Int m)  = n > m

-- | Add two 'IntWithInf's
plusWithInf :: IntWithInf -> IntWithInf -> IntWithInf
plusWithInf Infinity _        = Infinity
plusWithInf _        Infinity = Infinity
plusWithInf (Int a)  (Int b)  = Int (a + b)

-- | Multiply two 'IntWithInf's
mulWithInf :: IntWithInf -> IntWithInf -> IntWithInf
mulWithInf Infinity _        = Infinity
mulWithInf _        Infinity = Infinity
mulWithInf (Int a)  (Int b)  = Int (a * b)

-- | Subtract an 'Int' from an 'IntWithInf'
subWithInf :: IntWithInf -> Int -> IntWithInf
subWithInf Infinity _ = Infinity
subWithInf (Int a)  b = Int (a - b)

-- | Turn a positive number into an 'IntWithInf', where 0 represents infinity
treatZeroAsInf :: Int -> IntWithInf
treatZeroAsInf 0 = Infinity
treatZeroAsInf n = Int n

-- | Inject any integer into an 'IntWithInf'
mkIntWithInf :: Int -> IntWithInf
mkIntWithInf = Int

data SpliceExplicitFlag
  = ExplicitSplice   -- ^ <=> $(f x y)
  | ImplicitSplice   -- ^ <=> f x y, i.e. a naked top level expression
  deriving Data

{- *********************************************************************
*                                                                      *
                        Types vs Kinds
*                                                                      *
********************************************************************* -}

-- | Flag to see whether we're type-checking terms or kind-checking types
data TypeOrKind = TypeLevel | KindLevel
  deriving Eq

instance Outputable TypeOrKind where
  ppr TypeLevel = text "TypeLevel"
  ppr KindLevel = text "KindLevel"

isTypeLevel :: TypeOrKind -> Bool
isTypeLevel = (== TypeLevel)

isKindLevel :: TypeOrKind -> Bool
isKindLevel = (== KindLevel)

{- *********************************************************************
*                                                                      *
                        Levity information
*                                                                      *
********************************************************************* -}

data Levity
  = Lifted
  | Unlifted
  deriving Eq

instance Outputable Levity where
  ppr Lifted   = text "Lifted"
  ppr Unlifted = text "Unlifted"

mightBeLifted :: Maybe Levity -> Bool
mightBeLifted (Just Unlifted) = False
mightBeLifted _               = True

mightBeUnlifted :: Maybe Levity -> Bool
mightBeUnlifted (Just Lifted) = False
mightBeUnlifted _             = True

{- *********************************************************************
*                                                                      *
                        Expressions vs Patterns
*                                                                      *
********************************************************************* -}

-- | Are we dealing with an expression or a pattern?
--
-- Used only for the textual output of certain error messages;
-- see the 'FRRDataConArg' constructor of 'FixedRuntimeRepContext'.
data ExprOrPat
  = Expression
  | Pattern
  deriving Eq

instance Outputable ExprOrPat where
  ppr Expression = text "expression"
  ppr Pattern    = text "pattern"

{- *********************************************************************
*                                                                      *
                        Defaulting options
*                                                                      *
********************************************************************* -}

{- Note [Type variable defaulting options]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is an overview of the current type variable defaulting mechanisms,
in the order in which they happen.

GHC.Tc.Utils.TcMType.defaultTyVar

  This is a built-in defaulting mechanism for the following type variables:

    (1) kind variables with -XNoPolyKinds,
    (2) type variables of kind 'RuntimeRep' default to 'LiftedRep',
        of kind 'Levity' to 'Lifted', and of kind 'Multiplicity' to 'Many'.

  It is used in many situations:

    - inferring a type (e.g. a declaration with no type signature or a
      partial type signature), in 'GHC.Tc.Solver.simplifyInfer',
    - simplifying top-level constraints in 'GHC.Tc.Solver.simplifyTop',
    - kind checking a CUSK in 'GHC.Tc.Gen.kcCheckDeclHeader_cusk',
    - 'GHC.Tc.TyCl.generaliseTcTyCon',
    - type checking type family and data family instances, in
      'GHC.Tc.TyCl.tcTyFamInstEqnGuts' and
      'GHC.Tc.TyCl.Instance.tcDataFamInstHeader' respectively,
    - type-checking rules in 'GHC.Tc.Gen.tcRule',
    - kind generalisation in 'GHC.Tc.Gen.HsType.kindGeneralizeSome'
      and 'GHC.Tc.Gen.HsType.kindGeneralizeAll'.

  Different situations call for a different defaulting strategy, so
  'defaultTyVar' takes a strategy parameter which determines which type
  variables to default.  Currently, this strategy is set as follows:

    - Kind variables:
      - with -XNoPolyKinds, these must be defaulted. This includes kind
        variables of kind 'RuntimeRep', 'Levity' and 'Multiplicity'.
        Test case: T20584.
      - with -XPolyKinds, behave as if they were type variables (see below).
    - Type variables of kind 'RuntimeRep', 'Levity' or 'Multiplicity'
      - in type and data families instances, these are not defaulted.
        Test case: T17536.
      - otherwise: default variables of these three kinds.
        This ensures that in a program such as

          foo :: forall a. a -> a
          foo x = x

        we continue to infer `a :: Type`.

  Note that the strategy is set in two steps: callers of 'defaultTyVars'
  only specify whether to default type variables of "non-standard" kinds
  (that is, of kinds 'RuntimeRep'/'Levity'/'Multiplicity').
  Then 'defaultTyVars' determines which variables are type variables and
  which are kind variables, and if the user has asked for -XNoPolyKinds
  we default the kind variables.

GHC.Tc.Solver.defaultTyVarTcS

  This is a built-in defaulting mechanism that happens after the
  constraint solver has run, in 'GHC.Tc.Solver.simplifyTopWanteds'.
  It only defaults type (and kind) variables of kind 'RuntimeRep',
  'Levity', 'Multiplicity'.
  It is not configurable, neither by options nor by the user.

GHC.Tc.Solver.applyDefaultingRules

  This is typeclass defaulting, and includes defaulting plugins.
  It happens right after 'defaultTyVarTcS' in
  'GHC.Tc.Solver.simplifyTopWanteds'.
  It is user configurable, using default declarations (/plugins).

GHC.Iface.Type.defaultIfaceTyVarsOfKind

  This is a built-in defaulting mechanism that only applies when
  pretty-printing.  It defaults 'RuntimeRep'/'Levity' variables unless
  -fprint-explicit-kinds is enabled, and 'Multiplicity' variables unless
  -XLinearTypes is enabled.
-}

-- | Specify whether to default type variables of kind 'RuntimeRep'/'Levity'/'Multiplicity'.
data NonStandardDefaultingStrategy
  -- | Default type variables of the given kinds:
  --
  --   - default 'RuntimeRep' variables to 'LiftedRep'
  --   - default 'Levity' variables to 'Lifted'
  --   - default 'Multiplicity' variables to 'Many'
  = DefaultNonStandardTyVars
  -- | Try not to default type variables of the kinds
  -- 'RuntimeRep'/'Levity'/'Multiplicity'.
  --
  -- Note that these might get defaulted anyway, if they are kind variables
  -- and `-XNoPolyKinds` is enabled.
| TryNotToDefaultNonStandardTyVars -- | Specify whether to default kind variables, and type variables -- of kind 'RuntimeRep'/'Levity'/'Multiplicity'. data DefaultingStrategy -- | Default kind variables: -- -- - default kind variables of kind 'Type' to 'Type', -- - default 'RuntimeRep'/'Levity'/'Multiplicity' kind variables -- to 'LiftedRep'/'Lifted'/'Many', respectively. -- -- When this strategy is used, it means that we have determined that -- the variables we are considering defaulting are all kind variables. -- -- Usually, we pass this option when -XNoPolyKinds is enabled. = DefaultKindVars -- | Default (or don't default) non-standard variables, of kinds -- 'RuntimeRep', 'Levity' and 'Multiplicity'. | NonStandardDefaulting NonStandardDefaultingStrategy defaultNonStandardTyVars :: DefaultingStrategy -> Bool defaultNonStandardTyVars DefaultKindVars = True defaultNonStandardTyVars (NonStandardDefaulting DefaultNonStandardTyVars) = True defaultNonStandardTyVars (NonStandardDefaulting TryNotToDefaultNonStandardTyVars) = False instance Outputable NonStandardDefaultingStrategy where ppr DefaultNonStandardTyVars = text "DefaultOnlyNonStandardTyVars" ppr TryNotToDefaultNonStandardTyVars = text "TryNotToDefaultNonStandardTyVars" instance Outputable DefaultingStrategy where ppr DefaultKindVars = text "DefaultKindVars" ppr (NonStandardDefaulting ns) = text "NonStandardDefaulting" <+> ppr ns ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/BreakInfo.hs0000644000000000000000000000054314472400113021136 0ustar0000000000000000-- | A module for the BreakInfo type. 
Used by both the GHC.Runtime.Eval and -- GHC.Runtime.Interpreter hierarchy, so put here to have a less deep module -- dependency tree module GHC.Types.BreakInfo (BreakInfo(..)) where import GHC.Prelude import GHC.Unit.Module data BreakInfo = BreakInfo { breakInfo_module :: Module , breakInfo_number :: Int } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/CompleteMatch.hs0000644000000000000000000000241414472400113022022 0ustar0000000000000000{-# LANGUAGE TypeApplications #-} -- | COMPLETE signature module GHC.Types.CompleteMatch where import GHC.Prelude import GHC.Core.TyCo.Rep import GHC.Types.Unique.DSet import GHC.Core.ConLike import GHC.Core.TyCon import GHC.Core.Type ( splitTyConApp_maybe ) import GHC.Utils.Outputable -- | A list of conlikes which represents a complete pattern match. -- These arise from @COMPLETE@ signatures. -- See also Note [Implementation of COMPLETE pragmas]. data CompleteMatch = CompleteMatch { cmConLikes :: UniqDSet ConLike -- ^ The set of `ConLike` values , cmResultTyCon :: Maybe TyCon -- ^ The optional, concrete result TyCon the set applies to } vanillaCompleteMatch :: UniqDSet ConLike -> CompleteMatch vanillaCompleteMatch cls = CompleteMatch { cmConLikes = cls, cmResultTyCon = Nothing } instance Outputable CompleteMatch where ppr (CompleteMatch cls mty) = case mty of Nothing -> ppr cls Just ty -> ppr cls <> text "@" <> parens (ppr ty) type CompleteMatches = [CompleteMatch] completeMatchAppliesAtType :: Type -> CompleteMatch -> Bool completeMatchAppliesAtType ty cm = all @Maybe ty_matches (cmResultTyCon cm) where ty_matches sig_tc | Just (tc, _arg_tys) <- splitTyConApp_maybe ty , tc == sig_tc = True | otherwise = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/CostCentre.hs0000644000000000000000000003053714472400113021355 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.CostCentre ( CostCentre(..), CcName, CCFlavour(..), -- All abstract except to friend: ParseIface.y CostCentreStack, CollectedCCs, 
emptyCollectedCCs, collectCC, currentCCS, dontCareCCS, isCurrentCCS, maybeSingletonCCS, mkUserCC, mkAutoCC, mkAllCafsCC, mkSingletonCCS, isCafCCS, isCafCC, isSccCountCC, sccAbleCC, ccFromThisModule, pprCostCentreCore, costCentreUserName, costCentreUserNameFS, costCentreSrcSpan, cmpCostCentre -- used for removing dups in a list ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Types.Var import GHC.Types.Name import GHC.Unit.Module import GHC.Types.Unique import GHC.Utils.Outputable import GHC.Types.SrcLoc import GHC.Data.FastString import GHC.Types.CostCentre.State import GHC.Utils.Panic.Plain import Data.Data ----------------------------------------------------------------------------- -- Cost Centres -- | A Cost Centre is a single @{-# SCC #-}@ annotation. data CostCentre = NormalCC { cc_flavour :: CCFlavour, -- ^ Two cost centres may have the same name and -- module but different SrcSpans, so we need a way to -- distinguish them easily and give them different -- object-code labels. So every CostCentre has an -- associated flavour that indicates how it was -- generated, and flavours that allow multiple instances -- of the same name and module have a deterministic 0-based -- index. cc_name :: CcName, -- ^ Name of the cost centre itself cc_mod :: Module, -- ^ Name of module defining this CC. cc_loc :: SrcSpan } | AllCafsCC { cc_mod :: Module, -- Name of module defining this CC. cc_loc :: SrcSpan } deriving Data type CcName = FastString -- | The flavour of a cost centre. -- -- Index fields represent 0-based indices giving source-code ordering of -- centres with the same module, name, and flavour. data CCFlavour = CafCC -- ^ Auto-generated top-level thunk | ExprCC !CostCentreIndex -- ^ Explicitly annotated expression | DeclCC !CostCentreIndex -- ^ Explicitly annotated declaration | HpcCC !CostCentreIndex -- ^ Generated by HPC for coverage | LateCC !CostCentreIndex -- ^ Annotated by the one of the prof-last* passes. 
deriving (Eq, Ord, Data)

-- | Extract the index from a flavour
flavourIndex :: CCFlavour -> Int
flavourIndex CafCC      = 0
flavourIndex (ExprCC x) = unCostCentreIndex x
flavourIndex (DeclCC x) = unCostCentreIndex x
flavourIndex (HpcCC x)  = unCostCentreIndex x
flavourIndex (LateCC x) = unCostCentreIndex x

instance Eq CostCentre where
  c1 == c2 = cmpCostCentre c1 c2 == EQ

instance Ord CostCentre where
  compare = cmpCostCentre

cmpCostCentre :: CostCentre -> CostCentre -> Ordering
cmpCostCentre (AllCafsCC {cc_mod = m1}) (AllCafsCC {cc_mod = m2})
  = m1 `compare` m2

cmpCostCentre NormalCC {cc_flavour = f1, cc_mod = m1, cc_name = n1}
              NormalCC {cc_flavour = f2, cc_mod = m2, cc_name = n2}
  -- first key is module name, then centre name, then flavour
  = mconcat [ m1 `compare` m2
            , n1 `lexicalCompareFS` n2 -- compare lexically to avoid non-determinism
            , f1 `compare` f2 ]

cmpCostCentre other_1 other_2
  = tag_CC other_1 `ordered` tag_CC other_2
  where
    -- tags can never be equal here: the equal-constructor cases
    -- are handled by the equations above
    ordered t1 t2 = if t1 < t2 then LT else GT

    tag_CC :: CostCentre -> Int
    tag_CC (NormalCC  {}) = 0
    tag_CC (AllCafsCC {}) = 1

-----------------------------------------------------------------------------
-- Predicates on CostCentre

isCafCC :: CostCentre -> Bool
isCafCC (AllCafsCC {})                  = True
isCafCC (NormalCC {cc_flavour = CafCC}) = True
isCafCC _                               = False

-- | Is this a cost-centre which records scc counts
isSccCountCC :: CostCentre -> Bool
isSccCountCC = not . isCafCC

-- | Is this a cost-centre which can be sccd ?
sccAbleCC :: CostCentre -> Bool
sccAbleCC = not . isCafCC

ccFromThisModule :: CostCentre -> Module -> Bool
ccFromThisModule cc m = cc_mod cc == m

-----------------------------------------------------------------------------
-- Building cost centres

mkUserCC :: FastString -> Module -> SrcSpan -> CCFlavour -> CostCentre
mkUserCC cc_name mod loc flavour
  = NormalCC { cc_name    = cc_name
             , cc_mod     = mod
             , cc_loc     = loc
             , cc_flavour = flavour }

mkAutoCC :: Id -> Module -> CostCentre
mkAutoCC id mod
  = NormalCC { cc_name    = str
             , cc_mod     = mod
             , cc_loc     = nameSrcSpan (getName id)
             , cc_flavour = CafCC }
  where
    name = getName id
    -- beware: only external names are guaranteed to have unique
    -- Occnames.  If the name is not external, we must append its
    -- Unique.
    -- See bug #249, tests prof001, prof002, also #2411
    str | isExternalName name = occNameFS (getOccName id)
        | otherwise           = occNameFS (getOccName id)
                                  `appendFS`
                                mkFastString ('_' : show (getUnique name))

mkAllCafsCC :: Module -> SrcSpan -> CostCentre
mkAllCafsCC m loc = AllCafsCC { cc_mod = m, cc_loc = loc }

-----------------------------------------------------------------------------
-- Cost Centre Stacks

-- | A Cost Centre Stack is something that can be attached to a closure.
-- This is either:
--
-- * the current cost centre stack (CCCS)
-- * a pre-defined cost centre stack (there are several
--   pre-defined CCSs, see below).
data CostCentreStack
  = CurrentCCS   -- Pinned on a let(rec)-bound
                 -- thunk/function/constructor, this says that the
                 -- cost centre to be attached to the object, when it
                 -- is allocated, is whatever is in the
                 -- current-cost-centre-stack register.

  | DontCareCCS  -- We need a CCS to stick in static closures
                 -- (for data), but we *don't* expect them to
                 -- accumulate any costs.  But we still need
                 -- the placeholder.  This CCS is it.
| SingletonCCS CostCentre deriving (Eq, Ord) -- needed for Ord on CLabel -- synonym for triple which describes the cost centre info in the generated -- code for a module. type CollectedCCs = ( [CostCentre] -- local cost-centres that need to be decl'd , [CostCentreStack] -- pre-defined "singleton" cost centre stacks ) emptyCollectedCCs :: CollectedCCs emptyCollectedCCs = ([], []) collectCC :: CostCentre -> CostCentreStack -> CollectedCCs -> CollectedCCs collectCC cc ccs (c, cs) = (cc : c, ccs : cs) currentCCS, dontCareCCS :: CostCentreStack currentCCS = CurrentCCS dontCareCCS = DontCareCCS ----------------------------------------------------------------------------- -- Predicates on Cost-Centre Stacks isCurrentCCS :: CostCentreStack -> Bool isCurrentCCS CurrentCCS = True isCurrentCCS _ = False isCafCCS :: CostCentreStack -> Bool isCafCCS (SingletonCCS cc) = isCafCC cc isCafCCS _ = False maybeSingletonCCS :: CostCentreStack -> Maybe CostCentre maybeSingletonCCS (SingletonCCS cc) = Just cc maybeSingletonCCS _ = Nothing mkSingletonCCS :: CostCentre -> CostCentreStack mkSingletonCCS cc = SingletonCCS cc ----------------------------------------------------------------------------- -- Printing Cost Centre Stacks. -- The outputable instance for CostCentreStack prints the CCS as a C -- expression. instance Outputable CostCentreStack where ppr CurrentCCS = text "CCCS" ppr DontCareCCS = text "CCS_DONT_CARE" ppr (SingletonCCS cc) = ppr cc <> text "_ccs" ----------------------------------------------------------------------------- -- Printing Cost Centres -- -- There are several different ways in which we might want to print a -- cost centre: -- -- - the name of the cost centre, for profiling output (a C string) -- - the label, i.e. C label for cost centre in .hc file. -- - the debugging name, for output in -ddump things -- - the interface name, for printing in _scc_ exprs in iface files. -- -- The last 3 are derived from costCentreStr below. 
-- The first is given
-- by costCentreName.

instance Outputable CostCentre where
  ppr cc = getPprStyle $ \ sty ->
           if codeStyle sty
           then ppCostCentreLbl cc
           else text (costCentreUserName cc)

-- Printing in Core
pprCostCentreCore :: CostCentre -> SDoc
pprCostCentreCore (AllCafsCC {cc_mod = m})
  = text "__sccC" <+> braces (ppr m)
pprCostCentreCore (NormalCC {cc_flavour = flavour, cc_name = n,
                             cc_mod = m, cc_loc = loc})
  = text "__scc" <+> braces (hsep [
        ppr m <> char '.' <> ftext n,
        pprFlavourCore flavour,
        whenPprDebug (ppr loc)
    ])

-- ^ Print a flavour in Core
pprFlavourCore :: CCFlavour -> SDoc
pprFlavourCore CafCC = text "__C"
pprFlavourCore f     = pprIdxCore $ flavourIndex f

-- ^ Print a flavour's index in Core
pprIdxCore :: Int -> SDoc
pprIdxCore 0   = empty
pprIdxCore idx = whenPprDebug $ ppr idx

-- Printing as a C label
ppCostCentreLbl :: CostCentre -> SDoc
ppCostCentreLbl (AllCafsCC {cc_mod = m}) = ppr m <> text "_CAFs_cc"
ppCostCentreLbl (NormalCC {cc_flavour = f, cc_name = n, cc_mod = m})
  = ppr m <> char '_' <> ztext (zEncodeFS n) <> char '_' <>
    ppFlavourLblComponent f <> text "_cc"

-- ^ Print the flavour component of a C label
ppFlavourLblComponent :: CCFlavour -> SDoc
ppFlavourLblComponent CafCC      = text "CAF"
ppFlavourLblComponent (ExprCC i) = text "EXPR" <> ppIdxLblComponent i
ppFlavourLblComponent (DeclCC i) = text "DECL" <> ppIdxLblComponent i
ppFlavourLblComponent (HpcCC i)  = text "HPC" <> ppIdxLblComponent i
ppFlavourLblComponent (LateCC i) = text "LATECC" <> ppIdxLblComponent i

-- ^ Print the flavour index component of a C label
-- (an index of 0 prints as nothing at all)
ppIdxLblComponent :: CostCentreIndex -> SDoc
ppIdxLblComponent n =
  case unCostCentreIndex n of
    0 -> empty
    n -> ppr n

-- This is the name to go in the user-displayed string,
-- recorded in the cost centre declaration
costCentreUserName :: CostCentre -> String
costCentreUserName = unpackFS . costCentreUserNameFS

-- | Like 'costCentreUserName' but keeps the 'FastString'; CAF cost
-- centres get a "CAF"/"CAF:" prefix in the displayed name.
costCentreUserNameFS :: CostCentre -> FastString
costCentreUserNameFS (AllCafsCC {}) = mkFastString "CAF"
costCentreUserNameFS (NormalCC {cc_name = name, cc_flavour = is_caf})
  = case is_caf of
      CafCC -> mkFastString "CAF:" `appendFS` name
      _     -> name

costCentreSrcSpan :: CostCentre -> SrcSpan
costCentreSrcSpan = cc_loc

instance Binary CCFlavour where
    -- Tag bytes 0..4 pick the constructor; 'get' below must stay in
    -- sync with this numbering.
    put_ bh CafCC = putByte bh 0
    put_ bh (ExprCC i) = do
            putByte bh 1
            put_ bh i
    put_ bh (DeclCC i) = do
            putByte bh 2
            put_ bh i
    put_ bh (HpcCC i) = do
            putByte bh 3
            put_ bh i
    put_ bh (LateCC i) = do
            putByte bh 4
            put_ bh i
    get bh = do
            h <- getByte bh
            case h of
              0 -> return CafCC
              1 -> ExprCC <$> get bh
              2 -> DeclCC <$> get bh
              3 -> HpcCC <$> get bh
              4 -> LateCC <$> get bh
              _ -> panic "Invalid CCFlavour"

instance Binary CostCentre where
    put_ bh (NormalCC aa ab ac _ad) = do
            putByte bh 0
            put_ bh aa
            put_ bh ab
            put_ bh ac
    put_ bh (AllCafsCC ae _af) = do
            putByte bh 1
            put_ bh ae
    get bh = do
            h <- getByte bh
            case h of
              0 -> do aa <- get bh
                      ab <- get bh
                      ac <- get bh
                      return (NormalCC aa ab ac noSrcSpan)
              _ -> do ae <- get bh
                      return (AllCafsCC ae noSrcSpan)

-- We ignore the SrcSpans in CostCentres when we serialise them,
-- and set the SrcSpans to noSrcSpan when deserialising.  This is
-- ok, because we only need the SrcSpan when declaring the
-- CostCentre in the original module, it is not used by importing
-- modules.
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/CostCentre/State.hs0000644000000000000000000000223214472400113022424 0ustar0000000000000000
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}

module GHC.Types.CostCentre.State
    ( CostCentreState
    , newCostCentreState
    , CostCentreIndex
    , unCostCentreIndex
    , getCCIndex
    ) where

import GHC.Prelude
import GHC.Data.FastString
import GHC.Data.FastString.Env

import Data.Data
import GHC.Utils.Binary

-- | Per-module state for tracking cost centre indices.
--
-- See documentation of 'GHC.Types.CostCentre.cc_flavour' for more details.
-- Maps cost-centre names to the next free index for that name.
newtype CostCentreState = CostCentreState (FastStringEnv Int)

-- | Initialize cost centre state.
newCostCentreState :: CostCentreState
newCostCentreState = CostCentreState emptyFsEnv

-- | An index into a given cost centre module,name,flavour set
newtype CostCentreIndex = CostCentreIndex { unCostCentreIndex :: Int }
  deriving (Eq, Ord, Data, Binary)

-- | Get a new index for a given cost centre name.
-- The first request for a name yields index 0; the stored counter for
-- that name is then bumped so the next request yields 1, and so on.
getCCIndex :: FastString
           -> CostCentreState
           -> (CostCentreIndex, CostCentreState)
getCCIndex nm (CostCentreState m) =
    (CostCentreIndex idx, CostCentreState m')
  where
    m_idx = lookupFsEnv m nm
    idx = maybe 0 id m_idx
    m' = extendFsEnv m nm (idx + 1)
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Cpr.hs0000644000000000000000000001647614472400113020036 0ustar0000000000000000
{-# LANGUAGE GeneralisedNewtypeDeriving #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE PatternSynonyms #-}

-- | Types for the Constructed Product Result lattice.
-- "GHC.Core.Opt.CprAnal" and "GHC.Core.Opt.WorkWrap.Utils"
-- are its primary customers via 'GHC.Types.Id.idCprSig'.
module GHC.Types.Cpr (
    Cpr (ConCpr), topCpr, botCpr, flatConCpr, asConCpr,
    CprType (..), topCprType, botCprType, flatConCprType,
    lubCprType, applyCprTy, abstractCprTy, trimCprTy,
    UnpackConFieldsResult (..), unpackConFieldsCpr,
    CprSig (..), topCprSig, isTopCprSig, mkCprSigForArity, mkCprSig,
    seqCprSig
  ) where

import GHC.Prelude

import GHC.Core.DataCon
import GHC.Types.Basic
import GHC.Utils.Binary
import GHC.Utils.Misc
import GHC.Utils.Outputable
import GHC.Utils.Panic

--
-- * Cpr
--

data Cpr
  = BotCpr
  | ConCpr_ !ConTag ![Cpr]
  -- ^ The number of field Cprs equals 'dataConRepArity'.
  -- If all of them are top, better use 'FlatConCpr', as ensured by the pattern
  -- synonym 'ConCpr'.
  | FlatConCpr !ConTag
  -- ^ @FlatConCpr tag@ is an efficient encoding for @'ConCpr_' tag [TopCpr..]@.
  -- Purely for compiler perf. Can be constructed with 'ConCpr'.
  | TopCpr
  deriving Eq

-- Smart-constructor pattern: building @ConCpr t cs@ collapses to
-- 'FlatConCpr' when every field Cpr is 'TopCpr'.
pattern ConCpr :: ConTag -> [Cpr] -> Cpr
pattern ConCpr t cs <- ConCpr_ t cs where
  ConCpr t cs
    | all (== TopCpr) cs = FlatConCpr t
    | otherwise          = ConCpr_ t cs

{-# COMPLETE BotCpr, TopCpr, FlatConCpr, ConCpr #-}

-- Extract the constructor tag of a 'FlatConCpr' or 'ConCpr', if any.
viewConTag :: Cpr -> Maybe ConTag
viewConTag (FlatConCpr t) = Just t
viewConTag (ConCpr t _)   = Just t
viewConTag _              = Nothing
{-# INLINE viewConTag #-}

-- Least upper bound on 'Cpr'. 'BotCpr' is identity; matching tags are
-- merged field-wise; everything else falls through to 'TopCpr'.
-- NB: equation order matters; later equations assume earlier ones failed.
lubCpr :: Cpr -> Cpr -> Cpr
lubCpr BotCpr cpr = cpr
lubCpr cpr BotCpr = cpr
lubCpr (FlatConCpr t1) (viewConTag -> Just t2)
  | t1 == t2 = FlatConCpr t1
lubCpr (viewConTag -> Just t1) (FlatConCpr t2)
  | t1 == t2 = FlatConCpr t2
lubCpr (ConCpr t1 cs1) (ConCpr t2 cs2)
  | t1 == t2 = ConCpr t1 (lubFieldCprs cs1 cs2)
lubCpr _ _ = TopCpr

-- Zip two field lists with 'lubCpr'; a length mismatch yields [] (no
-- field info).
lubFieldCprs :: [Cpr] -> [Cpr] -> [Cpr]
lubFieldCprs as bs
  | as `equalLength` bs = zipWith lubCpr as bs
  | otherwise           = []

topCpr :: Cpr
topCpr = TopCpr

botCpr :: Cpr
botCpr = BotCpr

flatConCpr :: ConTag -> Cpr
flatConCpr t = FlatConCpr t

-- Discard all constructor info, keeping only bottom-ness.
trimCpr :: Cpr -> Cpr
trimCpr BotCpr = botCpr
trimCpr _      = topCpr

-- | View a 'Cpr' as a constructed result, if it is one.
-- A 'FlatConCpr' yields an empty field list.
asConCpr :: Cpr -> Maybe (ConTag, [Cpr])
asConCpr (ConCpr t cs)  = Just (t, cs)
asConCpr (FlatConCpr t) = Just (t, [])
asConCpr TopCpr         = Nothing
asConCpr BotCpr         = Nothing

-- Force a 'Cpr' (and its fields) to normal form.
seqCpr :: Cpr -> ()
seqCpr (ConCpr _ cs) = foldr (seq . seqCpr) () cs
seqCpr _             = ()

--
-- * CprType
--

-- | The abstract domain \(A_t\) from the original 'CPR for Haskell' paper.
data CprType = CprType { ct_arty :: !Arity -- ^ Number of value arguments the denoted expression -- eats before returning the 'ct_cpr' , ct_cpr :: !Cpr -- ^ 'Cpr' eventually unleashed when applied to -- 'ct_arty' arguments } instance Eq CprType where a == b = ct_cpr a == ct_cpr b && (ct_arty a == ct_arty b || ct_cpr a == topCpr) topCprType :: CprType topCprType = CprType 0 topCpr botCprType :: CprType botCprType = CprType 0 botCpr flatConCprType :: ConTag -> CprType flatConCprType con_tag = CprType { ct_arty = 0, ct_cpr = flatConCpr con_tag } lubCprType :: CprType -> CprType -> CprType lubCprType ty1@(CprType n1 cpr1) ty2@(CprType n2 cpr2) -- The arity of bottom CPR types can be extended arbitrarily. | cpr1 == botCpr && n1 <= n2 = ty2 | cpr2 == botCpr && n2 <= n1 = ty1 -- There might be non-bottom CPR types with mismatching arities. -- Consider test DmdAnalGADTs. We want to return top in these cases. | n1 == n2 = CprType n1 (lubCpr cpr1 cpr2) | otherwise = topCprType applyCprTy :: CprType -> Arity -> CprType applyCprTy (CprType n res) k | n >= k = CprType (n-k) res | res == botCpr = botCprType | otherwise = topCprType abstractCprTy :: CprType -> CprType abstractCprTy (CprType n res) | res == topCpr = topCprType | otherwise = CprType (n+1) res trimCprTy :: CprType -> CprType trimCprTy (CprType arty res) = CprType arty (trimCpr res) -- | The result of 'unpackConFieldsCpr'. data UnpackConFieldsResult = AllFieldsSame !Cpr | ForeachField ![Cpr] -- | Unpacks a 'ConCpr'-shaped 'Cpr' and returns the field 'Cpr's wrapped in a -- 'ForeachField'. Otherwise, it returns 'AllFieldsSame' with the appropriate -- 'Cpr' to assume for each field. -- -- The use of 'UnpackConFieldsResult' allows O(1) space for the common, -- non-'ConCpr' case. 
unpackConFieldsCpr :: DataCon -> Cpr -> UnpackConFieldsResult
unpackConFieldsCpr dc (ConCpr t cs)
  -- Only use the field Cprs when both the tag and the field count
  -- actually match the given DataCon.
  | t == dataConTag dc, cs `lengthIs` dataConRepArity dc
  = ForeachField cs
unpackConFieldsCpr _ BotCpr = AllFieldsSame BotCpr
unpackConFieldsCpr _ _      = AllFieldsSame TopCpr
{-# INLINE unpackConFieldsCpr #-}

-- Force a 'CprType' to normal form.
seqCprTy :: CprType -> ()
seqCprTy (CprType _ cpr) = seqCpr cpr

-- | The arity of the wrapped 'CprType' is the arity at which it is safe
-- to unleash. See Note [Understanding DmdType and DmdSig] in "GHC.Types.Demand"
newtype CprSig = CprSig { getCprSig :: CprType }
  deriving (Eq, Binary)

-- | Turns a 'CprType' computed for the particular 'Arity' into a 'CprSig'
-- unleashable at that arity. See Note [Understanding DmdType and DmdSig] in
-- "GHC.Types.Demand"
mkCprSigForArity :: Arity -> CprType -> CprSig
mkCprSigForArity arty ty@(CprType n _)
  | arty /= n = topCprSig  -- Trim on arity mismatch
  | otherwise = CprSig ty

topCprSig :: CprSig
topCprSig = CprSig topCprType

isTopCprSig :: CprSig -> Bool
isTopCprSig (CprSig ty) = ct_cpr ty == topCpr

mkCprSig :: Arity -> Cpr -> CprSig
mkCprSig arty cpr = CprSig (CprType arty cpr)

seqCprSig :: CprSig -> ()
seqCprSig (CprSig ty) = seqCprTy ty

-- | BNF:
--
-- > cpr ::= ''                               -- TopCpr
-- >      |  n                                -- FlatConCpr n
-- >      |  n '(' cpr1 ',' cpr2 ',' ... ')'  -- ConCpr n [cpr1,cpr2,...]
-- >      |  'b'                              -- BotCpr
--
-- Examples:
--   * `f x = f x` has result CPR `b`
--   * `1(1,)` is a valid (nested) 'Cpr' denotation for `(I# 42#, f 42)`.
instance Outputable Cpr where
  ppr TopCpr         = empty
  ppr (FlatConCpr n) = int n
  ppr (ConCpr n cs)  = int n <> parens (pprWithCommas ppr cs)
  ppr BotCpr         = char 'b'

-- | BNF:
--
-- > cpr_ty ::= cpr               -- short form if arty == 0
-- >         |  '\' arty '.' cpr  -- if arty > 0
--
-- Examples:
--   * `f x y z = f x y z` has denotation `\3.b`
--   * `g !x = (x+1, x+2)` has denotation `\1.1(1,1)`.
instance Outputable CprType where
  ppr (CprType arty res)
    | 0 <- arty = ppr res
    | otherwise = char '\\' <> ppr arty <> char '.' <> ppr res

-- | Only print the CPR result
instance Outputable CprSig where
  ppr (CprSig ty) = ppr (ct_cpr ty)

instance Binary Cpr where
  -- Tag bytes 0..3 pick the constructor; 'get' must stay in sync.
  put_ bh TopCpr         = putByte bh 0
  put_ bh BotCpr         = putByte bh 1
  put_ bh (FlatConCpr n) = putByte bh 2 *> put_ bh n
  put_ bh (ConCpr n cs)  = putByte bh 3 *> put_ bh n *> put_ bh cs
  get  bh = do
    h <- getByte bh
    case h of
      0 -> return TopCpr
      1 -> return BotCpr
      2 -> FlatConCpr <$> get bh
      3 -> ConCpr <$> get bh <*> get bh
      _ -> pprPanic "Binary Cpr: Invalid tag" (int (fromIntegral h))

instance Binary CprType where
  put_ bh (CprType arty cpr) = put_ bh arty *> put_ bh cpr
  get  bh                    = CprType <$> get bh <*> get bh
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Demand.hs0000644000000000000000000032105614472400113020473 0ustar0000000000000000
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE BinaryLiterals #-}
{-# LANGUAGE PatternSynonyms #-}

{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-}

{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
-}

-- | A language to express the evaluation context of an expression as a
-- 'Demand' and track how an expression evaluates free variables and arguments
-- in turn as a 'DmdType'.
--
-- Lays out the abstract domain for "GHC.Core.Opt.DmdAnal".
module GHC.Types.Demand ( -- * Demands Boxity(..), Card(C_00, C_01, C_0N, C_10, C_11, C_1N), CardNonAbs, CardNonOnce, Demand(AbsDmd, BotDmd, (:*)), SubDemand(Prod, Poly), mkProd, viewProd, -- ** Algebra absDmd, topDmd, botDmd, seqDmd, topSubDmd, -- *** Least upper bound lubCard, lubDmd, lubSubDmd, -- *** Plus plusCard, plusDmd, plusSubDmd, -- *** Multiply multCard, multDmd, multSubDmd, -- ** Predicates on @Card@inalities and @Demand@s isAbs, isUsedOnce, isStrict, isAbsDmd, isUsedOnceDmd, isStrUsedDmd, isStrictDmd, isTopDmd, isWeakDmd, onlyBoxedArguments, -- ** Special demands evalDmd, -- *** Demands used in PrimOp signatures lazyApply1Dmd, lazyApply2Dmd, strictOnceApply1Dmd, strictManyApply1Dmd, -- ** Other @Demand@ operations oneifyCard, oneifyDmd, strictifyDmd, strictifyDictDmd, lazifyDmd, peelCallDmd, peelManyCalls, mkCalledOnceDmd, mkCalledOnceDmds, mkWorkerDemand, -- ** Extracting one-shot information argOneShots, argsOneShots, saturatedByOneShots, -- ** Manipulating Boxity of a Demand unboxDeeplyDmd, -- * Divergence Divergence(..), topDiv, botDiv, exnDiv, lubDivergence, isDeadEndDiv, -- * Demand environments DmdEnv(..), addVarDmdEnv, mkTermDmdEnv, nopDmdEnv, plusDmdEnv, plusDmdEnvs, reuseEnv, -- * Demand types DmdType(..), dmdTypeDepth, -- ** Algebra nopDmdType, botDmdType, lubDmdType, plusDmdType, multDmdType, discardArgDmds, -- ** Other operations peelFV, findIdDemand, addDemand, splitDmdTy, deferAfterPreciseException, -- * Demand signatures DmdSig(..), mkDmdSigForArity, mkClosedDmdSig, splitDmdSig, dmdSigDmdEnv, hasDemandEnvSig, nopSig, botSig, isNopSig, isDeadEndSig, isDeadEndAppSig, trimBoxityDmdSig, -- ** Handling arity adjustments prependArgsDmdSig, etaConvertDmdSig, -- * Demand transformers from demand signatures DmdTransformer, dmdTransformSig, dmdTransformDataConSig, dmdTransformDictSelSig, -- * Trim to a type shape TypeShape(..), trimToType, trimBoxity, -- * @seq@ing stuff seqDemand, seqDemandList, seqDmdType, seqDmdSig, -- * Zapping usage 
information zapUsageDemand, zapDmdEnvSig, zapUsedOnceDemand, zapUsedOnceSig ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Var.Env import GHC.Types.Unique.FM import GHC.Types.Basic import GHC.Data.Maybe ( orElse ) import GHC.Core.Type ( Type ) import GHC.Core.TyCon ( isNewTyCon, isClassTyCon ) import GHC.Core.DataCon ( splitDataProductType_maybe, StrictnessMark, isMarkedStrict ) import GHC.Core.Multiplicity ( scaledThing ) import GHC.Utils.Binary import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.Coerce (coerce) import Data.Function import GHC.Utils.Trace _ = pprTrace -- Tired of commenting out the import all the time {- ************************************************************************ * * Boxity: Whether the box of something is used * * ************************************************************************ -} {- Note [Strictness and Unboxing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If an argument is used strictly by the function body, we may use use call-by-value instead of call-by-need for that argument. What's more, we may unbox an argument that is used strictly, discarding the box at the call site. This can reduce allocations of the program drastically if the box really isn't needed in the function body. Here's an example: ``` even :: Int -> Bool even (I# 0) = True even (I# 1) = False even (I# n) = even (I# (n -# 2)) ``` All three code paths of 'even' are (a) strict in the argument, and (b) immediately discard the boxed 'Int'. Now if we have a call site like `even (I# 42)`, then it would be terrible to allocate the 'I#' box for the argument only to tear it apart immediately in the body of 'even'! 
Hence, worker/wrapper will allocate a wrapper for 'even' that not only uses call-by-value for the argument (e.g., `case I# 42 of b { $weven b }`), but also *unboxes* the argument, resulting in ``` even :: Int -> Bool even (I# n) = $weven n $weven :: Int# -> Bool $weven 0 = True $weven 1 = False $weven n = $weven (n -# 2) ``` And now the box in `even (I# 42)` will cancel away after inlining the wrapper. As far as the permission to unbox is concerned, *evaluatedness* of the argument is the important trait. Unboxing implies eager evaluation of an argument and we don't want to change the termination properties of the function. One way to ensure that is to unbox strict arguments only, but strictness is only a sufficient condition for evaluatedness. See Note [Unboxing evaluated arguments] in "GHC.Core.Opt.DmdAnal", where we manage to unbox *strict fields* of unboxed arguments that the function is not actually strict in, simply by realising that those fields have to be evaluated. Note [Boxity analysis] ~~~~~~~~~~~~~~~~~~~~~~ Alas, we don't want to unbox *every* strict argument (as Note [Strictness and Unboxing] might suggest). Here's an example (from T19871): ``` data Huge = H Bool Bool ... Bool ann :: Huge -> (Bool, Huge) ann h@(Huge True _ ... _) = (False, h) ann h = (True, h) ``` Unboxing 'h' yields ``` $wann :: Bool -> Bool -> ... -> Bool -> (Bool, Huge) $wann True b2 ... bn = (False, Huge True b2 ... bn) $wann b1 b2 ... bn = (True, Huge b1 b2 ... bn) ``` The pair constructor really needs its fields boxed. But '$wann' doesn't get passed 'h' anymore, only its components! Ergo it has to reallocate the 'Huge' box, in a process called "reboxing". After w/w, call sites like `case ... of Just h -> ann h` pay for the allocation of the additional box. In earlier versions of GHC we simply accepted that reboxing would sometimes happen, but we found some cases where it made a big difference: #19407, for example. 
We therefore perform a simple syntactic boxity analysis that piggy-backs on demand analysis in order to determine whether the box of a strict argument is always discarded in the function body, in which case we can pass it unboxed without risking regressions such as in 'ann' above. But as soon as one use needs the box, we want Boxed to win over any Unboxed uses. The demand signature (cf. Note [Demand notation]) will say whether it uses its arguments boxed or unboxed. Indeed it does so for every sub-component of the argument demand. Here's an example: ``` f :: (Int, Int) -> Bool f (a, b) = even (a + b) -- demand signature: <1!P(1!L,1!L)> ``` The '!' indicates places where we want to unbox, the lack thereof indicates the box is used by the function. Boxity flags are part of the 'Poly' and 'Prod' 'SubDemand's, see Note [Why Boxity in SubDemand and not in Demand?]. The given demand signature says "Unbox the pair and then nestedly unbox its two fields". By contrast, the demand signature of 'ann' above would look like <1P(1L,L,...,L)>, lacking any '!'. A demand signature like <1P(1!L)> -- Boxed outside but Unboxed in the field -- doesn't make a lot of sense, as we can never unbox the field without unboxing the containing record. See Note [Finalising boxity for demand signatures] in "GHC.Core.Opt.DmdAnal" for how we avoid to spread this and other kinds of misinformed boxities. Due to various practical reasons, Boxity Analysis is not conservative at times. Here are reasons for too much optimism: * Note [Function body boxity and call sites] is an observation about when it is beneficial to unbox a parameter that is returned from a function. Note [Unboxed demand on function bodies returning small products] derives a heuristic from the former Note, pretending that all call sites of a function need returned small products Unboxed. 
* Note [Boxity for bottoming functions] in DmdAnal makes all bottoming functions unbox their arguments, incurring reboxing in code paths that will diverge anyway. In turn we get more unboxing in hot code paths. Boxity analysis fixes a number of issues: #19871, #19407, #4267, #16859, #18907, #13331 Note [Function body boxity and call sites] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider (from T5949) ``` f n p = case n of 0 -> p :: (a, b) _ -> f (n-1) p -- Worker/wrapper split if we decide to unbox: $wf n x y = case n of 0 -> (# x, y #) _ -> $wf (n-1) x y f n (x,y) = case $wf n x y of (# r, s #) -> (r,s) ``` When is it better to /not/ to unbox 'p'? That depends on the callers of 'f'! If all call sites 1. Wouldn't need to allocate fresh boxes for 'p', and 2. Needed the result pair of 'f' boxed Only then we'd see an increase in allocation resulting from unboxing. But as soon as only one of (1) or (2) holds, it really doesn't matter if 'f' unboxes 'p' (and its result, it's important that CPR follows suit). For example ``` res = ... case f m (field t) of (r1,r2) -> ... -- (1) holds arg = ... [ f m (x,y) ] ... -- (2) holds ``` Because one of the boxes in the call site can cancel away: ``` res = ... case field1 t of (x1,x2) -> case field2 t of (y1,y2) -> case $wf x1 x2 y1 y2 of (#r1,r2#) -> ... arg = ... [ case $wf x1 x2 y1 y2 of (#r1,r2#) -> (r1,r2) ] ... ``` And when call sites neither have arg boxes (1) nor need the result boxed (2), then hesitating to unbox means /more/ allocation in the call site because of the need for fresh argument boxes. Summary: If call sites that satisfy both (1) and (2) occur more often than call sites that satisfy neither condition, then it's best /not/ to unbox 'p'. Note [Unboxed demand on function bodies returning small products] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note [Boxity analysis] achieves its biggest wins when we avoid reboxing huge records. 
But when we return small products from a function, we often get faster programs by pretending that the caller unboxes the result. Long version: Observation: Big record arguments (e.g., DynFlags) tend to be modified much less frequently than small records (e.g., Int). Result: Big records tend to be passed around boxed (unmodified) much more frequently than small records. Consequnce: The larger the record, the more likely conditions (1) and (2) from Note [Function body boxity and call sites] are met, in which case unboxing returned parameters leads to reboxing. So we put an Unboxed demand on function bodies returning small products and a Boxed demand on the others. What is regarded a small product is controlled by the -fdmd-unbox-width flag. This also manages to unbox functions like ``` sum z [] = z sum (I# n) ((I# x):xs) = sum (I# (n +# x)) xs ``` where we can unbox 'z' on the grounds that it's but a small box anyway. That in turn means that the I# allocation in the recursive call site can cancel away and we get a non-allocating loop, nice and tight. Note that this is the typical case in "Observation" above: A small box is unboxed, modified, the result reboxed for the recursive call. Originally, this came up in binary-trees' check' function and #4267 which (similarly) features a strict fold over a tree. We'd also regress in join004 and join007 if we didn't assume an optimistic Unboxed demand on the function body. T17932 features a (non-recursive) function that returns a large record, e.g., ``` flags (Options f x) = `seq` f ``` and here we won't unbox 'f' because it has 5 fields (which is larger than the default -fdmd-unbox-width threshold). Why not focus on putting Unboxed demands on *all recursive* function? Then we'd unbox ``` flags 0 (Options f x) = `seq` f flags n o = flags (n-1) o ``` and that seems hardly useful. (NB: Similar to 'f' from Note [Preserving Boxity of results is rarely a win], but there we only had 2 fields.) 
What about the Boxity of *fields* of a small, returned box? Consider ``` sumIO :: Int -> Int -> IO Int sumIO 0 !z = return z -- What DmdAnal sees: sumIO 0 z s = z `seq` (# s, z #) sumIO n !z = sumIO (n-1) (z+n) ``` We really want 'z' to unbox here. Yet its use in the returned unboxed pair is fundamentally a Boxed one! CPR would manage to unbox it, but DmdAnal runs before that. There is an Unboxed use in the recursive call to 'go' though. But 'IO Int' returns a small product, and 'Int' is a small product itself. So we'll put the RHS of 'sumIO' under sub-demand '!P(L,L!P(L))', indicating that *if* we evaluate 'z', we don't need the box later on. And indeed the bang will evaluate `z`, so we conclude with a total demand of `1!P(L)` on `z` and unbox it. Unlike for recursive functions, where we can often speed up the loop by unboxing at the cost of a bit of reboxing in the base case, the wins for non-recursive functions quickly turn into losses when unboxing too deeply. That happens in T11545, T18109 and T18174. Therefore, we deeply unbox recursive function bodies but only shallowly unbox non-recursive function bodies (governed by the max_depth variable). The implementation is in 'GHC.Core.Opt.DmdAnal.unboxWhenSmall'. It is quite vital, guarding for regressions in test cases like #2387, #3586, #16040, #5075 and #19871. Note that this is fundamentally working around a phase problem, namely that the results of boxity analysis depend on CPR analysis (and vice versa, of course). Note [unboxedWins] ~~~~~~~~~~~~~~~~~~ We used to use '_unboxedWins' below in 'lubBoxity', which was too optimistic. While it worked around some shortcomings of the phase separation between Boxity analysis and CPR analysis, it was a gross hack which caused regressions itself that needed all kinds of fixes and workarounds. 
Examples (from #21119): * As #20767 says, L and B were no longer top and bottom of our lattice * In #20746 we unboxed huge Handle types that were never needed boxed in the first place. See Note [deferAfterPreciseException]. * It also caused unboxing of huge records where we better shouldn't, for example in T19871.absent. * It became impossible to work with when implementing !7599, mostly due to the chaos that results from #20767. Conclusion: We should use 'boxedWins' in 'lubBoxity', #21119. Fortunately, we could come up with a number of better mechanisms to make up for the sometimes huge regressions that would have otherwise incured: 1. A beefed up Note [Unboxed demand on function bodies returning small products] that works recursively fixes most regressions. It's a bit unsound, but pretty well-behaved. 2. We saw bottoming functions spoil boxity in some less severe cases and countered that with Note [Boxity for bottoming functions]. -} boxedWins :: Boxity -> Boxity -> Boxity boxedWins Unboxed Unboxed = Unboxed boxedWins _ !_ = Boxed _unboxedWins :: Boxity -> Boxity -> Boxity -- See Note [unboxedWins] _unboxedWins Boxed Boxed = Boxed _unboxedWins _ !_ = Unboxed lubBoxity :: Boxity -> Boxity -> Boxity -- See Note [Boxity analysis] for the lattice. lubBoxity = boxedWins plusBoxity :: Boxity -> Boxity -> Boxity plusBoxity = boxedWins {- ************************************************************************ * * Card: Combining Strictness and Usage * * ************************************************************************ -} {- Note [Evaluation cardinalities] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The demand analyser uses an (abstraction of) /evaluation cardinality/ of type Card, to specify how many times a term is evaluated. A Card C_lu represents an /interval/ of possible cardinalities [l..u], meaning * Evaluated /at least/ 'l' times (strictness). Hence 'l' is either 0 (lazy) or 1 (strict) * Evaluated /at most/ 'u' times (usage). 
Hence 'u' is either 0 (not used at all), or 1 (used at most once) or n (no information) Intervals describe sets, so the underlying lattice is the powerset lattice. Usually l<=u, but we also have C_10, the interval [1,0], the empty interval, denoting the empty set. This is the bottom element of the lattice. See Note [Demand notation] for the notation we use for each of the constructors. Note [Bit vector representation for Card] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While the 6 inhabitants of Card admit an efficient representation as an enumeration, implementing operations such as lubCard, plusCard and multCard leads to unreasonably bloated code. This was the old defn for lubCard, for example: -- Handle C_10 (bot) lubCard C_10 n = n -- bot lubCard n C_10 = n -- bot -- Handle C_0N (top) lubCard C_0N _ = C_0N -- top lubCard _ C_0N = C_0N -- top -- Handle C_11 lubCard C_00 C_11 = C_01 -- {0} ∪ {1} = {0,1} lubCard C_11 C_00 = C_01 -- {0} ∪ {1} = {0,1} lubCard C_11 n = n -- {1} is a subset of all other intervals lubCard n C_11 = n -- {1} is a subset of all other intervals -- Handle C_1N lubCard C_1N C_1N = C_1N -- reflexivity lubCard _ C_1N = C_0N -- {0} ∪ {1,n} = top lubCard C_1N _ = C_0N -- {0} ∪ {1,n} = top -- Handle C_01 lubCard C_01 _ = C_01 -- {0} ∪ {0,1} = {0,1} lubCard _ C_01 = C_01 -- {0} ∪ {0,1} = {0,1} -- Handle C_00 lubCard C_00 C_00 = C_00 -- reflexivity There's a much more compact way to encode these operations if Card is represented not as distinctly denoted intervals, but as the subset of the set of all cardinalities {0,1,n} instead. We represent such a subset as a bit vector of length 3 (which fits in an Int). That's actually pretty common for such powerset lattices. There's one bit per denoted cardinality that is set iff that cardinality is part of the denoted set, with n being the most significand bit (index 2) and 0 being represented by the least significand bit (index 0). How does that help? 
Well, for one, lubCard just becomes lubCard (Card a) (Card b) = Card (a .|. b) The other operations, 'plusCard' and 'multCard', become significantly more tricky, but immensely more compact. It's all straight-line code with a few bit twiddling instructions now! Note [Algebraic specification for plusCard and multCard] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The representation change in Note [Bit vector representation for Card] admits very dense definitions of 'plusCard' and 'multCard' in terms of bit twiddling, but the connection to the algebraic operations they implement is lost. It's helpful to have a written specification of what 'plusCard' and 'multCard' here that says what they should compute. * plusCard: a@[l1,u1] + b@[l2,u2] = r@[l1+l2,u1+u2]. - In terms of sets, 0 ∈ r iff 0 ∈ a and 0 ∈ b. Examples: set in C_00 + C_00, C_01 + C_0N, but not in C_10 + C_00 - In terms of sets, 1 ∈ r iff 1 ∈ a or 1 ∈ b. Examples: set in C_01 + C_00, C_0N + C_0N, but not in C_10 + C_00 - In terms of sets, n ∈ r iff n ∈ a or n ∈ b, or (1 ∈ a and 1 ∈ b), so not unlike add with carry. Examples: set in C_01 + C_01, C_01 + C_0N, but not in C_10 + C_01 - Handy special cases: o 'plusCard C_10' bumps up the strictness of its argument, just like 'lubCard C_00' lazifies it, without touching upper bounds. o Similarly, 'plusCard C_0N' discards usage information (incl. absence) but leaves strictness alone. * multCard: a@[l1,u1] * b@[l2,u2] = r@[l1*l2,u1*u2]. - In terms of sets, 0 ∈ r iff 0 ∈ a or 0 ∈ b. Examples: set in C_00 * C_10, C_01 * C_1N, but not in C_10 * C_1N - In terms of sets, 1 ∈ r iff 1 ∈ a and 1 ∈ b. Examples: set in C_01 * C_01, C_01 * C_1N, but not in C_11 * C_10 - In terms of sets, n ∈ r iff 1 ∈ r and (n ∈ a or n ∈ b). Examples: set in C_1N * C_01, C_1N * C_0N, but not in C_10 * C_1N - Handy special cases: o 'multCard C_1N c' is the same as 'plusCard c c' and drops used-once info. But unlike 'plusCard C_0N', it leaves absence and strictness. 
          o 'multCard C_01' drops strictness info, like 'lubCard C_00'.
          o 'multCard C_0N' does both; it discards all strictness and used-once
            info and retains only absence info.
-}

-- | Describes an interval of /evaluation cardinalities/.
-- See Note [Evaluation cardinalities]
-- See Note [Bit vector representation for Card]
newtype Card = Card Int
  deriving Eq

-- | A subtype of 'Card' for which the upper bound is never 0 (no 'C_00' or
-- 'C_10'). The only four inhabitants are 'C_01', 'C_0N', 'C_11', 'C_1N'.
-- Membership can be tested with 'isCardNonAbs'.
-- See 'D' and 'Call' for use sites and explanation.
type CardNonAbs = Card

-- | A subtype of 'Card' for which the upper bound is never 1 (no 'C_01' or
-- 'C_11'). The only four inhabitants are 'C_00', 'C_0N', 'C_10', 'C_1N'.
-- Membership can be tested with 'isCardNonOnce'.
-- See 'Poly' for use sites and explanation.
type CardNonOnce = Card

-- | Absent, {0}. Pretty-printed as A.
pattern C_00 :: Card
pattern C_00 = Card 0b001
-- | Bottom, {}. Pretty-printed as B.
-- (NB: the bottom element prints as B, not A; A is 'C_00'.)
pattern C_10 :: Card
pattern C_10 = Card 0b000
-- | Strict and used once, {1}. Pretty-printed as 1.
pattern C_11 :: Card
pattern C_11 = Card 0b010
-- | Used at most once, {0,1}. Pretty-printed as M.
pattern C_01 :: Card
pattern C_01 = Card 0b011
-- | Strict and used (possibly) many times, {1,n}. Pretty-printed as S.
pattern C_1N :: Card
pattern C_1N = Card 0b110
-- | Every possible cardinality; the top element, {0,1,n}. Pretty-printed as L.
pattern C_0N :: Card
pattern C_0N = Card 0b111

{-# COMPLETE C_00, C_01, C_0N, C_10, C_11, C_1N :: Card #-}

_botCard, topCard :: Card
_botCard = C_10
topCard = C_0N

-- | True <=> lower bound is 1.
isStrict :: Card -> Bool
-- See Note [Bit vector representation for Card]
isStrict (Card c) = c .&. 0b001 == 0 -- simply check 0 bit is not set

-- | True <=> upper bound is 0.
isAbs :: Card -> Bool
-- See Note [Bit vector representation for Card]
isAbs (Card c) = c .&. 0b110 == 0 -- simply check 1 and n bit are not set

-- | True <=> upper bound is 1.
isUsedOnce :: Card -> Bool
-- See Note [Bit vector representation for Card]
isUsedOnce (Card c) = c .&. 0b100 == 0 -- simply check n bit is not set

-- | Is this a 'CardNonAbs'?
isCardNonAbs :: Card -> Bool
isCardNonAbs = not . isAbs

-- | Is this a 'CardNonOnce'?
isCardNonOnce :: Card -> Bool
isCardNonOnce n = isAbs n || not (isUsedOnce n)

-- | Intersect with [0,1].
oneifyCard :: Card -> Card
oneifyCard C_0N = C_01
oneifyCard C_1N = C_11
oneifyCard c    = c

-- | Denotes '∪' on 'Card'.
lubCard :: Card -> Card -> Card
-- See Note [Bit vector representation for Card]
lubCard (Card a) (Card b) = Card (a .|. b) -- main point of the bit-vector encoding!

-- | Denotes '+' on lower and upper bounds of 'Card'.
plusCard :: Card -> Card -> Card
-- See Note [Algebraic specification for plusCard and multCard]
plusCard (Card a) (Card b) = Card (bit0 .|. bit1 .|. bitN)
  where
    -- 0 ∈ r iff 0 ∈ a and 0 ∈ b
    bit0 = (a .&. b) .&. 0b001
    -- 1 ∈ r iff 1 ∈ a or 1 ∈ b
    bit1 = (a .|. b) .&. 0b010
    -- n ∈ r iff n ∈ a or n ∈ b, or (1 ∈ a and 1 ∈ b); add with carry
    bitN = ((a .|. b) .|. shiftL (a .&. b) 1) .&. 0b100

-- | Denotes '*' on lower and upper bounds of 'Card'.
multCard :: Card -> Card -> Card
-- See Note [Algebraic specification for plusCard and multCard]
multCard (Card a) (Card b) = Card (bit0 .|. bit1 .|. bitN)
  where
    -- 0 ∈ r iff 0 ∈ a or 0 ∈ b
    bit0 = (a .|. b) .&. 0b001
    -- 1 ∈ r iff 1 ∈ a and 1 ∈ b
    bit1 = (a .&. b) .&. 0b010
    -- n ∈ r iff 1 ∈ r and (n ∈ a or n ∈ b)
    bitN = (a .|. b) .&. shiftL bit1 1 .&. 0b100

{-
************************************************************************
*                                                                      *
           Demand: Evaluation contexts
*                                                                      *
************************************************************************
-}

-- | A demand describes a /scaled evaluation context/, e.g. how many times
-- and how deep the denoted thing is evaluated.
--
-- The "how many" component is represented by a 'Card'inality.
-- The "how deep" component is represented by a 'SubDemand'.
-- Examples (using Note [Demand notation]):
--
-- * 'seq' puts demand @1A@ on its first argument: It evaluates the argument
--   strictly (@1@), but not any deeper (@A@).
-- * 'fst' puts demand @1P(1L,A)@ on its argument: It evaluates the argument
--   pair strictly and the first component strictly, but no nested info
--   beyond that (@L@). Its second component is not used at all.
-- * '$' puts demand @1C1(L)@ on its first argument: It calls (@C@) the
--   argument function with one argument, exactly once (@1@). No info
--   on how the result of that call is evaluated (@L@).
-- * 'maybe' puts demand @MCM(L)@ on its second argument: It evaluates
--   the argument function at most once ((M)aybe) and calls it once when
--   it is evaluated.
-- * @fst p + fst p@ puts demand @SP(SL,A)@ on @p@: It's @1P(1L,A)@
--   multiplied by two, so we get @S@ (used at least once, possibly multiple
--   times).
--
-- This data type is quite similar to @'Scaled' 'SubDemand'@, but it's scaled
-- by 'Card', which is an /interval/ on 'Multiplicity', the upper bound of
-- which could be used to infer uniqueness types. Also we treat 'AbsDmd' and
-- 'BotDmd' specially, as the concept of a 'SubDemand' doesn't apply when there
-- isn't any evaluation at all. If you don't care, simply use '(:*)'.
data Demand
  = BotDmd
  -- ^ A bottoming demand, produced by a diverging function ('C_10'), hence there is no
  -- 'SubDemand' that describes how it was evaluated.
  | AbsDmd
  -- ^ An absent demand: Evaluated exactly 0 times ('C_00'), hence there is no
  -- 'SubDemand' that describes how it was evaluated.
  | D !CardNonAbs !SubDemand
  -- ^ Don't use this internal data constructor; use '(:*)' instead.
  -- Since BotDmd deals with 'C_10' and AbsDmd deals with 'C_00', the
  -- cardinality component is CardNonAbs
  deriving Eq

-- | Only meant to be used in the pattern synonym below!
-- NB: must stay in sync with the builder of '(:*)', which maps the
-- (card, sub-demand) pairs below back to 'BotDmd' and 'AbsDmd'.
viewDmdPair :: Demand -> (Card, SubDemand)
viewDmdPair BotDmd   = (C_10, botSubDmd)
viewDmdPair AbsDmd   = (C_00, seqSubDmd)
viewDmdPair (D n sd) = (n, sd)

-- | @c :* sd@ is a demand that says \"evaluated @c@ times, and each time it
-- was evaluated, it was at least as deep as @sd@\".
--
-- Matching on this pattern synonym is a complete match.
-- If the matched demand was 'AbsDmd', it will match as @C_00 :* seqSubDmd@.
-- If the matched demand was 'BotDmd', it will match as @C_10 :* botSubDmd@.
-- The builder of this pattern synonym simply /discards/ the 'SubDemand' if the
-- 'Card' was absent and returns 'AbsDmd' or 'BotDmd' instead. It will assert
-- that the discarded sub-demand was 'seqSubDmd' and 'botSubDmd', respectively.
--
-- Call sites should consider whether they really want to look at the
-- 'SubDemand' of an absent demand and match on 'AbsDmd' and/or 'BotDmd'
-- otherwise. Really, any other 'SubDemand' would be allowed and
-- might work better, depending on context.
pattern (:*) :: HasDebugCallStack => Card -> SubDemand -> Demand
pattern n :* sd <- (viewDmdPair -> (n, sd)) where
  C_10 :* sd = BotDmd & assertPpr (sd == botSubDmd) (text "B /=" <+> ppr sd)
  C_00 :* sd = AbsDmd & assertPpr (sd == seqSubDmd) (text "A /=" <+> ppr sd)
  n    :* sd = D n sd & assertPpr (isCardNonAbs n)  (ppr n $$ ppr sd)
{-# COMPLETE (:*) #-}

-- | A sub-demand describes an /evaluation context/, e.g. how deep the
-- denoted thing is evaluated. See 'Demand' for examples.
--
-- The nested 'SubDemand' @d@ of a 'Call' @Cn(d)@ is /relative/ to a single such call.
-- E.g. The expression @f 1 2 + f 3 4@ puts call demand @SCS(C1(L))@ on @f@:
-- @f@ is called exactly twice (@S@), each time exactly once (@1@) with an
-- additional argument.
--
-- The nested 'Demand's @dn@ of a 'Prod' @P(d1,d2,...)@ apply /absolutely/:
-- If @dn@ is a used once demand (cf. 'isUsedOnce'), then that means that
-- the denoted sub-expression is used once in the entire evaluation context
-- described by the surrounding 'Demand'. E.g., @LP(ML)@ means that the
-- field of the denoted expression is used at most once, although the
-- entire expression might be used many times.
--
-- See Note [Call demands are relative]
-- and Note [Demand notation].
-- See also Note [Why Boxity in SubDemand and not in Demand?].
data SubDemand
  = Poly !Boxity !CardNonOnce
  -- ^ Polymorphic demand, the denoted thing is evaluated arbitrarily deep,
  -- with the specified cardinality at every level. The 'Boxity' applies only
  -- to the outer evaluation context as well as all inner evaluation contexts.
  -- See Note [Boxity in Poly] for why we want it to carry 'Boxity'.
  -- Expands to 'Call' via 'viewCall' and to 'Prod' via 'viewProd'.
  --
  -- @Poly b n@ is semantically equivalent to @Prod b [n :* Poly b n, ...]
  -- or @Call n (Poly Boxed n)@. 'viewCall' and 'viewProd' do these rewrites.
  --
  -- In Note [Demand notation]: @L  === P(L,L,...)@  and @L  === CL(L)@,
  --                            @B  === P(B,B,...)@  and @B  === CB(B)@,
  --                            @!A === !P(A,A,...)@ and @!A === !CA(A)@,
  --                            and so on.
  --
  -- We'll only see 'Poly' with 'C_10' (B), 'C_00' (A), 'C_0N' (L) and sometimes
  -- 'C_1N' (S) through 'plusSubDmd', never 'C_01' (M) or 'C_11' (1) (grep the
  -- source code). Hence 'CardNonOnce', which is closed under 'lub' and 'plus'.
  | Call !CardNonAbs !SubDemand
  -- ^ @Call n sd@ describes the evaluation context of @n@ function
  -- applications, where every individual result is evaluated according to @sd@.
  -- @sd@ is /relative/ to a single call, see Note [Call demands are relative].
  -- That Note also explains why it doesn't make sense for @n@ to be absent,
  -- hence we forbid it with 'CardNonAbs'. Absent call demands can still be
  -- expressed with 'Poly'.
  -- Used only for values of function type. Use the smart constructor 'mkCall'
  -- whenever possible!
  | Prod !Boxity ![Demand]
  -- ^ @Prod b ds@ describes the evaluation context of a case scrutinisation
  -- on an expression of product type, where the product components are
  -- evaluated according to @ds@. The 'Boxity' @b@ says whether or not the box
  -- of the product was used.

-- | We have to respect Poly rewrites through 'viewCall' and 'viewProd'.
instance Eq SubDemand where
  d1 == d2 = case d1 of
    Prod b1 ds1
      | Just (b2, ds2) <- viewProd (length ds1) d2 -> b1 == b2 && ds1 == ds2
    Call n1 sd1
      | Just (n2, sd2) <- viewCall d2              -> n1 == n2 && sd1 == sd2
    Poly b1 n1
      | Poly b2 n2 <- d2                           -> b1 == b2 && n1 == n2
    _ -> False

topSubDmd, botSubDmd, seqSubDmd :: SubDemand
topSubDmd = Poly Boxed   C_0N
botSubDmd = Poly Unboxed C_10
seqSubDmd = Poly Unboxed C_00

-- | The uniform field demand when viewing a 'Poly' as a 'Prod', as in
-- 'viewProd'.
polyFieldDmd :: Boxity -> CardNonOnce -> Demand
polyFieldDmd _     C_00 = AbsDmd
polyFieldDmd _     C_10 = BotDmd
polyFieldDmd Boxed C_0N = topDmd
polyFieldDmd b     n    = n :* Poly b n & assertPpr (isCardNonOnce n) (ppr n)

-- | A smart constructor for 'Prod', applying rewrite rules along the semantic
-- equality @Prod b [n :* Poly Boxed n, ...] === Poly b n@, simplifying to
-- 'Poly' 'SubDemand's when possible. Examples:
--
--   * Rewrites @P(L,L)@ (e.g., arguments @Boxed@, @[L,L]@) to @L@
--   * Rewrites @!P(L!L,L!L)@ (e.g., arguments @Unboxed@, @[L!L,L!L]@) to @!L@
--   * Does not rewrite @P(1L)@, @P(L!L)@, @!P(L)@ or @P(L,A)@
--
mkProd :: Boxity -> [Demand] -> SubDemand
mkProd b ds
  | all (== AbsDmd) ds = Poly b C_00
  | all (== BotDmd) ds = Poly b C_10
  | dmd@(n :* Poly b2 m):_ <- ds
  , n == m             -- don't rewrite P(SL)  to S
  , b == b2            -- don't rewrite P(S!S) to !S
  , all (== dmd) ds    -- don't rewrite P(L,A) to L
  = Poly b n
  | otherwise          = Prod b ds

-- | @viewProd n sd@ interprets @sd@ as a 'Prod' of arity @n@, expanding 'Poly'
-- demands as necessary.
viewProd :: Arity -> SubDemand -> Maybe (Boxity, [Demand])
-- It's quite important that this function is optimised well;
-- it is used by lubSubDmd and plusSubDmd.
viewProd n (Prod b ds)
  | ds `lengthIs` n = Just (b, ds)
-- Note the strict application to replicate: This makes sure we don't allocate
-- a thunk for it, inlines it and lets case-of-case fire at call sites.
viewProd n (Poly b card)
  | let !ds = replicate n $! polyFieldDmd b card
  = Just (b, ds)
viewProd _ _
  = Nothing
{-# INLINE viewProd #-} -- we want to fuse away the replicate and the allocation
                        -- for Arity. Otherwise, #18304 bites us.

-- | A smart constructor for 'Call', applying rewrite rules along the semantic
-- equality @Call n (Poly n) === Poly n@, simplifying to 'Poly' 'SubDemand's
-- when possible.
mkCall :: CardNonAbs -> SubDemand -> SubDemand
-- The two special cases implement the rewrite @Call n (Poly Boxed n) === Poly Boxed n@
-- for the two cardinalities at which it is known to fire.
mkCall C_1N sd@(Poly Boxed C_1N) = sd
mkCall C_0N sd@(Poly Boxed C_0N) = sd
mkCall n    cd = assertPpr (isCardNonAbs n) (ppr n $$ ppr cd) $ Call n cd

-- | @viewCall sd@ interprets @sd@ as a 'Call', expanding 'Poly' subdemands as
-- necessary.
viewCall :: SubDemand -> Maybe (Card, SubDemand)
viewCall (Call n sd) = Just (n :: Card, sd)
viewCall (Poly _ n)  = Just (n :: Card, Poly Boxed n)
viewCall _           = Nothing

topDmd, absDmd, botDmd, seqDmd :: Demand
topDmd = C_0N :* topSubDmd
absDmd = AbsDmd
botDmd = BotDmd
seqDmd = C_11 :* seqSubDmd

-- | Sets 'Boxity' to 'Unboxed' for non-'Call' sub-demands and recurses into 'Prod'.
unboxDeeplySubDmd :: SubDemand -> SubDemand
unboxDeeplySubDmd (Poly _ n)  = Poly Unboxed n
unboxDeeplySubDmd (Prod _ ds) = mkProd Unboxed (strictMap unboxDeeplyDmd ds)
unboxDeeplySubDmd call@Call{} = call

-- | Sets 'Boxity' to 'Unboxed' for the 'Demand', recursing into 'Prod's.
unboxDeeplyDmd :: Demand -> Demand
unboxDeeplyDmd AbsDmd   = AbsDmd
unboxDeeplyDmd BotDmd   = BotDmd
unboxDeeplyDmd (D n sd) = D n (unboxDeeplySubDmd sd)

-- | Denotes '∪' on 'SubDemand'.
lubSubDmd :: SubDemand -> SubDemand -> SubDemand
-- Handle botSubDmd (just an optimisation, the general case would do the same)
lubSubDmd (Poly Unboxed C_10) d2                  = d2
lubSubDmd d1                  (Poly Unboxed C_10) = d1
-- Handle Prod
lubSubDmd (Prod b1 ds1) (Poly b2 n2)
  | let !d = polyFieldDmd b2 n2
  = mkProd (lubBoxity b1 b2) (strictMap (lubDmd d) ds1)
lubSubDmd (Prod b1 ds1) (Prod b2 ds2)
  | equalLength ds1 ds2
  = mkProd (lubBoxity b1 b2) (strictZipWith lubDmd ds1 ds2)
-- Handle Call
lubSubDmd (Call n1 sd1) sd2@(Poly _ n2)
  -- See Note [Call demands are relative]
  | isAbs n2  = mkCall (lubCard n2 n1) sd1
  | otherwise = mkCall (lubCard n2 n1) (lubSubDmd sd1 sd2)
lubSubDmd (Call n1 d1)  (Call n2 d2)
  | otherwise = mkCall (lubCard n1 n2) (lubSubDmd d1 d2)
-- Handle Poly. Exploit reflexivity (so we'll match the Prod or Call cases again).
lubSubDmd (Poly b1 n1)  (Poly b2 n2) = Poly (lubBoxity b1 b2) (lubCard n1 n2)
lubSubDmd sd1@Poly{}    sd2          = lubSubDmd sd2 sd1
-- Otherwise (Call `lub` Prod) return Top
lubSubDmd _             _            = topSubDmd

-- | Denotes '∪' on 'Demand'.
lubDmd :: Demand -> Demand -> Demand
lubDmd (n1 :* sd1) (n2 :* sd2) = lubCard n1 n2 :* lubSubDmd sd1 sd2

multSubDmd :: Card -> SubDemand -> SubDemand
multSubDmd C_11 sd = sd
-- The following three equations don't have an impact on Demands, only on
-- Boxity. They are needed so that we don't trigger the assertions in `:*`
-- when called from `multDmd`.
multSubDmd C_00 _  = seqSubDmd -- Otherwise `multSubDmd A L == A /= !A`
multSubDmd C_10 (Poly _ n) = if isStrict n then botSubDmd else seqSubDmd -- Otherwise `multSubDmd B L == B /= !B`
multSubDmd C_10 (Call n _) = if isStrict n then botSubDmd else seqSubDmd -- Otherwise we'd call `mkCall` with absent cardinality
multSubDmd n (Poly b m)  = Poly b (multCard n m)
multSubDmd n (Call n' sd) = mkCall (multCard n n') sd -- See Note [Call demands are relative]
multSubDmd n (Prod b ds)  = mkProd b (strictMap (multDmd n) ds)

multDmd :: Card -> Demand -> Demand
-- The first two lines compute the same result as the last line, but won't
-- trigger the assertion in `:*` for input like `multDmd B 1L`, which would call
-- `B :* A`. We want to return `B` in these cases.
multDmd C_10 (n :* _)    = if isStrict n then BotDmd else AbsDmd
multDmd n    (C_10 :* _) = if isStrict n then BotDmd else AbsDmd
multDmd n    (m :* sd)   = multCard n m :* multSubDmd n sd

-- | Denotes '+' on 'SubDemand'.
plusSubDmd :: SubDemand -> SubDemand -> SubDemand
-- Handle seqSubDmd (just an optimisation, the general case would do the same)
plusSubDmd (Poly Unboxed C_00) d2                  = d2
plusSubDmd d1                  (Poly Unboxed C_00) = d1
-- Handle Prod
plusSubDmd (Prod b1 ds1) (Poly b2 n2)
  | let !d = polyFieldDmd b2 n2
  = mkProd (plusBoxity b1 b2) (strictMap (plusDmd d) ds1)
plusSubDmd (Prod b1 ds1) (Prod b2 ds2)
  | equalLength ds1 ds2
  = mkProd (plusBoxity b1 b2) (strictZipWith plusDmd ds1 ds2)
-- Handle Call
plusSubDmd (Call n1 sd1) sd2@(Poly _ n2)
  -- See Note [Call demands are relative]
  | isAbs n2  = mkCall (plusCard n2 n1) sd1
  | otherwise = mkCall (plusCard n2 n1) (lubSubDmd sd1 sd2)
plusSubDmd (Call n1 sd1) (Call n2 sd2)
  -- NB: lubSubDmd, not plusSubDmd, on the nested sub-demands, because they are
  -- relative to a single call; see Note [Call demands are relative].
  | otherwise = mkCall (plusCard n1 n2) (lubSubDmd sd1 sd2)
-- Handle Poly. Exploit reflexivity (so we'll match the Prod or Call cases again).
plusSubDmd (Poly b1 n1) (Poly b2 n2) = Poly (plusBoxity b1 b2) (plusCard n1 n2)
plusSubDmd sd1@Poly{}   sd2          = plusSubDmd sd2 sd1
-- Otherwise (Call `lub` Prod) return Top
plusSubDmd _            _            = topSubDmd

-- | Denotes '+' on 'Demand'.
plusDmd :: Demand -> Demand -> Demand
plusDmd (n1 :* sd1) (n2 :* sd2) = plusCard n1 n2 :* plusSubDmd sd1 sd2

-- | Used to suppress pretty-printing of an uninformative demand
isTopDmd :: Demand -> Bool
isTopDmd dmd = dmd == topDmd

isAbsDmd :: Demand -> Bool
isAbsDmd (n :* _) = isAbs n

-- | Contrast with 'isStrUsedDmd'. See Note [Strict demands]
isStrictDmd :: Demand -> Bool
isStrictDmd (n :* _) = isStrict n

-- | Not absent and used strictly. See Note [Strict demands]
isStrUsedDmd :: Demand -> Bool
isStrUsedDmd (n :* _) = isStrict n && not (isAbs n)

-- | Is the value used at most once?
isUsedOnceDmd :: Demand -> Bool
isUsedOnceDmd (n :* _) = isUsedOnce n

-- | We try to avoid tracking weak free variable demands in strictness
-- signatures for analysis performance reasons.
-- See Note [Lazy and unleashable free variables] in "GHC.Core.Opt.DmdAnal".
isWeakDmd :: Demand -> Bool
isWeakDmd dmd@(n :* _) = not (isStrict n) && is_plus_idem_dmd dmd
  where
    -- @is_plus_idem_* thing@ checks whether @thing `plus` thing = thing@,
    -- e.g. if @thing@ is idempotent wrt. to @plus@.
    -- is_plus_idem_card n = plusCard n n == n
    is_plus_idem_card = isCardNonOnce
    -- is_plus_idem_dmd dmd = plusDmd dmd dmd == dmd
    is_plus_idem_dmd AbsDmd    = True
    is_plus_idem_dmd BotDmd    = True
    is_plus_idem_dmd (n :* sd) = is_plus_idem_card n && is_plus_idem_sub_dmd sd
    -- is_plus_idem_sub_dmd sd = plusSubDmd sd sd == sd
    is_plus_idem_sub_dmd (Poly _ n)  = assert (isCardNonOnce n) True
    is_plus_idem_sub_dmd (Prod _ ds) = all is_plus_idem_dmd ds
    is_plus_idem_sub_dmd (Call n _)  = is_plus_idem_card n -- See Note [Call demands are relative]

evalDmd :: Demand
evalDmd = C_1N :* topSubDmd

-- | First argument of 'GHC.Exts.maskAsyncExceptions#': @1C1(L)@.
-- Called exactly once.
strictOnceApply1Dmd :: Demand
strictOnceApply1Dmd = C_11 :* mkCall C_11 topSubDmd

-- | First argument of 'GHC.Exts.atomically#': @SCS(L)@.
-- Called at least once, possibly many times.
strictManyApply1Dmd :: Demand
strictManyApply1Dmd = C_1N :* mkCall C_1N topSubDmd

-- | First argument of catch#: @MCM(L)@.
-- Evaluates its arg lazily, but then applies it exactly once to one argument.
lazyApply1Dmd :: Demand
lazyApply1Dmd = C_01 :* mkCall C_01 topSubDmd

-- | Second argument of catch#: @MCM(C1(L))@.
-- Calls its arg lazily, but then applies it exactly once to an additional argument.
lazyApply2Dmd :: Demand
lazyApply2Dmd = C_01 :* mkCall C_01 (mkCall C_11 topSubDmd)

-- | Cap the evaluation cardinality of a 'Demand' at \"at most once\",
-- leaving the nested 'SubDemand' alone. 'AbsDmd' and 'BotDmd' are already
-- at most once, so they pass through unchanged.
oneifyDmd :: Demand -> Demand
oneifyDmd dmd = case dmd of
  AbsDmd  -> AbsDmd
  BotDmd  -> BotDmd
  n :* sd -> oneifyCard n :* sd

-- | Raise the lower bound of a 'Demand' to 1 (e.g. make it strict), leaving
-- the nested 'SubDemand' alone. An absent demand becomes 'seqDmd' (strict,
-- but evaluated no deeper); a bottoming demand is already strict.
strictifyDmd :: Demand -> Demand
strictifyDmd dmd = case dmd of
  AbsDmd  -> seqDmd
  BotDmd  -> BotDmd
  n :* sd -> plusCard C_10 n :* sd -- 'plusCard C_10' bumps up strictness only

-- | If the argument is a used non-newtype dictionary, give it strict demand.
-- Also split the product type & demand and recur in order to similarly
-- strictify the argument's contained used non-newtype superclass dictionaries.
-- We use the demand as our recursive measure to guarantee termination.
strictifyDictDmd :: Type -> Demand -> Demand
strictifyDictDmd ty (n :* Prod b ds)
  | not (isAbs n)
  , Just field_tys <- as_non_newtype_dict ty
  = C_1N :* -- main idea: ensure it's strict
      mkProd b (zipWith strictifyDictDmd field_tys ds)
  where
    -- Return a TyCon and a list of field types if the given
    -- type is a non-newtype dictionary type
    as_non_newtype_dict ty
      | Just (tycon, _arg_tys, _data_con, map scaledThing -> inst_con_arg_tys)
          <- splitDataProductType_maybe ty
      , not (isNewTyCon tycon)
      , isClassTyCon tycon
      = Just inst_con_arg_tys
      | otherwise
      = Nothing
strictifyDictDmd _  dmd = dmd

-- | Make a 'Demand' lazy, setting all lower bounds (outside 'Call's) to 0.
lazifyDmd :: Demand -> Demand
lazifyDmd AbsDmd    = AbsDmd
lazifyDmd BotDmd    = AbsDmd
lazifyDmd (n :* sd) = multCard C_01 n :* lazifySubDmd sd

-- | Make a 'SubDemand' lazy, setting all lower bounds (outside 'Call's) to 0.
lazifySubDmd :: SubDemand -> SubDemand
lazifySubDmd (Poly b n)  = Poly b (multCard C_01 n)
lazifySubDmd (Prod b sd) = mkProd b (strictMap lazifyDmd sd)
-- NB: lubCard C_01 drops the lower bound to 0 but (since the call
-- cardinality of a 'Call' is 'CardNonAbs') leaves the upper bound alone.
lazifySubDmd (Call n sd) = mkCall (lubCard C_01 n) sd

-- | Wraps the 'SubDemand' with a one-shot call demand: @d@ -> @C1(d)@.
mkCalledOnceDmd :: SubDemand -> SubDemand
mkCalledOnceDmd sd = mkCall C_11 sd

-- | @mkCalledOnceDmds n d@ returns @C1(C1...(C1 d))@ where there are @n@ @C1@'s.
mkCalledOnceDmds :: Arity -> SubDemand -> SubDemand
mkCalledOnceDmds arity sd = iterate mkCalledOnceDmd sd !! arity

-- | Peels one call level from the sub-demand, and also returns how many
-- times we entered the lambda body.
peelCallDmd :: SubDemand -> (Card, SubDemand)
peelCallDmd sd = viewCall sd `orElse` (topCard, topSubDmd)

-- Peels multiple nestings of 'Call' sub-demands and also returns
-- whether it was unsaturated in the form of a 'Card'inality, denoting
-- how many times the lambda body was entered.
-- See Note [Demands from unsaturated function calls].
peelManyCalls :: Arity -> SubDemand -> (Card, SubDemand)
peelManyCalls k sd = go k C_11 sd
  where
    go 0 !n !sd                        = (n, sd)
    go k !n (viewCall -> Just (m, sd)) = go (k-1) (n `multCard` m) sd
    go _ _  _                          = (topCard, topSubDmd)
{-# INLINE peelManyCalls #-} -- so that the pair cancels away in a `fst _` context

-- See Note [Demand on the worker] in GHC.Core.Opt.WorkWrap
mkWorkerDemand :: Int -> Demand
mkWorkerDemand n = C_01 :* go n
  where go 0 = topSubDmd
        go n = Call C_01 $ go (n-1)

argsOneShots :: DmdSig -> Arity -> [[OneShotInfo]]
-- ^ See Note [Computing one-shot info]
argsOneShots (DmdSig (DmdType _ arg_ds)) n_val_args
  | unsaturated_call = []
  | otherwise = go arg_ds
  where
    unsaturated_call = arg_ds `lengthExceeds` n_val_args

    go []               = []
    go (arg_d : arg_ds) = argOneShots arg_d `cons` go arg_ds

    -- Avoid list tail like [ [], [], [] ]
    cons [] [] = []
    cons a  as = a:as

argOneShots :: Demand          -- ^ depending on saturation
            -> [OneShotInfo]
-- ^ See Note [Computing one-shot info]
argOneShots AbsDmd    = [] -- This defn conflicts with 'saturatedByOneShots',
argOneShots BotDmd    = [] -- according to which we should return
                           -- @repeat OneShotLam@ here...
argOneShots (_ :* sd) = go sd -- See Note [Call demands are relative]
  where
    go (Call n sd)
      | isUsedOnce n = OneShotLam    : go sd
      | otherwise    = NoOneShotInfo : go sd
    go _ = []

-- |
-- @saturatedByOneShots n CM(CM(...)) = True@
--   <=>
-- There are at least n nested CM(..) calls.
-- See Note [Demand on the worker] in GHC.Core.Opt.WorkWrap
saturatedByOneShots :: Int -> Demand -> Bool
saturatedByOneShots _ AbsDmd    = True
saturatedByOneShots _ BotDmd    = True
saturatedByOneShots n (_ :* sd) = isUsedOnce $ fst $ peelManyCalls n sd

{- Note [Strict demands]
~~~~~~~~~~~~~~~~~~~~~~~~
'isStrUsedDmd' returns true only of demands that are
   both strict
   and  used
In particular, it is False for botDmd (i.e. strict and not used, cardinality
C_10), which can and does arise in, say (#7319)
   f x = raise# <some exception>
Then 'x' is not used, so 'f' gets a bottoming demand (B) on 'x'.
Now the w/w generates fx = let x = absentError "unused" in raise At this point we really don't want to convert to fx = case absentError "unused" of x -> raise Since the program is going to diverge, this swaps one error for another, but it's really a bad idea to *ever* evaluate an absent argument. In #7319 we get T7319.exe: Oops! Entered absent arg w_s1Hd{v} [lid] [base:GHC.Base.String{tc 36u}] Note [Call demands are relative] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The expression @if b then 0 else f 1 2 + f 3 4@ uses @f@ according to the demand @LCL(C1(P(L)))@, meaning "f is called multiple times or not at all (CL), but each time it is called, it's called with *exactly one* (C1) more argument. Whenever it is called with two arguments, we have no info on how often the field of the product result is used (L)." So the 'SubDemand' nested in a 'Call' demand is relative to exactly one call. And that extends to the information we have how its results are used in each call site. Consider (#18903) h :: Int -> Int h m = let g :: Int -> (Int,Int) g 1 = (m, 0) g n = (2 * n, 2 `div` n) {-# NOINLINE g #-} in case m of 1 -> 0 2 -> snd (g m) _ -> uncurry (+) (g m) We want to give @g@ the demand @MCM(P(MP(L),1P(L)))@, so we see that in each call site of @g@, we are strict in the second component of the returned pair. This relative cardinality leads to an otherwise unexpected call to 'lubSubDmd' in 'plusSubDmd', but if you do the math it's just the right thing. There's one more subtlety: Since the nested demand is relative to exactly one call, in the case where we have *at most zero calls* (e.g. CA(...)), the premise is hurt and we can assume that the nested demand is 'botSubDmd'. That ensures that @g@ above actually gets the @1P(L)@ demand on its second pair component, rather than the lazy @MP(L)@ if we 'lub'bed with an absent demand. Note [Computing one-shot info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a call f (\pqr. e1) (\xyz. 
e2) e3
where f has usage signature
   <C1(CS(C1(L)))><C1(L)><L>
Then argsOneShots returns a [[OneShotInfo]] of
   [[OneShot,NoOneShotInfo,OneShot],  [OneShot]]
The occurrence analyser propagates this one-shot info to the
binders \pqr and \xyz;
see Note [Use one-shot information] in "GHC.Core.Opt.OccurAnal".

Note [Boxity in Poly]
~~~~~~~~~~~~~~~~~~~~~
To support Note [Boxity analysis], it makes sense that 'Prod' carries a
'Boxity'. But why does 'Poly' have to carry a 'Boxity', too? Shouldn't all
'Poly's be 'Boxed'? Couldn't we simply use 'Prod Unboxed' when we need to
express an unboxing demand?

'botSubDmd' (B) needs to be the bottom of the lattice, so it needs to be an
Unboxed demand (and deeply, at that). Similarly, 'seqSubDmd' (A) is an Unboxed
demand. So why not say that Polys with absent cardinalities have Unboxed
boxity? That doesn't work, because we also need the boxed equivalents. Here's
an example for A (function 'absent' in T19871):
```
f _ True  = 1
f a False = a `seq` 2
  -- demand on a: MA, the A is short for `Poly Boxed C_00`

g a = a `seq` f a True
  -- demand on a: SA, which is `Poly Boxed C_00`

h True  p       = g p -- SA on p (inherited from g)
h False p@(x,y) = x+y -- S!P(1!L,1!L) on p
```
If A is treated as Unboxed, we get reboxing in the call site to 'g'.
So we obviously would need a Boxed variant of A. Rather than introducing a lot
of special cases, we just carry the Boxity in 'Poly'. Plus, we could most
likely find examples like the above for any other cardinality.

Note [Why Boxity in SubDemand and not in Demand?]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In #19871, we started out by storing 'Boxity' in 'SubDemand', in the 'Prod'
constructor only. But then we found that we weren't able to express the
unboxing 'seqSubDmd', because that one really is a `Poly C_00` sub-demand.
We then tried to store the Boxity in 'Demand' instead, for these reasons:

  1. The whole boxity-of-seq business comes to a satisfying conclusion
  2.
Putting Boxity in the SubDemand is weird to begin with, because it describes the box and not its fields, just as the evaluation cardinality of a Demand describes how often the box is used. It makes more sense that Card and Boxity travel together. Also the alternative would have been to store Boxity with Poly, which is even weirder and more redundant. But then we regressed in T7837 (grep #19871 for boring specifics), which needed to transfer an ambient unboxed *demand* on a dictionary selector to its argument dictionary, via a 'Call' sub-demand `C1(sd)`, as Note [Demand transformer for a dictionary selector] explains. Annoyingly, the boxity info has to be stored in the *sub-demand* `sd`! There's no demand to store the boxity in. So we bit the bullet and now we store Boxity in 'SubDemand', both in 'Prod' *and* 'Poly'. See also Note [Boxity in Poly]. Note [Demand transformer for data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the expression (x,y) with sub-demand P(SL,A). What is the demand on x,y? Obviously `x` is used strictly, and `y` not at all. So we want to decompose a product demand, and feed its components demands into the arguments. That is the job of dmdTransformDataConSig. More precisely, * it gets the demand on the data constructor itself; in the above example that is C(1,C(1,P(SL,A))) * it returns the demands on the arguments; in the above example that is [SL, A] Nasty wrinkle. Consider this code (#22475 has more realistic examples but assume this is what the demand analyser sees) data T = MkT !Int Bool get :: T -> Bool get (MkT _ b) = b foo = let v::Int = I# 7 t::T = MkT v True in get t Now `v` is unused by `get`, /but/ we can't give `v` an Absent demand, else we'll drop the binding and replace it with an error thunk. Then the code generator (more specifically GHC.Stg.InferTags.Rewrite) will add an extra eval of MkT's argument to give foo = let v::Int = error "absent" t::T = case v of v' -> MkT v' True in get t Boo! 
Because of this extra eval (added in STG-land), the truth is that `MkT` may
(or may not) evaluate its arguments (as established in #21497). Hence the
use of `bump` in dmdTransformDataConSig, which adds in a `C_01` eval. The
`C_01` says "may or may not evaluate" which is absolutely faithful to what
InferTags.Rewrite does. In particular it is very important /not/ to make that
a `C_11` eval, see Note [Data-con worker strictness].
-}

{- *********************************************************************
*                                                                      *
            Divergence: Whether evaluation surely diverges
*                                                                      *
********************************************************************* -}

-- | 'Divergence' characterises whether something surely diverges.
-- Models a subset lattice of the following exhaustive set of divergence
-- results:
--
--  [n] nontermination (e.g. loops)
--  [i] throws imprecise exception
--  [p] throws precise exception
--  [c] converges (reduces to WHNF).
--
-- The different lattice elements correspond to different subsets, indicated by
-- juxtaposition of indicators (e.g. __nc__ definitely doesn't throw an
-- exception, and may or may not reduce to WHNF).
--
-- @
--             Dunno (nipc)
--                  |
--            ExnOrDiv (nip)
--                  |
--            Diverges (ni)
-- @
--
-- As you can see, we don't distinguish __n__ and __i__.
-- See Note [Precise exceptions and strictness analysis] for why __p__ is so
-- special compared to __i__.
data Divergence
  = Diverges -- ^ Definitely throws an imprecise exception or diverges.
  | ExnOrDiv -- ^ Definitely throws a *precise* exception, an imprecise
             --   exception or diverges. Never converges, hence 'isDeadEndDiv'!
             --   See scenario 1 in Note [Precise exceptions and strictness analysis].
  | Dunno    -- ^ Might diverge, throw any kind of exception or converge.
  deriving Eq

lubDivergence :: Divergence -> Divergence -> Divergence
lubDivergence Diverges div      = div
lubDivergence div      Diverges = div
lubDivergence ExnOrDiv ExnOrDiv = ExnOrDiv
lubDivergence _        _        = Dunno
-- This needs to commute with defaultFvDmd, i.e.
-- defaultFvDmd (r1 `lubDivergence` r2) = defaultFvDmd r1 `lubDmd` defaultFvDmd r2
-- (See Note [Default demand on free variables and arguments] for why)

-- | See Note [Asymmetry of plusDmdType], which concludes that 'plusDivergence'
-- needs to be symmetric.
-- Strictly speaking, we should have @plusDivergence Dunno Diverges = ExnOrDiv@.
-- But that regresses in too many places (every infinite loop, basically) to be
-- worth it and is only relevant in higher-order scenarios
-- (e.g. Divergence of @f (throwIO blah)@).
-- So 'plusDivergence' currently is 'glbDivergence', really.
plusDivergence :: Divergence -> Divergence -> Divergence
plusDivergence Dunno    Dunno    = Dunno
plusDivergence Diverges _        = Diverges
plusDivergence _        Diverges = Diverges
plusDivergence _        _        = ExnOrDiv

-- | In a non-strict scenario, we might not force the Divergence, in which case
-- we might converge, hence Dunno.
multDivergence :: Card -> Divergence -> Divergence
multDivergence n _ | not (isStrict n) = Dunno
multDivergence _ d                    = d

topDiv, exnDiv, botDiv :: Divergence
topDiv = Dunno
exnDiv = ExnOrDiv
botDiv = Diverges

-- | True if the 'Divergence' indicates that evaluation will not return.
-- See Note [Dead ends].
isDeadEndDiv :: Divergence -> Bool
isDeadEndDiv Diverges = True
isDeadEndDiv ExnOrDiv = True
isDeadEndDiv Dunno    = False

-- See Notes [Default demand on free variables and arguments]
-- and Scenario 1 in [Precise exceptions and strictness analysis]
defaultFvDmd :: Divergence -> Demand
defaultFvDmd Dunno    = absDmd
defaultFvDmd ExnOrDiv = absDmd -- This is the whole point of ExnOrDiv!
defaultFvDmd Diverges = botDmd -- Diverges

defaultArgDmd :: Divergence -> Demand
-- TopRes and BotRes are polymorphic, so that
--      BotRes === (Bot -> BotRes) === ...
--      TopRes === (Top -> TopRes) === ...
-- This function makes that concrete
-- Also see Note [Default demand on free variables and arguments]
defaultArgDmd Dunno    = topDmd
-- NB: not botDmd! We don't want to mask the precise exception by forcing the
-- argument. But it is still absent.
defaultArgDmd ExnOrDiv = absDmd
defaultArgDmd Diverges = botDmd

{- Note [Precise vs imprecise exceptions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An exception is considered to be /precise/ when it is thrown by the 'raiseIO#'
primop. It follows that all other primops (such as 'raise#' or
division-by-zero) throw /imprecise/ exceptions. Note that the actual type of
the exception thrown doesn't have any impact!

GHC undertakes some effort not to apply an optimisation that would mask a
/precise/ exception with some other source of nontermination, such as genuine
divergence or an imprecise exception, so that the user can reliably intercept
the precise exception with a catch handler before and after optimisations.

See also the wiki page on precise exceptions:
https://gitlab.haskell.org/ghc/ghc/wikis/exceptions/precise-exceptions
Section 5 of "Tackling the awkward squad" talks about semantic concerns.
Imprecise exceptions are actually more interesting than precise ones (which
are fairly standard) from the perspective of semantics. See the paper "A
Semantics for Imprecise Exceptions" for more details.

Note [Dead ends]
~~~~~~~~~~~~~~~~
We call an expression that either diverges or throws a precise or imprecise
exception a "dead end". We used to call such an expression just "bottoming",
but with the measures we take to preserve precise exception semantics
(see Note [Precise exceptions and strictness analysis]), that is no longer
accurate: 'exnDiv' is no longer the bottom of the Divergence lattice.

Yet externally to demand analysis, we mostly care about being able to drop
dead code etc., which is all due to the property that such an expression never
returns, hence we consider throwing a precise exception to be a dead end.
See also 'isDeadEndDiv'.
Note [Precise exceptions and strictness analysis] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have to take care to preserve precise exception semantics in strictness analysis (#17676). There are two scenarios that need careful treatment. The fixes were discussed at https://gitlab.haskell.org/ghc/ghc/wikis/fixing-precise-exceptions Recall that raiseIO# raises a *precise* exception, in contrast to raise# which raises an *imprecise* exception. See Note [Precise vs imprecise exceptions]. Scenario 1: Precise exceptions in case alternatives --------------------------------------------------- Unlike raise# (which returns botDiv), we want raiseIO# to return exnDiv. Here's why. Consider this example from #13380 (similarly #17676): f x y | x>0 = raiseIO# Exc | y>0 = return 1 | otherwise = return 2 Is 'f' strict in 'y'? One might be tempted to say yes! But that plays fast and loose with the precise exception; after optimisation, (f 42 (error "boom")) turns from throwing the precise Exc to throwing the imprecise user error "boom". So, the defaultFvDmd of raiseIO# should be lazy (topDmd), which can be achieved by giving it divergence exnDiv. See Note [Default demand on free variables and arguments]. Why don't we just give it topDiv instead of introducing exnDiv? Because then the simplifier will fail to discard raiseIO#'s continuation in case raiseIO# x s of { (# s', r #) -> } which we'd like to optimise to case raiseIO# x s of {} Hence we came up with exnDiv. The default FV demand of exnDiv is lazy (and its default arg dmd is absent), but otherwise (in terms of 'isDeadEndDiv') it behaves exactly as botDiv, so that dead code elimination works as expected. This is tracked by T13380b. Scenario 2: Precise exceptions in case scrutinees ------------------------------------------------- Consider (more complete examples in #148, #1592, testcase strun003) case foo x s of { (# s', r #) -> y } Is this strict in 'y'? Often not! 
If @foo x s@ might throw a precise exception (ultimately via raiseIO#), then we must not force 'y', which may fail to terminate or throw an imprecise exception, until we have performed @foo x s@. So we have to 'deferAfterPreciseException' (which 'lub's with 'exnDmdType' to model the exceptional control flow) when @foo x s@ may throw a precise exception. Motivated by T13380{d,e,f}. See Note [Which scrutinees may throw precise exceptions] in "GHC.Core.Opt.DmdAnal". We have to be careful not to discard dead-end Divergence from case alternatives, though (#18086): m = putStrLn "foo" >> error "bar" 'm' should still have 'exnDiv', which is why it is not sufficient to lub with 'nopDmdType' (which has 'topDiv') in 'deferAfterPreciseException'. Historical Note: This used to be called the "IO hack". But that term is rather a bad fit because 1. It's easily confused with the "State hack", which also affects IO. 2. Neither "IO" nor "hack" is a good description of what goes on here, which is deferring strictness results after possibly throwing a precise exception. The "hack" is probably not having to defer when we can prove that the expression may not throw a precise exception (increasing precision of the analysis), but that's just a favourable guess. Note [Exceptions and strictness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We used to smart about catching exceptions, but we aren't anymore. See #14998 for the way it's resolved at the moment. Here's a historic breakdown: Apparently, exception handling prim-ops didn't use to have any special strictness signatures, thus defaulting to nopSig, which assumes they use their arguments lazily. Joachim was the first to realise that we could provide richer information. Thus, in 0558911f91c (Dec 13), he added signatures to primops.txt.pp indicating that functions like `catch#` and `catchRetry#` call their argument, which is useful information for usage analysis. Still with a 'Lazy' strictness demand (i.e. 
'lazyApply1Dmd'), though, and the world was fine. In 7c0fff4 (July 15), Simon argued that giving `catch#` et al. a 'strictApply1Dmd' leads to substantial performance gains. That was at the cost of correctness, as #10712 proved. So, back to 'lazyApply1Dmd' in 28638dfe79e (Dec 15). Motivated to reproduce the gains of 7c0fff4 without the breakage of #10712, Ben opened #11222. Simon made the demand analyser "understand catch" in 9915b656 (Jan 16) by adding a new 'catchArgDmd', which basically said to call its argument strictly, but also swallow any thrown exceptions in 'multDivergence'. This was realized by extending the 'Str' constructor of 'ArgStr' with a 'ExnStr' field, indicating that it catches the exception, and adding a 'ThrowsExn' constructor to the 'Divergence' lattice as an element between 'Dunno' and 'Diverges'. Then along came #11555 and finally #13330, so we had to revert to 'lazyApply1Dmd' again in 701256df88c (Mar 17). This left the other variants like 'catchRetry#' having 'catchArgDmd', which is where #14998 picked up. Item 1 was concerned with measuring the impact of also making `catchRetry#` and `catchSTM#` have 'lazyApply1Dmd'. The result was that there was none. We removed the last usages of 'catchArgDmd' in 00b8ecb7 (Apr 18). There was a lot of dead code resulting from that change, that we removed in ef6b283 (Jan 19): We got rid of 'ThrowsExn' and 'ExnStr' again and removed any code that was dealing with the peculiarities. Where did the speed-ups vanish to? In #14998, item 3 established that turning 'catch#' strict in its first argument didn't bring back any of the alleged performance benefits. Item 2 of that ticket finally found out that it was entirely due to 'catchException's new (since #11555) definition, which was simply catchException !io handler = catch io handler While 'catchException' is arguably the saner semantics for 'catch', it is an internal helper function in "GHC.IO". 
Its use in "GHC.IO.Handle.Internals.do_operation" made for the huge
allocation differences: Remove the bang and you find the regressions we
originally wanted to avoid with 'catchArgDmd'. See also
#exceptions_and_strictness# in "GHC.IO".

So history keeps telling us that the only possibly correct strictness
annotation for the first argument of 'catch#' is 'lazyApply1Dmd', because
'catch#' really is not strict in its argument: Just try this in GHCi

  :set -XScopedTypeVariables
  import Control.Exception
  catch undefined (\(_ :: SomeException) -> putStrLn "you'll see this")

Any analysis that assumes otherwise will be broken in some way or another
(beyond `-fno-pedantic-bottoms`).

But then #13380 and #17676 suggest (in Mar 20) that we need to re-introduce a
subtly different variant of `ThrowsExn` (which we call `ExnOrDiv` now) that is
only used by `raiseIO#` in order to preserve precise exceptions by strictness
analysis, while not impacting the ability to eliminate dead code.
See Note [Precise exceptions and strictness analysis].

Note [Default demand on free variables and arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Free variables not mentioned in the environment of a 'DmdType'
are demanded according to the demand type's Divergence:
  * In a Diverges (botDiv) context, that demand is botDmd
    (strict and absent).
  * In all other contexts, the demand is absDmd (lazy and absent).
This is recorded in 'defaultFvDmd'.

Similarly, we can eta-expand demand types to get demands on excess arguments
not accounted for in the type, by consulting 'defaultArgDmd':
  * In a Diverges (botDiv) context, that demand is again botDmd.
  * In an ExnOrDiv (exnDiv) context, that demand is absDmd: We surely diverge
    before evaluating the excess argument, but don't want to eagerly evaluate
    it (cf. Note [Precise exceptions and strictness analysis]).
* In a Dunno context (topDiv), the demand is topDmd, because it's perfectly possible to enter the additional lambda and evaluate it in unforeseen ways (so, not absent). Note [Bottom CPR iff Dead-Ending Divergence] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both CPR analysis and Demand analysis handle recursive functions by doing fixed-point iteration. To find the *least* (e.g., most informative) fixed-point, iteration starts with the bottom element of the semantic domain. Diverging functions generally have the bottom element as their least fixed-point. One might think that CPR analysis and Demand analysis then agree in when a function gets a bottom denotation. E.g., whenever it has 'botCpr', it should also have 'botDiv'. But that is not the case, because strictness analysis has to be careful around precise exceptions, see Note [Precise vs imprecise exceptions]. So Demand analysis gives some diverging functions 'exnDiv' (which is *not* the bottom element) when the CPR signature says 'botCpr', and that's OK. Here's an example (from #18086) where that is the case: ioTest :: IO () ioTest = do putStrLn "hi" undefined However, one can loosely say that we give a function 'botCpr' whenever its 'Divergence' is 'exnDiv' or 'botDiv', i.e., dead-ending. But that's just a consequence of fixed-point iteration, it's not important that they agree. ************************************************************************ * * Demand environments and types * * ************************************************************************ -} -- Subject to Note [Default demand on free variables and arguments] -- | Captures the result of an evaluation of an expression, by -- -- * Listing how the free variables of that expression have been evaluted -- ('de_fvs') -- * Saying whether or not evaluation would surely diverge ('de_div') -- -- See Note [Demand env Equality]. 
data DmdEnv = DE
  { de_fvs :: !(VarEnv Demand)  -- ^ Demand on each free variable seen so far
  , de_div :: !Divergence       -- ^ Whether evaluation surely diverges; also
                                --   determines the implicit demand on any
                                --   variable *absent* from 'de_fvs', via
                                --   'defaultFvDmd'
  }

instance Eq DmdEnv where
  DE fv1 div1 == DE fv2 div2
    = div1 == div2 && canonicalise div1 fv1 == canonicalise div2 fv2
    where
      -- Drop all entries carrying the default FV demand before comparing,
      -- so that e.g. {x->A} with topDiv equals {} with topDiv.
      -- Essential for fixed-point termination; see Note [Demand env Equality].
      canonicalise div fv = filterUFM (/= defaultFvDmd div) fv

-- | An environment with no recorded free-variable demands and the given
-- 'Divergence'.
mkEmptyDmdEnv :: Divergence -> DmdEnv
mkEmptyDmdEnv div = DE emptyVarEnv div

-- | Build a potentially terminating 'DmdEnv' from a finite map that says what
-- has been evaluated so far
mkTermDmdEnv :: VarEnv Demand -> DmdEnv
mkTermDmdEnv fvs = DE fvs topDiv

-- | Empty environment, no divergence information ('topDiv').
nopDmdEnv :: DmdEnv
nopDmdEnv = mkEmptyDmdEnv topDiv

-- | Empty environment of a surely diverging computation ('botDiv').
botDmdEnv :: DmdEnv
botDmdEnv = mkEmptyDmdEnv botDiv

-- | Empty environment of a computation that throws an exception or
-- diverges ('exnDiv').
exnDmdEnv :: DmdEnv
exnDmdEnv = mkEmptyDmdEnv exnDiv

-- | Least upper bound of two 'DmdEnv's, for combining alternative
-- control-flow paths.
lubDmdEnv :: DmdEnv -> DmdEnv -> DmdEnv
lubDmdEnv (DE fv1 d1) (DE fv2 d2) = DE lub_fv lub_div
  where
    -- See Note [Demand env Equality]: an entry present on only one side is
    -- lub'd against the other side's 'defaultFvDmd', via 'plusVarEnv_CD'.
    lub_fv  = plusVarEnv_CD lubDmd fv1 (defaultFvDmd d1) fv2 (defaultFvDmd d2)
    lub_div = lubDivergence d1 d2

-- | Record an additional demand on 'id', combining it via 'plusDmd' with
-- whatever demand the environment already holds for it (or the default
-- demand, if absent).
addVarDmdEnv :: DmdEnv -> Id -> Demand -> DmdEnv
addVarDmdEnv env@(DE fvs div) id dmd
  = DE (extendVarEnv fvs id (dmd `plusDmd` lookupDmdEnv env id)) div

-- | Sequential composition: both evaluations described by the two 'DmdEnv's
-- take place.
plusDmdEnv :: DmdEnv -> DmdEnv -> DmdEnv
plusDmdEnv (DE fv1 d1) (DE fv2 d2)
  -- In contrast to Note [Asymmetry of plusDmdType], this function is symmetric.
  | isEmptyVarEnv fv2, defaultFvDmd d2 == absDmd
  = DE fv1 (d1 `plusDivergence` d2) -- a very common case that is much more efficient
  | isEmptyVarEnv fv1, defaultFvDmd d1 == absDmd
  = DE fv2 (d1 `plusDivergence` d2) -- another very common case that is much more efficient
  | otherwise
  = DE (plusVarEnv_CD plusDmd fv1 (defaultFvDmd d1) fv2 (defaultFvDmd d2))
       (d1 `plusDivergence` d2)

-- | 'DmdEnv' is a monoid via 'plusDmdEnv' and 'nopDmdEnv'; this is its 'msum'
plusDmdEnvs :: [DmdEnv] -> DmdEnv
plusDmdEnvs []   = nopDmdEnv
plusDmdEnvs pdas = foldl1' plusDmdEnv pdas

-- | Scale an environment by the cardinality with which the described
-- evaluation happens.
multDmdEnv :: Card -> DmdEnv -> DmdEnv
multDmdEnv C_11 env = env             -- evaluated exactly once: unchanged
multDmdEnv C_00 _   = nopDmdEnv       -- never evaluated: nothing is demanded
multDmdEnv n (DE fvs div) = DE (mapVarEnv (multDmd n) fvs) (multDivergence n div)

-- | Account for the environment being used many times ('C_1N').
reuseEnv :: DmdEnv -> DmdEnv
reuseEnv = multDmdEnv C_1N

lookupDmdEnv :: DmdEnv -> Id -> Demand
-- See Note [Default demand on free variables and arguments]
lookupDmdEnv (DE fv div) id = lookupVarEnv fv id `orElse` defaultFvDmd div

-- | Remove any entry for 'id'. NB: afterwards 'id' implicitly carries the
-- 'defaultFvDmd' of 'de_div' again, like any unmentioned variable.
delDmdEnv :: DmdEnv -> Id -> DmdEnv
delDmdEnv (DE fv div) id = DE (fv `delVarEnv` id) div

-- | Characterises how an expression
--
--    * Evaluates its free variables ('dt_env') including divergence info
--    * Evaluates its arguments ('dt_args')
--
data DmdType = DmdType
  { dt_env  :: !DmdEnv    -- ^ Demands on free variables.
                          -- See Note [Demand type Divergence]
  , dt_args :: ![Demand]  -- ^ Demand on arguments
  }

-- | See Note [Demand env Equality].
instance Eq DmdType where
  DmdType env1 ds1 == DmdType env2 ds2
    = ds1 == ds2 -- cheap checks first
      && env1 == env2

-- | Compute the least upper bound of two 'DmdType's elicited /by the same
-- incoming demand/!
lubDmdType :: DmdType -> DmdType -> DmdType
lubDmdType d1 d2 = DmdType lub_fv lub_ds
  where
    -- Eta-expand both types to a common depth first, so that the argument
    -- demand lists line up and can be lub'd pointwise.
    n = max (dmdTypeDepth d1) (dmdTypeDepth d2)
    (DmdType fv1 ds1) = etaExpandDmdType n d1
    (DmdType fv2 ds2) = etaExpandDmdType n d2
    lub_ds = zipWithEqual "lubDmdType" lubDmd ds1 ds2
    lub_fv = lubDmdEnv fv1 fv2

-- | Drop the argument demands, keeping only the free-variable 'DmdEnv'.
discardArgDmds :: DmdType -> DmdEnv
discardArgDmds (DmdType fv _) = fv

plusDmdType :: DmdType -> DmdEnv -> DmdType
plusDmdType (DmdType fv ds) fv'
  -- See Note [Asymmetry of plusDmdType]
  -- 'DmdEnv' forms a (monoidal) action on 'DmdType' via this operation.
  = DmdType (plusDmdEnv fv fv') ds

-- | The demand type of a surely diverging expression.
botDmdType :: DmdType
botDmdType = DmdType botDmdEnv []

-- | The demand type of doing nothing (lazy, absent, no Divergence
-- information). Note that it is ''not'' the top of the lattice (which would be
-- "may use everything"), so it is (no longer) called topDmdType.
nopDmdType :: DmdType
nopDmdType = DmdType nopDmdEnv []

-- | The demand type of an unspecified expression that is guaranteed to
-- throw a (precise or imprecise) exception or diverge.
exnDmdType :: DmdType
exnDmdType = DmdType exnDmdEnv []

-- | The number of argument demands, i.e. the arity at which this demand type
-- was elicited.
dmdTypeDepth :: DmdType -> Arity
dmdTypeDepth = length . dt_args

-- | This makes sure we can use the demand type with n arguments after eta
-- expansion, where n must not be lower than the demand types depth.
-- It appends the argument list with the correct 'defaultArgDmd'.
etaExpandDmdType :: Arity -> DmdType -> DmdType
etaExpandDmdType n d@DmdType{dt_args = ds, dt_env = env}
  | n == depth = d
  | n > depth  = d{dt_args = inc_ds}
  | otherwise  = pprPanic "etaExpandDmdType: arity decrease" (ppr n $$ ppr d)
  where
    depth = length ds
    -- Arity increase:
    --   * Demands on FVs are still valid
    --   * Demands on args also valid, plus we can extend with defaultArgDmd
    --     as appropriate for the given Divergence
    --   * Divergence is still valid:
    --     - A dead end after 2 arguments stays a dead end after 3 arguments
    --     - The remaining case is Dunno, which is already topDiv
    inc_ds = take n (ds ++ repeat (defaultArgDmd (de_div env)))

-- | A conservative approximation for a given 'DmdType' in case of an arity
-- decrease. Currently, it's just nopDmdType.
decreaseArityDmdType :: DmdType -> DmdType
decreaseArityDmdType _ = nopDmdType

splitDmdTy :: DmdType -> (Demand, DmdType)
-- Split off one function argument
-- We already have a suitable demand on all
-- free vars, so no need to add more!
splitDmdTy ty@DmdType{dt_args=dmd:args} = (dmd, ty{dt_args=args})
splitDmdTy ty@DmdType{dt_env=env}       = (defaultArgDmd (de_div env), ty)

-- | Scale a 'DmdType' by the cardinality with which the described expression
-- is evaluated; see Note [Demands from unsaturated function calls].
multDmdType :: Card -> DmdType -> DmdType
multDmdType n (DmdType fv args)
  = -- pprTrace "multDmdType" (ppr n $$ ppr fv $$ ppr (multDmdEnv n fv)) $
    DmdType (multDmdEnv n fv) (map (multDmd n) args)

-- | Look up and remove the demand on the given variable, e.g. when crossing
-- its binding site during analysis.
peelFV :: DmdType -> Var -> (DmdType, Demand)
peelFV (DmdType fv ds) id = -- pprTrace "rfv" (ppr id <+> ppr dmd $$ ppr fv)
                            (DmdType fv' ds, dmd)
  where
    -- Force these arguments so that old `Env` is not retained.
    !fv' = fv `delDmdEnv` id
    !dmd = lookupDmdEnv fv id

-- | Prepend a demand for one additional argument.
addDemand :: Demand -> DmdType -> DmdType
addDemand dmd (DmdType fv ds) = DmdType fv (dmd:ds)

-- | Look up the demand on the given variable without removing it.
findIdDemand :: DmdType -> Var -> Demand
findIdDemand (DmdType fv _) id = lookupDmdEnv fv id

-- | When e is evaluated after executing an IO action that may throw a precise
-- exception, we act as if there is an additional control flow path that is
-- taken if e throws a precise exception.
The demand type of this control flow -- path -- * is lazy and absent ('topDmd') and boxed in all free variables and arguments -- * has 'exnDiv' 'Divergence' result -- See Note [Precise exceptions and strictness analysis] -- -- So we can simply take a variant of 'nopDmdType', 'exnDmdType'. -- Why not 'nopDmdType'? Because then the result of 'e' can never be 'exnDiv'! -- That means failure to drop dead-ends, see #18086. deferAfterPreciseException :: DmdType -> DmdType deferAfterPreciseException = lubDmdType exnDmdType {- Note [deferAfterPreciseException] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The big picture is in Note [Precise exceptions and strictness analysis] The idea is that we want to treat case of (# s', r #) -> rhs as if it was case of Just (# s', r #) -> rhs Nothing -> error That is, the I/O operation might throw an exception, so that 'rhs' never gets reached. For example, we don't want to be strict in the strict free variables of 'rhs'. So we have the simple definition deferAfterPreciseException = lubDmdType (DmdType emptyDmdEnv [] exnDiv) Historically, when we had `lubBoxity = _unboxedWins` (see Note [unboxedWins]), we had a more complicated definition for deferAfterPreciseException to make sure it preserved boxity in its argument. That was needed for code like case of (# s', r) -> f x which uses `x` *boxed*. If we `lub`bed it with `(DmdType emptyDmdEnv [] exnDiv)` we'd get an *unboxed* demand on `x` (because we let Unboxed win), which led to ticket #20746. Nowadays with `lubBoxity = boxedWins` we don't need the complicated definition. Note [Demand type Divergence] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In contrast to DmdSigs, DmdTypes are elicited under a specific incoming demand. This is described in detail in Note [Understanding DmdType and DmdSig]. Here, we'll focus on what that means for a DmdType's Divergence in a higher-order scenario. 
Consider err x y = x `seq` y `seq` error (show x) this has a strictness signature of <1L><1L>b meaning that we don't know what happens when we call err in weaker contexts than C1(C1(L)), like @err `seq` ()@ (1A) and @err 1 `seq` ()@ (CS(A)). We may not unleash the botDiv, hence assume topDiv. Of course, in @err 1 2 `seq` ()@ the incoming demand CS(CS(A)) is strong enough and we see that the expression diverges. Now consider a function f g = g 1 2 with signature , and the expression f err `seq` () now f puts a strictness demand of C1(C1(L)) onto its argument, which is unleashed on err via the App rule. In contrast to weaker head strictness, this demand is strong enough to unleash err's signature and hence we see that the whole expression diverges! Note [Demand env Equality] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ What is the difference between the Demand env {x->A} and {}? Answer: There is none! They have the exact same semantics, because any var that is not mentioned in 'de_fvs' implicitly has demand 'defaultFvDmd', based on the divergence of the demand env 'de_div'. Similarly, b{x->B, y->A} is the same as b{y->A}, because the default FV demand of BotDiv is B. But neither is equal to b{}, because y has demand B in the latter, not A as before. The Eq instance of DmdEnv must reflect that, otherwise we can get into monotonicity issues during fixed-point iteration ({x->A} /= {} /= {x->A} /= ...). It does so by filtering out any default FV demands prior to comparing 'de_fvs'. Note that 'lubDmdEnv' maintains this kind of equality by using 'plusVarEnv_CD', involving 'defaultFvDmd' for any entries present in one 'de_fvs' but not the other. Note [Asymmetry of plusDmdType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'plus' for DmdTypes is *asymmetrical*, because there can only one be one type contributing argument demands! For example, given (e1 e2), we get a DmdType dt1 for e1, use its arg demand to analyse e2 giving dt2, and then do (dt1 `plusType` dt2). 
Similarly with case e of { p -> rhs } we get dt_scrut from the scrutinee and dt_rhs from the RHS, and then compute (dt_rhs `plusType` dt_scrut). We 1. combine the information on the free variables, 2. take the demand on arguments from the first argument 3. combine the termination results, as in plusDivergence. Since we don't use argument demands of the second argument anyway, 'plus's second argument is just a 'PlusDmdType'. But note that the argument demand types are not guaranteed to be observed in left to right order. For example, analysis of a case expression will pass the demand type for the alts as the left argument and the type for the scrutinee as the right argument. Also, it is not at all clear if there is such an order; consider the LetUp case, where the RHS might be forced at any point while evaluating the let body. Therefore, it is crucial that 'plusDivergence' is symmetric! Note [Demands from unsaturated function calls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a demand transformer d1 -> d2 -> r for f. If a sufficiently detailed demand is fed into this transformer, e.g arising from "f x1 x2" in a strict, use-once context, then d1 and d2 is precisely the demand unleashed onto x1 and x2 (similar for the free variable environment) and furthermore the result information r is the one we want to use. An anonymous lambda is also an unsaturated function all (needs one argument, none given), so this applies to that case as well. But the demand fed into f might be less than C1(C1(L)). Then we have to 'multDmdType' the announced demand type. Examples: * Not strict enough, e.g. C1(C1(L)): - We have to multiply all argument and free variable demands with C_01, zapping strictness. - We have to multiply divergence with C_01. If r says that f Diverges for sure, then this holds when the demand guarantees that two arguments are going to be passed. If the demand is lower, we may just as well converge. 
If we were tracking definite convergence, then that would still hold under
a weaker demand than expected by the demand transformer.

* Used more than once, e.g. CS(C1(L)):
  - Multiply with C_1N. Even if f puts a used-once demand on any of its
    argument or free variables, if we call f multiple times, we may evaluate
    this argument or free variable multiple times.

In dmdTransformSig, we call peelManyCalls to find out the 'Card'inality with
which we have to multiply and then call multDmdType with that.

Similarly, dmdTransformDictSelSig and dmdAnal, when analyzing a Lambda, use
peelCallDmd, which peels only one level, but also returns the demand put on
the body of the function.
-}

{-
************************************************************************
*                                                                      *
                        Demand signatures
*                                                                      *
************************************************************************

In a let-bound Id we record its demand signature.
In principle, this demand signature is a demand transformer, mapping
a demand on the Id into a DmdType, which gives
        a) the free vars of the Id's value
        b) the Id's arguments
        c) an indication of the result of applying
           the Id to its arguments

However, in fact we store in the Id an extremely emasculated demand
transformer, namely

                a single DmdType
(Nevertheless we dignify DmdSig as a distinct type.)

This DmdType gives the demands unleashed by the Id when it is applied
to as many arguments as are given by the arg demands in the DmdType.
Also see Note [Demand type Divergence] for the meaning of a Divergence in a
strictness signature.

If an Id is applied to fewer arguments than its arity, it means that
the demand on the function at a call site is weaker than the vanilla
call demand, used for signature inference. Therefore we place a top
demand on all arguments. Otherwise, the demand is specified by Id's
signature.
For example, the demand transformer described by the demand signature DmdSig (DmdType {x -> <1L>} <1P(L,L)>) says that when the function is applied to two arguments, it unleashes demand 1L on the free var x, A on the first arg, and 1P(L,L) on the second. If this same function is applied to one arg, all we can say is that it uses x with 1L, and its arg with demand 1P(L,L). Note [Understanding DmdType and DmdSig] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Demand types are sound approximations of an expression's semantics relative to the incoming demand we put the expression under. Consider the following expression: \x y -> x `seq` (y, 2*x) Here is a table with demand types resulting from different incoming demands we put that expression under. Note the monotonicity; a stronger incoming demand yields a more precise demand type: incoming demand | demand type -------------------------------- 1A | {} C1(C1(L)) | <1P(L)>{} C1(C1(1P(1P(L),A))) | <1P(A)>{} Note that in the first example, the depth of the demand type was *higher* than the arity of the incoming call demand due to the anonymous lambda. The converse is also possible and happens when we unleash demand signatures. In @f x y@, the incoming call demand on f has arity 2. But if all we have is a demand signature with depth 1 for @f@ (which we can safely unleash, see below), the demand type of @f@ under a call demand of arity 2 has a *lower* depth of 1. So: Demand types are elicited by putting an expression under an incoming (call) demand, the arity of which can be lower or higher than the depth of the resulting demand type. In contrast, a demand signature summarises a function's semantics *without* immediately specifying the incoming demand it was produced under. Despite StrSig being a newtype wrapper around DmdType, it actually encodes two things: * The threshold (i.e., minimum arity) to unleash the signature * A demand type that is sound to unleash when the minimum arity requirement is met. 
Here comes the subtle part: The threshold is encoded in the wrapped demand type's depth! So in mkDmdSigForArity we make sure to trim the list of argument demands to the given threshold arity. Call sites will make sure that this corresponds to the arity of the call demand that elicited the wrapped demand type. See also Note [What are demand signatures?]. -} -- | The depth of the wrapped 'DmdType' encodes the arity at which it is safe -- to unleash. Better construct this through 'mkDmdSigForArity'. -- See Note [Understanding DmdType and DmdSig] newtype DmdSig = DmdSig DmdType deriving Eq -- | Turns a 'DmdType' computed for the particular 'Arity' into a 'DmdSig' -- unleashable at that arity. See Note [Understanding DmdType and DmdSig]. mkDmdSigForArity :: Arity -> DmdType -> DmdSig mkDmdSigForArity arity dmd_ty@(DmdType fvs args) | arity < dmdTypeDepth dmd_ty = DmdSig $ DmdType fvs (take arity args) | otherwise = DmdSig (etaExpandDmdType arity dmd_ty) mkClosedDmdSig :: [Demand] -> Divergence -> DmdSig mkClosedDmdSig ds div = mkDmdSigForArity (length ds) (DmdType (mkEmptyDmdEnv div) ds) splitDmdSig :: DmdSig -> ([Demand], Divergence) splitDmdSig (DmdSig (DmdType env dmds)) = (dmds, de_div env) dmdSigDmdEnv :: DmdSig -> DmdEnv dmdSigDmdEnv (DmdSig (DmdType env _)) = env hasDemandEnvSig :: DmdSig -> Bool hasDemandEnvSig = not . isEmptyVarEnv . de_fvs . dmdSigDmdEnv botSig :: DmdSig botSig = DmdSig botDmdType nopSig :: DmdSig nopSig = DmdSig nopDmdType isNopSig :: DmdSig -> Bool isNopSig (DmdSig ty) = ty == nopDmdType -- | True if the signature diverges or throws an exception in a saturated call. -- See Note [Dead ends]. 
isDeadEndSig :: DmdSig -> Bool
isDeadEndSig (DmdSig (DmdType env _)) = isDeadEndDiv (de_div env)

-- | True when the signature indicates all arguments are boxed
onlyBoxedArguments :: DmdSig -> Bool
onlyBoxedArguments (DmdSig (DmdType _ dmds)) = all demandIsBoxed dmds
  where
    demandIsBoxed BotDmd    = True
    demandIsBoxed AbsDmd    = True
    demandIsBoxed (_ :* sd) = subDemandIsboxed sd

    subDemandIsboxed (Poly Unboxed _) = False
    subDemandIsboxed (Poly _ _)       = True
    subDemandIsboxed (Call _ sd)      = subDemandIsboxed sd
    subDemandIsboxed (Prod Unboxed _) = False
    subDemandIsboxed (Prod _ ds)      = all demandIsBoxed ds

-- | Returns true if an application to n value args would diverge or throw an
-- exception.
--
-- If a function having 'botDiv' is applied to fewer arguments than
-- its syntactic arity, we cannot say for sure that it is going to diverge.
-- Hence this function conservatively returns False in that case.
-- See Note [Dead ends].
isDeadEndAppSig :: DmdSig -> Int -> Bool
isDeadEndAppSig (DmdSig (DmdType env ds)) n
  = isDeadEndDiv (de_div env) && not (lengthExceeds ds n)

-- | Drop (via 'trimBoxity') all boxity information from the free-variable
-- demands of an environment.
trimBoxityDmdEnv :: DmdEnv -> DmdEnv
trimBoxityDmdEnv (DE fvs div) = DE (mapVarEnv trimBoxity fvs) div

-- | Drop all boxity information from both free-variable and argument demands.
trimBoxityDmdType :: DmdType -> DmdType
trimBoxityDmdType (DmdType env ds)
  = DmdType (trimBoxityDmdEnv env) (map trimBoxity ds)

-- | 'trimBoxityDmdType' lifted through the 'DmdSig' newtype.
trimBoxityDmdSig :: DmdSig -> DmdSig
trimBoxityDmdSig = coerce trimBoxityDmdType

prependArgsDmdSig :: Int -> DmdSig -> DmdSig
-- ^ Add extra ('topDmd') arguments to a strictness signature.
-- In contrast to 'etaConvertDmdSig', this /prepends/ additional argument
-- demands. This is used by FloatOut.
prependArgsDmdSig new_args sig@(DmdSig dmd_ty@(DmdType env dmds))
  | new_args == 0        = sig
  | dmd_ty == nopDmdType = sig -- prepending topDmds to a nop type is a no-op
  | otherwise            = DmdSig (DmdType env dmds')
  where
    dmds' = replicate new_args topDmd ++ dmds

etaConvertDmdSig :: Arity -> DmdSig -> DmdSig
-- ^ We are expanding (\x y. e) to (\x y z. e z) or reducing from the latter to
-- the former (when the Simplifier identifies a new join point, for example).
-- In contrast to 'prependArgsDmdSig', this /appends/ extra arg demands if
-- necessary.
-- This works by looking at the 'DmdType' (which was produced under a call
-- demand for the old arity) and trying to transfer as many facts as we can to
-- the call demand of new arity.
-- An arity increase (resulting in a stronger incoming demand) can retain much
-- of the info, while an arity decrease (a weakening of the incoming demand)
-- must fall back to a conservative default.
etaConvertDmdSig arity (DmdSig dmd_ty)
  | arity < dmdTypeDepth dmd_ty = DmdSig $ decreaseArityDmdType dmd_ty
  | otherwise                   = DmdSig $ etaExpandDmdType arity dmd_ty

{-
************************************************************************
*                                                                      *
                     Demand transformers
*                                                                      *
************************************************************************
-}

-- | A /demand transformer/ is a monotone function from an incoming evaluation
-- context ('SubDemand') to a 'DmdType', describing how the denoted thing
-- (i.e. expression, function) uses its arguments and free variables, and
-- whether it diverges.
--
-- See Note [Understanding DmdType and DmdSig]
-- and Note [What are demand signatures?].
type DmdTransformer = SubDemand -> DmdType

-- | Extrapolate a demand signature ('DmdSig') into a 'DmdTransformer'.
--
-- Given a function's 'DmdSig' and a 'SubDemand' for the evaluation context,
-- return how the function evaluates its free variables and arguments.
dmdTransformSig :: DmdSig -> DmdTransformer
dmdTransformSig (DmdSig dmd_ty@(DmdType _ arg_ds)) sd
  = multDmdType (fst $ peelManyCalls (length arg_ds) sd) dmd_ty
    -- see Note [Demands from unsaturated function calls]
    -- and Note [What are demand signatures?]

-- | A special 'DmdTransformer' for data constructors that feeds product
-- demands into the constructor arguments.
dmdTransformDataConSig :: [StrictnessMark] -> DmdTransformer -- See Note [Demand transformer for data constructors] dmdTransformDataConSig str_marks sd = case viewProd arity body_sd of Just (_, dmds) -> mk_body_ty n dmds Nothing -> nopDmdType where arity = length str_marks (n, body_sd) = peelManyCalls arity sd mk_body_ty n dmds = DmdType nopDmdEnv (zipWith (bump n) str_marks dmds) bump n str dmd | isMarkedStrict str = multDmd n (plusDmd str_field_dmd dmd) | otherwise = multDmd n dmd str_field_dmd = C_01 :* seqSubDmd -- Why not C_11? See Note [Data-con worker strictness] -- | A special 'DmdTransformer' for dictionary selectors that feeds the demand -- on the result into the indicated dictionary component (if saturated). -- See Note [Demand transformer for a dictionary selector]. dmdTransformDictSelSig :: DmdSig -> DmdTransformer -- NB: This currently doesn't handle newtype dictionaries. -- It should simply apply call_sd directly to the dictionary, I suppose. dmdTransformDictSelSig (DmdSig (DmdType _ [_ :* prod])) call_sd | (n, sd') <- peelCallDmd call_sd , Prod _ sig_ds <- prod = multDmdType n $ DmdType nopDmdEnv [C_11 :* mkProd Unboxed (map (enhance sd') sig_ds)] | otherwise = nopDmdType -- See Note [Demand transformer for a dictionary selector] where enhance _ AbsDmd = AbsDmd enhance _ BotDmd = BotDmd enhance sd _dmd_var = C_11 :* sd -- This is the one! -- C_11, because we multiply with n above dmdTransformDictSelSig sig sd = pprPanic "dmdTransformDictSelSig: no args" (ppr sig $$ ppr sd) {- Note [What are demand signatures?] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Demand analysis interprets expressions in the abstract domain of demand transformers. Given a (sub-)demand that denotes the evaluation context, the abstract transformer of an expression gives us back a demand type denoting how other things (like arguments and free vars) were used when the expression was evaluated. Here's an example: f x y = if x + expensive then \z -> z + y * ... else \z -> z * ... 
The abstract transformer (let's call it F_e) of the if expression (let's call it e) would transform an incoming (undersaturated!) head demand 1A into a demand type like {x-><1L>,y->}. In pictures: Demand ---F_e---> DmdType <1A> {x-><1L>,y->} Let's assume that the demand transformers we compute for an expression are correct wrt. to some concrete semantics for Core. How do demand signatures fit in? They are strange beasts, given that they come with strict rules when to it's sound to unleash them. Fortunately, we can formalise the rules with Galois connections. Consider f's strictness signature, {}<1L>. It's a single-point approximation of the actual abstract transformer of f's RHS for arity 2. So, what happens is that we abstract *once more* from the abstract domain we already are in, replacing the incoming Demand by a simple lattice with two elements denoting incoming arity: A_2 = {<2, >=2} (where '<2' is the top element and >=2 the bottom element). Here's the diagram: A_2 -----f_f----> DmdType ^ | | α γ | | v SubDemand --F_f----> DmdType With α(C1(C1(_))) = >=2 α(_) = <2 γ(ty) = ty and F_f being the abstract transformer of f's RHS and f_f being the abstracted abstract transformer computable from our demand signature simply by f_f(>=2) = {}<1L> f_f(<2) = multDmdType C_0N {}<1L> where multDmdType makes a proper top element out of the given demand type. In practice, the A_n domain is not just a simple Bool, but a Card, which is exactly the Card with which we have to multDmdType. The Card for arity n is computed by calling @peelManyCalls n@, which corresponds to α above. Note [Demand transformer for a dictionary selector] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have a superclass selector 'sc_sel' and a class method selector 'op_sel', and a function that uses both, like this -- Strictness sig: 1P(1,A) sc_sel (x,y) = x -- Strictness sig: 1P(A,1) op_sel (p,q)= q f d v = op_sel (sc_sel d) v What do we learn about the demand on 'd'? 
Alas, we see only the demand from 'sc_sel', namely '1P(1,A)'. We /don't/ see that 'd' really has a nested demand '1P(1P(A,1C1(1)),A)'. On the other hand, if we inlined the two selectors we'd have f d x = case d of (x,_) -> case x of (_,q) -> q v If we analyse that, we'll get a richer, nested demand on 'd'. We want to behave /as if/ we'd inlined 'op_sel' and 'sc_sel'. We can do this easily by building a richer demand transformer for dictionary selectors than is expressible by a regular demand signature. And that is what 'dmdTransformDictSelSig' does: it transforms the demand on the result to a demand on the (single) argument. How does it do that? If we evaluate (op dict-expr) under demand 'd', then we can push the demand 'd' into the appropriate field of the dictionary. What *is* the appropriate field? We just look at the strictness signature of the class op, which will be something like: P(AAA1AAAAA). Then replace the '1' (or any other non-absent demand, really) by the demand 'd'. The '1' acts as if it was a demand variable, the whole signature really means `\d. P(AAAdAAAAA)` for any incoming demand 'd'. For single-method classes, which are represented by newtypes the signature of 'op' won't look like P(...), so matching on Prod will fail. That's fine: if we are doing strictness analysis we are also doing inlining, so we'll have inlined 'op' into a cast. So we can bale out in a conservative way, returning nopDmdType. SG: Although we then probably want to apply the eval demand 'd' directly to 'op' rather than turning it into 'topSubDmd'... It is (just.. #8329) possible to be running strictness analysis *without* having inlined class ops from single-method classes. Suppose you are using ghc --make; and the first module has a local -O0 flag. So you may load a class without interface pragmas, ie (currently) without an unfolding for the class ops. 
Now if a subsequent module in the --make sweep has a local -O flag
you might do strictness analysis, but there is no inlining for the class op.
This is weird, so I'm not worried about whether this optimises brilliantly; but
it should not fall over.
-}

-- | Replace the free-variable demands by an empty environment, keeping only
-- the 'Divergence'.
zapDmdEnv :: DmdEnv -> DmdEnv
zapDmdEnv (DE _ div) = mkEmptyDmdEnv div

-- | Remove the demand environment from the signature.
zapDmdEnvSig :: DmdSig -> DmdSig
zapDmdEnvSig (DmdSig (DmdType env ds)) = DmdSig (DmdType (zapDmdEnv env) ds)

zapUsageDemand :: Demand -> Demand
-- Remove the usage info, but not the strictness info, from the demand
zapUsageDemand = kill_usage $ KillFlags
    { kf_abs         = True
    , kf_used_once   = True
    , kf_called_once = True
    }

-- | Remove all `C_01 :*` info (but not `CM` sub-demands) from the demand
zapUsedOnceDemand :: Demand -> Demand
zapUsedOnceDemand = kill_usage $ KillFlags
    { kf_abs         = False
    , kf_used_once   = True
    , kf_called_once = False
    }

-- | Remove all `C_01 :*` info (but not `CM` sub-demands) from the strictness
-- signature
zapUsedOnceSig :: DmdSig -> DmdSig
zapUsedOnceSig (DmdSig (DmdType env ds))
  = DmdSig (DmdType env (map zapUsedOnceDemand ds))

-- | Flags controlling which pieces of usage information 'kill_usage' erases.
data KillFlags = KillFlags
    { kf_abs         :: Bool   -- ^ Erase absence info (C_00/C_10)
    , kf_used_once   :: Bool   -- ^ Erase used-once info (C_01/C_11)
    , kf_called_once :: Bool   -- ^ Erase called-once info on 'Call' sub-demands
    }

-- | Weaken a 'Card' according to the 'KillFlags': each erased upper bound of
-- 1 (or 0) becomes N, preserving the lower (strictness) bound.
kill_usage_card :: KillFlags -> Card -> Card
kill_usage_card kfs C_00 | kf_abs kfs       = C_0N
kill_usage_card kfs C_10 | kf_abs kfs       = C_1N
kill_usage_card kfs C_01 | kf_used_once kfs = C_0N
kill_usage_card kfs C_11 | kf_used_once kfs = C_1N
kill_usage_card _   n                       = n

kill_usage :: KillFlags -> Demand -> Demand
kill_usage _   AbsDmd    = AbsDmd
kill_usage _   BotDmd    = BotDmd
kill_usage kfs (n :* sd) = kill_usage_card kfs n :* kill_usage_sd kfs sd

kill_usage_sd :: KillFlags -> SubDemand -> SubDemand
kill_usage_sd kfs (Call n sd)
  -- lub with C_1N turns a called-once card into a called-many card while
  -- keeping the strictness lower bound.
  | kf_called_once kfs = mkCall (lubCard C_1N n) (kill_usage_sd kfs sd)
  | otherwise          = mkCall n                (kill_usage_sd kfs sd)
kill_usage_sd kfs (Prod b ds) = mkProd b (map (kill_usage kfs) ds)
kill_usage_sd _   sd          = sd

{-
*********************************************************************
*                                                                   *
                 TypeShape and demand trimming
*                                                                   *
*********************************************************************
-}

-- | The shape of a type, used to trim a demand so it does not claim more
-- structure than the type provides.
data TypeShape -- See Note [Trimming a demand to a type]
               -- in GHC.Core.Opt.DmdAnal
  = TsFun TypeShape      -- ^ A function type; shape of the result
  | TsProd [TypeShape]   -- ^ A product type; shape of each field
  | TsUnk                -- ^ Unknown shape: trim everything below here

trimToType :: Demand -> TypeShape -> Demand
-- See Note [Trimming a demand to a type] in GHC.Core.Opt.DmdAnal
trimToType AbsDmd    _  = AbsDmd
trimToType BotDmd    _  = BotDmd
trimToType (n :* sd) ts = n :* go sd ts
  where
    -- Only keep Prod/Call structure where the TypeShape confirms it;
    -- anything else collapses to the polymorphic top sub-demand.
    go (Prod b ds) (TsProd tss)
      | equalLength ds tss    = mkProd b (zipWith trimToType ds tss)
    go (Call n sd) (TsFun ts) = mkCall n (go sd ts)
    go sd@Poly{}   _          = sd
    go _           _          = topSubDmd

-- | Drop all boxity
trimBoxity :: Demand -> Demand
trimBoxity AbsDmd    = AbsDmd
trimBoxity BotDmd    = BotDmd
trimBoxity (n :* sd) = n :* go sd
  where
    go (Poly _ n)  = Poly Boxed n
    go (Prod _ ds) = mkProd Boxed (map trimBoxity ds)
    go (Call n sd) = mkCall n $ go sd

{-
************************************************************************
*                                                                      *
                     'seq'ing demands
*                                                                      *
************************************************************************
-}

-- Deep-forcing functions: evaluate the full structure of a demand so no
-- thunks are retained when demands are stored in interface data.

seqDemand :: Demand -> ()
seqDemand AbsDmd    = ()
seqDemand BotDmd    = ()
seqDemand (_ :* sd) = seqSubDemand sd

seqSubDemand :: SubDemand -> ()
seqSubDemand (Prod _ ds) = seqDemandList ds
seqSubDemand (Call _ sd) = seqSubDemand sd
seqSubDemand (Poly _ _)  = ()

seqDemandList :: [Demand] -> ()
seqDemandList = foldr (seq . seqDemand) ()

seqDmdType :: DmdType -> ()
seqDmdType (DmdType env ds) =
  seqDmdEnv env `seq` seqDemandList ds `seq` ()

seqDmdEnv :: DmdEnv -> ()
seqDmdEnv (DE fvs _) = seqEltsUFM seqDemand fvs

seqDmdSig :: DmdSig -> ()
seqDmdSig (DmdSig ty) = seqDmdType ty

{-
************************************************************************
*                                                                      *
                     Outputable and Binary instances
*                                                                      *
************************************************************************
-}

-- Just for debugging purposes.
-- | Raw constructor names, for debugging only; user-facing output goes
-- through the 'Outputable' instance below instead.
instance Show Card where
  show C_00 = "C_00"
  show C_01 = "C_01"
  show C_0N = "C_0N"
  show C_10 = "C_10"
  show C_11 = "C_11"
  show C_1N = "C_1N"

{- Note [Demand notation]
~~~~~~~~~~~~~~~~~~~~~~~~~
This Note should be kept up to date with the documentation of `-fstrictness`
in the user's guide.

For pretty-printing demands, we use quite a compact notation with some
abbreviations. Here's the BNF:

  card ::= B                        {}
        |  A                        {0}
        |  M                        {0,1}
        |  L                        {0,1,n}
        |  1                        {1}
        |  S                        {1,n}

  box  ::= !                        Unboxed
        |                           Boxed

  d    ::= card sd                  The :* constructor, just juxtaposition
        |  card                     abbreviation: Same as "card card"

  sd   ::= box card                 @Poly box card@
        |  box P(d,d,..)            @Prod box [d1,d2,..]@
        |  Ccard(sd)                @Call card sd@

So, L can denote a 'Card', polymorphic 'SubDemand' or polymorphic 'Demand',
but it's always clear from context which "overload" is meant. It's like
return-type inference of e.g. 'read'.

Examples are in the haddock for 'Demand'.

This is the syntax for demand signatures:

  div ::= <empty>      topDiv
       |  x            exnDiv
       |  b            botDiv

  sig ::= {x->dx,y->dy,z->dz...}...div
              ^              ^   ^   ^    ^    ^
              |              |   |   |    |    |
              |              \---+---+----/    |
              |                  |             |
         demand on free     demand on      divergence
           variables        arguments      information
       (omitted if empty)                (omitted if
                                         no information)
-}

-- | See Note [Demand notation]
-- Current syntax was discussed in #19016.
instance Outputable Card where
  ppr C_00 = char 'A' -- "Absent"
  ppr C_01 = char 'M' -- "Maybe"
  ppr C_0N = char 'L' -- "Lazy"
  ppr C_11 = char '1' -- "exactly 1"
  ppr C_1N = char 'S' -- "Strict"
  ppr C_10 = char 'B' -- "Bottom"

-- | See Note [Demand notation]
instance Outputable Demand where
  ppr AbsDmd                    = char 'A'
  ppr BotDmd                    = char 'B'
  ppr (C_0N :* Poly Boxed C_0N) = char 'L' -- Print LL as just L
  ppr (C_1N :* Poly Boxed C_1N) = char 'S' -- Ditto SS
  ppr (n :* sd)                 = ppr n <> ppr sd

-- | See Note [Demand notation]
instance Outputable SubDemand where
  ppr (Poly b sd) = pp_boxity b <> ppr sd
  ppr (Call n sd) = char 'C' <> ppr n <> parens (ppr sd)
  ppr (Prod b ds) = pp_boxity b <> char 'P' <> parens (fields ds)
    where
      fields []     = empty
      fields [x]    = ppr x
      fields (x:xs) = ppr x <> char ',' <> fields xs

-- | Unboxed fields are prefixed with @!@; boxed fields print nothing.
pp_boxity :: Boxity -> SDoc
pp_boxity Unboxed = char '!'
pp_boxity _       = empty

instance Outputable Divergence where
  ppr Diverges = char 'b' -- for (b)ottom
  ppr ExnOrDiv = char 'x' -- for e(x)ception
  ppr Dunno    = empty

instance Outputable DmdEnv where
  ppr (DE fvs div)
    = ppr div <> if null fv_elts then empty
                 else braces (fsep (map pp_elt fv_elts))
    where
      pp_elt (uniq, dmd) = ppr uniq <> text "->" <> ppr dmd
      fv_elts = nonDetUFMToList fvs
        -- It's OK to use nonDetUFMToList here because we only do it for
        -- pretty printing

instance Outputable DmdType where
  ppr (DmdType fv ds)
    = hcat (map (angleBrackets . ppr) ds) <> ppr fv

instance Outputable DmdSig where
  ppr (DmdSig ty) = ppr ty

instance Outputable TypeShape where
  ppr TsUnk        = text "TsUnk"
  ppr (TsFun ts)   = text "TsFun" <> parens (ppr ts)
  ppr (TsProd tss) = parens (hsep $ punctuate comma $ map ppr tss)

-- Serialisation for interface files. The byte tags below are part of the
-- interface-file format: keep put_/get in sync when changing them.
instance Binary Card where
  put_ bh C_00 = putByte bh 0
  put_ bh C_01 = putByte bh 1
  put_ bh C_0N = putByte bh 2
  put_ bh C_11 = putByte bh 3
  put_ bh C_1N = putByte bh 4
  put_ bh C_10 = putByte bh 5
  get bh = do
    h <- getByte bh
    case h of
      0 -> return C_00
      1 -> return C_01
      2 -> return C_0N
      3 -> return C_11
      4 -> return C_1N
      5 -> return C_10
      _ -> pprPanic "Binary:Card" (ppr (fromIntegral h :: Int))

instance Binary Demand where
  -- C_00 (AbsDmd) and C_10 (BotDmd) have no interesting sub-demand,
  -- so the sub-demand is omitted from the stream for those cards.
  put_ bh (n :* sd) = put_ bh n *> case n of
    C_00 -> return ()
    C_10 -> return ()
    _    -> put_ bh sd
  get bh = get bh >>= \n -> case n of
    C_00 -> return AbsDmd
    C_10 -> return BotDmd
    _    -> (n :*) <$> get bh

instance Binary SubDemand where
  put_ bh (Poly b sd) = putByte bh 0 *> put_ bh b *> put_ bh sd
  put_ bh (Call n sd) = putByte bh 1 *> put_ bh n *> put_ bh sd
  put_ bh (Prod b ds) = putByte bh 2 *> put_ bh b *> put_ bh ds
  get bh = do
    h <- getByte bh
    case h of
      0 -> Poly <$> get bh <*> get bh
      -- NB: mkCall (not Call) on the way in, to re-establish the
      -- SubDemand invariants after deserialisation.
      1 -> mkCall <$> get bh <*> get bh
      2 -> Prod <$> get bh <*> get bh
      _ -> pprPanic "Binary:SubDemand" (ppr (fromIntegral h :: Int))

instance Binary Divergence where
  put_ bh Dunno    = putByte bh 0
  put_ bh ExnOrDiv = putByte bh 1
  put_ bh Diverges = putByte bh 2
  get bh = do
    h <- getByte bh
    case h of
      0 -> return Dunno
      1 -> return ExnOrDiv
      2 -> return Diverges
      _ -> pprPanic "Binary:Divergence" (ppr (fromIntegral h :: Int))

instance Binary DmdEnv where
  -- Ignore VarEnv when spitting out the DmdType
  put_ bh (DE _ d) = put_ bh d
  get bh = DE emptyVarEnv <$> get bh

instance Binary DmdType where
  put_ bh (DmdType fv ds) = put_ bh fv *> put_ bh ds
  get bh = DmdType <$> get bh <*> get bh

instance Binary DmdSig where
  put_ bh (DmdSig aa) = put_ bh aa
  get bh = DmdSig <$> get bh
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Error.hs0000644000000000000000000005535214472400113020377 0ustar0000000000000000{-# LANGUAGE GADTs #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} module GHC.Types.Error ( -- * Messages Messages , mkMessages , getMessages , emptyMessages , isEmptyMessages , singleMessage , addMessage , unionMessages , unionManyMessages , MsgEnvelope (..) -- * Classifying Messages , MessageClass (..) , Severity (..) , Diagnostic (..) , DiagnosticMessage (..) , DiagnosticReason (..) , DiagnosticHint (..) , mkPlainDiagnostic , mkPlainError , mkDecoratedDiagnostic , mkDecoratedError -- * Hints and refactoring actions , GhcHint (..) , AvailableBindings(..) , LanguageExtensionHint(..) , suggestExtension , suggestExtensionWithInfo , suggestExtensions , suggestExtensionsWithInfo , suggestAnyExtension , suggestAnyExtensionWithInfo , useExtensionInOrderTo , noHints -- * Rendering Messages , SDoc , DecoratedSDoc (unDecorated) , mkDecorated, mkSimpleDecorated , unionDecoratedSDoc , mapDecoratedSDoc , pprMessageBag , mkLocMessage , mkLocMessageAnn , getCaretDiagnostic -- * Queries , isIntrinsicErrorMessage , isExtrinsicErrorMessage , isWarningMessage , getErrorMessages , getWarningMessages , partitionMessages , errorsFound , errorsOrFatalWarningsFound ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Data.Bag import GHC.IO (catchException) import GHC.Utils.Outputable as Outputable import qualified GHC.Utils.Ppr.Colour as Col import GHC.Types.SrcLoc as SrcLoc import GHC.Data.FastString (unpackFS) import GHC.Data.StringBuffer (atLine, hGetStringBuffer, len, lexemeToString) import GHC.Utils.Json import Data.Bifunctor import Data.Foldable ( fold ) import GHC.Types.Hint {- Note [Messages] ~~~~~~~~~~~~~~~ We represent the 'Messages' as a single bag of warnings and errors. 
The reason behind that is that there is a fluid relationship between errors
and warnings and we want to be able to promote or demote errors and warnings
based on certain flags (e.g. -Werror, -fdefer-type-errors or
-XPartialTypeSignatures). More specifically, every diagnostic has a
'DiagnosticReason', but a warning 'DiagnosticReason' might be associated with
'SevError', in the case of -Werror.

We rely on the 'Severity' to distinguish between a warning and an error.

'WarningMessages' and 'ErrorMessages' are for now simple type aliases to
retain backward compatibility, but in future iterations these can be either
parameterised over an 'e' message type (to make type signatures a bit more
declarative) or removed altogether.
-}

-- | A collection of messages emitted by GHC during error reporting. A
-- diagnostic message is typically a warning or an error. See Note [Messages].
--
-- /INVARIANT/: All the messages in this collection must be relevant, i.e.
-- their 'Severity' should /not/ be 'SevIgnore'. The smart constructor
-- 'mkMessages' will filter out any message which 'Severity' is 'SevIgnore'.
newtype Messages e = Messages { getMessages :: Bag (MsgEnvelope e) }
  deriving newtype (Semigroup, Monoid)
  deriving stock (Functor, Foldable, Traversable)

emptyMessages :: Messages e
emptyMessages = Messages emptyBag

-- | Smart constructor that establishes the 'Messages' invariant by dropping
-- every 'SevIgnore' envelope.
mkMessages :: Bag (MsgEnvelope e) -> Messages e
mkMessages = Messages . filterBag interesting
  where
    interesting :: MsgEnvelope e -> Bool
    interesting = (/=) SevIgnore . errMsgSeverity

isEmptyMessages :: Messages e -> Bool
isEmptyMessages (Messages msgs) = isEmptyBag msgs

-- | Wrap a single envelope into a 'Messages' collection.
singleMessage :: MsgEnvelope e -> Messages e
singleMessage e = addMessage e emptyMessages

instance Diagnostic e => Outputable (Messages e) where
  ppr msgs = braces (vcat (map ppr_one (bagToList (getMessages msgs))))
    where
      ppr_one :: MsgEnvelope e -> SDoc
      ppr_one envelope = pprDiagnostic (errMsgDiagnostic envelope)

{- Note [Discarding Messages]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Discarding a 'SevIgnore' message from 'addMessage' and 'unionMessages' is
just an optimisation, as GHC would /also/ suppress any diagnostic which
severity is 'SevIgnore' before printing the message: See for example
'putLogMsg' and 'defaultLogAction'.
-}

-- | Adds a 'Message' to the input collection of messages.
-- See Note [Discarding Messages].
addMessage :: MsgEnvelope e -> Messages e -> Messages e
addMessage x (Messages xs)
  | SevIgnore <- errMsgSeverity x = Messages xs
  | otherwise                     = Messages (x `consBag` xs)

-- | Joins two collections of messages together.
-- See Note [Discarding Messages].
unionMessages :: Messages e -> Messages e -> Messages e
unionMessages (Messages msgs1) (Messages msgs2) =
  Messages (msgs1 `unionBags` msgs2)

-- | Joins many 'Messages's together
unionManyMessages :: Foldable f => f (Messages e) -> Messages e
unionManyMessages = fold

-- | A 'DecoratedSDoc' is isomorphic to a '[SDoc]' but it carries the
-- invariant that the input '[SDoc]' needs to be rendered /decorated/ into its
-- final form, where the typical case would be adding bullets between each
-- elements of the list. The type of decoration depends on the formatting
-- function used, but in practice GHC uses the 'formatBulleted'.
newtype DecoratedSDoc = Decorated { unDecorated :: [SDoc] }

-- | Creates a new 'DecoratedSDoc' out of a list of 'SDoc'.
mkDecorated :: [SDoc] -> DecoratedSDoc
mkDecorated = Decorated

-- | Creates a new 'DecoratedSDoc' out of a single 'SDoc'
mkSimpleDecorated :: SDoc -> DecoratedSDoc
mkSimpleDecorated doc = Decorated [doc]

-- | Joins two 'DecoratedSDoc' together. The resulting 'DecoratedSDoc'
-- will have a number of entries which is the sum of the lengths of
-- the input.
unionDecoratedSDoc :: DecoratedSDoc -> DecoratedSDoc -> DecoratedSDoc
unionDecoratedSDoc (Decorated s1) (Decorated s2) =
  Decorated (s1 `mappend` s2)

-- | Apply a transformation function to all elements of a 'DecoratedSDoc'.
mapDecoratedSDoc :: (SDoc -> SDoc) -> DecoratedSDoc -> DecoratedSDoc
mapDecoratedSDoc f (Decorated s1) =
  Decorated (map f s1)

{- Note [Rendering Messages]
~~~~~~~~~~~~~~~~~~~~~~~~~

Turning 'Messages' into something that renders nicely for the user is one of
the last steps, and it happens typically at the application's boundaries (i.e.
from the 'Driver' upwards).

For now (see #18516) this class has few instances, but the idea is that as
the more domain-specific types are defined, the more instances we would get.
For example, given something like:

  data TcRnDiagnostic
    = TcRnOutOfScope ..
    | ..

  newtype TcRnMessage = TcRnMessage (DiagnosticMessage TcRnDiagnostic)

We could then define how a 'TcRnDiagnostic' is displayed to the user. Rather
than scattering pieces of 'SDoc' around the codebase, we would write once for
all:

  instance Diagnostic TcRnDiagnostic where
    diagnosticMessage (TcRnMessage msg) = case diagMessage msg of
      TcRnOutOfScope .. -> Decorated [text "Out of scope error ..."]
      ...

This way, we can easily write generic rendering functions for errors that all
they care about is the knowledge that a given type 'e' has a 'Diagnostic'
constraint.
-}

-- | A class identifying a diagnostic.
-- Dictionary.com defines a diagnostic as:
--
-- \"a message output by a computer diagnosing an error in a computer program,
-- computer system, or component device\".
--
-- A 'Diagnostic' carries the /actual/ description of the message (which, in
-- GHC's case, can be an error or a warning) and the /reason/ why such a
-- message was generated in the first place. See also Note [Rendering
-- Messages].
class Diagnostic a where
  diagnosticMessage :: a -> DecoratedSDoc
  diagnosticReason  :: a -> DiagnosticReason
  diagnosticHints   :: a -> [GhcHint]

-- | Render a diagnostic for debugging: the reason, then the (undecorated)
-- message body indented underneath.
pprDiagnostic :: Diagnostic e => e -> SDoc
pprDiagnostic e = vcat [ ppr (diagnosticReason e)
                       , nest 2 (vcat (unDecorated (diagnosticMessage e))) ]

-- | A generic 'Hint' message, to be used with 'DiagnosticMessage'.
data DiagnosticHint = DiagnosticHint !SDoc

instance Outputable DiagnosticHint where
  ppr (DiagnosticHint msg) = msg

-- | A generic 'Diagnostic' message, without any further classification or
-- provenance: by looking at a 'DiagnosticMessage' we know neither /where/ it
-- was generated nor how to interpret its payload (as it's just a structured
-- document). All we can do is to print it out and look at its
-- 'DiagnosticReason'.
data DiagnosticMessage = DiagnosticMessage
  { diagMessage :: !DecoratedSDoc
  , diagReason  :: !DiagnosticReason
  , diagHints   :: [GhcHint]
  }

instance Diagnostic DiagnosticMessage where
  diagnosticMessage = diagMessage
  diagnosticReason  = diagReason
  diagnosticHints   = diagHints

-- | Helper function to use when no hints can be provided. Currently this function
-- can be used to construct plain 'DiagnosticMessage' and add hints to them, but
-- once #18516 will be fully executed, the main usage of this function would be in
-- the implementation of the 'diagnosticHints' typeclass method, to report the fact
-- that a particular 'Diagnostic' has no hints.
noHints :: [GhcHint]
noHints = mempty

-- | Create a 'DiagnosticMessage' holding just a single 'SDoc', with the
-- given 'DiagnosticReason' and hints.
mkPlainDiagnostic :: DiagnosticReason -> [GhcHint] -> SDoc -> DiagnosticMessage
mkPlainDiagnostic rea hints doc = DiagnosticMessage (mkSimpleDecorated doc) rea hints

-- | Create an error 'DiagnosticMessage' holding just a single 'SDoc'
mkPlainError :: [GhcHint] -> SDoc -> DiagnosticMessage
mkPlainError hints doc = DiagnosticMessage (mkSimpleDecorated doc) ErrorWithoutFlag hints

-- | Create a 'DiagnosticMessage' from a list of bulleted SDocs and a 'DiagnosticReason'
mkDecoratedDiagnostic :: DiagnosticReason -> [GhcHint] -> [SDoc] -> DiagnosticMessage
mkDecoratedDiagnostic rea hints docs = DiagnosticMessage (mkDecorated docs) rea hints

-- | Create an error 'DiagnosticMessage' from a list of bulleted SDocs
mkDecoratedError :: [GhcHint] -> [SDoc] -> DiagnosticMessage
mkDecoratedError hints docs = DiagnosticMessage (mkDecorated docs) ErrorWithoutFlag hints

-- | The reason /why/ a 'Diagnostic' was emitted in the first place.
-- Diagnostic messages are born within GHC with a very precise reason, which
-- can be completely statically-computed (i.e. this is an error or a warning
-- no matter what), or influenced by the specific state of the 'DynFlags' at
-- the moment of the creation of a new 'Diagnostic'. For example, a parsing
-- error is /always/ going to be an error, whereas a 'WarningWithoutFlag
-- Opt_WarnUnusedImports' might turn into an error due to '-Werror' or
-- '-Werror=warn-unused-imports'. Interpreting a 'DiagnosticReason' together
-- with its associated 'Severity' gives us the full picture.
data DiagnosticReason
  = WarningWithoutFlag
  -- ^ Born as a warning.
  | WarningWithFlag !WarningFlag
  -- ^ Warning was enabled with the flag.
  | ErrorWithoutFlag
  -- ^ Born as an error.
  deriving (Eq, Show)

instance Outputable DiagnosticReason where
  ppr = \case
    WarningWithoutFlag -> text "WarningWithoutFlag"
    WarningWithFlag wf -> text ("WarningWithFlag " ++ show wf)
    ErrorWithoutFlag   -> text "ErrorWithoutFlag"

-- | An envelope for GHC's facts about a running program, parameterised over the
-- /domain-specific/ (i.e. parsing, typecheck-renaming, etc) diagnostics.
--
-- To say things differently, GHC emits /diagnostics/ about the running
-- program, each of which is wrapped into a 'MsgEnvelope' that carries
-- specific information like where the error happened, etc. Finally, multiple
-- 'MsgEnvelope's are aggregated into 'Messages' that are returned to the
-- user.
data MsgEnvelope e = MsgEnvelope
   { errMsgSpan       :: SrcSpan
      -- ^ The SrcSpan is used for sorting errors into line-number order
   , errMsgContext    :: PrintUnqualified
   , errMsgDiagnostic :: e
   , errMsgSeverity   :: Severity
   } deriving (Functor, Foldable, Traversable)

-- | The class for a diagnostic message. The main purpose is to classify a
-- message within GHC, to distinguish it from a debug/dump message vs a proper
-- diagnostic, for which we include a 'DiagnosticReason'.
data MessageClass
  = MCOutput
  | MCFatal
  | MCInteractive
  | MCDump
    -- ^ Log message intended for compiler developers
    -- No file\/line\/column stuff
  | MCInfo
    -- ^ Log messages intended for end users.
    -- No file\/line\/column stuff.
  | MCDiagnostic Severity DiagnosticReason
    -- ^ Diagnostics from the compiler. This constructor is very powerful as
    -- it allows the construction of a 'MessageClass' with a completely
    -- arbitrary permutation of 'Severity' and 'DiagnosticReason'. As such,
    -- users are encouraged to use the 'mkMCDiagnostic' smart constructor
    -- instead. Use this constructor directly only if you need to construct
    -- and manipulate diagnostic messages directly, for example inside
    -- 'GHC.Utils.Error'. In all the other circumstances, /especially/ when
    -- emitting compiler diagnostics, use the smart constructor.
  deriving (Eq, Show)

{- Note [Suppressing Messages]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The 'SevIgnore' constructor is used to generate messages for diagnostics which
are meant to be suppressed and not reported to the user: the classic example
are warnings for which the user didn't enable the corresponding 'WarningFlag',
so GHC shouldn't print them.

A different approach would be to extend the zoo of 'mkMsgEnvelope' functions
to return a 'Maybe (MsgEnvelope e)', so that we won't need to even create the
message to begin with. Both approaches have been evaluated, but we settled on
the "SevIgnore one" for a number of reasons:

* It's less invasive to deal with;
* It plays slightly better with deferred diagnostics (see 'GHC.Tc.Errors') as
  for those we need to be able to /always/ produce a message (so that it is
  reported at runtime);
* It gives us more freedom: we can still decide to drop a 'SevIgnore' message
  at leisure, or we can decide to keep it around until the last moment. Maybe
  in the future we would need to turn a 'SevIgnore' into something else, for
  example to "unsuppress" diagnostics if a flag is set: with this approach, we
  have more leeway to accommodate new features.
-}

-- | Used to describe warnings and errors
--   o The message has a file\/line\/column heading,
--     plus "warning:" or "error:",
--     added by mkLocMessage
--   o With 'SevIgnore' the message is suppressed
--   o Output is intended for end users
data Severity
  = SevIgnore
  -- ^ Ignore this message, for example in
  -- case of suppression of warnings users
  -- don't want to see. See Note [Suppressing Messages]
  | SevWarning
  | SevError
  deriving (Eq, Show)

instance Outputable Severity where
  ppr = \case
    SevIgnore  -> text "SevIgnore"
    SevWarning -> text "SevWarning"
    SevError   -> text "SevError"

instance ToJson Severity where
  json s = JSString (show s)

instance ToJson MessageClass where
  json MCOutput      = JSString "MCOutput"
  json MCFatal       = JSString "MCFatal"
  json MCInteractive = JSString "MCInteractive"
  json MCDump        = JSString "MCDump"
  json MCInfo        = JSString "MCInfo"
  json (MCDiagnostic sev reason) =
    JSString $ renderWithContext defaultSDocContext
             (ppr $ text "MCDiagnostic" <+> ppr sev <+> ppr reason)

instance Show (MsgEnvelope DiagnosticMessage) where
    show = showMsgEnvelope

-- | Shows an 'MsgEnvelope'.
showMsgEnvelope :: Diagnostic a => MsgEnvelope a -> String
showMsgEnvelope err =
  renderWithContext defaultSDocContext
    (vcat (unDecorated . diagnosticMessage $ errMsgDiagnostic err))

pprMessageBag :: Bag SDoc -> SDoc
pprMessageBag msgs = vcat (punctuate blankLine (bagToList msgs))

-- | Make an unannotated error message with location info.
mkLocMessage :: MessageClass -> SrcSpan -> SDoc -> SDoc
mkLocMessage = mkLocMessageAnn Nothing

-- | Make a possibly annotated error message with location info.
mkLocMessageAnn
  :: Maybe String                       -- ^ optional annotation
  -> MessageClass                       -- ^ What kind of message?
  -> SrcSpan                            -- ^ location
  -> SDoc                               -- ^ message
  -> SDoc
  -- Always print the location, even if it is unhelpful.  Error messages
  -- are supposed to be in a standard format, and one without a location
  -- would look strange.  Better to say explicitly "<no location info>".
mkLocMessageAnn ann msg_class locn msg
  = sdocOption sdocColScheme $ \col_scheme ->
    let locn' = sdocOption sdocErrorSpans $ \case
                  True  -> ppr locn
                  False -> ppr (srcSpanStart locn)

        msgColour = getMessageClassColour msg_class col_scheme

        -- Add optional information
        optAnn = case ann of
          Nothing -> text ""
          Just i  -> text " [" <> coloured msgColour (text i) <> text "]"

        -- Add prefixes, like    Foo.hs:34: warning:
        --                           <the warning message>
        header = locn' <> colon <+>
                 coloured msgColour msgText <> optAnn

    in coloured (Col.sMessage col_scheme)
                (hang (coloured (Col.sHeader col_scheme) header) 4
                      msg)
  where
    msgText =
      case msg_class of
        MCDiagnostic SevError _reason   -> text "error:"
        MCDiagnostic SevWarning _reason -> text "warning:"
        MCFatal                         -> text "fatal:"
        _                               -> empty

getMessageClassColour :: MessageClass -> Col.Scheme -> Col.PprColour
getMessageClassColour (MCDiagnostic SevError _reason)   = Col.sError
getMessageClassColour (MCDiagnostic SevWarning _reason) = Col.sWarning
getMessageClassColour MCFatal                           = Col.sFatal
getMessageClassColour _                                 = const mempty

-- | Build an 'SDoc' showing the offending source line with a @^^^@ caret
-- underneath the span, for display below a diagnostic's location header.
getCaretDiagnostic :: MessageClass -> SrcSpan -> IO SDoc
getCaretDiagnostic _ (UnhelpfulSpan _) = pure empty
getCaretDiagnostic msg_class (RealSrcSpan span _) =
  caretDiagnostic <$> getSrcLine (srcSpanFile span) row
  where
    getSrcLine fn i =
      getLine i (unpackFS fn)
        `catchException` \(_ :: IOError) ->
          pure Nothing

    getLine i fn = do
      -- StringBuffer has advantages over readFile:
      -- (a) no lazy IO, otherwise IO exceptions may occur in pure code
      -- (b) always UTF-8, rather than some system-dependent encoding
      --     (Haskell source code must be UTF-8 anyway)
      content <- hGetStringBuffer fn
      case atLine i content of
        Just at_line -> pure $
          case lines (fix <$> lexemeToString at_line (len at_line)) of
            srcLine : _ -> Just srcLine
            _           -> Nothing
        _ -> pure Nothing

    -- allow user to visibly see that their code is incorrectly encoded
    -- (StringBuffer.nextChar uses \0 to represent undecodable characters)
    fix '\0' = '\xfffd'
    fix c    = c

    row = srcSpanStartLine span
    rowStr = show row
    multiline = row /= srcSpanEndLine span

    caretDiagnostic Nothing = empty
    caretDiagnostic (Just srcLineWithNewline) =
      sdocOption sdocColScheme$ \col_scheme ->
      let sevColour = getMessageClassColour msg_class col_scheme
          marginColour = Col.sMargin col_scheme
      in
      coloured marginColour (text marginSpace) <>
      text ("\n") <>
      coloured marginColour (text marginRow) <>
      text (" " ++ srcLinePre) <>
      coloured sevColour (text srcLineSpan) <>
      text (srcLinePost ++ "\n") <>
      coloured marginColour (text marginSpace) <>
      coloured sevColour (text (" " ++ caretLine))

      where

        -- expand tabs in a device-independent manner #13664
        expandTabs tabWidth i s =
          case s of
            ""        -> ""
            '\t' : cs -> replicate effectiveWidth ' ' ++
                         expandTabs tabWidth (i + effectiveWidth) cs
            c    : cs -> c : expandTabs tabWidth (i + 1) cs
          where effectiveWidth = tabWidth - i `mod` tabWidth

        srcLine = filter (/= '\n') (expandTabs 8 0 srcLineWithNewline)

        start = srcSpanStartCol span - 1
        end | multiline = length srcLine
            | otherwise = srcSpanEndCol span - 1
        width = max 1 (end - start)

        marginWidth = length rowStr
        marginSpace = replicate marginWidth ' ' ++ " |"
        marginRow   = rowStr ++ " |"

        (srcLinePre,  srcLineRest) = splitAt start srcLine
        (srcLineSpan, srcLinePost) = splitAt width srcLineRest

        caretEllipsis | multiline = "..."
                      | otherwise = ""
        caretLine = replicate start ' ' ++ replicate width '^' ++ caretEllipsis

--
-- Queries
--

{- Note [Intrinsic And Extrinsic Failures]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We distinguish between /intrinsic/ and /extrinsic/ failures. We classify in the
former category those diagnostics which are /essentially/ failures, and their
nature can't be changed. This is the case for 'ErrorWithoutFlag'. We classify
as /extrinsic/ all those diagnostics (like fatal warnings) which are born as
warnings but which are still failures under particular 'DynFlags' settings.
It's important to be aware of this logical distinction, because when we are
inside the typechecker or the desugarer, we are interested in intrinsic
errors, and we want to bail out as soon as we find one of them. Conversely,
if we find an /extrinsic/ one, for example because a particular 'WarningFlag'
makes a warning into an error, we /don't/ want to bail out, that's still not
the right time to do so: rather, we want to first collect all the
diagnostics, and later classify and report them appropriately (in the
driver).
-}

-- | Returns 'True' if this is, intrinsically, a failure. See
-- Note [Intrinsic And Extrinsic Failures].
isIntrinsicErrorMessage :: Diagnostic e => MsgEnvelope e -> Bool
isIntrinsicErrorMessage = (==) ErrorWithoutFlag . diagnosticReason . errMsgDiagnostic

isWarningMessage :: Diagnostic e => MsgEnvelope e -> Bool
isWarningMessage = not . isIntrinsicErrorMessage

-- | Are there any hard errors here? -Werror warnings are /not/ detected. If
-- you want to check for -Werror warnings, use 'errorsOrFatalWarningsFound'.
errorsFound :: Diagnostic e => Messages e -> Bool
errorsFound (Messages msgs) = any isIntrinsicErrorMessage msgs

-- | Returns 'True' if the envelope contains a message that will stop
-- compilation: either an intrinsic error or a fatal (-Werror) warning
isExtrinsicErrorMessage :: MsgEnvelope e -> Bool
isExtrinsicErrorMessage = (==) SevError . errMsgSeverity

-- | Are there any errors or -Werror warnings here?
errorsOrFatalWarningsFound :: Messages e -> Bool
errorsOrFatalWarningsFound (Messages msgs) = any isExtrinsicErrorMessage msgs

getWarningMessages :: Diagnostic e => Messages e -> Bag (MsgEnvelope e)
getWarningMessages (Messages xs) = fst $ partitionBag isWarningMessage xs

getErrorMessages :: Diagnostic e => Messages e -> Bag (MsgEnvelope e)
getErrorMessages (Messages xs) = fst $ partitionBag isIntrinsicErrorMessage xs

-- | Partitions the 'Messages' and returns a tuple whose first element is
-- the warnings, and whose second is the errors.
partitionMessages :: Diagnostic e => Messages e -> (Messages e, Messages e) partitionMessages (Messages xs) = bimap Messages Messages (partitionBag isWarningMessage xs) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/FieldLabel.hs0000644000000000000000000001671314472400113021267 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE UndecidableInstances #-} {- % % (c) Adam Gundry 2013-2015 % Note [FieldLabel] ~~~~~~~~~~~~~~~~~ This module defines the representation of FieldLabels as stored in TyCons. As well as a selector name, these have some extra structure to support the DuplicateRecordFields and NoFieldSelectors extensions. In the normal case (with NoDuplicateRecordFields and FieldSelectors), a datatype like data T = MkT { foo :: Int } has FieldLabel { flLabel = "foo" , flHasDuplicateRecordFields = NoDuplicateRecordFields , flHasFieldSelector = FieldSelectors , flSelector = foo }. In particular, the Name of the selector has the same string representation as the label. If DuplicateRecordFields is enabled, however, the same declaration instead gives FieldLabel { flLabel = "foo" , flHasDuplicateRecordFields = DuplicateRecordFields , flHasFieldSelector = FieldSelectors , flSelector = $sel:foo:MkT }. Similarly, the selector name will be mangled if NoFieldSelectors is used (whether or not DuplicateRecordFields is enabled). See Note [NoFieldSelectors] in GHC.Rename.Env. Now the name of the selector ($sel:foo:MkT) does not match the label of the field (foo). We must be careful not to show the selector name to the user! The point of mangling the selector name is to allow a module to define the same field label in different datatypes: data T = MkT { foo :: Int } data U = MkU { foo :: Bool } Now there will be two FieldLabel values for 'foo', one in T and one in U. They share the same label (FieldLabelString), but the selector functions differ. See also Note [Representing fields in AvailInfo] in GHC.Types.Avail. 
Note [Why selector names include data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As explained above, a selector name includes the name of the first data constructor in the type, so that the same label can appear multiple times in the same module. (This is irrespective of whether the first constructor has that field, for simplicity.) We use a data constructor name, rather than the type constructor name, because data family instances do not have a representation type constructor name generated until relatively late in the typechecking process. Of course, datatypes with no constructors cannot have any fields. -} module GHC.Types.FieldLabel ( FieldLabelString , FieldLabelEnv , FieldLabel(..) , fieldSelectorOccName , fieldLabelPrintableName , DuplicateRecordFields(..) , FieldSelectors(..) , flIsOverloaded ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Name.Occurrence import {-# SOURCE #-} GHC.Types.Name import GHC.Data.FastString import GHC.Data.FastString.Env import GHC.Utils.Outputable import GHC.Utils.Binary import Data.Bool import Data.Data -- | Field labels are just represented as strings; -- they are not necessarily unique (even within a module) type FieldLabelString = FastString -- | A map from labels to all the auxiliary information type FieldLabelEnv = DFastStringEnv FieldLabel -- | Fields in an algebraic record type; see Note [FieldLabel]. data FieldLabel = FieldLabel { flLabel :: FieldLabelString, -- ^ User-visible label of the field flHasDuplicateRecordFields :: DuplicateRecordFields, -- ^ Was @DuplicateRecordFields@ on in the defining module for this datatype? flHasFieldSelector :: FieldSelectors, -- ^ Was @FieldSelectors@ enabled in the defining module for this datatype? -- See Note [NoFieldSelectors] in GHC.Rename.Env flSelector :: Name -- ^ Record selector function } deriving (Data, Eq) instance HasOccName FieldLabel where occName = mkVarOccFS . 
flLabel instance Outputable FieldLabel where ppr fl = ppr (flLabel fl) <> whenPprDebug (braces (ppr (flSelector fl)) <> ppr (flHasDuplicateRecordFields fl) <> ppr (flHasFieldSelector fl)) -- | Flag to indicate whether the DuplicateRecordFields extension is enabled. data DuplicateRecordFields = DuplicateRecordFields -- ^ Fields may be duplicated in a single module | NoDuplicateRecordFields -- ^ Fields must be unique within a module (the default) deriving (Show, Eq, Typeable, Data) instance Binary DuplicateRecordFields where put_ bh f = put_ bh (f == DuplicateRecordFields) get bh = bool NoDuplicateRecordFields DuplicateRecordFields <$> get bh instance Outputable DuplicateRecordFields where ppr DuplicateRecordFields = text "+dup" ppr NoDuplicateRecordFields = text "-dup" -- | Flag to indicate whether the FieldSelectors extension is enabled. data FieldSelectors = FieldSelectors -- ^ Selector functions are available (the default) | NoFieldSelectors -- ^ Selector functions are not available deriving (Show, Eq, Typeable, Data) instance Binary FieldSelectors where put_ bh f = put_ bh (f == FieldSelectors) get bh = bool NoFieldSelectors FieldSelectors <$> get bh instance Outputable FieldSelectors where ppr FieldSelectors = text "+sel" ppr NoFieldSelectors = text "-sel" -- | We need the @Binary Name@ constraint here even though there is an instance -- defined in "GHC.Types.Name", because the we have a SOURCE import, so the -- instance is not in scope. And the instance cannot be added to Name.hs-boot -- because "GHC.Utils.Binary" itself depends on "GHC.Types.Name". instance Binary Name => Binary FieldLabel where put_ bh (FieldLabel aa ab ac ad) = do put_ bh aa put_ bh ab put_ bh ac put_ bh ad get bh = do aa <- get bh ab <- get bh ac <- get bh ad <- get bh return (FieldLabel aa ab ac ad) -- | Record selector OccNames are built from the underlying field name -- and the name of the first data constructor of the type, to support -- duplicate record field names. 
-- See Note [Why selector names include data constructors]. fieldSelectorOccName :: FieldLabelString -> OccName -> DuplicateRecordFields -> FieldSelectors -> OccName fieldSelectorOccName lbl dc dup_fields_ok has_sel | shouldMangleSelectorNames dup_fields_ok has_sel = mkRecFldSelOcc str | otherwise = mkVarOccFS lbl where str = ":" ++ unpackFS lbl ++ ":" ++ occNameString dc -- | Undo the name mangling described in Note [FieldLabel] to produce a Name -- that has the user-visible OccName (but the selector's unique). This should -- be used only when generating output, when we want to show the label, but may -- need to qualify it with a module prefix. fieldLabelPrintableName :: FieldLabel -> Name fieldLabelPrintableName fl | flIsOverloaded fl = tidyNameOcc (flSelector fl) (mkVarOccFS (flLabel fl)) | otherwise = flSelector fl -- | Selector name mangling should be used if either DuplicateRecordFields or -- NoFieldSelectors is enabled, so that the OccName of the field can be used for -- something else. See Note [FieldLabel], and Note [NoFieldSelectors] in -- GHC.Rename.Env. shouldMangleSelectorNames :: DuplicateRecordFields -> FieldSelectors -> Bool shouldMangleSelectorNames dup_fields_ok has_sel = dup_fields_ok == DuplicateRecordFields || has_sel == NoFieldSelectors flIsOverloaded :: FieldLabel -> Bool flIsOverloaded fl = shouldMangleSelectorNames (flHasDuplicateRecordFields fl) (flHasFieldSelector fl) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Fixity.hs0000644000000000000000000000601214472400113020547 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} -- | Fixity module GHC.Types.Fixity ( Fixity (..) , FixityDirection (..) , LexicalFixity (..) 
, maxPrecedence , minPrecedence , defaultFixity , negateFixity , funTyFixity , compareFixity ) where import GHC.Prelude import GHC.Types.SourceText import GHC.Utils.Outputable import GHC.Utils.Binary import Data.Data hiding (Fixity, Prefix, Infix) data Fixity = Fixity SourceText Int FixityDirection -- Note [Pragma source text] deriving Data instance Outputable Fixity where ppr (Fixity _ prec dir) = hcat [ppr dir, space, int prec] instance Eq Fixity where -- Used to determine if two fixities conflict (Fixity _ p1 dir1) == (Fixity _ p2 dir2) = p1==p2 && dir1 == dir2 instance Binary Fixity where put_ bh (Fixity src aa ab) = do put_ bh src put_ bh aa put_ bh ab get bh = do src <- get bh aa <- get bh ab <- get bh return (Fixity src aa ab) ------------------------ data FixityDirection = InfixL | InfixR | InfixN deriving (Eq, Data) instance Outputable FixityDirection where ppr InfixL = text "infixl" ppr InfixR = text "infixr" ppr InfixN = text "infix" instance Binary FixityDirection where put_ bh InfixL = putByte bh 0 put_ bh InfixR = putByte bh 1 put_ bh InfixN = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return InfixL 1 -> return InfixR _ -> return InfixN ------------------------ maxPrecedence, minPrecedence :: Int maxPrecedence = 9 minPrecedence = 0 defaultFixity :: Fixity defaultFixity = Fixity NoSourceText maxPrecedence InfixL negateFixity, funTyFixity :: Fixity -- Wired-in fixities negateFixity = Fixity NoSourceText 6 InfixL -- Fixity of unary negate funTyFixity = Fixity NoSourceText (-1) InfixR -- Fixity of '->', see #15235 {- Consider \begin{verbatim} a `op1` b `op2` c \end{verbatim} @(compareFixity op1 op2)@ tells which way to arrange application, or whether there's an error. 
-} compareFixity :: Fixity -> Fixity -> (Bool, -- Error please Bool) -- Associate to the right: a op1 (b op2 c) compareFixity (Fixity _ prec1 dir1) (Fixity _ prec2 dir2) = case prec1 `compare` prec2 of GT -> left LT -> right EQ -> case (dir1, dir2) of (InfixR, InfixR) -> right (InfixL, InfixL) -> left _ -> error_please where right = (False, True) left = (False, False) error_please = (True, False) -- |Captures the fixity of declarations as they are parsed. This is not -- necessarily the same as the fixity declaration, as the normal fixity may be -- overridden using parens or backticks. data LexicalFixity = Prefix | Infix deriving (Data,Eq) instance Outputable LexicalFixity where ppr Prefix = text "Prefix" ppr Infix = text "Infix" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Fixity/Env.hs0000644000000000000000000000221514472400113021300 0ustar0000000000000000module GHC.Types.Fixity.Env ( FixityEnv , FixItem (..) , emptyFixityEnv , lookupFixity , mkIfaceFixCache , emptyIfaceFixCache ) where import GHC.Prelude import GHC.Types.Fixity import GHC.Types.Name import GHC.Types.Name.Env import GHC.Utils.Outputable -- | Fixity environment mapping names to their fixities type FixityEnv = NameEnv FixItem -- | Fixity information for an 'Name'. 
We keep the OccName in the range -- so that we can generate an interface from it data FixItem = FixItem OccName Fixity instance Outputable FixItem where ppr (FixItem occ fix) = ppr fix <+> ppr occ emptyFixityEnv :: FixityEnv emptyFixityEnv = emptyNameEnv lookupFixity :: FixityEnv -> Name -> Fixity lookupFixity env n = case lookupNameEnv env n of Just (FixItem _ fix) -> fix Nothing -> defaultFixity -- | Creates cached lookup for the 'mi_fix_fn' field of 'ModIface' mkIfaceFixCache :: [(OccName, Fixity)] -> OccName -> Maybe Fixity mkIfaceFixCache pairs = \n -> lookupOccEnv env n where env = mkOccEnv pairs emptyIfaceFixCache :: OccName -> Maybe Fixity emptyIfaceFixCache _ = Nothing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/ForeignCall.hs0000644000000000000000000003023014472400113021457 0ustar0000000000000000{- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[Foreign]{Foreign calls} -} {-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.ForeignCall ( ForeignCall(..), isSafeForeignCall, Safety(..), playSafe, playInterruptible, CExportSpec(..), CLabelString, isCLabelString, pprCLabelString, CCallSpec(..), CCallTarget(..), isDynamicTarget, CCallConv(..), defaultCCallConv, ccallConvToInt, ccallConvAttribute, Header(..), CType(..), ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Unit.Module import GHC.Types.SourceText ( SourceText, pprWithSourceText ) import Data.Char import Data.Data {- ************************************************************************ * * \subsubsection{Data types} * * ************************************************************************ -} newtype ForeignCall = CCall CCallSpec deriving Eq isSafeForeignCall :: ForeignCall -> Bool isSafeForeignCall (CCall (CCallSpec _ _ safe)) = playSafe safe -- We may need more clues to distinguish foreign calls -- but this simple printer will do for now instance Outputable ForeignCall where ppr 
(CCall cc) = ppr cc data Safety = PlaySafe -- ^ Might invoke Haskell GC, or do a call back, or -- switch threads, etc. So make sure things are -- tidy before the call. Additionally, in the threaded -- RTS we arrange for the external call to be executed -- by a separate OS thread, i.e., _concurrently_ to the -- execution of other Haskell threads. | PlayInterruptible -- ^ Like PlaySafe, but additionally -- the worker thread running this foreign call may -- be unceremoniously killed, so it must be scheduled -- on an unbound thread. | PlayRisky -- ^ None of the above can happen; the call will return -- without interacting with the runtime system at all. -- Specifically: -- -- * No GC -- * No call backs -- * No blocking -- * No precise exceptions -- deriving ( Eq, Show, Data, Enum ) -- Show used just for Show Lex.Token, I think instance Outputable Safety where ppr PlaySafe = text "safe" ppr PlayInterruptible = text "interruptible" ppr PlayRisky = text "unsafe" playSafe :: Safety -> Bool playSafe PlaySafe = True playSafe PlayInterruptible = True playSafe PlayRisky = False playInterruptible :: Safety -> Bool playInterruptible PlayInterruptible = True playInterruptible _ = False {- ************************************************************************ * * \subsubsection{Calling C} * * ************************************************************************ -} data CExportSpec = CExportStatic -- foreign export ccall foo :: ty SourceText -- of the CLabelString. -- See Note [Pragma source text] in GHC.Types.SourceText CLabelString -- C Name of exported function CCallConv deriving Data data CCallSpec = CCallSpec CCallTarget -- What to call CCallConv -- Calling convention to use. Safety deriving( Eq ) -- The call target: -- | How to call a particular function in C-land. data CCallTarget -- An "unboxed" ccall# to named function in a particular package. = StaticTarget SourceText -- of the CLabelString. 
-- See Note [Pragma source text] in GHC.Types.SourceText CLabelString -- C-land name of label. (Maybe Unit) -- What package the function is in. -- If Nothing, then it's taken to be in the current package. -- Note: This information is only used for PrimCalls on Windows. -- See CLabel.labelDynamic and CoreToStg.coreToStgApp -- for the difference in representation between PrimCalls -- and ForeignCalls. If the CCallTarget is representing -- a regular ForeignCall then it's safe to set this to Nothing. -- The first argument of the import is the name of a function pointer (an Addr#). -- Used when importing a label as "foreign import ccall "dynamic" ..." Bool -- True => really a function -- False => a value; only -- allowed in CAPI imports | DynamicTarget deriving( Eq, Data ) isDynamicTarget :: CCallTarget -> Bool isDynamicTarget DynamicTarget = True isDynamicTarget _ = False {- Stuff to do with calling convention: ccall: Caller allocates parameters, *and* deallocates them. stdcall: Caller allocates parameters, callee deallocates. Function name has @N after it, where N is number of arg bytes e.g. _Foo@8. This convention is x86 (win32) specific. 
See: http://www.programmersheaven.com/2/Calling-conventions -} -- any changes here should be replicated in the CallConv type in template haskell data CCallConv = CCallConv | CApiConv | StdCallConv | PrimCallConv | JavaScriptCallConv deriving (Eq, Data, Enum) instance Outputable CCallConv where ppr StdCallConv = text "stdcall" ppr CCallConv = text "ccall" ppr CApiConv = text "capi" ppr PrimCallConv = text "prim" ppr JavaScriptCallConv = text "javascript" defaultCCallConv :: CCallConv defaultCCallConv = CCallConv ccallConvToInt :: CCallConv -> Int ccallConvToInt StdCallConv = 0 ccallConvToInt CCallConv = 1 ccallConvToInt CApiConv = panic "ccallConvToInt CApiConv" ccallConvToInt (PrimCallConv {}) = panic "ccallConvToInt PrimCallConv" ccallConvToInt JavaScriptCallConv = panic "ccallConvToInt JavaScriptCallConv" {- Generate the gcc attribute corresponding to the given calling convention (used by PprAbsC): -} ccallConvAttribute :: CCallConv -> SDoc ccallConvAttribute StdCallConv = text "__attribute__((__stdcall__))" ccallConvAttribute CCallConv = empty ccallConvAttribute CApiConv = empty ccallConvAttribute (PrimCallConv {}) = panic "ccallConvAttribute PrimCallConv" ccallConvAttribute JavaScriptCallConv = panic "ccallConvAttribute JavaScriptCallConv" type CLabelString = FastString -- A C label, completely unencoded pprCLabelString :: CLabelString -> SDoc pprCLabelString lbl = ftext lbl isCLabelString :: CLabelString -> Bool -- Checks to see if this is a valid C label isCLabelString lbl = all ok (unpackFS lbl) where ok c = isAlphaNum c || c == '_' || c == '.' -- The '.' appears in e.g. "foo.so" in the -- module part of a ExtName. 
Maybe it should be separate -- Printing into C files: instance Outputable CExportSpec where ppr (CExportStatic _ str _) = pprCLabelString str instance Outputable CCallSpec where ppr (CCallSpec fun cconv safety) = hcat [ whenPprDebug callconv, ppr_fun fun, text " ::" ] where callconv = text "{-" <> ppr cconv <> text "-}" gc_suf | playSafe safety = text "_safe" | otherwise = text "_unsafe" ppr_fun (StaticTarget st lbl mPkgId isFun) = text (if isFun then "__ffi_static_ccall" else "__ffi_static_ccall_value") <> gc_suf <+> (case mPkgId of Nothing -> empty Just pkgId -> ppr pkgId) <> text ":" <> ppr lbl <+> (pprWithSourceText st empty) ppr_fun DynamicTarget = text "__ffi_dyn_ccall" <> gc_suf <+> text "\"\"" -- The filename for a C header file -- Note [Pragma source text] in GHC.Types.SourceText data Header = Header SourceText FastString deriving (Eq, Data) instance Outputable Header where ppr (Header st h) = pprWithSourceText st (doubleQuotes $ ppr h) -- | A C type, used in CAPI FFI calls -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'{-\# CTYPE'@, -- 'GHC.Parser.Annotation.AnnHeader','GHC.Parser.Annotation.AnnVal', -- 'GHC.Parser.Annotation.AnnClose' @'\#-}'@, -- For details on above see Note [exact print annotations] in "GHC.Parser.Annotation" data CType = CType SourceText -- Note [Pragma source text] in GHC.Types.SourceText (Maybe Header) -- header to include for this type (SourceText,FastString) -- the type itself deriving (Eq, Data) instance Outputable CType where ppr (CType stp mh (stct,ct)) = pprWithSourceText stp (text "{-# CTYPE") <+> hDoc <+> pprWithSourceText stct (doubleQuotes (ftext ct)) <+> text "#-}" where hDoc = case mh of Nothing -> empty Just h -> ppr h {- ************************************************************************ * * \subsubsection{Misc} * * ************************************************************************ -} instance Binary ForeignCall where put_ bh (CCall aa) = put_ bh aa get bh = do aa <- get 
bh; return (CCall aa) instance Binary Safety where put_ bh PlaySafe = putByte bh 0 put_ bh PlayInterruptible = putByte bh 1 put_ bh PlayRisky = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return PlaySafe 1 -> return PlayInterruptible _ -> return PlayRisky instance Binary CExportSpec where put_ bh (CExportStatic ss aa ab) = do put_ bh ss put_ bh aa put_ bh ab get bh = do ss <- get bh aa <- get bh ab <- get bh return (CExportStatic ss aa ab) instance Binary CCallSpec where put_ bh (CCallSpec aa ab ac) = do put_ bh aa put_ bh ab put_ bh ac get bh = do aa <- get bh ab <- get bh ac <- get bh return (CCallSpec aa ab ac) instance Binary CCallTarget where put_ bh (StaticTarget ss aa ab ac) = do putByte bh 0 put_ bh ss put_ bh aa put_ bh ab put_ bh ac put_ bh DynamicTarget = putByte bh 1 get bh = do h <- getByte bh case h of 0 -> do ss <- get bh aa <- get bh ab <- get bh ac <- get bh return (StaticTarget ss aa ab ac) _ -> return DynamicTarget instance Binary CCallConv where put_ bh CCallConv = putByte bh 0 put_ bh StdCallConv = putByte bh 1 put_ bh PrimCallConv = putByte bh 2 put_ bh CApiConv = putByte bh 3 put_ bh JavaScriptCallConv = putByte bh 4 get bh = do h <- getByte bh case h of 0 -> return CCallConv 1 -> return StdCallConv 2 -> return PrimCallConv 3 -> return CApiConv _ -> return JavaScriptCallConv instance Binary CType where put_ bh (CType s mh fs) = do put_ bh s put_ bh mh put_ bh fs get bh = do s <- get bh mh <- get bh fs <- get bh return (CType s mh fs) instance Binary Header where put_ bh (Header s h) = put_ bh s >> put_ bh h get bh = do s <- get bh h <- get bh return (Header s h) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/ForeignStubs.hs0000644000000000000000000000540614472400113021713 0ustar0000000000000000-- | Foreign export stubs {-# LANGUAGE DerivingVia #-} module GHC.Types.ForeignStubs ( ForeignStubs (..) , CHeader(..) , CStub(..) 
, initializerCStub , finalizerCStub , appendStubC ) where import {-# SOURCE #-} GHC.Cmm.CLabel import GHC.Platform import GHC.Utils.Outputable import Data.List ((++)) import Data.Monoid import Data.Semigroup import Data.Coerce data CStub = CStub { getCStub :: SDoc , getInitializers :: [CLabel] -- ^ Initializers to be run at startup -- See Note [Initializers and finalizers in Cmm] in -- "GHC.Cmm.InitFini". , getFinalizers :: [CLabel] -- ^ Finalizers to be run at shutdown } emptyCStub :: CStub emptyCStub = CStub empty [] [] instance Monoid CStub where mempty = emptyCStub instance Semigroup CStub where CStub a0 b0 c0 <> CStub a1 b1 c1 = CStub (a0 $$ a1) (b0 ++ b1) (c0 ++ c1) functionCStub :: Platform -> CLabel -> SDoc -> SDoc -> CStub functionCStub platform clbl declarations body = CStub body' [] [] where body' = vcat [ declarations , hsep [text "void", pprCLabel platform CStyle clbl, text "(void)"] , braces body ] -- | @initializerCStub fn_nm decls body@ is a 'CStub' containing C initializer -- function (e.g. an entry of the @.init_array@ section) named -- @fn_nm@ with the given body and the given set of declarations. initializerCStub :: Platform -> CLabel -> SDoc -> SDoc -> CStub initializerCStub platform clbl declarations body = functionCStub platform clbl declarations body `mappend` CStub empty [clbl] [] -- | @finalizerCStub fn_nm decls body@ is a 'CStub' containing C finalizer -- function (e.g. an entry of the @.fini_array@ section) named -- @fn_nm@ with the given body and the given set of declarations. 
finalizerCStub :: Platform -> CLabel -> SDoc -> SDoc -> CStub finalizerCStub platform clbl declarations body = functionCStub platform clbl declarations body `mappend` CStub empty [] [clbl] newtype CHeader = CHeader { getCHeader :: SDoc } instance Monoid CHeader where mempty = CHeader empty mconcat = coerce vcat instance Semigroup CHeader where (<>) = coerce ($$) -- | Foreign export stubs data ForeignStubs = NoStubs -- ^ We don't have any stubs | ForeignStubs CHeader CStub -- ^ There are some stubs. Parameters: -- -- 1) Header file prototypes for -- "foreign exported" functions -- -- 2) C stubs to use when calling -- "foreign exported" functions appendStubC :: ForeignStubs -> CStub -> ForeignStubs appendStubC NoStubs c_code = ForeignStubs mempty c_code appendStubC (ForeignStubs h c) c_code = ForeignStubs h (c `mappend` c_code) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Hint.hs0000644000000000000000000004762614472400113020215 0ustar0000000000000000{-# LANGUAGE ExistentialQuantification #-} module GHC.Types.Hint ( GhcHint(..) , AvailableBindings(..) , InstantiationSuggestion(..) , LanguageExtensionHint(..) , ImportSuggestion(..) , HowInScope(..) , SimilarName(..) , StarIsType(..) , UntickedPromotedThing(..) 
, pprUntickedConstructor, isBareSymbol , suggestExtension , suggestExtensionWithInfo , suggestExtensions , suggestExtensionsWithInfo , suggestAnyExtension , suggestAnyExtensionWithInfo , useExtensionInOrderTo , noStarIsTypeHints ) where import GHC.Prelude import qualified Data.List.NonEmpty as NE import GHC.Utils.Outputable import qualified GHC.LanguageExtensions as LangExt import Data.Typeable import GHC.Unit.Module (ModuleName, Module) import GHC.Hs.Extension (GhcTc) import GHC.Core.Coercion import GHC.Types.Fixity (LexicalFixity(..)) import GHC.Types.Name (Name, NameSpace, OccName (occNameFS), isSymOcc, nameOccName) import GHC.Types.Name.Reader (RdrName (Unqual), ImpDeclSpec) import GHC.Types.SrcLoc (SrcSpan) import GHC.Types.Basic (Activation, RuleName) import GHC.Parser.Errors.Basic import {-# SOURCE #-} Language.Haskell.Syntax.Expr import GHC.Unit.Module.Imported (ImportedModsVal) import GHC.Data.FastString (fsLit) -- This {-# SOURCE #-} import should be removable once -- 'Language.Haskell.Syntax.Bind' no longer depends on 'GHC.Tc.Types.Evidence'. -- | The bindings we have available in scope when -- suggesting an explicit type signature. data AvailableBindings = NamedBindings (NE.NonEmpty Name) | UnnamedBinding -- ^ An unknown binding (i.e. too complicated to turn into a 'Name') data LanguageExtensionHint = -- | Suggest to enable the input extension. This is the hint that -- GHC emits if this is not a \"known\" fix, i.e. this is GHC giving -- its best guess on what extension might be necessary to make a -- certain program compile. For example, GHC might suggests to -- enable 'BlockArguments' when the user simply formatted incorrectly -- the input program, so GHC here is trying to be as helpful as -- possible. -- If the input 'SDoc' is not empty, it will contain some extra -- information about the why the extension is required, but -- it's totally irrelevant/redundant for IDEs and other tools. 
SuggestSingleExtension !SDoc !LangExt.Extension -- | Suggest to enable the input extensions. The list -- is to be intended as /disjuctive/ i.e. the user is -- suggested to enable /any/ of the extensions listed. If -- the input 'SDoc' is not empty, it will contain some extra -- information about the why the extensions are required, but -- it's totally irrelevant/redundant for IDEs and other tools. | SuggestAnyExtension !SDoc [LangExt.Extension] -- | Suggest to enable the input extensions. The list -- is to be intended as /conjunctive/ i.e. the user is -- suggested to enable /all/ the extensions listed. If -- the input 'SDoc' is not empty, it will contain some extra -- information about the why the extensions are required, but -- it's totally irrelevant/redundant for IDEs and other tools. | SuggestExtensions !SDoc [LangExt.Extension] -- | Suggest to enable the input extension in order to fix -- a certain problem. This is the suggestion that GHC emits when -- is more-or-less clear \"what's going on\". For example, if -- both 'DeriveAnyClass' and 'GeneralizedNewtypeDeriving' are -- turned on, the right thing to do is to enabled 'DerivingStrategies', -- so in contrast to 'SuggestSingleExtension' GHC will be a bit more -- \"imperative\" (i.e. \"Use X Y Z in order to ... \"). -- If the input 'SDoc' is not empty, it will contain some extra -- information about the why the extensions are required, but -- it's totally irrelevant/redundant for IDEs and other tools. | SuggestExtensionInOrderTo !SDoc !LangExt.Extension -- | Suggests a single extension without extra user info. suggestExtension :: LangExt.Extension -> GhcHint suggestExtension ext = SuggestExtension (SuggestSingleExtension empty ext) -- | Like 'suggestExtension' but allows supplying extra info for the user. 
suggestExtensionWithInfo :: SDoc -> LangExt.Extension -> GhcHint suggestExtensionWithInfo extraInfo ext = SuggestExtension (SuggestSingleExtension extraInfo ext) -- | Suggests to enable /every/ extension in the list. suggestExtensions :: [LangExt.Extension] -> GhcHint suggestExtensions exts = SuggestExtension (SuggestExtensions empty exts) -- | Like 'suggestExtensions' but allows supplying extra info for the user. suggestExtensionsWithInfo :: SDoc -> [LangExt.Extension] -> GhcHint suggestExtensionsWithInfo extraInfo exts = SuggestExtension (SuggestExtensions extraInfo exts) -- | Suggests to enable /any/ extension in the list. suggestAnyExtension :: [LangExt.Extension] -> GhcHint suggestAnyExtension exts = SuggestExtension (SuggestAnyExtension empty exts) -- | Like 'suggestAnyExtension' but allows supplying extra info for the user. suggestAnyExtensionWithInfo :: SDoc -> [LangExt.Extension] -> GhcHint suggestAnyExtensionWithInfo extraInfo exts = SuggestExtension (SuggestAnyExtension extraInfo exts) useExtensionInOrderTo :: SDoc -> LangExt.Extension -> GhcHint useExtensionInOrderTo extraInfo ext = SuggestExtension (SuggestExtensionInOrderTo extraInfo ext) -- | A type for hints emitted by GHC. -- A /hint/ suggests a possible way to deal with a particular warning or error. data GhcHint = {-| An \"unknown\" hint. This type constructor allows arbitrary -- hints to be embedded. The typical use case would be GHC plugins -- willing to emit hints alongside their custom diagnostics. -} forall a. (Outputable a, Typeable a) => UnknownHint a {-| Suggests adding a particular language extension. GHC will do its best trying to guess when the user is using the syntax of a particular language extension without having the relevant extension enabled. Example: If the user uses the keyword \"mdo\" (and we are in a monadic block), but the relevant extension is not enabled, GHC will emit a 'SuggestExtension RecursiveDo'. 
Test case(s): parser/should_fail/T12429, parser/should_fail/T8501c, parser/should_fail/T18251e, ... (and many more) -} | SuggestExtension !LanguageExtensionHint {-| Suggests that a monadic code block is probably missing a \"do\" keyword. Example: main = putStrLn "hello" putStrLn "world" Test case(s): parser/should_fail/T8501a, parser/should_fail/readFail007, parser/should_fail/InfixAppPatErr, parser/should_fail/T984 -} | SuggestMissingDo {-| Suggests that a \"let\" expression is needed in a \"do\" block. Test cases: None (that explicitly test this particular hint is emitted). -} | SuggestLetInDo {-| Suggests to add an \".hsig\" signature file to the Cabal manifest. Triggered by: 'GHC.Driver.Errors.Types.DriverUnexpectedSignature', if Cabal is being used. Example: See comment of 'DriverUnexpectedSignature'. Test case(s): driver/T12955 -} | SuggestAddSignatureCabalFile !ModuleName {-| Suggests to explicitly list the instantiations for the signatures in the GHC invocation command. Triggered by: 'GHC.Driver.Errors.Types.DriverUnexpectedSignature', if Cabal is /not/ being used. Example: See comment of 'DriverUnexpectedSignature'. Test case(s): driver/T12955 -} | SuggestSignatureInstantiations !ModuleName [InstantiationSuggestion] {-| Suggests to use spaces instead of tabs. Triggered by: 'GHC.Parser.Errors.Types.PsWarnTab'. Examples: None Test Case(s): None -} | SuggestUseSpaces {-| Suggests adding a whitespace after the given symbol. Examples: None Test Case(s): parser/should_compile/T18834a.hs -} | SuggestUseWhitespaceAfter !OperatorWhitespaceSymbol {-| Suggests adding a whitespace around the given operator symbol, as it might be repurposed as special syntax by a future language extension. The second parameter is how such operator occurred, if in a prefix, suffix or tight infix position. Triggered by: 'GHC.Parser.Errors.Types.PsWarnOperatorWhitespace'. Example: h a b = a+b -- not OK, no spaces around '+'. 
Test Case(s): parser/should_compile/T18834b.hs -} | SuggestUseWhitespaceAround !String !OperatorWhitespaceOccurrence {-| Suggests wrapping an expression in parentheses Examples: None Test Case(s): None -} | SuggestParentheses {-| Suggests to increase the -fmax-pmcheck-models limit for the pattern match checker. Triggered by: 'GHC.HsToCore.Errors.Types.DsMaxPmCheckModelsReached' Test case(s): pmcheck/should_compile/TooManyDeltas pmcheck/should_compile/TooManyDeltas pmcheck/should_compile/T11822 -} | SuggestIncreaseMaxPmCheckModels {-| Suggests adding a type signature, typically to resolve ambiguity or help GHC inferring types. -} | SuggestAddTypeSignatures AvailableBindings {-| Suggests to explicitly discard the result of a monadic action by binding the result to the '_' wilcard. Example: main = do _ <- getCurrentTime -} | SuggestBindToWildcard !(LHsExpr GhcTc) | SuggestAddInlineOrNoInlinePragma !Var !Activation | SuggestAddPhaseToCompetingRule !RuleName {-| Suggests adding an identifier to the export list of a signature. -} | SuggestAddToHSigExportList !Name !(Maybe Module) {-| Suggests increasing the limit for the number of iterations in the simplifier. -} | SuggestIncreaseSimplifierIterations {-| Suggests to explicitly import 'Type' from the 'Data.Kind' module, because using "*" to mean 'Data.Kind.Type' relies on the StarIsType extension, which will become deprecated in the future. Triggered by: 'GHC.Parser.Errors.Types.PsWarnStarIsType' Example: None Test case(s): wcompat-warnings/WCompatWarningsOn.hs -} | SuggestUseTypeFromDataKind (Maybe RdrName) {-| Suggests placing the 'qualified' keyword /after/ the module name. Triggered by: 'GHC.Parser.Errors.Types.PsWarnImportPreQualified' Example: None Test case(s): module/mod184.hs -} | SuggestQualifiedAfterModuleName {-| Suggests using TemplateHaskell quotation syntax. Triggered by: 'GHC.Parser.Errors.Types.PsErrEmptyDoubleQuotes' only if TemplateHaskell is enabled. 
Example: None Test case(s): parser/should_fail/T13450TH.hs -} | SuggestThQuotationSyntax {-| Suggests alternative roles in case we found an illegal one. Triggered by: 'GHC.Parser.Errors.Types.PsErrIllegalRoleName' Example: None Test case(s): roles/should_fail/Roles7.hs -} | SuggestRoles [Role] {-| Suggests qualifying the '*' operator in modules where StarIsType is enabled. Triggered by: 'GHC.Parser.Errors.Types.PsWarnStarBinder' Test case(s): warnings/should_compile/StarBinder.hs -} | SuggestQualifyStarOperator {-| Suggests that a type signature should have form :: in order to be accepted by GHC. Triggered by: 'GHC.Parser.Errors.Types.PsErrInvalidTypeSignature' Test case(s): parser/should_fail/T3811 -} | SuggestTypeSignatureForm {-| Suggests to move an orphan instance or to newtype-wrap it. Triggered by: 'GHC.Tc.Errors.Types.TcRnOrphanInstance' Test cases(s): warnings/should_compile/T9178 typecheck/should_compile/T4912 -} | SuggestFixOrphanInstance {-| Suggests to use a standalone deriving declaration when GHC can't derive a typeclass instance in a trivial way. Triggered by: 'GHC.Tc.Errors.Types.DerivBadErrConstructor' Test cases(s): typecheck/should_fail/tcfail086 -} | SuggestAddStandaloneDerivation {-| Suggests the user to fill in the wildcard constraint to disambiguate which constraint that is. Example: deriving instance _ => Eq (Foo f a) Triggered by: 'GHC.Tc.Errors.Types.DerivBadErrConstructor' Test cases(s): partial-sigs/should_fail/T13324_fail2 -} | SuggestFillInWildcardConstraint {-| Suggests to use an identifier other than 'forall' Triggered by: 'GHC.Tc.Errors.Types.TcRnForallIdentifier' -} | SuggestRenameForall {-| Suggests to use the appropriate Template Haskell tick: a single tick for a term-level 'NameSpace', or a double tick for a type-level 'NameSpace'. Triggered by: 'GHC.Tc.Errors.Types.TcRnIncorrectNameSpace'. 
-} | SuggestAppropriateTHTick NameSpace {-| Suggests enabling -ddump-splices to help debug an issue when a 'Name' is not in scope or is used in multiple different namespaces (e.g. both as a data constructor and a type constructor). Concomitant with 'NoExactName' or 'SameName' errors, see e.g. "GHC.Rename.Env.lookupExactOcc_either". Test cases: T5971, T7241, T13937. -} | SuggestDumpSlices {-| Suggests adding a tick to refer to something which has been promoted to the type level, e.g. a data constructor. Test cases: T9778, T19984. -} | SuggestAddTick UntickedPromotedThing {-| Something is split off from its corresponding declaration. For example, a datatype is given a role declaration in a different module. Test cases: T495, T8485, T2713, T5533. -} | SuggestMoveToDeclarationSite -- TODO: remove the SDoc argument. SDoc -- ^ fixity declaration, role annotation, type signature, ... RdrName -- ^ the 'RdrName' for the declaration site {-| Suggest a similar name that the user might have meant, e.g. suggest 'traverse' when the user has written @travrese@. Test case: mod73. -} | SuggestSimilarNames RdrName (NE.NonEmpty SimilarName) {-| Remind the user that the field selector has been suppressed because of -XNoFieldSelectors. Test cases: NFSSuppressed, records-nofieldselectors. -} | RemindFieldSelectorSuppressed { suppressed_selector :: RdrName , suppressed_parents :: [Name] } {-| Suggest importing from a module, removing a @hiding@ clause, or explain to the user that we couldn't find a module with the given 'ModuleName'. Test cases: mod28, mod36, mod87, mod114, ... -} | ImportSuggestion ImportSuggestion {-| Suggest importing a data constructor to bring it into scope Triggered by: 'GHC.Tc.Errors.Types.TcRnTypeCannotBeMarshaled' Test cases: ccfail004 -} | SuggestImportingDataCon {- Found a pragma in the body of a module, suggest placing it in the header -} | SuggestPlacePragmaInHeader -- | An 'InstantiationSuggestion' for a '.hsig' file. 
This is generated -- by GHC in case of a 'DriverUnexpectedSignature' and suggests a way -- to instantiate a particular signature, where the first argument is -- the signature name and the second is the module where the signature -- was defined. -- Example: -- -- src/MyStr.hsig:2:11: error: -- Unexpected signature: ‘MyStr’ -- (Try passing -instantiated-with="MyStr=" -- replacing as necessary.) data InstantiationSuggestion = InstantiationSuggestion !ModuleName !Module -- | Suggest how to fix an import. data ImportSuggestion -- | Some module exports what we want, but we aren't explicitly importing it. = CouldImportFrom (NE.NonEmpty (Module, ImportedModsVal)) OccName -- | Some module exports what we want, but we are explicitly hiding it. | CouldUnhideFrom (NE.NonEmpty (Module, ImportedModsVal)) OccName -- | Explain how something is in scope. data HowInScope -- | It was locally bound at this particular source location. = LocallyBoundAt SrcSpan -- | It was imported by this particular import declaration. | ImportedBy ImpDeclSpec data SimilarName = SimilarName Name | SimilarRdrName RdrName HowInScope -- | Something is promoted to the type-level without a promotion tick. data UntickedPromotedThing = UntickedConstructor LexicalFixity Name | UntickedExplicitList pprUntickedConstructor :: LexicalFixity -> Name -> SDoc pprUntickedConstructor fixity nm = case fixity of Prefix -> pprPrefixVar is_op ppr_nm -- e.g. (:) and '(:) Infix -> pprInfixVar is_op ppr_nm -- e.g. `Con` and '`Con` where ppr_nm = ppr nm is_op = isSymOcc (nameOccName nm) -- | Whether a constructor name is printed out as a bare symbol, e.g. @:@. -- -- True for symbolic names in infix position. -- -- Used for pretty-printing. isBareSymbol :: LexicalFixity -> Name -> Bool isBareSymbol fixity nm | isSymOcc (nameOccName nm) , Infix <- fixity = True | otherwise = False -------------------------------------------------------------------------------- -- | Whether '*' is a synonym for 'Data.Kind.Type'. 
data StarIsType = StarIsNotType | StarIsType -- | Display info about the treatment of '*' under NoStarIsType. -- -- With StarIsType, three properties of '*' hold: -- -- (a) it is not an infix operator -- (b) it is always in scope -- (c) it is a synonym for Data.Kind.Type -- -- However, the user might not know that they are working on a module with -- NoStarIsType and write code that still assumes (a), (b), and (c), which -- actually do not hold in that module. -- -- Violation of (a) shows up in the parser. For instance, in the following -- examples, we have '*' not applied to enough arguments: -- -- data A :: * -- data F :: * -> * -- -- Violation of (b) or (c) show up in the renamer and the typechecker -- respectively. For instance: -- -- type K = Either * Bool -- -- This will parse differently depending on whether StarIsType is enabled, -- but it will parse nonetheless. With NoStarIsType it is parsed as a type -- operator, thus we have ((*) Either Bool). Now there are two cases to -- consider: -- -- 1. There is no definition of (*) in scope. In this case the renamer will -- fail to look it up. This is a violation of assumption (b). -- -- 2. There is a definition of the (*) type operator in scope (for example -- coming from GHC.TypeNats). In this case the user will get a kind -- mismatch error. This is a violation of assumption (c). -- -- The user might unknowingly be working on a module with NoStarIsType -- or use '*' as 'Data.Kind.Type' out of habit. So it is important to give a -- hint whenever an assumption about '*' is violated. Unfortunately, it is -- somewhat difficult to deal with (c), so we limit ourselves to (a) and (b). -- -- 'noStarIsTypeHints' returns appropriate hints to the user depending on the -- extensions enabled in the module and the name that triggered the error. -- That is, if we have NoStarIsType and the error is related to '*' or its -- Unicode variant, we will suggest using 'Data.Kind.Type'; otherwise we won't -- suggest anything. 
noStarIsTypeHints :: StarIsType -> RdrName -> [GhcHint] noStarIsTypeHints is_star_type rdr_name -- One might ask: if can use `sdocOption sdocStarIsType` here, why bother to -- take star_is_type as input? Why not refactor? -- -- The reason is that `sdocOption sdocStarIsType` would indicate that -- StarIsType is enabled in the module that tries to load the problematic -- definition, not in the module that is being loaded. -- -- So if we have 'data T :: *' in a module with NoStarIsType, then the hint -- must be displayed even if we load this definition from a module (or GHCi) -- with StarIsType enabled! -- | isUnqualStar , StarIsNotType <- is_star_type = [SuggestUseTypeFromDataKind (Just rdr_name)] | otherwise = [] where -- Does rdr_name look like the user might have meant the '*' kind by it? -- We focus on unqualified stars specifically, because qualified stars are -- treated as type operators even under StarIsType. isUnqualStar | Unqual occName <- rdr_name = let fs = occNameFS occName in fs == fsLit "*" || fs == fsLit "★" | otherwise = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Hint/Ppr.hs0000644000000000000000000002613514472400113020746 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# OPTIONS_GHC -Wno-orphans #-} -- instance Outputable GhcHint module GHC.Types.Hint.Ppr ( perhapsAsPat -- also, and more interesting: instance Outputable GhcHint ) where import GHC.Prelude import GHC.Parser.Errors.Basic import GHC.Types.Hint import GHC.Hs.Expr () -- instance Outputable import GHC.Types.Id import GHC.Types.Name (NameSpace, pprDefinedAt, occNameSpace, pprNameSpace, isValNameSpace) import GHC.Types.Name.Reader (RdrName,ImpDeclSpec (..), rdrNameOcc, rdrNameSpace) import GHC.Types.SrcLoc (SrcSpan(..), srcSpanStartLine) import GHC.Unit.Module.Imported (ImportedModsVal(..)) import GHC.Unit.Types import GHC.Utils.Outputable import Data.List (intersperse) import qualified Data.List.NonEmpty as NE instance Outputable GhcHint where ppr = \case UnknownHint m -> ppr 
m SuggestExtension extHint -> case extHint of SuggestSingleExtension extraUserInfo ext -> (text "Perhaps you intended to use" <+> ppr ext) $$ extraUserInfo SuggestAnyExtension extraUserInfo exts -> let header = text "Enable any of the following extensions:" in header <+> hcat (intersperse (text ", ") (map ppr exts)) $$ extraUserInfo SuggestExtensions extraUserInfo exts -> let header = text "Enable all of the following extensions:" in header <+> hcat (intersperse (text ", ") (map ppr exts)) $$ extraUserInfo SuggestExtensionInOrderTo extraUserInfo ext -> (text "Use" <+> ppr ext) $$ extraUserInfo SuggestMissingDo -> text "Possibly caused by a missing 'do'?" SuggestLetInDo -> text "Perhaps you need a 'let' in a 'do' block?" $$ text "e.g. 'let x = 5' instead of 'x = 5'" SuggestAddSignatureCabalFile pi_mod_name -> text "Try adding" <+> quotes (ppr pi_mod_name) <+> text "to the" <+> quotes (text "signatures") <+> text "field in your Cabal file." SuggestSignatureInstantiations pi_mod_name suggestions -> let suggested_instantiated_with = hcat (punctuate comma $ [ ppr k <> text "=" <> ppr v | InstantiationSuggestion k v <- suggestions ]) in text "Try passing -instantiated-with=\"" <> suggested_instantiated_with <> text "\"" $$ text "replacing <" <> ppr pi_mod_name <> text "> as necessary." SuggestUseSpaces -> text "Please use spaces instead." SuggestUseWhitespaceAfter sym -> text "Add whitespace after the" <+> quotes (pprOperatorWhitespaceSymbol sym) <> char '.' SuggestUseWhitespaceAround sym _occurrence -> text "Add whitespace around" <+> quotes (text sym) <> char '.' SuggestParentheses -> text "Use parentheses." SuggestIncreaseMaxPmCheckModels -> text "Increase the limit or resolve the warnings to suppress this message." SuggestAddTypeSignatures bindings -> case bindings of -- This might happen when we have bindings which are /too complicated/, -- see for example 'DsCannotMixPolyAndUnliftedBindings' in 'GHC.HsToCore.Errors.Types'. 
-- In this case, we emit a generic message. UnnamedBinding -> text "Add a type signature." NamedBindings (x NE.:| xs) -> let nameList = case xs of [] -> quotes . ppr $ x _ -> pprWithCommas (quotes . ppr) xs <+> text "and" <+> quotes (ppr x) in hsep [ text "Consider giving" , nameList , text "a type signature"] SuggestBindToWildcard rhs -> hang (text "Suppress this warning by saying") 2 (quotes $ text "_ <-" <+> ppr rhs) SuggestAddInlineOrNoInlinePragma lhs_id rule_act -> vcat [ text "Add an INLINE[n] or NOINLINE[n] pragma for" <+> quotes (ppr lhs_id) , whenPprDebug (ppr (idInlineActivation lhs_id) $$ ppr rule_act) ] SuggestAddPhaseToCompetingRule bad_rule -> vcat [ text "Add phase [n] or [~n] to the competing rule" , whenPprDebug (ppr bad_rule) ] SuggestIncreaseSimplifierIterations -> text "Set limit with -fconstraint-solver-iterations=n; n=0 for no limit" SuggestUseTypeFromDataKind mb_rdr_name -> text "Use" <+> quotes (text "Type") <+> text "from" <+> quotes (text "Data.Kind") <+> text "instead." $$ maybe empty (\rdr_name -> text "NB: with NoStarIsType, " <> quotes (ppr rdr_name) <+> text "is treated as a regular type operator.") mb_rdr_name SuggestQualifiedAfterModuleName -> text "Place" <+> quotes (text "qualified") <+> text "after the module name." SuggestThQuotationSyntax -> vcat [ text "Perhaps you intended to use quotation syntax of TemplateHaskell," , text "but the type variable or constructor is missing" ] SuggestRoles nearby -> case nearby of [] -> empty [r] -> text "Perhaps you meant" <+> quotes (ppr r) -- will this last case ever happen?? _ -> hang (text "Perhaps you meant one of these:") 2 (pprWithCommas (quotes . ppr) nearby) SuggestQualifyStarOperator -> text "To use (or export) this operator in" <+> text "modules with StarIsType," $$ text " including the definition module, you must qualify it." 
SuggestTypeSignatureForm -> text "A type signature should be of form :: " SuggestAddToHSigExportList _name mb_mod -> let header = text "Try adding it to the export list of" in case mb_mod of Nothing -> header <+> text "the hsig file." Just mod -> header <+> ppr (moduleName mod) <> text "'s hsig file." SuggestFixOrphanInstance -> vcat [ text "Move the instance declaration to the module of the class or of the type, or" , text "wrap the type with a newtype and declare the instance on the new type." ] SuggestAddStandaloneDerivation -> text "Use a standalone deriving declaration instead" SuggestFillInWildcardConstraint -> text "Fill in the wildcard constraint yourself" SuggestRenameForall -> vcat [ text "Consider using another name, such as" , quotes (text "forAll") <> comma <+> quotes (text "for_all") <> comma <+> text "or" <+> quotes (text "forall_") <> dot ] SuggestAppropriateTHTick ns -> text "Perhaps use a" <+> how_many <+> text "tick" where how_many | isValNameSpace ns = text "single" | otherwise = text "double" SuggestDumpSlices -> vcat [ text "If you bound a unique Template Haskell name (NameU)" , text "perhaps via newName," , text "then -ddump-splices might be useful." ] SuggestAddTick (UntickedConstructor fixity name) -> hsep [ text "Use" , char '\'' <> con , text "instead of" , con <> mb_dot ] where con = pprUntickedConstructor fixity name mb_dot | isBareSymbol fixity name -- A final dot can be confusing for a symbol without parens, e.g. -- -- * Use ': instead of :. = empty | otherwise = dot SuggestAddTick UntickedExplicitList -> text "Add a promotion tick, e.g." 
<+> text "'[x,y,z]" <> dot SuggestMoveToDeclarationSite what rdr_name -> text "Move the" <+> what <+> text "to the declaration site of" <+> quotes (ppr rdr_name) <> dot SuggestSimilarNames tried_rdr_name similar_names -> case similar_names of n NE.:| [] -> text "Perhaps use" <+> pp_item n _ -> sep [ text "Perhaps use one of these:" , nest 2 (pprWithCommas pp_item $ NE.toList similar_names) ] where tried_ns = occNameSpace $ rdrNameOcc tried_rdr_name pp_item = pprSimilarName tried_ns RemindFieldSelectorSuppressed rdr_name parents -> text "Notice that" <+> quotes (ppr rdr_name) <+> text "is a field selector" <+> whose $$ text "that has been suppressed by NoFieldSelectors." where -- parents may be empty if this is a pattern synonym field without a selector whose | null parents = empty | otherwise = text "belonging to the type" <> plural parents <+> pprQuotedList parents ImportSuggestion import_suggestion -> pprImportSuggestion import_suggestion SuggestImportingDataCon -> text "Import the data constructor to bring it into scope" SuggestPlacePragmaInHeader -> text "Perhaps you meant to place it in the module header?" $$ text "The module header is the section at the top of the file, before the" <+> quotes (text "module") <+> text "keyword" perhapsAsPat :: SDoc perhapsAsPat = text "Perhaps you meant an as-pattern, which must not be surrounded by whitespace" -- | Pretty-print an 'ImportSuggestion'. 
pprImportSuggestion :: ImportSuggestion -> SDoc pprImportSuggestion (CouldImportFrom mods occ_name) | (mod, imv) NE.:| [] <- mods = fsep [ text "Perhaps you want to add" , quotes (ppr occ_name) , text "to the import list" , text "in the import of" , quotes (ppr mod) , parens (ppr (imv_span imv)) <> dot ] | otherwise = fsep [ text "Perhaps you want to add" , quotes (ppr occ_name) , text "to one of these import lists:" ] $$ nest 2 (vcat [ quotes (ppr mod) <+> parens (ppr (imv_span imv)) | (mod,imv) <- NE.toList mods ]) pprImportSuggestion (CouldUnhideFrom mods occ_name) | (mod, imv) NE.:| [] <- mods = fsep [ text "Perhaps you want to remove" , quotes (ppr occ_name) , text "from the explicit hiding list" , text "in the import of" , quotes (ppr mod) , parens (ppr (imv_span imv)) <> dot ] | otherwise = fsep [ text "Perhaps you want to remove" , quotes (ppr occ_name) , text "from the hiding clauses" , text "in one of these imports:" ] $$ nest 2 (vcat [ quotes (ppr mod) <+> parens (ppr (imv_span imv)) | (mod,imv) <- NE.toList mods ]) -- | Pretty-print a 'SimilarName'. pprSimilarName :: NameSpace -> SimilarName -> SDoc pprSimilarName _ (SimilarName name) = quotes (ppr name) <+> parens (pprDefinedAt name) pprSimilarName tried_ns (SimilarRdrName rdr_name how_in_scope) = case how_in_scope of LocallyBoundAt loc -> pp_ns rdr_name <+> quotes (ppr rdr_name) <+> loc' where loc' = case loc of UnhelpfulSpan l -> parens (ppr l) RealSrcSpan l _ -> parens (text "line" <+> int (srcSpanStartLine l)) ImportedBy is -> pp_ns rdr_name <+> quotes (ppr rdr_name) <+> parens (text "imported from" <+> ppr (is_mod is)) where pp_ns :: RdrName -> SDoc pp_ns rdr | ns /= tried_ns = pprNameSpace ns | otherwise = empty where ns = rdrNameSpace rdr ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/HpcInfo.hs0000644000000000000000000000154414472400113020626 0ustar0000000000000000-- | Haskell Program Coverage (HPC) support module GHC.Types.HpcInfo ( HpcInfo (..) 
, AnyHpcUsage , emptyHpcInfo , isHpcUsed ) where import GHC.Prelude -- | Information about a modules use of Haskell Program Coverage data HpcInfo = HpcInfo { hpcInfoTickCount :: Int , hpcInfoHash :: Int } | NoHpcInfo { hpcUsed :: AnyHpcUsage -- ^ Is hpc used anywhere on the module \*tree\*? } -- | This is used to signal if one of my imports used HPC instrumentation -- even if there is no module-local HPC usage type AnyHpcUsage = Bool emptyHpcInfo :: AnyHpcUsage -> HpcInfo emptyHpcInfo = NoHpcInfo -- | Find out if HPC is used by this module or any of the modules -- it depends upon isHpcUsed :: HpcInfo -> AnyHpcUsage isHpcUsed (HpcInfo {}) = True isHpcUsed (NoHpcInfo { hpcUsed = used }) = used ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/IPE.hs0000644000000000000000000000354514472400113017720 0ustar0000000000000000module GHC.Types.IPE ( DCMap, ClosureMap, InfoTableProvMap(..), emptyInfoTableProvMap, IpeSourceLocation ) where import GHC.Prelude import GHC.Types.Name import GHC.Types.SrcLoc import GHC.Core.DataCon import GHC.Types.Unique.Map import GHC.Core.Type import Data.List.NonEmpty import GHC.Cmm.CLabel (CLabel) import qualified Data.Map.Strict as Map -- | Position and information about an info table. -- For return frames these are the contents of a 'CoreSyn.SourceNote'. type IpeSourceLocation = (RealSrcSpan, String) -- | A map from a 'Name' to the best approximate source position that -- name arose from. type ClosureMap = UniqMap Name -- The binding (Type, Maybe IpeSourceLocation) -- The best approximate source position. -- (rendered type, source position, source note -- label) -- | A map storing all the different uses of a specific data constructor and the -- approximate source position that usage arose from. -- The 'Int' is an incrementing identifier which distinguishes each usage -- of a constructor in a module. It is paired with the source position -- the constructor was used at, if possible and a string which names -- the source location. 
This is the same information as is the payload -- for the 'GHC.Core.SourceNote' constructor. type DCMap = UniqMap DataCon (NonEmpty (Int, Maybe IpeSourceLocation)) type InfoTableToSourceLocationMap = Map.Map CLabel (Maybe IpeSourceLocation) data InfoTableProvMap = InfoTableProvMap { provDC :: DCMap , provClosure :: ClosureMap , provInfoTables :: InfoTableToSourceLocationMap } emptyInfoTableProvMap :: InfoTableProvMap emptyInfoTableProvMap = InfoTableProvMap emptyUniqMap emptyUniqMap Map.empty ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id.hs0000644000000000000000000012164714472400113017643 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[Id]{@Ids@: Value and constructor identifiers} -} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id' represents names that not only have a 'GHC.Types.Name.Name' but also a -- 'GHC.Core.TyCo.Rep.Type' and some additional details (a 'GHC.Types.Id.Info.IdInfo' and -- one of LocalIdDetails or GlobalIdDetails) that are added, -- modified and inspected by various compiler passes. 
These 'GHC.Types.Var.Var' names -- may either be global or local, see "GHC.Types.Var#globalvslocal" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" module GHC.Types.Id ( -- * The main types Var, Id, isId, -- * In and Out variants InVar, InId, OutVar, OutId, -- ** Simple construction mkGlobalId, mkVanillaGlobal, mkVanillaGlobalWithInfo, mkLocalId, mkLocalCoVar, mkLocalIdOrCoVar, mkLocalIdWithInfo, mkExportedLocalId, mkExportedVanillaId, mkSysLocal, mkSysLocalM, mkSysLocalOrCoVar, mkSysLocalOrCoVarM, mkUserLocal, mkUserLocalOrCoVar, mkTemplateLocals, mkTemplateLocalsNum, mkTemplateLocal, mkScaledTemplateLocal, mkWorkerId, -- ** Taking an Id apart idName, idType, idMult, idScaledType, idUnique, idInfo, idDetails, recordSelectorTyCon, recordSelectorTyCon_maybe, -- ** Modifying an Id setIdName, setIdUnique, GHC.Types.Id.setIdType, setIdMult, updateIdTypeButNotMult, updateIdTypeAndMult, updateIdTypeAndMultM, setIdExported, setIdNotExported, globaliseId, localiseId, setIdInfo, lazySetIdInfo, modifyIdInfo, maybeModifyIdInfo, zapLamIdInfo, zapIdDemandInfo, zapIdUsageInfo, zapIdUsageEnvInfo, zapIdUsedOnceInfo, zapIdTailCallInfo, zapFragileIdInfo, zapIdDmdSig, zapStableUnfolding, transferPolyIdInfo, scaleIdBy, scaleVarBy, -- ** Predicates on Ids isImplicitId, isDeadBinder, isStrictId, isExportedId, isLocalId, isGlobalId, isRecordSelector, isNaughtyRecordSelector, isPatSynRecordSelector, isDataConRecordSelector, isClassOpId, isClassOpId_maybe, isDFunId, isPrimOpId, isPrimOpId_maybe, isFCallId, isFCallId_maybe, isDataConWorkId, isDataConWorkId_maybe, isDataConWrapId, isDataConWrapId_maybe, isDataConId_maybe, idDataCon, isConLikeId, isWorkerLikeId, isDeadEndId, idIsFrom, hasNoBinding, -- ** Join variables JoinId, isJoinId, isJoinId_maybe, idJoinArity, asJoinId, asJoinId_maybe, zapJoinId, -- ** Inline pragma stuff idInlinePragma, setInlinePragma, modifyInlinePragma, idInlineActivation, setInlineActivation, idRuleMatchInfo, -- ** One-shot lambdas isOneShotBndr, 
isProbablyOneShotLambda, setOneShotLambda, clearOneShotLambda, updOneShotInfo, setIdOneShotInfo, isStateHackType, stateHackOneShot, typeOneShot, -- ** Reading 'IdInfo' fields idArity, idCallArity, idFunRepArity, idUnfolding, realIdUnfolding, idSpecialisation, idCoreRules, idHasRules, idCafInfo, idLFInfo_maybe, idOneShotInfo, idStateHackOneShotInfo, idOccInfo, isNeverRepPolyId, -- ** Writing 'IdInfo' fields setIdUnfolding, zapIdUnfolding, setCaseBndrEvald, setIdArity, setIdCallArity, setIdSpecialisation, setIdCafInfo, setIdOccInfo, zapIdOccInfo, setIdLFInfo, setIdDemandInfo, setIdDmdSig, setIdCprSig, setIdCbvMarks, idCbvMarks_maybe, idCbvMarkArity, asWorkerLikeId, asNonWorkerLikeId, idDemandInfo, idDmdSig, idCprSig, idTagSig_maybe, setIdTagSig ) where import GHC.Prelude import GHC.Core ( CoreRule, isStableUnfolding, evaldUnfolding, isCompulsoryUnfolding, Unfolding( NoUnfolding ), isEvaldUnfolding, hasSomeUnfolding, noUnfolding ) import GHC.Types.Id.Info import GHC.Types.Basic -- Imported and re-exported import GHC.Types.Var( Id, CoVar, JoinId, InId, InVar, OutId, OutVar, idInfo, idDetails, setIdDetails, globaliseId, isId, isLocalId, isGlobalId, isExportedId, setIdMult, updateIdTypeAndMult, updateIdTypeButNotMult, updateIdTypeAndMultM) import qualified GHC.Types.Var as Var import GHC.Core.Type import GHC.Types.RepType import GHC.Builtin.Types.Prim import GHC.Core.DataCon import GHC.Types.Demand import GHC.Types.Cpr import GHC.Types.Name import GHC.Unit.Module import GHC.Core.Class import {-# SOURCE #-} GHC.Builtin.PrimOps (PrimOp) import GHC.Types.ForeignCall import GHC.Data.Maybe import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Builtin.Uniques (mkBuiltinUnique) import GHC.Types.Unique.Supply import GHC.Data.FastString import GHC.Core.Multiplicity import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.GlobalVars import GHC.Utils.Trace import GHC.Stg.InferTags.TagSig -- infixl so you can say (id 
`set` a `set` b) infixl 1 `setIdUnfolding`, `setIdArity`, `setIdCallArity`, `setIdOccInfo`, `setIdOneShotInfo`, `setIdSpecialisation`, `setInlinePragma`, `setInlineActivation`, `idCafInfo`, `setIdDemandInfo`, `setIdDmdSig`, `setIdCprSig`, `asJoinId`, `asJoinId_maybe`, `setIdCbvMarks` {- ************************************************************************ * * \subsection{Basic Id manipulation} * * ************************************************************************ -} idName :: Id -> Name idName = Var.varName idUnique :: Id -> Unique idUnique = Var.varUnique idType :: Id -> Kind idType = Var.varType idMult :: Id -> Mult idMult = Var.varMult idScaledType :: Id -> Scaled Type idScaledType id = Scaled (idMult id) (idType id) scaleIdBy :: Mult -> Id -> Id scaleIdBy m id = setIdMult id (m `mkMultMul` idMult id) -- | Like 'scaleIdBy', but skips non-Ids. Useful for scaling -- a mixed list of ids and tyvars. scaleVarBy :: Mult -> Var -> Var scaleVarBy m id | isId id = scaleIdBy m id | otherwise = id setIdName :: Id -> Name -> Id setIdName = Var.setVarName setIdUnique :: Id -> Unique -> Id setIdUnique = Var.setVarUnique -- | Not only does this set the 'Id' 'Type', it also evaluates the type to try and -- reduce space usage setIdType :: Id -> Type -> Id setIdType id ty = seqType ty `seq` Var.setVarType id ty setIdExported :: Id -> Id setIdExported = Var.setIdExported setIdNotExported :: Id -> Id setIdNotExported = Var.setIdNotExported localiseId :: Id -> Id -- Make an Id with the same unique and type as the -- incoming Id, but with an *Internal* Name and *LocalId* flavour localiseId id | assert (isId id) $ isLocalId id && isInternalName name = id | otherwise = Var.mkLocalVar (idDetails id) (localiseName name) (Var.varMult id) (idType id) (idInfo id) where name = idName id lazySetIdInfo :: Id -> IdInfo -> Id lazySetIdInfo = Var.lazySetIdInfo setIdInfo :: Id -> IdInfo -> Id setIdInfo id info = info `seq` (lazySetIdInfo id info) -- Try to avoid space leaks by seq'ing 
modifyIdInfo :: HasDebugCallStack => (IdInfo -> IdInfo) -> Id -> Id modifyIdInfo fn id = setIdInfo id (fn (idInfo id)) -- maybeModifyIdInfo tries to avoid unnecessary thrashing maybeModifyIdInfo :: Maybe IdInfo -> Id -> Id maybeModifyIdInfo (Just new_info) id = lazySetIdInfo id new_info maybeModifyIdInfo Nothing id = id -- maybeModifyIdInfo tries to avoid unnecessary thrashing maybeModifyIdDetails :: Maybe IdDetails -> Id -> Id maybeModifyIdDetails (Just new_details) id = setIdDetails id new_details maybeModifyIdDetails Nothing id = id {- ************************************************************************ * * \subsection{Simple Id construction} * * ************************************************************************ Absolutely all Ids are made by mkId. It is just like Var.mkId, but in addition it pins free-tyvar-info onto the Id's type, where it can easily be found. Note [Free type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~ At one time we cached the free type variables of the type of an Id at the root of the type in a TyNote. The idea was to avoid repeating the free-type-variable calculation. But it turned out to slow down the compiler overall. I don't quite know why; perhaps finding free type variables of an Id isn't all that common whereas applying a substitution (which changes the free type variables) is more common. Anyway, we removed it in March 2008. -} -- | For an explanation of global vs. local 'Id's, see "GHC.Types.Var.Var#globalvslocal" mkGlobalId :: IdDetails -> Name -> Type -> IdInfo -> Id mkGlobalId = Var.mkGlobalVar -- | Make a global 'Id' without any extra information at all mkVanillaGlobal :: Name -> Type -> Id mkVanillaGlobal name ty = mkVanillaGlobalWithInfo name ty vanillaIdInfo -- | Make a global 'Id' with no global information but some generic 'IdInfo' mkVanillaGlobalWithInfo :: Name -> Type -> IdInfo -> Id mkVanillaGlobalWithInfo = mkGlobalId VanillaId -- | For an explanation of global vs. 
local 'Id's, see "GHC.Types.Var#globalvslocal" mkLocalId :: HasDebugCallStack => Name -> Mult -> Type -> Id mkLocalId name w ty = mkLocalIdWithInfo name w (assert (not (isCoVarType ty)) ty) vanillaIdInfo -- | Make a local CoVar mkLocalCoVar :: Name -> Type -> CoVar mkLocalCoVar name ty = assert (isCoVarType ty) $ Var.mkLocalVar CoVarId name Many ty vanillaIdInfo -- | Like 'mkLocalId', but checks the type to see if it should make a covar mkLocalIdOrCoVar :: Name -> Mult -> Type -> Id mkLocalIdOrCoVar name w ty -- We should assert (eqType w Many) in the isCoVarType case. -- However, currently this assertion does not hold. -- In tests with -fdefer-type-errors, such as T14584a, -- we create a linear 'case' where the scrutinee is a coercion -- (see castBottomExpr). This problem is covered by #17291. | isCoVarType ty = mkLocalCoVar name ty | otherwise = mkLocalId name w ty -- proper ids only; no covars! mkLocalIdWithInfo :: HasDebugCallStack => Name -> Mult -> Type -> IdInfo -> Id mkLocalIdWithInfo name w ty info = Var.mkLocalVar VanillaId name w (assert (not (isCoVarType ty)) ty) info -- Note [Free type variables] -- | Create a local 'Id' that is marked as exported. -- This prevents things attached to it from being removed as dead code. -- See Note [Exported LocalIds] mkExportedLocalId :: IdDetails -> Name -> Type -> Id mkExportedLocalId details name ty = Var.mkExportedLocalVar details name ty vanillaIdInfo -- Note [Free type variables] mkExportedVanillaId :: Name -> Type -> Id mkExportedVanillaId name ty = Var.mkExportedLocalVar VanillaId name ty vanillaIdInfo -- Note [Free type variables] -- | Create a system local 'Id'. 
These are local 'Id's (see "Var#globalvslocal") -- that are created by the compiler out of thin air mkSysLocal :: FastString -> Unique -> Mult -> Type -> Id mkSysLocal fs uniq w ty = assert (not (isCoVarType ty)) $ mkLocalId (mkSystemVarName uniq fs) w ty -- | Like 'mkSysLocal', but checks to see if we have a covar type mkSysLocalOrCoVar :: FastString -> Unique -> Mult -> Type -> Id mkSysLocalOrCoVar fs uniq w ty = mkLocalIdOrCoVar (mkSystemVarName uniq fs) w ty mkSysLocalM :: MonadUnique m => FastString -> Mult -> Type -> m Id mkSysLocalM fs w ty = getUniqueM >>= (\uniq -> return (mkSysLocal fs uniq w ty)) mkSysLocalOrCoVarM :: MonadUnique m => FastString -> Mult -> Type -> m Id mkSysLocalOrCoVarM fs w ty = getUniqueM >>= (\uniq -> return (mkSysLocalOrCoVar fs uniq w ty)) -- | Create a user local 'Id'. These are local 'Id's (see "GHC.Types.Var#globalvslocal") with a name and location that the user might recognize mkUserLocal :: OccName -> Unique -> Mult -> Type -> SrcSpan -> Id mkUserLocal occ uniq w ty loc = assert (not (isCoVarType ty)) $ mkLocalId (mkInternalName uniq occ loc) w ty -- | Like 'mkUserLocal', but checks if we have a coercion type mkUserLocalOrCoVar :: OccName -> Unique -> Mult -> Type -> SrcSpan -> Id mkUserLocalOrCoVar occ uniq w ty loc = mkLocalIdOrCoVar (mkInternalName uniq occ loc) w ty {- Make some local @Ids@ for a template @CoreExpr@. These have bogus @Uniques@, but that's OK because the templates are supposed to be instantiated before use. -} -- | Workers get local names. 
"CoreTidy" will externalise these if necessary mkWorkerId :: Unique -> Id -> Type -> Id mkWorkerId uniq unwrkr ty = mkLocalId (mkDerivedInternalName mkWorkerOcc uniq (getName unwrkr)) Many ty -- | Create a /template local/: a family of system local 'Id's in bijection with @Int@s, typically used in unfoldings mkTemplateLocal :: Int -> Type -> Id mkTemplateLocal i ty = mkScaledTemplateLocal i (unrestricted ty) mkScaledTemplateLocal :: Int -> Scaled Type -> Id mkScaledTemplateLocal i (Scaled w ty) = mkSysLocalOrCoVar (fsLit "v") (mkBuiltinUnique i) w ty -- "OrCoVar" since this is used in a superclass selector, -- and "~" and "~~" have coercion "superclasses". -- | Create a template local for a series of types mkTemplateLocals :: [Type] -> [Id] mkTemplateLocals = mkTemplateLocalsNum 1 -- | Create a template local for a series of type, but start from a specified template local mkTemplateLocalsNum :: Int -> [Type] -> [Id] mkTemplateLocalsNum n tys = zipWith mkTemplateLocal [n..] tys {- Note [Exported LocalIds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use mkExportedLocalId for things like - Dictionary functions (DFunId) - Wrapper and matcher Ids for pattern synonyms - Default methods for classes - Pattern-synonym matcher and builder Ids - etc They marked as "exported" in the sense that they should be kept alive even if apparently unused in other bindings, and not dropped as dead code by the occurrence analyser. (But "exported" here does not mean "brought into lexical scope by an import declaration". Indeed these things are always internal Ids that the user never sees.) It's very important that they are *LocalIds*, not GlobalIds, for lots of reasons: * We want to treat them as free variables for the purpose of dependency analysis (e.g. GHC.Core.FVs.exprFreeVars). * Look them up in the current substitution when we come across occurrences of them (in Subst.lookupIdSubst). 
Lacking this we can get an out-of-date unfolding, which can in turn make the simplifier go into an infinite loop (#9857) * Ensure that for dfuns that the specialiser does not float dict uses above their defns, which would prevent good simplifications happening. * The strictness analyser treats a occurrence of a GlobalId as imported and assumes it contains strictness in its IdInfo, which isn't true if the thing is bound in the same module as the occurrence. In CoreTidy we must make all these LocalIds into GlobalIds, so that in importing modules (in --make mode) we treat them as properly global. That is what is happening in, say tidy_insts in GHC.Iface.Tidy. ************************************************************************ * * \subsection{Special Ids} * * ************************************************************************ -} -- | If the 'Id' is that for a record selector, extract the 'sel_tycon'. Panic otherwise. recordSelectorTyCon :: Id -> RecSelParent recordSelectorTyCon id = case recordSelectorTyCon_maybe id of Just parent -> parent _ -> panic "recordSelectorTyCon" recordSelectorTyCon_maybe :: Id -> Maybe RecSelParent recordSelectorTyCon_maybe id = case Var.idDetails id of RecSelId { sel_tycon = parent } -> Just parent _ -> Nothing isRecordSelector :: Id -> Bool isNaughtyRecordSelector :: Id -> Bool isPatSynRecordSelector :: Id -> Bool isDataConRecordSelector :: Id -> Bool isPrimOpId :: Id -> Bool isFCallId :: Id -> Bool isDataConWorkId :: Id -> Bool isDataConWrapId :: Id -> Bool isDFunId :: Id -> Bool isClassOpId :: Id -> Bool isClassOpId_maybe :: Id -> Maybe Class isPrimOpId_maybe :: Id -> Maybe PrimOp isFCallId_maybe :: Id -> Maybe ForeignCall isDataConWorkId_maybe :: Id -> Maybe DataCon isDataConWrapId_maybe :: Id -> Maybe DataCon isRecordSelector id = case Var.idDetails id of RecSelId {} -> True _ -> False isDataConRecordSelector id = case Var.idDetails id of RecSelId {sel_tycon = RecSelData _} -> True _ -> False isPatSynRecordSelector id = case 
Var.idDetails id of RecSelId {sel_tycon = RecSelPatSyn _} -> True _ -> False isNaughtyRecordSelector id = case Var.idDetails id of RecSelId { sel_naughty = n } -> n _ -> False isClassOpId id = case Var.idDetails id of ClassOpId _ -> True _other -> False isClassOpId_maybe id = case Var.idDetails id of ClassOpId cls -> Just cls _other -> Nothing isPrimOpId id = case Var.idDetails id of PrimOpId _ -> True _ -> False isDFunId id = case Var.idDetails id of DFunId {} -> True _ -> False isPrimOpId_maybe id = case Var.idDetails id of PrimOpId op -> Just op _ -> Nothing isFCallId id = case Var.idDetails id of FCallId _ -> True _ -> False isFCallId_maybe id = case Var.idDetails id of FCallId call -> Just call _ -> Nothing isDataConWorkId id = case Var.idDetails id of DataConWorkId _ -> True _ -> False isDataConWorkId_maybe id = case Var.idDetails id of DataConWorkId con -> Just con _ -> Nothing isDataConWrapId id = case Var.idDetails id of DataConWrapId _ -> True _ -> False isDataConWrapId_maybe id = case Var.idDetails id of DataConWrapId con -> Just con _ -> Nothing isDataConId_maybe :: Id -> Maybe DataCon isDataConId_maybe id = case Var.idDetails id of DataConWorkId con -> Just con DataConWrapId con -> Just con _ -> Nothing -- | An Id for which we might require all callers to pass strict arguments properly tagged + evaluated. 
-- -- See Note [CBV Function Ids] isWorkerLikeId :: Id -> Bool isWorkerLikeId id = case Var.idDetails id of WorkerLikeId _ -> True JoinId _ Just{} -> True _ -> False isJoinId :: Var -> Bool -- It is convenient in GHC.Core.Opt.SetLevels.lvlMFE to apply isJoinId -- to the free vars of an expression, so it's convenient -- if it returns False for type variables isJoinId id | isId id = case Var.idDetails id of JoinId {} -> True _ -> False | otherwise = False -- | Doesn't return strictness marks isJoinId_maybe :: Var -> Maybe JoinArity isJoinId_maybe id | isId id = assertPpr (isId id) (ppr id) $ case Var.idDetails id of JoinId arity _marks -> Just arity _ -> Nothing | otherwise = Nothing idDataCon :: Id -> DataCon -- ^ Get from either the worker or the wrapper 'Id' to the 'DataCon'. Currently used only in the desugarer. -- -- INVARIANT: @idDataCon (dataConWrapId d) = d@: remember, 'dataConWrapId' can return either the wrapper or the worker idDataCon id = isDataConId_maybe id `orElse` pprPanic "idDataCon" (ppr id) hasNoBinding :: Id -> Bool -- ^ Returns @True@ of an 'Id' which may not have a -- binding, even though it is defined in this module. -- Data constructor workers used to be things of this kind, but they aren't any -- more. Instead, we inject a binding for them at the CorePrep stage. The -- exception to this is unboxed tuples and sums datacons, which definitely have -- no binding hasNoBinding id = case Var.idDetails id of PrimOpId _ -> True -- See Note [Eta expanding primops] in GHC.Builtin.PrimOps FCallId _ -> True DataConWorkId dc -> isUnboxedTupleDataCon dc || isUnboxedSumDataCon dc _ -> isCompulsoryUnfolding (realIdUnfolding id) -- Note: this function must be very careful not to force -- any of the fields that aren't the 'uf_src' field of -- the 'Unfolding' of the 'Id'. 
This is because these fields are computed -- in terms of the 'uf_tmpl' field, which is not available -- until we have finished Core Lint for the unfolding, which calls 'hasNoBinding' -- in 'checkCanEtaExpand'. -- -- In particular, calling 'idUnfolding' rather than 'realIdUnfolding' here can -- force the 'uf_tmpl' field, because 'trimUnfolding' forces the 'uf_is_value' field, -- and this field is usually computed in terms of the 'uf_tmpl' field, -- so we will force that as well. -- -- See Note [Lazily checking Unfoldings] in GHC.IfaceToCore. isImplicitId :: Id -> Bool -- ^ 'isImplicitId' tells whether an 'Id's info is implied by other -- declarations, so we don't need to put its signature in an interface -- file, even if it's mentioned in some other interface unfolding. isImplicitId id = case Var.idDetails id of FCallId {} -> True ClassOpId {} -> True PrimOpId {} -> True DataConWorkId {} -> True DataConWrapId {} -> True -- These are implied by their type or class decl; -- remember that all type and class decls appear in the interface file. 
-- The dfun id is not an implicit Id; it must *not* be omitted, because -- it carries version info for the instance decl _ -> False idIsFrom :: Module -> Id -> Bool idIsFrom mod id = nameIsLocalOrFrom mod (idName id) isDeadBinder :: Id -> Bool isDeadBinder bndr | isId bndr = isDeadOcc (idOccInfo bndr) | otherwise = False -- TyVars count as not dead {- ************************************************************************ * * Join variables * * ************************************************************************ -} idJoinArity :: JoinId -> JoinArity idJoinArity id = isJoinId_maybe id `orElse` pprPanic "idJoinArity" (ppr id) asJoinId :: Id -> JoinArity -> JoinId asJoinId id arity = warnPprTrace (not (isLocalId id)) "global id being marked as join var" (ppr id) $ warnPprTrace (not (is_vanilla_or_join id)) "asJoinId" (ppr id <+> pprIdDetails (idDetails id)) $ id `setIdDetails` JoinId arity (idCbvMarks_maybe id) where is_vanilla_or_join id = case Var.idDetails id of VanillaId -> True -- Can workers become join ids? Yes! WorkerLikeId {} -> pprTraceDebug "asJoinId (call by value function)" (ppr id) True JoinId {} -> True _ -> False zapJoinId :: Id -> Id -- May be a regular id already zapJoinId jid | isJoinId jid = zapIdTailCallInfo (newIdDetails `seq` jid `setIdDetails` newIdDetails) -- Core Lint may complain if still marked -- as AlwaysTailCalled | otherwise = jid where newIdDetails = case idDetails jid of -- We treat join points as CBV functions. Even after they are floated out. -- See Note [Use CBV semantics only for join points and workers] JoinId _ (Just marks) -> WorkerLikeId marks JoinId _ Nothing -> WorkerLikeId [] _ -> panic "zapJoinId: newIdDetails can only be used if Id was a join Id." 
asJoinId_maybe :: Id -> Maybe JoinArity -> Id asJoinId_maybe id (Just arity) = asJoinId id arity asJoinId_maybe id Nothing = zapJoinId id {- ************************************************************************ * * \subsection{IdInfo stuff} * * ************************************************************************ -} --------------------------------- -- ARITY idArity :: Id -> Arity idArity id = arityInfo (idInfo id) setIdArity :: Id -> Arity -> Id setIdArity id arity = modifyIdInfo (`setArityInfo` arity) id idCallArity :: Id -> Arity idCallArity id = callArityInfo (idInfo id) setIdCallArity :: Id -> Arity -> Id setIdCallArity id arity = modifyIdInfo (`setCallArityInfo` arity) id -- | This function counts all arguments post-unarisation, which includes -- arguments with no runtime representation -- see Note [Unarisation and arity] idFunRepArity :: Id -> RepArity idFunRepArity x = countFunRepArgs (idArity x) (idType x) -- | Returns true if an application to n args diverges or throws an exception -- See Note [Dead ends] in "GHC.Types.Demand". isDeadEndId :: Var -> Bool isDeadEndId v | isId v = isDeadEndSig (idDmdSig v) | otherwise = False -- | Accesses the 'Id''s 'dmdSigInfo'. idDmdSig :: Id -> DmdSig idDmdSig id = dmdSigInfo (idInfo id) setIdDmdSig :: Id -> DmdSig -> Id setIdDmdSig id sig = modifyIdInfo (`setDmdSigInfo` sig) id idCprSig :: Id -> CprSig idCprSig id = cprSigInfo (idInfo id) setIdCprSig :: Id -> CprSig -> Id setIdCprSig id sig = modifyIdInfo (\info -> setCprSigInfo info sig) id zapIdDmdSig :: Id -> Id zapIdDmdSig id = modifyIdInfo (`setDmdSigInfo` nopSig) id -- | This predicate says whether the 'Id' has a strict demand placed on it or -- has a type such that it can always be evaluated strictly (i.e an -- unlifted type, as of GHC 7.6). 
We need to -- check separately whether the 'Id' has a so-called \"strict type\" because if -- the demand for the given @id@ hasn't been computed yet but @id@ has a strict -- type, we still want @isStrictId id@ to be @True@. isStrictId :: Id -> Bool isStrictId id | assertPpr (isId id) (text "isStrictId: not an id: " <+> ppr id) $ isJoinId id = False | otherwise = isStrictType (idType id) || isStrUsedDmd (idDemandInfo id) -- Take the best of both strictnesses - old and new idTagSig_maybe :: Id -> Maybe TagSig idTagSig_maybe = tagSig . idInfo --------------------------------- -- UNFOLDING -- | Returns the 'Id's unfolding, but does not expose the unfolding of a strong -- loop breaker. See 'unfoldingInfo'. -- -- If you really want the unfolding of a strong loopbreaker, call 'realIdUnfolding'. idUnfolding :: Id -> Unfolding idUnfolding id = unfoldingInfo (idInfo id) realIdUnfolding :: Id -> Unfolding -- ^ Expose the unfolding if there is one, including for loop breakers realIdUnfolding id = realUnfoldingInfo (idInfo id) setIdUnfolding :: Id -> Unfolding -> Id setIdUnfolding id unfolding = modifyIdInfo (`setUnfoldingInfo` unfolding) id idDemandInfo :: Id -> Demand idDemandInfo id = demandInfo (idInfo id) setIdDemandInfo :: Id -> Demand -> Id setIdDemandInfo id dmd = modifyIdInfo (`setDemandInfo` dmd) id setIdTagSig :: Id -> TagSig -> Id setIdTagSig id sig = modifyIdInfo (`setTagSig` sig) id -- | If all marks are NotMarkedStrict we just set nothing. setIdCbvMarks :: Id -> [CbvMark] -> Id setIdCbvMarks id marks | not (any isMarkedCbv marks) = id | otherwise = -- pprTrace "setMarks:" (ppr id <> text ":" <> ppr marks) $ case idDetails id of -- good ol (likely worker) function VanillaId -> id `setIdDetails` (WorkerLikeId trimmedMarks) JoinId arity _ -> id `setIdDetails` (JoinId arity (Just trimmedMarks)) -- Updating an existing call by value function. 
WorkerLikeId _ -> id `setIdDetails` (WorkerLikeId trimmedMarks) -- Do nothing for these RecSelId{} -> id DFunId{} -> id _ -> pprTrace "setIdCbvMarks: Unable to set cbv marks for" (ppr id $$ text "marks:" <> ppr marks $$ text "idDetails:" <> ppr (idDetails id)) id where -- (Currently) no point in passing args beyond the arity unlifted. -- We would have to eta expand all call sites to (length marks). -- Perhaps that's sensible but for now be conservative. -- Similarly we don't need any lazy marks at the end of the list. -- This way the length of the list is always exactly number of arguments -- that must be visible to CodeGen. See See Note [CBV Function Ids] -- for more details. trimmedMarks = dropWhileEndLE (not . isMarkedCbv) $ take (idArity id) marks idCbvMarks_maybe :: Id -> Maybe [CbvMark] idCbvMarks_maybe id = case idDetails id of WorkerLikeId marks -> Just marks JoinId _arity marks -> marks _ -> Nothing -- Id must be called with at least this arity in order to allow arguments to -- be passed unlifted. idCbvMarkArity :: Id -> Arity idCbvMarkArity fn = maybe 0 length (idCbvMarks_maybe fn) -- | Remove any cbv marks on arguments from a given Id. asNonWorkerLikeId :: Id -> Id asNonWorkerLikeId id = let details = case idDetails id of WorkerLikeId{} -> Just $ VanillaId JoinId arity Just{} -> Just $ JoinId arity Nothing _ -> Nothing in maybeModifyIdDetails details id -- | Turn this id into a WorkerLikeId if possible. asWorkerLikeId :: Id -> Id asWorkerLikeId id = let details = case idDetails id of WorkerLikeId{} -> Nothing JoinId _arity Just{} -> Nothing JoinId arity Nothing -> Just (JoinId arity (Just [])) VanillaId -> Just $ WorkerLikeId [] _ -> Nothing in maybeModifyIdDetails details id setCaseBndrEvald :: StrictnessMark -> Id -> Id -- Used for variables bound by a case expressions, both the case-binder -- itself, and any pattern-bound variables that are argument of a -- strict constructor. 
It just marks the variable as already-evaluated, -- so that (for example) a subsequent 'seq' can be dropped setCaseBndrEvald str id | isMarkedStrict str = id `setIdUnfolding` evaldUnfolding | otherwise = id -- | Similar to trimUnfolding, but also removes evaldness info. zapIdUnfolding :: Id -> Id zapIdUnfolding v | isId v, hasSomeUnfolding (idUnfolding v) = setIdUnfolding v noUnfolding | otherwise = v --------------------------------- -- SPECIALISATION -- See Note [Specialisations and RULES in IdInfo] in GHC.Types.Id.Info idSpecialisation :: Id -> RuleInfo idSpecialisation id = ruleInfo (idInfo id) idCoreRules :: Id -> [CoreRule] idCoreRules id = ruleInfoRules (idSpecialisation id) idHasRules :: Id -> Bool idHasRules id = not (isEmptyRuleInfo (idSpecialisation id)) setIdSpecialisation :: Id -> RuleInfo -> Id setIdSpecialisation id spec_info = modifyIdInfo (`setRuleInfo` spec_info) id --------------------------------- -- CAF INFO idCafInfo :: Id -> CafInfo idCafInfo id = cafInfo (idInfo id) setIdCafInfo :: Id -> CafInfo -> Id setIdCafInfo id caf_info = modifyIdInfo (`setCafInfo` caf_info) id --------------------------------- -- Lambda form info idLFInfo_maybe :: Id -> Maybe LambdaFormInfo idLFInfo_maybe = lfInfo . idInfo setIdLFInfo :: Id -> LambdaFormInfo -> Id setIdLFInfo id lf = modifyIdInfo (`setLFInfo` lf) id --------------------------------- -- Occurrence INFO idOccInfo :: Id -> OccInfo idOccInfo id = occInfo (idInfo id) setIdOccInfo :: Id -> OccInfo -> Id setIdOccInfo id occ_info = modifyIdInfo (`setOccInfo` occ_info) id zapIdOccInfo :: Id -> Id zapIdOccInfo b = b `setIdOccInfo` noOccInfo {- --------------------------------- -- INLINING The inline pragma tells us to be very keen to inline this Id, but it's still OK not to if optimisation is switched off. 
-} idInlinePragma :: Id -> InlinePragma idInlinePragma id = inlinePragInfo (idInfo id) setInlinePragma :: Id -> InlinePragma -> Id setInlinePragma id prag = modifyIdInfo (`setInlinePragInfo` prag) id modifyInlinePragma :: Id -> (InlinePragma -> InlinePragma) -> Id modifyInlinePragma id fn = modifyIdInfo (\info -> info `setInlinePragInfo` (fn (inlinePragInfo info))) id idInlineActivation :: Id -> Activation idInlineActivation id = inlinePragmaActivation (idInlinePragma id) setInlineActivation :: Id -> Activation -> Id setInlineActivation id act = modifyInlinePragma id (\prag -> setInlinePragmaActivation prag act) idRuleMatchInfo :: Id -> RuleMatchInfo idRuleMatchInfo id = inlinePragmaRuleMatchInfo (idInlinePragma id) isConLikeId :: Id -> Bool isConLikeId id = isConLike (idRuleMatchInfo id) {- --------------------------------- -- ONE-SHOT LAMBDAS -} idOneShotInfo :: Id -> OneShotInfo idOneShotInfo id = oneShotInfo (idInfo id) -- | Like 'idOneShotInfo', but taking the Horrible State Hack in to account -- See Note [The state-transformer hack] in "GHC.Core.Opt.Arity" idStateHackOneShotInfo :: Id -> OneShotInfo idStateHackOneShotInfo id | isStateHackType (idType id) = stateHackOneShot | otherwise = idOneShotInfo id -- | Returns whether the lambda associated with the 'Id' is certainly applied at most once -- This one is the "business end", called externally. -- It works on type variables as well as Ids, returning True -- Its main purpose is to encapsulate the Horrible State Hack -- See Note [The state-transformer hack] in "GHC.Core.Opt.Arity" isOneShotBndr :: Var -> Bool isOneShotBndr var | isTyVar var = True | OneShotLam <- idStateHackOneShotInfo var = True | otherwise = False -- | Should we apply the state hack to values of this 'Type'? 
stateHackOneShot :: OneShotInfo stateHackOneShot = OneShotLam typeOneShot :: Type -> OneShotInfo typeOneShot ty | isStateHackType ty = stateHackOneShot | otherwise = NoOneShotInfo isStateHackType :: Type -> Bool isStateHackType ty | unsafeHasNoStateHack = False | otherwise = case tyConAppTyCon_maybe ty of Just tycon -> tycon == statePrimTyCon _ -> False -- This is a gross hack. It claims that -- every function over realWorldStatePrimTy is a one-shot -- function. This is pretty true in practice, and makes a big -- difference. For example, consider -- a `thenST` \ r -> ...E... -- The early full laziness pass, if it doesn't know that r is one-shot -- will pull out E (let's say it doesn't mention r) to give -- let lvl = E in a `thenST` \ r -> ...lvl... -- When `thenST` gets inlined, we end up with -- let lvl = E in \s -> case a s of (r, s') -> ...lvl... -- and we don't re-inline E. -- -- It would be better to spot that r was one-shot to start with, but -- I don't want to rely on that. -- -- Another good example is in fill_in in PrelPack.hs. We should be able to -- spot that fill_in has arity 2 (and when Keith is done, we will) but we can't yet. 
isProbablyOneShotLambda :: Id -> Bool isProbablyOneShotLambda id = case idStateHackOneShotInfo id of OneShotLam -> True NoOneShotInfo -> False setOneShotLambda :: Id -> Id setOneShotLambda id = modifyIdInfo (`setOneShotInfo` OneShotLam) id clearOneShotLambda :: Id -> Id clearOneShotLambda id = modifyIdInfo (`setOneShotInfo` NoOneShotInfo) id setIdOneShotInfo :: Id -> OneShotInfo -> Id setIdOneShotInfo id one_shot = modifyIdInfo (`setOneShotInfo` one_shot) id updOneShotInfo :: Id -> OneShotInfo -> Id -- Combine the info in the Id with new info updOneShotInfo id one_shot | do_upd = setIdOneShotInfo id one_shot | otherwise = id where do_upd = case (idOneShotInfo id, one_shot) of (NoOneShotInfo, _) -> True (OneShotLam, _) -> False -- The OneShotLambda functions simply fiddle with the IdInfo flag -- But watch out: this may change the type of something else -- f = \x -> e -- If we change the one-shot-ness of x, f's type changes -- Replaces the id info if the zapper returns @Just idinfo@ zapInfo :: (IdInfo -> Maybe IdInfo) -> Id -> Id zapInfo zapper id = maybeModifyIdInfo (zapper (idInfo id)) id zapLamIdInfo :: Id -> Id zapLamIdInfo = zapInfo zapLamInfo zapFragileIdInfo :: Id -> Id zapFragileIdInfo = zapInfo zapFragileInfo zapIdDemandInfo :: Id -> Id zapIdDemandInfo = zapInfo zapDemandInfo zapIdUsageInfo :: Id -> Id zapIdUsageInfo = zapInfo zapUsageInfo zapIdUsageEnvInfo :: Id -> Id zapIdUsageEnvInfo = zapInfo zapUsageEnvInfo zapIdUsedOnceInfo :: Id -> Id zapIdUsedOnceInfo = zapInfo zapUsedOnceInfo zapIdTailCallInfo :: Id -> Id zapIdTailCallInfo = zapInfo zapTailCallInfo zapStableUnfolding :: Id -> Id zapStableUnfolding id | isStableUnfolding (realIdUnfolding id) = setIdUnfolding id NoUnfolding | otherwise = id {- Note [transferPolyIdInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~ This transfer is used in three places: FloatOut (long-distance let-floating) GHC.Core.Opt.Simplify.Utils.abstractFloats (short-distance let-floating) StgLiftLams (selectively lambda-lift local functions to 
top-level) Consider the short-distance let-floating: f = /\a. let g = rhs in ... Then if we float thus g' = /\a. rhs f = /\a. ...[g' a/g].... we *do not* want to lose g's * strictness information * arity * inline pragma (though that is bit more debatable) * occurrence info Mostly this is just an optimisation, but it's *vital* to transfer the occurrence info. Consider NonRec { f = /\a. let Rec { g* = ..g.. } in ... } where the '*' means 'LoopBreaker'. Then if we float we must get Rec { g'* = /\a. ...(g' a)... } NonRec { f = /\a. ...[g' a/g]....} where g' is also marked as LoopBreaker. If not, terrible things can happen if we re-simplify the binding (and the Simplifier does sometimes simplify a term twice); see #4345. It's not so simple to retain * worker info * rules so we simply discard those. Sooner or later this may bite us. If we abstract wrt one or more *value* binders, we must modify the arity and strictness info before transferring it. E.g. f = \x. e --> g' = \y. \x. e + substitute (g' y) for g Notice that g' has an arity one more than the original g -} transferPolyIdInfo :: Id -- Original Id -> [Var] -- Abstract wrt these variables -> Id -- New Id -> Id transferPolyIdInfo old_id abstract_wrt new_id = modifyIdInfo transfer new_id `setIdCbvMarks` new_cbv_marks where arity_increase = count isId abstract_wrt -- Arity increases by the -- number of value binders old_info = idInfo old_id old_arity = arityInfo old_info old_inline_prag = inlinePragInfo old_info old_occ_info = occInfo old_info new_arity = old_arity + arity_increase new_occ_info = zapOccTailCallInfo old_occ_info old_strictness = dmdSigInfo old_info new_strictness = prependArgsDmdSig arity_increase old_strictness old_cpr = cprSigInfo old_info old_cbv_marks = fromMaybe (replicate old_arity NotMarkedCbv) (idCbvMarks_maybe old_id) abstr_cbv_marks = mapMaybe getMark abstract_wrt new_cbv_marks = abstr_cbv_marks ++ old_cbv_marks getMark v | not (isId v) = Nothing | isId v , isEvaldUnfolding (idUnfolding v) , 
mightBeLiftedType (idType v) = Just MarkedCbv | otherwise = Just NotMarkedCbv transfer new_info = new_info `setArityInfo` new_arity `setInlinePragInfo` old_inline_prag `setOccInfo` new_occ_info `setDmdSigInfo` new_strictness `setCprSigInfo` old_cpr isNeverRepPolyId :: Id -> Bool isNeverRepPolyId = isNeverRepPolyIdInfo . idInfo ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id/Info.hs0000644000000000000000000010514714472400113020533 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1993-1998 \section[IdInfo]{@IdInfos@: Non-essential information about @Ids@} (And a pretty good illustration of quite a few things wrong with Haskell. [WDP 94/11]) -} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE BinaryLiterals #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} module GHC.Types.Id.Info ( -- * The IdDetails type IdDetails(..), pprIdDetails, coVarDetails, isCoVarDetails, JoinArity, isJoinIdDetails_maybe, RecSelParent(..), -- * The IdInfo type IdInfo, -- Abstract vanillaIdInfo, noCafIdInfo, -- ** The OneShotInfo type OneShotInfo(..), oneShotInfo, noOneShotInfo, hasNoOneShotInfo, setOneShotInfo, -- ** Zapping various forms of Info zapLamInfo, zapFragileInfo, zapDemandInfo, zapUsageInfo, zapUsageEnvInfo, zapUsedOnceInfo, zapTailCallInfo, zapCallArityInfo, trimUnfolding, -- ** The ArityInfo type ArityInfo, unknownArity, arityInfo, setArityInfo, ppArityInfo, callArityInfo, setCallArityInfo, -- ** Demand and strictness Info dmdSigInfo, setDmdSigInfo, cprSigInfo, setCprSigInfo, demandInfo, setDemandInfo, pprStrictness, -- ** Unfolding Info realUnfoldingInfo, unfoldingInfo, setUnfoldingInfo, hasInlineUnfolding, -- ** The InlinePragInfo type InlinePragInfo, inlinePragInfo, setInlinePragInfo, -- ** The OccInfo type OccInfo(..), isDeadOcc, isStrongLoopBreaker, isWeakLoopBreaker, occInfo, setOccInfo, InsideLam(..), BranchCount, TailCallInfo(..), tailCallInfo, isAlwaysTailCalled, -- ** The RuleInfo type RuleInfo(..), 
emptyRuleInfo, isEmptyRuleInfo, ruleInfoFreeVars, ruleInfoRules, setRuleInfoHead, ruleInfo, setRuleInfo, tagSigInfo, -- ** The CAFInfo type CafInfo(..), ppCafInfo, mayHaveCafRefs, cafInfo, setCafInfo, -- ** The LambdaFormInfo type LambdaFormInfo, lfInfo, setLFInfo, setTagSig, tagSig, -- ** Tick-box Info TickBoxOp(..), TickBoxId, -- ** Levity info LevityInfo, levityInfo, setNeverRepPoly, setLevityInfoWithType, isNeverRepPolyIdInfo ) where import GHC.Prelude import GHC.Core import GHC.Core.Class import {-# SOURCE #-} GHC.Builtin.PrimOps (PrimOp) import GHC.Types.Name import GHC.Types.Var.Set import GHC.Types.Basic import GHC.Core.DataCon import GHC.Core.TyCon import GHC.Core.PatSyn import GHC.Core.Type import GHC.Types.ForeignCall import GHC.Unit.Module import GHC.Types.Demand import GHC.Types.Cpr import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Stg.InferTags.TagSig import Data.Word import GHC.StgToCmm.Types (LambdaFormInfo) -- infixl so you can say (id `set` a `set` b) infixl 1 `setRuleInfo`, `setArityInfo`, `setInlinePragInfo`, `setUnfoldingInfo`, `setOneShotInfo`, `setOccInfo`, `setCafInfo`, `setDmdSigInfo`, `setCprSigInfo`, `setDemandInfo`, `setNeverRepPoly`, `setLevityInfoWithType` {- ************************************************************************ * * IdDetails * * ************************************************************************ -} -- | Identifier Details -- -- The 'IdDetails' of an 'Id' give stable, and necessary, -- information about the Id. data IdDetails = VanillaId -- | The 'Id' for a record selector | RecSelId { sel_tycon :: RecSelParent , sel_naughty :: Bool -- True <=> a "naughty" selector which can't actually exist, for example @x@ in: -- data T = forall a. 
MkT { x :: a } } -- See Note [Naughty record selectors] in GHC.Tc.TyCl | DataConWorkId DataCon -- ^ The 'Id' is for a data constructor /worker/ | DataConWrapId DataCon -- ^ The 'Id' is for a data constructor /wrapper/ -- [the only reasons we need to know is so that -- a) to support isImplicitId -- b) when desugaring a RecordCon we can get -- from the Id back to the data con] | ClassOpId Class -- ^ The 'Id' is a superclass selector, -- or class operation of a class | PrimOpId PrimOp -- ^ The 'Id' is for a primitive operator | FCallId ForeignCall -- ^ The 'Id' is for a foreign call. -- Type will be simple: no type families, newtypes, etc | TickBoxOpId TickBoxOp -- ^ The 'Id' is for a HPC tick box (both traditional and binary) | DFunId Bool -- ^ A dictionary function. -- Bool = True <=> the class has only one method, so may be -- implemented with a newtype, so it might be bad -- to be strict on this dictionary | CoVarId -- ^ A coercion variable -- This only covers /un-lifted/ coercions, of type -- (t1 ~# t2) or (t1 ~R# t2), not their lifted variants | JoinId JoinArity (Maybe [CbvMark]) -- ^ An 'Id' for a join point taking n arguments -- Note [Join points] in "GHC.Core" -- Can also work as a WorkerLikeId if given `CbvMark`s. -- See Note [CBV Function Ids] -- The [CbvMark] is always empty (and ignored) until after Tidy. | WorkerLikeId [CbvMark] -- ^ An 'Id' for a worker like function, which might expect some arguments to be -- passed both evaluated and tagged. -- Worker like functions are create by W/W and SpecConstr and we can expect that they -- aren't used unapplied. -- See Note [CBV Function Ids] -- See Note [Tag Inference] -- The [CbvMark] is always empty (and ignored) until after Tidy for ids from the current -- module. {- Note [CBV Function Ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A WorkerLikeId essentially allows us to constrain the calling convention for the given Id. Each such Id carries with it a list of CbvMarks with each element representing a value argument. 
Arguments who have a matching `MarkedCbv` entry in the list need to be passed evaluated+*properly tagged*. CallByValueFunIds give us additional expressiveness which we use to improve runtime. This is all part of the TagInference work. See also Note [Tag Inference]. They allows us to express the fact that an argument is not only evaluated to WHNF once we entered it's RHS but also that an lifted argument is already *properly tagged* once we jump into the RHS. This means when e.g. branching on such an argument the RHS doesn't needed to perform an eval check to ensure the argument isn't an indirection. All seqs on such an argument in the functions body become no-ops as well. The invariants around the arguments of call by value function like Ids are then: * In any call `(f e1 .. en)`, if `f`'s i'th argument is marked `MarkedCbv`, then the caller must ensure that the i'th argument * points directly to the value (and hence is certainly evaluated before the call) * is a properly tagged pointer to that value * The following functions (and only these functions) have `CbvMarks`: * Any `WorkerLikeId` * Some `JoinId` bindings. This works analogous to the Strict Field Invariant. See also Note [Strict Field Invariant]. To make this work what we do is: * During W/W and SpecConstr any worker/specialized binding we introduce is marked as a worker binding by `asWorkerLikeId`. * W/W and SpecConstr further set OtherCon[] unfoldings on arguments which represent contents of a strict fields. * During Tidy we look at all bindings. For any callByValueLike Id and join point we mark arguments as cbv if they Are strict. We don't do so for regular bindings. See Note [Use CBV semantics only for join points and workers] for why. We might have made some ids rhs *more* strict in order to make their arguments be passed CBV. See Note [Call-by-value for worker args] for why. * During CorePrep calls to CallByValueFunIds are eta expanded. 
* During Stg CodeGen: * When we see a call to a callByValueLike Id: * We check if all arguments marked to be passed unlifted are already tagged. * If they aren't we will wrap the call in case expressions which will evaluate+tag these arguments before jumping to the function. * During Cmm codeGen: * When generating code for the RHS of a StrictWorker binding we omit tag checks when using arguments marked as tagged. We only use this for workers and specialized versions of SpecConstr But we also check other functions during tidy and potentially turn some of them into call by value functions and mark some of their arguments as call-by-value by looking at argument unfoldings. NB: I choose to put the information into a new Id constructor since these are loaded at all optimization levels. This makes it trivial to ensure the additional calling convention demands are available at all call sites. Putting it into IdInfo would require us at the very least to always decode the IdInfo just to decide if we need to throw it away or not after. Note [Use CBV semantics only for join points and workers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A function with cbv-semantics requires arguments to be visible and if no arguments are visible requires us to eta-expand it's call site. That is for a binding with three cbv arguments like `w[WorkerLikeId[!,!,!]]` we would need to eta expand undersaturated occurences like `map w xs` into `map (\x1 x2 x3 -> w x1 x2 x3) xs. In experiments it turned out that the code size increase of doing so can outweigh the performance benefits of doing so. So we only do this for join points, workers and specialized functions (from SpecConstr). Join points are naturally always called saturated so this problem can't occur for them. For workers and specialized functions there are also always at least some applied arguments as we won't inline the wrapper/apply their rule if there are unapplied occurances like `map f xs`. 
-} -- | Recursive Selector Parent data RecSelParent = RecSelData TyCon | RecSelPatSyn PatSyn deriving Eq -- Either `TyCon` or `PatSyn` depending -- on the origin of the record selector. -- For a data type family, this is the -- /instance/ 'TyCon' not the family 'TyCon' instance Outputable RecSelParent where ppr p = case p of RecSelData ty_con -> ppr ty_con RecSelPatSyn ps -> ppr ps -- | Just a synonym for 'CoVarId'. Written separately so it can be -- exported in the hs-boot file. coVarDetails :: IdDetails coVarDetails = CoVarId -- | Check if an 'IdDetails' says 'CoVarId'. isCoVarDetails :: IdDetails -> Bool isCoVarDetails CoVarId = True isCoVarDetails _ = False isJoinIdDetails_maybe :: IdDetails -> Maybe (JoinArity, (Maybe [CbvMark])) isJoinIdDetails_maybe (JoinId join_arity marks) = Just (join_arity, marks) isJoinIdDetails_maybe _ = Nothing instance Outputable IdDetails where ppr = pprIdDetails pprIdDetails :: IdDetails -> SDoc pprIdDetails VanillaId = empty pprIdDetails other = brackets (pp other) where pp VanillaId = panic "pprIdDetails" pp (WorkerLikeId dmds) = text "StrictWorker" <> parens (ppr dmds) pp (DataConWorkId _) = text "DataCon" pp (DataConWrapId _) = text "DataConWrapper" pp (ClassOpId {}) = text "ClassOp" pp (PrimOpId _) = text "PrimOp" pp (FCallId _) = text "ForeignCall" pp (TickBoxOpId _) = text "TickBoxOp" pp (DFunId nt) = text "DFunId" <> ppWhen nt (text "(nt)") pp (RecSelId { sel_naughty = is_naughty }) = brackets $ text "RecSel" <> ppWhen is_naughty (text "(naughty)") pp CoVarId = text "CoVarId" pp (JoinId arity marks) = text "JoinId" <> parens (int arity) <> parens (ppr marks) {- ************************************************************************ * * \subsection{The main IdInfo type} * * ************************************************************************ -} -- | Identifier Information -- -- An 'IdInfo' gives /optional/ information about an 'Id'. 
If -- present it never lies, but it may not be present, in which case there -- is always a conservative assumption which can be made. -- -- Two 'Id's may have different info even though they have the same -- 'Unique' (and are hence the same 'Id'); for example, one might lack -- the properties attached to the other. -- -- Most of the 'IdInfo' gives information about the value, or definition, of -- the 'Id', independent of its usage. Exceptions to this -- are 'demandInfo', 'occInfo', 'oneShotInfo' and 'callArityInfo'. -- -- Performance note: when we update 'IdInfo', we have to reallocate this -- entire record, so it is a good idea not to let this data structure get -- too big. data IdInfo = IdInfo { ruleInfo :: RuleInfo, -- ^ Specialisations of the 'Id's function which exist. -- See Note [Specialisations and RULES in IdInfo] realUnfoldingInfo :: Unfolding, -- ^ The 'Id's unfolding inlinePragInfo :: InlinePragma, -- ^ Any inline pragma attached to the 'Id' occInfo :: OccInfo, -- ^ How the 'Id' occurs in the program dmdSigInfo :: DmdSig, -- ^ A strictness signature. Digests how a function uses its arguments -- if applied to at least 'arityInfo' arguments. cprSigInfo :: CprSig, -- ^ Information on whether the function will ultimately return a -- freshly allocated constructor. demandInfo :: Demand, -- ^ ID demand information bitfield :: {-# UNPACK #-} !BitField, -- ^ Bitfield packs CafInfo, OneShotInfo, arity info, LevityInfo, and -- call arity info in one 64-bit word. Packing these fields reduces size -- of `IdInfo` from 12 words to 7 words and reduces residency by almost -- 4% in some programs. See #17497 and associated MR. -- -- See documentation of the getters for what these packed fields mean. lfInfo :: !(Maybe LambdaFormInfo), -- ^ See Note [The LFInfo of Imported Ids] in GHC.StgToCmm.Closure -- See documentation of the getters for what these packed fields mean. tagSig :: !(Maybe TagSig) } -- | Encodes arities, OneShotInfo, CafInfo and LevityInfo. 
-- From least-significant to most-significant bits: -- -- - Bit 0 (1): OneShotInfo -- - Bit 1 (1): CafInfo -- - Bit 2 (1): LevityInfo -- - Bits 3-32(30): Call Arity info -- - Bits 33-62(30): Arity info -- newtype BitField = BitField Word64 emptyBitField :: BitField emptyBitField = BitField 0 bitfieldGetOneShotInfo :: BitField -> OneShotInfo bitfieldGetOneShotInfo (BitField bits) = if testBit bits 0 then OneShotLam else NoOneShotInfo bitfieldGetCafInfo :: BitField -> CafInfo bitfieldGetCafInfo (BitField bits) = if testBit bits 1 then NoCafRefs else MayHaveCafRefs bitfieldGetLevityInfo :: BitField -> LevityInfo bitfieldGetLevityInfo (BitField bits) = if testBit bits 2 then NeverLevityPolymorphic else NoLevityInfo bitfieldGetCallArityInfo :: BitField -> ArityInfo bitfieldGetCallArityInfo (BitField bits) = fromIntegral (bits `shiftR` 3) .&. ((1 `shiftL` 30) - 1) bitfieldGetArityInfo :: BitField -> ArityInfo bitfieldGetArityInfo (BitField bits) = fromIntegral (bits `shiftR` 33) bitfieldSetOneShotInfo :: OneShotInfo -> BitField -> BitField bitfieldSetOneShotInfo info (BitField bits) = case info of NoOneShotInfo -> BitField (clearBit bits 0) OneShotLam -> BitField (setBit bits 0) bitfieldSetCafInfo :: CafInfo -> BitField -> BitField bitfieldSetCafInfo info (BitField bits) = case info of MayHaveCafRefs -> BitField (clearBit bits 1) NoCafRefs -> BitField (setBit bits 1) bitfieldSetLevityInfo :: LevityInfo -> BitField -> BitField bitfieldSetLevityInfo info (BitField bits) = case info of NoLevityInfo -> BitField (clearBit bits 2) NeverLevityPolymorphic -> BitField (setBit bits 2) bitfieldSetCallArityInfo :: ArityInfo -> BitField -> BitField bitfieldSetCallArityInfo info bf@(BitField bits) = assert (info < 2^(30 :: Int) - 1) $ bitfieldSetArityInfo (bitfieldGetArityInfo bf) $ BitField ((fromIntegral info `shiftL` 3) .|. (bits .&. 
0b111)) bitfieldSetArityInfo :: ArityInfo -> BitField -> BitField bitfieldSetArityInfo info (BitField bits) = assert (info < 2^(30 :: Int) - 1) $ BitField ((fromIntegral info `shiftL` 33) .|. (bits .&. ((1 `shiftL` 33) - 1))) -- Getters -- | When applied, will this Id ever have a representation-polymorphic type? levityInfo :: IdInfo -> LevityInfo levityInfo = bitfieldGetLevityInfo . bitfield -- | Info about a lambda-bound variable, if the 'Id' is one oneShotInfo :: IdInfo -> OneShotInfo oneShotInfo = bitfieldGetOneShotInfo . bitfield -- | 'Id' arity, as computed by "GHC.Core.Opt.Arity". Specifies how many arguments -- this 'Id' has to be applied to before it does any meaningful work. arityInfo :: IdInfo -> ArityInfo arityInfo = bitfieldGetArityInfo . bitfield -- | 'Id' CAF info cafInfo :: IdInfo -> CafInfo cafInfo = bitfieldGetCafInfo . bitfield -- | How this is called. This is the number of arguments to which a binding can -- be eta-expanded without losing any sharing. n <=> all calls have at least n -- arguments callArityInfo :: IdInfo -> ArityInfo callArityInfo = bitfieldGetCallArityInfo . bitfield tagSigInfo :: IdInfo -> Maybe TagSig tagSigInfo = tagSig -- Setters setRuleInfo :: IdInfo -> RuleInfo -> IdInfo setRuleInfo info sp = sp `seq` info { ruleInfo = sp } setInlinePragInfo :: IdInfo -> InlinePragma -> IdInfo setInlinePragInfo info pr = pr `seq` info { inlinePragInfo = pr } setOccInfo :: IdInfo -> OccInfo -> IdInfo setOccInfo info oc = oc `seq` info { occInfo = oc } -- Try to avoid space leaks by seq'ing -- | Essentially returns the 'realUnfoldingInfo' field, but does not expose the -- unfolding of a strong loop breaker. -- -- This is the right thing to call if you plan to decide whether an unfolding -- will inline. 
unfoldingInfo :: IdInfo -> Unfolding unfoldingInfo info | isStrongLoopBreaker (occInfo info) = trimUnfolding $ realUnfoldingInfo info | otherwise = realUnfoldingInfo info setUnfoldingInfo :: IdInfo -> Unfolding -> IdInfo setUnfoldingInfo info uf = -- We don't seq the unfolding, as we generate intermediate -- unfoldings which are just thrown away, so evaluating them is a -- waste of time. -- seqUnfolding uf `seq` info { realUnfoldingInfo = uf } hasInlineUnfolding :: IdInfo -> Bool -- ^ True of a /non-loop-breaker/ Id that has a /stable/ unfolding that is -- (a) always inlined; that is, with an `UnfWhen` guidance, or -- (b) a DFunUnfolding which never needs to be inlined hasInlineUnfolding info = isInlineUnfolding (unfoldingInfo info) setArityInfo :: IdInfo -> ArityInfo -> IdInfo setArityInfo info ar = info { bitfield = bitfieldSetArityInfo ar (bitfield info) } setCallArityInfo :: IdInfo -> ArityInfo -> IdInfo setCallArityInfo info ar = info { bitfield = bitfieldSetCallArityInfo ar (bitfield info) } setCafInfo :: IdInfo -> CafInfo -> IdInfo setCafInfo info caf = info { bitfield = bitfieldSetCafInfo caf (bitfield info) } setLFInfo :: IdInfo -> LambdaFormInfo -> IdInfo setLFInfo info lf = info { lfInfo = Just lf } setTagSig :: IdInfo -> TagSig -> IdInfo setTagSig info sig = info { tagSig = Just sig } setOneShotInfo :: IdInfo -> OneShotInfo -> IdInfo setOneShotInfo info lb = info { bitfield = bitfieldSetOneShotInfo lb (bitfield info) } setDemandInfo :: IdInfo -> Demand -> IdInfo setDemandInfo info dd = dd `seq` info { demandInfo = dd } setDmdSigInfo :: IdInfo -> DmdSig -> IdInfo setDmdSigInfo info dd = dd `seq` info { dmdSigInfo = dd } setCprSigInfo :: IdInfo -> CprSig -> IdInfo setCprSigInfo info cpr = cpr `seq` info { cprSigInfo = cpr } -- | Basic 'IdInfo' that carries no useful information whatsoever vanillaIdInfo :: IdInfo vanillaIdInfo = IdInfo { ruleInfo = emptyRuleInfo, realUnfoldingInfo = noUnfolding, inlinePragInfo = defaultInlinePragma, occInfo = noOccInfo, 
demandInfo = topDmd, dmdSigInfo = nopSig, cprSigInfo = topCprSig, bitfield = bitfieldSetCafInfo vanillaCafInfo $ bitfieldSetArityInfo unknownArity $ bitfieldSetCallArityInfo unknownArity $ bitfieldSetOneShotInfo NoOneShotInfo $ bitfieldSetLevityInfo NoLevityInfo $ emptyBitField, lfInfo = Nothing, tagSig = Nothing } -- | More informative 'IdInfo' we can use when we know the 'Id' has no CAF references noCafIdInfo :: IdInfo noCafIdInfo = vanillaIdInfo `setCafInfo` NoCafRefs -- Used for built-in type Ids in GHC.Types.Id.Make. {- ************************************************************************ * * \subsection[arity-IdInfo]{Arity info about an @Id@} * * ************************************************************************ For locally-defined Ids, the code generator maintains its own notion of their arities; so it should not be asking... (but other things besides the code-generator need arity info!) Note [Arity and function types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The arity of an 'Id' must never exceed the number of arguments that can be read off from the 'Id's type, possibly after expanding newtypes. Examples: f1 :: forall a. a -> a idArity f1 <= 1: only one value argument, of type 'a' f2 :: forall a. Show a => Int -> a idArity f2 <= 2: two value arguments, of types 'Show a' and 'Int'. newtype Id a = MkId a f3 :: forall b. Id (Int -> b) idArity f3 <= 1: there is one value argument, of type 'Int', hidden under the newtype. newtype RecFun = MkRecFun (Int -> RecFun) f4 :: RecFun no constraint on the arity of f4: we can unwrap as many layers of the newtype as we want, to get arbitrarily many arguments of type 'Int'. -} -- | Arity Information -- -- An 'ArityInfo' of @n@ tells us that partial application of this -- 'Id' to up to @n-1@ value arguments does essentially no work. -- -- That is not necessarily the same as saying that it has @n@ leading -- lambdas, because coerces may get in the way. 
-- -- The arity might increase later in the compilation process, if -- an extra lambda floats up to the binding site. -- -- /Invariant:/ the 'Arity' of an 'Id' must never exceed the number of -- value arguments that appear in the type of the 'Id'. -- See Note [Arity and function types]. type ArityInfo = Arity -- | It is always safe to assume that an 'Id' has an arity of 0 unknownArity :: Arity unknownArity = 0 ppArityInfo :: Int -> SDoc ppArityInfo 0 = empty ppArityInfo n = hsep [text "Arity", int n] {- ************************************************************************ * * \subsection{Inline-pragma information} * * ************************************************************************ -} -- | Inline Pragma Information -- -- Tells when the inlining is active. -- When it is active the thing may be inlined, depending on how -- big it is. -- -- If there was an @INLINE@ pragma, then as a separate matter, the -- RHS will have been made to look small with a Core inline 'Note' -- -- The default 'InlinePragInfo' is 'AlwaysActive', so the info serves -- entirely as a way to inhibit inlining until we want it type InlinePragInfo = InlinePragma {- ************************************************************************ * * Strictness * * ************************************************************************ -} pprStrictness :: DmdSig -> SDoc pprStrictness sig = ppr sig {- ************************************************************************ * * RuleInfo * * ************************************************************************ Note [Specialisations and RULES in IdInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally speaking, a GlobalId has an *empty* RuleInfo. All their RULES are contained in the globally-built rule-base. In principle, one could attach the to M.f the RULES for M.f that are defined in M. But we don't do that for instance declarations and so we just treat them all uniformly. The EXCEPTION is PrimOpIds, which do have rules in their IdInfo. 
That is just for convenience really. However, LocalIds may have non-empty RuleInfo. We treat them differently because: a) they might be nested, in which case a global table won't work b) the RULE might mention free variables, which we use to keep things alive In GHC.Iface.Tidy, when the LocalId becomes a GlobalId, its RULES are stripped off and put in the global list. -} -- | Rule Information -- -- Records the specializations of this 'Id' that we know about -- in the form of rewrite 'CoreRule's that target them data RuleInfo = RuleInfo [CoreRule] DVarSet -- Locally-defined free vars of *both* LHS and RHS -- of rules. I don't think it needs to include the -- ru_fn though. -- Note [Rule dependency info] in "GHC.Core.Opt.OccurAnal" -- | Assume that no specializations exist: always safe emptyRuleInfo :: RuleInfo emptyRuleInfo = RuleInfo [] emptyDVarSet isEmptyRuleInfo :: RuleInfo -> Bool isEmptyRuleInfo (RuleInfo rs _) = null rs -- | Retrieve the locally-defined free variables of both the left and -- right hand sides of the specialization rules ruleInfoFreeVars :: RuleInfo -> DVarSet ruleInfoFreeVars (RuleInfo _ fvs) = fvs ruleInfoRules :: RuleInfo -> [CoreRule] ruleInfoRules (RuleInfo rules _) = rules -- | Change the name of the function the rule is keyed on all of the 'CoreRule's setRuleInfoHead :: Name -> RuleInfo -> RuleInfo setRuleInfoHead fn (RuleInfo rules fvs) = RuleInfo (map (setRuleIdName fn) rules) fvs {- ************************************************************************ * * \subsection[CG-IdInfo]{Code generator-related information} * * ************************************************************************ -} -- CafInfo is used to build Static Reference Tables (see simplStg/SRT.hs). -- | Constant applicative form Information -- -- Records whether an 'Id' makes Constant Applicative Form references data CafInfo = MayHaveCafRefs -- ^ Indicates that the 'Id' is for either: -- -- 1. 
A function or static constructor -- that refers to one or more CAFs, or -- -- 2. A real live CAF | NoCafRefs -- ^ A function or static constructor -- that refers to no CAFs. deriving (Eq, Ord) -- | Assumes that the 'Id' has CAF references: definitely safe vanillaCafInfo :: CafInfo vanillaCafInfo = MayHaveCafRefs mayHaveCafRefs :: CafInfo -> Bool mayHaveCafRefs MayHaveCafRefs = True mayHaveCafRefs _ = False instance Outputable CafInfo where ppr = ppCafInfo ppCafInfo :: CafInfo -> SDoc ppCafInfo NoCafRefs = text "NoCafRefs" ppCafInfo MayHaveCafRefs = empty {- ************************************************************************ * * \subsection{Bulk operations on IdInfo} * * ************************************************************************ -} -- | This is used to remove information on lambda binders that we have -- setup as part of a lambda group, assuming they will be applied all at once, -- but turn out to be part of an unsaturated lambda as in e.g: -- -- > (\x1. \x2. e) arg1 zapLamInfo :: IdInfo -> Maybe IdInfo zapLamInfo info@(IdInfo {occInfo = occ, demandInfo = demand}) | is_safe_occ occ && is_safe_dmd demand = Nothing | otherwise = Just (info {occInfo = safe_occ, demandInfo = topDmd}) where -- The "unsafe" occ info is the ones that say I'm not in a lambda -- because that might not be true for an unsaturated lambda is_safe_occ occ | isAlwaysTailCalled occ = False is_safe_occ (OneOcc { occ_in_lam = NotInsideLam }) = False is_safe_occ _other = True safe_occ = case occ of OneOcc{} -> occ { occ_in_lam = IsInsideLam , occ_tail = NoTailCallInfo } IAmALoopBreaker{} -> occ { occ_tail = NoTailCallInfo } _other -> occ is_safe_dmd dmd = not (isStrUsedDmd dmd) -- | Remove all demand info on the 'IdInfo' zapDemandInfo :: IdInfo -> Maybe IdInfo zapDemandInfo info = Just (info {demandInfo = topDmd}) -- | Remove usage (but not strictness) info on the 'IdInfo' zapUsageInfo :: IdInfo -> Maybe IdInfo zapUsageInfo info = Just (info {demandInfo = zapUsageDemand (demandInfo 
info)}) -- | Remove usage environment info from the strictness signature on the 'IdInfo' zapUsageEnvInfo :: IdInfo -> Maybe IdInfo zapUsageEnvInfo info | hasDemandEnvSig (dmdSigInfo info) = Just (info {dmdSigInfo = zapDmdEnvSig (dmdSigInfo info)}) | otherwise = Nothing zapUsedOnceInfo :: IdInfo -> Maybe IdInfo zapUsedOnceInfo info = Just $ info { dmdSigInfo = zapUsedOnceSig (dmdSigInfo info) , demandInfo = zapUsedOnceDemand (demandInfo info) } zapFragileInfo :: IdInfo -> Maybe IdInfo -- ^ Zap info that depends on free variables zapFragileInfo info@(IdInfo { occInfo = occ, realUnfoldingInfo = unf }) = new_unf `seq` -- The unfolding field is not (currently) strict, so we -- force it here to avoid a (zapFragileUnfolding unf) thunk -- which might leak space Just (info `setRuleInfo` emptyRuleInfo `setUnfoldingInfo` new_unf `setOccInfo` zapFragileOcc occ) where new_unf = zapFragileUnfolding unf zapFragileUnfolding :: Unfolding -> Unfolding -- ^ Zaps any core unfolding, but /preserves/ evaluated-ness, -- i.e. an unfolding of OtherCon zapFragileUnfolding unf -- N.B. isEvaldUnfolding catches *both* OtherCon [] *and* core unfoldings -- representing values. 
| isEvaldUnfolding unf = evaldUnfolding | otherwise = noUnfolding trimUnfolding :: Unfolding -> Unfolding -- Squash all unfolding info, preserving only evaluated-ness trimUnfolding unf | isEvaldUnfolding unf = evaldUnfolding | otherwise = noUnfolding zapTailCallInfo :: IdInfo -> Maybe IdInfo zapTailCallInfo info = case occInfo info of occ | isAlwaysTailCalled occ -> Just (info `setOccInfo` safe_occ) | otherwise -> Nothing where safe_occ = occ { occ_tail = NoTailCallInfo } zapCallArityInfo :: IdInfo -> IdInfo zapCallArityInfo info = setCallArityInfo info 0 {- ************************************************************************ * * \subsection{TickBoxOp} * * ************************************************************************ -} type TickBoxId = Int -- | Tick box for Hpc-style coverage data TickBoxOp = TickBox Module {-# UNPACK #-} !TickBoxId instance Outputable TickBoxOp where ppr (TickBox mod n) = text "tick" <+> ppr (mod,n) {- ************************************************************************ * * Levity * * ************************************************************************ Note [Levity info] ~~~~~~~~~~~~~~~~~~ Ids store whether or not they can be representation-polymorphic at any amount of saturation. This is helpful in optimizing representation polymorphism checks, allowing us to learn that something is not representation-polymorphic without actually figuring out its type. See exprHasFixedRuntimeRep in GHC.Core.Utils for where this info is used. Historical note: this was very important when representation polymorphism was checked in the desugarer (it was needed to prevent T5631 from blowing up). It's less important now that the checks happen in the typechecker, but remains useful. Refer to Note [The Concrete mechanism] in GHC.Tc.Utils.Concrete for details about the new approach being used. 
-} -- See Note [Levity info] data LevityInfo = NoLevityInfo -- always safe | NeverLevityPolymorphic deriving Eq instance Outputable LevityInfo where ppr NoLevityInfo = text "NoLevityInfo" ppr NeverLevityPolymorphic = text "NeverLevityPolymorphic" -- | Marks an IdInfo describing an Id that is never representation-polymorphic -- (even when applied). The Type is only there for checking that it's really -- never representation-polymorphic. setNeverRepPoly :: HasDebugCallStack => IdInfo -> Type -> IdInfo setNeverRepPoly info ty = assertPpr (resultHasFixedRuntimeRep ty) (ppr ty) $ info { bitfield = bitfieldSetLevityInfo NeverLevityPolymorphic (bitfield info) } setLevityInfoWithType :: IdInfo -> Type -> IdInfo setLevityInfoWithType info ty | resultHasFixedRuntimeRep ty = info { bitfield = bitfieldSetLevityInfo NeverLevityPolymorphic (bitfield info) } | otherwise = info isNeverRepPolyIdInfo :: IdInfo -> Bool isNeverRepPolyIdInfo info | NeverLevityPolymorphic <- levityInfo info = True | otherwise = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id/Make.hs0000644000000000000000000021452414472400113020515 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1998 This module contains definitions for the IdInfo for things that have a standard form, namely: - data constructors - record selectors - method and superclass selectors - primitive operations -} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Types.Id.Make ( mkDictFunId, mkDictFunTy, mkDictSelId, mkDictSelRhs, mkFCallId, unwrapNewTypeBody, wrapFamInstBody, DataConBoxer(..), vanillaDataConBoxer, mkDataConRep, mkDataConWorkId, DataConBangOpts (..), BangOpts (..), -- And some particular Ids; see below for why they are wired in wiredInIds, ghcPrimIds, realWorldPrimId, voidPrimId, voidArgId, nullAddrId, seqId, lazyId, lazyIdKey, coercionTokenId, coerceId, proxyHashId, noinlineId, noinlineIdName, coerceName, leftSectionName, rightSectionName, ) where import 
GHC.Prelude import GHC.Builtin.Types.Prim import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Core import GHC.Core.Type import GHC.Core.Multiplicity import GHC.Core.TyCo.Rep import GHC.Core.FamInstEnv import GHC.Core.Coercion import GHC.Core.Reduction import GHC.Core.Make import GHC.Core.FVs ( mkRuleInfo ) import GHC.Core.Utils ( exprType, mkCast, mkDefaultCase ) import GHC.Core.Unfold.Make import GHC.Core.SimpleOpt import GHC.Core.TyCon import GHC.Core.Class import GHC.Core.DataCon import GHC.Types.Literal import GHC.Types.SourceText import GHC.Types.Name.Set import GHC.Types.Name import GHC.Types.ForeignCall import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Demand import GHC.Types.Cpr import GHC.Types.Unique.Supply import GHC.Types.Basic hiding ( SuccessFlag(..) ) import GHC.Types.Var (VarBndr(Bndr)) import GHC.Tc.Utils.TcType as TcType import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Data.FastString import GHC.Data.List.SetOps {- ************************************************************************ * * \subsection{Wired in Ids} * * ************************************************************************ Note [Wired-in Ids] ~~~~~~~~~~~~~~~~~~~ A "wired-in" Id can be referred to directly in GHC (e.g. 'voidPrimId') rather than by looking it up its name in some environment or fetching it from an interface file. There are several reasons why an Id might appear in the wiredInIds: * ghcPrimIds: see Note [ghcPrimIds (aka pseudoops)] * magicIds: see Note [magicIds] * errorIds, defined in GHC.Core.Make. These error functions (e.g. rUNTIME_ERROR_ID) are wired in because the desugarer generates code that mentions them directly In all cases except ghcPrimIds, there is a definition site in a library module, which may be called (e.g. 
in higher order situations); but the wired-in version means that the details are
never read from that module's interface file; instead, the full definition is
right here.

Note [ghcPrimIds (aka pseudoops)]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ghcPrimIds

  * Are exported from GHC.Prim (see ghcPrimExports, used in ghcPrimInterface)
    See Note [GHC.Prim] in primops.txt.pp for the remaining items in GHC.Prim.

  * Can't be defined in Haskell, and hence no Haskell binding site,
    but have perfectly reasonable unfoldings in Core

  * Either have a CompulsoryUnfolding (hence always inlined), or
    have an EvaldUnfolding and void representation (e.g. realWorldPrimId)

  * Are (or should be) defined in primops.txt.pp as 'pseudoop'
    Reason: that's how we generate documentation for them

Note [magicIds]
~~~~~~~~~~~~~~~
The magicIds

  * Are exported from GHC.Magic

  * Can be defined in Haskell (and are, in ghc-prim:GHC/Magic.hs).
    This definition at least generates Haddock documentation for them.

  * May or may not have a CompulsoryUnfolding.

  * But have some special behaviour that can't be done via an
    unfolding from an interface file.

  * May have IdInfo that differs from what would be imported from GHC.Magic.hi.
    For example, 'lazy' gets a lazy strictness signature, per Note [lazyId magic].

The two remaining identifiers in GHC.Magic, runRW# and inline, are not listed
in magicIds: they have special behavior but they can be known-key and
not wired-in.

runRW#: see Note [Simplification of runRW#] in Prep, runRW# code in
Simplifier, Note [Linting of runRW#].
inline: see Note [inlineId magic] -} wiredInIds :: [Id] wiredInIds = magicIds ++ ghcPrimIds ++ errorIds -- Defined in GHC.Core.Make magicIds :: [Id] -- See Note [magicIds] magicIds = [lazyId, oneShotId, noinlineId] ghcPrimIds :: [Id] -- See Note [ghcPrimIds (aka pseudoops)] ghcPrimIds = [ realWorldPrimId , voidPrimId , nullAddrId , seqId , coerceId , proxyHashId , leftSectionId , rightSectionId ] {- ************************************************************************ * * \subsection{Data constructors} * * ************************************************************************ The wrapper for a constructor is an ordinary top-level binding that evaluates any strict args, unboxes any args that are going to be flattened, and calls the worker. We're going to build a constructor that looks like: data (Data a, C b) => T a b = T1 !a !Int b T1 = /\ a b -> \d1::Data a, d2::C b -> \p q r -> case p of { p -> case q of { q -> Con T1 [a,b] [p,q,r]}} Notice that * d2 is thrown away --- a context in a data decl is used to make sure one *could* construct dictionaries at the site the constructor is used, but the dictionary isn't actually used. * We have to check that we can construct Data dictionaries for the types a and Int. Once we've done that we can throw d1 away too. * We use (case p of q -> ...) to evaluate p, rather than "seq" because all that matters is that the arguments are evaluated. "seq" is very careful to preserve evaluation order, which we don't need to be here. You might think that we could simply give constructors some strictness info, like PrimOps, and let CoreToStg do the let-to-case transformation. But we don't do that because in the case of primops and functions strictness is a *property* not a *requirement*. In the case of constructors we need to do something active to evaluate the argument. Making an explicit case expression allows the simplifier to eliminate it in the (common) case where the constructor arg is already evaluated. 
Note [Wrappers for data instance tycons]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case of data instances, the wrapper also applies the coercion turning
the representation type into the family instance type to cast the result of
the wrapper.  For example, consider the declarations

  data family Map k :: * -> *
  data instance Map (a, b) v = MapPair (Map a (Pair b v))

The tycon to which the datacon MapPair belongs gets a unique internal name of
the form :R123Map, and we call it the representation tycon.  In contrast, Map
is the family tycon (accessible via tyConFamInst_maybe).  A coercion allows
you to move between representation and family type.  It is accessible from
:R123Map via tyConFamilyCoercion_maybe and has kind

  Co123Map a b v :: {Map (a, b) v ~ :R123Map a b v}

The wrapper and worker of MapPair get the types

  -- Wrapper
  $WMapPair :: forall a b v. Map a (Map a b v) -> Map (a, b) v
  $WMapPair a b v = MapPair a b v `cast` sym (Co123Map a b v)

  -- Worker
  MapPair :: forall a b v. Map a (Map a b v) -> :R123Map a b v

This coercion is conditionally applied by wrapFamInstBody.

It's a bit more complicated if the data instance is a GADT as well!

  data instance T [a] where
        T1 :: forall b. b -> T [Maybe b]

Hence we translate to

  -- Wrapper
  $WT1 :: forall b. b -> T [Maybe b]
  $WT1 b v = T1 (Maybe b) b (Maybe b) v `cast` sym (Co7T (Maybe b))

  -- Worker
  T1 :: forall c b. (c ~ Maybe b) => b -> :R7T c

  -- Coercion from family type to representation type
  Co7T a :: T [a] ~ :R7T a

Newtype instances throw an additional wrinkle into the mix. Consider the
following example (adapted from #15318, comment:2):

  data family T a
  newtype instance T [a] = MkT [a]

Within the newtype instance, there are three distinct types at play:

1. The newtype's underlying type, [a].
2. The instance's representation type, TList a (where TList is the
   representation tycon).
3. The family type, T [a].
We need two coercions in order to cast from (1) to (3): (a) A newtype coercion axiom: axiom coTList a :: TList a ~ [a] (Where TList is the representation tycon of the newtype instance.) (b) A data family instance coercion axiom: axiom coT a :: T [a] ~ TList a When we translate the newtype instance to Core, we obtain: -- Wrapper $WMkT :: forall a. [a] -> T [a] $WMkT a x = MkT a x |> Sym (coT a) -- Worker MkT :: forall a. [a] -> TList [a] MkT a x = x |> Sym (coTList a) Unlike for data instances, the worker for a newtype instance is actually an executable function which expands to a cast, but otherwise, the general strategy is essentially the same as for data instances. Also note that we have a wrapper, which is unusual for a newtype, but we make GHC produce one anyway for symmetry with the way data instances are handled. Note [Newtype datacons] ~~~~~~~~~~~~~~~~~~~~~~~ The "data constructor" for a newtype should always be vanilla. At one point this wasn't true, because the newtype arising from class C a => D a looked like newtype T:D a = D:D (C a) so the data constructor for T:C had a single argument, namely the predicate (C a). But now we treat that as an ordinary argument, not part of the theta-type, so all is well. Note [Newtype workers] ~~~~~~~~~~~~~~~~~~~~~~ A newtype does not really have a worker. Instead, newtype constructors just unfold into a cast. But we need *something* for, say, MkAge to refer to. So, we do this: * The Id used as the newtype worker will have a compulsory unfolding to a cast. See Note [Compulsory newtype unfolding] * This Id is labeled as a DataConWrapId. We don't want to use a DataConWorkId, as those have special treatment in the back end. * There is no top-level binding, because the compulsory unfolding means that it will be inlined (to a cast) at every call site. We probably should have a NewtypeWorkId, but these Ids disappear as soon as we desugar anyway, so it seems a step too far. 
Note [Compulsory newtype unfolding]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Newtype wrappers, just like workers, have compulsory unfoldings.
This is needed so that two optimizations involving newtypes have the same
effect whether a wrapper is present or not:

(1) Case-of-known constructor.
    See Note [beta-reduction in exprIsConApp_maybe].

(2) Matching against the map/coerce RULE. Suppose we have the RULE

    {-# RULE "map/coerce" map coerce = ... #-}

    As described in Note [Getting the map/coerce RULE to work],
    the occurrence of 'coerce' is transformed into:

    {-# RULE "map/coerce" forall (c :: T1 ~R# T2).
                          map ((\v -> v) `cast` c) = ... #-}

    We'd like 'map Age' to match the LHS. For this to happen, Age
    must be unfolded, otherwise we'll be stuck. This is tested in T16208.

It also allows for the possibility of representation-polymorphic newtypes
with wrappers (with -XUnliftedNewtypes):

  newtype N (a :: TYPE r) = MkN a

With -XUnliftedNewtypes, this is allowed -- even though MkN is representation-
polymorphic. It's OK because MkN evaporates in the compiled code, becoming
just a cast. That is, it has a compulsory unfolding. As long as its
argument is not representation-polymorphic (which it can't be, according to
Note [Representation polymorphism invariants] in GHC.Core), and it's saturated,
no representation-polymorphic code ends up in the code generator. The saturation
condition is effectively checked in
GHC.Tc.Gen.App.hasFixedRuntimeRep_remainingValArgs.

However, if we make a *wrapper* for a newtype, we get into trouble.
In that case, we generate a forbidden representation-polymorphic
binding, and we must then ensure that it is always instantiated
at a representation-monomorphic type.

The solution is simple, though: just make the newtype wrappers
as ephemeral as the newtype workers. In other words, give the wrappers
compulsory unfoldings and no bindings.
The compulsory unfolding is given in wrap_unf in mkDataConRep, and the lack of
a binding happens in GHC.Iface.Tidy.getTyConImplicitBinds, where we say that
a newtype has no implicit bindings.

Note [Records and linear types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All the fields, in a record constructor, are linear, because there is no syntax
to specify the type of a record field. There will be (see the proposal
https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0111-linear-types.rst#records-and-projections
), but it isn't implemented yet.

Projections of records can't be linear:

  data Foo = MkFoo { a :: A, b :: B }

If we had

  a :: Foo %1 -> A

We could write

  bad :: A %1 -> B %1 -> A
  bad x y = a (MkFoo { a=x, b=y })

There is an exception: if `b` (more generally all the fields besides `a`) is
unrestricted, then it is perfectly possible to have a linear projection. Such a
linear projection has a simple definition.

  data Bar = MkBar { c :: C, d % Many :: D }

  c :: Bar %1 -> C
  c MkBar{ c=x, d=_} = x

The `% Many` syntax, for records, does not exist yet. But there is one
important special case which already happens: when there is a single field
(usually a newtype).

  newtype Baz = MkBaz { unbaz :: E }

unbaz could be linear. And, in fact, it is linear in the proposal design.

However, this hasn't been implemented yet.


************************************************************************
*                                                                      *
\subsection{Dictionary selectors}
*                                                                      *
************************************************************************

Selecting a field for a dictionary. If there is just one field, then
there's nothing to do.

Dictionary selectors may get nested forall-types. Thus:

        class Foo a where
          op :: forall b. Ord b => a -> b -> b

Then the top-level type for op is

        op :: forall a. Foo a =>
              forall b. Ord b =>
              a -> b -> b

Note [Type classes and linear types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Constraints, in particular type classes, don't have attached linearity
information.
Implicitly, they are all unrestricted. See the linear types proposal,
https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0111-linear-types.rst .

When translating to core `C => ...` is always translated to an unrestricted
arrow `C % Many -> ...`.

Therefore there is no loss of generality if we make all selectors unrestricted.
-}

mkDictSelId :: Name          -- Name of one of the *value* selectors
                             -- (dictionary superclass or method)
            -> Class -> Id
-- ^ Build the Id for a class-op selector (superclass or method).
-- The result is a GlobalId with a ClassOpId details field; for non-newtype
-- classes it also carries a magic BuiltinRule (see 'dictSelRule' below).
mkDictSelId name clas
  = mkGlobalId (ClassOpId clas) name sel_ty info
  where
    tycon      = classTyCon clas
    sel_names  = map idName (classAllSelIds clas)
    new_tycon  = isNewTyCon tycon
    [data_con] = tyConDataCons tycon
                   -- Classes have exactly one data constructor; a partial
                   -- match here would be a compiler invariant violation
    tyvars     = dataConUserTyVarBinders data_con
    n_ty_args  = length tyvars
    arg_tys    = dataConRepArgTys data_con
                   -- Includes the dictionary superclasses
    val_index  = assoc "MkId.mkDictSelId" (sel_names `zip` [0..]) name
                   -- 0-indexed position of this selector among
                   -- (superclasses ++ methods)

    sel_ty = mkInvisForAllTys tyvars $
             mkInvisFunTyMany (mkClassPred clas (mkTyVarTys (binderVars tyvars))) $
             scaledThing (getNth arg_tys val_index)
               -- The dictionary arrow is unrestricted (Many):
               -- see Note [Type classes and linear types]

    base_info = noCafIdInfo
                `setArityInfo`          1
                `setDmdSigInfo`         strict_sig
                `setCprSigInfo`         topCprSig
                `setLevityInfoWithType` sel_ty

    info | new_tycon
         = base_info `setInlinePragInfo` alwaysInlinePragma
                     `setUnfoldingInfo`  mkInlineUnfoldingWithArity 1
                                           defaultSimpleOpts
                                           (mkDictSelRhs clas val_index)
                   -- See Note [Single-method classes] in GHC.Tc.TyCl.Instance
                   -- for why alwaysInlinePragma

         | otherwise
         = base_info `setRuleInfo` mkRuleInfo [rule]
                     `setInlinePragInfo` neverInlinePragma
                     `setUnfoldingInfo`  mkInlineUnfoldingWithArity 1
                                           defaultSimpleOpts
                                           (mkDictSelRhs clas val_index)
                   -- Add a magic BuiltinRule, but no unfolding
                   -- so that the rule is always available to fire.
                   -- See Note [ClassOp/DFun selection] in GHC.Tc.TyCl.Instance

    -- This is the built-in rule that goes
    --      op (dfT d1 d2) --->  opT d1 d2
    rule = BuiltinRule { ru_name  = fsLit "Class op " `appendFS`
                                      occNameFS (getOccName name)
                       , ru_fn    = name
                       , ru_nargs = n_ty_args + 1
                       , ru_try   = dictSelRule val_index n_ty_args }

        -- The strictness signature is of the form U(AAAVAAAA) -> T
        -- where the V depends on which item we are selecting
        -- It's worth giving one, so that absence info etc is generated
        -- even if the selector isn't inlined

    strict_sig = mkClosedDmdSig [arg_dmd] topDiv
    arg_dmd | new_tycon = evalDmd
            | otherwise = C_1N :* mkProd Unboxed dict_field_dmds
            where
              -- The evalDmd below is just a placeholder and will be replaced
              -- in GHC.Types.Demand.dmdTransformDictSel
              dict_field_dmds = [ if name == sel_name then evalDmd else absDmd
                                | sel_name <- sel_names ]

mkDictSelRhs :: Class
             -> Int         -- 0-indexed selector among (superclasses ++ methods)
             -> CoreExpr
-- ^ Build the RHS of a dictionary selector: a lambda over the class type
-- variables and the dictionary, selecting field 'val_index'.  For a
-- newtype class the "selection" is just an unwrapping cast.
mkDictSelRhs clas val_index
  = mkLams tyvars (Lam dict_id rhs_body)
  where
    tycon      = classTyCon clas
    new_tycon  = isNewTyCon tycon
    [data_con] = tyConDataCons tycon
    tyvars     = dataConUnivTyVars data_con
    arg_tys    = dataConRepArgTys data_con
                   -- Includes the dictionary superclasses

    the_arg_id = getNth arg_ids val_index
    pred       = mkClassPred clas (mkTyVarTys tyvars)
    dict_id    = mkTemplateLocal 1 pred
    arg_ids    = mkTemplateLocalsNum 2 (map scaledThing arg_tys)

    rhs_body | new_tycon = unwrapNewTypeBody tycon (mkTyVarTys tyvars)
                                             (Var dict_id)
             | otherwise = mkSingleAltCase (Var dict_id) dict_id (DataAlt data_con)
                                           arg_ids (varToCoreExpr the_arg_id)
                           -- varToCoreExpr needed for equality superclass selectors
                           --   sel a b d = case x of { MkC _ (g:a~b) _ -> CO g }

dictSelRule :: Int -> Arity -> RuleFun
-- Tries to persuade the argument to look like a constructor
-- application, using exprIsConApp_maybe, and then selects
-- from it
--       sel_i t1..tk (D t1..tk op1 ... opm) = opi
--
dictSelRule val_index n_ty_args _ id_unf _ args
  | (dict_arg : _) <- drop n_ty_args args
  , Just (_, floats, _, _, con_args) <- exprIsConApp_maybe id_unf dict_arg
  = Just (wrapFloats floats $ getNth con_args val_index)
  | otherwise
  = Nothing

{-
************************************************************************
*                                                                      *
        Data constructors
*                                                                      *
************************************************************************
-}

mkDataConWorkId :: Name -> DataCon -> Id
-- ^ Build the worker Id for a data constructor.  For newtypes the "worker"
-- is really a wrapper-like Id with a compulsory unfolding to a cast:
-- see Note [Newtype workers].
mkDataConWorkId wkr_name data_con
  | isNewTyCon tycon
  = mkGlobalId (DataConWrapId data_con) wkr_name wkr_ty nt_work_info
      -- See Note [Newtype workers]

  | otherwise
  = mkGlobalId (DataConWorkId data_con) wkr_name wkr_ty alg_wkr_info

  where
    tycon  = dataConTyCon data_con  -- The representation TyCon
    wkr_ty = dataConRepType data_con

    ----------- Workers for data types --------------
    alg_wkr_info = noCafIdInfo
                   `setArityInfo`          wkr_arity
                   `setInlinePragInfo`     wkr_inline_prag
                   `setUnfoldingInfo`      evaldUnfolding  -- Record that it's evaluated,
                                                           -- even if arity = 0
                   `setLevityInfoWithType` wkr_ty
                     -- NB: unboxed tuples have workers, so we can't use
                     -- setNeverRepPoly

    wkr_inline_prag = defaultInlinePragma { inl_rule = ConLike }
    wkr_arity = dataConRepArity data_con

    ----------- Workers for newtypes --------------
    univ_tvs = dataConUnivTyVars data_con
    arg_tys  = dataConRepArgTys data_con  -- Should be same as dataConOrigArgTys
    nt_work_info = noCafIdInfo          -- The NoCaf-ness is set by noCafIdInfo
                   `setArityInfo` 1     -- Arity 1
                   `setInlinePragInfo`     dataConWrapperInlinePragma
                   `setUnfoldingInfo`      newtype_unf
                   `setLevityInfoWithType` wkr_ty
    id_arg1     = mkScaledTemplateLocal 1 (head arg_tys)
    res_ty_args = mkTyCoVarTys univ_tvs
    newtype_unf = assertPpr (isVanillaDataCon data_con && isSingleton arg_tys)
                            (ppr data_con) $
                  -- Newtypes have only one field, and their data con must be
                  -- vanilla: see Note [Newtype datacons]
                  mkCompulsoryUnfolding defaultSimpleOpts $
                  mkLams univ_tvs $ Lam id_arg1 $
                  wrapNewTypeBody tycon res_ty_args (Var id_arg1)

{-
-------------------------------------------------
--         Data constructor representation
-- -- This is where we decide how to wrap/unwrap the -- constructor fields -- -------------------------------------------------- -} type Unboxer = Var -> UniqSM ([Var], CoreExpr -> CoreExpr) -- Unbox: bind rep vars by decomposing src var data Boxer = UnitBox | Boxer (TCvSubst -> UniqSM ([Var], CoreExpr)) -- Box: build src arg using these rep vars -- | Data Constructor Boxer newtype DataConBoxer = DCB ([Type] -> [Var] -> UniqSM ([Var], [CoreBind])) -- Bind these src-level vars, returning the -- rep-level vars to bind in the pattern vanillaDataConBoxer :: DataConBoxer -- No transformation on arguments needed vanillaDataConBoxer = DCB (\_tys args -> return (args, [])) {- Note [Inline partially-applied constructor wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We allow the wrapper to inline when partially applied to avoid boxing values unnecessarily. For example, consider data Foo a = Foo !Int a instance Traversable Foo where traverse f (Foo i a) = Foo i <$> f a This desugars to traverse f foo = case foo of Foo i# a -> let i = I# i# in map ($WFoo i) (f a) If the wrapper `$WFoo` is not inlined, we get a fruitless reboxing of `i`. But if we inline the wrapper, we get map (\a. case i of I# i# a -> Foo i# a) (f a) and now case-of-known-constructor eliminates the redundant allocation. -} data DataConBangOpts = FixedBangOpts [HsImplBang] -- ^ Used for imported data constructors -- See Note [Bangs on imported data constructors] | SrcBangOpts !BangOpts data BangOpts = BangOpts { bang_opt_strict_data :: !Bool -- ^ Strict fields by default , bang_opt_unbox_disable :: !Bool -- ^ Disable automatic field unboxing (e.g. 
if we aren't optimising) , bang_opt_unbox_strict :: !Bool -- ^ Unbox strict fields , bang_opt_unbox_small :: !Bool -- ^ Unbox small strict fields } mkDataConRep :: DataConBangOpts -> FamInstEnvs -> Name -> DataCon -> UniqSM DataConRep mkDataConRep dc_bang_opts fam_envs wrap_name data_con | not wrapper_reqd = return NoDataConRep | otherwise = do { wrap_args <- mapM (newLocal (fsLit "conrep")) wrap_arg_tys ; wrap_body <- mk_rep_app (wrap_args `zip` dropList eq_spec unboxers) initial_wrap_app ; let wrap_id = mkGlobalId (DataConWrapId data_con) wrap_name wrap_ty wrap_info wrap_info = noCafIdInfo `setArityInfo` wrap_arity -- It's important to specify the arity, so that partial -- applications are treated as values `setInlinePragInfo` wrap_prag `setUnfoldingInfo` wrap_unf `setDmdSigInfo` wrap_sig -- We need to get the CAF info right here because GHC.Iface.Tidy -- does not tidy the IdInfo of implicit bindings (like the wrapper) -- so it not make sure that the CAF info is sane `setLevityInfoWithType` wrap_ty wrap_sig = mkClosedDmdSig wrap_arg_dmds topDiv wrap_arg_dmds = replicate (length theta) topDmd ++ map mk_dmd arg_ibangs -- Don't forget the dictionary arguments when building -- the strictness signature (#14290). mk_dmd str | isBanged str = evalDmd | otherwise = topDmd wrap_prag = dataConWrapperInlinePragma `setInlinePragmaActivation` activateDuringFinal -- See Note [Activation for data constructor wrappers] -- The wrapper will usually be inlined (see wrap_unf), so its -- strictness and CPR info is usually irrelevant. But this is -- not always the case; GHC may choose not to inline it. In -- particular, the wrapper constructor is not inlined inside -- an INLINE rhs or when it is not applied to any arguments. -- See Note [Inline partially-applied constructor wrappers] -- Passing Nothing here allows the wrapper to inline when -- unsaturated. 
wrap_unf | isNewTyCon tycon = mkCompulsoryUnfolding defaultSimpleOpts wrap_rhs -- See Note [Compulsory newtype unfolding] | otherwise = mkInlineUnfolding defaultSimpleOpts wrap_rhs wrap_rhs = mkLams wrap_tvs $ mkLams wrap_args $ wrapFamInstBody tycon res_ty_args $ wrap_body ; return (DCR { dcr_wrap_id = wrap_id , dcr_boxer = mk_boxer boxers , dcr_arg_tys = rep_tys , dcr_stricts = rep_strs -- For newtypes, dcr_bangs is always [HsLazy]. -- See Note [HsImplBangs for newtypes]. , dcr_bangs = arg_ibangs }) } where (univ_tvs, ex_tvs, eq_spec, theta, orig_arg_tys, _orig_res_ty) = dataConFullSig data_con wrap_tvs = dataConUserTyVars data_con res_ty_args = substTyVars (mkTvSubstPrs (map eqSpecPair eq_spec)) univ_tvs tycon = dataConTyCon data_con -- The representation TyCon (not family) wrap_ty = dataConWrapperType data_con ev_tys = eqSpecPreds eq_spec ++ theta all_arg_tys = map unrestricted ev_tys ++ orig_arg_tys ev_ibangs = map (const HsLazy) ev_tys orig_bangs = dataConSrcBangs data_con wrap_arg_tys = (map unrestricted theta) ++ orig_arg_tys wrap_arity = count isCoVar ex_tvs + length wrap_arg_tys -- The wrap_args are the arguments *other than* the eq_spec -- Because we are going to apply the eq_spec args manually in the -- wrapper new_tycon = isNewTyCon tycon arg_ibangs | new_tycon = map (const HsLazy) orig_arg_tys -- See Note [HsImplBangs for newtypes] -- orig_arg_tys should be a singleton, but -- if a user declared a wrong newtype we -- detect this later (see test T2334A) | otherwise = case dc_bang_opts of SrcBangOpts bang_opts -> zipWith (dataConSrcToImplBang bang_opts fam_envs) orig_arg_tys orig_bangs FixedBangOpts bangs -> bangs (rep_tys_w_strs, wrappers) = unzip (zipWith dataConArgRep all_arg_tys (ev_ibangs ++ arg_ibangs)) (unboxers, boxers) = unzip wrappers (rep_tys, rep_strs) = unzip (concat rep_tys_w_strs) wrapper_reqd = (not new_tycon -- (Most) newtypes have only a worker, with the exception -- of some newtypes written with GADT syntax. See below. 
&& (any isBanged (ev_ibangs ++ arg_ibangs) -- Some forcing/unboxing (includes eq_spec) || (not $ null eq_spec))) -- GADT || isFamInstTyCon tycon -- Cast result || dataConUserTyVarsArePermuted data_con -- If the data type was written with GADT syntax and -- orders the type variables differently from what the -- worker expects, it needs a data con wrapper to reorder -- the type variables. -- See Note [Data con wrappers and GADT syntax]. initial_wrap_app = Var (dataConWorkId data_con) `mkTyApps` res_ty_args `mkVarApps` ex_tvs `mkCoApps` map (mkReflCo Nominal . eqSpecType) eq_spec mk_boxer :: [Boxer] -> DataConBoxer mk_boxer boxers = DCB (\ ty_args src_vars -> do { let (ex_vars, term_vars) = splitAtList ex_tvs src_vars subst1 = zipTvSubst univ_tvs ty_args subst2 = extendTCvSubstList subst1 ex_tvs (mkTyCoVarTys ex_vars) ; (rep_ids, binds) <- go subst2 boxers term_vars ; return (ex_vars ++ rep_ids, binds) } ) go _ [] src_vars = assertPpr (null src_vars) (ppr data_con) $ return ([], []) go subst (UnitBox : boxers) (src_var : src_vars) = do { (rep_ids2, binds) <- go subst boxers src_vars ; return (src_var : rep_ids2, binds) } go subst (Boxer boxer : boxers) (src_var : src_vars) = do { (rep_ids1, arg) <- boxer subst ; (rep_ids2, binds) <- go subst boxers src_vars ; return (rep_ids1 ++ rep_ids2, NonRec src_var arg : binds) } go _ (_:_) [] = pprPanic "mk_boxer" (ppr data_con) mk_rep_app :: [(Id,Unboxer)] -> CoreExpr -> UniqSM CoreExpr mk_rep_app [] con_app = return con_app mk_rep_app ((wrap_arg, unboxer) : prs) con_app = do { (rep_ids, unbox_fn) <- unboxer wrap_arg ; expr <- mk_rep_app prs (mkVarApps con_app rep_ids) ; return (unbox_fn expr) } dataConWrapperInlinePragma :: InlinePragma -- See Note [DataCon wrappers are conlike] dataConWrapperInlinePragma = alwaysInlineConLikePragma {- Note [Activation for data constructor wrappers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Activation on a data constructor wrapper allows it to inline only in FinalPhase. 
This way rules have a chance to fire if they mention a data constructor on the left RULE "foo" f (K a b) = ... Since the LHS of rules are simplified with InitialPhase, we won't inline the wrapper on the LHS either. On the other hand, this means that exprIsConApp_maybe must be able to deal with wrappers so that case-of-constructor is not delayed; see Note [exprIsConApp_maybe on data constructors with wrappers] for details. It used to activate in phases 2 (afterInitial) and later, but it makes it awkward to write a RULE[1] with a constructor on the left: it would work if a constructor has no wrapper, but whether a constructor has a wrapper depends, for instance, on the order of type argument of that constructors. Therefore changing the order of type argument could make previously working RULEs fail. See also https://gitlab.haskell.org/ghc/ghc/issues/15840 . Note [DataCon wrappers are conlike] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DataCon workers are clearly ConLike --- they are the “Con” in “ConLike”, after all --- but what about DataCon wrappers? Should they be marked ConLike, too? Yes, absolutely! As described in Note [CONLIKE pragma] in GHC.Types.Basic, isConLike influences GHC.Core.Utils.exprIsExpandable, which is used by both RULE matching and the case-of-known-constructor optimization. It’s crucial that both of those things can see applications of DataCon wrappers: * User-defined RULEs match on wrappers, not workers, so we might need to look through an unfolding built from a DataCon wrapper to determine if a RULE matches. * Likewise, if we have something like let x = $WC a b in ... case x of { C y z -> e } ... we still want to apply case-of-known-constructor. Therefore, it’s important that we consider DataCon wrappers conlike. This is especially true now that we don’t inline DataCon wrappers until the final simplifier phase; see Note [Activation for data constructor wrappers]. 
For further reading, see:

  * Note [Conlike is interesting] in GHC.Core.Op.Simplify.Utils
  * Note [Lone variables] in GHC.Core.Unfold
  * Note [exprIsConApp_maybe on data constructors with wrappers]
    in GHC.Core.SimpleOpt
  * #18012

Note [Bangs on imported data constructors]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We pass a DataConBangOpts to mkDataConRep to make use of HsImplBangs
from imported modules.

- SrcBangOpts opts  <=> compute the HsImplBangs from the HsSrcBangs,
                        guided by the BangOpts
- FixedBangOpts bangs <=> use these HsImplBangs directly

For imported types we can't work it all out from the HsSrcBangs,
because we want to be very sure to follow what the original module
(where the data type was declared) decided, and that depends on what
flags were enabled when it was compiled. So we record the decisions in
the interface file.

The HsImplBangs passed are in 1-1 correspondence with the
dataConOrigArgTys of the DataCon.

Note [Data con wrappers and unlifted types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
   data T = MkT !Int#

We certainly do not want to make a wrapper
   $WMkT x = case x of y { DEFAULT -> MkT y }

For a start, it's silly to generate a no-op. But worse, since wrappers
are currently injected at TidyCore, we don't even optimise it away!
So the stupid case expression stays there. This actually happened for
the Integer data type (see #1600 comment:66)!

Note [Data con wrappers and GADT syntax]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider these two very similar data types:

  data T1 a b = MkT1 b

  data T2 a b where
    MkT2 :: forall b a. b -> T2 a b

Despite their similar appearance, T2 will have a data con wrapper but T1 will
not. What sets them apart? The types of their constructors, which are:

  MkT1 :: forall a b. b -> T1 a b
  MkT2 :: forall b a. b -> T2 a b

MkT2's use of GADT syntax allows it to permute the order in which `a` and `b`
would normally appear. See Note [DataCon user type variable binders] in
GHC.Core.DataCon for further discussion on this topic.
The worker data cons for T1 and T2, however, both have types such that `a` is
expected to come before `b` as arguments. Because MkT2 permutes this order, it
needs a data con wrapper to swizzle around the type variables to be in the
order the worker expects.

A somewhat surprising consequence of this is that *newtypes* can have data con
wrappers! After all, a newtype can also be written with GADT syntax:

  newtype T3 a b where
    MkT3 :: forall b a. b -> T3 a b

Again, this needs a wrapper data con to reorder the type variables. It does
mean that this newtype constructor requires another level of indirection when
being called, but the inliner should make swift work of that.

Note [HsImplBangs for newtypes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Most of the time, we use the dataConSrcToImplBang function to decide what
strictness/unpackedness to use for the fields of a data type constructor. But
there is an exception to this rule: newtype constructors. You might not think
that newtypes would pose a challenge, since newtypes are seemingly forbidden
from having strictness annotations in the first place. But consider this
(from #16141):

  {-# LANGUAGE StrictData #-}
  {-# OPTIONS_GHC -O #-}
  newtype T a b where
    MkT :: forall b a. Int -> T a b

Because StrictData (plus optimization) is enabled, invoking
dataConSrcToImplBang would sneak in and unpack the field of type Int to Int#!
This would be disastrous, since the wrapper for `MkT` uses a coercion involving
Int, not Int#.

Bottom line: dataConSrcToImplBang should never be invoked for newtypes. In the
case of a newtype constructor, we simply hardcode its dcr_bangs field to
[HsLazy].
-}

-------------------------
-- | Conjure a fresh local binder.
newLocal :: FastString   -- ^ a string which will form part of the 'Var'\'s name
         -> Scaled Type  -- ^ the type of the 'Var'
         -> UniqSM Var
newLocal name_stem (Scaled w ty)
    = do { uniq <- getUniqueM
         ; return (mkSysLocalOrCoVar name_stem uniq w ty) }
         -- We should not have "OrCoVar" here, this is a bug (#17545)

-- | Unpack/Strictness decisions from source module.
--
-- This function should only ever be invoked for data constructor fields, and
-- never on the field of a newtype constructor.
-- See @Note [HsImplBangs for newtypes]@.
dataConSrcToImplBang
   :: BangOpts
   -> FamInstEnvs
   -> Scaled Type
   -> HsSrcBang
   -> HsImplBang

dataConSrcToImplBang bang_opts fam_envs arg_ty
                     (HsSrcBang ann unpk NoSrcStrict)
  | bang_opt_strict_data bang_opts -- StrictData => strict field
  = dataConSrcToImplBang bang_opts fam_envs arg_ty
                  (HsSrcBang ann unpk SrcStrict)
  | otherwise -- no StrictData => lazy field
  = HsLazy

dataConSrcToImplBang _ _ _ (HsSrcBang _ _ SrcLazy)
  = HsLazy

dataConSrcToImplBang bang_opts fam_envs arg_ty
                     (HsSrcBang _ unpk_prag SrcStrict)
  | isUnliftedType (scaledThing arg_ty)
    -- NB: non-newtype data constructors can't have representation-polymorphic
    -- fields, so this is OK.
  = HsLazy  -- For !Int#, say, use HsLazy
            -- See Note [Data con wrappers and unlifted types]

  | not (bang_opt_unbox_disable bang_opts) -- Don't unpack if disabled
  , let mb_co   = topNormaliseType_maybe fam_envs (scaledThing arg_ty)
                     -- Unwrap type families and newtypes
        arg_ty' = case mb_co of
                    { Just redn -> scaledSet arg_ty (reductionReducedType redn)
                    ; Nothing   -> arg_ty }
  , isUnpackableType bang_opts fam_envs (scaledThing arg_ty')
  , (rep_tys, _) <- dataConArgUnpack arg_ty'
  , case unpk_prag of
      NoSrcUnpack ->
        bang_opt_unbox_strict bang_opts
            || (bang_opt_unbox_small bang_opts
                && rep_tys `lengthAtMost` 1)  -- See Note [Unpack one-wide fields]
      srcUnpack -> isSrcUnpacked srcUnpack
  = case mb_co of
      Nothing   -> HsUnpack Nothing
      Just redn -> HsUnpack (Just $ reductionCoercion redn)

  | otherwise -- Record the strict-but-no-unpack decision
  = HsStrict

-- | Wrappers/Workers and representation following Unpack/Strictness
-- decisions
dataConArgRep
  :: Scaled Type
  -> HsImplBang
  -> ([(Scaled Type,StrictnessMark)] -- Rep types
     ,(Unboxer,Boxer))

dataConArgRep arg_ty HsLazy
  = ([(arg_ty, NotMarkedStrict)], (unitUnboxer, unitBoxer))

dataConArgRep arg_ty HsStrict
  = ([(arg_ty, MarkedStrict)], (seqUnboxer, unitBoxer))

dataConArgRep arg_ty (HsUnpack Nothing)
  | (rep_tys, wrappers) <- dataConArgUnpack arg_ty
  = (rep_tys, wrappers)

dataConArgRep (Scaled w _) (HsUnpack (Just co))
  | let co_rep_ty = coercionRKind co
  , (rep_tys, wrappers) <- dataConArgUnpack (Scaled w co_rep_ty)
  = (rep_tys, wrapCo co co_rep_ty wrappers)

-------------------------
wrapCo :: Coercion -> Type -> (Unboxer, Boxer) -> (Unboxer, Boxer)
-- ^ Compose an unbox/box pair with a cast through the given coercion,
-- so that unpacking can see through newtypes/type families.
wrapCo co rep_ty (unbox_rep, box_rep)  -- co :: arg_ty ~ rep_ty
  = (unboxer, boxer)
  where
    unboxer arg_id = do { rep_id <- newLocal (fsLit "cowrap_unbx")
                                             (Scaled (idMult arg_id) rep_ty)
                        ; (rep_ids, rep_fn) <- unbox_rep rep_id
                        ; let co_bind = NonRec rep_id (Var arg_id `Cast` co)
                        ; return (rep_ids, Let co_bind . rep_fn) }
    boxer = Boxer $ \ subst ->
            do { (rep_ids, rep_expr)
                    <- case box_rep of
                         UnitBox -> do { rep_id <- newLocal (fsLit "cowrap_bx")
                                                   (linear $ TcType.substTy subst rep_ty)
                                       ; return ([rep_id], Var rep_id) }
                         Boxer boxer -> boxer subst
               ; let sco = substCoUnchecked subst co
               ; return (rep_ids, rep_expr `Cast` mkSymCo sco) }

------------------------
-- | Unboxer that forces the argument with a default-only case.
seqUnboxer :: Unboxer
seqUnboxer v = return ([v], mkDefaultCase (Var v) v)

-- | Unboxer that leaves the argument untouched.
unitUnboxer :: Unboxer
unitUnboxer v = return ([v], \e -> e)

-- | Boxer that leaves the representation variable untouched.
unitBoxer :: Boxer
unitBoxer = UnitBox

-------------------------
dataConArgUnpack
   :: Scaled Type
   ->  ( [(Scaled Type, StrictnessMark)]   -- Rep types
       , (Unboxer, Boxer) )
-- ^ Compute the representation fields (and matching unbox/box code) for
-- an UNPACKed argument whose type must be a single-constructor
-- algebraic data type.  Panics otherwise (caller has already checked
-- with isUnpackableType, or the decision came from an interface file).
dataConArgUnpack (Scaled arg_mult arg_ty)
  | Just (tc, tc_args) <- splitTyConApp_maybe arg_ty
  , Just con <- tyConSingleAlgDataCon_maybe tc
      -- NB: check for an *algebraic* data type
      -- A recursive newtype might mean that
      -- 'arg_ty' is a newtype
  , let rep_tys = map (scaleScaled arg_mult) $ dataConInstArgTys con tc_args
  = assert (null (dataConExTyCoVars con))
      -- Note [Unpacking GADTs and existentials]
    ( rep_tys `zip` dataConRepStrictness con
    , ( \ arg_id ->
        do { rep_ids <- mapM (newLocal (fsLit "unbx")) rep_tys
           ; let r_mult = idMult arg_id
           ; let rep_ids' = map (scaleIdBy r_mult) rep_ids
           ; let unbox_fn body
                   = mkSingleAltCase (Var arg_id) arg_id
                                     (DataAlt con) rep_ids' body
           ; return (rep_ids, unbox_fn) }
      , Boxer $ \ subst ->
        do { rep_ids <- mapM (newLocal (fsLit "bx") . TcType.substScaledTyUnchecked subst) rep_tys
           ; return (rep_ids, Var (dataConWorkId con)
                              `mkTyApps` (substTysUnchecked subst tc_args)
                              `mkVarApps` rep_ids ) } ) )
  | otherwise
  = pprPanic "dataConArgUnpack" (ppr arg_ty)
    -- An interface file specified Unpacked, but we couldn't unpack it

isUnpackableType :: BangOpts -> FamInstEnvs -> Type -> Bool
-- True if we can unpack the argument type
-- See Note [Recursive unboxing]
-- We look "deeply" inside rather than relying on the DataCons
-- we encounter on the way, because otherwise we might well
-- end up relying on ourselves!
isUnpackableType bang_opts fam_envs ty
  | Just data_con <- unpackable_type ty
  = ok_con_args emptyNameSet data_con
  | otherwise
  = False
  where
    -- 'dcs' accumulates the DataCons seen on this path, so we can
    -- refuse to unbox a type recursively through itself
    ok_con_args dcs con
       | dc_name `elemNameSet` dcs
       = False
       | otherwise
       = all (ok_arg dcs')
             (dataConOrigArgTys con `zip` dataConSrcBangs con)
          -- NB: dataConSrcBangs gives the *user* request;
          -- We'd get a black hole if we used dataConImplBangs
       where
         dc_name = getName con
         dcs' = dcs `extendNameSet` dc_name

    ok_arg dcs (Scaled _ ty, bang)
      = not (attempt_unpack bang) || ok_ty dcs norm_ty
      where
        norm_ty = topNormaliseType fam_envs ty

    ok_ty dcs ty
      | Just data_con <- unpackable_type ty
      = ok_con_args dcs data_con
      | otherwise
      = True        -- NB True here, in contrast to False at top level

    attempt_unpack (HsSrcBang _ SrcUnpack NoSrcStrict)
      = bang_opt_strict_data bang_opts
    attempt_unpack (HsSrcBang _ SrcUnpack SrcStrict)
      = True
    attempt_unpack (HsSrcBang _ NoSrcUnpack SrcStrict)
      = True -- Be conservative
    attempt_unpack (HsSrcBang _ NoSrcUnpack NoSrcStrict)
      = bang_opt_strict_data bang_opts -- Be conservative
    attempt_unpack _ = False

    unpackable_type :: Type -> Maybe DataCon
    -- Works just on a single level
    unpackable_type ty
      | Just (tc, _) <- splitTyConApp_maybe ty
      , Just data_con <- tyConSingleAlgDataCon_maybe tc
      , null (dataConExTyCoVars data_con)
          -- See Note [Unpacking GADTs and existentials]
      = Just data_con
      | otherwise
      = Nothing

{-
Note [Unpacking GADTs and existentials]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is nothing stopping us unpacking a data type with equality
components, like
  data Equal a b where
    Equal :: Equal a a

And it'd be fine to unpack a product type with existential components
too, but that would require a bit more plumbing, so currently we don't.

So for now we require: null (dataConExTyCoVars data_con)
See #14978

Note [Unpack one-wide fields]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The flag UnboxSmallStrictFields ensures that any field that can
(safely) be unboxed to a word-sized unboxed field, should be so unboxed.
For example:

    data A = A Int#
    newtype B = B A
    data C = C !B
    data D = D !C
    data E = E !()
    data F = F !D
    data G = G !F !F

All of these should have an Int# as their representation, except
G which should have two Int#s.

However

    data T = T !(S Int)
    data S a = S !a

Here we can represent T with an Int#.

Note [Recursive unboxing]
~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
  data R = MkR {-# UNPACK #-} !S Int
  data S = MkS {-# UNPACK #-} !Int
The representation arguments of MkR are the *representation* arguments
of S (plus Int); the rep args of MkS are Int#.  This is all fine.

But be careful not to try to unbox this!
        data T = MkT {-# UNPACK #-} !T Int
Because then we'd get an infinite number of arguments.

Here is a more complicated case:
        data S = MkS {-# UNPACK #-} !T Int
        data T = MkT {-# UNPACK #-} !S Int
Each of S and T must decide independently whether to unpack
and they had better not both say yes. So they must both say no.

Also behave conservatively when there is no UNPACK pragma
        data T = MkS !T Int
with -funbox-strict-fields or -funbox-small-strict-fields
we need to behave as if there was an UNPACK pragma there.

But it's the *argument* type that matters. This is fine:
        data S = MkS S !Int
because Int is non-recursive.

************************************************************************
*                                                                      *
        Wrapping and unwrapping newtypes and type families
*                                                                      *
************************************************************************
-}

wrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
-- The wrapper for the data constructor for a newtype looks like this:
--      newtype T a = MkT (a,Int)
--      MkT :: forall a. (a,Int) -> T a
--      MkT = /\a. \(x:(a,Int)). x `cast` sym (CoT a)
-- where CoT is the coercion TyCon associated with the newtype
--
-- The call (wrapNewTypeBody T [a] e) returns the
-- body of the wrapper, namely
--      e `cast` (CoT [a])
--
-- If a coercion constructor is provided in the newtype, then we use
-- it, otherwise the wrap/unwrap are both no-ops
wrapNewTypeBody tycon args result_expr
  = assert (isNewTyCon tycon) $
    mkCast result_expr (mkSymCo co)
  where
    co = mkUnbranchedAxInstCo Representational (newTyConCo tycon) args []

-- When unwrapping, we do *not* apply any family coercion, because this will
-- be done via a CoPat by the type checker. We have to do it this way as
-- computing the right type arguments for the coercion requires more than just
-- a splitting operation (cf, GHC.Tc.Gen.Pat.tcConPat).

unwrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
unwrapNewTypeBody tycon args result_expr
  = assert (isNewTyCon tycon) $
    mkCast result_expr (mkUnbranchedAxInstCo Representational (newTyConCo tycon) args [])

-- If the type constructor is a representation type of a data instance, wrap
-- the expression into a cast adjusting the expression type, which is an
-- instance of the representation type, to the corresponding instance of the
-- family instance type.
-- See Note [Wrappers for data instance tycons]
wrapFamInstBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr
wrapFamInstBody tycon args body
  | Just co_con <- tyConFamilyCoercion_maybe tycon
  = mkCast body (mkSymCo (mkUnbranchedAxInstCo Representational co_con args []))
  | otherwise
  = body

{-
************************************************************************
*                                                                      *
*              Foreign calls
*                                                                      *
************************************************************************
-}

-- For each ccall we manufacture a separate CCallOpId, giving it
-- a fresh unique, a type that is correct for this particular ccall,
-- and a CCall structure that gives the correct details about calling
-- convention etc.
--
-- The *name* of this Id is a local name whose OccName gives the full
-- details of the ccall, type and all. This means that the interface
-- file reader can reconstruct a suitable Id

mkFCallId :: Unique -> ForeignCall -> Type -> Id
mkFCallId uniq fcall ty
  = assert (noFreeVarsOfType ty) $
    -- A CCallOpId should have no free type variables;
    -- when doing substitutions won't substitute over it
    mkGlobalId (FCallId fcall) name ty info
  where
    occ_str = renderWithContext defaultSDocContext (braces (ppr fcall <+> ppr ty))
    -- The "occurrence name" of a ccall is the full info about the
    -- ccall; it is encoded, but may have embedded spaces etc!

    name = mkFCallName uniq occ_str

    info = noCafIdInfo
           `setArityInfo`          arity
           `setDmdSigInfo`         strict_sig
           `setCprSigInfo`         topCprSig
           `setLevityInfoWithType` ty

    (bndrs, _) = tcSplitPiTys ty
    arity      = count isAnonTyCoBinder bndrs

    strict_sig = mkClosedDmdSig (replicate arity topDmd) topDiv
    -- the call does not claim to be strict in its arguments, since they
    -- may be lifted (foreign import prim) and the called code doesn't
    -- necessarily force them. See #11076.
{-
************************************************************************
*                                                                      *
\subsection{DictFuns and default methods}
*                                                                      *
************************************************************************

Note [Dict funs and default methods]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dict funs and default methods are *not* ImplicitIds.  Their definition
involves user-written code, so we can't figure out their strictness etc
based on fixed info, as we can for constructors and record selectors (say).

NB: See also Note [Exported LocalIds] in GHC.Types.Id
-}

mkDictFunId :: Name      -- Name to use for the dict fun;
            -> [TyVar]
            -> ThetaType
            -> Class
            -> [Type]
            -> Id
-- Implements the DFun Superclass Invariant (see GHC.Tc.TyCl.Instance)
-- See Note [Dict funs and default methods]
mkDictFunId dfun_name tvs theta clas tys
  = mkExportedLocalId (DFunId is_nt)
                      dfun_name
                      dfun_ty
  where
    -- is_nt: record whether the class is a one-method class compiled
    -- to a newtype; the DFunId detail carries this flag
    is_nt = isNewTyCon (classTyCon clas)
    dfun_ty = mkDictFunTy tvs theta clas tys

-- | The type of a dictionary function:
--   forall tvs. theta => C tys
mkDictFunTy :: [TyVar] -> ThetaType -> Class -> [Type] -> Type
mkDictFunTy tvs theta clas tys
  = mkSpecSigmaTy tvs theta (mkClassPred clas tys)

{-
************************************************************************
*                                                                      *
\subsection{Un-definable}
*                                                                      *
************************************************************************

These Ids can't be defined in Haskell.  They could be defined in
unfoldings in the wired-in GHC.Prim interface file, but we'd have to
ensure that they were definitely, definitely inlined, because there is
no curried identifier for them.  That's what mkCompulsoryUnfolding
does. Alternatively, we could add the definitions to mi_decls of ghcPrimIface
but it's not clear if this would be simpler.

coercionToken# is not listed in ghcPrimIds, since its type uses (~#)
which is not supposed to be used in expressions (GHC throws an assertion
failure when trying.)
-}

nullAddrName, seqName,
   realWorldName, voidPrimIdName, coercionTokenName,
   coerceName, proxyName,
   leftSectionName, rightSectionName :: Name
nullAddrName      = mkWiredInIdName gHC_PRIM  (fsLit "nullAddr#")      nullAddrIdKey      nullAddrId
seqName           = mkWiredInIdName gHC_PRIM  (fsLit "seq")            seqIdKey           seqId
realWorldName     = mkWiredInIdName gHC_PRIM  (fsLit "realWorld#")     realWorldPrimIdKey realWorldPrimId
voidPrimIdName    = mkWiredInIdName gHC_PRIM  (fsLit "void#")          voidPrimIdKey      voidPrimId
coercionTokenName = mkWiredInIdName gHC_PRIM  (fsLit "coercionToken#") coercionTokenIdKey coercionTokenId
coerceName        = mkWiredInIdName gHC_PRIM  (fsLit "coerce")         coerceKey          coerceId
proxyName         = mkWiredInIdName gHC_PRIM  (fsLit "proxy#")         proxyHashKey       proxyHashId
leftSectionName   = mkWiredInIdName gHC_PRIM  (fsLit "leftSection")    leftSectionKey     leftSectionId
rightSectionName  = mkWiredInIdName gHC_PRIM  (fsLit "rightSection")   rightSectionKey    rightSectionId

-- Names listed in magicIds; see Note [magicIds]
lazyIdName, oneShotName, noinlineIdName :: Name
lazyIdName        = mkWiredInIdName gHC_MAGIC (fsLit "lazy")           lazyIdKey          lazyId
oneShotName       = mkWiredInIdName gHC_MAGIC (fsLit "oneShot")        oneShotKey         oneShotId
noinlineIdName    = mkWiredInIdName gHC_MAGIC (fsLit "noinline")       noinlineIdKey      noinlineId

------------------------------------------------
proxyHashId :: Id
proxyHashId
  = pcMiscPrelId proxyName ty
       (noCafIdInfo `setUnfoldingInfo` evaldUnfolding -- Note [evaldUnfoldings]
                    `setNeverRepPoly`  ty)
  where
    -- proxy# :: forall {k} (a:k). Proxy# k a
    --
    -- The visibility of the `k` binder is Inferred to match the type of the
    -- Proxy data constructor (#16293).
    [kv,tv] = mkTemplateKiTyVars [liftedTypeKind] id
    kv_ty   = mkTyVarTy kv
    tv_ty   = mkTyVarTy tv
    ty      = mkInfForAllTy kv $ mkSpecForAllTy tv $ mkProxyPrimTy kv_ty tv_ty

------------------------------------------------
nullAddrId :: Id
-- nullAddr# :: Addr#
-- The reason it is here is because we don't provide
-- a way to write this literal in Haskell.
nullAddrId = pcMiscPrelId nullAddrName addrPrimTy info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding defaultSimpleOpts (Lit nullAddrLit)
                       `setNeverRepPoly`   addrPrimTy

------------------------------------------------
seqId :: Id     -- See Note [seqId magic]
seqId = pcMiscPrelId seqName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` inline_prag
                       `setUnfoldingInfo`  mkCompulsoryUnfolding defaultSimpleOpts rhs
                       `setArityInfo`      arity

    inline_prag
         = alwaysInlinePragma `setInlinePragmaActivation` ActiveAfter
                 NoSourceText 0
                  -- Make 'seq' not inline-always, so that simpleOptExpr
                  -- (see GHC.Core.Subst.simple_app) won't inline 'seq' on the
                  -- LHS of rules.  That way we can have rules for 'seq';
                  -- see Note [seqId magic]

    -- seq :: forall (r :: RuntimeRep) a (b :: TYPE r). a -> b -> b
    ty  =
      mkInfForAllTy runtimeRep2TyVar
      $ mkSpecForAllTys [alphaTyVar, openBetaTyVar]
      $ mkVisFunTyMany alphaTy (mkVisFunTyMany openBetaTy openBetaTy)

    -- The unfolding is just:  \r a b x y. case x of _ -> y
    -- i.e. evaluate the first argument, return the second.
    [x,y] = mkTemplateLocals [alphaTy, openBetaTy]
    rhs = mkLams ([runtimeRep2TyVar, alphaTyVar, openBetaTyVar, x, y]) $
          Case (Var x) x openBetaTy [Alt DEFAULT [] (Var y)]

    arity = 2

------------------------------------------------
lazyId :: Id    -- See Note [lazyId magic]
-- Deliberately has NO unfolding and NO strictness signature;
-- it is "inlined" by a special case in CorePrep instead.
lazyId = pcMiscPrelId lazyIdName ty info
  where
    info = noCafIdInfo `setNeverRepPoly` ty
    ty  = mkSpecForAllTys [alphaTyVar] (mkVisFunTyMany alphaTy alphaTy)

noinlineId :: Id -- See Note [noinlineId magic]
noinlineId = pcMiscPrelId noinlineIdName ty info
  where
    info = noCafIdInfo `setNeverRepPoly` ty
    ty  = mkSpecForAllTys [alphaTyVar] (mkVisFunTyMany alphaTy alphaTy)

------------------------------------------------
oneShotId :: Id -- See Note [The oneShot function]
oneShotId = pcMiscPrelId oneShotName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding defaultSimpleOpts rhs
                       `setArityInfo`      arity
    -- oneShot :: forall {r1 r2} (a::TYPE r1) (b::TYPE r2). (a -> b) -> a -> b
    ty  = mkInfForAllTys  [ runtimeRep1TyVar, runtimeRep2TyVar ] $
          mkSpecForAllTys [ openAlphaTyVar, openBetaTyVar ]      $
          mkVisFunTyMany fun_ty fun_ty
    fun_ty = mkTemplateFunTy
    fun_ty' = fun_ty
--                    (a %n-> b) -> a %n-> b
-- leftSection f x = f x
-- Important that it is eta-expanded, so that (leftSection undefined `seq` ())
--   is () and not undefined
-- Important that it is multiplicity-polymorphic (test linear/should_compile/OldList)
leftSectionId :: Id
leftSectionId = pcMiscPrelId leftSectionName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding defaultSimpleOpts rhs
                       `setArityInfo`      arity
    -- The overall type is derived from the (already multiplicity-annotated)
    -- body, rather than written out by hand.
    ty  = mkInfForAllTys  [runtimeRep1TyVar,runtimeRep2TyVar, multiplicityTyVar1] $
          mkSpecForAllTys [openAlphaTyVar,  openBetaTyVar]    $
          exprType body
    [f,x] = mkTemplateLocals [mkVisFunTy mult openAlphaTy openBetaTy, openAlphaTy]
    mult = mkTyVarTy multiplicityTyVar1 :: Mult
    xmult = setIdMult x mult
    rhs  = mkLams [ runtimeRep1TyVar, runtimeRep2TyVar, multiplicityTyVar1
                  , openAlphaTyVar,   openBetaTyVar   ] body
    -- Eta-expanded body: \f x. f x   (see comment above)
    body = mkLams [f,xmult] $ App (Var f) (Var xmult)
    arity = 2

-- See Note [Left and right sections] in GHC.Rename.Expr
-- See Note [Wired-in Ids for rebindable syntax]
--   rightSection :: forall r1 r2 r3 n1 n2 (a::TYPE r1) (b::TYPE r2) (c::TYPE r3).
--                     (a %n1 -> b %n2-> c) -> b %n2-> a %n1-> c
-- rightSection f y x = f x y
-- Again, multiplicity polymorphism is important
rightSectionId :: Id
rightSectionId = pcMiscPrelId rightSectionName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding defaultSimpleOpts rhs
                       `setArityInfo`      arity
    -- As with leftSectionId, the type is read off the body.
    ty  = mkInfForAllTys  [runtimeRep1TyVar,runtimeRep2TyVar,runtimeRep3TyVar
                          , multiplicityTyVar1, multiplicityTyVar2 ] $
          mkSpecForAllTys [openAlphaTyVar,  openBetaTyVar,   openGammaTyVar ]  $
          exprType body
    mult1 = mkTyVarTy multiplicityTyVar1
    mult2 = mkTyVarTy multiplicityTyVar2

    [f,x,y] = mkTemplateLocals [ mkVisFunTys [ Scaled mult1 openAlphaTy
                                             , Scaled mult2 openBetaTy ] openGammaTy
                               , openAlphaTy, openBetaTy ]
    xmult = setIdMult x mult1
    ymult = setIdMult y mult2
    rhs  = mkLams [ runtimeRep1TyVar, runtimeRep2TyVar, runtimeRep3TyVar
                  , multiplicityTyVar1, multiplicityTyVar2
                  , openAlphaTyVar,   openBetaTyVar,    openGammaTyVar ] body
    -- Note the argument order: the section takes y first, then x,
    -- but applies f to x before y  (rightSection f y x = f x y)
    body = mkLams [f,ymult,xmult] $ mkVarApps (Var f) [xmult,ymult]
    arity = 3

--------------------------------------------------------------------------------

coerceId :: Id
-- coerce :: forall {r} (a::TYPE r) (b::TYPE r). Coercible @(TYPE r) a b => a -> b
-- Unfolding: \r a b (d :: Coercible a b) (x :: a).
--              case d of MkCoercible (co :: a ~R# b) -> x |> co
coerceId = pcMiscPrelId coerceName ty info
  where
    info = noCafIdInfo `setInlinePragInfo` alwaysInlinePragma
                       `setUnfoldingInfo`  mkCompulsoryUnfolding defaultSimpleOpts rhs
                       `setArityInfo`      2
    eqRTy     = mkTyConApp coercibleTyCon  [ tYPE_r,         a, b ]
    eqRPrimTy = mkTyConApp eqReprPrimTyCon [ tYPE_r, tYPE_r, a, b ]
    ty        = mkInvisForAllTys [ Bndr rv InferredSpec
                                 , Bndr av SpecifiedSpec
                                 , Bndr bv SpecifiedSpec ] $
                mkInvisFunTyMany eqRTy $
                mkVisFunTyMany a b

    bndrs@[rv,av,bv] = mkTemplateKiTyVar runtimeRepTy
                        (\r -> [mkTYPEapp r, mkTYPEapp r])

    [r, a, b] = mkTyVarTys bndrs
    tYPE_r    = mkTYPEapp r

    [eqR,x,eq] = mkTemplateLocals [eqRTy, a, eqRPrimTy]
    rhs = mkLams (bndrs ++ [eqR, x]) $
          mkWildCase (Var eqR) (unrestricted eqRTy) b $
          [Alt (DataAlt coercibleDataCon) [eq] (Cast (Var x) (mkCoVarCo eq))]

{-
Note [seqId magic]
~~~~~~~~~~~~~~~~~~
'GHC.Prim.seq' is special in several
ways. a) Its fixity is set in GHC.Iface.Load.ghcPrimIface b) It has quite a bit of desugaring magic. See GHC.HsToCore.Utils Note [Desugaring seq] (1) and (2) and (3) c) There is some special rule handing: Note [User-defined RULES for seq] Historical note: In GHC.Tc.Gen.Expr we used to need a special typing rule for 'seq', to handle calls whose second argument had an unboxed type, e.g. x `seq` 3# However, with representation polymorphism we can now give seq the type seq :: forall (r :: RuntimeRep) a (b :: TYPE r). a -> b -> b which handles this case without special treatment in the typechecker. Note [User-defined RULES for seq] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Roman found situations where he had case (f n) of _ -> e where he knew that f (which was strict in n) would terminate if n did. Notice that the result of (f n) is discarded. So it makes sense to transform to case n of _ -> e Rather than attempt some general analysis to support this, I've added enough support that you can do this using a rewrite rule: RULE "f/seq" forall n. seq (f n) = seq n You write that rule. When GHC sees a case expression that discards its result, it mentally transforms it to a call to 'seq' and looks for a RULE. (This is done in GHC.Core.Opt.Simplify.trySeqRules.) As usual, the correctness of the rule is up to you. VERY IMPORTANT: to make this work, we give the RULE an arity of 1, not 2. If we wrote RULE "f/seq" forall n e. seq (f n) e = seq n e with rule arity 2, then two bad things would happen: - The magical desugaring done in Note [seqId magic] item (b) for saturated application of 'seq' would turn the LHS into a case expression! - The code in GHC.Core.Opt.Simplify.rebuildCase would need to actually supply the value argument, which turns out to be awkward. See also: Note [User-defined RULES for seq] in GHC.Core.Opt.Simplify. Note [lazyId magic] ~~~~~~~~~~~~~~~~~~~ lazy :: forall a. 
a -> a 'lazy' is used to make sure that a sub-expression, and its free variables, are truly used call-by-need, with no code motion. Key examples: * pseq: pseq a b = a `seq` lazy b We want to make sure that the free vars of 'b' are not evaluated before 'a', even though the expression is plainly strict in 'b'. * catch: catch a b = catch# (lazy a) b Again, it's clear that 'a' will be evaluated strictly (and indeed applied to a state token) but we want to make sure that any exceptions arising from the evaluation of 'a' are caught by the catch (see #11555). Implementing 'lazy' is a bit tricky: * It must not have a strictness signature: by being a built-in Id, all the info about lazyId comes from here, not from GHC.Magic.hi. This is important, because the strictness analyser will spot it as strict! * It must not have an unfolding: it gets "inlined" by a HACK in CorePrep. It's very important to do this inlining *after* unfoldings are exposed in the interface file. Otherwise, the unfolding for (say) pseq in the interface file will not mention 'lazy', so if we inline 'pseq' we'll totally miss the very thing that 'lazy' was there for in the first place. See #3259 for a real world example. * Suppose CorePrep sees (catch# (lazy e) b). At all costs we must avoid using call by value here: case e of r -> catch# r b Avoiding that is the whole point of 'lazy'. So in CorePrep (which generate the 'case' expression for a call-by-value call) we must spot the 'lazy' on the arg (in CorePrep.cpeApp), and build a 'let' instead. * lazyId is defined in GHC.Base, so we don't *have* to inline it. If it appears un-applied, we'll end up just calling it. Note [noinlineId magic] ~~~~~~~~~~~~~~~~~~~~~~~ 'noinline' is used to make sure that a function f is never inlined, e.g., as in 'noinline f x'. We won't inline f because we never inline lone variables (see Note [Lone variables] in GHC.Core.Unfold You might think that we could implement noinline like this: {-# NOINLINE #-} noinline :: forall a. 
a -> a noinline x = x But actually we give 'noinline' a wired-in name for three distinct reasons: 1. We don't want to leave a (useless) call to noinline in the final program, to be executed at runtime. So we have a little bit of magic to optimize away 'noinline' after we are done running the simplifier. This is done in GHC.CoreToStg.Prep.cpeApp. 2. 'noinline' sometimes gets inserted automatically when we serialize an expression to the interface format, in GHC.CoreToIface.toIfaceVar. See Note [Inlining and hs-boot files] in GHC.CoreToIface 3. Given foo :: Eq a => [a] -> Bool, the expression noinline foo x xs where x::Int, will naturally desugar to noinline @Int (foo @Int dEqInt) x xs But now it's entirely possible htat (foo @Int dEqInt) will inline foo, since 'foo' is no longer a lone variable -- see #18995 Solution: in the desugarer, rewrite noinline (f x y) ==> noinline f x y This is done in GHC.HsToCore.Utils.mkCoreAppDs. Note that noinline as currently implemented can hide some simplifications since it hides strictness from the demand analyser. Specifically, the demand analyser will treat 'noinline f x' as lazy in 'x', even if the demand signature of 'f' specifies that it is strict in its argument. We considered fixing this this by adding a special case to the demand analyser to address #16588. However, the special case seemed like a large and expensive hammer to address a rare case and consequently we rather opted to use a more minimal solution. Note [The oneShot function] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the context of making left-folds fuse somewhat okish (see ticket #7994 and Note [Left folds via right fold]) it was determined that it would be useful if library authors could explicitly tell the compiler that a certain lambda is called at most once. The oneShot function allows that. 'oneShot' is representation-polymorphic, i.e. the type variables can refer to unlifted types as well (#10744); e.g. 
oneShot (\x:Int# -> x +# 1#) Like most magic functions it has a compulsory unfolding, so there is no need for a real definition somewhere. We have one in GHC.Magic for the convenience of putting the documentation there. It uses `setOneShotLambda` on the lambda's binder. That is the whole magic: A typical call looks like oneShot (\y. e) after unfolding the definition `oneShot = \f \x[oneshot]. f x` we get (\f \x[oneshot]. f x) (\y. e) --> \x[oneshot]. ((\y.e) x) --> \x[oneshot] e[x/y] which is what we want. It is only effective if the one-shot info survives as long as possible; in particular it must make it into the interface in unfoldings. See Note [Preserve OneShotInfo] in GHC.Core.Tidy. Also see https://gitlab.haskell.org/ghc/ghc/wikis/one-shot. ------------------------------------------------------------- @realWorld#@ used to be a magic literal, \tr{void#}. If things get nasty as-is, change it back to a literal (@Literal@). voidArgId is a Local Id used simply as an argument in functions where we just want an arg to avoid having a thunk of unlifted type. E.g. x = \ void :: Void# -> (# p, q #) This comes up in strictness analysis Note [evaldUnfoldings] ~~~~~~~~~~~~~~~~~~~~~~ The evaldUnfolding makes it look that some primitive value is evaluated, which in turn makes Simplify.interestingArg return True, which in turn makes INLINE things applied to said value likely to be inlined. -} realWorldPrimId :: Id -- :: State# RealWorld realWorldPrimId = pcMiscPrelId realWorldName realWorldStatePrimTy (noCafIdInfo `setUnfoldingInfo` evaldUnfolding -- Note [evaldUnfoldings] `setOneShotInfo` stateHackOneShot `setNeverRepPoly` realWorldStatePrimTy) voidPrimId :: Id -- Global constant :: Void# -- The type Void# is now the same as (# #) (ticket #18441), -- this identifier just signifies the (# #) datacon -- and is kept for backwards compatibility. -- We cannot define it in normal Haskell, since it's -- a top-level unlifted value. 
voidPrimId = pcMiscPrelId voidPrimIdName unboxedUnitTy (noCafIdInfo `setUnfoldingInfo` mkCompulsoryUnfolding defaultSimpleOpts rhs `setNeverRepPoly` unboxedUnitTy) where rhs = Var (dataConWorkId unboxedUnitDataCon) voidArgId :: Id -- Local lambda-bound :: Void# voidArgId = mkSysLocal (fsLit "void") voidArgIdKey Many unboxedUnitTy coercionTokenId :: Id -- :: () ~# () coercionTokenId -- See Note [Coercion tokens] in "GHC.CoreToStg" = pcMiscPrelId coercionTokenName (mkTyConApp eqPrimTyCon [liftedTypeKind, liftedTypeKind, unitTy, unitTy]) noCafIdInfo pcMiscPrelId :: Name -> Type -> IdInfo -> Id pcMiscPrelId name ty info = mkVanillaGlobalWithInfo name ty info ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Literal.hs0000644000000000000000000012514314472400113020676 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 -} {-# LANGUAGE DeriveDataTypeable, ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE AllowAmbiguousTypes #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Core literals module GHC.Types.Literal ( -- * Main data type Literal(..) -- Exported to ParseIface , LitNumType(..) 
-- ** Creating Literals , mkLitInt, mkLitIntWrap, mkLitIntWrapC, mkLitIntUnchecked , mkLitWord, mkLitWordWrap, mkLitWordWrapC, mkLitWordUnchecked , mkLitInt8, mkLitInt8Wrap, mkLitInt8Unchecked , mkLitWord8, mkLitWord8Wrap, mkLitWord8Unchecked , mkLitInt16, mkLitInt16Wrap, mkLitInt16Unchecked , mkLitWord16, mkLitWord16Wrap, mkLitWord16Unchecked , mkLitInt32, mkLitInt32Wrap, mkLitInt32Unchecked , mkLitWord32, mkLitWord32Wrap, mkLitWord32Unchecked , mkLitInt64, mkLitInt64Wrap, mkLitInt64Unchecked , mkLitWord64, mkLitWord64Wrap, mkLitWord64Unchecked , mkLitFloat, mkLitDouble , mkLitChar, mkLitString , mkLitBigNat , mkLitNumber, mkLitNumberWrap -- ** Operations on Literals , literalType , pprLiteral , litNumIsSigned , litNumRange , litNumCheckRange , litNumWrap , litNumCoerce , litNumNarrow , litNumBitSize , isMinBound , isMaxBound -- ** Predicates on Literals and their contents , litIsDupable, litIsTrivial, litIsLifted , inCharRange , isZeroLit, isOneLit , litFitsInChar , litValue, mapLitValue , isLitValue_maybe, isLitRubbish -- ** Coercions , narrowInt8Lit, narrowInt16Lit, narrowInt32Lit, narrowInt64Lit , narrowWord8Lit, narrowWord16Lit, narrowWord32Lit, narrowWord64Lit , convertToIntLit, convertToWordLit , charToIntLit, intToCharLit , floatToIntLit, intToFloatLit, doubleToIntLit, intToDoubleLit , nullAddrLit, floatToDoubleLit, doubleToFloatLit ) where import GHC.Prelude import GHC.Builtin.Types.Prim import GHC.Core.TyCo.Rep ( RuntimeRepType ) import GHC.Core.Type import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Types.Basic import GHC.Utils.Binary import GHC.Settings.Constants import GHC.Platform import GHC.Utils.Panic import GHC.Utils.Encoding import Data.ByteString (ByteString) import Data.Int import Data.Word import Data.Char import Data.Data ( Data ) import GHC.Exts import Numeric ( fromRat ) {- ************************************************************************ * * \subsection{Literals} * * 
************************************************************************
-}

-- | So-called 'Literal's are one of:
--
--   * An unboxed numeric literal or floating-point literal which is presumed
--     to be surrounded by appropriate constructors (@Int#@, etc.), so that
--     the overall thing makes sense.
--
--     We maintain the invariant that the 'Integer' in the 'LitNumber'
--     constructor is actually in the (possibly target-dependent) range.
--     The mkLit{Int,Word}*Wrap smart constructors ensure this by applying
--     the target machine's wrapping semantics. Use these in situations
--     where you know the wrapping semantics are correct.
--
--   * The literal derived from the label mentioned in a \"foreign label\"
--     declaration ('LitLabel')
--
--   * A 'LitRubbish' to be used in place of values that are never used.
--
--   * A character
--   * A string
--   * The NULL pointer
--
data Literal
  = LitChar    Char             -- ^ @Char#@ - at least 31 bits. Create with
                                -- 'mkLitChar'

  | LitNumber !LitNumType !Integer
                                -- ^ Any numeric literal that can be
                                -- internally represented with an Integer.

  | LitString !ByteString       -- ^ A string-literal: stored and emitted
                                -- UTF-8 encoded, we'll arrange to decode it
                                -- at runtime.  Also emitted with a @\'\\0\'@
                                -- terminator. Create with 'mkLitString'

  | LitNullAddr                 -- ^ The @NULL@ pointer, the only pointer value
                                -- that can be represented as a Literal. Create
                                -- with 'nullAddrLit'

  | LitRubbish RuntimeRepType   -- ^ A nonsense value of the given
                                -- representation. See Note [Rubbish literals].
                                --
                                -- The Type argument, rr, is of kind RuntimeRep.
                                -- The type of the literal is forall (a:TYPE rr). a
                                --
                                -- INVARIANT: the Type has no free variables
                                --    and so substitution etc can ignore it

  | LitFloat   Rational         -- ^ @Float#@. Create with 'mkLitFloat'
  | LitDouble  Rational         -- ^ @Double#@. Create with 'mkLitDouble'

  | LitLabel FastString (Maybe Int) FunctionOrData
                -- ^ A label literal. Parameters:
                --
                -- 1) The name of the symbol mentioned in the
                --    declaration
                --
                -- 2) The size (in bytes) of the arguments
                --    the label expects. Only applicable with
                --    @stdcall@ labels. @Just x@ => @\<x\>@ will
                --    be appended to label name when emitting
                --    assembly.
                --
                -- 3) Flag indicating whether the symbol
                --    references a function or a data
  deriving Data

-- | Numeric literal type
data LitNumType
  = LitNumBigNat  -- ^ @Bignat@ (see Note [BigNum literals])
  | LitNumInt     -- ^ @Int#@ - according to target machine
  | LitNumInt8    -- ^ @Int8#@ - exactly 8 bits
  | LitNumInt16   -- ^ @Int16#@ - exactly 16 bits
  | LitNumInt32   -- ^ @Int32#@ - exactly 32 bits
  | LitNumInt64   -- ^ @Int64#@ - exactly 64 bits
  | LitNumWord    -- ^ @Word#@ - according to target machine
  | LitNumWord8   -- ^ @Word8#@ - exactly 8 bits
  | LitNumWord16  -- ^ @Word16#@ - exactly 16 bits
  | LitNumWord32  -- ^ @Word32#@ - exactly 32 bits
  | LitNumWord64  -- ^ @Word64#@ - exactly 64 bits
  deriving (Data,Enum,Eq,Ord)
  -- NB: the Enum instance is used by the Binary instance below, so the
  -- constructor order is part of the interface-file format.

-- | Indicate if a numeric literal type supports negative numbers
litNumIsSigned :: LitNumType -> Bool
litNumIsSigned nt = case nt of
  LitNumBigNat  -> False
  LitNumInt     -> True
  LitNumInt8    -> True
  LitNumInt16   -> True
  LitNumInt32   -> True
  LitNumInt64   -> True
  LitNumWord    -> False
  LitNumWord8   -> False
  LitNumWord16  -> False
  LitNumWord32  -> False
  LitNumWord64  -> False

-- | Number of bits ('Nothing' for arbitrary-precision 'LitNumBigNat')
litNumBitSize :: Platform -> LitNumType -> Maybe Word
litNumBitSize platform nt = case nt of
  LitNumBigNat  -> Nothing
  LitNumInt     -> Just (fromIntegral (platformWordSizeInBits platform))
  LitNumInt8    -> Just 8
  LitNumInt16   -> Just 16
  LitNumInt32   -> Just 32
  LitNumInt64   -> Just 64
  LitNumWord    -> Just (fromIntegral (platformWordSizeInBits platform))
  LitNumWord8   -> Just 8
  LitNumWord16  -> Just 16
  LitNumWord32  -> Just 32
  LitNumWord64  -> Just 64

instance Binary LitNumType where
   put_ bh numTyp = putByte bh (fromIntegral (fromEnum numTyp))
   get bh = do
      h <- getByte bh
      return (toEnum (fromIntegral h))

{- Note [BigNum literals]
~~~~~~~~~~~~~~~~~~~~~~
GHC supports 2
kinds of arbitrary precision numbers (a.k.a BigNum): * data Natural = NS Word# | NB BigNat# * data Integer = IS Int# | IN BigNat# | IP BigNat# In the past, we had Core constructors to represent Integer and Natural literals. These literals were then lowered into their real Core representation only in Core prep. The issue with this approach is that literals have two representations and we have to ensure that we handle them the same everywhere (in every optimisation, etc.). For example (0 :: Integer) was representable in Core with both: Lit (LitNumber LitNumInteger 0) -- literal App (Var integerISDataCon) (Lit (LitNumber LitNumInt 0)) -- real representation Nowadays we always use the real representation for Integer and Natural literals. However we still have two representations for BigNat# literals. BigNat# literals are still lowered in Core prep into a call to a constructor function (BigNat# is ByteArray# and we don't have ByteArray# literals yet so we have to build them at runtime). Note [String literals] ~~~~~~~~~~~~~~~~~~~~~~ String literals are UTF-8 encoded and stored into ByteStrings in the following ASTs: Haskell, Core, Stg, Cmm. TH can also emit ByteString based string literals with the BytesPrimL constructor (see #14741). It wasn't true before as [Word8] was used in Cmm AST and in TH which was quite bad for performance with large strings (see #16198 and #14741). To include string literals into output objects, the assembler code generator has to embed the UTF-8 encoded binary blob. See Note [Embedding large binary blobs] for more details. 
-}

instance Binary Literal where
    -- NB: the byte tags below are part of the interface-file format;
    -- do not renumber them.
    put_ bh (LitChar aa)     = do putByte bh 0; put_ bh aa
    put_ bh (LitString ab)   = do putByte bh 1; put_ bh ab
    put_ bh (LitNullAddr)    = putByte bh 2
    put_ bh (LitFloat ah)    = do putByte bh 3; put_ bh ah
    put_ bh (LitDouble ai)   = do putByte bh 4; put_ bh ai
    put_ bh (LitLabel aj mb fod)
        = do putByte bh 5
             put_ bh aj
             put_ bh mb
             put_ bh fod
    put_ bh (LitNumber nt i)
        = do putByte bh 6
             put_ bh nt
             put_ bh i
    put_ _ (LitRubbish b) = pprPanic "Binary LitRubbish" (ppr b)
     -- We use IfaceLitRubbish; see Note [Rubbish literals], item (6)

    get bh = do
            h <- getByte bh
            case h of
              0 -> do
                    aa <- get bh
                    return (LitChar aa)
              1 -> do
                    ab <- get bh
                    return (LitString ab)
              2 -> return (LitNullAddr)
              3 -> do
                    ah <- get bh
                    return (LitFloat ah)
              4 -> do
                    ai <- get bh
                    return (LitDouble ai)
              5 -> do
                    aj <- get bh
                    mb <- get bh
                    fod <- get bh
                    return (LitLabel aj mb fod)
              6 -> do
                    nt <- get bh
                    i <- get bh
                    return (LitNumber nt i)
              _ -> pprPanic "Binary:Literal" (int (fromIntegral h))

instance Outputable Literal where
    ppr = pprLiteral id

instance Eq Literal where
    a == b = compare a b == EQ

-- | Needed for the @Ord@ instance of 'AltCon', which in turn is needed in
-- 'GHC.Data.TrieMap.CoreMap'.
instance Ord Literal where
    compare = cmpLit

{-
        Construction
        ~~~~~~~~~~~~
-}

{- Note [Word/Int underflow/overflow]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
According to the Haskell Report 2010 (Sections 18.1 and 23.1 about signed and
unsigned integral types): "All arithmetic is performed modulo 2^n, where n is
the number of bits in the type."

GHC stores Word# and Int# constant values as Integer. Core optimizations such
as constant folding must ensure that the Integer value remains in the valid
target Word/Int range (see #13172). The following functions are used to
ensure this.

Note that we *don't* warn the user about overflow. It's not done at runtime
either, and compilation of completely harmless things like
   ((124076834 :: Word32) + (2147483647 :: Word32))
doesn't yield a warning.
Instead we simply squash the value into the *target* Int/Word range.
-}

-- | Make a literal number using wrapping semantics if the value is out of
-- bound.
mkLitNumberWrap :: Platform -> LitNumType -> Integer -> Literal
mkLitNumberWrap platform nt i = case nt of
  -- target-dependent types: pick the wrap width from the platform word size
  LitNumInt -> case platformWordSize platform of
    PW4 -> wrap @Int32
    PW8 -> wrap @Int64
  LitNumWord -> case platformWordSize platform of
    PW4 -> wrap @Word32
    PW8 -> wrap @Word64
  LitNumInt8    -> wrap @Int8
  LitNumInt16   -> wrap @Int16
  LitNumInt32   -> wrap @Int32
  LitNumInt64   -> wrap @Int64
  LitNumWord8   -> wrap @Word8
  LitNumWord16  -> wrap @Word16
  LitNumWord32  -> wrap @Word32
  LitNumWord64  -> wrap @Word64
  LitNumBigNat
    | i < 0     -> panic "mkLitNumberWrap: trying to create a negative BigNat"
    | otherwise -> LitNumber nt i
  where
    -- Wrap by round-tripping through the fixed-width type @a@
    -- (fromIntegral performs the modulo-2^n truncation).
    wrap :: forall a. (Integral a, Num a) => Literal
    wrap = LitNumber nt (toInteger (fromIntegral i :: a))

-- | Wrap a literal number according to its type using wrapping semantics.
litNumWrap :: Platform -> Literal -> Literal
litNumWrap platform (LitNumber nt i) = mkLitNumberWrap platform nt i
litNumWrap _        l                = pprPanic "litNumWrap" (ppr l)

-- | Coerce a literal number into another using wrapping semantics.
litNumCoerce :: LitNumType -> Platform -> Literal -> Literal
litNumCoerce pt platform (LitNumber _nt i) = mkLitNumberWrap platform pt i
litNumCoerce _  _        l                 = pprPanic "litNumWrapCoerce: not a number" (ppr l)

-- | Narrow a literal number by converting it into another number type and then
-- converting it back to its original type.
litNumNarrow :: LitNumType -> Platform -> Literal -> Literal
litNumNarrow pt platform (LitNumber nt i)
  = case mkLitNumberWrap platform pt i of
      LitNumber _ j -> mkLitNumberWrap platform nt j
      l             -> pprPanic "litNumNarrow: got invalid literal" (ppr l)
litNumNarrow _ _ l = pprPanic "litNumNarrow: invalid literal" (ppr l)

-- | Check that a given number is in the range of a numeric literal
litNumCheckRange :: Platform -> LitNumType -> Integer -> Bool
litNumCheckRange platform nt i =
     maybe True (i >=) m_lower
  && maybe True (i <=) m_upper
  where
    -- Nothing on either side means "unbounded in that direction"
    (m_lower, m_upper) = litNumRange platform nt

-- | Get the literal range
litNumRange :: Platform -> LitNumType -> (Maybe Integer, Maybe Integer)
litNumRange platform nt = case nt of
     LitNumInt    -> (Just (platformMinInt platform), Just (platformMaxInt platform))
     LitNumWord   -> (Just 0, Just (platformMaxWord platform))
     LitNumInt8   -> bounded_range @Int8
     LitNumInt16  -> bounded_range @Int16
     LitNumInt32  -> bounded_range @Int32
     LitNumInt64  -> bounded_range @Int64
     LitNumWord8  -> bounded_range @Word8
     LitNumWord16 -> bounded_range @Word16
     LitNumWord32 -> bounded_range @Word32
     LitNumWord64 -> bounded_range @Word64
     LitNumBigNat -> (Just 0, Nothing)   -- non-negative, unbounded above
  where
    bounded_range :: forall a . (Integral a, Bounded a) => (Maybe Integer,Maybe Integer)
    bounded_range = case boundedRange @a of
      (mi,ma) -> (Just mi, Just ma)

-- | Create a numeric 'Literal' of the given type.
-- Asserts (in debug builds) that the value is in range.
mkLitNumber :: Platform -> LitNumType -> Integer -> Literal
mkLitNumber platform nt i =
  assertPpr (litNumCheckRange platform nt i) (integer i)
  (LitNumber nt i)

-- | Creates a 'Literal' of type @Int#@
mkLitInt :: Platform -> Integer -> Literal
mkLitInt platform x = assertPpr (platformInIntRange platform x) (integer x)
                       (mkLitIntUnchecked x)

-- | Creates a 'Literal' of type @Int#@.
--   If the argument is out of the (target-dependent) range, it is wrapped.
--   See Note [Word/Int underflow/overflow]
mkLitIntWrap :: Platform -> Integer -> Literal
mkLitIntWrap platform i = mkLitNumberWrap platform LitNumInt i

-- | Creates a 'Literal' of type @Int#@ without checking its range.
mkLitIntUnchecked :: Integer -> Literal
mkLitIntUnchecked i = LitNumber LitNumInt i

-- | Creates a 'Literal' of type @Int#@, as well as a 'Bool'ean flag indicating
--   overflow. That is, if the argument is out of the (target-dependent) range
--   the argument is wrapped and the overflow flag will be set.
--   See Note [Word/Int underflow/overflow]
mkLitIntWrapC :: Platform -> Integer -> (Literal, Bool)
mkLitIntWrapC platform i = (n, i /= i')
  where
    -- Pattern is total here: mkLitIntWrap always builds a LitNumber
    n@(LitNumber _ i') = mkLitIntWrap platform i

-- | Creates a 'Literal' of type @Word#@
mkLitWord :: Platform -> Integer -> Literal
mkLitWord platform x = assertPpr (platformInWordRange platform x) (integer x)
                        (mkLitWordUnchecked x)

-- | Creates a 'Literal' of type @Word#@.
--   If the argument is out of the (target-dependent) range, it is wrapped.
--   See Note [Word/Int underflow/overflow]
mkLitWordWrap :: Platform -> Integer -> Literal
mkLitWordWrap platform i = mkLitNumberWrap platform LitNumWord i

-- | Creates a 'Literal' of type @Word#@ without checking its range.
mkLitWordUnchecked :: Integer -> Literal
mkLitWordUnchecked i = LitNumber LitNumWord i

-- | Creates a 'Literal' of type @Word#@, as well as a 'Bool'ean flag indicating
--   carry. That is, if the argument is out of the (target-dependent) range
--   the argument is wrapped and the carry flag will be set.
--   See Note [Word/Int underflow/overflow]
mkLitWordWrapC :: Platform -> Integer -> (Literal, Bool)
mkLitWordWrapC platform i = (n, i /= i')
  where
    n@(LitNumber _ i') = mkLitWordWrap platform i

-- | Creates a 'Literal' of type @Int8#@
mkLitInt8 :: Integer -> Literal
mkLitInt8 x = assertPpr (inBoundedRange @Int8 x) (integer x) (mkLitInt8Unchecked x)

-- | Creates a 'Literal' of type @Int8#@.
--   If the argument is out of the range, it is wrapped.
mkLitInt8Wrap :: Integer -> Literal mkLitInt8Wrap i = mkLitInt8Unchecked (toInteger (fromIntegral i :: Int8)) -- | Creates a 'Literal' of type @Int8#@ without checking its range. mkLitInt8Unchecked :: Integer -> Literal mkLitInt8Unchecked i = LitNumber LitNumInt8 i -- | Creates a 'Literal' of type @Word8#@ mkLitWord8 :: Integer -> Literal mkLitWord8 x = assertPpr (inBoundedRange @Word8 x) (integer x) (mkLitWord8Unchecked x) -- | Creates a 'Literal' of type @Word8#@. -- If the argument is out of the range, it is wrapped. mkLitWord8Wrap :: Integer -> Literal mkLitWord8Wrap i = mkLitWord8Unchecked (toInteger (fromIntegral i :: Word8)) -- | Creates a 'Literal' of type @Word8#@ without checking its range. mkLitWord8Unchecked :: Integer -> Literal mkLitWord8Unchecked i = LitNumber LitNumWord8 i -- | Creates a 'Literal' of type @Int16#@ mkLitInt16 :: Integer -> Literal mkLitInt16 x = assertPpr (inBoundedRange @Int16 x) (integer x) (mkLitInt16Unchecked x) -- | Creates a 'Literal' of type @Int16#@. -- If the argument is out of the range, it is wrapped. mkLitInt16Wrap :: Integer -> Literal mkLitInt16Wrap i = mkLitInt16Unchecked (toInteger (fromIntegral i :: Int16)) -- | Creates a 'Literal' of type @Int16#@ without checking its range. mkLitInt16Unchecked :: Integer -> Literal mkLitInt16Unchecked i = LitNumber LitNumInt16 i -- | Creates a 'Literal' of type @Word16#@ mkLitWord16 :: Integer -> Literal mkLitWord16 x = assertPpr (inBoundedRange @Word16 x) (integer x) (mkLitWord16Unchecked x) -- | Creates a 'Literal' of type @Word16#@. -- If the argument is out of the range, it is wrapped. mkLitWord16Wrap :: Integer -> Literal mkLitWord16Wrap i = mkLitWord16Unchecked (toInteger (fromIntegral i :: Word16)) -- | Creates a 'Literal' of type @Word16#@ without checking its range. 
mkLitWord16Unchecked :: Integer -> Literal mkLitWord16Unchecked i = LitNumber LitNumWord16 i -- | Creates a 'Literal' of type @Int32#@ mkLitInt32 :: Integer -> Literal mkLitInt32 x = assertPpr (inBoundedRange @Int32 x) (integer x) (mkLitInt32Unchecked x) -- | Creates a 'Literal' of type @Int32#@. -- If the argument is out of the range, it is wrapped. mkLitInt32Wrap :: Integer -> Literal mkLitInt32Wrap i = mkLitInt32Unchecked (toInteger (fromIntegral i :: Int32)) -- | Creates a 'Literal' of type @Int32#@ without checking its range. mkLitInt32Unchecked :: Integer -> Literal mkLitInt32Unchecked i = LitNumber LitNumInt32 i -- | Creates a 'Literal' of type @Word32#@ mkLitWord32 :: Integer -> Literal mkLitWord32 x = assertPpr (inBoundedRange @Word32 x) (integer x) (mkLitWord32Unchecked x) -- | Creates a 'Literal' of type @Word32#@. -- If the argument is out of the range, it is wrapped. mkLitWord32Wrap :: Integer -> Literal mkLitWord32Wrap i = mkLitWord32Unchecked (toInteger (fromIntegral i :: Word32)) -- | Creates a 'Literal' of type @Word32#@ without checking its range. mkLitWord32Unchecked :: Integer -> Literal mkLitWord32Unchecked i = LitNumber LitNumWord32 i -- | Creates a 'Literal' of type @Int64#@ mkLitInt64 :: Integer -> Literal mkLitInt64 x = assertPpr (inBoundedRange @Int64 x) (integer x) (mkLitInt64Unchecked x) -- | Creates a 'Literal' of type @Int64#@. -- If the argument is out of the range, it is wrapped. mkLitInt64Wrap :: Integer -> Literal mkLitInt64Wrap i = mkLitInt64Unchecked (toInteger (fromIntegral i :: Int64)) -- | Creates a 'Literal' of type @Int64#@ without checking its range. mkLitInt64Unchecked :: Integer -> Literal mkLitInt64Unchecked i = LitNumber LitNumInt64 i -- | Creates a 'Literal' of type @Word64#@ mkLitWord64 :: Integer -> Literal mkLitWord64 x = assertPpr (inBoundedRange @Word64 x) (integer x) (mkLitWord64Unchecked x) -- | Creates a 'Literal' of type @Word64#@. -- If the argument is out of the range, it is wrapped. 
mkLitWord64Wrap :: Integer -> Literal mkLitWord64Wrap i = mkLitWord64Unchecked (toInteger (fromIntegral i :: Word64)) -- | Creates a 'Literal' of type @Word64#@ without checking its range. mkLitWord64Unchecked :: Integer -> Literal mkLitWord64Unchecked i = LitNumber LitNumWord64 i -- | Creates a 'Literal' of type @Float#@ mkLitFloat :: Rational -> Literal mkLitFloat = LitFloat -- | Creates a 'Literal' of type @Double#@ mkLitDouble :: Rational -> Literal mkLitDouble = LitDouble -- | Creates a 'Literal' of type @Char#@ mkLitChar :: Char -> Literal mkLitChar = LitChar -- | Creates a 'Literal' of type @Addr#@, which is appropriate for passing to -- e.g. some of the \"error\" functions in GHC.Err such as @GHC.Err.runtimeError@ mkLitString :: String -> Literal -- stored UTF-8 encoded mkLitString [] = LitString mempty mkLitString s = LitString (utf8EncodeString s) mkLitBigNat :: Integer -> Literal mkLitBigNat x = assertPpr (x >= 0) (integer x) (LitNumber LitNumBigNat x) isLitRubbish :: Literal -> Bool isLitRubbish (LitRubbish {}) = True isLitRubbish _ = False inBoundedRange :: forall a. (Bounded a, Integral a) => Integer -> Bool inBoundedRange x = x >= toInteger (minBound :: a) && x <= toInteger (maxBound :: a) boundedRange :: forall a. 
(Bounded a, Integral a) => (Integer,Integer) boundedRange = (toInteger (minBound :: a), toInteger (maxBound :: a)) isMinBound :: Platform -> Literal -> Bool isMinBound _ (LitChar c) = c == minBound isMinBound platform (LitNumber nt i) = case nt of LitNumInt -> i == platformMinInt platform LitNumInt8 -> i == toInteger (minBound :: Int8) LitNumInt16 -> i == toInteger (minBound :: Int16) LitNumInt32 -> i == toInteger (minBound :: Int32) LitNumInt64 -> i == toInteger (minBound :: Int64) LitNumWord -> i == 0 LitNumWord8 -> i == 0 LitNumWord16 -> i == 0 LitNumWord32 -> i == 0 LitNumWord64 -> i == 0 LitNumBigNat -> i == 0 isMinBound _ _ = False isMaxBound :: Platform -> Literal -> Bool isMaxBound _ (LitChar c) = c == maxBound isMaxBound platform (LitNumber nt i) = case nt of LitNumInt -> i == platformMaxInt platform LitNumInt8 -> i == toInteger (maxBound :: Int8) LitNumInt16 -> i == toInteger (maxBound :: Int16) LitNumInt32 -> i == toInteger (maxBound :: Int32) LitNumInt64 -> i == toInteger (maxBound :: Int64) LitNumWord -> i == platformMaxWord platform LitNumWord8 -> i == toInteger (maxBound :: Word8) LitNumWord16 -> i == toInteger (maxBound :: Word16) LitNumWord32 -> i == toInteger (maxBound :: Word32) LitNumWord64 -> i == toInteger (maxBound :: Word64) LitNumBigNat -> False isMaxBound _ _ = False inCharRange :: Char -> Bool inCharRange c = c >= '\0' && c <= chr tARGET_MAX_CHAR -- | Tests whether the literal represents a zero of whatever type it is isZeroLit :: Literal -> Bool isZeroLit (LitNumber _ 0) = True isZeroLit (LitFloat 0) = True isZeroLit (LitDouble 0) = True isZeroLit _ = False -- | Tests whether the literal represents a one of whatever type it is isOneLit :: Literal -> Bool isOneLit (LitNumber _ 1) = True isOneLit (LitFloat 1) = True isOneLit (LitDouble 1) = True isOneLit _ = False -- | Returns the 'Integer' contained in the 'Literal', for when that makes -- sense, i.e. for 'Char' and numbers. 
litValue :: Literal -> Integer litValue l = case isLitValue_maybe l of Just x -> x Nothing -> pprPanic "litValue" (ppr l) -- | Returns the 'Integer' contained in the 'Literal', for when that makes -- sense, i.e. for 'Char' and numbers. isLitValue_maybe :: Literal -> Maybe Integer isLitValue_maybe (LitChar c) = Just $ toInteger $ ord c isLitValue_maybe (LitNumber _ i) = Just i isLitValue_maybe _ = Nothing -- | Apply a function to the 'Integer' contained in the 'Literal', for when that -- makes sense, e.g. for 'Char' and numbers. -- For fixed-size integral literals, the result will be wrapped in accordance -- with the semantics of the target type. -- See Note [Word/Int underflow/overflow] mapLitValue :: Platform -> (Integer -> Integer) -> Literal -> Literal mapLitValue _ f (LitChar c) = mkLitChar (fchar c) where fchar = chr . fromInteger . f . toInteger . ord mapLitValue platform f (LitNumber nt i) = mkLitNumberWrap platform nt (f i) mapLitValue _ _ l = pprPanic "mapLitValue" (ppr l) {- Coercions ~~~~~~~~~ -} charToIntLit, intToCharLit, floatToIntLit, intToFloatLit, doubleToIntLit, intToDoubleLit, floatToDoubleLit, doubleToFloatLit :: Literal -> Literal -- | Narrow a literal number (unchecked result range) narrowLit' :: forall a. 
Integral a => LitNumType -> Literal -> Literal narrowLit' nt' (LitNumber _ i) = LitNumber nt' (toInteger (fromInteger i :: a)) narrowLit' _ l = pprPanic "narrowLit" (ppr l) narrowInt8Lit, narrowInt16Lit, narrowInt32Lit, narrowInt64Lit, narrowWord8Lit, narrowWord16Lit, narrowWord32Lit, narrowWord64Lit :: Literal -> Literal narrowInt8Lit = narrowLit' @Int8 LitNumInt8 narrowInt16Lit = narrowLit' @Int16 LitNumInt16 narrowInt32Lit = narrowLit' @Int32 LitNumInt32 narrowInt64Lit = narrowLit' @Int64 LitNumInt64 narrowWord8Lit = narrowLit' @Word8 LitNumWord8 narrowWord16Lit = narrowLit' @Word16 LitNumWord16 narrowWord32Lit = narrowLit' @Word32 LitNumWord32 narrowWord64Lit = narrowLit' @Word64 LitNumWord64 -- | Extend or narrow a fixed-width literal (e.g. 'Int16#') to a target -- word-sized literal ('Int#' or 'Word#'). Narrowing can only happen on 32-bit -- architectures when we convert a 64-bit literal into a 32-bit one. convertToWordLit, convertToIntLit :: Platform -> Literal -> Literal convertToWordLit platform (LitNumber _nt i) = mkLitWordWrap platform i convertToWordLit _platform l = pprPanic "convertToWordLit" (ppr l) convertToIntLit platform (LitNumber _nt i) = mkLitIntWrap platform i convertToIntLit _platform l = pprPanic "convertToIntLit" (ppr l) charToIntLit (LitChar c) = mkLitIntUnchecked (toInteger (ord c)) charToIntLit l = pprPanic "charToIntLit" (ppr l) intToCharLit (LitNumber _ i) = LitChar (chr (fromInteger i)) intToCharLit l = pprPanic "intToCharLit" (ppr l) floatToIntLit (LitFloat f) = mkLitIntUnchecked (truncate f) floatToIntLit l = pprPanic "floatToIntLit" (ppr l) intToFloatLit (LitNumber _ i) = LitFloat (fromInteger i) intToFloatLit l = pprPanic "intToFloatLit" (ppr l) doubleToIntLit (LitDouble f) = mkLitIntUnchecked (truncate f) doubleToIntLit l = pprPanic "doubleToIntLit" (ppr l) intToDoubleLit (LitNumber _ i) = LitDouble (fromInteger i) intToDoubleLit l = pprPanic "intToDoubleLit" (ppr l) floatToDoubleLit (LitFloat f) = LitDouble f floatToDoubleLit l 
= pprPanic "floatToDoubleLit" (ppr l) doubleToFloatLit (LitDouble d) = LitFloat d doubleToFloatLit l = pprPanic "doubleToFloatLit" (ppr l) nullAddrLit :: Literal nullAddrLit = LitNullAddr {- Predicates ~~~~~~~~~~ -} -- | True if there is absolutely no penalty to duplicating the literal. -- False principally of strings. -- -- "Why?", you say? I'm glad you asked. Well, for one duplicating strings would -- blow up code sizes. Not only this, it's also unsafe. -- -- Consider a program that wants to traverse a string. One way it might do this -- is to first compute the Addr# pointing to the end of the string, and then, -- starting from the beginning, bump a pointer using eqAddr# to determine the -- end. For instance, -- -- @ -- -- Given pointers to the start and end of a string, count how many zeros -- -- the string contains. -- countZeros :: Addr# -> Addr# -> -> Int -- countZeros start end = go start 0 -- where -- go off n -- | off `addrEq#` end = n -- | otherwise = go (off `plusAddr#` 1) n' -- where n' | isTrue# (indexInt8OffAddr# off 0# ==# 0#) = n + 1 -- | otherwise = n -- @ -- -- Consider what happens if we considered strings to be trivial (and therefore -- duplicable) and emitted a call like @countZeros "hello"# ("hello"# -- `plusAddr`# 5)@. The beginning and end pointers do not belong to the same -- string, meaning that an iteration like the above would blow up terribly. -- This is what happened in #12757. -- -- Ultimately the solution here is to make primitive strings a bit more -- structured, ensuring that the compiler can't inline in ways that will break -- user code. One approach to this is described in #8472. litIsTrivial :: Literal -> Bool -- c.f. 
GHC.Core.Utils.exprIsTrivial litIsTrivial (LitString _) = False litIsTrivial (LitNumber nt _) = case nt of LitNumBigNat -> False LitNumInt -> True LitNumInt8 -> True LitNumInt16 -> True LitNumInt32 -> True LitNumInt64 -> True LitNumWord -> True LitNumWord8 -> True LitNumWord16 -> True LitNumWord32 -> True LitNumWord64 -> True litIsTrivial _ = True -- | True if code space does not go bad if we duplicate this literal litIsDupable :: Platform -> Literal -> Bool -- c.f. GHC.Core.Utils.exprIsDupable litIsDupable platform x = case x of LitNumber nt i -> case nt of LitNumBigNat -> i <= platformMaxWord platform * 8 -- arbitrary, reasonable LitNumInt -> True LitNumInt8 -> True LitNumInt16 -> True LitNumInt32 -> True LitNumInt64 -> True LitNumWord -> True LitNumWord8 -> True LitNumWord16 -> True LitNumWord32 -> True LitNumWord64 -> True LitString _ -> False _ -> True litFitsInChar :: Literal -> Bool litFitsInChar (LitNumber _ i) = i >= toInteger (ord minBound) && i <= toInteger (ord maxBound) litFitsInChar _ = False litIsLifted :: Literal -> Bool litIsLifted (LitNumber nt _) = case nt of LitNumBigNat -> True LitNumInt -> False LitNumInt8 -> False LitNumInt16 -> False LitNumInt32 -> False LitNumInt64 -> False LitNumWord -> False LitNumWord8 -> False LitNumWord16 -> False LitNumWord32 -> False LitNumWord64 -> False litIsLifted _ = False -- Even RUBBISH[LiftedRep] is unlifted, as rubbish values are always evaluated. 
{- Types ~~~~~ -} -- | Find the Haskell 'Type' the literal occupies literalType :: Literal -> Type literalType LitNullAddr = addrPrimTy literalType (LitChar _) = charPrimTy literalType (LitString _) = addrPrimTy literalType (LitFloat _) = floatPrimTy literalType (LitDouble _) = doublePrimTy literalType (LitLabel _ _ _) = addrPrimTy literalType (LitNumber lt _) = case lt of LitNumBigNat -> byteArrayPrimTy LitNumInt -> intPrimTy LitNumInt8 -> int8PrimTy LitNumInt16 -> int16PrimTy LitNumInt32 -> int32PrimTy LitNumInt64 -> int64PrimTy LitNumWord -> wordPrimTy LitNumWord8 -> word8PrimTy LitNumWord16 -> word16PrimTy LitNumWord32 -> word32PrimTy LitNumWord64 -> word64PrimTy -- LitRubbish: see Note [Rubbish literals] literalType (LitRubbish rep) = mkForAllTy a Inferred (mkTyVarTy a) where a = mkTemplateKindVar (mkTYPEapp rep) {- Comparison ~~~~~~~~~~ -} cmpLit :: Literal -> Literal -> Ordering cmpLit (LitChar a) (LitChar b) = a `compare` b cmpLit (LitString a) (LitString b) = a `compare` b cmpLit (LitNullAddr) (LitNullAddr) = EQ cmpLit (LitFloat a) (LitFloat b) = a `compare` b cmpLit (LitDouble a) (LitDouble b) = a `compare` b cmpLit (LitLabel a _ _) (LitLabel b _ _) = a `lexicalCompareFS` b cmpLit (LitNumber nt1 a) (LitNumber nt2 b) = (nt1 `compare` nt2) `mappend` (a `compare` b) cmpLit (LitRubbish b1) (LitRubbish b2) = b1 `nonDetCmpType` b2 cmpLit lit1 lit2 | isTrue# (dataToTag# lit1 <# dataToTag# lit2) = LT | otherwise = GT {- Printing ~~~~~~~~ * See Note [Printing of literals in Core] -} pprLiteral :: (SDoc -> SDoc) -> Literal -> SDoc pprLiteral _ (LitChar c) = pprPrimChar c pprLiteral _ (LitString s) = pprHsBytes s pprLiteral _ (LitNullAddr) = text "__NULL" pprLiteral _ (LitFloat f) = float (fromRat f) <> primFloatSuffix pprLiteral _ (LitDouble d) = double (fromRat d) <> primDoubleSuffix pprLiteral _ (LitNumber nt i) = case nt of LitNumBigNat -> integer i LitNumInt -> pprPrimInt i LitNumInt8 -> pprPrimInt8 i LitNumInt16 -> pprPrimInt16 i LitNumInt32 -> pprPrimInt32 i 
LitNumInt64 -> pprPrimInt64 i LitNumWord -> pprPrimWord i LitNumWord8 -> pprPrimWord8 i LitNumWord16 -> pprPrimWord16 i LitNumWord32 -> pprPrimWord32 i LitNumWord64 -> pprPrimWord64 i pprLiteral add_par (LitLabel l mb fod) = add_par (text "__label" <+> b <+> ppr fod) where b = case mb of Nothing -> pprHsString l Just x -> doubleQuotes (text (unpackFS l ++ '@':show x)) pprLiteral _ (LitRubbish rep) = text "RUBBISH" <> parens (ppr rep) {- Note [Printing of literals in Core] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The function `add_par` is used to wrap parenthesis around labels (`LitLabel`), if they occur in a context requiring an atomic thing (for example function application). Although not all Core literals would be valid Haskell, we are trying to stay as close as possible to Haskell syntax in the printing of Core, to make it easier for a Haskell user to read Core. To that end: * We do print parenthesis around negative `LitInteger`, because we print `LitInteger` using plain number literals (no prefix or suffix), and plain number literals in Haskell require parenthesis in contexts like function application (i.e. `1 - -1` is not valid Haskell). * We don't print parenthesis around other (negative) literals, because they aren't needed in GHC/Haskell either (i.e. `1# -# -1#` is accepted by GHC's parser). Literal Output Output if context requires an atom (if different) ------- ------- ---------------------- LitChar 'a'# LitString "aaa"# LitNullAddr "__NULL" LitInt -1# LitIntN -1#N LitWord 1## LitWordN 1##N LitFloat -1.0# LitDouble -1.0## LitBigNat 1 LitLabel "__label" ... ("__label" ...) LitRubbish "RUBBISH[...]" Note [Rubbish literals] ~~~~~~~~~~~~~~~~~~~~~~~ Sometimes, we need to cough up a rubbish value of a certain type that is used in place of dead code we thus aim to eliminate. The value of a dead occurrence has no effect on the dynamic semantics of the program, so we can pick any value of the same representation. 
Exploiting the results of absence analysis in worker/wrapper is a scenario where we need such a rubbish value, see examples in Note [Absent fillers] in GHC.Core.Opt.WorkWrap.Utils. It's completely undefined what the *value* of a rubbish value is, e.g., we could pick @0#@ for @Int#@ or @42#@; it mustn't matter where it's inserted into a Core program. We embed these rubbish values in the 'LitRubbish' case of the 'Literal' data type. Here are the moving parts: 1. Source Haskell: No way to produce rubbish lits in source syntax. Purely an IR feature. 2. Core: 'LitRubbish' carries a `Type` of kind RuntimeRep, describing the runtime representaion of the literal (is it a pointer, an unboxed Double#, or whatever). We have it that `RUBBISH[rr]` has type `forall (a :: TYPE rr). a`. See the `LitRubbish` case of `literalType`. The function GHC.Core.Make.mkLitRubbish makes a Core rubbish literal of a given type. It obeys the following invariants: INVARIANT 1: 'rr' has no free variables. Main reason: we don't need to run substitutions and free variable finders over Literal. The rules around levity/runtime-rep polymorphism naturally uphold this invariant. INVARIANT 2: we never make a rubbish literal of type (a ~# b). Reason: see Note [Core type and coercion invariant] in GHC.Core. We can't substitute a LitRubbish inside a coercion, so it's best not to make one. They are zero width anyway, so passing absent ones around costs nothing. If we wanted an absent filler of type (a ~# b) we should use (Coercion (UnivCo ...)), but it doesn't seem worth making a new UnivCoProvenance for this purpose. This is sad, though: see #18983. 3. STG: The type app in `RUBBISH[IntRep] @Int# :: Int#` is erased and we get the (untyped) 'StgLit' `RUBBISH[IntRep] :: Int#` in STG. 
It's treated mostly opaque, with the exception of the Unariser, where we take apart a case scrutinisation on, or arg occurrence of, e.g., `RUBBISH[TupleRep[IntRep,DoubleRep]]` (which may stand in for `(# Int#, Double# #)`) into its sub-parts `RUBBISH[IntRep]` and `RUBBISH[DoubleRep]`, similar to unboxed tuples. `RUBBISH[VoidRep]` is erased. See 'unariseRubbish_maybe' and also Note [Post-unarisation invariants]. 4. Cmm: We translate 'LitRubbish' to their actual rubbish value in 'cgLit'. The particulars are boring, and only matter when debugging illicit use of a rubbish value; see Modes of failure below. 5. Bytecode: In GHC.ByteCode.Asm we just lower it as a 0 literal, because it's all boxed to the host GC anyway. 6. IfaceSyn: `Literal` is part of `IfaceSyn`, but `Type` really isn't. So in the passage from Core to Iface I put LitRubbish into its owns IfaceExpr data constructor, IfaceLitRubbish. The remaining constructors of Literal are fine as IfaceSyn. Wrinkles a) Why do we put the `Type` (of kind RuntimeRep) inside the literal? Could we not instead /apply/ the literal to that RuntimeRep? Alas no, becuase then LitRubbish :: forall (rr::RuntimeRep) (a::TYPE rr). a and that's am ill-formed type because its kind is `TYPE rr`, which escapes the binding site of `rr`. Annoying. b) A rubbish literal is not bottom, and replies True to exprOkForSpeculation. For unboxed types there is no bottom anyway. If we have let (x::Int#) = RUBBISH[IntRep] @Int# we want to convert that to a case! We want to leave it as a let, and probably discard it as dead code soon after because x is unused. c) We can see a rubbish literal at the head of an application chain. Most obviously, pretty much every rubbish literal is the head of a type application e.g. `RUBBISH[IntRep] @Int#`. But see also Note [How a rubbish literal can be the head of an application] c) Literal is in Ord, because (and only because) we use Ord on AltCon when building a TypeMap. Annoying. 
We use `nonDetCmpType` here; the non-determinism won't matter because it's only used in TrieMap. Moreover, rubbish literals should not appear in patterns anyway. d) Why not lower LitRubbish in CoreToStg? Because it enables us to use LitRubbish when unarising unboxed sums in the future, and it allows rubbish values of e.g. VecRep, for which we can't cough up dummy values in STG. Modes of failure ---------------- Suppose there is a bug in GHC, and a rubbish value is used after all. That is undefined behavior, of course, but let us list a few examples for failure modes: a) For an value of unboxed numeric type like `Int#`, we just use a silly value like 42#. The error might propoagate indefinitely, hence we better pick a rather unique literal. Same for Word, Floats, Char and VecRep. b) For AddrRep (like String lits), we mit a null pointer, resulting in a definitive segfault when accessed. c) For boxed values, unlifted or not, we use a pointer to a fixed closure, like `()`, so that the GC has a pointer to follow. If we use that pointer as an 'Array#', we will likely access fields of the array that don't exist, and a seg-fault is likely, but not guaranteed. If we use that pointer as `Either Int Bool`, we might try to access the 'Int' field of the 'Left' constructor (which has the same ConTag as '()'), which doesn't exists. In the best case, we'll find an invalid pointer in its position and get a seg-fault, in the worst case the error manifests only one or two indirections later. Note [How a rubbish literal can be the head of an application] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this (#19824): h :: T3 -> Int -> blah h _ (I# n) = ... f :: (T1 -> T2 -> T3) -> T4 -> blah f g x = ....(h (g n s) x)... 
Demand analysis finds that h does not use its first argument, and w/w's h to {-# INLINE h #-} h a b = case b of I# n -> $wh n Demand analysis also finds that f does not use its first arg, so the worker for f look like $wf x = let g = RUBBISH in ....(h (g n s) x)... Now we inline g to get: $wf x = ....(h (RUBBISH n s) x)... And lo, until we inline `h`, we have that application of RUBBISH in $wf's RHS. But surely `h` will inline? Not if the arguments look boring. Well, RUBBISH doesn't look boring. But it could be a bit more complicated like f g x = let t = ...(g n s)... in ...(h t x)... and now the call looks more boring. Anyway, the point is that we might reasonably see RUBBISH at the head of an application chain. It would be fine to rewrite RUBBISH @(ta->tb->tr) a b ---> RUBBISH @tr but we don't currently do so. It is NOT ok to discard the entire continuation: case RUBBISH @ty of DEFAULT -> blah does not return RUBBISH! -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Meta.hs0000644000000000000000000000321714472400113020165 0ustar0000000000000000-- | Metaprogramming types module GHC.Types.Meta ( MetaRequest(..) 
, MetaHook , MetaResult -- data constructors not exported to ensure correct response type , metaRequestE , metaRequestP , metaRequestT , metaRequestD , metaRequestAW ) where import GHC.Prelude import GHC.Serialized ( Serialized ) import GHC.Hs -- | The supported metaprogramming result types data MetaRequest = MetaE (LHsExpr GhcPs -> MetaResult) | MetaP (LPat GhcPs -> MetaResult) | MetaT (LHsType GhcPs -> MetaResult) | MetaD ([LHsDecl GhcPs] -> MetaResult) | MetaAW (Serialized -> MetaResult) -- | data constructors not exported to ensure correct result type data MetaResult = MetaResE { unMetaResE :: LHsExpr GhcPs } | MetaResP { unMetaResP :: LPat GhcPs } | MetaResT { unMetaResT :: LHsType GhcPs } | MetaResD { unMetaResD :: [LHsDecl GhcPs] } | MetaResAW { unMetaResAW :: Serialized } type MetaHook f = MetaRequest -> LHsExpr GhcTc -> f MetaResult metaRequestE :: Functor f => MetaHook f -> LHsExpr GhcTc -> f (LHsExpr GhcPs) metaRequestE h = fmap unMetaResE . h (MetaE MetaResE) metaRequestP :: Functor f => MetaHook f -> LHsExpr GhcTc -> f (LPat GhcPs) metaRequestP h = fmap unMetaResP . h (MetaP MetaResP) metaRequestT :: Functor f => MetaHook f -> LHsExpr GhcTc -> f (LHsType GhcPs) metaRequestT h = fmap unMetaResT . h (MetaT MetaResT) metaRequestD :: Functor f => MetaHook f -> LHsExpr GhcTc -> f [LHsDecl GhcPs] metaRequestD h = fmap unMetaResD . h (MetaD MetaResD) metaRequestAW :: Functor f => MetaHook f -> LHsExpr GhcTc -> f Serialized metaRequestAW h = fmap unMetaResAW . 
h (MetaAW MetaResAW) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name.hs0000644000000000000000000007604314472400113020166 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TypeFamilies #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[Name]{@Name@: to transmit name info from renamer to typechecker} -} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name' is the type of names that have had their scoping and -- binding resolved. They have an 'OccName' but also a 'GHC.Types.Unique.Unique' -- that disambiguates Names that have the same 'OccName' and indeed is used for all -- 'Name' comparison. Names also contain information about where they originated -- from, see "GHC.Types.Name#name_sorts" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" -- -- #name_sorts# -- Names are one of: -- -- * External, if they name things declared in other modules. Some external -- Names are wired in, i.e. they name primitives defined in the compiler itself -- -- * Internal, if they name things in the module being compiled. 
Some internal -- Names are system names, if they are names manufactured by the compiler module GHC.Types.Name ( -- * The main types Name, -- Abstract BuiltInSyntax(..), -- ** Creating 'Name's mkSystemName, mkSystemNameAt, mkInternalName, mkClonedInternalName, mkDerivedInternalName, mkSystemVarName, mkSysTvName, mkFCallName, mkExternalName, mkWiredInName, -- ** Manipulating and deconstructing 'Name's nameUnique, setNameUnique, nameOccName, nameNameSpace, nameModule, nameModule_maybe, setNameLoc, tidyNameOcc, localiseName, nameSrcLoc, nameSrcSpan, pprNameDefnLoc, pprDefinedAt, pprFullName, pprTickyName, -- ** Predicates on 'Name's isSystemName, isInternalName, isExternalName, isTyVarName, isTyConName, isDataConName, isValName, isVarName, isDynLinkName, isWiredInName, isWiredIn, isBuiltInSyntax, isHoleName, wiredInNameTyThing_maybe, nameIsLocalOrFrom, nameIsExternalOrFrom, nameIsHomePackage, nameIsHomePackageImport, nameIsFromExternalPackage, stableNameCmp, -- * Class 'NamedThing' and overloaded friends NamedThing(..), getSrcLoc, getSrcSpan, getOccString, getOccFS, pprInfixName, pprPrefixName, pprModulePrefix, pprNameUnqualified, nameStableString, -- Re-export the OccName stuff module GHC.Types.Name.Occurrence ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.TyThing ( TyThing ) import GHC.Platform import GHC.Types.Name.Occurrence import GHC.Unit.Module import GHC.Unit.Home import GHC.Types.SrcLoc import GHC.Types.Unique import GHC.Utils.Misc import GHC.Data.Maybe import GHC.Utils.Binary import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic import Control.DeepSeq import Data.Data {- ************************************************************************ * * \subsection[Name-datatype]{The @Name@ datatype, and name construction} * * ************************************************************************ -} -- | A unique, unambiguous name for something, containing information about where -- that thing originated. 
data Name = Name { n_sort :: NameSort -- ^ What sort of name it is , n_occ :: OccName -- ^ Its occurrence name. -- -- NOTE: kept lazy to allow known names to be known constructor applications -- and to inline better. See Note [Fast comparison for built-in Names] , n_uniq :: {-# UNPACK #-} !Unique -- ^ Its unique. , n_loc :: !SrcSpan -- ^ Definition site -- -- NOTE: we make the n_loc field strict to eliminate some potential -- (and real!) space leaks, due to the fact that we don't look at -- the SrcLoc in a Name all that often. } -- See Note [About the NameSorts] data NameSort = External Module | WiredIn Module TyThing BuiltInSyntax -- A variant of External, for wired-in things | Internal -- A user-defined Id or TyVar -- defined in the module being compiled | System -- A system-defined Id or TyVar. Typically the -- OccName is very uninformative (like 's') instance Outputable NameSort where ppr (External _) = text "external" ppr (WiredIn _ _ _) = text "wired-in" ppr Internal = text "internal" ppr System = text "system" instance NFData Name where rnf Name{..} = rnf n_sort `seq` rnf n_occ `seq` n_uniq `seq` rnf n_loc instance NFData NameSort where rnf (External m) = rnf m rnf (WiredIn m t b) = rnf m `seq` t `seq` b `seq` () -- XXX this is a *lie*, we're not going to rnf the TyThing, but -- since the TyThings for WiredIn Names are all static they can't -- be hiding space leaks or errors. rnf Internal = () rnf System = () -- | BuiltInSyntax is for things like @(:)@, @[]@ and tuples, -- which have special syntactic forms. They aren't in scope -- as such. 
data BuiltInSyntax = BuiltInSyntax | UserSyntax {- Note [Fast comparison for built-in Names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this wired-in Name in GHC.Builtin.Names: int8TyConName = tcQual gHC_INT (fsLit "Int8") int8TyConKey Ultimately this turns into something like: int8TyConName = Name gHC_INT (mkOccName ..."Int8") int8TyConKey So a comparison like `x == int8TyConName` will turn into `getUnique x == int8TyConKey`, nice and efficient. But if the `n_occ` field is strict, that definition will look like: int8TyCOnName = case (mkOccName..."Int8") of occ -> Name gHC_INT occ int8TyConKey and now the comparison will not optimise. This matters even more when there are numerous comparisons (see #19386): if | tc == int8TyCon -> ... | tc == int16TyCon -> ... ...etc... when we would like to get a single multi-branched case. TL;DR: we make the `n_occ` field lazy. -} {- Note [About the NameSorts] ~~~~~~~~~~~~~~~~~~~~~~~~~~ 1. Initially, top-level Ids (including locally-defined ones) get External names, and all other local Ids get Internal names 2. In any invocation of GHC, an External Name for "M.x" has one and only one unique. This unique association is ensured via the Name Cache; see Note [The Name Cache] in GHC.Iface.Env. 3. Things with a External name are given C static labels, so they finally appear in the .o file's symbol table. They appear in the symbol table in the form M.n. If originally-local things have this property they must be made @External@ first. 4. In the tidy-core phase, a External that is not visible to an importer is changed to Internal, and a Internal that is visible is changed to External 5. 
A System Name differs in the following ways: a) has unique attached when printing dumps b) unifier eliminates sys tyvars in favour of user provs where possible Before anything gets printed in interface files or output code, it's fed through a 'tidy' processor, which zaps the OccNames to have unique names; and converts all sys-locals to user locals If any desugarer sys-locals have survived that far, they get changed to "ds1", "ds2", etc. Built-in syntax => It's a syntactic form, not "in scope" (e.g. []) Wired-in thing => The thing (Id, TyCon) is fully known to the compiler, not read from an interface file. E.g. Bool, True, Int, Float, and many others All built-in syntax is for wired-in things. -} instance HasOccName Name where occName = nameOccName nameUnique :: Name -> Unique nameOccName :: Name -> OccName nameNameSpace :: Name -> NameSpace nameModule :: HasDebugCallStack => Name -> Module nameSrcLoc :: Name -> SrcLoc nameSrcSpan :: Name -> SrcSpan nameUnique name = n_uniq name nameOccName name = n_occ name nameNameSpace name = occNameSpace (n_occ name) nameSrcLoc name = srcSpanStart (n_loc name) nameSrcSpan name = n_loc name {- ************************************************************************ * * \subsection{Predicates on names} * * ************************************************************************ -} isInternalName :: Name -> Bool isExternalName :: Name -> Bool isSystemName :: Name -> Bool isWiredInName :: Name -> Bool isWiredInName (Name {n_sort = WiredIn _ _ _}) = True isWiredInName _ = False isWiredIn :: NamedThing thing => thing -> Bool isWiredIn = isWiredInName . 
getName wiredInNameTyThing_maybe :: Name -> Maybe TyThing wiredInNameTyThing_maybe (Name {n_sort = WiredIn _ thing _}) = Just thing wiredInNameTyThing_maybe _ = Nothing isBuiltInSyntax :: Name -> Bool isBuiltInSyntax (Name {n_sort = WiredIn _ _ BuiltInSyntax}) = True isBuiltInSyntax _ = False isExternalName (Name {n_sort = External _}) = True isExternalName (Name {n_sort = WiredIn _ _ _}) = True isExternalName _ = False isInternalName name = not (isExternalName name) isHoleName :: Name -> Bool isHoleName = isHoleModule . nameModule -- | Will the 'Name' come from a dynamically linked package? isDynLinkName :: Platform -> Module -> Name -> Bool isDynLinkName platform this_mod name | Just mod <- nameModule_maybe name -- Issue #8696 - when GHC is dynamically linked, it will attempt -- to load the dynamic dependencies of object files at compile -- time for things like QuasiQuotes or -- TemplateHaskell. Unfortunately, this interacts badly with -- intra-package linking, because we don't generate indirect -- (dynamic) symbols for intra-package calls. This means that if a -- module with an intra-package call is loaded without its -- dependencies, then GHC fails to link. -- -- In the mean time, always force dynamic indirections to be -- generated: when the module name isn't the module being -- compiled, references are dynamic. = case platformOS platform of -- On Windows the hack for #8696 makes it unlinkable. -- As the entire setup of the code from Cmm down to the RTS expects -- the use of trampolines for the imported functions only when -- doing intra-package linking, e.g. referring to a symbol defined in the same -- package should not use a trampoline. -- I much rather have dynamic TH not supported than the entire Dynamic linking -- not due to a hack. -- Also not sure this would break on Windows anyway. 
OSMinGW32 -> moduleUnit mod /= moduleUnit this_mod -- For the other platforms, still perform the hack _ -> mod /= this_mod | otherwise = False -- no, it is not even an external name nameModule name = nameModule_maybe name `orElse` pprPanic "nameModule" (ppr (n_sort name) <+> ppr name) nameModule_maybe :: Name -> Maybe Module nameModule_maybe (Name { n_sort = External mod}) = Just mod nameModule_maybe (Name { n_sort = WiredIn mod _ _}) = Just mod nameModule_maybe _ = Nothing is_interactive_or_from :: Module -> Module -> Bool is_interactive_or_from from mod = from == mod || isInteractiveModule mod nameIsLocalOrFrom :: Module -> Name -> Bool -- ^ Returns True if the name is -- (a) Internal -- (b) External but from the specified module -- (c) External but from the 'interactive' package -- -- The key idea is that -- False means: the entity is defined in some other module -- you can find the details (type, fixity, instances) -- in some interface file -- those details will be stored in the EPT or HPT -- -- True means: the entity is defined in this module or earlier in -- the GHCi session -- you can find details (type, fixity, instances) in the -- TcGblEnv or TcLclEnv -- -- The isInteractiveModule part is because successive interactions of a GHCi session -- each give rise to a fresh module (Ghci1, Ghci2, etc), but they all come -- from the magic 'interactive' package; and all the details are kept in the -- TcLclEnv, TcGblEnv, NOT in the HPT or EPT. 
-- See Note [The interactive package] in "GHC.Runtime.Context" nameIsLocalOrFrom from name | Just mod <- nameModule_maybe name = is_interactive_or_from from mod | otherwise = True nameIsExternalOrFrom :: Module -> Name -> Bool -- ^ Returns True if the name is external or from the 'interactive' package -- See documentation of `nameIsLocalOrFrom` function nameIsExternalOrFrom from name | Just mod <- nameModule_maybe name = is_interactive_or_from from mod | otherwise = False nameIsHomePackage :: Module -> Name -> Bool -- True if the Name is defined in module of this package nameIsHomePackage this_mod = \nm -> case n_sort nm of External nm_mod -> moduleUnit nm_mod == this_pkg WiredIn nm_mod _ _ -> moduleUnit nm_mod == this_pkg Internal -> True System -> False where this_pkg = moduleUnit this_mod nameIsHomePackageImport :: Module -> Name -> Bool -- True if the Name is defined in module of this package -- /other than/ the this_mod nameIsHomePackageImport this_mod = \nm -> case nameModule_maybe nm of Nothing -> False Just nm_mod -> nm_mod /= this_mod && moduleUnit nm_mod == this_pkg where this_pkg = moduleUnit this_mod -- | Returns True if the Name comes from some other package: neither this -- package nor the interactive package. nameIsFromExternalPackage :: HomeUnit -> Name -> Bool nameIsFromExternalPackage home_unit name | Just mod <- nameModule_maybe name , notHomeModule home_unit mod -- Not the current unit , not (isInteractiveModule mod) -- Not the 'interactive' package = True | otherwise = False isTyVarName :: Name -> Bool isTyVarName name = isTvOcc (nameOccName name) isTyConName :: Name -> Bool isTyConName name = isTcOcc (nameOccName name) isDataConName :: Name -> Bool isDataConName name = isDataOcc (nameOccName name) isValName :: Name -> Bool isValName name = isValOcc (nameOccName name) isVarName :: Name -> Bool isVarName = isVarOcc . 
nameOccName isSystemName (Name {n_sort = System}) = True isSystemName _ = False {- ************************************************************************ * * \subsection{Making names} * * ************************************************************************ -} -- | Create a name which is (for now at least) local to the current module and hence -- does not need a 'Module' to disambiguate it from other 'Name's mkInternalName :: Unique -> OccName -> SrcSpan -> Name mkInternalName uniq occ loc = Name { n_uniq = uniq , n_sort = Internal , n_occ = occ , n_loc = loc } -- NB: You might worry that after lots of huffing and -- puffing we might end up with two local names with distinct -- uniques, but the same OccName. Indeed we can, but that's ok -- * the insides of the compiler don't care: they use the Unique -- * when printing for -ddump-xxx you can switch on -dppr-debug to get the -- uniques if you get confused -- * for interface files we tidyCore first, which makes -- the OccNames distinct when they need to be mkClonedInternalName :: Unique -> Name -> Name mkClonedInternalName uniq (Name { n_occ = occ, n_loc = loc }) = Name { n_uniq = uniq, n_sort = Internal , n_occ = occ, n_loc = loc } mkDerivedInternalName :: (OccName -> OccName) -> Unique -> Name -> Name mkDerivedInternalName derive_occ uniq (Name { n_occ = occ, n_loc = loc }) = Name { n_uniq = uniq, n_sort = Internal , n_occ = derive_occ occ, n_loc = loc } -- | Create a name which definitely originates in the given module mkExternalName :: Unique -> Module -> OccName -> SrcSpan -> Name {-# INLINE mkExternalName #-} -- WATCH OUT! 
External Names should be in the Name Cache -- (see Note [The Name Cache] in GHC.Iface.Env), so don't just call mkExternalName -- with some fresh unique without populating the Name Cache mkExternalName uniq mod occ loc = Name { n_uniq = uniq, n_sort = External mod, n_occ = occ, n_loc = loc } -- | Create a name which is actually defined by the compiler itself mkWiredInName :: Module -> OccName -> Unique -> TyThing -> BuiltInSyntax -> Name {-# INLINE mkWiredInName #-} mkWiredInName mod occ uniq thing built_in = Name { n_uniq = uniq, n_sort = WiredIn mod thing built_in, n_occ = occ, n_loc = wiredInSrcSpan } -- | Create a name brought into being by the compiler mkSystemName :: Unique -> OccName -> Name mkSystemName uniq occ = mkSystemNameAt uniq occ noSrcSpan mkSystemNameAt :: Unique -> OccName -> SrcSpan -> Name mkSystemNameAt uniq occ loc = Name { n_uniq = uniq, n_sort = System , n_occ = occ, n_loc = loc } mkSystemVarName :: Unique -> FastString -> Name mkSystemVarName uniq fs = mkSystemName uniq (mkVarOccFS fs) mkSysTvName :: Unique -> FastString -> Name mkSysTvName uniq fs = mkSystemName uniq (mkTyVarOccFS fs) -- | Make a name for a foreign call mkFCallName :: Unique -> String -> Name mkFCallName uniq str = mkInternalName uniq (mkVarOcc str) noSrcSpan -- The encoded string completely describes the ccall -- When we renumber/rename things, we need to be -- able to change a Name's Unique to match the cached -- one in the thing it's the name of. If you know what I mean. setNameUnique :: Name -> Unique -> Name setNameUnique name uniq = name {n_uniq = uniq} -- This is used for hsigs: we want to use the name of the originally exported -- entity, but edit the location to refer to the reexport site setNameLoc :: Name -> SrcSpan -> Name setNameLoc name loc = name {n_loc = loc} tidyNameOcc :: Name -> OccName -> Name -- We set the OccName of a Name when tidying -- In doing so, we change System --> Internal, so that when we print -- it we don't get the unique by default. 
It's tidy now! tidyNameOcc name@(Name { n_sort = System }) occ = name { n_occ = occ, n_sort = Internal} tidyNameOcc name occ = name { n_occ = occ } -- | Make the 'Name' into an internal name, regardless of what it was to begin with localiseName :: Name -> Name localiseName n = n { n_sort = Internal } {- ************************************************************************ * * \subsection{Hashing and comparison} * * ************************************************************************ -} cmpName :: Name -> Name -> Ordering cmpName n1 n2 = n_uniq n1 `nonDetCmpUnique` n_uniq n2 -- | Compare Names lexicographically -- This only works for Names that originate in the source code or have been -- tidied. stableNameCmp :: Name -> Name -> Ordering stableNameCmp (Name { n_sort = s1, n_occ = occ1 }) (Name { n_sort = s2, n_occ = occ2 }) = (s1 `sort_cmp` s2) `thenCmp` (occ1 `compare` occ2) -- The ordinary compare on OccNames is lexicographic where -- Later constructors are bigger sort_cmp (External m1) (External m2) = m1 `stableModuleCmp` m2 sort_cmp (External {}) _ = LT sort_cmp (WiredIn {}) (External {}) = GT sort_cmp (WiredIn m1 _ _) (WiredIn m2 _ _) = m1 `stableModuleCmp` m2 sort_cmp (WiredIn {}) _ = LT sort_cmp Internal (External {}) = GT sort_cmp Internal (WiredIn {}) = GT sort_cmp Internal Internal = EQ sort_cmp Internal System = LT sort_cmp System System = EQ sort_cmp System _ = GT {- ************************************************************************ * * \subsection[Name-instances]{Instance declarations} * * ************************************************************************ -} -- | The same comments as for `Name`'s `Ord` instance apply. instance Eq Name where a == b = case (a `compare` b) of { EQ -> True; _ -> False } a /= b = case (a `compare` b) of { EQ -> False; _ -> True } -- | __Caution__: This instance is implemented via `nonDetCmpUnique`, which -- means that the ordering is not stable across deserialization or rebuilds. 
-- -- See `nonDetCmpUnique` for further information, and trac #15240 for a bug -- caused by improper use of this instance. -- For a deterministic lexicographic ordering, use `stableNameCmp`. instance Ord Name where compare = cmpName instance Uniquable Name where getUnique = nameUnique instance NamedThing Name where getName n = n instance Data Name where -- don't traverse? toConstr _ = abstractConstr "Name" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Name" {- ************************************************************************ * * \subsection{Binary} * * ************************************************************************ -} -- | Assumes that the 'Name' is a non-binding one. See -- 'GHC.Iface.Syntax.putIfaceTopBndr' and 'GHC.Iface.Syntax.getIfaceTopBndr' for -- serializing binding 'Name's. See 'UserData' for the rationale for this -- distinction. instance Binary Name where put_ bh name = case getUserData bh of UserData{ ud_put_nonbinding_name = put_name } -> put_name bh name get bh = case getUserData bh of UserData { ud_get_name = get_name } -> get_name bh {- ************************************************************************ * * \subsection{Pretty printing} * * ************************************************************************ -} instance Outputable Name where ppr name = pprName name instance OutputableBndr Name where pprBndr _ name = pprName name pprInfixOcc = pprInfixName pprPrefixOcc = pprPrefixName pprName :: Name -> SDoc pprName (Name {n_sort = sort, n_uniq = uniq, n_occ = occ}) = getPprStyle $ \sty -> getPprDebug $ \debug -> case sort of WiredIn mod _ builtin -> pprExternal debug sty uniq mod occ True builtin External mod -> pprExternal debug sty uniq mod occ False UserSyntax System -> pprSystem debug sty uniq occ Internal -> pprInternal debug sty uniq occ -- | Print fully qualified name (with unit-id, module and unique) pprFullName :: Module -> Name -> SDoc pprFullName this_mod Name{n_sort = sort, n_uniq = uniq, n_occ = occ} 
= let mod = case sort of WiredIn m _ _ -> m External m -> m System -> this_mod Internal -> this_mod in ftext (unitIdFS (moduleUnitId mod)) <> colon <> ftext (moduleNameFS $ moduleName mod) <> dot <> ftext (occNameFS occ) <> char '_' <> pprUniqueAlways uniq -- | Print a ticky ticky styled name -- -- Module argument is the module to use for internal and system names. When -- printing the name in a ticky profile, the module name is included even for -- local things. However, ticky uses the format "x (M)" rather than "M.x". -- Hence, this function provides a separation from normal styling. pprTickyName :: Module -> Name -> SDoc pprTickyName this_mod name | isInternalName name = pprName name <+> parens (ppr this_mod) | otherwise = pprName name -- | Print the string of Name unqualifiedly directly. pprNameUnqualified :: Name -> SDoc pprNameUnqualified Name { n_occ = occ } = ppr_occ_name occ pprExternal :: Bool -> PprStyle -> Unique -> Module -> OccName -> Bool -> BuiltInSyntax -> SDoc pprExternal debug sty uniq mod occ is_wired is_builtin | codeStyle sty = ppr mod <> char '_' <> ppr_z_occ_name occ -- In code style, always qualify -- ToDo: maybe we could print all wired-in things unqualified -- in code style, to reduce symbol table bloat? 
| debug = pp_mod <> ppr_occ_name occ <> braces (hsep [if is_wired then text "(w)" else empty, pprNameSpaceBrief (occNameSpace occ), pprUnique uniq]) | BuiltInSyntax <- is_builtin = ppr_occ_name occ -- Never qualify builtin syntax | otherwise = if isHoleModule mod then case qualName sty mod occ of NameUnqual -> ppr_occ_name occ _ -> braces (ppr (moduleName mod) <> dot <> ppr_occ_name occ) else pprModulePrefix sty mod occ <> ppr_occ_name occ where pp_mod = ppUnlessOption sdocSuppressModulePrefixes (ppr mod <> dot) pprInternal :: Bool -> PprStyle -> Unique -> OccName -> SDoc pprInternal debug sty uniq occ | codeStyle sty = pprUniqueAlways uniq | debug = ppr_occ_name occ <> braces (hsep [pprNameSpaceBrief (occNameSpace occ), pprUnique uniq]) | dumpStyle sty = ppr_occ_name occ <> ppr_underscore_unique uniq -- For debug dumps, we're not necessarily dumping -- tidied code, so we need to print the uniques. | otherwise = ppr_occ_name occ -- User style -- Like Internal, except that we only omit the unique in Iface style pprSystem :: Bool -> PprStyle -> Unique -> OccName -> SDoc pprSystem debug sty uniq occ | codeStyle sty = pprUniqueAlways uniq | debug = ppr_occ_name occ <> ppr_underscore_unique uniq <> braces (pprNameSpaceBrief (occNameSpace occ)) | otherwise = ppr_occ_name occ <> ppr_underscore_unique uniq -- If the tidy phase hasn't run, the OccName -- is unlikely to be informative (like 's'), -- so print the unique pprModulePrefix :: PprStyle -> Module -> OccName -> SDoc -- Print the "M." 
part of a name, based on whether it's in scope or not -- See Note [Printing original names] in GHC.Types.Name.Ppr pprModulePrefix sty mod occ = ppUnlessOption sdocSuppressModulePrefixes $ case qualName sty mod occ of -- See Outputable.QualifyName: NameQual modname -> ppr modname <> dot -- Name is in scope NameNotInScope1 -> ppr mod <> dot -- Not in scope NameNotInScope2 -> ppr (moduleUnit mod) <> colon -- Module not in <> ppr (moduleName mod) <> dot -- scope either NameUnqual -> empty -- In scope unqualified pprUnique :: Unique -> SDoc -- Print a unique unless we are suppressing them pprUnique uniq = ppUnlessOption sdocSuppressUniques $ pprUniqueAlways uniq ppr_underscore_unique :: Unique -> SDoc -- Print an underscore separating the name from its unique -- But suppress it if we aren't printing the uniques anyway ppr_underscore_unique uniq = ppUnlessOption sdocSuppressUniques $ char '_' <> pprUniqueAlways uniq ppr_occ_name :: OccName -> SDoc ppr_occ_name occ = ftext (occNameFS occ) -- Don't use pprOccName; instead, just print the string of the OccName; -- we print the namespace in the debug stuff above -- In code style, we Z-encode the strings. The results of Z-encoding each FastString are -- cached behind the scenes in the FastString implementation. ppr_z_occ_name :: OccName -> SDoc ppr_z_occ_name occ = ztext (zEncodeFS (occNameFS occ)) -- Prints (if mod information is available) "Defined at " or -- "Defined in " information for a Name. 
pprDefinedAt :: Name -> SDoc pprDefinedAt name = text "Defined" <+> pprNameDefnLoc name pprNameDefnLoc :: Name -> SDoc -- Prints "at " or -- or "in " depending on what info is available pprNameDefnLoc name = case nameSrcLoc name of -- nameSrcLoc rather than nameSrcSpan -- It seems less cluttered to show a location -- rather than a span for the definition point RealSrcLoc s _ -> text "at" <+> ppr s UnhelpfulLoc s | isInternalName name || isSystemName name -> text "at" <+> ftext s | otherwise -> text "in" <+> quotes (ppr (nameModule name)) -- | Get a string representation of a 'Name' that's unique and stable -- across recompilations. Used for deterministic generation of binds for -- derived instances. -- eg. "$aeson_70dylHtv1FFGeai1IoxcQr$Data.Aeson.Types.Internal$String" nameStableString :: Name -> String nameStableString Name{..} = nameSortStableString n_sort ++ "$" ++ occNameString n_occ nameSortStableString :: NameSort -> String nameSortStableString System = "$_sys" nameSortStableString Internal = "$_in" nameSortStableString (External mod) = moduleStableString mod nameSortStableString (WiredIn mod _ _) = moduleStableString mod {- ************************************************************************ * * \subsection{Overloaded functions related to Names} * * ************************************************************************ -} -- | A class allowing convenient access to the 'Name' of various datatypes class NamedThing a where getOccName :: a -> OccName getName :: a -> Name getOccName n = nameOccName (getName n) -- Default method instance NamedThing e => NamedThing (Located e) where getName = getName . unLoc getSrcLoc :: NamedThing a => a -> SrcLoc getSrcSpan :: NamedThing a => a -> SrcSpan getOccString :: NamedThing a => a -> String getOccFS :: NamedThing a => a -> FastString getSrcLoc = nameSrcLoc . getName getSrcSpan = nameSrcSpan . getName getOccString = occNameString . getOccName getOccFS = occNameFS . 
getOccName pprInfixName :: (Outputable a, NamedThing a) => a -> SDoc -- See Outputable.pprPrefixVar, pprInfixVar; -- add parens or back-quotes as appropriate pprInfixName n = pprInfixVar (isSymOcc (getOccName n)) (ppr n) pprPrefixName :: NamedThing a => a -> SDoc pprPrefixName thing = pprPrefixVar (isSymOcc (nameOccName name)) (ppr name) where name = getName thing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Cache.hs0000644000000000000000000001264014472400113021162 0ustar0000000000000000 {-# LANGUAGE RankNTypes #-} -- | The Name Cache module GHC.Types.Name.Cache ( NameCache (..) , initNameCache , takeUniqFromNameCache , updateNameCache' , updateNameCache -- * OrigNameCache , OrigNameCache , lookupOrigNameCache , extendOrigNameCache' , extendOrigNameCache ) where import GHC.Prelude import GHC.Unit.Module import GHC.Types.Name import GHC.Types.Unique.Supply import GHC.Builtin.Types import GHC.Builtin.Names import GHC.Utils.Outputable import GHC.Utils.Panic import Control.Concurrent.MVar import Control.Monad {- Note [The Name Cache] ~~~~~~~~~~~~~~~~~~~~~ The Name Cache makes sure that, during any invocation of GHC, each External Name "M.x" has one, and only one globally-agreed Unique. * The first time we come across M.x we make up a Unique and record that association in the Name Cache. * When we come across "M.x" again, we look it up in the Name Cache, and get a hit. The functions newGlobalBinder, allocateGlobalBinder do the main work. When you make an External name, you should probably be calling one of them. Names in a NameCache are always stored as a Global, and have the SrcLoc of their binding locations. Actually that's not quite right. When we first encounter the original name, we might not be at its binding site (e.g. we are reading an interface file); so we give it 'noSrcLoc' then. Later, when we find its binding site, we fix it up. 
Note [Built-in syntax and the OrigNameCache] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Built-in syntax like tuples and unboxed sums are quite ubiquitous. To lower their cost we use two tricks, a. We specially encode tuple and sum Names in interface files' symbol tables to avoid having to look up their names while loading interface files. Namely these names are encoded as by their Uniques. We know how to get from a Unique back to the Name which it represents via the mapping defined in the SumTupleUniques module. See Note [Symbol table representation of names] in GHC.Iface.Binary and for details. b. We don't include them in the Orig name cache but instead parse their OccNames (in isBuiltInOcc_maybe) to avoid bloating the name cache with them. Why is the second measure necessary? Good question; afterall, 1) the parser emits built-in syntax directly as Exact RdrNames, and 2) built-in syntax never needs to looked-up during interface loading due to (a). It turns out that there are two reasons why we might look up an Orig RdrName for built-in syntax, * If you use setRdrNameSpace on an Exact RdrName it may be turned into an Orig RdrName. * Template Haskell turns a BuiltInSyntax Name into a TH.NameG (GHC.HsToCore.Quote.globalVar), and parses a NameG into an Orig RdrName (GHC.ThToHs.thRdrName). So, e.g. $(do { reify '(,); ... }) will go this route (#8954). -} -- | The NameCache makes sure that there is just one Unique assigned for -- each original name; i.e. (module-name, occ-name) pair and provides -- something of a lookup mechanism for those names. 
data NameCache = NameCache { nsUniqChar :: {-# UNPACK #-} !Char , nsNames :: {-# UNPACK #-} !(MVar OrigNameCache) } -- | Per-module cache of original 'OccName's given 'Name's type OrigNameCache = ModuleEnv (OccEnv Name) takeUniqFromNameCache :: NameCache -> IO Unique takeUniqFromNameCache (NameCache c _) = uniqFromMask c lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name lookupOrigNameCache nc mod occ | mod == gHC_TYPES || mod == gHC_PRIM || mod == gHC_TUPLE , Just name <- isBuiltInOcc_maybe occ = -- See Note [Known-key names], 3(c) in GHC.Builtin.Names -- Special case for tuples; there are too many -- of them to pre-populate the original-name cache Just name | otherwise = case lookupModuleEnv nc mod of Nothing -> Nothing Just occ_env -> lookupOccEnv occ_env occ extendOrigNameCache' :: OrigNameCache -> Name -> OrigNameCache extendOrigNameCache' nc name = assertPpr (isExternalName name) (ppr name) $ extendOrigNameCache nc (nameModule name) (nameOccName name) name extendOrigNameCache :: OrigNameCache -> Module -> OccName -> Name -> OrigNameCache extendOrigNameCache nc mod occ name = extendModuleEnvWith combine nc mod (unitOccEnv occ name) where combine _ occ_env = extendOccEnv occ_env occ name initNameCache :: Char -> [Name] -> IO NameCache initNameCache c names = NameCache c <$> newMVar (initOrigNames names) initOrigNames :: [Name] -> OrigNameCache initOrigNames names = foldl' extendOrigNameCache' emptyModuleEnv names -- | Update the name cache with the given function updateNameCache' :: NameCache -> (OrigNameCache -> IO (OrigNameCache, c)) -- The updating function -> IO c updateNameCache' (NameCache _c nc) upd_fn = modifyMVar' nc upd_fn -- this should be in `base` modifyMVar' :: MVar a -> (a -> IO (a,b)) -> IO b modifyMVar' m f = modifyMVar m $ f >=> \c -> fst c `seq` pure c -- | Update the name cache with the given function -- -- Additionally, it ensures that the given Module and OccName are evaluated. 
-- If not, chaos can ensue: -- we read the name-cache -- then pull on mod (say) -- which does some stuff that modifies the name cache -- This did happen, with tycon_mod in GHC.IfaceToCore.tcIfaceAlt (DataAlt..) updateNameCache :: NameCache -> Module -> OccName -> (OrigNameCache -> IO (OrigNameCache, c)) -> IO c updateNameCache name_cache !_mod !_occ upd_fn = updateNameCache' name_cache upd_fn ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Env.hs0000644000000000000000000001644314472400113020714 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[NameEnv]{@NameEnv@: name environments} -} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE ScopedTypeVariables #-} module GHC.Types.Name.Env ( -- * Var, Id and TyVar environments (maps) NameEnv, -- ** Manipulating these environments mkNameEnv, mkNameEnvWith, emptyNameEnv, isEmptyNameEnv, unitNameEnv, nonDetNameEnvElts, extendNameEnv_C, extendNameEnv_Acc, extendNameEnv, extendNameEnvList, extendNameEnvList_C, filterNameEnv, anyNameEnv, plusNameEnv, plusNameEnv_C, plusNameEnv_CD, plusNameEnv_CD2, alterNameEnv, lookupNameEnv, lookupNameEnv_NF, delFromNameEnv, delListFromNameEnv, elemNameEnv, mapNameEnv, disjointNameEnv, seqEltsNameEnv, DNameEnv, emptyDNameEnv, isEmptyDNameEnv, lookupDNameEnv, delFromDNameEnv, filterDNameEnv, mapDNameEnv, adjustDNameEnv, alterDNameEnv, extendDNameEnv, eltsDNameEnv, extendDNameEnv_C, plusDNameEnv_C, foldDNameEnv, nonDetStrictFoldDNameEnv, -- ** Dependency analysis depAnal ) where import GHC.Prelude import GHC.Data.Graph.Directed import GHC.Types.Name import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Data.Maybe {- ************************************************************************ * * \subsection{Name environment} * * ************************************************************************ -} {- Note [depAnal determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~~ depAnal is deterministic provided it gets the nodes in 
a deterministic order. The order of lists that get_defs and get_uses return doesn't matter, as these are only used to construct the edges, and stronglyConnCompFromEdgedVertices is deterministic even when the edges are not in deterministic order as explained in Note [Deterministic SCC] in GHC.Data.Graph.Directed. -} depAnal :: forall node. (node -> [Name]) -- Defs -> (node -> [Name]) -- Uses -> [node] -> [SCC node] -- Perform dependency analysis on a group of definitions, -- where each definition may define more than one Name -- -- The get_defs and get_uses functions are called only once per node depAnal get_defs get_uses nodes = stronglyConnCompFromEdgedVerticesUniq graph_nodes where graph_nodes = (map mk_node keyed_nodes) :: [Node Int node] keyed_nodes = nodes `zip` [(1::Int)..] mk_node (node, key) = let !edges = (mapMaybe (lookupNameEnv key_map) (get_uses node)) in DigraphNode node key edges key_map :: NameEnv Int -- Maps a Name to the key of the decl that defines it key_map = mkNameEnv [(name,key) | (node, key) <- keyed_nodes, name <- get_defs node] {- ************************************************************************ * * \subsection{Name environment} * * ************************************************************************ -} -- | Name Environment type NameEnv a = UniqFM Name a -- Domain is Name emptyNameEnv :: NameEnv a isEmptyNameEnv :: NameEnv a -> Bool mkNameEnv :: [(Name,a)] -> NameEnv a mkNameEnvWith :: (a -> Name) -> [a] -> NameEnv a nonDetNameEnvElts :: NameEnv a -> [a] alterNameEnv :: (Maybe a-> Maybe a) -> NameEnv a -> Name -> NameEnv a extendNameEnv_C :: (a->a->a) -> NameEnv a -> Name -> a -> NameEnv a extendNameEnv_Acc :: (a->b->b) -> (a->b) -> NameEnv b -> Name -> a -> NameEnv b extendNameEnv :: NameEnv a -> Name -> a -> NameEnv a plusNameEnv :: NameEnv a -> NameEnv a -> NameEnv a plusNameEnv_C :: (a->a->a) -> NameEnv a -> NameEnv a -> NameEnv a plusNameEnv_CD :: (a->a->a) -> NameEnv a -> a -> NameEnv a -> a -> NameEnv a plusNameEnv_CD2 :: 
(Maybe a->Maybe a->a) -> NameEnv a -> NameEnv a -> NameEnv a extendNameEnvList :: NameEnv a -> [(Name,a)] -> NameEnv a extendNameEnvList_C :: (a->a->a) -> NameEnv a -> [(Name,a)] -> NameEnv a delFromNameEnv :: NameEnv a -> Name -> NameEnv a delListFromNameEnv :: NameEnv a -> [Name] -> NameEnv a elemNameEnv :: Name -> NameEnv a -> Bool unitNameEnv :: Name -> a -> NameEnv a lookupNameEnv :: NameEnv a -> Name -> Maybe a lookupNameEnv_NF :: NameEnv a -> Name -> a filterNameEnv :: (elt -> Bool) -> NameEnv elt -> NameEnv elt anyNameEnv :: (elt -> Bool) -> NameEnv elt -> Bool mapNameEnv :: (elt1 -> elt2) -> NameEnv elt1 -> NameEnv elt2 disjointNameEnv :: NameEnv a -> NameEnv a -> Bool seqEltsNameEnv :: (elt -> ()) -> NameEnv elt -> () nonDetNameEnvElts x = nonDetEltsUFM x emptyNameEnv = emptyUFM isEmptyNameEnv = isNullUFM unitNameEnv x y = unitUFM x y extendNameEnv x y z = addToUFM x y z extendNameEnvList x l = addListToUFM x l lookupNameEnv x y = lookupUFM x y alterNameEnv = alterUFM mkNameEnv l = listToUFM l mkNameEnvWith f = mkNameEnv . map (\a -> (f a, a)) elemNameEnv x y = elemUFM x y plusNameEnv x y = plusUFM x y plusNameEnv_C f x y = plusUFM_C f x y {-# INLINE plusNameEnv_CD #-} plusNameEnv_CD f x d y b = plusUFM_CD f x d y b plusNameEnv_CD2 f x y = plusUFM_CD2 f x y extendNameEnv_C f x y z = addToUFM_C f x y z mapNameEnv f x = mapUFM f x extendNameEnv_Acc x y z a b = addToUFM_Acc x y z a b extendNameEnvList_C x y z = addListToUFM_C x y z delFromNameEnv x y = delFromUFM x y delListFromNameEnv x y = delListFromUFM x y filterNameEnv x y = filterUFM x y anyNameEnv f x = foldUFM ((||) . f) False x disjointNameEnv x y = disjointUFM x y seqEltsNameEnv seqElt x = seqEltsUFM seqElt x lookupNameEnv_NF env n = expectJust "lookupNameEnv_NF" (lookupNameEnv env n) -- | Deterministic Name Environment -- -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why -- we need DNameEnv. 
type DNameEnv a = UniqDFM Name a emptyDNameEnv :: DNameEnv a emptyDNameEnv = emptyUDFM isEmptyDNameEnv :: DNameEnv a -> Bool isEmptyDNameEnv = isNullUDFM lookupDNameEnv :: DNameEnv a -> Name -> Maybe a lookupDNameEnv = lookupUDFM delFromDNameEnv :: DNameEnv a -> Name -> DNameEnv a delFromDNameEnv = delFromUDFM filterDNameEnv :: (a -> Bool) -> DNameEnv a -> DNameEnv a filterDNameEnv = filterUDFM mapDNameEnv :: (a -> b) -> DNameEnv a -> DNameEnv b mapDNameEnv = mapUDFM adjustDNameEnv :: (a -> a) -> DNameEnv a -> Name -> DNameEnv a adjustDNameEnv = adjustUDFM alterDNameEnv :: (Maybe a -> Maybe a) -> DNameEnv a -> Name -> DNameEnv a alterDNameEnv = alterUDFM extendDNameEnv :: DNameEnv a -> Name -> a -> DNameEnv a extendDNameEnv = addToUDFM extendDNameEnv_C :: (a -> a -> a) -> DNameEnv a -> Name -> a -> DNameEnv a extendDNameEnv_C = addToUDFM_C eltsDNameEnv :: DNameEnv a -> [a] eltsDNameEnv = eltsUDFM foldDNameEnv :: (a -> b -> b) -> b -> DNameEnv a -> b foldDNameEnv = foldUDFM plusDNameEnv_C :: (elt -> elt -> elt) -> DNameEnv elt -> DNameEnv elt -> DNameEnv elt plusDNameEnv_C = plusUDFM_C nonDetStrictFoldDNameEnv :: (a -> b -> b) -> b -> DNameEnv a -> b nonDetStrictFoldDNameEnv = nonDetStrictFoldUDFM ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Occurrence.hs0000644000000000000000000010220014472400113022237 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE LambdaCase #-} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName' represents names as strings with just a little more information: -- the \"namespace\" that the name came from, e.g. 
the namespace of value, type constructors or -- data constructors -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" module GHC.Types.Name.Occurrence ( -- * The 'NameSpace' type NameSpace, -- Abstract -- ** Construction -- $real_vs_source_data_constructors tcName, clsName, tcClsName, dataName, varName, tvName, srcDataName, -- ** Pretty Printing pprNameSpace, pprNonVarNameSpace, pprNameSpaceBrief, -- * The 'OccName' type OccName, -- Abstract, instance of Outputable pprOccName, -- ** Construction mkOccName, mkOccNameFS, mkVarOcc, mkVarOccFS, mkDataOcc, mkDataOccFS, mkTyVarOcc, mkTyVarOccFS, mkTcOcc, mkTcOccFS, mkClsOcc, mkClsOccFS, mkDFunOcc, setOccNameSpace, demoteOccName, promoteOccName, HasOccName(..), -- ** Derived 'OccName's isDerivedOccName, mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkBuilderOcc, mkDefaultMethodOcc, isDefaultMethodOcc, isTypeableBindOcc, mkNewTyCoOcc, mkClassOpAuxOcc, mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc, mkClassDataConOcc, mkDictOcc, mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenR, mkGen1R, mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc, mkSuperDictSelOcc, mkSuperDictAuxOcc, mkLocalOcc, mkMethodOcc, mkInstTyTcOcc, mkInstTyCoOcc, mkEqPredCoOcc, mkRecFldSelOcc, mkTyConRepOcc, -- ** Deconstruction occNameFS, occNameString, occNameSpace, isVarOcc, isTvOcc, isTcOcc, isDataOcc, isDataSymOcc, isSymOcc, isValOcc, parenSymOcc, startsWithUnderscore, isTcClsNameSpace, isTvNameSpace, isDataConNameSpace, isVarNameSpace, isValNameSpace, -- * The 'OccEnv' type OccEnv, emptyOccEnv, unitOccEnv, extendOccEnv, mapOccEnv, lookupOccEnv, mkOccEnv, mkOccEnv_C, extendOccEnvList, elemOccEnv, nonDetOccEnvElts, foldOccEnv, plusOccEnv, plusOccEnv_C, extendOccEnv_C, extendOccEnv_Acc, filterOccEnv, delListFromOccEnv, delFromOccEnv, alterOccEnv, 
minusOccEnv, minusOccEnv_C, pprOccEnv, -- * The 'OccSet' type OccSet, emptyOccSet, unitOccSet, mkOccSet, extendOccSet, extendOccSetList, unionOccSets, unionManyOccSets, minusOccSet, elemOccSet, isEmptyOccSet, intersectOccSet, filterOccSet, occSetToEnv, -- * Tidying up TidyOccEnv, emptyTidyOccEnv, initTidyOccEnv, tidyOccName, avoidClashesOccEnv, delTidyOccEnvList, -- FsEnv FastStringEnv, emptyFsEnv, lookupFsEnv, extendFsEnv, mkFsEnv ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Types.Unique import GHC.Builtin.Uniques import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Data.FastString import GHC.Data.FastString.Env import GHC.Utils.Outputable import GHC.Utils.Lexeme import GHC.Utils.Binary import Control.DeepSeq import Data.Char import Data.Data {- ************************************************************************ * * \subsection{Name space} * * ************************************************************************ -} data NameSpace = VarName -- Variables, including "real" data constructors | DataName -- "Source" data constructors | TvName -- Type variables | TcClsName -- Type constructors and classes; Haskell has them -- in the same name space for now. deriving( Eq, Ord ) -- Note [Data Constructors] -- ~~~~~~~~~~~~~~~~~~~~~~~~ -- see also: Note [Data Constructor Naming] in GHC.Core.DataCon -- -- $real_vs_source_data_constructors -- There are two forms of data constructor: -- -- [Source data constructors] The data constructors mentioned in Haskell source code -- -- [Real data constructors] The data constructors of the representation type, which may not be the same as the source type -- -- For example: -- -- > data T = T !(Int, Int) -- -- The source datacon has type @(Int, Int) -> T@ -- The real datacon has type @Int -> Int -> T@ -- -- GHC chooses a representation based on the strictness etc. 
tcName, clsName, tcClsName :: NameSpace dataName, srcDataName :: NameSpace tvName, varName :: NameSpace -- Though type constructors and classes are in the same name space now, -- the NameSpace type is abstract, so we can easily separate them later tcName = TcClsName -- Type constructors clsName = TcClsName -- Classes tcClsName = TcClsName -- Not sure which! dataName = DataName srcDataName = DataName -- Haskell-source data constructors should be -- in the Data name space tvName = TvName varName = VarName isDataConNameSpace :: NameSpace -> Bool isDataConNameSpace DataName = True isDataConNameSpace _ = False isTcClsNameSpace :: NameSpace -> Bool isTcClsNameSpace TcClsName = True isTcClsNameSpace _ = False isTvNameSpace :: NameSpace -> Bool isTvNameSpace TvName = True isTvNameSpace _ = False isVarNameSpace :: NameSpace -> Bool -- Variables or type variables, but not constructors isVarNameSpace TvName = True isVarNameSpace VarName = True isVarNameSpace _ = False isValNameSpace :: NameSpace -> Bool isValNameSpace DataName = True isValNameSpace VarName = True isValNameSpace _ = False pprNameSpace :: NameSpace -> SDoc pprNameSpace DataName = text "data constructor" pprNameSpace VarName = text "variable" pprNameSpace TvName = text "type variable" pprNameSpace TcClsName = text "type constructor or class" pprNonVarNameSpace :: NameSpace -> SDoc pprNonVarNameSpace VarName = empty pprNonVarNameSpace ns = pprNameSpace ns pprNameSpaceBrief :: NameSpace -> SDoc pprNameSpaceBrief DataName = char 'd' pprNameSpaceBrief VarName = char 'v' pprNameSpaceBrief TvName = text "tv" pprNameSpaceBrief TcClsName = text "tc" -- demoteNameSpace lowers the NameSpace if possible. We can not know -- in advance, since a TvName can appear in an HsTyVar. -- See Note [Demotion] in GHC.Rename.Env. 
demoteNameSpace :: NameSpace -> Maybe NameSpace demoteNameSpace VarName = Nothing demoteNameSpace DataName = Nothing demoteNameSpace TvName = Nothing demoteNameSpace TcClsName = Just DataName -- promoteNameSpace promotes the NameSpace as follows. -- See Note [Promotion] in GHC.Rename.Env. promoteNameSpace :: NameSpace -> Maybe NameSpace promoteNameSpace DataName = Just TcClsName promoteNameSpace VarName = Just TvName promoteNameSpace TcClsName = Nothing promoteNameSpace TvName = Nothing {- ************************************************************************ * * \subsection[Name-pieces-datatypes]{The @OccName@ datatypes} * * ************************************************************************ -} -- | Occurrence Name -- -- In this context that means: -- "classified (i.e. as a type name, value name, etc) but not qualified -- and not yet resolved" data OccName = OccName { occNameSpace :: !NameSpace , occNameFS :: !FastString } instance Eq OccName where (OccName sp1 s1) == (OccName sp2 s2) = s1 == s2 && sp1 == sp2 instance Ord OccName where -- Compares lexicographically, *not* by Unique of the string compare (OccName sp1 s1) (OccName sp2 s2) = (s1 `lexicalCompareFS` s2) `thenCmp` (sp1 `compare` sp2) instance Data OccName where -- don't traverse? 
toConstr _ = abstractConstr "OccName" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "OccName" instance HasOccName OccName where occName = id instance NFData OccName where rnf x = x `seq` () {- ************************************************************************ * * \subsection{Printing} * * ************************************************************************ -} instance Outputable OccName where ppr = pprOccName instance OutputableBndr OccName where pprBndr _ = ppr pprInfixOcc n = pprInfixVar (isSymOcc n) (ppr n) pprPrefixOcc n = pprPrefixVar (isSymOcc n) (ppr n) pprOccName :: OccName -> SDoc pprOccName (OccName sp occ) = getPprStyle $ \ sty -> if codeStyle sty then ztext (zEncodeFS occ) else pp_occ <> whenPprDebug (braces (pprNameSpaceBrief sp)) where pp_occ = sdocOption sdocSuppressUniques $ \case True -> text (strip_th_unique (unpackFS occ)) False -> ftext occ -- See Note [Suppressing uniques in OccNames] strip_th_unique ('[' : c : _) | isAlphaNum c = [] strip_th_unique (c : cs) = c : strip_th_unique cs strip_th_unique [] = [] {- Note [Suppressing uniques in OccNames] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a hack to de-wobblify the OccNames that contain uniques from Template Haskell that have been turned into a string in the OccName. 
See Note [Unique OccNames from Template Haskell] in "GHC.ThToHs" ************************************************************************ * * \subsection{Construction} * * ************************************************************************ -} mkOccName :: NameSpace -> String -> OccName mkOccName occ_sp str = OccName occ_sp (mkFastString str) mkOccNameFS :: NameSpace -> FastString -> OccName mkOccNameFS occ_sp fs = OccName occ_sp fs mkVarOcc :: String -> OccName mkVarOcc s = mkOccName varName s mkVarOccFS :: FastString -> OccName mkVarOccFS fs = mkOccNameFS varName fs mkDataOcc :: String -> OccName mkDataOcc = mkOccName dataName mkDataOccFS :: FastString -> OccName mkDataOccFS = mkOccNameFS dataName mkTyVarOcc :: String -> OccName mkTyVarOcc = mkOccName tvName mkTyVarOccFS :: FastString -> OccName mkTyVarOccFS fs = mkOccNameFS tvName fs mkTcOcc :: String -> OccName mkTcOcc = mkOccName tcName mkTcOccFS :: FastString -> OccName mkTcOccFS = mkOccNameFS tcName mkClsOcc :: String -> OccName mkClsOcc = mkOccName clsName mkClsOccFS :: FastString -> OccName mkClsOccFS = mkOccNameFS clsName -- demoteOccName lowers the Namespace of OccName. -- See Note [Demotion] in GHC.Rename.Env. demoteOccName :: OccName -> Maybe OccName demoteOccName (OccName space name) = do space' <- demoteNameSpace space return $ OccName space' name -- promoteOccName promotes the NameSpace of OccName. -- See Note [Promotion] in GHC.Rename.Env. promoteOccName :: OccName -> Maybe OccName promoteOccName (OccName space name) = do space' <- promoteNameSpace space return $ OccName space' name {- | Other names in the compiler add additional information to an OccName. This class provides a consistent way to access the underlying OccName. 
-} class HasOccName name where occName :: name -> OccName {- ************************************************************************ * * Environments * * ************************************************************************ OccEnvs are used mainly for the envts in ModIfaces. Note [The Unique of an OccName] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ They are efficient, because FastStrings have unique Int# keys. We assume this key is less than 2^24, and indeed FastStrings are allocated keys sequentially starting at 0. So we can make a Unique using mkUnique ns key :: Unique where 'ns' is a Char representing the name space. This in turn makes it easy to build an OccEnv. -} instance Uniquable OccName where -- See Note [The Unique of an OccName] getUnique (OccName VarName fs) = mkVarOccUnique fs getUnique (OccName DataName fs) = mkDataOccUnique fs getUnique (OccName TvName fs) = mkTvOccUnique fs getUnique (OccName TcClsName fs) = mkTcOccUnique fs newtype OccEnv a = A (UniqFM OccName a) deriving Data emptyOccEnv :: OccEnv a unitOccEnv :: OccName -> a -> OccEnv a extendOccEnv :: OccEnv a -> OccName -> a -> OccEnv a extendOccEnvList :: OccEnv a -> [(OccName, a)] -> OccEnv a lookupOccEnv :: OccEnv a -> OccName -> Maybe a mkOccEnv :: [(OccName,a)] -> OccEnv a mkOccEnv_C :: (a -> a -> a) -> [(OccName,a)] -> OccEnv a elemOccEnv :: OccName -> OccEnv a -> Bool foldOccEnv :: (a -> b -> b) -> b -> OccEnv a -> b nonDetOccEnvElts :: OccEnv a -> [a] extendOccEnv_C :: (a->a->a) -> OccEnv a -> OccName -> a -> OccEnv a extendOccEnv_Acc :: (a->b->b) -> (a->b) -> OccEnv b -> OccName -> a -> OccEnv b plusOccEnv :: OccEnv a -> OccEnv a -> OccEnv a plusOccEnv_C :: (a->a->a) -> OccEnv a -> OccEnv a -> OccEnv a mapOccEnv :: (a->b) -> OccEnv a -> OccEnv b delFromOccEnv :: OccEnv a -> OccName -> OccEnv a delListFromOccEnv :: OccEnv a -> [OccName] -> OccEnv a filterOccEnv :: (elt -> Bool) -> OccEnv elt -> OccEnv elt alterOccEnv :: (Maybe elt -> Maybe elt) -> OccEnv elt -> OccName -> OccEnv elt 
minusOccEnv :: OccEnv a -> OccEnv b -> OccEnv a -- | Alters (replaces or removes) those elements of the map that are mentioned in the second map minusOccEnv_C :: (a -> b -> Maybe a) -> OccEnv a -> OccEnv b -> OccEnv a emptyOccEnv = A emptyUFM unitOccEnv x y = A $ unitUFM x y extendOccEnv (A x) y z = A $ addToUFM x y z extendOccEnvList (A x) l = A $ addListToUFM x l lookupOccEnv (A x) y = lookupUFM x y mkOccEnv l = A $ listToUFM l elemOccEnv x (A y) = elemUFM x y foldOccEnv a b (A c) = foldUFM a b c nonDetOccEnvElts (A x) = nonDetEltsUFM x plusOccEnv (A x) (A y) = A $ plusUFM x y plusOccEnv_C f (A x) (A y) = A $ plusUFM_C f x y extendOccEnv_C f (A x) y z = A $ addToUFM_C f x y z extendOccEnv_Acc f g (A x) y z = A $ addToUFM_Acc f g x y z mapOccEnv f (A x) = A $ mapUFM f x mkOccEnv_C comb l = A $ addListToUFM_C comb emptyUFM l delFromOccEnv (A x) y = A $ delFromUFM x y delListFromOccEnv (A x) y = A $ delListFromUFM x y filterOccEnv x (A y) = A $ filterUFM x y alterOccEnv fn (A y) k = A $ alterUFM fn y k minusOccEnv (A x) (A y) = A $ minusUFM x y minusOccEnv_C fn (A x) (A y) = A $ minusUFM_C fn x y instance Outputable a => Outputable (OccEnv a) where ppr x = pprOccEnv ppr x pprOccEnv :: (a -> SDoc) -> OccEnv a -> SDoc pprOccEnv ppr_elt (A env) = pprUniqFM ppr_elt env type OccSet = UniqSet OccName emptyOccSet :: OccSet unitOccSet :: OccName -> OccSet mkOccSet :: [OccName] -> OccSet extendOccSet :: OccSet -> OccName -> OccSet extendOccSetList :: OccSet -> [OccName] -> OccSet unionOccSets :: OccSet -> OccSet -> OccSet unionManyOccSets :: [OccSet] -> OccSet minusOccSet :: OccSet -> OccSet -> OccSet elemOccSet :: OccName -> OccSet -> Bool isEmptyOccSet :: OccSet -> Bool intersectOccSet :: OccSet -> OccSet -> OccSet filterOccSet :: (OccName -> Bool) -> OccSet -> OccSet -- | Converts an OccSet to an OccEnv (operationally the identity) occSetToEnv :: OccSet -> OccEnv OccName emptyOccSet = emptyUniqSet unitOccSet = unitUniqSet mkOccSet = mkUniqSet extendOccSet = 
addOneToUniqSet extendOccSetList = addListToUniqSet unionOccSets = unionUniqSets unionManyOccSets = unionManyUniqSets minusOccSet = minusUniqSet elemOccSet = elementOfUniqSet isEmptyOccSet = isEmptyUniqSet intersectOccSet = intersectUniqSets filterOccSet = filterUniqSet occSetToEnv = A . getUniqSet {- ************************************************************************ * * \subsection{Predicates and taking them apart} * * ************************************************************************ -} occNameString :: OccName -> String occNameString (OccName _ s) = unpackFS s setOccNameSpace :: NameSpace -> OccName -> OccName setOccNameSpace sp (OccName _ occ) = OccName sp occ isVarOcc, isTvOcc, isTcOcc, isDataOcc :: OccName -> Bool isVarOcc (OccName VarName _) = True isVarOcc _ = False isTvOcc (OccName TvName _) = True isTvOcc _ = False isTcOcc (OccName TcClsName _) = True isTcOcc _ = False -- | /Value/ 'OccNames's are those that are either in -- the variable or data constructor namespaces isValOcc :: OccName -> Bool isValOcc (OccName VarName _) = True isValOcc (OccName DataName _) = True isValOcc _ = False isDataOcc (OccName DataName _) = True isDataOcc _ = False -- | Test if the 'OccName' is a data constructor that starts with -- a symbol (e.g. @:@, or @[]@) isDataSymOcc :: OccName -> Bool isDataSymOcc (OccName DataName s) = isLexConSym s isDataSymOcc _ = False -- Pretty inefficient! -- | Test if the 'OccName' is that for any operator (whether -- it is a data constructor or variable or whatever) isSymOcc :: OccName -> Bool isSymOcc (OccName DataName s) = isLexConSym s isSymOcc (OccName TcClsName s) = isLexSym s isSymOcc (OccName VarName s) = isLexSym s isSymOcc (OccName TvName s) = isLexSym s -- Pretty inefficient! 
parenSymOcc :: OccName -> SDoc -> SDoc -- ^ Wrap parens around an operator parenSymOcc occ doc | isSymOcc occ = parens doc | otherwise = doc startsWithUnderscore :: OccName -> Bool -- ^ Haskell 98 encourages compilers to suppress warnings about unused -- names in a pattern if they start with @_@: this implements that test startsWithUnderscore occ = headFS (occNameFS occ) == '_' {- ************************************************************************ * * \subsection{Making system names} * * ************************************************************************ Here's our convention for splitting up the interface file name space: d... dictionary identifiers (local variables, so no name-clash worries) All of these other OccNames contain a mixture of alphabetic and symbolic characters, and hence cannot possibly clash with a user-written type or function name $f... Dict-fun identifiers (from inst decls) $dmop Default method for 'op' $pnC n'th superclass selector for class C $wf Worker for function 'f' $sf.. Specialised version of f D:C Data constructor for dictionary for class C NTCo:T Coercion connecting newtype T with its representation type TFCo:R Coercion connecting a data family to its representation type R In encoded form these appear as Zdfxxx etc :... keywords (export:, letrec: etc.) --- I THINK THIS IS WRONG! This knowledge is encoded in the following functions. @mk_deriv@ generates an @OccName@ from the prefix and a string. NB: The string must already be encoded! -} -- | Build an 'OccName' derived from another 'OccName'. -- -- Note that the pieces of the name are passed in as a @[FastString]@ so that -- the whole name can be constructed with a single 'concatFS', minimizing -- unnecessary intermediate allocations. mk_deriv :: NameSpace -> FastString -- ^ A prefix which distinguishes one sort of -- derived name from another -> [FastString] -- ^ The name we are deriving from in pieces which -- will be concatenated. 
-> OccName mk_deriv occ_sp sys_prefix str = mkOccNameFS occ_sp (concatFS $ sys_prefix : str) isDerivedOccName :: OccName -> Bool -- ^ Test for definitions internally generated by GHC. This predicate -- is used to suppress printing of internal definitions in some debug prints isDerivedOccName occ = case occNameString occ of '$':c:_ | isAlphaNum c -> True -- E.g. $wfoo c:':':_ | isAlphaNum c -> True -- E.g. N:blah newtype coercions _other -> False isDefaultMethodOcc :: OccName -> Bool isDefaultMethodOcc occ = case occNameString occ of '$':'d':'m':_ -> True _ -> False -- | Is an 'OccName' one of a Typeable @TyCon@ or @Module@ binding? -- This is needed as these bindings are renamed differently. -- See Note [Grand plan for Typeable] in "GHC.Tc.Instance.Typeable". isTypeableBindOcc :: OccName -> Bool isTypeableBindOcc occ = case occNameString occ of '$':'t':'c':_ -> True -- mkTyConRepOcc '$':'t':'r':_ -> True -- Module binding _ -> False mkDataConWrapperOcc, mkWorkerOcc, mkMatcherOcc, mkBuilderOcc, mkDefaultMethodOcc, mkClassDataConOcc, mkDictOcc, mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkRepEqOcc, mkGenR, mkGen1R, mkDataConWorkerOcc, mkNewTyCoOcc, mkInstTyCoOcc, mkEqPredCoOcc, mkClassOpAuxOcc, mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc, mkDataTOcc, mkDataCOcc, mkTyConRepOcc :: OccName -> OccName -- These derived variables have a prefix that no Haskell value could have mkDataConWrapperOcc = mk_simple_deriv varName "$W" mkWorkerOcc = mk_simple_deriv varName "$w" mkMatcherOcc = mk_simple_deriv varName "$m" mkBuilderOcc = mk_simple_deriv varName "$b" mkDefaultMethodOcc = mk_simple_deriv varName "$dm" mkClassOpAuxOcc = mk_simple_deriv varName "$c" mkDictOcc = mk_simple_deriv varName "$d" mkIPOcc = mk_simple_deriv varName "$i" mkSpecOcc = mk_simple_deriv varName "$s" mkForeignExportOcc = mk_simple_deriv varName "$f" mkRepEqOcc = mk_simple_deriv tvName "$r" -- In RULES involving Coercible mkClassDataConOcc = mk_simple_deriv dataName "C:" -- Data con for a class mkNewTyCoOcc = 
mk_simple_deriv tcName "N:" -- Coercion for newtypes mkInstTyCoOcc = mk_simple_deriv tcName "D:" -- Coercion for type functions mkEqPredCoOcc = mk_simple_deriv tcName "$co" -- Used in derived instances for the names of auxiliary bindings. -- See Note [Auxiliary binders] in GHC.Tc.Deriv.Generate. mkCon2TagOcc = mk_simple_deriv varName "$con2tag_" mkTag2ConOcc = mk_simple_deriv varName "$tag2con_" mkMaxTagOcc = mk_simple_deriv varName "$maxtag_" mkDataTOcc = mk_simple_deriv varName "$t" mkDataCOcc = mk_simple_deriv varName "$c" -- TyConRepName stuff; see Note [Grand plan for Typeable] in GHC.Tc.Instance.Typeable mkTyConRepOcc occ = mk_simple_deriv varName prefix occ where prefix | isDataOcc occ = "$tc'" | otherwise = "$tc" -- Generic deriving mechanism mkGenR = mk_simple_deriv tcName "Rep_" mkGen1R = mk_simple_deriv tcName "Rep1_" -- Overloaded record field selectors mkRecFldSelOcc :: String -> OccName mkRecFldSelOcc s = mk_deriv varName "$sel" [fsLit s] mk_simple_deriv :: NameSpace -> FastString -> OccName -> OccName mk_simple_deriv sp px occ = mk_deriv sp px [occNameFS occ] -- Data constructor workers are made by setting the name space -- of the data constructor OccName (which should be a DataName) -- to VarName mkDataConWorkerOcc datacon_occ = setOccNameSpace varName datacon_occ mkSuperDictAuxOcc :: Int -> OccName -> OccName mkSuperDictAuxOcc index cls_tc_occ = mk_deriv varName "$cp" [fsLit $ show index, occNameFS cls_tc_occ] mkSuperDictSelOcc :: Int -- ^ Index of superclass, e.g. 3 -> OccName -- ^ Class, e.g. @Ord@ -> OccName -- ^ Derived 'Occname', e.g. @$p3Ord@ mkSuperDictSelOcc index cls_tc_occ = mk_deriv varName "$p" [fsLit $ show index, occNameFS cls_tc_occ] mkLocalOcc :: Unique -- ^ Unique to combine with the 'OccName' -> OccName -- ^ Local name, e.g. @sat@ -> OccName -- ^ Nice unique version, e.g. @$L23sat@ mkLocalOcc uniq occ = mk_deriv varName "$L" [fsLit $ show uniq, occNameFS occ] -- The Unique might print with characters -- that need encoding (e.g. 
'z'!) -- | Derive a name for the representation type constructor of a -- @data@\/@newtype@ instance. mkInstTyTcOcc :: String -- ^ Family name, e.g. @Map@ -> OccSet -- ^ avoid these Occs -> OccName -- ^ @R:Map@ mkInstTyTcOcc str = chooseUniqueOcc tcName ('R' : ':' : str) mkDFunOcc :: String -- ^ Typically the class and type glommed together e.g. @OrdMaybe@. -- Only used in debug mode, for extra clarity -> Bool -- ^ Is this a hs-boot instance DFun? -> OccSet -- ^ avoid these Occs -> OccName -- ^ E.g. @$f3OrdMaybe@ -- In hs-boot files we make dict funs like $fx7ClsTy, which get bound to the real -- thing when we compile the mother module. Reason: we don't know exactly -- what the mother module will call it. mkDFunOcc info_str is_boot set = chooseUniqueOcc VarName (prefix ++ info_str) set where prefix | is_boot = "$fx" | otherwise = "$f" {- Sometimes we need to pick an OccName that has not already been used, given a set of in-use OccNames. -} chooseUniqueOcc :: NameSpace -> String -> OccSet -> OccName chooseUniqueOcc ns str set = loop (mkOccName ns str) (0::Int) where loop occ n | occ `elemOccSet` set = loop (mkOccName ns (str ++ show n)) (n+1) | otherwise = occ {- We used to add a '$m' to indicate a method, but that gives rise to bad error messages from the type checker when we print the function name or pattern of an instance-decl binding. Why? Because the binding is zapped to use the method name in place of the selector name. (See GHC.Tc.TyCl.Class.tcMethodBind) The way it is now, -ddump-xx output may look confusing, but you can always say -dppr-debug to get the uniques. However, we *do* have to zap the first character to be lower case, because overloaded constructors (blarg) generate methods too. And convert to VarName space e.g. a call to constructor MkFoo where data (Ord a) => Foo a = MkFoo a If this is necessary, we do it by prefixing '$m'. These guys never show up in error messages. What a hack. 
-} mkMethodOcc :: OccName -> OccName mkMethodOcc occ@(OccName VarName _) = occ mkMethodOcc occ = mk_simple_deriv varName "$m" occ {- ************************************************************************ * * \subsection{Tidying them up} * * ************************************************************************ Before we print chunks of code we like to rename it so that we don't have to print lots of silly uniques in it. But we mustn't accidentally introduce name clashes! So the idea is that we leave the OccName alone unless it accidentally clashes with one that is already in scope; if so, we tack on '1' at the end and try again, then '2', and so on till we find a unique one. There's a wrinkle for operators. Consider '>>='. We can't use '>>=1' because that isn't a single lexeme. So we encode it to 'lle' and *then* tack on the '1', if necessary. Note [TidyOccEnv] ~~~~~~~~~~~~~~~~~ type TidyOccEnv = UniqFM Int * Domain = The OccName's FastString. These FastStrings are "taken"; make sure that we don't re-use * Int, n = A plausible starting point for new guesses There is no guarantee that "FSn" is available; you must look that up in the TidyOccEnv. But it's a good place to start looking. * When looking for a renaming for "foo2" we strip off the "2" and start with "foo". Otherwise if we tidy twice we get silly names like foo23. However, if it started with digits at the end, we always make a name with digits at the end, rather than shortening "foo2" to just "foo", even if "foo" is unused. Reasons: - Plain "foo" might be used later - We use trailing digits to subtly indicate a unification variable in typechecker error message; see TypeRep.tidyTyVarBndr We have to take care though! Consider a machine-generated module (#10370) module Foo where a1 = e1 a2 = e2 ... a2000 = e2000 Then "a1", "a2" etc are all marked taken. But now if we come across "a7" again, we have to do a linear search to find a free one, "a2001". That might just be acceptable once. 
But if we now come across "a8" again, we don't want to repeat that search. So we use the TidyOccEnv mapping for "a" (not "a7" or "a8") as our base for starting the search; and we make sure to update the starting point for "a" after we allocate a new one. Note [Tidying multiple names at once] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider > :t (id,id,id) Every id contributes a type variable to the type signature, and all of them are "a". If we tidy them one by one, we get (id,id,id) :: (a2 -> a2, a1 -> a1, a -> a) which is a bit unfortunate, as it unfairly renames only two of them. What we would like to see is (id,id,id) :: (a3 -> a3, a2 -> a2, a1 -> a1) To achieve this, the function avoidClashesOccEnv can be used to prepare the TidyEnv, by “blocking” every name that occurs twice in the map. This way, none of the "a"s will get the privilege of keeping this name, and all of them will get a suitable number by tidyOccName. This prepared TidyEnv can then be used with tidyOccName. See tidyTyCoVarBndrs for an example where this is used. This is #12382. -} type TidyOccEnv = UniqFM FastString Int -- The in-scope OccNames -- See Note [TidyOccEnv] emptyTidyOccEnv :: TidyOccEnv emptyTidyOccEnv = emptyUFM initTidyOccEnv :: [OccName] -> TidyOccEnv -- Initialise with names to avoid! 
initTidyOccEnv = foldl' add emptyUFM where add env (OccName _ fs) = addToUFM env fs 1 delTidyOccEnvList :: TidyOccEnv -> [FastString] -> TidyOccEnv delTidyOccEnvList = delListFromUFM -- see Note [Tidying multiple names at once] avoidClashesOccEnv :: TidyOccEnv -> [OccName] -> TidyOccEnv avoidClashesOccEnv env occs = go env emptyUFM occs where go env _ [] = env go env seenOnce ((OccName _ fs):occs) | fs `elemUFM` env = go env seenOnce occs | fs `elemUFM` seenOnce = go (addToUFM env fs 1) seenOnce occs | otherwise = go env (addToUFM seenOnce fs ()) occs tidyOccName :: TidyOccEnv -> OccName -> (TidyOccEnv, OccName) tidyOccName env occ@(OccName occ_sp fs) | not (fs `elemUFM` env) = -- Desired OccName is free, so use it, -- and record in 'env' that it's no longer available (addToUFM env fs 1, occ) | otherwise = case lookupUFM env base1 of Nothing -> (addToUFM env base1 2, OccName occ_sp base1) Just n -> find 1 n where base :: String -- Drop trailing digits (see Note [TidyOccEnv]) base = dropWhileEndLE isDigit (unpackFS fs) base1 = mkFastString (base ++ "1") find !k !n = case lookupUFM env new_fs of Just {} -> find (k+1 :: Int) (n+k) -- By using n+k, the n argument to find goes -- 1, add 1, add 2, add 3, etc which -- moves at quadratic speed through a dense patch Nothing -> (new_env, OccName occ_sp new_fs) where new_fs = mkFastString (base ++ show n) new_env = addToUFM (addToUFM env new_fs 1) base1 (n+1) -- Update: base1, so that next time we'll start where we left off -- new_fs, so that we know it is taken -- If they are the same (n==1), the former wins -- See Note [TidyOccEnv] {- ************************************************************************ * * Binary instance Here rather than in GHC.Iface.Binary because OccName is abstract * * ************************************************************************ -} instance Binary NameSpace where put_ bh VarName = putByte bh 0 put_ bh DataName = putByte bh 1 put_ bh TvName = putByte bh 2 put_ bh TcClsName = putByte bh 3 
get bh = do h <- getByte bh case h of 0 -> return VarName 1 -> return DataName 2 -> return TvName _ -> return TcClsName instance Binary OccName where put_ bh (OccName aa ab) = do put_ bh aa put_ bh ab get bh = do aa <- get bh ab <- get bh return (OccName aa ab) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Ppr.hs0000644000000000000000000001676114472400113020730 0ustar0000000000000000 module GHC.Types.Name.Ppr ( mkPrintUnqualified , mkQualModule , mkQualPackage , pkgQual ) where import GHC.Prelude import GHC.Unit import GHC.Unit.Env import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Builtin.Types import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Misc import GHC.Builtin.Types.Prim (tYPETyConName, funTyConName) {- Note [Printing original names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Deciding how to print names is pretty tricky. We are given a name P:M.T, where P is the package name, M is the defining module, and T is the occurrence name, and we have to decide in which form to display the name given a GlobalRdrEnv describing the current scope. Ideally we want to display the name in the form in which it is in scope. However, the name might not be in scope at all, and that's where it gets tricky. Here are the cases: 1. T uniquely maps to P:M.T ---> "T" NameUnqual 2. There is an X for which X.T uniquely maps to P:M.T ---> "X.T" NameQual X 3. There is no binding for "M.T" ---> "M.T" NameNotInScope1 4. Otherwise ---> "P:M.T" NameNotInScope2 (3) and (4) apply when the entity P:M.T is not in the GlobalRdrEnv at all. In these cases we still want to refer to the name as "M.T", *but* "M.T" might mean something else in the current scope (e.g. if there's an "import X as M"), so to avoid confusion we avoid using "M.T" if there's already a binding for it. Instead we write P:M.T. There's one further subtlety: in case (3), what if there are two things around, P1:M.T and P2:M.T? Then we don't want to print both of them as M.T! 
However only one of the modules P1:M and P2:M can be exposed (say P2), so we use M.T for that, and P1:M.T for the other one. This is handled by the qual_mod component of PrintUnqualified, inside the (ppr mod) of case (3), in Name.pprModulePrefix Note [Printing unit ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the old days, original names were tied to PackageIds, which directly corresponded to the entities that users wrote in Cabal files, and were perfectly suitable for printing when we need to disambiguate packages. However, with instantiated units, the situation can be different: if the key is instantiated with some holes, we should try to give the user some more useful information. -} -- | Creates some functions that work out the best ways to format -- names for the user according to a set of heuristics. mkPrintUnqualified :: UnitEnv -> GlobalRdrEnv -> PrintUnqualified mkPrintUnqualified unit_env env = QueryQualify qual_name (mkQualModule unit_state home_unit) (mkQualPackage unit_state) where unit_state = ue_units unit_env home_unit = ue_homeUnit unit_env qual_name mod occ | [gre] <- unqual_gres , right_name gre = NameUnqual -- If there's a unique entity that's in scope -- unqualified with 'occ' AND that entity is -- the right one, then we can use the unqualified name | [] <- unqual_gres , pretendNameIsInScopeForPpr , not (isDerivedOccName occ) = NameUnqual -- See Note [pretendNameIsInScopeForPpr] | [gre] <- qual_gres = NameQual (greQualModName gre) | null qual_gres = if null (lookupGRE_RdrName (mkRdrQual (moduleName mod) occ) env) then NameNotInScope1 else NameNotInScope2 | otherwise = NameNotInScope1 -- Can happen if 'f' is bound twice in the module -- Eg f = True; g = 0; f = False where is_name :: Name -> Bool is_name name = assertPpr (isExternalName name) (ppr name) $ nameModule name == mod && nameOccName name == occ -- See Note [pretendNameIsInScopeForPpr] pretendNameIsInScopeForPpr :: Bool pretendNameIsInScopeForPpr = any is_name [ liftedTypeKindTyConName , 
constraintKindTyConName , heqTyConName , coercibleTyConName , eqTyConName , tYPETyConName , funTyConName , oneDataConName , manyDataConName ] right_name gre = greDefinitionModule gre == Just mod unqual_gres = lookupGRE_RdrName (mkRdrUnqual occ) env qual_gres = filter right_name (lookupGlobalRdrEnv env occ) -- we can mention a module P:M without the P: qualifier iff -- "import M" would resolve unambiguously to P:M. (if P is the -- current package we can just assume it is unqualified). {- Note [pretendNameIsInScopeForPpr] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Normally, a name is printed unqualified if it's in scope and unambiguous: ghci> :t not not :: Bool -> Bool Out of scope names are qualified: ghci> import Prelude hiding (Bool) ghci> :t not not :: GHC.Types.Bool -> GHC.Types.Bool And so are ambiguous names: ghci> data Bool ghci> :t not not :: Prelude.Bool -> Prelude.Bool However, these rules alone would lead to excessive qualification: ghci> :k Functor Functor :: (GHC.Types.Type -> GHC.Types.Type) -> GHC.Types.Constraint Even if the user has not imported Data.Kind, we would rather print: Functor :: (Type -> Type) -> Constraint So we maintain a list of names for which we only require that they are unambiguous. It reduces the amount of qualification in GHCi output and error messages thus improving readability. One potential problem here is that external tooling that relies on parsing GHCi output (e.g. Emacs mode for Haskell) requires names to be properly qualified to make sense of the output (see #11208). So extend this list with care. Side note (int-index): This function is distinct from GHC.Bulitin.Names.pretendNameIsInScope (used when filtering out instances), and perhaps we could unify them by taking a union, but I have not looked into what that would entail. 
-}

-- | Creates a function for formatting modules based on two heuristics: (1) if
-- the module is the current module, don't qualify, and (2) if there is only
-- one exposed package which exports this module, don't qualify.
mkQualModule :: UnitState -> Maybe HomeUnit -> QueryQualifyModule
mkQualModule unit_state mhome_unit mod
     | Just home_unit <- mhome_unit
     , isHomeModule home_unit mod = False
        -- heuristic (1): never qualify modules of the home unit

     | [(_, pkgconfig)] <- lookup,
       mkUnit pkgconfig == moduleUnit mod
        -- this says: we are given a module P:M, is there just one exposed package
        -- that exposes a module M, and is it package P?
     = False

     | otherwise = True
     where lookup = lookupModuleInAllUnits unit_state (moduleName mod)

-- | Creates a function for formatting packages based on two heuristics:
-- (1) don't qualify if the package in question is "main", and (2) only qualify
-- with a unit id if the package ID would be ambiguous.
mkQualPackage :: UnitState -> QueryQualifyPackage
mkQualPackage pkgs uid
     | uid == mainUnit || uid == interactiveUnit
        -- Skip the lookup if it's main, since it won't be in the package
        -- database!
     = False
     | Just pkgid <- mb_pkgid
     , searchPackageId pkgs pkgid `lengthIs` 1
        -- this says: we are given a package pkg-0.1@MMM, is there only one
        -- exposed package whose package ID is pkg-0.1?
     = False
     | otherwise
     = True
     where mb_pkgid = fmap unitPackageId (lookupUnit pkgs uid)

-- | A function which only qualifies package names if necessary; but
-- qualifies all other identifiers.
pkgQual :: UnitState -> PrintUnqualified pkgQual pkgs = alwaysQualify { queryQualifyPackage = mkQualPackage pkgs } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Reader.hs0000644000000000000000000014762614472400113021376 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE CPP #-} {-# LANGUAGE DeriveDataTypeable #-} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName' is the type of names that come directly from the parser. They -- have not yet had their scoping and binding resolved by the renamer and can be -- thought of to a first approximation as an 'GHC.Types.Name.Occurrence.OccName' with an optional module -- qualifier -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var': see "GHC.Types.Var#name_types" module GHC.Types.Name.Reader ( -- * The main type RdrName(..), -- Constructors exported only to GHC.Iface.Binary -- ** Construction mkRdrUnqual, mkRdrQual, mkUnqual, mkVarUnqual, mkQual, mkOrig, nameRdrName, getRdrName, -- ** Destruction rdrNameOcc, rdrNameSpace, demoteRdrName, promoteRdrName, isRdrDataCon, isRdrTyVar, isRdrTc, isQual, isQual_maybe, isUnqual, isOrig, isOrig_maybe, isExact, isExact_maybe, isSrcRdrName, -- * Local mapping of 'RdrName' to 'Name.Name' LocalRdrEnv, emptyLocalRdrEnv, extendLocalRdrEnv, extendLocalRdrEnvList, lookupLocalRdrEnv, lookupLocalRdrOcc, elemLocalRdrEnv, inLocalRdrEnvScope, localRdrEnvElts, minusLocalRdrEnv, -- * Global mapping of 'RdrName' to 'GlobalRdrElt's GlobalRdrEnv, emptyGlobalRdrEnv, mkGlobalRdrEnv, plusGlobalRdrEnv, lookupGlobalRdrEnv, extendGlobalRdrEnv, greOccName, shadowNames, pprGlobalRdrEnv, globalRdrEnvElts, lookupGRE_RdrName, lookupGRE_RdrName', lookupGRE_Name, lookupGRE_GreName, 
lookupGRE_FieldLabel, lookupGRE_Name_OccName, getGRE_NameQualifier_maybes, transformGREs, pickGREs, pickGREsModExp, -- * GlobalRdrElts gresFromAvails, gresFromAvail, localGREsFromAvail, availFromGRE, greRdrNames, greSrcSpan, greQualModName, gresToAvailInfo, greDefinitionModule, greDefinitionSrcSpan, greMangledName, grePrintableName, greFieldLabel, -- ** Global 'RdrName' mapping elements: 'GlobalRdrElt', 'Provenance', 'ImportSpec' GlobalRdrElt(..), isLocalGRE, isRecFldGRE, isDuplicateRecFldGRE, isNoFieldSelectorGRE, isFieldSelectorGRE, unQualOK, qualSpecOK, unQualSpecOK, pprNameProvenance, GreName(..), greNameSrcSpan, Parent(..), greParent_maybe, ImportSpec(..), ImpDeclSpec(..), ImpItemSpec(..), importSpecLoc, importSpecModule, isExplicitItem, bestImport, -- * Utils opIsAt ) where import GHC.Prelude import GHC.Unit.Module import GHC.Types.Name import GHC.Types.Avail import GHC.Types.Name.Set import GHC.Data.Maybe import GHC.Types.SrcLoc as SrcLoc import GHC.Data.FastString import GHC.Types.FieldLabel import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.Set import GHC.Utils.Misc as Utils import GHC.Utils.Panic import GHC.Types.Name.Env import Data.Data import Data.List( sortBy ) import GHC.Data.Bag {- ************************************************************************ * * \subsection{The main data type} * * ************************************************************************ -} -- | Reader Name -- -- Do not use the data constructors of RdrName directly: prefer the family -- of functions that creates them, such as 'mkRdrUnqual' -- -- - Note: A Located RdrName will only have API Annotations if it is a -- compound one, -- e.g. 
-- -- > `bar` -- > ( ~ ) -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnType', -- 'GHC.Parser.Annotation.AnnOpen' @'('@ or @'['@ or @'[:'@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ or @']'@ or @':]'@,, -- 'GHC.Parser.Annotation.AnnBackquote' @'`'@, -- 'GHC.Parser.Annotation.AnnVal' -- 'GHC.Parser.Annotation.AnnTilde', -- For details on above see Note [exact print annotations] in "GHC.Parser.Annotation" data RdrName = Unqual OccName -- ^ Unqualified name -- -- Used for ordinary, unqualified occurrences, e.g. @x@, @y@ or @Foo@. -- Create such a 'RdrName' with 'mkRdrUnqual' | Qual ModuleName OccName -- ^ Qualified name -- -- A qualified name written by the user in -- /source/ code. The module isn't necessarily -- the module where the thing is defined; -- just the one from which it is imported. -- Examples are @Bar.x@, @Bar.y@ or @Bar.Foo@. -- Create such a 'RdrName' with 'mkRdrQual' | Orig Module OccName -- ^ Original name -- -- An original name; the module is the /defining/ module. -- This is used when GHC generates code that will be fed -- into the renamer (e.g. from deriving clauses), but where -- we want to say \"Use Prelude.map dammit\". One of these -- can be created with 'mkOrig' | Exact Name -- ^ Exact name -- -- We know exactly the 'Name'. 
This is used: -- -- (1) When the parser parses built-in syntax like @[]@ -- and @(,)@, but wants a 'RdrName' from it -- -- (2) By Template Haskell, when TH has generated a unique name -- -- Such a 'RdrName' can be created by using 'getRdrName' on a 'Name' deriving Data {- ************************************************************************ * * \subsection{Simple functions} * * ************************************************************************ -} instance HasOccName RdrName where occName = rdrNameOcc rdrNameOcc :: RdrName -> OccName rdrNameOcc (Qual _ occ) = occ rdrNameOcc (Unqual occ) = occ rdrNameOcc (Orig _ occ) = occ rdrNameOcc (Exact name) = nameOccName name rdrNameSpace :: RdrName -> NameSpace rdrNameSpace = occNameSpace . rdrNameOcc -- demoteRdrName lowers the NameSpace of RdrName. -- See Note [Demotion] in GHC.Rename.Env demoteRdrName :: RdrName -> Maybe RdrName demoteRdrName (Unqual occ) = fmap Unqual (demoteOccName occ) demoteRdrName (Qual m occ) = fmap (Qual m) (demoteOccName occ) demoteRdrName (Orig _ _) = Nothing demoteRdrName (Exact _) = Nothing -- promoteRdrName promotes the NameSpace of RdrName. -- See Note [Promotion] in GHC.Rename.Env. 
promoteRdrName :: RdrName -> Maybe RdrName promoteRdrName (Unqual occ) = fmap Unqual (promoteOccName occ) promoteRdrName (Qual m occ) = fmap (Qual m) (promoteOccName occ) promoteRdrName (Orig _ _) = Nothing promoteRdrName (Exact _) = Nothing -- These two are the basic constructors mkRdrUnqual :: OccName -> RdrName mkRdrUnqual occ = Unqual occ mkRdrQual :: ModuleName -> OccName -> RdrName mkRdrQual mod occ = Qual mod occ mkOrig :: Module -> OccName -> RdrName mkOrig mod occ = Orig mod occ --------------- -- These two are used when parsing source files -- They do encode the module and occurrence names mkUnqual :: NameSpace -> FastString -> RdrName mkUnqual sp n = Unqual (mkOccNameFS sp n) mkVarUnqual :: FastString -> RdrName mkVarUnqual n = Unqual (mkVarOccFS n) -- | Make a qualified 'RdrName' in the given namespace and where the 'ModuleName' and -- the 'OccName' are taken from the first and second elements of the tuple respectively mkQual :: NameSpace -> (FastString, FastString) -> RdrName mkQual sp (m, n) = Qual (mkModuleNameFS m) (mkOccNameFS sp n) getRdrName :: NamedThing thing => thing -> RdrName getRdrName name = nameRdrName (getName name) nameRdrName :: Name -> RdrName nameRdrName name = Exact name -- Keep the Name even for Internal names, so that the -- unique is still there for debug printing, particularly -- of Types (which are converted to IfaceTypes before printing) nukeExact :: Name -> RdrName nukeExact n | isExternalName n = Orig (nameModule n) (nameOccName n) | otherwise = Unqual (nameOccName n) isRdrDataCon :: RdrName -> Bool isRdrTyVar :: RdrName -> Bool isRdrTc :: RdrName -> Bool isRdrDataCon rn = isDataOcc (rdrNameOcc rn) isRdrTyVar rn = isTvOcc (rdrNameOcc rn) isRdrTc rn = isTcOcc (rdrNameOcc rn) isSrcRdrName :: RdrName -> Bool isSrcRdrName (Unqual _) = True isSrcRdrName (Qual _ _) = True isSrcRdrName _ = False isUnqual :: RdrName -> Bool isUnqual (Unqual _) = True isUnqual _ = False isQual :: RdrName -> Bool isQual (Qual _ _) = True isQual _ = 
False isQual_maybe :: RdrName -> Maybe (ModuleName, OccName) isQual_maybe (Qual m n) = Just (m,n) isQual_maybe _ = Nothing isOrig :: RdrName -> Bool isOrig (Orig _ _) = True isOrig _ = False isOrig_maybe :: RdrName -> Maybe (Module, OccName) isOrig_maybe (Orig m n) = Just (m,n) isOrig_maybe _ = Nothing isExact :: RdrName -> Bool isExact (Exact _) = True isExact _ = False isExact_maybe :: RdrName -> Maybe Name isExact_maybe (Exact n) = Just n isExact_maybe _ = Nothing {- ************************************************************************ * * \subsection{Instances} * * ************************************************************************ -} instance Outputable RdrName where ppr (Exact name) = ppr name ppr (Unqual occ) = ppr occ ppr (Qual mod occ) = ppr mod <> dot <> ppr occ ppr (Orig mod occ) = getPprStyle (\sty -> pprModulePrefix sty mod occ <> ppr occ) instance OutputableBndr RdrName where pprBndr _ n | isTvOcc (rdrNameOcc n) = char '@' <> ppr n | otherwise = ppr n pprInfixOcc rdr = pprInfixVar (isSymOcc (rdrNameOcc rdr)) (ppr rdr) pprPrefixOcc rdr | Just name <- isExact_maybe rdr = pprPrefixName name -- pprPrefixName has some special cases, so -- we delegate to them rather than reproduce them | otherwise = pprPrefixVar (isSymOcc (rdrNameOcc rdr)) (ppr rdr) instance Eq RdrName where (Exact n1) == (Exact n2) = n1==n2 -- Convert exact to orig (Exact n1) == r2@(Orig _ _) = nukeExact n1 == r2 r1@(Orig _ _) == (Exact n2) = r1 == nukeExact n2 (Orig m1 o1) == (Orig m2 o2) = m1==m2 && o1==o2 (Qual m1 o1) == (Qual m2 o2) = m1==m2 && o1==o2 (Unqual o1) == (Unqual o2) = o1==o2 _ == _ = False instance Ord RdrName where a <= b = case (a `compare` b) of { LT -> True; EQ -> True; GT -> False } a < b = case (a `compare` b) of { LT -> True; EQ -> False; GT -> False } a >= b = case (a `compare` b) of { LT -> False; EQ -> True; GT -> True } a > b = case (a `compare` b) of { LT -> False; EQ -> False; GT -> True } -- Exact < Unqual < Qual < Orig -- [Note: Apr 2004] We used to 
use nukeExact to convert Exact to Orig -- before comparing so that Prelude.map == the exact Prelude.map, but -- that meant that we reported duplicates when renaming bindings -- generated by Template Haskell; e.g -- do { n1 <- newName "foo"; n2 <- newName "foo"; -- } -- I think we can do without this conversion compare (Exact n1) (Exact n2) = n1 `compare` n2 compare (Exact _) _ = LT compare (Unqual _) (Exact _) = GT compare (Unqual o1) (Unqual o2) = o1 `compare` o2 compare (Unqual _) _ = LT compare (Qual _ _) (Exact _) = GT compare (Qual _ _) (Unqual _) = GT compare (Qual m1 o1) (Qual m2 o2) = (o1 `compare` o2) `thenCmp` (m1 `compare` m2) compare (Qual _ _) (Orig _ _) = LT compare (Orig m1 o1) (Orig m2 o2) = (o1 `compare` o2) `thenCmp` (m1 `compare` m2) compare (Orig _ _) _ = GT {- ************************************************************************ * * LocalRdrEnv * * ************************************************************************ -} {- Note [LocalRdrEnv] ~~~~~~~~~~~~~~~~~~~~~ The LocalRdrEnv is used to store local bindings (let, where, lambda, case). * It is keyed by OccName, because we never use it for qualified names. * It maps the OccName to a Name. That Name is almost always an Internal Name, but (hackily) it can be External too for top-level pattern bindings. See Note [bindLocalNames for an External name] in GHC.Rename.Pat * We keep the current mapping (lre_env), *and* the set of all Names in scope (lre_in_scope). Reason: see Note [Splicing Exact names] in GHC.Rename.Env. -} -- | Local Reader Environment -- See Note [LocalRdrEnv] data LocalRdrEnv = LRE { lre_env :: OccEnv Name , lre_in_scope :: NameSet } instance Outputable LocalRdrEnv where ppr (LRE {lre_env = env, lre_in_scope = ns}) = hang (text "LocalRdrEnv {") 2 (vcat [ text "env =" <+> pprOccEnv ppr_elt env , text "in_scope =" <+> pprUFM (getUniqSet ns) (braces . 
                     pprWithCommas ppr) ]
           <+> char '}')
    where
      ppr_elt name = parens (ppr (getUnique (nameOccName name))) <+> ppr name
        -- So we can see if the keys line up correctly

-- | An empty 'LocalRdrEnv': no 'OccName' bindings, nothing in scope.
emptyLocalRdrEnv :: LocalRdrEnv
emptyLocalRdrEnv = LRE { lre_env      = emptyOccEnv
                       , lre_in_scope = emptyNameSet }

-- | Bind a single 'Name', keyed by its 'OccName'.  Both the lookup map
-- and the in-scope 'NameSet' are extended.
extendLocalRdrEnv :: LocalRdrEnv -> Name -> LocalRdrEnv
-- See Note [LocalRdrEnv]
extendLocalRdrEnv lre@(LRE { lre_env = env, lre_in_scope = ns }) name
  = lre { lre_env      = extendOccEnv env (nameOccName name) name
        , lre_in_scope = extendNameSet ns name }

-- | Bind several 'Name's at once; a later 'Name' with the same 'OccName'
-- shadows an earlier one in the lookup map.
extendLocalRdrEnvList :: LocalRdrEnv -> [Name] -> LocalRdrEnv
-- See Note [LocalRdrEnv]
extendLocalRdrEnvList lre@(LRE { lre_env = env, lre_in_scope = ns }) names
  = lre { lre_env      = extendOccEnvList env [(nameOccName n, n) | n <- names]
        , lre_in_scope = extendNameSetList ns names }

-- | Look up an 'RdrName' in the local environment: 'Unqual' names are looked
-- up by 'OccName'; 'Exact' names succeed only if that very 'Name' is in
-- scope.  'Qual' and 'Orig' names are never locally bound.
lookupLocalRdrEnv :: LocalRdrEnv -> RdrName -> Maybe Name
lookupLocalRdrEnv (LRE { lre_env = env, lre_in_scope = ns }) rdr
  | Unqual occ <- rdr
  = lookupOccEnv env occ

  -- See Note [Local bindings with Exact Names]
  | Exact name <- rdr
  , name `elemNameSet` ns
  = Just name

  | otherwise
  = Nothing

-- | Look up a bare 'OccName' (no qualification to consider).
lookupLocalRdrOcc :: LocalRdrEnv -> OccName -> Maybe Name
lookupLocalRdrOcc (LRE { lre_env = env }) occ = lookupOccEnv env occ

-- | Is this 'RdrName' bound by the local environment?
elemLocalRdrEnv :: RdrName -> LocalRdrEnv -> Bool
elemLocalRdrEnv rdr_name (LRE { lre_env = env, lre_in_scope = ns })
  = case rdr_name of
      Unqual occ -> occ  `elemOccEnv`  env
      Exact name -> name `elemNameSet` ns  -- See Note [Local bindings with Exact Names]
      Qual {} -> False
      Orig {} -> False

-- | All locally-bound 'Name's, in a non-deterministic order.
localRdrEnvElts :: LocalRdrEnv -> [Name]
localRdrEnvElts (LRE { lre_env = env }) = nonDetOccEnvElts env

inLocalRdrEnvScope :: Name -> LocalRdrEnv -> Bool
-- This is the point of the NameSet
inLocalRdrEnvScope name (LRE { lre_in_scope = ns }) = name `elemNameSet` ns

-- | Remove every binding whose 'OccName' occurs in the given 'OccEnv'.
-- NB: only 'lre_env' is filtered; 'lre_in_scope' is left unchanged.
minusLocalRdrEnv :: LocalRdrEnv -> OccEnv a -> LocalRdrEnv
minusLocalRdrEnv lre@(LRE { lre_env = env }) occs
  = lre { lre_env = minusOccEnv env occs }

{- Note [Local bindings with Exact Names]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ With Template Haskell we can make local bindings that have Exact Names. Computing shadowing etc may use elemLocalRdrEnv (at least it certainly does so in GHC.Rename.HsType.bindHsQTyVars), so for an Exact Name we must consult the in-scope-name-set. ************************************************************************ * * GlobalRdrEnv * * ************************************************************************ -} -- | Global Reader Environment type GlobalRdrEnv = OccEnv [GlobalRdrElt] -- ^ Keyed by 'OccName'; when looking up a qualified name -- we look up the 'OccName' part, and then check the 'Provenance' -- to see if the appropriate qualification is valid. This -- saves routinely doubling the size of the env by adding both -- qualified and unqualified names to the domain. -- -- The list in the codomain is required because there may be name clashes -- These only get reported on lookup, not on construction -- -- INVARIANT 1: All the members of the list have distinct -- 'gre_name' fields; that is, no duplicate Names -- -- INVARIANT 2: Imported provenance => Name is an ExternalName -- However LocalDefs can have an InternalName. This -- happens only when type-checking a [d| ... 
|] Template -- Haskell quotation; see this note in GHC.Rename.Names -- Note [Top-level Names in Template Haskell decl quotes] -- -- INVARIANT 3: If the GlobalRdrEnv maps [occ -> gre], then -- greOccName gre = occ -- -- NB: greOccName gre is usually the same as -- nameOccName (greMangledName gre), but not always in the -- case of record selectors; see Note [GreNames] -- | Global Reader Element -- -- An element of the 'GlobalRdrEnv' data GlobalRdrElt = GRE { gre_name :: !GreName -- ^ See Note [GreNames] , gre_par :: !Parent -- ^ See Note [Parents] , gre_lcl :: !Bool -- ^ True <=> the thing was defined locally , gre_imp :: !(Bag ImportSpec) -- ^ In scope through these imports } deriving (Data) -- INVARIANT: either gre_lcl = True or gre_imp is non-empty -- See Note [GlobalRdrElt provenance] -- | See Note [Parents] data Parent = NoParent | ParentIs { par_is :: Name } deriving (Eq, Data) instance Outputable Parent where ppr NoParent = empty ppr (ParentIs n) = text "parent:" <> ppr n plusParent :: Parent -> Parent -> Parent -- See Note [Combining parents] plusParent p1@(ParentIs _) p2 = hasParent p1 p2 plusParent p1 p2@(ParentIs _) = hasParent p2 p1 plusParent NoParent NoParent = NoParent hasParent :: Parent -> Parent -> Parent #if defined(DEBUG) hasParent p NoParent = p hasParent p p' | p /= p' = pprPanic "hasParent" (ppr p <+> ppr p') -- Parents should agree #endif hasParent p _ = p {- Note [GlobalRdrElt provenance] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The gre_lcl and gre_imp fields of a GlobalRdrElt describe its "provenance", i.e. how the Name came to be in scope. It can be in scope two ways: - gre_lcl = True: it is bound in this module - gre_imp: a list of all the imports that brought it into scope It's an INVARIANT that you have one or the other; that is, either gre_lcl is True, or gre_imp is non-empty. It is just possible to have *both* if there is a module loop: a Name is defined locally in A, and also brought into scope by importing a module that SOURCE-imported A. 
Example (#7672): A.hs-boot module A where data T B.hs module B(Decl.T) where import {-# SOURCE #-} qualified A as Decl A.hs module A where import qualified B data T = Z | S B.T In A.hs, 'T' is locally bound, *and* imported as B.T. Note [Parents] ~~~~~~~~~~~~~~~~~ The children of a Name are the things that are abbreviated by the ".." notation in export lists. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Parent Children ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ data T Data constructors Record-field ids data family T Data constructors and record-field ids of all visible data instances of T class C Class operations Associated type constructors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Constructor Meaning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NoParent Not bundled with a type constructor. ParentIs n Bundled with the type constructor corresponding to n. Pattern synonym constructors (and their record fields, if any) are unusual: their gre_par is NoParent in the module in which they are defined. However, a pattern synonym can be bundled with a type constructor on export, in which case whenever the pattern synonym is imported the gre_par will be ParentIs. Thus the gre_name and gre_par fields are independent, because a normal datatype introduces FieldGreNames using ParentIs, but a record pattern synonym can introduce FieldGreNames that use NoParent. (In the past we represented fields using an additional constructor of the Parent type, which could not adequately represent this situation.) See also Note [Representing pattern synonym fields in AvailInfo] in GHC.Types.Avail. Note [GreNames] ~~~~~~~~~~~~~~~ A `GlobalRdrElt` has a field `gre_name :: GreName`, which uniquely identifies what the `GlobalRdrElt` describes. There are two sorts of `GreName` (see the data type decl): * NormalGreName Name: this is used for most entities; the Name uniquely identifies it. It is stored in the GlobalRdrEnv under the OccName of the Name. * FieldGreName FieldLabel: is used only for field labels of a record. 
  With -XDuplicateRecordFields there may be many field labels `x` in scope;
  e.g.
     data T1 = MkT1 { x :: Int }
     data T2 = MkT2 { x :: Bool }
  Each has a different GlobalRdrElt with a distinct GreName.  The two fields
  are uniquely identified by their record selectors, which are stored in the
  FieldLabel, and have mangled names like `$sel:x:MkT1`.  See
  Note [FieldLabel] in GHC.Types.FieldLabel.

  These GREs are stored in the GlobalRdrEnv under the OccName of the field
  (i.e. "x" in both cases above), /not/ the OccName of the mangled record
  selector function.

  A GreName, and hence a GRE, has both a "printable" and a "mangled" Name.
  These are identical for normal names, but for record fields compiled with
  -XDuplicateRecordFields they will differ.  So we have two pairs of
  functions:

   * greNameMangledName :: GreName -> Name
     greMangledName :: GlobalRdrElt -> Name

     The "mangled" Name is the actual Name of the selector function,
     e.g. $sel:x:MkT1.  This should not be displayed to the user, but is used
     to uniquely identify the field in the renamer, and later in the backend.

   * greNamePrintableName :: GreName -> Name
     grePrintableName :: GlobalRdrElt -> Name

     The "printable" Name is the "mangled" Name with its OccName replaced
     with that of the field label.  This is how the field should be output to
     the user.

  Since the right Name to use is context-dependent, we do not define a
  NamedThing instance for GreName (or GlobalRdrElt), but instead make the
  choice explicit.


Note [Combining parents]
~~~~~~~~~~~~~~~~~~~~~~~~
With an associated type we might have
   module M where
     class C a where
       data T a
       op :: T a -> a
     instance C Int where
       data T Int = TInt
     instance C Bool where
       data T Bool = TBool

Then:   C is the parent of T
        T is the parent of TInt and TBool
So: in an export list
    C(..) is short for C( op, T )
    T(..) is short for T( TInt, TBool )

Module M exports everything, so its exports will be
   AvailTC C [C,T,op]
   AvailTC T [T,TInt,TBool]
On import we convert to GlobalRdrElt and then combine
those.
For T that will mean we have
  one GRE with Parent C
  one GRE with NoParent
That's why plusParent picks the "best" case.
-}

-- | make a 'GlobalRdrEnv' where all the elements point to the same
-- Provenance (useful for "hiding" imports, or imports with no details).
gresFromAvails :: Maybe ImportSpec -> [AvailInfo] -> [GlobalRdrElt]
-- prov = Nothing   => locally bound
--        Just spec => imported as described by spec
gresFromAvails prov avails
  = concatMap (gresFromAvail (const prov)) avails

localGREsFromAvail :: AvailInfo -> [GlobalRdrElt]
-- Turn an Avail into a list of LocalDef GlobalRdrElts
localGREsFromAvail = gresFromAvail (const Nothing)

-- | Turn one 'AvailInfo' into its 'GlobalRdrElt's: one per exported
-- non-field name, plus one per exported field label.  @prov_fn@ decides,
-- per 'Name', whether the GRE is locally bound ('Nothing') or brought into
-- scope by the returned 'ImportSpec'.
gresFromAvail :: (Name -> Maybe ImportSpec) -> AvailInfo -> [GlobalRdrElt]
gresFromAvail prov_fn avail
  = map mk_gre (availNonFldNames avail) ++ map mk_fld_gre (availFlds avail)
  where
    mk_gre n
      = case prov_fn n of  -- Nothing => bound locally
                           -- Just is => imported from 'is'
          Nothing -> GRE { gre_name = NormalGreName n, gre_par = mkParent n avail
                         , gre_lcl = True, gre_imp = emptyBag }
          Just is -> GRE { gre_name = NormalGreName n, gre_par = mkParent n avail
                         , gre_lcl = False, gre_imp = unitBag is }

    mk_fld_gre fl
      = case prov_fn (flSelector fl) of
          -- Nothing => bound locally
          -- Just is => imported from 'is'
          Nothing -> GRE { gre_name = FieldGreName fl, gre_par = availParent avail
                         , gre_lcl = True, gre_imp = emptyBag }
          Just is -> GRE { gre_name = FieldGreName fl, gre_par = availParent avail
                         , gre_lcl = False, gre_imp = unitBag is }

instance HasOccName GlobalRdrElt where
  occName = greOccName

-- | See Note [GreNames]
greOccName :: GlobalRdrElt -> OccName
greOccName = occName . gre_name

-- | A 'Name' for the GRE for internal use.  Careful: the 'OccName' of this
-- 'Name' is not necessarily the same as the 'greOccName' (see Note [GreNames]).
greMangledName :: GlobalRdrElt -> Name
greMangledName = greNameMangledName . gre_name

-- | A 'Name' for the GRE suitable for output to the user.  Its 'OccName' will
-- be the 'greOccName' (see Note [GreNames]).
grePrintableName :: GlobalRdrElt -> Name
grePrintableName = greNamePrintableName . gre_name

-- | The SrcSpan of the name pointed to by the GRE.
greDefinitionSrcSpan :: GlobalRdrElt -> SrcSpan
greDefinitionSrcSpan = nameSrcSpan . greMangledName

-- | The module in which the name pointed to by the GRE is defined.
greDefinitionModule :: GlobalRdrElt -> Maybe Module
greDefinitionModule = nameModule_maybe . greMangledName

greQualModName :: GlobalRdrElt -> ModuleName
-- Get a suitable module qualifier for the GRE
-- (used in mkPrintUnqualified)
-- Precondition: the greMangledName is always External
greQualModName gre@(GRE { gre_lcl = lcl, gre_imp = iss })
 | lcl, Just mod <- greDefinitionModule gre = moduleName mod
 | Just is <- headMaybe iss                 = is_as (is_decl is)
 | otherwise                                = pprPanic "greQualModName" (ppr gre)

-- | All the 'RdrName's under which this GRE can be referred to in source
-- code: the unqualified name if it is in scope unqualified, plus one
-- qualified name per import declaration that brings it into scope
-- (and another unqualified name per non-qualified import).
greRdrNames :: GlobalRdrElt -> [RdrName]
greRdrNames gre@GRE{ gre_lcl = lcl, gre_imp = iss }
  = bagToList $ (if lcl then unitBag unqual else emptyBag)
                `unionBags` concatMapBag do_spec (mapBag is_decl iss)
  where
    occ    = greOccName gre
    unqual = Unqual occ
    do_spec decl_spec
        | is_qual decl_spec = unitBag qual
        | otherwise         = listToBag [unqual,qual]
        where qual = Qual (is_as decl_spec) occ

-- the SrcSpan that pprNameProvenance prints out depends on whether
-- the Name is defined locally or not: for a local definition the
-- definition site is used, otherwise the location of the import
-- declaration.
We want to sort the export locations in -- exportClashErr by this SrcSpan, we need to extract it: greSrcSpan :: GlobalRdrElt -> SrcSpan greSrcSpan gre@(GRE { gre_lcl = lcl, gre_imp = iss } ) | lcl = greDefinitionSrcSpan gre | Just is <- headMaybe iss = is_dloc (is_decl is) | otherwise = pprPanic "greSrcSpan" (ppr gre) mkParent :: Name -> AvailInfo -> Parent mkParent _ (Avail _) = NoParent mkParent n (AvailTC m _) | n == m = NoParent | otherwise = ParentIs m availParent :: AvailInfo -> Parent availParent (AvailTC m _) = ParentIs m availParent (Avail {}) = NoParent greParent_maybe :: GlobalRdrElt -> Maybe Name greParent_maybe gre = case gre_par gre of NoParent -> Nothing ParentIs n -> Just n -- | Takes a list of distinct GREs and folds them -- into AvailInfos. This is more efficient than mapping each individual -- GRE to an AvailInfo and the folding using `plusAvail` but needs the -- uniqueness assumption. gresToAvailInfo :: [GlobalRdrElt] -> [AvailInfo] gresToAvailInfo gres = nonDetNameEnvElts avail_env where avail_env :: NameEnv AvailInfo -- Keyed by the parent (avail_env, _) = foldl' add (emptyNameEnv, emptyNameSet) gres add :: (NameEnv AvailInfo, NameSet) -> GlobalRdrElt -> (NameEnv AvailInfo, NameSet) add (env, done) gre | name `elemNameSet` done = (env, done) -- Don't insert twice into the AvailInfo | otherwise = ( extendNameEnv_Acc comb availFromGRE env key gre , done `extendNameSet` name ) where name = greMangledName gre key = case greParent_maybe gre of Just parent -> parent Nothing -> greMangledName gre -- We want to insert the child `k` into a list of children but -- need to maintain the invariant that the parent is first. -- -- We also use the invariant that `k` is not already in `ns`. 
insertChildIntoChildren :: Name -> [GreName] -> GreName -> [GreName] insertChildIntoChildren _ [] k = [k] insertChildIntoChildren p (n:ns) k | NormalGreName p == k = k:n:ns | otherwise = n:k:ns comb :: GlobalRdrElt -> AvailInfo -> AvailInfo comb _ (Avail n) = Avail n -- Duplicated name, should not happen comb gre (AvailTC m ns) = case gre_par gre of NoParent -> AvailTC m (gre_name gre:ns) -- Not sure this ever happens ParentIs {} -> AvailTC m (insertChildIntoChildren m ns (gre_name gre)) availFromGRE :: GlobalRdrElt -> AvailInfo availFromGRE (GRE { gre_name = child, gre_par = parent }) = case parent of ParentIs p -> AvailTC p [child] NoParent | NormalGreName me <- child, isTyConName me -> AvailTC me [child] | otherwise -> Avail child emptyGlobalRdrEnv :: GlobalRdrEnv emptyGlobalRdrEnv = emptyOccEnv globalRdrEnvElts :: GlobalRdrEnv -> [GlobalRdrElt] globalRdrEnvElts env = foldOccEnv (++) [] env instance Outputable GlobalRdrElt where ppr gre = hang (ppr (greMangledName gre) <+> ppr (gre_par gre)) 2 (pprNameProvenance gre) pprGlobalRdrEnv :: Bool -> GlobalRdrEnv -> SDoc pprGlobalRdrEnv locals_only env = vcat [ text "GlobalRdrEnv" <+> ppWhen locals_only (text "(locals only)") <+> lbrace , nest 2 (vcat [ pp (remove_locals gre_list) | gre_list <- nonDetOccEnvElts env ] <+> rbrace) ] where remove_locals gres | locals_only = filter isLocalGRE gres | otherwise = gres pp [] = empty pp gres = hang (ppr occ <+> parens (text "unique" <+> ppr (getUnique occ)) <> colon) 2 (vcat (map ppr gres)) where occ = nameOccName (greMangledName (head gres)) lookupGlobalRdrEnv :: GlobalRdrEnv -> OccName -> [GlobalRdrElt] lookupGlobalRdrEnv env occ_name = case lookupOccEnv env occ_name of Nothing -> [] Just gres -> gres lookupGRE_RdrName :: RdrName -> GlobalRdrEnv -> [GlobalRdrElt] -- ^ Look for this 'RdrName' in the global environment. Omits record fields -- without selector functions (see Note [NoFieldSelectors] in GHC.Rename.Env). lookupGRE_RdrName rdr_name env = filter (not . 
isNoFieldSelectorGRE) (lookupGRE_RdrName' rdr_name env) lookupGRE_RdrName' :: RdrName -> GlobalRdrEnv -> [GlobalRdrElt] -- ^ Look for this 'RdrName' in the global environment. Includes record fields -- without selector functions (see Note [NoFieldSelectors] in GHC.Rename.Env). lookupGRE_RdrName' rdr_name env = case lookupOccEnv env (rdrNameOcc rdr_name) of Nothing -> [] Just gres -> pickGREs rdr_name gres lookupGRE_Name :: GlobalRdrEnv -> Name -> Maybe GlobalRdrElt -- ^ Look for precisely this 'Name' in the environment. This tests -- whether it is in scope, ignoring anything else that might be in -- scope with the same 'OccName'. lookupGRE_Name env name = lookupGRE_Name_OccName env name (nameOccName name) lookupGRE_GreName :: GlobalRdrEnv -> GreName -> Maybe GlobalRdrElt -- ^ Look for precisely this 'GreName' in the environment. This tests -- whether it is in scope, ignoring anything else that might be in -- scope with the same 'OccName'. lookupGRE_GreName env gname = lookupGRE_Name_OccName env (greNameMangledName gname) (occName gname) lookupGRE_FieldLabel :: GlobalRdrEnv -> FieldLabel -> Maybe GlobalRdrElt -- ^ Look for a particular record field selector in the environment, where the -- selector name and field label may be different: the GlobalRdrEnv is keyed on -- the label. See Note [GreNames] for why this happens. lookupGRE_FieldLabel env fl = lookupGRE_Name_OccName env (flSelector fl) (mkVarOccFS (flLabel fl)) lookupGRE_Name_OccName :: GlobalRdrEnv -> Name -> OccName -> Maybe GlobalRdrElt -- ^ Look for precisely this 'Name' in the environment, but with an 'OccName' -- that might differ from that of the 'Name'. See 'lookupGRE_FieldLabel' and -- Note [GreNames]. 
lookupGRE_Name_OccName env name occ = case [ gre | gre <- lookupGlobalRdrEnv env occ , greMangledName gre == name ] of [] -> Nothing [gre] -> Just gre gres -> pprPanic "lookupGRE_Name_OccName" (ppr name $$ ppr occ $$ ppr gres) -- See INVARIANT 1 on GlobalRdrEnv getGRE_NameQualifier_maybes :: GlobalRdrEnv -> Name -> [Maybe [ModuleName]] -- Returns all the qualifiers by which 'x' is in scope -- Nothing means "the unqualified version is in scope" -- [] means the thing is not in scope at all getGRE_NameQualifier_maybes env name = case lookupGRE_Name env name of Just gre -> [qualifier_maybe gre] Nothing -> [] where qualifier_maybe (GRE { gre_lcl = lcl, gre_imp = iss }) | lcl = Nothing | otherwise = Just $ map (is_as . is_decl) (bagToList iss) isLocalGRE :: GlobalRdrElt -> Bool isLocalGRE (GRE {gre_lcl = lcl }) = lcl isRecFldGRE :: GlobalRdrElt -> Bool isRecFldGRE = isJust . greFieldLabel isDuplicateRecFldGRE :: GlobalRdrElt -> Bool -- ^ Is this a record field defined with DuplicateRecordFields? -- (See Note [GreNames]) isDuplicateRecFldGRE = maybe False ((DuplicateRecordFields ==) . flHasDuplicateRecordFields) . greFieldLabel isNoFieldSelectorGRE :: GlobalRdrElt -> Bool -- ^ Is this a record field defined with NoFieldSelectors? -- (See Note [NoFieldSelectors] in GHC.Rename.Env) isNoFieldSelectorGRE = maybe False ((NoFieldSelectors ==) . flHasFieldSelector) . greFieldLabel isFieldSelectorGRE :: GlobalRdrElt -> Bool -- ^ Is this a record field defined with FieldSelectors? -- (See Note [NoFieldSelectors] in GHC.Rename.Env) isFieldSelectorGRE = maybe False ((FieldSelectors ==) . flHasFieldSelector) . greFieldLabel greFieldLabel :: GlobalRdrElt -> Maybe FieldLabel -- ^ Returns the field label of this GRE, if it has one greFieldLabel = greNameFieldLabel . 
gre_name unQualOK :: GlobalRdrElt -> Bool -- ^ Test if an unqualified version of this thing would be in scope unQualOK (GRE {gre_lcl = lcl, gre_imp = iss }) | lcl = True | otherwise = any unQualSpecOK iss {- Note [GRE filtering] ~~~~~~~~~~~~~~~~~~~~~~~ (pickGREs rdr gres) takes a list of GREs which have the same OccName as 'rdr', say "x". It does two things: (a) filters the GREs to a subset that are in scope * Qualified, as 'M.x' if want_qual is Qual M _ * Unqualified, as 'x' if want_unqual is Unqual _ (b) for that subset, filter the provenance field (gre_lcl and gre_imp) to ones that brought it into scope qualified or unqualified resp. Example: module A ( f ) where import qualified Foo( f ) import Baz( f ) f = undefined Let's suppose that Foo.f and Baz.f are the same entity really, but the local 'f' is different, so there will be two GREs matching "f": gre1: gre_lcl = True, gre_imp = [] gre2: gre_lcl = False, gre_imp = [ imported from Foo, imported from Bar ] The use of "f" in the export list is ambiguous because it's in scope from the local def and the import Baz(f); but *not* the import qualified Foo. pickGREs returns two GRE gre1: gre_lcl = True, gre_imp = [] gre2: gre_lcl = False, gre_imp = [ imported from Bar ] Now the "ambiguous occurrence" message can correctly report how the ambiguity arises. -} pickGREs :: RdrName -> [GlobalRdrElt] -> [GlobalRdrElt] -- ^ Takes a list of GREs which have the right OccName 'x' -- Pick those GREs that are in scope -- * Qualified, as 'M.x' if want_qual is Qual M _ -- * Unqualified, as 'x' if want_unqual is Unqual _ -- -- Return each such GRE, with its ImportSpecs filtered, to reflect -- how it is in scope qualified or unqualified respectively. 
-- See Note [GRE filtering] pickGREs (Unqual {}) gres = mapMaybe pickUnqualGRE gres pickGREs (Qual mod _) gres = mapMaybe (pickQualGRE mod) gres pickGREs _ _ = [] -- I don't think this actually happens pickUnqualGRE :: GlobalRdrElt -> Maybe GlobalRdrElt pickUnqualGRE gre@(GRE { gre_lcl = lcl, gre_imp = iss }) | not lcl, null iss' = Nothing | otherwise = Just (gre { gre_imp = iss' }) where iss' = filterBag unQualSpecOK iss pickQualGRE :: ModuleName -> GlobalRdrElt -> Maybe GlobalRdrElt pickQualGRE mod gre@(GRE { gre_lcl = lcl, gre_imp = iss }) | not lcl', null iss' = Nothing | otherwise = Just (gre { gre_lcl = lcl', gre_imp = iss' }) where iss' = filterBag (qualSpecOK mod) iss lcl' = lcl && name_is_from mod name_is_from :: ModuleName -> Bool name_is_from mod = case greDefinitionModule gre of Just n_mod -> moduleName n_mod == mod Nothing -> False pickGREsModExp :: ModuleName -> [GlobalRdrElt] -> [(GlobalRdrElt,GlobalRdrElt)] -- ^ Pick GREs that are in scope *both* qualified *and* unqualified -- Return each GRE that is, as a pair -- (qual_gre, unqual_gre) -- These two GREs are the original GRE with imports filtered to express how -- it is in scope qualified an unqualified respectively -- -- Used only for the 'module M' item in export list; -- see 'GHC.Tc.Gen.Export.exports_from_avail' pickGREsModExp mod gres = mapMaybe (pickBothGRE mod) gres -- | isBuiltInSyntax filter out names for built-in syntax They -- just clutter up the environment (esp tuples), and the -- parser will generate Exact RdrNames for them, so the -- cluttered envt is no use. Really, it's only useful for -- GHC.Base and GHC.Tuple. 
pickBothGRE :: ModuleName -> GlobalRdrElt -> Maybe (GlobalRdrElt, GlobalRdrElt) pickBothGRE mod gre | isBuiltInSyntax (greMangledName gre) = Nothing | Just gre1 <- pickQualGRE mod gre , Just gre2 <- pickUnqualGRE gre = Just (gre1, gre2) | otherwise = Nothing -- Building GlobalRdrEnvs plusGlobalRdrEnv :: GlobalRdrEnv -> GlobalRdrEnv -> GlobalRdrEnv plusGlobalRdrEnv env1 env2 = plusOccEnv_C (foldr insertGRE) env1 env2 mkGlobalRdrEnv :: [GlobalRdrElt] -> GlobalRdrEnv mkGlobalRdrEnv gres = foldr add emptyGlobalRdrEnv gres where add gre env = extendOccEnv_Acc insertGRE Utils.singleton env (greOccName gre) gre insertGRE :: GlobalRdrElt -> [GlobalRdrElt] -> [GlobalRdrElt] insertGRE new_g [] = [new_g] insertGRE new_g (old_g : old_gs) | gre_name new_g == gre_name old_g = new_g `plusGRE` old_g : old_gs | otherwise = old_g : insertGRE new_g old_gs plusGRE :: GlobalRdrElt -> GlobalRdrElt -> GlobalRdrElt -- Used when the gre_name fields match plusGRE g1 g2 = GRE { gre_name = gre_name g1 , gre_lcl = gre_lcl g1 || gre_lcl g2 , gre_imp = gre_imp g1 `unionBags` gre_imp g2 , gre_par = gre_par g1 `plusParent` gre_par g2 } transformGREs :: (GlobalRdrElt -> GlobalRdrElt) -> [OccName] -> GlobalRdrEnv -> GlobalRdrEnv -- ^ Apply a transformation function to the GREs for these OccNames transformGREs trans_gre occs rdr_env = foldr trans rdr_env occs where trans occ env = case lookupOccEnv env occ of Just gres -> extendOccEnv env occ (map trans_gre gres) Nothing -> env extendGlobalRdrEnv :: GlobalRdrEnv -> GlobalRdrElt -> GlobalRdrEnv extendGlobalRdrEnv env gre = extendOccEnv_Acc insertGRE Utils.singleton env (greOccName gre) gre {- Note [GlobalRdrEnv shadowing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before adding new names to the GlobalRdrEnv we nuke some existing entries; this is "shadowing". The actual work is done by RdrEnv.shadowNames. 
Suppose env' = shadowNames env f `extendGlobalRdrEnv` M.f Then: * Looking up (Unqual f) in env' should succeed, returning M.f, even if env contains existing unqualified bindings for f. They are shadowed * Looking up (Qual M.f) in env' should succeed, returning M.f * Looking up (Qual X.f) in env', where X /= M, should be the same as looking up (Qual X.f) in env. That is, shadowNames does /not/ delete earlier qualified bindings There are two reasons for shadowing: * The GHCi REPL - Ids bought into scope on the command line (eg let x = True) have External Names, like Ghci4.x. We want a new binding for 'x' (say) to override the existing binding for 'x'. Example: ghci> :load M -- Brings `x` and `M.x` into scope ghci> x ghci> "Hello" ghci> M.x ghci> "hello" ghci> let x = True -- Shadows `x` ghci> x -- The locally bound `x` -- NOT an ambiguous reference ghci> True ghci> M.x -- M.x is still in scope! ghci> "Hello" So when we add `x = True` we must not delete the `M.x` from the `GlobalRdrEnv`; rather we just want to make it "qualified only"; hence the `set_qual` in `shadowNames`. See also Note [Interactively-bound Ids in GHCi] in GHC.Runtime.Context - Data types also have External Names, like Ghci4.T; but we still want 'T' to mean the newly-declared 'T', not an old one. * Nested Template Haskell declaration brackets See Note [Top-level Names in Template Haskell decl quotes] in GHC.Rename.Names Consider a TH decl quote: module M where f x = h [d| f = ...f...M.f... |] We must shadow the outer unqualified binding of 'f', else we'll get a complaint when extending the GlobalRdrEnv, saying that there are two bindings for 'f'. There are several tricky points: - This shadowing applies even if the binding for 'f' is in a where-clause, and hence is in the *local* RdrEnv not the *global* RdrEnv. This is done in lcl_env_TH in extendGlobalRdrEnvRn. - The External Name M.f from the enclosing module must certainly still be available. 
So we don't nuke it entirely; we just make it seem like qualified import. - We only shadow *External* names (which come from the main module), or from earlier GHCi commands. Do not shadow *Internal* names because in the bracket [d| class C a where f :: a f = 4 |] rnSrcDecls will first call extendGlobalRdrEnvRn with C[f] from the class decl, and *separately* extend the envt with the value binding. At that stage, the class op 'f' will have an Internal name. -} shadowNames :: GlobalRdrEnv -> OccEnv a -> GlobalRdrEnv -- Remove certain old GREs that share the same OccName as this new Name. -- See Note [GlobalRdrEnv shadowing] for details shadowNames = minusOccEnv_C (\gres _ -> Just (mapMaybe shadow gres)) where shadow :: GlobalRdrElt -> Maybe GlobalRdrElt shadow old_gre@(GRE { gre_lcl = lcl, gre_imp = iss }) = case greDefinitionModule old_gre of Nothing -> Just old_gre -- Old name is Internal; do not shadow Just old_mod | null iss' -- Nothing remains -> Nothing | otherwise -> Just (old_gre { gre_lcl = False, gre_imp = iss' }) where iss' = lcl_imp `unionBags` mapMaybeBag set_qual iss lcl_imp | lcl = listToBag [mk_fake_imp_spec old_gre old_mod] | otherwise = emptyBag mk_fake_imp_spec old_gre old_mod -- Urgh! = ImpSpec id_spec ImpAll where old_mod_name = moduleName old_mod id_spec = ImpDeclSpec { is_mod = old_mod_name , is_as = old_mod_name , is_qual = True , is_dloc = greDefinitionSrcSpan old_gre } set_qual :: ImportSpec -> Maybe ImportSpec set_qual is = Just (is { is_decl = (is_decl is) { is_qual = True } }) {- ************************************************************************ * * ImportSpec * * ************************************************************************ -} -- | Import Specification -- -- The 'ImportSpec' of something says how it came to be imported -- It's quite elaborate so that we can give accurate unused-name warnings. 
data ImportSpec = ImpSpec { is_decl :: ImpDeclSpec, is_item :: ImpItemSpec } deriving( Eq, Data ) -- | Import Declaration Specification -- -- Describes a particular import declaration and is -- shared among all the 'Provenance's for that decl data ImpDeclSpec = ImpDeclSpec { is_mod :: ModuleName, -- ^ Module imported, e.g. @import Muggle@ -- Note the @Muggle@ may well not be -- the defining module for this thing! -- TODO: either should be Module, or there -- should be a Maybe UnitId here too. is_as :: ModuleName, -- ^ Import alias, e.g. from @as M@ (or @Muggle@ if there is no @as@ clause) is_qual :: Bool, -- ^ Was this import qualified? is_dloc :: SrcSpan -- ^ The location of the entire import declaration } deriving (Eq, Data) -- | Import Item Specification -- -- Describes import info a particular Name data ImpItemSpec = ImpAll -- ^ The import had no import list, -- or had a hiding list | ImpSome { is_explicit :: Bool, is_iloc :: SrcSpan -- Location of the import item } -- ^ The import had an import list. -- The 'is_explicit' field is @True@ iff the thing was named -- /explicitly/ in the import specs rather -- than being imported as part of a "..." group. Consider: -- -- > import C( T(..) ) -- -- Here the constructors of @T@ are not named explicitly; -- only @T@ is named explicitly. 
deriving (Eq, Data) bestImport :: [ImportSpec] -> ImportSpec -- See Note [Choosing the best import declaration] bestImport iss = case sortBy best iss of (is:_) -> is [] -> pprPanic "bestImport" (ppr iss) where best :: ImportSpec -> ImportSpec -> Ordering -- Less means better -- Unqualified always wins over qualified; then -- import-all wins over import-some; then -- earlier declaration wins over later best (ImpSpec { is_item = item1, is_decl = d1 }) (ImpSpec { is_item = item2, is_decl = d2 }) = (is_qual d1 `compare` is_qual d2) `thenCmp` (best_item item1 item2) `thenCmp` SrcLoc.leftmost_smallest (is_dloc d1) (is_dloc d2) best_item :: ImpItemSpec -> ImpItemSpec -> Ordering best_item ImpAll ImpAll = EQ best_item ImpAll (ImpSome {}) = LT best_item (ImpSome {}) ImpAll = GT best_item (ImpSome { is_explicit = e1 }) (ImpSome { is_explicit = e2 }) = e1 `compare` e2 -- False < True, so if e1 is explicit and e2 is not, we get GT {- Note [Choosing the best import declaration] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When reporting unused import declarations we use the following rules. (see [wiki:commentary/compiler/unused-imports]) Say that an import-item is either * an entire import-all decl (eg import Foo), or * a particular item in an import list (eg import Foo( ..., x, ...)). The general idea is that for each /occurrence/ of an imported name, we will attribute that use to one import-item. Once we have processed all the occurrences, any import items with no uses attributed to them are unused, and are warned about. More precisely: 1. For every RdrName in the program text, find its GlobalRdrElt. 2. Then, from the [ImportSpec] (gre_imp) of that GRE, choose one the "chosen import-item", and mark it "used". This is done by 'bestImport' 3. After processing all the RdrNames, bleat about any import-items that are unused. This is done in GHC.Rename.Names.warnUnusedImportDecls. 
The function 'bestImport' returns the dominant import among the ImportSpecs it is given, implementing Step 2. We say import-item A dominates import-item B if we choose A over B. In general, we try to choose the import that is most likely to render other imports unnecessary. Here is the dominance relationship we choose: a) import Foo dominates import qualified Foo. b) import Foo dominates import Foo(x). c) Otherwise choose the textually first one. Rationale for (a). Consider import qualified M -- Import #1 import M( x ) -- Import #2 foo = M.x + x The unqualified 'x' can only come from import #2. The qualified 'M.x' could come from either, but bestImport picks import #2, because it is more likely to be useful in other imports, as indeed it is in this case (see #5211 for a concrete example). But the rules are not perfect; consider import qualified M -- Import #1 import M( x ) -- Import #2 foo = M.x + M.y The M.x will use import #2, but M.y can only use import #1. -} unQualSpecOK :: ImportSpec -> Bool -- ^ Is in scope unqualified? unQualSpecOK is = not (is_qual (is_decl is)) qualSpecOK :: ModuleName -> ImportSpec -> Bool -- ^ Is in scope qualified with the given module? 
qualSpecOK mod is = mod == is_as (is_decl is) importSpecLoc :: ImportSpec -> SrcSpan importSpecLoc (ImpSpec decl ImpAll) = is_dloc decl importSpecLoc (ImpSpec _ item) = is_iloc item importSpecModule :: ImportSpec -> ModuleName importSpecModule is = is_mod (is_decl is) isExplicitItem :: ImpItemSpec -> Bool isExplicitItem ImpAll = False isExplicitItem (ImpSome {is_explicit = exp}) = exp pprNameProvenance :: GlobalRdrElt -> SDoc -- ^ Print out one place where the name was define/imported -- (With -dppr-debug, print them all) pprNameProvenance gre@(GRE { gre_lcl = lcl, gre_imp = iss }) = ifPprDebug (vcat pp_provs) (head pp_provs) where name = greMangledName gre pp_provs = pp_lcl ++ map pp_is (bagToList iss) pp_lcl = if lcl then [text "defined at" <+> ppr (nameSrcLoc name)] else [] pp_is is = sep [ppr is, ppr_defn_site is name] -- If we know the exact definition point (which we may do with GHCi) -- then show that too. But not if it's just "imported from X". ppr_defn_site :: ImportSpec -> Name -> SDoc ppr_defn_site imp_spec name | same_module && not (isGoodSrcSpan loc) = empty -- Nothing interesting to say | otherwise = parens $ hang (text "and originally defined" <+> pp_mod) 2 (pprLoc loc) where loc = nameSrcSpan name defining_mod = assertPpr (isExternalName name) (ppr name) $ nameModule name same_module = importSpecModule imp_spec == moduleName defining_mod pp_mod | same_module = empty | otherwise = text "in" <+> quotes (ppr defining_mod) instance Outputable ImportSpec where ppr imp_spec = text "imported" <+> qual <+> text "from" <+> quotes (ppr (importSpecModule imp_spec)) <+> pprLoc (importSpecLoc imp_spec) where qual | is_qual (is_decl imp_spec) = text "qualified" | otherwise = empty pprLoc :: SrcSpan -> SDoc pprLoc (RealSrcSpan s _) = text "at" <+> ppr s pprLoc (UnhelpfulSpan {}) = empty -- | Indicate if the given name is the "@" operator opIsAt :: RdrName -> Bool opIsAt e = e == mkUnqual varName (fsLit "@") 
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Set.hs0000644000000000000000000001670114472400113020714 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1998 -} {-# LANGUAGE GeneralizedNewtypeDeriving #-} module GHC.Types.Name.Set ( -- * Names set type NameSet, -- ** Manipulating these sets emptyNameSet, unitNameSet, mkNameSet, unionNameSet, unionNameSets, minusNameSet, elemNameSet, extendNameSet, extendNameSetList, delFromNameSet, delListFromNameSet, isEmptyNameSet, filterNameSet, intersectsNameSet, disjointNameSet, intersectNameSet, nameSetAny, nameSetAll, nameSetElemsStable, -- * Free variables FreeVars, -- ** Manipulating sets of free variables isEmptyFVs, emptyFVs, plusFVs, plusFV, mkFVs, addOneFV, unitFV, delFV, delFVs, intersectFVs, -- * Defs and uses Defs, Uses, DefUse, DefUses, -- ** Manipulating defs and uses emptyDUs, usesOnly, mkDUs, plusDU, findUses, duDefs, duUses, allUses, -- * Non-CAFfy names NonCaffySet(..) 
) where import GHC.Prelude import GHC.Types.Name import GHC.Data.OrdList import GHC.Types.Unique.Set import Data.List (sortBy) {- ************************************************************************ * * \subsection[Sets of names} * * ************************************************************************ -} type NameSet = UniqSet Name emptyNameSet :: NameSet unitNameSet :: Name -> NameSet extendNameSetList :: NameSet -> [Name] -> NameSet extendNameSet :: NameSet -> Name -> NameSet mkNameSet :: [Name] -> NameSet unionNameSet :: NameSet -> NameSet -> NameSet unionNameSets :: [NameSet] -> NameSet minusNameSet :: NameSet -> NameSet -> NameSet elemNameSet :: Name -> NameSet -> Bool isEmptyNameSet :: NameSet -> Bool delFromNameSet :: NameSet -> Name -> NameSet delListFromNameSet :: NameSet -> [Name] -> NameSet filterNameSet :: (Name -> Bool) -> NameSet -> NameSet intersectNameSet :: NameSet -> NameSet -> NameSet intersectsNameSet :: NameSet -> NameSet -> Bool disjointNameSet :: NameSet -> NameSet -> Bool -- ^ True if there is a non-empty intersection. -- @s1 `intersectsNameSet` s2@ doesn't compute @s2@ if @s1@ is empty isEmptyNameSet = isEmptyUniqSet emptyNameSet = emptyUniqSet unitNameSet = unitUniqSet mkNameSet = mkUniqSet extendNameSetList = addListToUniqSet extendNameSet = addOneToUniqSet unionNameSet = unionUniqSets unionNameSets = unionManyUniqSets minusNameSet = minusUniqSet elemNameSet = elementOfUniqSet delFromNameSet = delOneFromUniqSet filterNameSet = filterUniqSet intersectNameSet = intersectUniqSets disjointNameSet = disjointUniqSets delListFromNameSet set ns = foldl' delFromNameSet set ns intersectsNameSet s1 s2 = not (s1 `disjointNameSet` s2) nameSetAny :: (Name -> Bool) -> NameSet -> Bool nameSetAny = uniqSetAny nameSetAll :: (Name -> Bool) -> NameSet -> Bool nameSetAll = uniqSetAll -- | Get the elements of a NameSet with some stable ordering. -- This only works for Names that originate in the source code or have been -- tidied. 
-- See Note [Deterministic UniqFM] to learn about nondeterminism nameSetElemsStable :: NameSet -> [Name] nameSetElemsStable ns = sortBy stableNameCmp $ nonDetEltsUniqSet ns -- It's OK to use nonDetEltsUniqSet here because we immediately sort -- with stableNameCmp {- ************************************************************************ * * \subsection{Free variables} * * ************************************************************************ These synonyms are useful when we are thinking of free variables -} type FreeVars = NameSet plusFV :: FreeVars -> FreeVars -> FreeVars addOneFV :: FreeVars -> Name -> FreeVars unitFV :: Name -> FreeVars emptyFVs :: FreeVars plusFVs :: [FreeVars] -> FreeVars mkFVs :: [Name] -> FreeVars delFV :: Name -> FreeVars -> FreeVars delFVs :: [Name] -> FreeVars -> FreeVars intersectFVs :: FreeVars -> FreeVars -> FreeVars isEmptyFVs :: NameSet -> Bool isEmptyFVs = isEmptyNameSet emptyFVs = emptyNameSet plusFVs = unionNameSets plusFV = unionNameSet mkFVs = mkNameSet addOneFV = extendNameSet unitFV = unitNameSet delFV n s = delFromNameSet s n delFVs ns s = delListFromNameSet s ns intersectFVs = intersectNameSet {- ************************************************************************ * * Defs and uses * * ************************************************************************ -} -- | A set of names that are defined somewhere type Defs = NameSet -- | A set of names that are used somewhere type Uses = NameSet -- | @(Just ds, us) =>@ The use of any member of the @ds@ -- implies that all the @us@ are used too. -- Also, @us@ may mention @ds@. -- -- @Nothing =>@ Nothing is defined in this group, but -- nevertheless all the uses are essential. 
-- Used for instance declarations, for example type DefUse = (Maybe Defs, Uses) -- | A number of 'DefUse's in dependency order: earlier 'Defs' scope over later 'Uses' -- In a single (def, use) pair, the defs also scope over the uses type DefUses = OrdList DefUse emptyDUs :: DefUses emptyDUs = nilOL usesOnly :: Uses -> DefUses usesOnly uses = unitOL (Nothing, uses) mkDUs :: [(Defs,Uses)] -> DefUses mkDUs pairs = toOL [(Just defs, uses) | (defs,uses) <- pairs] plusDU :: DefUses -> DefUses -> DefUses plusDU = appOL duDefs :: DefUses -> Defs duDefs dus = foldr get emptyNameSet dus where get (Nothing, _u1) d2 = d2 get (Just d1, _u1) d2 = d1 `unionNameSet` d2 allUses :: DefUses -> Uses -- ^ Just like 'duUses', but 'Defs' are not eliminated from the 'Uses' returned allUses dus = foldr get emptyNameSet dus where get (_d1, u1) u2 = u1 `unionNameSet` u2 duUses :: DefUses -> Uses -- ^ Collect all 'Uses', regardless of whether the group is itself used, -- but remove 'Defs' on the way duUses dus = foldr get emptyNameSet dus where get (Nothing, rhs_uses) uses = rhs_uses `unionNameSet` uses get (Just defs, rhs_uses) uses = (rhs_uses `unionNameSet` uses) `minusNameSet` defs findUses :: DefUses -> Uses -> Uses -- ^ Given some 'DefUses' and some 'Uses', find all the uses, transitively. -- The result is a superset of the input 'Uses'; and includes things defined -- in the input 'DefUses' (but only if they are used) findUses dus uses = foldr get uses dus where get (Nothing, rhs_uses) uses = rhs_uses `unionNameSet` uses get (Just defs, rhs_uses) uses | defs `intersectsNameSet` uses -- Used || nameSetAny (startsWithUnderscore . nameOccName) defs -- At least one starts with an "_", -- so treat the group as used = rhs_uses `unionNameSet` uses | otherwise -- No def is used = uses -- | 'Id's which have no CAF references. This is a result of analysis of C--. -- It is always safe to use an empty 'NonCaffySet'. TODO Refer to Note. 
newtype NonCaffySet = NonCaffySet { ncs_nameSet :: NameSet } deriving (Semigroup, Monoid) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/PkgQual.hs0000644000000000000000000000203514472400113020640 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE LambdaCase #-} module GHC.Types.PkgQual where import GHC.Prelude import GHC.Types.SourceText import GHC.Unit.Types import GHC.Utils.Outputable import Data.Data -- | Package-qualifier as it was parsed data RawPkgQual = NoRawPkgQual -- ^ No package qualifier | RawPkgQual StringLiteral -- ^ Raw package qualifier string. deriving (Data) -- | Package-qualifier after renaming -- -- Renaming detects if "this" or the unit-id of the home-unit was used as a -- package qualifier. data PkgQual = NoPkgQual -- ^ No package qualifier | ThisPkg UnitId -- ^ Import from home-unit | OtherPkg UnitId -- ^ Import from another unit deriving (Data, Ord, Eq) instance Outputable RawPkgQual where ppr = \case NoRawPkgQual -> empty RawPkgQual (StringLiteral st p _) -> pprWithSourceText st (doubleQuotes (ftext p)) instance Outputable PkgQual where ppr = \case NoPkgQual -> empty ThisPkg u -> doubleQuotes (ppr u) OtherPkg u -> doubleQuotes (ppr u) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/RepType.hs0000644000000000000000000006507314472400113020677 0ustar0000000000000000 {-# LANGUAGE FlexibleContexts #-} module GHC.Types.RepType ( -- * Code generator views onto Types UnaryType, NvUnaryType, isNvUnaryType, unwrapType, -- * Predicates on types isZeroBitTy, -- * Type representation for the code generator typePrimRep, typePrimRep1, runtimeRepPrimRep, typePrimRepArgs, PrimRep(..), primRepToType, primRepToRuntimeRep, countFunRepArgs, countConRepArgs, dataConRuntimeRepStrictness, tyConPrimRep, tyConPrimRep1, runtimeRepPrimRep_maybe, kindPrimRep_maybe, typePrimRep_maybe, -- * Unboxed sum representation type ubxSumRepType, layoutUbxSum, typeSlotTy, SlotTy (..), slotPrimRep, primRepSlot, -- * Is this type known to be data? 
mightBeFunTy ) where import GHC.Prelude import GHC.Types.Basic (Arity, RepArity) import GHC.Core.DataCon import GHC.Builtin.Names import GHC.Core.Coercion import GHC.Core.TyCon import GHC.Core.TyCon.RecWalk import GHC.Core.TyCo.Rep import GHC.Core.Type import {-# SOURCE #-} GHC.Builtin.Types ( anyTypeOfKind , vecRepDataConTyCon , liftedRepTy, unliftedRepTy, zeroBitRepTy , intRepDataConTy , int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy , wordRepDataConTy , word16RepDataConTy, word8RepDataConTy, word32RepDataConTy, word64RepDataConTy , addrRepDataConTy , floatRepDataConTy, doubleRepDataConTy , vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy , vec64DataConTy , int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy , int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy , word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy , doubleElemRepDataConTy ) import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.List (sort) import qualified Data.IntSet as IS {- ********************************************************************** * * Representation types * * ********************************************************************** -} type NvUnaryType = Type type UnaryType = Type -- Both are always a value type; i.e. its kind is TYPE rr -- for some rr; moreover the rr is never a variable. -- -- NvUnaryType : never an unboxed tuple or sum, or void -- -- UnaryType : never an unboxed tuple or sum; -- can be Void# or (# #) isNvUnaryType :: Type -> Bool isNvUnaryType ty | [_] <- typePrimRep ty = True | otherwise = False -- INVARIANT: the result list is never empty. typePrimRepArgs :: HasDebugCallStack => Type -> [PrimRep] typePrimRepArgs ty | [] <- reps = [VoidRep] | otherwise = reps where reps = typePrimRep ty -- | Gets rid of the stuff that prevents us from understanding the -- runtime representation of a type. 
Including: -- 1. Casts -- 2. Newtypes -- 3. Foralls -- 4. Synonyms -- But not type/data families, because we don't have the envs to hand. unwrapType :: Type -> Type unwrapType ty | Just (_, unwrapped) <- topNormaliseTypeX stepper mappend inner_ty = unwrapped | otherwise = inner_ty where inner_ty = go ty go t | Just t' <- coreView t = go t' go (ForAllTy _ t) = go t go (CastTy t _) = go t go t = t -- cf. Coercion.unwrapNewTypeStepper stepper rec_nts tc tys | Just (ty', _) <- instNewTyCon_maybe tc tys = case checkRecTc rec_nts tc of Just rec_nts' -> NS_Step rec_nts' (go ty') () Nothing -> NS_Abort -- infinite newtypes | otherwise = NS_Done countFunRepArgs :: Arity -> Type -> RepArity countFunRepArgs 0 _ = 0 countFunRepArgs n ty | FunTy _ _ arg res <- unwrapType ty = length (typePrimRepArgs arg) + countFunRepArgs (n - 1) res | otherwise = pprPanic "countFunRepArgs: arity greater than type can handle" (ppr (n, ty, typePrimRep ty)) countConRepArgs :: DataCon -> RepArity countConRepArgs dc = go (dataConRepArity dc) (dataConRepType dc) where go :: Arity -> Type -> RepArity go 0 _ = 0 go n ty | FunTy _ _ arg res <- unwrapType ty = length (typePrimRep arg) + go (n - 1) res | otherwise = pprPanic "countConRepArgs: arity greater than type can handle" (ppr (n, ty, typePrimRep ty)) dataConRuntimeRepStrictness :: HasDebugCallStack => DataCon -> [StrictnessMark] -- ^ Give the demands on the arguments of a -- Core constructor application (Con dc args) at runtime. -- Assumes the constructor is not levity polymorphic. For example -- unboxed tuples won't work. dataConRuntimeRepStrictness dc = -- pprTrace "dataConRuntimeRepStrictness" (ppr dc $$ ppr (dataConRepArgTys dc)) $ let repMarks = dataConRepStrictness dc repTys = map irrelevantMult $ dataConRepArgTys dc in -- todo: assert dc != unboxedTuple/unboxedSum go repMarks repTys [] where go (mark:marks) (ty:types) out_marks -- Zero-width argument, mark is irrelevant at runtime. 
| -- pprTrace "VoidTy" (ppr ty) $ (isZeroBitTy ty) = go marks types out_marks -- Single rep argument, e.g. Int -- Keep mark as-is | [_] <- reps = go marks types (mark:out_marks) -- Multi-rep argument, e.g. (# Int, Bool #) or (# Int | Bool #) -- Make up one non-strict mark per runtime argument. | otherwise -- TODO: Assert real_reps /= null = go marks types ((replicate (length real_reps) NotMarkedStrict)++out_marks) where reps = typePrimRep ty real_reps = filter (not . isVoidRep) $ reps go [] [] out_marks = reverse out_marks go _m _t _o = pprPanic "dataConRuntimeRepStrictness2" (ppr dc $$ ppr _m $$ ppr _t $$ ppr _o) -- | True if the type has zero width. isZeroBitTy :: HasDebugCallStack => Type -> Bool isZeroBitTy = null . typePrimRep {- ********************************************************************** * * Unboxed sums See Note [Translating unboxed sums to unboxed tuples] in GHC.Stg.Unarise * * ********************************************************************** -} type SortedSlotTys = [SlotTy] -- | Given the arguments of a sum type constructor application, -- return the unboxed sum rep type. -- -- E.g. -- -- (# Int# | Maybe Int | (# Int#, Float# #) #) -- -- We call `ubxSumRepType [ [IntRep], [LiftedRep], [IntRep, FloatRep] ]`, -- which returns [WordSlot, PtrSlot, WordSlot, FloatSlot] -- -- INVARIANT: Result slots are sorted (via Ord SlotTy), except that at the head -- of the list we have the slot for the tag. ubxSumRepType :: [[PrimRep]] -> [SlotTy] ubxSumRepType constrs0 -- These first two cases never classify an actual unboxed sum, which always -- has at least two disjuncts. But it could happen if a user writes, e.g., -- forall (a :: TYPE (SumRep [IntRep])). ... -- which could never be instantiated. We still don't want to panic. 
| constrs0 `lengthLessThan` 2 = [WordSlot] | otherwise = let combine_alts :: [SortedSlotTys] -- slots of constructors -> SortedSlotTys -- final slots combine_alts constrs = foldl' merge [] constrs merge :: SortedSlotTys -> SortedSlotTys -> SortedSlotTys merge existing_slots [] = existing_slots merge [] needed_slots = needed_slots merge (es : ess) (s : ss) | Just s' <- s `fitsIn` es = -- found a slot, use it s' : merge ess ss | s < es = -- we need a new slot and this is the right place for it s : merge (es : ess) ss | otherwise = -- keep searching for a slot es : merge ess (s : ss) -- Nesting unboxed tuples and sums is OK, so we need to flatten first. rep :: [PrimRep] -> SortedSlotTys rep ty = sort (map primRepSlot ty) sumRep = WordSlot : combine_alts (map rep constrs0) -- WordSlot: for the tag of the sum in sumRep layoutUbxSum :: HasDebugCallStack => SortedSlotTys -- Layout of sum. Does not include tag. -- We assume that they are in increasing order -> [SlotTy] -- Slot types of things we want to map to locations in the -- sum layout -> [Int] -- Where to map 'things' in the sum layout layoutUbxSum sum_slots0 arg_slots0 = go arg_slots0 IS.empty where go :: [SlotTy] -> IS.IntSet -> [Int] go [] _ = [] go (arg : args) used = let slot_idx = findSlot arg 0 sum_slots0 used in slot_idx : go args (IS.insert slot_idx used) findSlot :: SlotTy -> Int -> SortedSlotTys -> IS.IntSet -> Int findSlot arg slot_idx (slot : slots) useds | not (IS.member slot_idx useds) , Just slot == arg `fitsIn` slot = slot_idx | otherwise = findSlot arg (slot_idx + 1) slots useds findSlot _ _ [] _ = pprPanic "findSlot" (text "Can't find slot" $$ text "sum_slots:" <> ppr sum_slots0 $$ text "arg_slots:" <> ppr arg_slots0 ) -------------------------------------------------------------------------------- -- We have 3 kinds of slots: -- -- - Pointer slot: Only shared between actual pointers to Haskell heap (i.e. -- boxed objects). These come in two variants: Lifted and unlifted (see -- #19645). 
-- -- - Word slots: Shared between IntRep, WordRep, Int64Rep, Word64Rep, AddrRep. -- -- - Float slots: Shared between floating point types. -- -- - Void slots: Shared between void types. Not used in sums. -- -- TODO(michalt): We should probably introduce `SlotTy`s for 8-/16-/32-bit -- values, so that we can pack things more tightly. data SlotTy = PtrLiftedSlot | PtrUnliftedSlot | WordSlot | Word64Slot | FloatSlot | DoubleSlot | VecSlot Int PrimElemRep deriving (Eq, Ord) -- Constructor order is important! If slot A could fit into slot B -- then slot A must occur first. E.g. FloatSlot before DoubleSlot -- -- We are assuming that WordSlot is smaller than or equal to Word64Slot -- (would not be true on a 128-bit machine) instance Outputable SlotTy where ppr PtrLiftedSlot = text "PtrLiftedSlot" ppr PtrUnliftedSlot = text "PtrUnliftedSlot" ppr Word64Slot = text "Word64Slot" ppr WordSlot = text "WordSlot" ppr DoubleSlot = text "DoubleSlot" ppr FloatSlot = text "FloatSlot" ppr (VecSlot n e) = text "VecSlot" <+> ppr n <+> ppr e typeSlotTy :: UnaryType -> Maybe SlotTy typeSlotTy ty | isZeroBitTy ty = Nothing | otherwise = Just (primRepSlot (typePrimRep1 ty)) primRepSlot :: PrimRep -> SlotTy primRepSlot VoidRep = pprPanic "primRepSlot" (text "No slot for VoidRep") primRepSlot LiftedRep = PtrLiftedSlot primRepSlot UnliftedRep = PtrUnliftedSlot primRepSlot IntRep = WordSlot primRepSlot Int8Rep = WordSlot primRepSlot Int16Rep = WordSlot primRepSlot Int32Rep = WordSlot primRepSlot Int64Rep = Word64Slot primRepSlot WordRep = WordSlot primRepSlot Word8Rep = WordSlot primRepSlot Word16Rep = WordSlot primRepSlot Word32Rep = WordSlot primRepSlot Word64Rep = Word64Slot primRepSlot AddrRep = WordSlot primRepSlot FloatRep = FloatSlot primRepSlot DoubleRep = DoubleSlot primRepSlot (VecRep n e) = VecSlot n e slotPrimRep :: SlotTy -> PrimRep slotPrimRep PtrLiftedSlot = LiftedRep slotPrimRep PtrUnliftedSlot = UnliftedRep slotPrimRep Word64Slot = Word64Rep slotPrimRep WordSlot = WordRep 
slotPrimRep DoubleSlot = DoubleRep slotPrimRep FloatSlot = FloatRep slotPrimRep (VecSlot n e) = VecRep n e -- | Returns the bigger type if one fits into the other. (commutative) -- -- Note that lifted and unlifted pointers are *not* in a fits-in relation for -- the reasons described in Note [Don't merge lifted and unlifted slots] in -- GHC.Stg.Unarise. fitsIn :: SlotTy -> SlotTy -> Maybe SlotTy fitsIn ty1 ty2 | ty1 == ty2 = Just ty1 | isWordSlot ty1 && isWordSlot ty2 = Just (max ty1 ty2) | otherwise = Nothing -- We used to share slots between Float/Double but currently we can't easily -- covert between float/double in a way that is both work free and safe. -- So we put them in different slots. -- See Note [Casting slot arguments] where isWordSlot Word64Slot = True isWordSlot WordSlot = True isWordSlot _ = False {- ********************************************************************** * * PrimRep * * ************************************************************************* Note [RuntimeRep and PrimRep] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This Note describes the relationship between GHC.Types.RuntimeRep (of levity/representation polymorphism fame) and GHC.Core.TyCon.PrimRep, as these types are closely related. A "primitive entity" is one that can be * stored in one register * manipulated with one machine instruction Examples include: * a 32-bit integer * a 32-bit float * a 64-bit float * a machine address (heap pointer), etc. * a quad-float (on a machine with SIMD register and instructions) * ...etc... The "representation or a primitive entity" specifies what kind of register is needed and how many bits are required. The data type GHC.Core.TyCon.PrimRep enumerates all the possibilities. data PrimRep = VoidRep -- See Note [VoidRep] | LiftedRep -- ^ Lifted pointer | UnliftedRep -- ^ Unlifted pointer | Int8Rep -- ^ Signed, 8-bit value | Int16Rep -- ^ Signed, 16-bit value ...etc... 
| VecRep Int PrimElemRep -- ^ SIMD fixed-width vector The Haskell source language is a bit more flexible: a single value may need multiple PrimReps. For example utup :: (# Int, Int #) -> Bool utup x = ... Here x :: (# Int, Int #), and that takes two registers, and two instructions to move around. Unboxed sums are similar. Every Haskell expression e has a type ty, whose kind is of form TYPE rep e :: ty :: TYPE rep where rep :: RuntimeRep. Here rep describes the runtime representation for e's value, but RuntimeRep has some extra cases: data RuntimeRep = VecRep VecCount VecElem -- ^ a SIMD vector type | TupleRep [RuntimeRep] -- ^ An unboxed tuple of the given reps | SumRep [RuntimeRep] -- ^ An unboxed sum of the given reps | BoxedRep Levity -- ^ boxed; represented by a pointer | IntRep -- ^ signed, word-sized value ...etc... data Levity = Lifted | Unlifted It's all in 1-1 correspondence with PrimRep except for TupleRep and SumRep, which describe unboxed products and sums respectively. RuntimeRep is defined in the library ghc-prim:GHC.Types. It is also "wired-in" to GHC: see GHC.Builtin.Types.runtimeRepTyCon. The unarisation pass, in GHC.Stg.Unarise, transforms the program, so that every variable has a type that has a PrimRep. For example, unarisation transforms our utup function above, to take two Int arguments instead of one (# Int, Int #) argument. Also, note that boxed types are represented slightly differently in RuntimeRep and PrimRep. PrimRep just has the nullary LiftedRep and UnliftedRep data constructors. RuntimeRep has a BoxedRep data constructor, which accepts a Levity. The subtle distinction is that since BoxedRep can accept a variable argument, RuntimeRep can talk about levity polymorphic types. PrimRep, by contrast, cannot. See also Note [Getting from RuntimeRep to PrimRep] and Note [VoidRep]. Note [VoidRep] ~~~~~~~~~~~~~~ PrimRep contains a constructor VoidRep, while RuntimeRep does not. 
Yet representations are often characterised by a list of PrimReps, where a void would be denoted as []. (See also Note [RuntimeRep and PrimRep].) However, after the unariser, all identifiers have exactly one PrimRep, but void arguments still exist. Thus, PrimRep includes VoidRep to describe these binders. Perhaps post-unariser representations (which need VoidRep) should be a different type than pre-unariser representations (which use a list and do not need VoidRep), but we have what we have. RuntimeRep instead uses TupleRep '[] to denote a void argument. When converting a TupleRep '[] into a list of PrimReps, we get an empty list. Note [Getting from RuntimeRep to PrimRep] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ General info on RuntimeRep and PrimRep is in Note [RuntimeRep and PrimRep]. How do we get from an Id to the list or PrimReps used to store it? We get the Id's type ty (using idType), then ty's kind ki (using typeKind), then pattern-match on ki to extract rep (in kindPrimRep), then extract the PrimRep from the RuntimeRep (in runtimeRepPrimRep). We now must convert the RuntimeRep to a list of PrimReps. Let's look at two examples: 1. x :: Int# 2. y :: (# Int, Word# #) With these types, we can extract these kinds: 1. Int# :: TYPE IntRep 2. (# Int, Word# #) :: TYPE (TupleRep [LiftedRep, WordRep]) In the end, we will get these PrimReps: 1. [IntRep] 2. [LiftedRep, WordRep] It would thus seem that we should have a function somewhere of type `RuntimeRep -> [PrimRep]`. This doesn't work though: when we look at the argument of TYPE, we get something of type Type (of course). RuntimeRep exists in the user's program, but not in GHC as such. Instead, we must decompose the Type of kind RuntimeRep into tycons and extract the PrimReps from the TyCons. This is what runtimeRepPrimRep does: it takes a Type and returns a [PrimRep] runtimeRepPrimRep works by using tyConRuntimeRepInfo. 
That function should be passed the TyCon produced by promoting one of the constructors of RuntimeRep into type-level data. The RuntimeRep promoted datacons are associated with a RuntimeRepInfo (stored directly in the PromotedDataCon constructor of TyCon, field promDcRepInfo). This pairing happens in GHC.Builtin.Types. A RuntimeRepInfo usually(*) contains a function from [Type] to [PrimRep]: the [Type] are the arguments to the promoted datacon. These arguments are necessary for the TupleRep and SumRep constructors, so that this process can recur, producing a flattened list of PrimReps. Calling this extracted function happens in runtimeRepPrimRep; the functions themselves are defined in tupleRepDataCon and sumRepDataCon, both in GHC.Builtin.Types. The (*) above is to support vector representations. RuntimeRep refers to VecCount and VecElem, whose promoted datacons have nuggets of information related to vectors; these form the other alternatives for RuntimeRepInfo. Returning to our examples, the Types we get (after stripping off TYPE) are 1. TyConApp (PromotedDataCon "IntRep") [] 2. TyConApp (PromotedDataCon "TupleRep") [TyConApp (PromotedDataCon ":") [ TyConApp (AlgTyCon "RuntimeRep") [] , TyConApp (PromotedDataCon "LiftedRep") [] , TyConApp (PromotedDataCon ":") [ TyConApp (AlgTyCon "RuntimeRep") [] , TyConApp (PromotedDataCon "WordRep") [] , TyConApp (PromotedDataCon "'[]") [TyConApp (AlgTyCon "RuntimeRep") []]]]] runtimeRepPrimRep calls tyConRuntimeRepInfo on (PromotedDataCon "IntRep"), resp. (PromotedDataCon "TupleRep"), extracting a function that will produce the PrimReps. In example 1, this function is passed an empty list (the empty list of args to IntRep) and returns the PrimRep IntRep. (See the definition of runtimeRepSimpleDataCons in GHC.Builtin.Types and its helper function mk_runtime_rep_dc.) Example 2 passes the promoted list as the one argument to the extracted function. 
The extracted function is defined as prim_rep_fun within tupleRepDataCon in GHC.Builtin.Types. It takes one argument, decomposes the promoted list (with extractPromotedList), and then recurs back to runtimeRepPrimRep to process the LiftedRep and WordRep, concatentating the results. -} -- | Discovers the primitive representation of a 'Type'. Returns -- a list of 'PrimRep': it's a list because of the possibility of -- no runtime representation (void) or multiple (unboxed tuple/sum) -- See also Note [Getting from RuntimeRep to PrimRep] typePrimRep :: HasDebugCallStack => Type -> [PrimRep] typePrimRep ty = kindPrimRep (text "typePrimRep" <+> parens (ppr ty <+> dcolon <+> ppr (typeKind ty))) (typeKind ty) -- | Discovers the primitive representation of a 'Type'. Returns -- a list of 'PrimRep': it's a list because of the possibility of -- no runtime representation (void) or multiple (unboxed tuple/sum) -- See also Note [Getting from RuntimeRep to PrimRep] -- Returns Nothing if rep can't be determined. Eg. levity polymorphic types. typePrimRep_maybe :: Type -> Maybe [PrimRep] typePrimRep_maybe ty = kindPrimRep_maybe (typeKind ty) -- | Like 'typePrimRep', but assumes that there is precisely one 'PrimRep' output; -- an empty list of PrimReps becomes a VoidRep. -- This assumption holds after unarise, see Note [Post-unarisation invariants]. -- Before unarise it may or may not hold. -- See also Note [RuntimeRep and PrimRep] and Note [VoidRep] typePrimRep1 :: HasDebugCallStack => UnaryType -> PrimRep typePrimRep1 ty = case typePrimRep ty of [] -> VoidRep [rep] -> rep _ -> pprPanic "typePrimRep1" (ppr ty $$ ppr (typePrimRep ty)) -- | Find the runtime representation of a 'TyCon'. Defined here to -- avoid module loops. Returns a list of the register shapes necessary. 
-- See also Note [Getting from RuntimeRep to PrimRep] tyConPrimRep :: HasDebugCallStack => TyCon -> [PrimRep] tyConPrimRep tc = kindPrimRep (text "kindRep tc" <+> ppr tc $$ ppr res_kind) res_kind where res_kind = tyConResKind tc -- | Like 'tyConPrimRep', but assumed that there is precisely zero or -- one 'PrimRep' output -- See also Note [Getting from RuntimeRep to PrimRep] and Note [VoidRep] tyConPrimRep1 :: HasDebugCallStack => TyCon -> PrimRep tyConPrimRep1 tc = case tyConPrimRep tc of [] -> VoidRep [rep] -> rep _ -> pprPanic "tyConPrimRep1" (ppr tc $$ ppr (tyConPrimRep tc)) -- | Take a kind (of shape @TYPE rr@) and produce the 'PrimRep's -- of values of types of this kind. -- See also Note [Getting from RuntimeRep to PrimRep] kindPrimRep :: HasDebugCallStack => SDoc -> Kind -> [PrimRep] kindPrimRep doc ki | Just ki' <- coreView ki = kindPrimRep doc ki' kindPrimRep doc (TyConApp typ [runtime_rep]) = assert (typ `hasKey` tYPETyConKey) $ runtimeRepPrimRep doc runtime_rep kindPrimRep doc ki = pprPanic "kindPrimRep" (ppr ki $$ doc) -- NB: We could implement the partial methods by calling into the maybe -- variants here. But then both would need to pass around the doc argument. -- | Take a kind (of shape @TYPE rr@) and produce the 'PrimRep's -- of values of types of this kind. -- See also Note [Getting from RuntimeRep to PrimRep] -- Returns Nothing if rep can't be determined. Eg. levity polymorphic types. kindPrimRep_maybe :: HasDebugCallStack => Kind -> Maybe [PrimRep] kindPrimRep_maybe ki | Just ki' <- coreView ki = kindPrimRep_maybe ki' kindPrimRep_maybe (TyConApp typ [runtime_rep]) = assert (typ `hasKey` tYPETyConKey) $ runtimeRepPrimRep_maybe runtime_rep kindPrimRep_maybe _ki = Nothing -- | Take a type of kind RuntimeRep and extract the list of 'PrimRep' that -- it encodes. 
See also Note [Getting from RuntimeRep to PrimRep] -- The [PrimRep] is the final runtime representation /after/ unarisation runtimeRepPrimRep :: HasDebugCallStack => SDoc -> Type -> [PrimRep] runtimeRepPrimRep doc rr_ty | Just rr_ty' <- coreView rr_ty = runtimeRepPrimRep doc rr_ty' | TyConApp rr_dc args <- rr_ty , RuntimeRep fun <- tyConRuntimeRepInfo rr_dc = fun args | otherwise = pprPanic "runtimeRepPrimRep" (doc $$ ppr rr_ty) -- | Take a type of kind RuntimeRep and extract the list of 'PrimRep' that -- it encodes. See also Note [Getting from RuntimeRep to PrimRep] -- The [PrimRep] is the final runtime representation /after/ unarisation -- Returns Nothing if rep can't be determined. Eg. levity polymorphic types. runtimeRepPrimRep_maybe :: Type -> Maybe [PrimRep] runtimeRepPrimRep_maybe rr_ty | Just rr_ty' <- coreView rr_ty = runtimeRepPrimRep_maybe rr_ty' | TyConApp rr_dc args <- rr_ty , RuntimeRep fun <- tyConRuntimeRepInfo rr_dc = Just $! fun args | otherwise = Nothing -- | Convert a 'PrimRep' to a 'Type' of kind RuntimeRep primRepToRuntimeRep :: PrimRep -> Type primRepToRuntimeRep rep = case rep of VoidRep -> zeroBitRepTy LiftedRep -> liftedRepTy UnliftedRep -> unliftedRepTy IntRep -> intRepDataConTy Int8Rep -> int8RepDataConTy Int16Rep -> int16RepDataConTy Int32Rep -> int32RepDataConTy Int64Rep -> int64RepDataConTy WordRep -> wordRepDataConTy Word8Rep -> word8RepDataConTy Word16Rep -> word16RepDataConTy Word32Rep -> word32RepDataConTy Word64Rep -> word64RepDataConTy AddrRep -> addrRepDataConTy FloatRep -> floatRepDataConTy DoubleRep -> doubleRepDataConTy VecRep n elem -> TyConApp vecRepDataConTyCon [n', elem'] where n' = case n of 2 -> vec2DataConTy 4 -> vec4DataConTy 8 -> vec8DataConTy 16 -> vec16DataConTy 32 -> vec32DataConTy 64 -> vec64DataConTy _ -> pprPanic "Disallowed VecCount" (ppr n) elem' = case elem of Int8ElemRep -> int8ElemRepDataConTy Int16ElemRep -> int16ElemRepDataConTy Int32ElemRep -> int32ElemRepDataConTy Int64ElemRep -> int64ElemRepDataConTy 
Word8ElemRep -> word8ElemRepDataConTy Word16ElemRep -> word16ElemRepDataConTy Word32ElemRep -> word32ElemRepDataConTy Word64ElemRep -> word64ElemRepDataConTy FloatElemRep -> floatElemRepDataConTy DoubleElemRep -> doubleElemRepDataConTy -- | Convert a PrimRep back to a Type. Used only in the unariser to give types -- to fresh Ids. Really, only the type's representation matters. -- See also Note [RuntimeRep and PrimRep] primRepToType :: PrimRep -> Type primRepToType = anyTypeOfKind . mkTYPEapp . primRepToRuntimeRep -------------- mightBeFunTy :: Type -> Bool -- Return False only if we are *sure* it's a data type -- Look through newtypes etc as much as possible. Used to -- decide if we need to enter a closure via a slow call. -- -- AK: It would be nice to figure out and document the difference -- between this and isFunTy at some point. mightBeFunTy ty | [LiftedRep] <- typePrimRep ty , Just tc <- tyConAppTyCon_maybe (unwrapType ty) , isDataTyCon tc = False | otherwise = True ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/SafeHaskell.hs0000644000000000000000000000517314472400113021464 0ustar0000000000000000-- | This stuff here is related to supporting the Safe Haskell extension, -- primarily about storing under what trust type a module has been compiled. module GHC.Types.SafeHaskell ( IsSafeImport , SafeHaskellMode(..) , IfaceTrustInfo , getSafeMode , setSafeMode , noIfaceTrustInfo ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Utils.Outputable import Data.Word -- | Is an import a safe import? 
type IsSafeImport = Bool -- | The various Safe Haskell modes data SafeHaskellMode = Sf_None -- ^ inferred unsafe | Sf_Unsafe -- ^ declared and checked | Sf_Trustworthy -- ^ declared and checked | Sf_Safe -- ^ declared and checked | Sf_SafeInferred -- ^ inferred as safe | Sf_Ignore -- ^ @-fno-safe-haskell@ state deriving (Eq) instance Show SafeHaskellMode where show Sf_None = "None" show Sf_Unsafe = "Unsafe" show Sf_Trustworthy = "Trustworthy" show Sf_Safe = "Safe" show Sf_SafeInferred = "Safe-Inferred" show Sf_Ignore = "Ignore" instance Outputable SafeHaskellMode where ppr = text . show -- | Safe Haskell information for 'ModIface' -- Simply a wrapper around SafeHaskellMode to sepperate iface and flags newtype IfaceTrustInfo = TrustInfo SafeHaskellMode getSafeMode :: IfaceTrustInfo -> SafeHaskellMode getSafeMode (TrustInfo x) = x setSafeMode :: SafeHaskellMode -> IfaceTrustInfo setSafeMode = TrustInfo noIfaceTrustInfo :: IfaceTrustInfo noIfaceTrustInfo = setSafeMode Sf_None trustInfoToNum :: IfaceTrustInfo -> Word8 trustInfoToNum it = case getSafeMode it of Sf_None -> 0 Sf_Unsafe -> 1 Sf_Trustworthy -> 2 Sf_Safe -> 3 Sf_SafeInferred -> 4 Sf_Ignore -> 0 numToTrustInfo :: Word8 -> IfaceTrustInfo numToTrustInfo 0 = setSafeMode Sf_None numToTrustInfo 1 = setSafeMode Sf_Unsafe numToTrustInfo 2 = setSafeMode Sf_Trustworthy numToTrustInfo 3 = setSafeMode Sf_Safe numToTrustInfo 4 = setSafeMode Sf_SafeInferred numToTrustInfo n = error $ "numToTrustInfo: bad input number! (" ++ show n ++ ")" instance Outputable IfaceTrustInfo where ppr (TrustInfo Sf_None) = text "none" ppr (TrustInfo Sf_Ignore) = text "none" ppr (TrustInfo Sf_Unsafe) = text "unsafe" ppr (TrustInfo Sf_Trustworthy) = text "trustworthy" ppr (TrustInfo Sf_Safe) = text "safe" ppr (TrustInfo Sf_SafeInferred) = text "safe-inferred" instance Binary IfaceTrustInfo where put_ bh iftrust = putByte bh $ trustInfoToNum iftrust get bh = getByte bh >>= (return . 
numToTrustInfo) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/SourceError.hs0000644000000000000000000000504514472400113021552 0ustar0000000000000000-- | Source errors module GHC.Types.SourceError ( SourceError (..) , mkSrcErr , srcErrorMessages , throwErrors , throwOneError , handleSourceError ) where import GHC.Prelude import GHC.Types.Error import GHC.Utils.Monad import GHC.Utils.Panic import GHC.Utils.Exception import GHC.Utils.Error (pprMsgEnvelopeBagWithLoc) import GHC.Utils.Outputable import GHC.Driver.Errors.Ppr () -- instance Diagnostic GhcMessage import GHC.Driver.Errors.Types import Control.Monad.Catch as MC (MonadCatch, catch) mkSrcErr :: Messages GhcMessage -> SourceError mkSrcErr = SourceError srcErrorMessages :: SourceError -> Messages GhcMessage srcErrorMessages (SourceError msgs) = msgs throwErrors :: MonadIO io => Messages GhcMessage -> io a throwErrors = liftIO . throwIO . mkSrcErr throwOneError :: MonadIO io => MsgEnvelope GhcMessage -> io a throwOneError = throwErrors . singleMessage -- | A source error is an error that is caused by one or more errors in the -- source code. A 'SourceError' is thrown by many functions in the -- compilation pipeline. Inside GHC these errors are merely printed via -- 'log_action', but API clients may treat them differently, for example, -- insert them into a list box. If you want the default behaviour, use the -- idiom: -- -- > handleSourceError printExceptionAndWarnings $ do -- > ... api calls that may fail ... -- -- The 'SourceError's error messages can be accessed via 'srcErrorMessages'. -- This list may be empty if the compiler failed due to @-Werror@ -- ('Opt_WarnIsError'). -- -- See 'printExceptionAndWarnings' for more information on what to take care -- of when writing a custom error handler. 
newtype SourceError = SourceError (Messages GhcMessage) instance Show SourceError where -- We implement 'Show' because it's required by the 'Exception' instance, but diagnostics -- shouldn't be shown via the 'Show' typeclass, but rather rendered using the ppr functions. -- This also explains why there is no 'Show' instance for a 'MsgEnvelope'. show (SourceError msgs) = renderWithContext defaultSDocContext . vcat . pprMsgEnvelopeBagWithLoc . getMessages $ msgs instance Exception SourceError -- | Perform the given action and call the exception handler if the action -- throws a 'SourceError'. See 'SourceError' for more information. handleSourceError :: (MonadCatch m) => (SourceError -> m a) -- ^ exception handler -> m a -- ^ action to perform -> m a handleSourceError handler act = MC.catch act (\(e :: SourceError) -> handler e) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/SourceFile.hs0000644000000000000000000000565214472400113021344 0ustar0000000000000000module GHC.Types.SourceFile ( HscSource(..) , hscSourceToIsBoot , isHsBootOrSig , isHsigFile , hscSourceString ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Unit.Types -- Note [HscSource types] -- ~~~~~~~~~~~~~~~~~~~~~~ -- There are three types of source file for Haskell code: -- -- * HsSrcFile is an ordinary hs file which contains code, -- -- * HsBootFile is an hs-boot file, which is used to break -- recursive module imports (there will always be an -- HsSrcFile associated with it), and -- -- * HsigFile is an hsig file, which contains only type -- signatures and is used to specify signatures for -- modules. -- -- Syntactically, hs-boot files and hsig files are quite similar: they -- only include type signatures and must be associated with an -- actual HsSrcFile. isHsBootOrSig allows us to abstract over code -- which is indifferent to which. 
However, there are some important -- differences, mostly owing to the fact that hsigs are proper -- modules (you `import Sig` directly) whereas HsBootFiles are -- temporary placeholders (you `import {-# SOURCE #-} Mod). -- When we finish compiling the true implementation of an hs-boot, -- we replace the HomeModInfo with the real HsSrcFile. An HsigFile, on the -- other hand, is never replaced (in particular, we *cannot* use the -- HomeModInfo of the original HsSrcFile backing the signature, since it -- will export too many symbols.) -- -- Additionally, while HsSrcFile is the only Haskell file -- which has *code*, we do generate .o files for HsigFile, because -- this is how the recompilation checker figures out if a file -- needs to be recompiled. These are fake object files which -- should NOT be linked against. data HscSource = HsSrcFile -- ^ .hs file | HsBootFile -- ^ .hs-boot file | HsigFile -- ^ .hsig file deriving (Eq, Ord, Show) -- | Tests if an 'HscSource' is a boot file, primarily for constructing elements -- of 'BuildModule'. We conflate signatures and modules because they are bound -- in the same namespace; only boot interfaces can be disambiguated with -- `import {-# SOURCE #-}`. 
hscSourceToIsBoot :: HscSource -> IsBootInterface hscSourceToIsBoot HsBootFile = IsBoot hscSourceToIsBoot _ = NotBoot instance Binary HscSource where put_ bh HsSrcFile = putByte bh 0 put_ bh HsBootFile = putByte bh 1 put_ bh HsigFile = putByte bh 2 get bh = do h <- getByte bh case h of 0 -> return HsSrcFile 1 -> return HsBootFile _ -> return HsigFile hscSourceString :: HscSource -> String hscSourceString HsSrcFile = "" hscSourceString HsBootFile = "[boot]" hscSourceString HsigFile = "[sig]" -- See Note [HscSource types] isHsBootOrSig :: HscSource -> Bool isHsBootOrSig HsBootFile = True isHsBootOrSig HsigFile = True isHsBootOrSig _ = False isHsigFile :: HscSource -> Bool isHsigFile HsigFile = True isHsigFile _ = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/SourceText.hs0000644000000000000000000002665214472400113021414 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} -- | Source text -- -- Keeping Source Text for source to source conversions -- module GHC.Types.SourceText ( SourceText (..) , pprWithSourceText -- * Literals , IntegralLit(..) , FractionalLit(..) , StringLiteral(..) , negateIntegralLit , negateFractionalLit , mkIntegralLit , mkTHFractionalLit, rationalFromFractionalLit , integralFractionalLit, mkSourceFractionalLit , FractionalExponentBase(..) -- Used by the pm checker. , fractionalLitFromRational , mkFractionalLit ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Binary import GHC.Utils.Panic import Data.Function (on) import Data.Data import GHC.Real ( Ratio(..) ) import GHC.Types.SrcLoc {- Note [Pragma source text] ~~~~~~~~~~~~~~~~~~~~~~~~~ The lexer does a case-insensitive match for pragmas, as well as accepting both UK and US spelling variants. So {-# SPECIALISE #-} {-# SPECIALIZE #-} {-# Specialize #-} will all generate ITspec_prag token for the start of the pragma. 
In order to be able to do source to source conversions, the original source text for the token needs to be preserved, hence the `SourceText` field. So the lexer will then generate ITspec_prag "{ -# SPECIALISE" ITspec_prag "{ -# SPECIALIZE" ITspec_prag "{ -# Specialize" for the cases above. [without the space between '{' and '-', otherwise this comment won't parse] Note [Literal source text] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The lexer/parser converts literals from their original source text versions to an appropriate internal representation. This is a problem for tools doing source to source conversions, so the original source text is stored in literals where this can occur. Motivating examples for HsLit HsChar '\n' == '\x20` HsCharPrim '\x41`# == `A` HsString "\x20\x41" == " A" HsStringPrim "\x20"# == " "# HsInt 001 == 1 HsIntPrim 002# == 2# HsWordPrim 003## == 3## HsInt64Prim 004## == 4## HsWord64Prim 005## == 5## HsInteger 006 == 6 For OverLitVal HsIntegral 003 == 0x003 HsIsString "\x41nd" == "And" -} -- Note [Literal source text],[Pragma source text] data SourceText = SourceText String | NoSourceText -- ^ For when code is generated, e.g. TH, -- deriving. The pretty printer will then make -- its own representation of the item. deriving (Data, Show, Eq ) instance Outputable SourceText where ppr (SourceText s) = text "SourceText" <+> text s ppr NoSourceText = text "NoSourceText" instance Binary SourceText where put_ bh NoSourceText = putByte bh 0 put_ bh (SourceText s) = do putByte bh 1 put_ bh s get bh = do h <- getByte bh case h of 0 -> return NoSourceText 1 -> do s <- get bh return (SourceText s) _ -> panic $ "Binary SourceText:" ++ show h -- | Special combinator for showing string literals. 
pprWithSourceText :: SourceText -> SDoc -> SDoc pprWithSourceText NoSourceText d = d pprWithSourceText (SourceText src) _ = text src ------------------------------------------------ -- Literals ------------------------------------------------ -- | Integral Literal -- -- Used (instead of Integer) to represent negative zegative zero which is -- required for NegativeLiterals extension to correctly parse `-0::Double` -- as negative zero. See also #13211. data IntegralLit = IL { il_text :: SourceText , il_neg :: Bool -- See Note [Negative zero] in GHC.Rename.Pat , il_value :: Integer } deriving (Data, Show) mkIntegralLit :: Integral a => a -> IntegralLit mkIntegralLit i = IL { il_text = SourceText (show i_integer) , il_neg = i < 0 , il_value = i_integer } where i_integer :: Integer i_integer = toInteger i negateIntegralLit :: IntegralLit -> IntegralLit negateIntegralLit (IL text neg value) = case text of SourceText ('-':src) -> IL (SourceText src) False (negate value) SourceText src -> IL (SourceText ('-':src)) True (negate value) NoSourceText -> IL NoSourceText (not neg) (negate value) -- | Fractional Literal -- -- Used (instead of Rational) to represent exactly the floating point literal that we -- encountered in the user's source program. This allows us to pretty-print exactly what -- the user wrote, which is important e.g. for floating point numbers that can't represented -- as Doubles (we used to via Double for pretty-printing). See also #2245. 
-- Note [FractionalLit representation] in GHC.HsToCore.Match.Literal -- The actual value then is: sign * fl_signi * (fl_exp_base^fl_exp) -- where sign = if fl_neg then (-1) else 1 -- -- For example FL { fl_neg = True, fl_signi = 5.3, fl_exp = 4, fl_exp_base = Base10 } -- denotes -5300 data FractionalLit = FL { fl_text :: SourceText -- ^ How the value was written in the source , fl_neg :: Bool -- See Note [Negative zero] , fl_signi :: Rational -- The significand component of the literal , fl_exp :: Integer -- The exponent component of the literal , fl_exp_base :: FractionalExponentBase -- See Note [fractional exponent bases] } deriving (Data, Show) -- The Show instance is required for the derived GHC.Parser.Lexer.Token instance when DEBUG is on -- See Note [FractionalLit representation] in GHC.HsToCore.Match.Literal data FractionalExponentBase = Base2 -- Used in hex fractional literals | Base10 deriving (Eq, Ord, Data, Show) mkFractionalLit :: SourceText -> Bool -> Rational -> Integer -> FractionalExponentBase -> FractionalLit mkFractionalLit = FL mkRationalWithExponentBase :: Rational -> Integer -> FractionalExponentBase -> Rational mkRationalWithExponentBase i e feb = i * (eb ^^ e) where eb = case feb of Base2 -> 2 ; Base10 -> 10 fractionalLitFromRational :: Rational -> FractionalLit fractionalLitFromRational r = FL { fl_text = NoSourceText , fl_neg = r < 0 , fl_signi = r , fl_exp = 0 , fl_exp_base = Base10 } rationalFromFractionalLit :: FractionalLit -> Rational rationalFromFractionalLit (FL _ _ i e expBase) = mkRationalWithExponentBase i e expBase mkTHFractionalLit :: Rational -> FractionalLit mkTHFractionalLit r = FL { fl_text = SourceText (show (realToFrac r::Double)) -- Converting to a Double here may technically lose -- precision (see #15502). We could alternatively -- convert to a Rational for the most accuracy, but -- it would cause Floats and Doubles to be displayed -- strangely, so we opt not to do this. 
(In contrast -- to mkIntegralLit, where we always convert to an -- Integer for the highest accuracy.) , fl_neg = r < 0 , fl_signi = r , fl_exp = 0 , fl_exp_base = Base10 } negateFractionalLit :: FractionalLit -> FractionalLit negateFractionalLit (FL text neg i e eb) = case text of SourceText ('-':src) -> FL (SourceText src) False (negate i) e eb SourceText src -> FL (SourceText ('-':src)) True (negate i) e eb NoSourceText -> FL NoSourceText (not neg) (negate i) e eb -- | The integer should already be negated if it's negative. integralFractionalLit :: Bool -> Integer -> FractionalLit integralFractionalLit neg i = FL { fl_text = SourceText (show i) , fl_neg = neg , fl_signi = i :% 1 , fl_exp = 0 , fl_exp_base = Base10 } -- | The arguments should already be negated if they are negative. mkSourceFractionalLit :: String -> Bool -> Integer -> Integer -> FractionalExponentBase -> FractionalLit mkSourceFractionalLit !str !b !r !i !ff = FL (SourceText str) b (r :% 1) i ff {- Note [fractional exponent bases] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For hexadecimal rationals of the form 0x0.3p10 the exponent is given on base 2 rather than base 10. These are the only options, hence the sum type. See also #15646. -} -- Comparison operations are needed when grouping literals -- for compiling pattern-matching (module GHC.HsToCore.Match.Literal) instance Eq IntegralLit where (==) = (==) `on` il_value instance Ord IntegralLit where compare = compare `on` il_value instance Outputable IntegralLit where ppr (IL (SourceText src) _ _) = text src ppr (IL NoSourceText _ value) = text (show value) -- | Compare fractional lits with small exponents for value equality but -- large values for syntactic equality. 
compareFractionalLit :: FractionalLit -> FractionalLit -> Ordering compareFractionalLit fl1 fl2 | fl_exp fl1 < 100 && fl_exp fl2 < 100 && fl_exp fl1 >= -100 && fl_exp fl2 >= -100 = rationalFromFractionalLit fl1 `compare` rationalFromFractionalLit fl2 | otherwise = (compare `on` (\x -> (fl_signi x, fl_exp x, fl_exp_base x))) fl1 fl2 -- | Be wary of using this instance to compare for equal *values* when exponents are -- large. The same value expressed in different syntactic form won't compare as equal when -- any of the exponents is >= 100. instance Eq FractionalLit where (==) fl1 fl2 = case compare fl1 fl2 of EQ -> True _ -> False -- | Be wary of using this instance to compare for equal *values* when exponents are -- large. The same value expressed in different syntactic form won't compare as equal when -- any of the exponents is >= 100. instance Ord FractionalLit where compare = compareFractionalLit instance Outputable FractionalLit where ppr (fl@(FL {})) = pprWithSourceText (fl_text fl) $ rational $ mkRationalWithExponentBase (fl_signi fl) (fl_exp fl) (fl_exp_base fl) -- | A String Literal in the source, including its original raw format for use by -- source to source manipulation tools. data StringLiteral = StringLiteral { sl_st :: SourceText, -- literal raw source. -- See not [Literal source text] sl_fs :: FastString, -- literal string value sl_tc :: Maybe RealSrcSpan -- Location of -- possible -- trailing comma -- AZ: if we could have a LocatedA -- StringLiteral we would not need sl_tc, but -- that would cause import loops. -- AZ:2: sl_tc should be an EpaAnchor, to allow -- editing and reprinting the AST. Need a more -- robust solution. 
} deriving Data instance Eq StringLiteral where (StringLiteral _ a _) == (StringLiteral _ b _) = a == b instance Outputable StringLiteral where ppr sl = pprWithSourceText (sl_st sl) (ftext $ sl_fs sl) instance Binary StringLiteral where put_ bh (StringLiteral st fs _) = do put_ bh st put_ bh fs get bh = do st <- get bh fs <- get bh return (StringLiteral st fs Nothing) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/SrcLoc.hs0000644000000000000000000010414614472400113020467 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TypeFamilies #-} -- (c) The University of Glasgow, 1992-2006 -- | This module contains types that relate to the positions of things -- in source files, and allow tagging of those things with locations module GHC.Types.SrcLoc ( -- * SrcLoc RealSrcLoc, -- Abstract SrcLoc(..), -- ** Constructing SrcLoc mkSrcLoc, mkRealSrcLoc, mkGeneralSrcLoc, noSrcLoc, -- "I'm sorry, I haven't a clue" generatedSrcLoc, -- Code generated within the compiler interactiveSrcLoc, -- Code from an interactive session advanceSrcLoc, advanceBufPos, -- ** Unsafely deconstructing SrcLoc -- These are dubious exports, because they crash on some inputs srcLocFile, -- return the file name part srcLocLine, -- return the line part srcLocCol, -- return the column part -- * SrcSpan RealSrcSpan, -- Abstract SrcSpan(..), UnhelpfulSpanReason(..), -- ** Constructing SrcSpan mkGeneralSrcSpan, mkSrcSpan, mkRealSrcSpan, noSrcSpan, generatedSrcSpan, isGeneratedSrcSpan, wiredInSrcSpan, -- Something wired into the compiler interactiveSrcSpan, srcLocSpan, realSrcLocSpan, combineSrcSpans, srcSpanFirstCharacter, -- ** Deconstructing SrcSpan srcSpanStart, srcSpanEnd, realSrcSpanStart, realSrcSpanEnd, srcSpanFileName_maybe, pprUserRealSpan, pprUnhelpfulSpanReason, pprUserSpan, unhelpfulSpanFS, srcSpanToRealSrcSpan, -- ** Unsafely deconstructing 
SrcSpan -- These are dubious exports, because they crash on some inputs srcSpanFile, srcSpanStartLine, srcSpanEndLine, srcSpanStartCol, srcSpanEndCol, -- ** Predicates on SrcSpan isGoodSrcSpan, isOneLineSpan, isZeroWidthSpan, containsSpan, -- * StringBuffer locations BufPos(..), getBufPos, BufSpan(..), getBufSpan, removeBufSpan, -- * Located Located, RealLocated, GenLocated(..), -- ** Constructing Located noLoc, mkGeneralLocated, -- ** Deconstructing Located getLoc, unLoc, unRealSrcSpan, getRealSrcSpan, pprLocated, pprLocatedAlways, -- ** Modifying Located mapLoc, -- ** Combining and comparing Located values eqLocated, cmpLocated, cmpBufSpan, combineLocs, addCLoc, leftmost_smallest, leftmost_largest, rightmost_smallest, spans, isSubspanOf, isRealSubspanOf, sortLocated, sortRealLocated, lookupSrcLoc, lookupSrcSpan, liftL, -- * Parser locations PsLoc(..), PsSpan(..), PsLocated, advancePsLoc, mkPsSpan, psSpanStart, psSpanEnd, mkSrcSpanPs, combineRealSrcSpans, psLocatedToLocated, -- * Layout information LayoutInfo(..), leftmostColumn ) where import GHC.Prelude import GHC.Utils.Misc import GHC.Utils.Json import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import qualified GHC.Data.Strict as Strict import Control.DeepSeq import Control.Applicative (liftA2) import Data.Data import Data.List (sortBy, intercalate) import Data.Function (on) import qualified Data.Map as Map import qualified Data.Semigroup {- ************************************************************************ * * \subsection[SrcLoc-SrcLocations]{Source-location information} * * ************************************************************************ We keep information about the {\em definition} point for each entity; this is the obvious stuff: -} -- | Real Source Location -- -- Represents a single point within a file data RealSrcLoc = SrcLoc LexicalFastString -- A precise location (file name) {-# UNPACK #-} !Int -- line number, begins at 1 {-# UNPACK #-} !Int -- column number, 
begins at 1 deriving (Eq, Ord) -- | 0-based offset identifying the raw location in the 'StringBuffer'. -- -- The lexer increments the 'BufPos' every time a character (UTF-8 code point) -- is read from the input buffer. As UTF-8 is a variable-length encoding and -- 'StringBuffer' needs a byte offset for indexing, a 'BufPos' cannot be used -- for indexing. -- -- The parser guarantees that 'BufPos' are monotonic. See #17632. This means -- that syntactic constructs that appear later in the 'StringBuffer' are guaranteed to -- have a higher 'BufPos'. Constrast that with 'RealSrcLoc', which does *not* make the -- analogous guarantee about higher line/column numbers. -- -- This is due to #line and {-# LINE ... #-} pragmas that can arbitrarily -- modify 'RealSrcLoc'. Notice how 'setSrcLoc' and 'resetAlrLastLoc' in -- "GHC.Parser.Lexer" update 'PsLoc', modifying 'RealSrcLoc' but preserving -- 'BufPos'. -- -- Monotonicity makes 'BufPos' useful to determine the order in which syntactic -- elements appear in the source. Consider this example (haddockA041 in the test suite): -- -- haddockA041.hs -- {-# LANGUAGE CPP #-} -- -- | Module header documentation -- module Comments_and_CPP_include where -- #include "IncludeMe.hs" -- -- IncludeMe.hs: -- -- | Comment on T -- data T = MkT -- ^ Comment on MkT -- -- After the C preprocessor runs, the 'StringBuffer' will contain a program that -- looks like this (unimportant lines at the beginning removed): -- -- # 1 "haddockA041.hs" -- {-# LANGUAGE CPP #-} -- -- | Module header documentation -- module Comments_and_CPP_include where -- # 1 "IncludeMe.hs" 1 -- -- | Comment on T -- data T = MkT -- ^ Comment on MkT -- # 7 "haddockA041.hs" 2 -- -- The line pragmas inserted by CPP make the error messages more informative. -- The downside is that we can't use RealSrcLoc to determine the ordering of -- syntactic elements. 
-- -- With RealSrcLoc, we have the following location information recorded in the AST: -- * The module name is located at haddockA041.hs:3:8-31 -- * The Haddock comment "Comment on T" is located at IncludeMe:1:1-17 -- * The data declaration is located at IncludeMe.hs:2:1-32 -- -- Is the Haddock comment located between the module name and the data -- declaration? This is impossible to tell because the locations are not -- comparable; they even refer to different files. -- -- On the other hand, with 'BufPos', we have the following location information: -- * The module name is located at 846-870 -- * The Haddock comment "Comment on T" is located at 898-915 -- * The data declaration is located at 916-928 -- -- Aside: if you're wondering why the numbers are so high, try running -- @ghc -E haddockA041.hs@ -- and see the extra fluff that CPP inserts at the start of the file. -- -- For error messages, 'BufPos' is not useful at all. On the other hand, this is -- exactly what we need to determine the order of syntactic elements: -- 870 < 898, therefore the Haddock comment appears *after* the module name. -- 915 < 916, therefore the Haddock comment appears *before* the data declaration. -- -- We use 'BufPos' in in GHC.Parser.PostProcess.Haddock to associate Haddock -- comments with parts of the AST using location information (#17544). 
newtype BufPos = BufPos { bufPos :: Int } deriving (Eq, Ord, Show, Data) -- | Source Location data SrcLoc = RealSrcLoc !RealSrcLoc !(Strict.Maybe BufPos) -- See Note [Why Maybe BufPos] | UnhelpfulLoc !FastString -- Just a general indication deriving (Eq, Show) {- ************************************************************************ * * \subsection[SrcLoc-access-fns]{Access functions} * * ************************************************************************ -} mkSrcLoc :: FastString -> Int -> Int -> SrcLoc mkSrcLoc x line col = RealSrcLoc (mkRealSrcLoc x line col) Strict.Nothing mkRealSrcLoc :: FastString -> Int -> Int -> RealSrcLoc mkRealSrcLoc x line col = SrcLoc (LexicalFastString x) line col getBufPos :: SrcLoc -> Strict.Maybe BufPos getBufPos (RealSrcLoc _ mbpos) = mbpos getBufPos (UnhelpfulLoc _) = Strict.Nothing -- | Built-in "bad" 'SrcLoc' values for particular locations noSrcLoc, generatedSrcLoc, interactiveSrcLoc :: SrcLoc noSrcLoc = UnhelpfulLoc (fsLit "") generatedSrcLoc = UnhelpfulLoc (fsLit "") interactiveSrcLoc = UnhelpfulLoc (fsLit "") -- | Creates a "bad" 'SrcLoc' that has no detailed information about its location mkGeneralSrcLoc :: FastString -> SrcLoc mkGeneralSrcLoc = UnhelpfulLoc -- | Gives the filename of the 'RealSrcLoc' srcLocFile :: RealSrcLoc -> FastString srcLocFile (SrcLoc (LexicalFastString fname) _ _) = fname -- | Raises an error when used on a "bad" 'SrcLoc' srcLocLine :: RealSrcLoc -> Int srcLocLine (SrcLoc _ l _) = l -- | Raises an error when used on a "bad" 'SrcLoc' srcLocCol :: RealSrcLoc -> Int srcLocCol (SrcLoc _ _ c) = c -- | Move the 'SrcLoc' down by one line if the character is a newline, -- to the next 8-char tabstop if it is a tab, and across by one -- character in any other case advanceSrcLoc :: RealSrcLoc -> Char -> RealSrcLoc advanceSrcLoc (SrcLoc f l _) '\n' = SrcLoc f (l + 1) 1 advanceSrcLoc (SrcLoc f l c) '\t' = SrcLoc f l (advance_tabstop c) advanceSrcLoc (SrcLoc f l c) _ = SrcLoc f l (c + 1) advance_tabstop :: 
Int -> Int advance_tabstop c = ((((c - 1) `shiftR` 3) + 1) `shiftL` 3) + 1 advanceBufPos :: BufPos -> BufPos advanceBufPos (BufPos i) = BufPos (i+1) {- ************************************************************************ * * \subsection[SrcLoc-instances]{Instance declarations for various names} * * ************************************************************************ -} sortLocated :: [Located a] -> [Located a] sortLocated = sortBy (leftmost_smallest `on` getLoc) sortRealLocated :: [RealLocated a] -> [RealLocated a] sortRealLocated = sortBy (compare `on` getLoc) lookupSrcLoc :: SrcLoc -> Map.Map RealSrcLoc a -> Maybe a lookupSrcLoc (RealSrcLoc l _) = Map.lookup l lookupSrcLoc (UnhelpfulLoc _) = const Nothing lookupSrcSpan :: SrcSpan -> Map.Map RealSrcSpan a -> Maybe a lookupSrcSpan (RealSrcSpan l _) = Map.lookup l lookupSrcSpan (UnhelpfulSpan _) = const Nothing instance Outputable RealSrcLoc where ppr (SrcLoc (LexicalFastString src_path) src_line src_col) = hcat [ pprFastFilePath src_path <> colon , int src_line <> colon , int src_col ] -- I don't know why there is this style-based difference -- if userStyle sty || debugStyle sty then -- hcat [ pprFastFilePath src_path, char ':', -- int src_line, -- char ':', int src_col -- ] -- else -- hcat [text "{-# LINE ", int src_line, space, -- char '\"', pprFastFilePath src_path, text " #-}"] instance Outputable SrcLoc where ppr (RealSrcLoc l _) = ppr l ppr (UnhelpfulLoc s) = ftext s instance Data RealSrcSpan where -- don't traverse? toConstr _ = abstractConstr "RealSrcSpan" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "RealSrcSpan" instance Data SrcSpan where -- don't traverse? 
toConstr _ = abstractConstr "SrcSpan" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "SrcSpan" {- ************************************************************************ * * \subsection[SrcSpan]{Source Spans} * * ************************************************************************ -} {- | A 'RealSrcSpan' delimits a portion of a text file. It could be represented by a pair of (line,column) coordinates, but in fact we optimise slightly by using more compact representations for single-line and zero-length spans, both of which are quite common. The end position is defined to be the column /after/ the end of the span. That is, a span of (1,1)-(1,2) is one character long, and a span of (1,1)-(1,1) is zero characters long. -} -- | Real Source Span data RealSrcSpan = RealSrcSpan' { srcSpanFile :: !FastString, srcSpanSLine :: {-# UNPACK #-} !Int, srcSpanSCol :: {-# UNPACK #-} !Int, srcSpanELine :: {-# UNPACK #-} !Int, srcSpanECol :: {-# UNPACK #-} !Int } deriving Eq -- | StringBuffer Source Span data BufSpan = BufSpan { bufSpanStart, bufSpanEnd :: {-# UNPACK #-} !BufPos } deriving (Eq, Ord, Show, Data) instance Semigroup BufSpan where BufSpan start1 end1 <> BufSpan start2 end2 = BufSpan (min start1 start2) (max end1 end2) -- | Source Span -- -- A 'SrcSpan' identifies either a specific portion of a text file -- or a human-readable description of a location. 
data SrcSpan = RealSrcSpan !RealSrcSpan !(Strict.Maybe BufSpan) -- See Note [Why Maybe BufPos] | UnhelpfulSpan !UnhelpfulSpanReason deriving (Eq, Show) -- Show is used by GHC.Parser.Lexer, because we -- derive Show for Token data UnhelpfulSpanReason = UnhelpfulNoLocationInfo | UnhelpfulWiredIn | UnhelpfulInteractive | UnhelpfulGenerated | UnhelpfulOther !FastString deriving (Eq, Show) removeBufSpan :: SrcSpan -> SrcSpan removeBufSpan (RealSrcSpan s _) = RealSrcSpan s Strict.Nothing removeBufSpan s = s {- Note [Why Maybe BufPos] ~~~~~~~~~~~~~~~~~~~~~~~~~~ In SrcLoc we store (Maybe BufPos); in SrcSpan we store (Maybe BufSpan). Why the Maybe? Surely, the lexer can always fill in the buffer position, and it guarantees to do so. However, sometimes the SrcLoc/SrcSpan is constructed in a different context where the buffer location is not available, and then we use Nothing instead of a fake value like BufPos (-1). Perhaps the compiler could be re-engineered to pass around BufPos more carefully and never discard it, and this 'Maybe' could be removed. If you're interested in doing so, you may find this ripgrep query useful: rg "RealSrc(Loc|Span).*?Nothing" For example, it is not uncommon to whip up source locations for e.g. error messages, constructing a SrcSpan without a BufSpan. 
-} instance ToJson SrcSpan where json (UnhelpfulSpan {} ) = JSNull --JSObject [( "type", "unhelpful")] json (RealSrcSpan rss _) = json rss instance ToJson RealSrcSpan where json (RealSrcSpan'{..}) = JSObject [ ("file", JSString (unpackFS srcSpanFile)) , ("startLine", JSInt srcSpanSLine) , ("startCol", JSInt srcSpanSCol) , ("endLine", JSInt srcSpanELine) , ("endCol", JSInt srcSpanECol) ] instance NFData SrcSpan where rnf x = x `seq` () getBufSpan :: SrcSpan -> Strict.Maybe BufSpan getBufSpan (RealSrcSpan _ mbspan) = mbspan getBufSpan (UnhelpfulSpan _) = Strict.Nothing -- | Built-in "bad" 'SrcSpan's for common sources of location uncertainty noSrcSpan, generatedSrcSpan, wiredInSrcSpan, interactiveSrcSpan :: SrcSpan noSrcSpan = UnhelpfulSpan UnhelpfulNoLocationInfo wiredInSrcSpan = UnhelpfulSpan UnhelpfulWiredIn interactiveSrcSpan = UnhelpfulSpan UnhelpfulInteractive generatedSrcSpan = UnhelpfulSpan UnhelpfulGenerated isGeneratedSrcSpan :: SrcSpan -> Bool isGeneratedSrcSpan (UnhelpfulSpan UnhelpfulGenerated) = True isGeneratedSrcSpan _ = False -- | Create a "bad" 'SrcSpan' that has not location information mkGeneralSrcSpan :: FastString -> SrcSpan mkGeneralSrcSpan = UnhelpfulSpan . UnhelpfulOther -- | Create a 'SrcSpan' corresponding to a single point srcLocSpan :: SrcLoc -> SrcSpan srcLocSpan (UnhelpfulLoc str) = UnhelpfulSpan (UnhelpfulOther str) srcLocSpan (RealSrcLoc l mb) = RealSrcSpan (realSrcLocSpan l) (fmap (\b -> BufSpan b b) mb) realSrcLocSpan :: RealSrcLoc -> RealSrcSpan realSrcLocSpan (SrcLoc (LexicalFastString file) line col) = RealSrcSpan' file line col line col -- | Create a 'SrcSpan' between two points in a file mkRealSrcSpan :: RealSrcLoc -> RealSrcLoc -> RealSrcSpan mkRealSrcSpan loc1 loc2 = RealSrcSpan' file line1 col1 line2 col2 where line1 = srcLocLine loc1 line2 = srcLocLine loc2 col1 = srcLocCol loc1 col2 = srcLocCol loc2 file = srcLocFile loc1 -- | 'True' if the span is known to straddle only one line. 
isOneLineRealSpan :: RealSrcSpan -> Bool isOneLineRealSpan (RealSrcSpan' _ line1 _ line2 _) = line1 == line2 -- | 'True' if the span is a single point isPointRealSpan :: RealSrcSpan -> Bool isPointRealSpan (RealSrcSpan' _ line1 col1 line2 col2) = line1 == line2 && col1 == col2 -- | Create a 'SrcSpan' between two points in a file mkSrcSpan :: SrcLoc -> SrcLoc -> SrcSpan mkSrcSpan (UnhelpfulLoc str) _ = UnhelpfulSpan (UnhelpfulOther str) mkSrcSpan _ (UnhelpfulLoc str) = UnhelpfulSpan (UnhelpfulOther str) mkSrcSpan (RealSrcLoc loc1 mbpos1) (RealSrcLoc loc2 mbpos2) = RealSrcSpan (mkRealSrcSpan loc1 loc2) (liftA2 BufSpan mbpos1 mbpos2) -- | Combines two 'SrcSpan' into one that spans at least all the characters -- within both spans. Returns UnhelpfulSpan if the files differ. combineSrcSpans :: SrcSpan -> SrcSpan -> SrcSpan combineSrcSpans (UnhelpfulSpan _) r = r -- this seems more useful combineSrcSpans l (UnhelpfulSpan _) = l combineSrcSpans (RealSrcSpan span1 mbspan1) (RealSrcSpan span2 mbspan2) | srcSpanFile span1 == srcSpanFile span2 = RealSrcSpan (combineRealSrcSpans span1 span2) (liftA2 combineBufSpans mbspan1 mbspan2) | otherwise = UnhelpfulSpan $ UnhelpfulOther (fsLit "") -- | Combines two 'SrcSpan' into one that spans at least all the characters -- within both spans. 
Assumes the "file" part is the same in both inputs combineRealSrcSpans :: RealSrcSpan -> RealSrcSpan -> RealSrcSpan combineRealSrcSpans span1 span2 = RealSrcSpan' file line_start col_start line_end col_end where (line_start, col_start) = min (srcSpanStartLine span1, srcSpanStartCol span1) (srcSpanStartLine span2, srcSpanStartCol span2) (line_end, col_end) = max (srcSpanEndLine span1, srcSpanEndCol span1) (srcSpanEndLine span2, srcSpanEndCol span2) file = srcSpanFile span1 combineBufSpans :: BufSpan -> BufSpan -> BufSpan combineBufSpans span1 span2 = BufSpan start end where start = min (bufSpanStart span1) (bufSpanStart span2) end = max (bufSpanEnd span1) (bufSpanEnd span2) -- | Convert a SrcSpan into one that represents only its first character srcSpanFirstCharacter :: SrcSpan -> SrcSpan srcSpanFirstCharacter l@(UnhelpfulSpan {}) = l srcSpanFirstCharacter (RealSrcSpan span mbspan) = RealSrcSpan (mkRealSrcSpan loc1 loc2) (fmap mkBufSpan mbspan) where loc1@(SrcLoc f l c) = realSrcSpanStart span loc2 = SrcLoc f l (c+1) mkBufSpan bspan = let bpos1@(BufPos i) = bufSpanStart bspan bpos2 = BufPos (i+1) in BufSpan bpos1 bpos2 {- ************************************************************************ * * \subsection[SrcSpan-predicates]{Predicates} * * ************************************************************************ -} -- | Test if a 'SrcSpan' is "good", i.e. has precise location information isGoodSrcSpan :: SrcSpan -> Bool isGoodSrcSpan (RealSrcSpan _ _) = True isGoodSrcSpan (UnhelpfulSpan _) = False isOneLineSpan :: SrcSpan -> Bool -- ^ True if the span is known to straddle only one line. -- For "bad" 'SrcSpan', it returns False isOneLineSpan (RealSrcSpan s _) = srcSpanStartLine s == srcSpanEndLine s isOneLineSpan (UnhelpfulSpan _) = False isZeroWidthSpan :: SrcSpan -> Bool -- ^ True if the span has a width of zero, as returned for "virtual" -- semicolons in the lexer. 
-- For "bad" 'SrcSpan', it returns False isZeroWidthSpan (RealSrcSpan s _) = srcSpanStartLine s == srcSpanEndLine s && srcSpanStartCol s == srcSpanEndCol s isZeroWidthSpan (UnhelpfulSpan _) = False -- | Tests whether the first span "contains" the other span, meaning -- that it covers at least as much source code. True where spans are equal. containsSpan :: RealSrcSpan -> RealSrcSpan -> Bool containsSpan s1 s2 = (srcSpanStartLine s1, srcSpanStartCol s1) <= (srcSpanStartLine s2, srcSpanStartCol s2) && (srcSpanEndLine s1, srcSpanEndCol s1) >= (srcSpanEndLine s2, srcSpanEndCol s2) && (srcSpanFile s1 == srcSpanFile s2) -- We check file equality last because it is (presumably?) least -- likely to fail. {- %************************************************************************ %* * \subsection[SrcSpan-unsafe-access-fns]{Unsafe access functions} * * ************************************************************************ -} srcSpanStartLine :: RealSrcSpan -> Int srcSpanEndLine :: RealSrcSpan -> Int srcSpanStartCol :: RealSrcSpan -> Int srcSpanEndCol :: RealSrcSpan -> Int srcSpanStartLine RealSrcSpan'{ srcSpanSLine=l } = l srcSpanEndLine RealSrcSpan'{ srcSpanELine=l } = l srcSpanStartCol RealSrcSpan'{ srcSpanSCol=l } = l srcSpanEndCol RealSrcSpan'{ srcSpanECol=c } = c {- ************************************************************************ * * \subsection[SrcSpan-access-fns]{Access functions} * * ************************************************************************ -} -- | Returns the location at the start of the 'SrcSpan' or a "bad" 'SrcSpan' if that is unavailable srcSpanStart :: SrcSpan -> SrcLoc srcSpanStart (UnhelpfulSpan r) = UnhelpfulLoc (unhelpfulSpanFS r) srcSpanStart (RealSrcSpan s b) = RealSrcLoc (realSrcSpanStart s) (fmap bufSpanStart b) -- | Returns the location at the end of the 'SrcSpan' or a "bad" 'SrcSpan' if that is unavailable srcSpanEnd :: SrcSpan -> SrcLoc srcSpanEnd (UnhelpfulSpan r) = UnhelpfulLoc (unhelpfulSpanFS r) srcSpanEnd (RealSrcSpan s 
b) = RealSrcLoc (realSrcSpanEnd s) (fmap bufSpanEnd b) realSrcSpanStart :: RealSrcSpan -> RealSrcLoc realSrcSpanStart s = mkRealSrcLoc (srcSpanFile s) (srcSpanStartLine s) (srcSpanStartCol s) realSrcSpanEnd :: RealSrcSpan -> RealSrcLoc realSrcSpanEnd s = mkRealSrcLoc (srcSpanFile s) (srcSpanEndLine s) (srcSpanEndCol s) -- | Obtains the filename for a 'SrcSpan' if it is "good" srcSpanFileName_maybe :: SrcSpan -> Maybe FastString srcSpanFileName_maybe (RealSrcSpan s _) = Just (srcSpanFile s) srcSpanFileName_maybe (UnhelpfulSpan _) = Nothing srcSpanToRealSrcSpan :: SrcSpan -> Maybe RealSrcSpan srcSpanToRealSrcSpan (RealSrcSpan ss _) = Just ss srcSpanToRealSrcSpan _ = Nothing {- ************************************************************************ * * \subsection[SrcSpan-instances]{Instances} * * ************************************************************************ -} -- We want to order RealSrcSpans first by the start point, then by the -- end point. instance Ord RealSrcSpan where a `compare` b = (realSrcSpanStart a `compare` realSrcSpanStart b) `thenCmp` (realSrcSpanEnd a `compare` realSrcSpanEnd b) instance Show RealSrcLoc where show (SrcLoc filename row col) = "SrcLoc " ++ show filename ++ " " ++ show row ++ " " ++ show col -- Show is used by GHC.Parser.Lexer, because we derive Show for Token instance Show RealSrcSpan where show span@(RealSrcSpan' file sl sc el ec) | isPointRealSpan span = "SrcSpanPoint " ++ show file ++ " " ++ intercalate " " (map show [sl,sc]) | isOneLineRealSpan span = "SrcSpanOneLine " ++ show file ++ " " ++ intercalate " " (map show [sl,sc,ec]) | otherwise = "SrcSpanMultiLine " ++ show file ++ " " ++ intercalate " " (map show [sl,sc,el,ec]) instance Outputable RealSrcSpan where ppr span = pprUserRealSpan True span -- I don't know why there is this style-based difference -- = getPprStyle $ \ sty -> -- if userStyle sty || debugStyle sty then -- text (showUserRealSpan True span) -- else -- hcat [text "{-# LINE ", int (srcSpanStartLine 
span), space, -- char '\"', pprFastFilePath $ srcSpanFile span, text " #-}"] instance Outputable SrcSpan where ppr span = pprUserSpan True span instance Outputable UnhelpfulSpanReason where ppr = pprUnhelpfulSpanReason -- I don't know why there is this style-based difference -- = getPprStyle $ \ sty -> -- if userStyle sty || debugStyle sty then -- pprUserSpan True span -- else -- case span of -- UnhelpfulSpan _ -> panic "Outputable UnhelpfulSpan" -- RealSrcSpan s -> ppr s unhelpfulSpanFS :: UnhelpfulSpanReason -> FastString unhelpfulSpanFS r = case r of UnhelpfulOther s -> s UnhelpfulNoLocationInfo -> fsLit "" UnhelpfulWiredIn -> fsLit "" UnhelpfulInteractive -> fsLit "" UnhelpfulGenerated -> fsLit "" pprUnhelpfulSpanReason :: UnhelpfulSpanReason -> SDoc pprUnhelpfulSpanReason r = ftext (unhelpfulSpanFS r) pprUserSpan :: Bool -> SrcSpan -> SDoc pprUserSpan _ (UnhelpfulSpan r) = pprUnhelpfulSpanReason r pprUserSpan show_path (RealSrcSpan s _) = pprUserRealSpan show_path s pprUserRealSpan :: Bool -> RealSrcSpan -> SDoc pprUserRealSpan show_path span@(RealSrcSpan' src_path line col _ _) | isPointRealSpan span = hcat [ ppWhen show_path (pprFastFilePath src_path <> colon) , int line <> colon , int col ] pprUserRealSpan show_path span@(RealSrcSpan' src_path line scol _ ecol) | isOneLineRealSpan span = hcat [ ppWhen show_path (pprFastFilePath src_path <> colon) , int line <> colon , int scol , ppUnless (ecol - scol <= 1) (char '-' <> int (ecol - 1)) ] -- For single-character or point spans, we just -- output the starting column number pprUserRealSpan show_path (RealSrcSpan' src_path sline scol eline ecol) = hcat [ ppWhen show_path (pprFastFilePath src_path <> colon) , parens (int sline <> comma <> int scol) , char '-' , parens (int eline <> comma <> int ecol') ] where ecol' = if ecol == 0 then ecol else ecol - 1 {- ************************************************************************ * * \subsection[Located]{Attaching SrcSpans to things} * * 
************************************************************************ -} -- | We attach SrcSpans to lots of things, so let's have a datatype for it. data GenLocated l e = L l e deriving (Eq, Ord, Show, Data, Functor, Foldable, Traversable) type Located = GenLocated SrcSpan type RealLocated = GenLocated RealSrcSpan mapLoc :: (a -> b) -> GenLocated l a -> GenLocated l b mapLoc = fmap unLoc :: GenLocated l e -> e unLoc (L _ e) = e getLoc :: GenLocated l e -> l getLoc (L l _) = l noLoc :: e -> Located e noLoc e = L noSrcSpan e mkGeneralLocated :: String -> e -> Located e mkGeneralLocated s e = L (mkGeneralSrcSpan (fsLit s)) e combineLocs :: Located a -> Located b -> SrcSpan combineLocs a b = combineSrcSpans (getLoc a) (getLoc b) -- | Combine locations from two 'Located' things and add them to a third thing addCLoc :: Located a -> Located b -> c -> Located c addCLoc a b c = L (combineSrcSpans (getLoc a) (getLoc b)) c -- not clear whether to add a general Eq instance, but this is useful sometimes: -- | Tests whether the two located things are equal eqLocated :: Eq a => GenLocated l a -> GenLocated l a -> Bool eqLocated a b = unLoc a == unLoc b -- not clear whether to add a general Ord instance, but this is useful sometimes: -- | Tests the ordering of the two located things cmpLocated :: Ord a => GenLocated l a -> GenLocated l a -> Ordering cmpLocated a b = unLoc a `compare` unLoc b -- | Compare the 'BufSpan' of two located things. -- -- Precondition: both operands have an associated 'BufSpan'. 
-- | Compare two 'Located' things by their 'BufSpan' (string-buffer position).
-- Panics if either location lacks a 'BufSpan' (i.e. was not produced by the parser).
cmpBufSpan :: HasDebugCallStack => Located a -> Located a -> Ordering
cmpBufSpan (L l1 _) (L l2 _)
  | Strict.Just a <- getBufSpan l1
  , Strict.Just b <- getBufSpan l2
  = compare a b
  | otherwise
  = panic "cmpBufSpan: no BufSpan"

instance (Outputable e) => Outputable (Located e) where
  ppr (L l e) = -- GenLocated:
                -- Print spans without the file name etc
                whenPprDebug (braces (pprUserSpan False l))
             $$ ppr e

instance (Outputable e) => Outputable (GenLocated RealSrcSpan e) where
  ppr (L l e) = -- GenLocated:
                -- Print spans without the file name etc
                whenPprDebug (braces (pprUserSpan False (RealSrcSpan l Strict.Nothing)))
             $$ ppr e

pprLocated :: (Outputable l, Outputable e) => GenLocated l e -> SDoc
pprLocated (L l e) =
                -- Print spans without the file name etc
                whenPprDebug (braces (ppr l))
             $$ ppr e

-- | Always prints the location, even without -dppr-debug
pprLocatedAlways :: (Outputable l, Outputable e) => GenLocated l e -> SDoc
pprLocatedAlways (L l e) = braces (ppr l) $$ ppr e

{-
************************************************************************
*                                                                      *
\subsection{Ordering SrcSpans for InteractiveUI}
*                                                                      *
************************************************************************
-}

-- | Strategies for ordering 'SrcSpan's
leftmost_smallest, leftmost_largest, rightmost_smallest :: SrcSpan -> SrcSpan -> Ordering
rightmost_smallest = compareSrcSpanBy (flip compare)
leftmost_smallest  = compareSrcSpanBy compare
leftmost_largest   = compareSrcSpanBy $ \a b ->
  (realSrcSpanStart a `compare` realSrcSpanStart b)
    `thenCmp`
  (realSrcSpanEnd b `compare` realSrcSpanEnd a)

-- | Lift an ordering on 'RealSrcSpan' to 'SrcSpan'; real spans sort before
-- unhelpful ones, and two unhelpful spans compare equal.
compareSrcSpanBy :: (RealSrcSpan -> RealSrcSpan -> Ordering) -> SrcSpan -> SrcSpan -> Ordering
compareSrcSpanBy cmp (RealSrcSpan a _) (RealSrcSpan b _) = cmp a b
compareSrcSpanBy _   (RealSrcSpan _ _) (UnhelpfulSpan _) = LT
compareSrcSpanBy _   (UnhelpfulSpan _) (RealSrcSpan _ _) = GT
compareSrcSpanBy _   (UnhelpfulSpan _) (UnhelpfulSpan _) = EQ

-- | Determines whether a span encloses a given line and column index
spans :: SrcSpan -> (Int, Int) -> Bool
spans (UnhelpfulSpan _) _ = panic "spans UnhelpfulSpan"
spans (RealSrcSpan span _) (l,c) = realSrcSpanStart span <= loc && loc <= realSrcSpanEnd span
  where loc = mkRealSrcLoc (srcSpanFile span) l c

-- | Determines whether a span is enclosed by another one
isSubspanOf :: SrcSpan  -- ^ The span that may be enclosed by the other
            -> SrcSpan  -- ^ The span it may be enclosed by
            -> Bool
isSubspanOf (RealSrcSpan src _) (RealSrcSpan parent _) = isRealSubspanOf src parent
isSubspanOf _ _ = False

-- | Determines whether a span is enclosed by another one
isRealSubspanOf :: RealSrcSpan  -- ^ The span that may be enclosed by the other
                -> RealSrcSpan  -- ^ The span it may be enclosed by
                -> Bool
isRealSubspanOf src parent
  | srcSpanFile parent /= srcSpanFile src = False
  | otherwise = realSrcSpanStart parent <= realSrcSpanStart src
             && realSrcSpanEnd parent   >= realSrcSpanEnd src

liftL :: Monad m => (a -> m b) -> GenLocated l a -> m (GenLocated l b)
liftL f (L loc a) = do
  a' <- f a
  return $ L loc a'

getRealSrcSpan :: RealLocated a -> RealSrcSpan
getRealSrcSpan (L l _) = l

unRealSrcSpan :: RealLocated a -> a
unRealSrcSpan (L _ e) = e

-- | A location as produced by the parser. Consists of two components:
--
-- * The location in the file, adjusted for #line and {-# LINE ... #-} pragmas (RealSrcLoc)
-- * The location in the string buffer (BufPos) with monotonicity guarantees (see #17632)
data PsLoc
  = PsLoc { psRealLoc :: !RealSrcLoc, psBufPos :: !BufPos }
  deriving (Eq, Ord, Show)

data PsSpan
  = PsSpan { psRealSpan :: !RealSrcSpan, psBufSpan :: !BufSpan }
  deriving (Eq, Ord, Show, Data)

type PsLocated = GenLocated PsSpan

-- | Forget the buffer component of a parser location.
psLocatedToLocated :: PsLocated a -> Located a
psLocatedToLocated (L sp a) = L (mkSrcSpanPs sp) a

-- | Advance both the file position and the buffer position past one character.
advancePsLoc :: PsLoc -> Char -> PsLoc
advancePsLoc (PsLoc real_loc buf_loc) c =
  PsLoc (advanceSrcLoc real_loc c) (advanceBufPos buf_loc)

mkPsSpan :: PsLoc -> PsLoc -> PsSpan
mkPsSpan (PsLoc r1 b1) (PsLoc r2 b2) = PsSpan (mkRealSrcSpan r1 r2) (BufSpan b1 b2)

psSpanStart :: PsSpan -> PsLoc
psSpanStart (PsSpan r b) = PsLoc (realSrcSpanStart r) (bufSpanStart b)

psSpanEnd :: PsSpan -> PsLoc
psSpanEnd (PsSpan r b) = PsLoc (realSrcSpanEnd r) (bufSpanEnd b)

mkSrcSpanPs :: PsSpan -> SrcSpan
mkSrcSpanPs (PsSpan r b) = RealSrcSpan r (Strict.Just b)

-- | Layout information for declarations.
data LayoutInfo =

    -- | Explicit braces written by the user.
    --
    -- @
    -- class C a where { foo :: a; bar :: a }
    -- @
    ExplicitBraces
  |
    -- | Virtual braces inserted by the layout algorithm.
    --
    -- @
    -- class C a where
    --   foo :: a
    --   bar :: a
    -- @
    VirtualBraces
      !Int -- ^ Layout column (indentation level, begins at 1)
  |
    -- | Empty or compiler-generated blocks do not have layout information
    -- associated with them.
    NoLayoutInfo
  deriving (Eq, Ord, Show, Data)

-- | Indentation level is 1-indexed, so the leftmost column is 1.
leftmostColumn :: Int
leftmostColumn = 1

-- ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Target.hs0000644000000000000000000000441714472400113020530 0ustar0000000000000000
module GHC.Types.Target
  ( Target(..)
  , TargetId(..)
, InputFileBuffer , pprTarget , pprTargetId ) where import GHC.Prelude import GHC.Driver.Phases ( Phase ) import GHC.Unit import GHC.Data.StringBuffer ( StringBuffer ) import GHC.Utils.Outputable import Data.Time -- | A compilation target. -- -- A target may be supplied with the actual text of the -- module. If so, use this instead of the file contents (this -- is for use in an IDE where the file hasn't been saved by -- the user yet). -- -- These fields are strict because Targets are long lived. data Target = Target { targetId :: !TargetId, -- ^ module or filename targetAllowObjCode :: !Bool, -- ^ object code allowed? targetUnitId :: !UnitId, -- ^ id of the unit this target is part of targetContents :: !(Maybe (InputFileBuffer, UTCTime)) -- ^ Optional in-memory buffer containing the source code GHC should -- use for this target instead of reading it from disk. -- -- Since GHC version 8.10 modules which require preprocessors such as -- Literate Haskell or CPP to run are also supported. -- -- If a corresponding source file does not exist on disk this will -- result in a 'SourceError' exception if @targetId = TargetModule _@ -- is used. However together with @targetId = TargetFile _@ GHC will -- not complain about the file missing. } data TargetId = TargetModule !ModuleName -- ^ A module name: search for the file | TargetFile !FilePath !(Maybe Phase) -- ^ A filename: preprocess & parse it to find the module name. -- If specified, the Phase indicates how to compile this file -- (which phase to start from). Nothing indicates the starting phase -- should be determined from the suffix of the filename. 
deriving Eq type InputFileBuffer = StringBuffer pprTarget :: Target -> SDoc pprTarget Target { targetUnitId = uid, targetId = id, targetAllowObjCode = obj } = (if obj then empty else char '*') <> ppr uid <> colon <> pprTargetId id instance Outputable Target where ppr = pprTarget pprTargetId :: TargetId -> SDoc pprTargetId (TargetModule m) = ppr m pprTargetId (TargetFile f _) = text f instance Outputable TargetId where ppr = pprTargetId ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Tickish.hs0000644000000000000000000003511714472400113020701 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Types.Tickish ( GenTickish(..), CoreTickish, StgTickish, CmmTickish, XTickishId, tickishCounts, TickishScoping(..), tickishScoped, tickishScopesLike, tickishFloatable, tickishCanSplit, mkNoCount, mkNoScope, tickishIsCode, isProfTick, TickishPlacement(..), tickishPlace, tickishContains ) where import GHC.Prelude import GHC.Core.Type import GHC.Unit.Module import GHC.Types.CostCentre import GHC.Types.SrcLoc ( RealSrcSpan, containsSpan ) import GHC.Types.Var import GHC.Utils.Panic import Language.Haskell.Syntax.Extension ( NoExtField ) import Data.Data import GHC.Utils.Outputable (Outputable (ppr), text) {- ********************************************************************* * * Ticks * * ************************************************************************ -} -- | Allows attaching extra information to points in expressions {- | Used as a data type index for the GenTickish annotations. See Note [Tickish passes] -} data TickishPass = TickishPassCore | TickishPassStg | TickishPassCmm {- Note [Tickish passes] ~~~~~~~~~~~~~~~~~~~~~ Tickish annotations store different information depending on where they are used. Here's a summary of the differences between the passes. 
   - CoreTickish: Haskell and Core
       The tickish annotations store the free variables of breakpoints.

   - StgTickish: Stg
       The GHCi bytecode generator (GHC.StgToByteCode) needs to know the
       type of each breakpoint in addition to its free variables. Since
       we cannot compute the type from an STG expression, the tickish
       annotations store the type of breakpoints in addition to the free
       variables.

   - CmmTickish: Cmm
       Breakpoints are unsupported and no free variables or type are
       stored.
-}

type family XBreakpoint (pass :: TickishPass)
type instance XBreakpoint 'TickishPassCore = NoExtField
-- | Keep track of the type of breakpoints in STG, for GHCi
type instance XBreakpoint 'TickishPassStg  = Type
type instance XBreakpoint 'TickishPassCmm  = NoExtField

type family XTickishId (pass :: TickishPass)
type instance XTickishId 'TickishPassCore = Id
type instance XTickishId 'TickishPassStg  = Id
type instance XTickishId 'TickishPassCmm  = NoExtField

type CoreTickish = GenTickish 'TickishPassCore
type StgTickish  = GenTickish 'TickishPassStg

-- | Tickish in Cmm context (annotations only)
type CmmTickish  = GenTickish 'TickishPassCmm

-- If you edit this type, you may need to update the GHC formalism
-- See Note [GHC Formalism] in GHC.Core.Lint
data GenTickish pass =
    -- | An @{-# SCC #-}@ profiling annotation, either automatically
    -- added by the desugarer as a result of -auto-all, or added by
    -- the user.
    ProfNote {
      profNoteCC    :: CostCentre, -- ^ the cost centre
      profNoteCount :: !Bool,      -- ^ bump the entry count?
      profNoteScope :: !Bool       -- ^ scopes over the enclosed expression
                                   -- (i.e. not just a tick)
    }

    -- | A "tick" used by HPC to track the execution of each
    -- subexpression in the original source code.
  | HpcTick {
      tickModule :: Module,
      tickId     :: !Int
    }

    -- | A breakpoint for the GHCi debugger. This behaves like an HPC
    -- tick, but has a list of free variables which will be available
    -- for inspection in GHCi when the program stops at the breakpoint.
    --
    -- NB. we must take account of these Ids when (a) counting free variables,
    -- and (b) substituting (don't substitute for them)
  | Breakpoint
    { breakpointExt :: XBreakpoint pass
    , breakpointId  :: !Int
    , breakpointFVs :: [XTickishId pass]
                    -- ^ the order of this list is important:
                    -- it matches the order of the lists in the
                    -- appropriate entry in 'GHC.ByteCode.Types.ModBreaks'.
                    --
                    -- Careful about substitution! See
                    -- Note [substTickish] in "GHC.Core.Subst".
    }

    -- | A source note.
    --
    -- Source notes are pure annotations: Their presence should neither
    -- influence compilation nor execution. The semantics are given by
    -- causality: The presence of a source note means that a local
    -- change in the referenced source code span will possibly provoke
    -- the generated code to change. On the flip-side, the functionality
    -- of annotated code *must* be invariant against changes to all
    -- source code *except* the spans referenced in the source notes
    -- (see "Causality of optimized Haskell" paper for details).
    --
    -- Therefore extending the scope of any given source note is always
    -- valid. Note that it is still undesirable though, as this reduces
    -- their usefulness for debugging and profiling. Therefore we will
    -- generally try only to make use of this property where it is
    -- necessary to enable optimizations.
  | SourceNote
    { sourceSpan :: RealSrcSpan -- ^ Source covered
    , sourceName :: String      -- ^ Name for source location
                                --   (uses same names as CCs)
    }

deriving instance Eq   (GenTickish 'TickishPassCore)
deriving instance Ord  (GenTickish 'TickishPassCore)
deriving instance Data (GenTickish 'TickishPassCore)

deriving instance Data (GenTickish 'TickishPassStg)

deriving instance Eq   (GenTickish 'TickishPassCmm)
deriving instance Ord  (GenTickish 'TickishPassCmm)
deriving instance Data (GenTickish 'TickishPassCmm)

-- | A "counting tick" (where tickishCounts is True) is one that
-- counts evaluations in some way.
-- We cannot discard a counting tick, and the compiler should preserve
-- the number of counting ticks as far as possible.
--
-- However, we still allow the simplifier to increase or decrease
-- sharing, so in practice the actual number of ticks may vary, except
-- that we never change the value from zero to non-zero or vice versa.
tickishCounts :: GenTickish pass -> Bool
tickishCounts n@ProfNote{} = profNoteCount n
tickishCounts HpcTick{}    = True
tickishCounts Breakpoint{} = True
tickishCounts _            = False

-- | Specifies the scoping behaviour of ticks. This governs the
-- behaviour of ticks that care about the covered code and the cost
-- associated with it. Important for ticks relating to profiling.
data TickishScoping =
    -- | No scoping: The tick does not care about what code it
    -- covers. Transformations can freely move code inside as well as
    -- outside without any additional annotation obligations
    NoScope

    -- | Soft scoping: We want all code that is covered to stay
    -- covered. Note that this scope type does not forbid
    -- transformations from happening, as long as all results of
    -- the transformations are still covered by this tick or a copy of
    -- it. For example
    --
    --   let x = tick<...> (let y = foo in bar) in baz
    --     ===>
    --   let x = tick<...> bar; y = tick<...> foo in baz
    --
    -- Is a valid transformation as far as "bar" and "foo" is
    -- concerned, because both still are scoped over by the tick.
    --
    -- Note though that one might object to the "let" not being
    -- covered by the tick any more. However, we are generally lax
    -- with this - constant costs don't matter too much, and given
    -- that the "let" was effectively merged we can view it as having
    -- lost its identity anyway.
    --
    -- Also note that this scoping behaviour allows floating a tick
    -- "upwards" in pretty much any situation. For example:
    --
    --   case foo of x -> tick<...> bar
    --     ==>
    --   tick<...> case foo of x -> bar
    --
    -- While this is always legal, we want to make a best effort to
    -- only make us of this where it exposes transformation
    -- opportunities.
  | SoftScope

    -- | Cost centre scoping: We don't want any costs to move to other
    -- cost-centre stacks. This means we not only want no code or cost
    -- to get moved out of their cost centres, but we also object to
    -- code getting associated with new cost-centre ticks - or
    -- changing the order in which they get applied.
    --
    -- A rule of thumb is that we don't want any code to gain new
    -- annotations. However, there are notable exceptions, for
    -- example:
    --
    --   let f = \y -> foo in tick<...> ... (f x) ...
    --     ==>
    --   tick<...> ... foo[x/y] ...
    --
    -- In-lining lambdas like this is always legal, because inlining a
    -- function does not change the cost-centre stack when the
    -- function is called.
  | CostCentreScope

  deriving (Eq)

-- | Returns the intended scoping rule for a Tickish
tickishScoped :: GenTickish pass -> TickishScoping
tickishScoped n@ProfNote{}
  | profNoteScope n        = CostCentreScope
  | otherwise              = NoScope
tickishScoped HpcTick{}    = NoScope
tickishScoped Breakpoint{} = CostCentreScope
   -- Breakpoints are scoped: eventually we're going to do call
   -- stacks, but also this helps prevent the simplifier from moving
   -- breakpoints around and changing their result type (see #1531).
tickishScoped SourceNote{} = SoftScope

-- | Returns whether the tick scoping rule is at least as permissive
-- as the given scoping rule.
tickishScopesLike :: GenTickish pass -> TickishScoping -> Bool
tickishScopesLike t scope = tickishScoped t `like` scope
  where NoScope         `like` _         = True
        _               `like` NoScope   = False
        SoftScope       `like` _         = True
        _               `like` SoftScope = False
        CostCentreScope `like` _         = True

-- | Returns @True@ for ticks that can be floated upwards easily even
-- where it might change execution counts, such as:
--
--   Just (tick<...> foo)
--     ==>
--   tick<...> (Just foo)
--
-- This is a combination of @tickishSoftScope@ and
-- @tickishCounts@. Note that in principle splittable ticks can become
-- floatable using @mkNoTick@ -- even though there's currently no
-- tickish for which that is the case.
tickishFloatable :: GenTickish pass -> Bool
tickishFloatable t = t `tickishScopesLike` SoftScope && not (tickishCounts t)

-- | Returns @True@ for a tick that is both counting /and/ scoping and
-- can be split into its (tick, scope) parts using 'mkNoScope' and
-- 'mkNoTick' respectively.
tickishCanSplit :: GenTickish pass -> Bool
tickishCanSplit ProfNote{profNoteScope = True, profNoteCount = True} = True
tickishCanSplit _ = False

-- | Drop the counting part of a splittable tick; panics if the tick
-- counts but cannot be split.
mkNoCount :: GenTickish pass -> GenTickish pass
mkNoCount n | not (tickishCounts n)   = n
            | not (tickishCanSplit n) = panic "mkNoCount: Cannot split!"
mkNoCount n@ProfNote{}                = n {profNoteCount = False}
mkNoCount _                           = panic "mkNoCount: Undefined split!"

-- | Drop the scoping part of a splittable tick; panics if the tick
-- is scoped but cannot be split.
mkNoScope :: GenTickish pass -> GenTickish pass
mkNoScope n | tickishScoped n == NoScope = n
            | not (tickishCanSplit n)    = panic "mkNoScope: Cannot split!"
mkNoScope n@ProfNote{}                   = n {profNoteScope = False}
mkNoScope _                              = panic "mkNoScope: Undefined split!"

-- | Return @True@ if this source annotation compiles to some backend
-- code. Without this flag, the tickish is seen as a simple annotation
-- that does not have any associated evaluation code.
--
-- What this means that we are allowed to disregard the tick if doing
-- so means that we can skip generating any code in the first place. A
-- typical example is top-level bindings:
--
--   foo = tick<...> \y -> ...
--     ==>
--   foo = \y -> tick<...> ...
--
-- Here there is just no operational difference between the first and
-- the second version. Therefore code generation should simply
-- translate the code as if it found the latter.
tickishIsCode :: GenTickish pass -> Bool
tickishIsCode SourceNote{} = False
tickishIsCode _tickish     = True  -- all the rest for now

isProfTick :: GenTickish pass -> Bool
isProfTick ProfNote{} = True
isProfTick _          = False

-- | Governs the kind of expression that the tick gets placed on when
-- annotating for example using @mkTick@. If we find that we want to
-- put a tickish on an expression ruled out here, we try to float it
-- inwards until we find a suitable expression.
data TickishPlacement =
    -- | Place ticks exactly on run-time expressions. We can still
    -- move the tick through pure compile-time constructs such as
    -- other ticks, casts or type lambdas. This is the most
    -- restrictive placement rule for ticks, as all tickishs have in
    -- common that they want to track runtime processes. The only
    -- legal placement rule for counting ticks.
    -- NB: We generally try to move these as close to the relevant
    -- runtime expression as possible. This means they get pushed through
    -- type arguments. E.g. we create `(tick f) @Bool` instead of
    -- `tick (f @Bool)`.
    PlaceRuntime

    -- | As @PlaceRuntime@, but we float the tick through all
    -- lambdas. This makes sense where there is little difference
    -- between annotating the lambda and annotating the lambda's code.
  | PlaceNonLam

    -- | In addition to floating through lambdas, cost-centre style
    -- tickishs can also be moved from constructors, non-function
    -- variables and literals. For example:
    --
    --   let x = scc<...> C (scc<...> y) (scc<...> 3) in ...
    --
    -- Neither the constructor application, the variable or the
    -- literal are likely to have any cost worth mentioning.
And even -- if y names a thunk, the call would not care about the -- evaluation context. Therefore removing all annotations in the -- above example is safe. | PlaceCostCentre deriving (Eq,Show) instance Outputable TickishPlacement where ppr = text . show -- | Placement behaviour we want for the ticks tickishPlace :: GenTickish pass -> TickishPlacement tickishPlace n@ProfNote{} | profNoteCount n = PlaceRuntime | otherwise = PlaceCostCentre tickishPlace HpcTick{} = PlaceRuntime tickishPlace Breakpoint{} = PlaceRuntime tickishPlace SourceNote{} = PlaceNonLam -- | Returns whether one tick "contains" the other one, therefore -- making the second tick redundant. tickishContains :: Eq (GenTickish pass) => GenTickish pass -> GenTickish pass -> Bool tickishContains (SourceNote sp1 n1) (SourceNote sp2 n2) = containsSpan sp1 sp2 && n1 == n2 -- compare the String last tickishContains t1 t2 = t1 == t2 ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/TyThing.hs0000644000000000000000000002756414472400113020700 0ustar0000000000000000-- | A global typecheckable-thing, essentially anything that has a name. module GHC.Types.TyThing ( TyThing (..) , MonadThings (..) 
, mkATyCon , mkAnId , pprShortTyThing , pprTyThingCategory , tyThingCategory , implicitTyThings , implicitConLikeThings , implicitClassThings , implicitTyConThings , implicitCoTyCon , isImplicitTyThing , tyThingParent_maybe , tyThingsTyCoVars , tyThingAvailInfo , tyThingTyCon , tyThingCoAxiom , tyThingDataCon , tyThingConLike , tyThingId ) where import GHC.Prelude import GHC.Types.Name import GHC.Types.Var import GHC.Types.Var.Set import GHC.Types.Id import GHC.Types.Id.Info import GHC.Types.Avail import GHC.Core.Class import GHC.Core.DataCon import GHC.Core.ConLike import GHC.Core.PatSyn import GHC.Core.TyCo.FVs import GHC.Core.TyCon import GHC.Core.Coercion.Axiom import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Utils.Panic import Control.Monad ( liftM ) import Control.Monad.Trans.Reader import Control.Monad.Trans.Class {- Note [ATyCon for classes] ~~~~~~~~~~~~~~~~~~~~~~~~~ Both classes and type constructors are represented in the type environment as ATyCon. You can tell the difference, and get to the class, with isClassTyCon :: TyCon -> Bool tyConClass_maybe :: TyCon -> Maybe Class The Class and its associated TyCon have the same Name. -} -- | A global typecheckable-thing, essentially anything that has a name. -- Not to be confused with a 'TcTyThing', which is also a typecheckable -- thing but in the *local* context. See "GHC.Tc.Utils.Env" for how to retrieve -- a 'TyThing' given a 'Name'. 
data TyThing
  = AnId     Id
  | AConLike ConLike
  | ATyCon   TyCon  -- TyCons and classes; see Note [ATyCon for classes]
  | ACoAxiom (CoAxiom Branched)

instance Outputable TyThing where
  ppr = pprShortTyThing

instance NamedThing TyThing where       -- Can't put this with the type
  getName (AnId id)     = getName id    -- decl, because the DataCon instance
  getName (ATyCon tc)   = getName tc    -- isn't visible there
  getName (ACoAxiom cc) = getName cc
  getName (AConLike cl) = conLikeName cl

mkATyCon :: TyCon -> TyThing
mkATyCon = ATyCon

mkAnId :: Id -> TyThing
mkAnId = AnId

pprShortTyThing :: TyThing -> SDoc
-- c.f. GHC.Types.TyThing.Ppr.pprTyThing, which prints all the details
pprShortTyThing thing
  = pprTyThingCategory thing <+> quotes (ppr (getName thing))

pprTyThingCategory :: TyThing -> SDoc
pprTyThingCategory = text . capitalise . tyThingCategory

tyThingCategory :: TyThing -> String
tyThingCategory (ATyCon tc)
  | isClassTyCon tc = "class"
  | otherwise       = "type constructor"
tyThingCategory (ACoAxiom _) = "coercion axiom"
tyThingCategory (AnId   _)   = "identifier"
tyThingCategory (AConLike (RealDataCon _)) = "data constructor"
tyThingCategory (AConLike (PatSynCon _))   = "pattern synonym"

{- Note [Implicit TyThings]
~~~~~~~~~~~~~~~~~~~~~~~~
  DEFINITION: An "implicit" TyThing is one that does not have its own
  IfaceDecl in an interface file. Instead, its binding in the type
  environment is created as part of typechecking the IfaceDecl for
  some other thing.

Examples:
  * All DataCons are implicit, because they are generated from the
    IfaceDecl for the data/newtype. Ditto class methods.

  * Record selectors are *not* implicit, because they get their own
    free-standing IfaceDecl.

  * Associated data/type families are implicit because they are
    included in the IfaceDecl of the parent class. (NB: the
    IfaceClass decl happens to use IfaceDecl recursively for the
    associated types, but that's irrelevant here.)

  * Dictionary function Ids are not implicit.

  * Axioms for newtypes are implicit (same as above), but axioms
    for data/type family instances are *not* implicit (like DFunIds).
-}

-- | Determine the 'TyThing's brought into scope by another 'TyThing'
-- /other/ than itself. For example, Id's don't have any implicit TyThings
-- as they just bring themselves into scope, but classes bring their
-- dictionary datatype, type constructor and some selector functions into
-- scope, just for a start!
-- N.B. the set of TyThings returned here *must* match the set of
-- names returned by 'GHC.Iface.Load.ifaceDeclImplicitBndrs', in the sense that
-- TyThing.getOccName should define a bijection between the two lists.
-- This invariant is used in 'GHC.IfaceToCore.tc_iface_decl_fingerprint' (see
-- Note [Tricky iface loop])
-- The order of the list does not matter.
implicitTyThings :: TyThing -> [TyThing]
implicitTyThings (AnId _)       = []
implicitTyThings (ACoAxiom _cc) = []
implicitTyThings (ATyCon tc)    = implicitTyConThings tc
implicitTyThings (AConLike cl)  = implicitConLikeThings cl

implicitConLikeThings :: ConLike -> [TyThing]
implicitConLikeThings (RealDataCon dc)
  = dataConImplicitTyThings dc

implicitConLikeThings (PatSynCon {})
  = []  -- Pattern synonyms have no implicit Ids; the wrapper and matcher
        -- are not "implicit"; they are simply new top-level bindings,
        -- and they have their own declaration in an interface file
        -- Unless a record pat syn when there are implicit selectors
        -- They are still not included here as `implicitConLikeThings` is
        -- used by `tcTyClsDecls` whilst pattern synonyms are typed checked
        -- by `tcTopValBinds`.
implicitClassThings :: Class -> [TyThing]
implicitClassThings cl
  = -- Does not include default methods, because those Ids may have
    -- their own pragmas, unfoldings etc, not derived from the Class object

    -- associated types
    -- No recursive call for the classATs, because they
    -- are only the family decls; they have no implicit things
    map ATyCon (classATs cl) ++

    -- superclass and operation selectors
    map AnId (classAllSelIds cl)

implicitTyConThings :: TyCon -> [TyThing]
implicitTyConThings tc
  = class_stuff ++
      -- fields (names of selectors)

      -- (possibly) implicit newtype axioms
      -- or type family axioms
    implicitCoTyCon tc ++

      -- for each data constructor in order,
      -- the constructor, worker, and (possibly) wrapper
    [ thing | dc <- tyConDataCons tc
            , thing <- AConLike (RealDataCon dc) : dataConImplicitTyThings dc ]
      -- NB. record selectors are *not* implicit, they have fully-fledged
      -- bindings that pass through the compilation pipeline as normal.
  where
    class_stuff = case tyConClass_maybe tc of
        Nothing -> []
        Just cl -> implicitClassThings cl

-- For newtypes and closed type families (only) add the implicit coercion tycon
implicitCoTyCon :: TyCon -> [TyThing]
implicitCoTyCon tc
  | Just co <- newTyConCo_maybe tc = [ACoAxiom $ toBranchedAxiom co]
  | Just co <- isClosedSynFamilyTyConWithAxiom_maybe tc = [ACoAxiom co]
  | otherwise = []

-- | Returns @True@ if there should be no interface-file declaration
-- for this thing on its own: either it is built-in, or it is part
-- of some other declaration, or it is generated implicitly by some
-- other declaration.
isImplicitTyThing :: TyThing -> Bool
isImplicitTyThing (AConLike cl) = case cl of
                                    RealDataCon {} -> True
                                    PatSynCon {}   -> False
isImplicitTyThing (AnId id)     = isImplicitId id
isImplicitTyThing (ATyCon tc)   = isImplicitTyCon tc
isImplicitTyThing (ACoAxiom ax) = isImplicitCoAxiom ax

-- | tyThingParent_maybe x returns (Just p)
-- when pprTyThingInContext should print a declaration for p
-- (albeit with some "..." in it) when asked to show x
-- It returns the *immediate* parent. So a datacon returns its tycon
-- but the tycon could be the associated type of a class, so it in turn
-- might have a parent.
tyThingParent_maybe :: TyThing -> Maybe TyThing
tyThingParent_maybe (AConLike cl) = case cl of
    RealDataCon dc -> Just (ATyCon (dataConTyCon dc))
    PatSynCon{}    -> Nothing
tyThingParent_maybe (ATyCon tc) = case tyConAssoc_maybe tc of
    Just tc -> Just (ATyCon tc)
    Nothing -> Nothing
tyThingParent_maybe (AnId id) = case idDetails id of
    RecSelId { sel_tycon = RecSelData   tc } -> Just (ATyCon tc)
    RecSelId { sel_tycon = RecSelPatSyn ps } -> Just (AConLike (PatSynCon ps))
    ClassOpId cls                            -> Just (ATyCon (classTyCon cls))
    _other                                   -> Nothing
tyThingParent_maybe _other = Nothing

tyThingsTyCoVars :: [TyThing] -> TyCoVarSet
tyThingsTyCoVars tts = unionVarSets $ map ttToVarSet tts
  where
    ttToVarSet (AnId id)     = tyCoVarsOfType $ idType id
    ttToVarSet (AConLike cl) = case cl of
        RealDataCon dc -> tyCoVarsOfType $ dataConRepType dc
        PatSynCon{}    -> emptyVarSet
    ttToVarSet (ATyCon tc)   = case tyConClass_maybe tc of
        Just cls -> (mkVarSet . fst . classTvsFds) cls
        Nothing  -> tyCoVarsOfType $ tyConKind tc
    ttToVarSet (ACoAxiom _)  = emptyVarSet

-- | The Names that a TyThing should bring into scope. Used to build
-- the GlobalRdrEnv for the InteractiveContext.
tyThingAvailInfo :: TyThing -> [AvailInfo]
tyThingAvailInfo (ATyCon t)
  = case tyConClass_maybe t of
      Just c  -> [availTC n ((n : map getName (classMethods c)
                                ++ map getName (classATs c))) [] ]
           where n = getName c
      Nothing -> [availTC n (n : map getName dcs) flds]
           where n    = getName t
                 dcs  = tyConDataCons t
                 flds = tyConFieldLabels t
tyThingAvailInfo (AConLike (PatSynCon p))
  = avail (getName p) : map availField (patSynFieldLabels p)
tyThingAvailInfo t
  = [avail (getName t)]

-- | Get the 'TyCon' from a 'TyThing' if it is a type constructor thing. Panics otherwise
tyThingTyCon :: HasDebugCallStack => TyThing -> TyCon
tyThingTyCon (ATyCon tc) = tc
tyThingTyCon other       = pprPanic "tyThingTyCon" (ppr other)

-- | Get the 'CoAxiom' from a 'TyThing' if it is a coercion axiom thing. Panics otherwise
tyThingCoAxiom :: HasDebugCallStack => TyThing -> CoAxiom Branched
tyThingCoAxiom (ACoAxiom ax) = ax
tyThingCoAxiom other         = pprPanic "tyThingCoAxiom" (ppr other)

-- | Get the 'DataCon' from a 'TyThing' if it is a data constructor thing. Panics otherwise
tyThingDataCon :: HasDebugCallStack => TyThing -> DataCon
tyThingDataCon (AConLike (RealDataCon dc)) = dc
tyThingDataCon other                       = pprPanic "tyThingDataCon" (ppr other)

-- | Get the 'ConLike' from a 'TyThing' if it is a data constructor thing.
-- Panics otherwise
tyThingConLike :: HasDebugCallStack => TyThing -> ConLike
tyThingConLike (AConLike dc) = dc
tyThingConLike other         = pprPanic "tyThingConLike" (ppr other)

-- | Get the 'Id' from a 'TyThing' if it is a id *or* data constructor thing. Panics otherwise
tyThingId :: HasDebugCallStack => TyThing -> Id
tyThingId (AnId id)                   = id
tyThingId (AConLike (RealDataCon dc)) = dataConWrapId dc
tyThingId other                       = pprPanic "tyThingId" (ppr other)

-- | Class that abstracts out the common ability of the monads in GHC
-- to lookup a 'TyThing' in the monadic environment by 'Name'. Provides
-- a number of related convenience functions for accessing particular
-- kinds of 'TyThing'
class Monad m => MonadThings m where
        lookupThing :: Name -> m TyThing

        lookupId :: Name -> m Id
        lookupId = liftM tyThingId . lookupThing

        lookupDataCon :: Name -> m DataCon
        lookupDataCon = liftM tyThingDataCon . lookupThing

        lookupTyCon :: Name -> m TyCon
        lookupTyCon = liftM tyThingTyCon . lookupThing

-- Instance used in GHC.HsToCore.Quote
instance MonadThings m => MonadThings (ReaderT s m) where
  lookupThing = lift .
    lookupThing

-- ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/TypeEnv.hs0000644000000000000000000000552414472400113020674 0ustar0000000000000000
module GHC.Types.TypeEnv
   ( TypeEnv
   , emptyTypeEnv
   , lookupTypeEnv
   , mkTypeEnv
   , typeEnvFromEntities
   , mkTypeEnvWithImplicits
   , extendTypeEnv
   , extendTypeEnvList
   , extendTypeEnvWithIds
   , plusTypeEnv
   , typeEnvElts
   , typeEnvTyCons
   , typeEnvIds
   , typeEnvPatSyns
   , typeEnvDataCons
   , typeEnvCoAxioms
   , typeEnvClasses
   ) where

import GHC.Prelude

import GHC.Core.Class
import GHC.Core.Coercion.Axiom
import GHC.Core.ConLike
import GHC.Core.DataCon
import GHC.Core.FamInstEnv
import GHC.Core.PatSyn
import GHC.Core.TyCon

import GHC.Types.Name
import GHC.Types.Name.Env
import GHC.Types.Var
import GHC.Types.TyThing

-- | A map from 'Name's to 'TyThing's, constructed by typechecking
-- local declarations or interface files
type TypeEnv = NameEnv TyThing

emptyTypeEnv    :: TypeEnv
typeEnvElts     :: TypeEnv -> [TyThing]
typeEnvTyCons   :: TypeEnv -> [TyCon]
typeEnvCoAxioms :: TypeEnv -> [CoAxiom Branched]
typeEnvIds      :: TypeEnv -> [Id]
typeEnvPatSyns  :: TypeEnv -> [PatSyn]
typeEnvDataCons :: TypeEnv -> [DataCon]
typeEnvClasses  :: TypeEnv -> [Class]
lookupTypeEnv   :: TypeEnv -> Name -> Maybe TyThing

emptyTypeEnv        = emptyNameEnv
typeEnvElts     env = nonDetNameEnvElts env
typeEnvTyCons   env = [tc | ATyCon tc <- typeEnvElts env]
typeEnvCoAxioms env = [ax | ACoAxiom ax <- typeEnvElts env]
typeEnvIds      env = [id | AnId id <- typeEnvElts env]
typeEnvPatSyns  env = [ps | AConLike (PatSynCon ps) <- typeEnvElts env]
typeEnvDataCons env = [dc | AConLike (RealDataCon dc) <- typeEnvElts env]
typeEnvClasses  env = [cl | tc <- typeEnvTyCons env,
                            Just cl <- [tyConClass_maybe tc]]

mkTypeEnv :: [TyThing] -> TypeEnv
mkTypeEnv things = extendTypeEnvList emptyTypeEnv things

-- | Like 'mkTypeEnv', but also adds every implicit 'TyThing' of the
-- given things (see Note [Implicit TyThings] in GHC.Types.TyThing).
mkTypeEnvWithImplicits :: [TyThing] -> TypeEnv
mkTypeEnvWithImplicits things =
  mkTypeEnv things
    `plusNameEnv`
  mkTypeEnv (concatMap implicitTyThings things)

typeEnvFromEntities :: [Id] -> [TyCon] -> [PatSyn] -> [FamInst] -> TypeEnv
typeEnvFromEntities ids tcs patsyns famInsts =
  mkTypeEnv (   map AnId ids
             ++ map ATyCon all_tcs
             ++ concatMap implicitTyConThings all_tcs
             ++ map (ACoAxiom . toBranchedAxiom . famInstAxiom) famInsts
             ++ map (AConLike . PatSynCon) patsyns
            )
 where
  all_tcs = tcs ++ famInstsRepTyCons famInsts

lookupTypeEnv = lookupNameEnv

-- Extend the type environment
extendTypeEnv :: TypeEnv -> TyThing -> TypeEnv
extendTypeEnv env thing = extendNameEnv env (getName thing) thing

extendTypeEnvList :: TypeEnv -> [TyThing] -> TypeEnv
extendTypeEnvList env things = foldl' extendTypeEnv env things

extendTypeEnvWithIds :: TypeEnv -> [Id] -> TypeEnv
extendTypeEnvWithIds env ids
  = extendNameEnvList env [(getName id, AnId id) | id <- ids]

plusTypeEnv :: TypeEnv -> TypeEnv -> TypeEnv
plusTypeEnv env1 env2 = plusNameEnv env1 env2

-- ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique.hs0000644000000000000000000002505214472400113020546 0ustar0000000000000000
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998

@Uniques@ are used to distinguish entities in the compiler (@Ids@,
@Classes@, etc.) from each other. Thus, @Uniques@ are the basic
comparison key in the compiler.

If there is any single operation that needs to be fast, it is @Unique@
comparison. Unsurprisingly, there is quite a bit of huff-and-puff
directed to that end.

Some of the other hair in this code is to be able to use a
``splittable @UniqueSupply@'' if requested/possible (not standard
Haskell).
-} {-# LANGUAGE CPP #-} {-# LANGUAGE BangPatterns, MagicHash #-} module GHC.Types.Unique ( -- * Main data types Unique, Uniquable(..), uNIQUE_BITS, -- ** Constructors, destructors and operations on 'Unique's hasKey, pprUniqueAlways, mkUniqueGrimily, getKey, mkUnique, unpkUnique, eqUnique, ltUnique, incrUnique, stepUnique, newTagUnique, nonDetCmpUnique, isValidKnownKeyUnique, -- ** Local uniques -- | These are exposed exclusively for use by 'GHC.Types.Var.Env.uniqAway', which -- has rather peculiar needs. See Note [Local uniques]. mkLocalUnique, minLocalUnique, maxLocalUnique, ) where #include "Unique.h" import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Outputable import GHC.Utils.Panic.Plain -- just for implementing a fast [0,61) -> Char function import GHC.Exts (indexCharOffAddr#, Char(..), Int(..)) import Data.Char ( chr, ord ) {- ************************************************************************ * * \subsection[Unique-type]{@Unique@ type and operations} * * ************************************************************************ Note [Uniques and masks] ~~~~~~~~~~~~~~~~~~~~~~~~ A `Unique` in GHC is a Word-sized value composed of two pieces: * A "mask", of width `UNIQUE_TAG_BITS`, in the high order bits * A number, of width `uNIQUE_BITS`, which fills up the remainder of the Word The mask is typically an ASCII character. It is typically used to make it easier to distinguish uniques constructed by different parts of the compiler. There is a (potentially incomplete) list of unique masks used given in GHC.Builtin.Uniques. See Note [Uniques-prelude - Uniques for wired-in Prelude things] `mkUnique` constructs a `Unique` from its pieces mkUnique :: Char -> Int -> Unique -} -- | Unique identifier. -- -- The type of unique identifiers that are used in many places in GHC -- for fast ordering and equality tests. 
You should generate these with
-- the functions from the 'UniqSupply' module
--
-- These are sometimes also referred to as \"keys\" in comments in GHC.
newtype Unique = MkUnique Int
-- See Note [Uniques and masks] above: the Int packs the tag character
-- in the high UNIQUE_TAG_BITS bits and the index in the low
-- uNIQUE_BITS bits.

{-# INLINE uNIQUE_BITS #-}
-- | Number of bits available for the numeric index of a 'Unique';
-- the remaining UNIQUE_TAG_BITS (a CPP constant from "Unique.h") hold
-- the tag character.
uNIQUE_BITS :: Int
uNIQUE_BITS = finiteBitSize (0 :: Int) - UNIQUE_TAG_BITS

{-
Now come the functions which construct uniques from their pieces, and vice versa.
The stuff about unique *supplies* is handled further down this module.
-}

unpkUnique      :: Unique -> (Char, Int)        -- The reverse

mkUniqueGrimily :: Int -> Unique                -- A trap-door for UniqSupply
getKey          :: Unique -> Int                -- for Var

incrUnique      :: Unique -> Unique
stepUnique      :: Unique -> Int -> Unique
newTagUnique    :: Unique -> Char -> Unique

mkUniqueGrimily = MkUnique

{-# INLINE getKey #-}
getKey (MkUnique x) = x

-- NB: these operate on the raw packed Int, so a large enough step can
-- carry from the index bits into the tag bits; callers rely on staying
-- within the index range.
incrUnique (MkUnique i) = MkUnique (i + 1)
stepUnique (MkUnique i) n = MkUnique (i + n)

-- Local uniques all carry the tag character 'X'.
-- See Note [Local uniques] (referenced from the export list).
mkLocalUnique :: Int -> Unique
mkLocalUnique i = mkUnique 'X' i

minLocalUnique :: Unique
minLocalUnique = mkLocalUnique 0

maxLocalUnique :: Unique
maxLocalUnique = mkLocalUnique uniqueMask

-- newTagUnique changes the "domain" of a unique to a different char
newTagUnique u c = mkUnique c i where (_,i) = unpkUnique u

-- | A mask selecting the index part of a 'Unique' (the low
-- uNIQUE_BITS bits), i.e. everything except the tag character.
uniqueMask :: Int
uniqueMask = (1 `shiftL` uNIQUE_BITS) - 1

-- pop the Char in the top 8 bits of the Unique(Supply)

-- No 64-bit bugs here, as long as we have at least 32 bits. --JSM

-- and as long as the Char fits in 8 bits, which we assume anyway!

mkUnique :: Char -> Int -> Unique       -- Builds a unique from pieces
-- EXPORTED and used only in GHC.Builtin.Uniques
-- NB: the index is silently truncated to uNIQUE_BITS bits by the mask.
mkUnique c i
  = MkUnique (tag .|. bits)
  where
    tag  = ord c `shiftL` uNIQUE_BITS
    bits = i .&. uniqueMask

unpkUnique (MkUnique u)
  = let
        -- as long as the Char may have its eighth bit set, we
        -- really do need the logical right-shift here!
        tag = chr (u `shiftR` uNIQUE_BITS)
        i   = u .&. uniqueMask
    in
    (tag, i)

-- | The interface file symbol-table encoding assumes that known-key uniques fit
-- in 30-bits; verify this.
--
-- See Note [Symbol table representation of names] in "GHC.Iface.Binary" for details.
isValidKnownKeyUnique :: Unique -> Bool
isValidKnownKeyUnique u =
    case unpkUnique u of
      (c, x) -> ord c < 0xff && x <= (1 `shiftL` 22)

{-
************************************************************************
*                                                                      *
\subsection[Uniquable-class]{The @Uniquable@ class}
*                                                                      *
************************************************************************
-}

-- | Class of things that we can obtain a 'Unique' from
class Uniquable a where
    getUnique :: a -> Unique

hasKey          :: Uniquable a => a -> Unique -> Bool
x `hasKey` k    = getUnique x == k

instance Uniquable FastString where
 getUnique fs = mkUniqueGrimily (uniqueOfFS fs)

instance Uniquable Int where
 getUnique i = mkUniqueGrimily i

{-
************************************************************************
*                                                                      *
\subsection[Unique-instances]{Instance declarations for @Unique@}
*                                                                      *
************************************************************************

And the whole point (besides uniqueness) is fast equality.  We
don't use `deriving' because we want {\em precise} control of
ordering (equality on @Uniques@ is v common).
-}

-- Note [Unique Determinism]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~
-- The order of allocated @Uniques@ is not stable across rebuilds.
-- The main reason for that is that typechecking interface files pulls
-- @Uniques@ from @UniqSupply@ and the interface file for the module being
-- currently compiled can, but doesn't have to exist.
--
-- It gets more complicated if you take into account that the interface
-- files are loaded lazily and that building multiple files at once has to
-- work for any subset of interface files present. When you add parallelism
-- this makes @Uniques@ hopelessly random.
--
-- As such, to get deterministic builds, the order of the allocated
-- @Uniques@ should not affect the final result.
-- see also wiki/deterministic-builds
--
-- Note [Unique Determinism and code generation]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- The goal of the deterministic builds (wiki/deterministic-builds, #4012)
-- is to get ABI compatible binaries given the same inputs and environment.
-- The motivation behind that is that if the ABI doesn't change the
-- binaries can be safely reused.
-- Note that this is weaker than bit-for-bit identical binaries and getting
-- bit-for-bit identical binaries is not a goal for now.
-- This means that we don't care about nondeterminism that happens after
-- the interface files are created, in particular we don't care about
-- register allocation and code generation.
-- To track progress on bit-for-bit determinism see #12262.

eqUnique :: Unique -> Unique -> Bool
eqUnique (MkUnique u1) (MkUnique u2) = u1 == u2

ltUnique :: Unique -> Unique -> Bool
ltUnique (MkUnique u1) (MkUnique u2) = u1 < u2

-- Provided here to make it explicit at the call-site that it can
-- introduce non-determinism.
-- See Note [Unique Determinism]
-- See Note [No Ord for Unique]
nonDetCmpUnique :: Unique -> Unique -> Ordering
nonDetCmpUnique (MkUnique u1) (MkUnique u2)
  = if u1 == u2 then EQ else if u1 < u2 then LT else GT

{-
Note [No Ord for Unique]
~~~~~~~~~~~~~~~~~~~~~~~~~~
As explained in Note [Unique Determinism] the relative order of Uniques
is nondeterministic. To prevent from accidental use the Ord Unique
instance has been removed.
This makes it easier to maintain deterministic builds, but comes with some
drawbacks.
The biggest drawback is that Maps keyed by Uniques can't directly be used.
The alternatives are:

  1) Use UniqFM or UniqDFM, see Note [Deterministic UniqFM] to decide which
  2) Create a newtype wrapper based on Unique ordering where nondeterminism
     is controlled.
See Module.ModuleEnv
  3) Change the algorithm to use nonDetCmpUnique and document why it's still
     deterministic
  4) Use TrieMap as done in GHC.Cmm.CommonBlockElim.groupByLabel
-}

instance Eq Unique where
    a == b = eqUnique a b
    a /= b = not (eqUnique a b)

instance Uniquable Unique where
    getUnique u = u

-- We do sometimes make strings with @Uniques@ in them:

-- | Render a 'Unique' as its tag character followed by its index in
-- base 62.
showUnique :: Unique -> String
showUnique uniq
  = case unpkUnique uniq of
      (tag, u) -> tag : iToBase62 u

pprUniqueAlways :: Unique -> SDoc
-- The "always" means regardless of -dsuppress-uniques
-- It replaces the old pprUnique to remind callers that
-- they should consider whether they want to consult
-- Opt_SuppressUniques
pprUniqueAlways u
  = text (showUnique u)

instance Outputable Unique where
    ppr = pprUniqueAlways

instance Show Unique where
    show uniq = showUnique uniq

{-
************************************************************************
*                                                                      *
\subsection[Utils-base62]{Base-62 numbers}
*                                                                      *
************************************************************************

A character-stingy way to read/write numbers (notably Uniques).
The ``62-its'' are \tr{[0-9a-zA-Z]}.  We don't handle negative Ints.
Code stolen from Lennart.
-}

-- | Convert a non-negative Int to a base-62 string (most significant
-- digit first).  Negative input is rejected by the assertion.
iToBase62 :: Int -> String
iToBase62 n_ = assert (n_ >= 0) $ go n_ ""
  where
    go n cs | n < 62
            = let !c = chooseChar62 n in c : cs
            | otherwise
            = go q (c : cs) where (!q, r) = quotRem n 62
                                  !c = chooseChar62 r

-- Fast [0,61) -> Char lookup via an unboxed string literal; no bounds
-- check, so the index must be in range.
chooseChar62 :: Int -> Char
{-# INLINE chooseChar62 #-}
chooseChar62 (I# n) = C# (indexCharOffAddr# chars62 n)

chars62 = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"#
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/DFM.hs0000644000000000000000000004306114472400113021154 0ustar0000000000000000{-
(c) Bartosz Nitka, Facebook, 2015

UniqDFM: Specialised deterministic finite maps, for things with @Uniques@.

Basically, the things need to be in class @Uniquable@, and we use the
@getUnique@ method to grab their @Uniques@.
This is very similar to @UniqFM@, the major difference being that the order of
folding is not dependent on @Unique@ ordering, giving determinism.
Currently the ordering is determined by insertion order.

See Note [Unique Determinism] in GHC.Types.Unique for explanation why @Unique@
ordering is not deterministic.
-}

{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveTraversable #-}
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TupleSections #-}
{-# OPTIONS_GHC -Wall #-}

module GHC.Types.Unique.DFM (
        -- * Unique-keyed deterministic mappings
        UniqDFM,       -- abstract type

        -- ** Manipulating those mappings
        emptyUDFM,
        unitUDFM,
        addToUDFM,
        addToUDFM_C,
        addToUDFM_C_Directly,
        addToUDFM_Directly,
        addListToUDFM,
        delFromUDFM,
        delListFromUDFM,
        adjustUDFM,
        adjustUDFM_Directly,
        alterUDFM,
        mapUDFM,
        mapMaybeUDFM,
        plusUDFM,
        plusUDFM_C,
        lookupUDFM, lookupUDFM_Directly,
        elemUDFM,
        foldUDFM,
        eltsUDFM,
        filterUDFM, filterUDFM_Directly,
        isNullUDFM,
        sizeUDFM,
        intersectUDFM, udfmIntersectUFM,
        disjointUDFM, disjointUdfmUfm,
        equalKeysUDFM,
        minusUDFM,
        listToUDFM, listToUDFM_Directly,
        udfmMinusUFM, ufmMinusUDFM,
        partitionUDFM,
        udfmRestrictKeys,
        anyUDFM, allUDFM,
        pprUniqDFM, pprUDFM,

        udfmToList,
        udfmToUfm,
        nonDetStrictFoldUDFM,
        unsafeCastUDFMKey,
        alwaysUnsafeUfmToUdfm,
    ) where

import GHC.Prelude

import GHC.Types.Unique ( Uniquable(..), Unique, getKey )
import GHC.Utils.Outputable

import qualified Data.IntMap.Strict as MS
import qualified Data.IntMap as M
import Data.Data
import Data.Functor.Classes (Eq1 (..))
import Data.List (sortBy)
import Data.Function (on)
import GHC.Types.Unique.FM (UniqFM, nonDetUFMToList, ufmToIntMap, unsafeIntMapToUFM)
import Unsafe.Coerce

-- Note [Deterministic UniqFM]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- A @UniqDFM@ is just like @UniqFM@ with the following additional
-- property: the function `udfmToList` returns the elements in some
-- deterministic order not depending on the Unique key for those elements.
--
-- If the client of the map performs operations on the map in deterministic
-- order then `udfmToList` returns them in deterministic order.
--
-- There is an implementation cost: each element is given a serial number
-- as it is added, and `udfmToList` sorts its result by this serial
-- number. So you should only use `UniqDFM` if you need the deterministic
-- property.
--
-- `foldUDFM` also preserves determinism.
--
-- Normal @UniqFM@ when you turn it into a list will use
-- Data.IntMap.toList function that returns the elements in the order of
-- the keys. The keys in @UniqFM@ are always @Uniques@, so you end up with
-- with a list ordered by @Uniques@.
-- The order of @Uniques@ is known to be not stable across rebuilds.
-- See Note [Unique Determinism] in GHC.Types.Unique.
--
--
-- There's more than one way to implement this. The implementation here tags
-- every value with the insertion time that can later be used to sort the
-- values when asked to convert to a list.
--
-- An alternative would be to have
--
--   data UniqDFM ele = UDFM (M.IntMap ele) [ele]
--
-- where the list determines the order. This makes deletion tricky as we'd
-- only accumulate elements in that list, but makes merging easier as you
-- can just merge both structures independently.
-- Deletion can probably be done in amortized fashion when the size of the
-- list is twice the size of the set.

-- | A type of values tagged with insertion time
data TaggedVal val =
  TaggedVal
    !val
    {-# UNPACK #-} !Int -- ^ insertion time
  deriving stock (Data, Functor, Foldable, Traversable)

taggedFst :: TaggedVal val -> val
taggedFst (TaggedVal v _) = v

taggedSnd :: TaggedVal val -> Int
taggedSnd (TaggedVal _ i) = i

-- Equality deliberately ignores the insertion-time tag: two maps with
-- the same key/value pairs compare equal even if built in different
-- orders.
instance Eq val => Eq (TaggedVal val) where
  (TaggedVal v1 _) == (TaggedVal v2 _) = v1 == v2

-- | Type of unique deterministic finite maps
--
-- The key is just here to keep us honest. It's always safe
-- to use a single type as key.
-- If two types don't overlap in their uniques it's also safe
-- to index the same map at multiple key types. But this is
-- very much discouraged.
data UniqDFM key ele =
  UDFM
    !(M.IntMap (TaggedVal ele)) -- A map where keys are Unique's values and
                                -- values are tagged with insertion time.
                                -- The invariant is that all the tags will
                                -- be distinct within a single map
    {-# UNPACK #-} !Int         -- Upper bound on the values' insertion
                                -- time. See Note [Overflow on plusUDFM]
  deriving (Data, Functor)

-- | Deterministic, in O(n log n).
instance Foldable (UniqDFM key) where
  foldr = foldUDFM

-- | Deterministic, in O(n log n).
instance Traversable (UniqDFM key) where
  traverse f = fmap listToUDFM_Directly
             . traverse (\(u,a) -> (u,) <$> f a)
             . udfmToList

emptyUDFM :: UniqDFM key elt
emptyUDFM = UDFM M.empty 0

unitUDFM :: Uniquable key => key -> elt -> UniqDFM key elt
unitUDFM k v = UDFM (M.singleton (getKey $ getUnique k) (TaggedVal v 0)) 1

-- The new binding always goes to the right of existing ones
addToUDFM :: Uniquable key => UniqDFM key elt -> key -> elt  -> UniqDFM key elt
addToUDFM m k v = addToUDFM_Directly m (getUnique k) v

-- The new binding always goes to the right of existing ones
addToUDFM_Directly :: UniqDFM key elt -> Unique -> elt -> UniqDFM key elt
addToUDFM_Directly (UDFM m i) u v
  = UDFM (MS.insertWith tf (getKey u) (TaggedVal v i) m) (i + 1)
  where
    tf (TaggedVal new_v _) (TaggedVal _ old_i) = TaggedVal new_v old_i
      -- Keep the old tag, but insert the new value
      -- This means that udfmToList typically returns elements
      -- in the order of insertion, rather than the reverse

      -- It is quite critical that the strict insertWith is used as otherwise
      -- the combination function 'tf' is not forced and both old values are retained
      -- in the map.
-- | Insert with a combining function; the insertion-time tag of an
-- existing entry is kept, so the element retains its original position
-- in the deterministic order.
addToUDFM_C_Directly
  :: (elt -> elt -> elt)   -- old -> new -> result
  -> UniqDFM key elt
  -> Unique -> elt
  -> UniqDFM key elt
addToUDFM_C_Directly f (UDFM m i) u v
  = UDFM (MS.insertWith tf (getKey u) (TaggedVal v i) m) (i + 1)
    where
      tf (TaggedVal new_v _) (TaggedVal old_v old_i)
        = TaggedVal (f old_v new_v) old_i
          -- Flip the arguments, because M.insertWith uses  (new->old->result)
          --                         but f            needs (old->new->result)
          -- Like addToUDFM_Directly, keep the old tag

addToUDFM_C
  :: Uniquable key
  => (elt -> elt -> elt) -- old -> new -> result
  -> UniqDFM key elt -- old
  -> key -> elt -- new
  -> UniqDFM key elt -- result
addToUDFM_C f m k v = addToUDFM_C_Directly f m (getUnique k) v

addListToUDFM :: Uniquable key => UniqDFM key elt -> [(key,elt)] -> UniqDFM key elt
addListToUDFM = foldl' (\m (k, v) -> addToUDFM m k v)

addListToUDFM_Directly :: UniqDFM key elt -> [(Unique,elt)] -> UniqDFM key elt
addListToUDFM_Directly = foldl' (\m (k, v) -> addToUDFM_Directly m k v)

addListToUDFM_Directly_C
  :: (elt -> elt -> elt) -> UniqDFM key elt -> [(Unique,elt)] -> UniqDFM key elt
addListToUDFM_Directly_C f = foldl' (\m (k, v) -> addToUDFM_C_Directly f m k v)

-- Deletion keeps the upper bound `i` unchanged; tags of remaining
-- entries stay valid.
delFromUDFM :: Uniquable key => UniqDFM key elt -> key -> UniqDFM key elt
delFromUDFM (UDFM m i) k = UDFM (M.delete (getKey $ getUnique k) m) i

plusUDFM_C :: (elt -> elt -> elt) -> UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt
plusUDFM_C f udfml@(UDFM _ i) udfmr@(UDFM _ j)
  -- we will use the upper bound on the tag as a proxy for the set size,
  -- to insert the smaller one into the bigger one
  | i > j = insertUDFMIntoLeft_C f udfml udfmr
  | otherwise = insertUDFMIntoLeft_C f udfmr udfml

-- Note [Overflow on plusUDFM]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- There are multiple ways of implementing plusUDFM.
-- The main problem that needs to be solved is overlap on times of
-- insertion between different keys in two maps.
-- Consider:
--
-- A = fromList [(a, (x, 1))]
-- B = fromList [(b, (y, 1))]
--
-- If you merge them naively you end up with:
--
-- C = fromList [(a, (x, 1)), (b, (y, 1))]
--
-- Which loses information about ordering and brings us back into
-- non-deterministic world.
--
-- The solution I considered before would increment the tags on one of the
-- sets by the upper bound of the other set. The problem with this approach
-- is that you'll run out of tags for some merge patterns.
-- Say you start with A with upper bound 1, you merge A with A to get A' and
-- the upper bound becomes 2. You merge A' with A' and the upper bound
-- doubles again. After 64 merges you overflow.
-- This solution would have the same time complexity as plusUFM, namely O(n+m).
--
-- The solution I ended up with has time complexity of
-- O(m log m + m * min (n+m, W)) where m is the smaller set.
-- It simply inserts the elements of the smaller set into the larger
-- set in the order that they were inserted into the smaller set. That's
-- O(m log m) for extracting the elements from the smaller set in the
-- insertion order and O(m * min(n+m, W)) to insert them into the bigger
-- set.

plusUDFM :: UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt
plusUDFM udfml@(UDFM _ i) udfmr@(UDFM _ j)
  -- we will use the upper bound on the tag as a proxy for the set size,
  -- to insert the smaller one into the bigger one
  | i > j = insertUDFMIntoLeft udfml udfmr
  | otherwise = insertUDFMIntoLeft udfmr udfml

insertUDFMIntoLeft :: UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt
insertUDFMIntoLeft udfml udfmr = addListToUDFM_Directly udfml $ udfmToList udfmr

insertUDFMIntoLeft_C
  :: (elt -> elt -> elt) -> UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt
insertUDFMIntoLeft_C f udfml udfmr =
  addListToUDFM_Directly_C f udfml $ udfmToList udfmr

lookupUDFM :: Uniquable key => UniqDFM key elt -> key -> Maybe elt
lookupUDFM (UDFM m _i) k = taggedFst `fmap` M.lookup (getKey $ getUnique k) m

lookupUDFM_Directly :: UniqDFM key elt -> Unique -> Maybe elt
lookupUDFM_Directly (UDFM m _i) k = taggedFst `fmap` M.lookup (getKey k) m

elemUDFM :: Uniquable key => key -> UniqDFM key elt -> Bool
elemUDFM k (UDFM m _i) = M.member (getKey $ getUnique k) m

-- | Performs a deterministic fold over the UniqDFM.
-- It's O(n log n) while the corresponding function on `UniqFM` is O(n).
foldUDFM :: (elt -> a -> a) -> a -> UniqDFM key elt -> a
foldUDFM k z m = foldr k z (eltsUDFM m)

-- | Performs a nondeterministic strict fold over the UniqDFM.
-- It's O(n), same as the corresponding function on `UniqFM`.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldUDFM :: (elt -> a -> a) -> a -> UniqDFM key elt -> a
nonDetStrictFoldUDFM k z (UDFM m _i) = foldl' k' z m
  where
    k' acc (TaggedVal v _) = k v acc

-- | Elements in deterministic (insertion) order: sort by the
-- insertion-time tag before discarding it.
eltsUDFM :: UniqDFM key elt -> [elt]
eltsUDFM (UDFM m _i) =
  map taggedFst $ sortBy (compare `on` taggedSnd) $ M.elems m

filterUDFM :: (elt -> Bool) -> UniqDFM key elt -> UniqDFM key elt
filterUDFM p (UDFM m i) = UDFM (M.filter (\(TaggedVal v _) -> p v) m) i

filterUDFM_Directly :: (Unique -> elt -> Bool) -> UniqDFM key elt -> UniqDFM key elt
filterUDFM_Directly p (UDFM m i) = UDFM (M.filterWithKey p' m) i
  where
  p' k (TaggedVal v _) = p (getUnique k) v

udfmRestrictKeys :: UniqDFM key elt -> UniqDFM key elt2 -> UniqDFM key elt
udfmRestrictKeys (UDFM a i) (UDFM b _) = UDFM (M.restrictKeys a (M.keysSet b)) i

-- | Converts `UniqDFM` to a list, with elements in deterministic order.
-- It's O(n log n) while the corresponding function on `UniqFM` is O(n).
udfmToList :: UniqDFM key elt -> [(Unique, elt)]
udfmToList (UDFM m _i) =
  [ (getUnique k, taggedFst v)
  | (k, v) <- sortBy (compare `on` (taggedSnd . snd)) $ M.toList m ]

-- Determines whether two 'UniqDFM's contain the same keys.
equalKeysUDFM :: UniqDFM key a -> UniqDFM key b -> Bool
equalKeysUDFM (UDFM m1 _) (UDFM m2 _) = liftEq (\_ _ -> True) m1 m2

isNullUDFM :: UniqDFM key elt -> Bool
isNullUDFM (UDFM m _) = M.null m

sizeUDFM :: UniqDFM key elt -> Int
sizeUDFM (UDFM m _i) = M.size m

intersectUDFM :: UniqDFM key elt -> UniqDFM key elt -> UniqDFM key elt
intersectUDFM (UDFM x i) (UDFM y _j) = UDFM (M.intersection x y) i
  -- M.intersection is left biased, that means the result will only have
  -- a subset of elements from the left set, so `i` is a good upper bound.

udfmIntersectUFM :: UniqDFM key elt1 -> UniqFM key elt2 -> UniqDFM key elt1
udfmIntersectUFM (UDFM x i) y = UDFM (M.intersection x (ufmToIntMap y)) i
  -- M.intersection is left biased, that means the result will only have
  -- a subset of elements from the left set, so `i` is a good upper bound.

disjointUDFM :: UniqDFM key elt -> UniqDFM key elt -> Bool
disjointUDFM (UDFM x _i) (UDFM y _j) = M.disjoint x y

disjointUdfmUfm :: UniqDFM key elt -> UniqFM key elt2 -> Bool
disjointUdfmUfm (UDFM x _i) y = M.disjoint x (ufmToIntMap y)

minusUDFM :: UniqDFM key elt1 -> UniqDFM key elt2 -> UniqDFM key elt1
minusUDFM (UDFM x i) (UDFM y _j) = UDFM (M.difference x y) i
  -- M.difference returns a subset of a left set, so `i` is a good upper
  -- bound.

udfmMinusUFM :: UniqDFM key elt1 -> UniqFM key elt2 -> UniqDFM key elt1
udfmMinusUFM (UDFM x i) y = UDFM (M.difference x (ufmToIntMap y)) i
  -- M.difference returns a subset of a left set, so `i` is a good upper
  -- bound.

ufmMinusUDFM :: UniqFM key elt1 -> UniqDFM key elt2 -> UniqFM key elt1
ufmMinusUDFM x (UDFM y _i) = unsafeIntMapToUFM (M.difference (ufmToIntMap x) y)

-- | Partition UniqDFM into two UniqDFMs according to the predicate
partitionUDFM :: (elt -> Bool) -> UniqDFM key elt -> (UniqDFM key elt, UniqDFM key elt)
partitionUDFM p (UDFM m i) =
  case M.partition (p . taggedFst) m of
    (left, right) -> (UDFM left i, UDFM right i)

-- | Delete a list of elements from a UniqDFM
delListFromUDFM  :: Uniquable key => UniqDFM key elt -> [key] -> UniqDFM key elt
delListFromUDFM = foldl' delFromUDFM

-- | This allows for lossy conversion from UniqDFM to UniqFM
udfmToUfm :: UniqDFM key elt -> UniqFM key elt
udfmToUfm (UDFM m _i) = unsafeIntMapToUFM (M.map taggedFst m)

listToUDFM :: Uniquable key => [(key,elt)] -> UniqDFM key elt
listToUDFM = foldl' (\m (k, v) -> addToUDFM m k v) emptyUDFM

listToUDFM_Directly :: [(Unique, elt)] -> UniqDFM key elt
listToUDFM_Directly = foldl' (\m (u, v) -> addToUDFM_Directly m u v) emptyUDFM

-- | Apply a function to a particular element
adjustUDFM :: Uniquable key => (elt -> elt) -> UniqDFM key elt -> key -> UniqDFM key elt
adjustUDFM f (UDFM m i) k = UDFM (M.adjust (fmap f) (getKey $ getUnique k) m) i

-- | Apply a function to a particular element
adjustUDFM_Directly :: (elt -> elt) -> UniqDFM key elt -> Unique -> UniqDFM key elt
adjustUDFM_Directly f (UDFM m i) k = UDFM (M.adjust (fmap f) (getKey k) m) i

-- | The expression (alterUDFM f k map) alters value x at k, or absence
-- thereof. alterUDFM can be used to insert, delete, or update a value in
-- UniqDFM. Use addToUDFM, delFromUDFM or adjustUDFM when possible, they are
-- more efficient.
alterUDFM
  :: Uniquable key
  => (Maybe elt -> Maybe elt)  -- How to adjust
  -> UniqDFM key elt               -- old
  -> key                       -- new
  -> UniqDFM key elt               -- result
alterUDFM f (UDFM m i) k =
  UDFM (M.alter alterf (getKey $ getUnique k) m) (i + 1)
  where
  alterf Nothing = inject $ f Nothing
  alterf (Just (TaggedVal v _)) = inject $ f (Just v)
  inject Nothing = Nothing
  inject (Just v) = Just $ TaggedVal v i

-- | Map a function over every value in a UniqDFM
mapUDFM :: (elt1 -> elt2) -> UniqDFM key elt1 -> UniqDFM key elt2
mapUDFM f (UDFM m i) = UDFM (MS.map (fmap f) m) i
-- Critical this is strict map, otherwise you get a big space leak when reloading
-- in GHCi because all old ModDetails are retained (see pruneHomePackageTable).
-- Modify with care.

mapMaybeUDFM :: forall elt1 elt2 key.
             (elt1 -> Maybe elt2) -> UniqDFM key elt1 -> UniqDFM key elt2
mapMaybeUDFM f (UDFM m i) = UDFM (M.mapMaybe (traverse f) m) i

anyUDFM :: (elt -> Bool) -> UniqDFM key elt -> Bool
anyUDFM p (UDFM m _i) = M.foldr ((||) . p . taggedFst) False m

allUDFM :: (elt -> Bool) -> UniqDFM key elt -> Bool
allUDFM p (UDFM m _i) = M.foldr ((&&) . p . taggedFst) True m

-- This should not be used in committed code, provided for convenience to
-- make ad-hoc conversions when developing
alwaysUnsafeUfmToUdfm :: UniqFM key elt -> UniqDFM key elt
alwaysUnsafeUfmToUdfm = listToUDFM_Directly . nonDetUFMToList

-- | Cast the key domain of a UniqFM.
--
-- As long as the domains don't overlap in their uniques
-- this is safe.
unsafeCastUDFMKey :: UniqDFM key1 elt -> UniqDFM key2 elt
unsafeCastUDFMKey = unsafeCoerce -- Only phantom parameter changes so
                                 -- this is safe and avoids reallocation.
-- Output-ery

instance Outputable a => Outputable (UniqDFM key a) where
    ppr ufm = pprUniqDFM ppr ufm

pprUniqDFM :: (a -> SDoc) -> UniqDFM key a -> SDoc
pprUniqDFM ppr_elt ufm
  = brackets $ fsep $ punctuate comma $
    [ ppr uq <+> text ":->" <+> ppr_elt elt
    | (uq, elt) <- udfmToList ufm ]

pprUDFM :: UniqDFM key a    -- ^ The things to be pretty printed
       -> ([a] -> SDoc)     -- ^ The pretty printing function to use on the elements
       -> SDoc              -- ^ 'SDoc' where the things have been pretty
                            -- printed
pprUDFM ufm pp = pp (eltsUDFM ufm)
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/DSet.hs0000644000000000000000000001124414472400113021403 0ustar0000000000000000-- (c) Bartosz Nitka, Facebook, 2015

-- |
-- Specialised deterministic sets, for things with @Uniques@
--
-- Based on 'UniqDFM's (as you would expect).
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why we need it.
--
-- Basically, the things need to be in class 'Uniquable'.

{-# LANGUAGE DeriveDataTypeable #-}

module GHC.Types.Unique.DSet (
        -- * Unique set type
        UniqDSet,    -- type synonym for UniqFM a
        getUniqDSet,
        pprUniqDSet,

        -- ** Manipulating these sets
        delOneFromUniqDSet, delListFromUniqDSet,
        emptyUniqDSet,
        unitUniqDSet,
        mkUniqDSet,
        addOneToUniqDSet, addListToUniqDSet,
        unionUniqDSets, unionManyUniqDSets,
        minusUniqDSet, uniqDSetMinusUniqSet,
        intersectUniqDSets, uniqDSetIntersectUniqSet,
        nonDetStrictFoldUniqDSet,
        elementOfUniqDSet,
        filterUniqDSet,
        sizeUniqDSet,
        isEmptyUniqDSet,
        lookupUniqDSet,
        uniqDSetToList,
        partitionUniqDSet,
        mapUniqDSet
    ) where

import GHC.Prelude

import GHC.Utils.Outputable
import GHC.Types.Unique.DFM
import GHC.Types.Unique.Set
import GHC.Types.Unique

import Data.Coerce
import Data.Data

-- See Note [UniqSet invariant] in GHC.Types.Unique.Set for why we want a newtype here.
-- Beyond preserving invariants, we may also want to 'override' typeclass
-- instances.
-- Invariant: each element is stored with itself as both key and value,
-- so the underlying UniqDFM maps a thing's Unique to the thing.
newtype UniqDSet a = UniqDSet {getUniqDSet' :: UniqDFM a a}
                   deriving (Data)

emptyUniqDSet :: UniqDSet a
emptyUniqDSet = UniqDSet emptyUDFM

unitUniqDSet :: Uniquable a => a -> UniqDSet a
unitUniqDSet x = UniqDSet (unitUDFM x x)

mkUniqDSet :: Uniquable a => [a] -> UniqDSet a
mkUniqDSet = foldl' addOneToUniqDSet emptyUniqDSet

-- The new element always goes to the right of existing ones.
addOneToUniqDSet :: Uniquable a => UniqDSet a -> a -> UniqDSet a
addOneToUniqDSet (UniqDSet set) x = UniqDSet (addToUDFM set x x)

addListToUniqDSet :: Uniquable a => UniqDSet a -> [a] -> UniqDSet a
addListToUniqDSet = foldl' addOneToUniqDSet

delOneFromUniqDSet :: Uniquable a => UniqDSet a -> a -> UniqDSet a
delOneFromUniqDSet (UniqDSet s) = UniqDSet . delFromUDFM s

delListFromUniqDSet :: Uniquable a => UniqDSet a -> [a] -> UniqDSet a
delListFromUniqDSet (UniqDSet s) = UniqDSet . delListFromUDFM s

unionUniqDSets :: UniqDSet a -> UniqDSet a -> UniqDSet a
unionUniqDSets (UniqDSet s) (UniqDSet t) = UniqDSet (plusUDFM s t)

unionManyUniqDSets :: [UniqDSet a] -> UniqDSet a
unionManyUniqDSets []     = emptyUniqDSet
unionManyUniqDSets (x:xs) = foldl' unionUniqDSets x xs

minusUniqDSet :: UniqDSet a -> UniqDSet a -> UniqDSet a
minusUniqDSet (UniqDSet s) (UniqDSet t) = UniqDSet (minusUDFM s t)

uniqDSetMinusUniqSet :: UniqDSet a -> UniqSet a -> UniqDSet a
uniqDSetMinusUniqSet xs ys
  = UniqDSet (udfmMinusUFM (getUniqDSet xs) (getUniqSet ys))

intersectUniqDSets :: UniqDSet a -> UniqDSet a -> UniqDSet a
intersectUniqDSets (UniqDSet s) (UniqDSet t) = UniqDSet (intersectUDFM s t)

uniqDSetIntersectUniqSet :: UniqDSet a -> UniqSet a -> UniqDSet a
uniqDSetIntersectUniqSet xs ys
  = UniqDSet (udfmIntersectUFM (getUniqDSet xs) (getUniqSet ys))

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldUniqDSet :: (a -> b -> b) -> b -> UniqDSet a -> b
nonDetStrictFoldUniqDSet f acc (UniqDSet s) = nonDetStrictFoldUDFM f acc s

elementOfUniqDSet :: Uniquable a => a -> UniqDSet a -> Bool
elementOfUniqDSet k = elemUDFM k . getUniqDSet

filterUniqDSet :: (a -> Bool) -> UniqDSet a -> UniqDSet a
filterUniqDSet p (UniqDSet s) = UniqDSet (filterUDFM p s)

sizeUniqDSet :: UniqDSet a -> Int
sizeUniqDSet = sizeUDFM . getUniqDSet

isEmptyUniqDSet :: UniqDSet a -> Bool
isEmptyUniqDSet = isNullUDFM . getUniqDSet

lookupUniqDSet :: Uniquable a => UniqDSet a -> a -> Maybe a
lookupUniqDSet = lookupUDFM . getUniqDSet

uniqDSetToList :: UniqDSet a -> [a]
uniqDSetToList = eltsUDFM . getUniqDSet

partitionUniqDSet :: (a -> Bool) -> UniqDSet a -> (UniqDSet a, UniqDSet a)
partitionUniqDSet p = coerce . partitionUDFM p . getUniqDSet

-- See Note [UniqSet invariant] in GHC.Types.Unique.Set
-- NB: rebuilds the set from scratch because f may change elements'
-- Uniques (so the key-equals-value invariant must be re-established).
mapUniqDSet :: Uniquable b => (a -> b) -> UniqDSet a -> UniqDSet b
mapUniqDSet f = mkUniqDSet . map f . uniqDSetToList

-- Two 'UniqDSet's are considered equal if they contain the same
-- uniques.
instance Eq (UniqDSet a) where
  UniqDSet a == UniqDSet b = equalKeysUDFM a b

getUniqDSet :: UniqDSet a -> UniqDFM a a
getUniqDSet = getUniqDSet'

instance Outputable a => Outputable (UniqDSet a) where
  ppr = pprUniqDSet ppr

pprUniqDSet :: (a -> SDoc) -> UniqDSet a -> SDoc
pprUniqDSet f = braces . pprWithCommas f . uniqDSetToList
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/FM.hs0000644000000000000000000004574314472400113021061 0ustar0000000000000000{-
(c) The University of Glasgow 2006
(c) The AQUA Project, Glasgow University, 1994-1998

UniqFM: Specialised finite maps, for things with @Uniques@.

Basically, the things need to be in class @Uniquable@, and we use the
@getUnique@ method to grab their @Uniques@.

(A similar thing to @UniqSet@, as opposed to @Set@.)
The interface is based on @FiniteMap@s, but the implementation uses @Data.IntMap@, which is both maintained and faster than the past implementation (see commit log). The @UniqFM@ interface maps directly to Data.IntMap, only ``Data.IntMap.union'' is left-biased and ``plusUFM'' right-biased and ``addToUFM\_C'' and ``Data.IntMap.insertWith'' differ in the order of arguments of combining function. -} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -Wall #-} module GHC.Types.Unique.FM ( -- * Unique-keyed mappings UniqFM, -- abstract type NonDetUniqFM(..), -- wrapper for opting into nondeterminism -- ** Manipulating those mappings emptyUFM, unitUFM, unitDirectlyUFM, zipToUFM, listToUFM, listToUFM_Directly, listToUFM_C, listToIdentityUFM, addToUFM,addToUFM_C,addToUFM_Acc, addListToUFM,addListToUFM_C, addToUFM_Directly, addListToUFM_Directly, adjustUFM, alterUFM, adjustUFM_Directly, delFromUFM, delFromUFM_Directly, delListFromUFM, delListFromUFM_Directly, plusUFM, plusUFM_C, plusUFM_CD, plusUFM_CD2, mergeUFM, plusMaybeUFM_C, plusUFMList, sequenceUFMList, minusUFM, minusUFM_C, intersectUFM, intersectUFM_C, disjointUFM, equalKeysUFM, nonDetStrictFoldUFM, foldUFM, nonDetStrictFoldUFM_DirectlyM, nonDetStrictFoldUFM_Directly, anyUFM, allUFM, seqEltsUFM, mapUFM, mapUFM_Directly, mapMaybeUFM, elemUFM, elemUFM_Directly, filterUFM, filterUFM_Directly, partitionUFM, sizeUFM, isNullUFM, lookupUFM, lookupUFM_Directly, lookupWithDefaultUFM, lookupWithDefaultUFM_Directly, nonDetEltsUFM, nonDetKeysUFM, ufmToSet_Directly, nonDetUFMToList, ufmToIntMap, unsafeIntMapToUFM, unsafeCastUFMKey, pprUniqFM, pprUFM, pprUFMWithKeys, pluralUFM ) where import GHC.Prelude import GHC.Types.Unique ( Uniquable(..), Unique, getKey ) import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import qualified Data.IntMap as M import qualified Data.IntMap.Strict as MS import qualified Data.IntSet as S import Data.Data import 
qualified Data.Semigroup as Semi import Data.Functor.Classes (Eq1 (..)) import Data.Coerce -- | A finite map from @uniques@ of one type to -- elements in another type. -- -- The key is just here to keep us honest. It's always safe -- to use a single type as key. -- If two types don't overlap in their uniques it's also safe -- to index the same map at multiple key types. But this is -- very much discouraged. newtype UniqFM key ele = UFM (M.IntMap ele) deriving (Data, Eq, Functor) -- Nondeterministic Foldable and Traversable instances are accessible through -- use of the 'NonDetUniqFM' wrapper. -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM to learn about determinism. emptyUFM :: UniqFM key elt emptyUFM = UFM M.empty isNullUFM :: UniqFM key elt -> Bool isNullUFM (UFM m) = M.null m unitUFM :: Uniquable key => key -> elt -> UniqFM key elt unitUFM k v = UFM (M.singleton (getKey $ getUnique k) v) -- when you've got the Unique already unitDirectlyUFM :: Unique -> elt -> UniqFM key elt unitDirectlyUFM u v = UFM (M.singleton (getKey u) v) -- zipToUFM ks vs = listToUFM (zip ks vs) -- This function exists because it's a common case (#18535), and -- it's inefficient to first build a list of pairs, and then immediately -- take it apart. Astonishingly, fusing this one list away reduces total -- compiler allocation by more than 10% (in T12545, see !3935) -- Note that listToUFM (zip ks vs) performs similarly, but -- the explicit recursion avoids relying too much on fusion. 
-- | Build a map from parallel lists of keys and values; equivalent to
-- @listToUFM (zip ks vs)@ but without the intermediate pair list
-- (see the comment above, #18535, !3935).
zipToUFM :: Uniquable key => [key] -> [elt] -> UniqFM key elt
zipToUFM ks vs = assert (length ks == length vs ) go emptyUFM ks vs
  where
    go acc (k:ks') (v:vs') = go (addToUFM acc k v) ks' vs'
    go acc _       _       = acc

-- | Build a map from a list of pairs; later bindings shadow earlier ones.
listToUFM :: Uniquable key => [(key,elt)] -> UniqFM key elt
listToUFM = foldl' (\acc (k, v) -> addToUFM acc k v) emptyUFM

-- | As 'listToUFM', when you already have the 'Unique's.
listToUFM_Directly :: [(Unique, elt)] -> UniqFM key elt
listToUFM_Directly = foldl' (\acc (u, v) -> addToUFM_Directly acc u v) emptyUFM

-- | Map every list element to itself.
listToIdentityUFM :: Uniquable key => [key] -> UniqFM key key
listToIdentityUFM = foldl' (\acc k -> addToUFM acc k k) emptyUFM

-- | As 'listToUFM', combining clashing bindings with the supplied
-- function (old value first, new value second).
listToUFM_C
  :: Uniquable key
  => (elt -> elt -> elt)
  -> [(key, elt)]
  -> UniqFM key elt
listToUFM_C f = foldl' (\acc (k, v) -> addToUFM_C f acc k v) emptyUFM

-- | Insert a binding, overwriting any previous binding for that key.
addToUFM :: Uniquable key => UniqFM key elt -> key -> elt -> UniqFM key elt
addToUFM (UFM im) k v = UFM (M.insert (getKey (getUnique k)) v im)

addListToUFM :: Uniquable key => UniqFM key elt -> [(key,elt)] -> UniqFM key elt
addListToUFM = foldl' (\acc (k, v) -> addToUFM acc k v)

addListToUFM_Directly :: UniqFM key elt -> [(Unique,elt)] -> UniqFM key elt
addListToUFM_Directly = foldl' (\acc (u, v) -> addToUFM_Directly acc u v)

addToUFM_Directly :: UniqFM key elt -> Unique -> elt -> UniqFM key elt
addToUFM_Directly (UFM im) u v = UFM (M.insert (getKey u) v im)

addToUFM_C
  :: Uniquable key
  => (elt -> elt -> elt) -- old -> new -> result
  -> UniqFM key elt      -- old
  -> key -> elt          -- new
  -> UniqFM key elt      -- result
-- M.insertWith hands the combining function its arguments in the opposite
-- order (new first), hence the 'flip'.
addToUFM_C f (UFM im) k v =
  UFM (M.insertWith (flip f) (getKey (getUnique k)) v im)

addToUFM_Acc
  :: Uniquable key
  => (elt -> elts -> elts) -- Add to existing accumulator
  -> (elt -> elts)         -- Start a fresh accumulator
  -> UniqFM key elts       -- old
  -> key -> elt            -- new
  -> UniqFM key elts       -- result
addToUFM_Acc exi new (UFM im) k v =
  UFM (M.insertWith (\_new old -> exi v old) (getKey (getUnique k)) (new v) im)

alterUFM
  :: Uniquable key
  => (Maybe elt -> Maybe elt) -- How to adjust
  -> UniqFM key elt           -- old
  -> key                      -- new
  -> UniqFM key elt           -- result
alterUFM f (UFM im) k = UFM (M.alter f (getKey (getUnique k)) im)

-- | Add elements to the map, combining existing values with inserted ones using
-- the given function.
addListToUFM_C
  :: Uniquable key
  => (elt -> elt -> elt)
  -> UniqFM key elt -> [(key,elt)]
  -> UniqFM key elt
addListToUFM_C f = foldl' (\acc (k, v) -> addToUFM_C f acc k v)

-- | Apply a function to the value at the given key, if present.
adjustUFM :: Uniquable key => (elt -> elt) -> UniqFM key elt -> key -> UniqFM key elt
adjustUFM f (UFM im) k = UFM (M.adjust f (getKey (getUnique k)) im)

adjustUFM_Directly :: (elt -> elt) -> UniqFM key elt -> Unique -> UniqFM key elt
adjustUFM_Directly f (UFM im) u = UFM (M.adjust f (getKey u) im)

delFromUFM :: Uniquable key => UniqFM key elt -> key -> UniqFM key elt
delFromUFM (UFM im) k = UFM (M.delete (getKey (getUnique k)) im)

delListFromUFM :: Uniquable key => UniqFM key elt -> [key] -> UniqFM key elt
delListFromUFM = foldl' delFromUFM

delListFromUFM_Directly :: UniqFM key elt -> [Unique] -> UniqFM key elt
delListFromUFM_Directly = foldl' delFromUFM_Directly

delFromUFM_Directly :: UniqFM key elt -> Unique -> UniqFM key elt
delFromUFM_Directly (UFM im) u = UFM (M.delete (getKey u) im)

-- | Union of two maps; bindings in the /right/ argument shadow those in
-- the left.
plusUFM :: UniqFM key elt -> UniqFM key elt -> UniqFM key elt
-- M.union is left-biased, plusUFM should be right-biased, so the
-- arguments below are deliberately flipped.
plusUFM (UFM x) (UFM y) = UFM (M.union y x)
-- | Union of two maps, resolving key clashes with the supplied function.
plusUFM_C :: (elt -> elt -> elt) -> UniqFM key elt -> UniqFM key elt -> UniqFM key elt
plusUFM_C f (UFM xm) (UFM ym) = UFM (M.unionWith f xm ym)

-- | @plusUFM_CD f m1 d1 m2 d2@ merges the maps using @f@ as the
-- combining function and @d1@ resp. @d2@ as the default value if
-- there is no entry in @m1@ resp. @m2@. The domain is the union of
-- the domains of @m1@ and @m2@.
--
-- IMPORTANT NOTE: This function strictly applies the modification function
-- and forces the result, unlike most of the other functions in this module.
--
-- Representative example:
--
-- @
-- plusUFM_CD f {A: 1, B: 2} 23 {B: 3, C: 4} 42
--    == {A: f 1 42, B: f 2 3, C: f 23 4 }
-- @
{-# INLINE plusUFM_CD #-}
plusUFM_CD
  :: (elta -> eltb -> eltc)
  -> UniqFM key elta -- map X
  -> elta            -- default for X
  -> UniqFM key eltb -- map Y
  -> eltb            -- default for Y
  -> UniqFM key eltc
plusUFM_CD f (UFM xm) dx (UFM ym) dy
  = UFM $ MS.mergeWithKey
      (\_ x y -> Just (x `f` y)) -- key present in both maps
      (MS.map (`f` dy))          -- key only in X: combine with Y's default
      (MS.map (dx `f`))          -- key only in Y: combine with X's default
      xm ym

-- | `plusUFM_CD2 f m1 m2` merges the maps using `f` as the combining
-- function. Unlike `plusUFM_CD`, a missing value is not defaulted: it is
-- instead passed as `Nothing` to `f`. `f` can never have both its arguments
-- be `Nothing`.
--
-- IMPORTANT NOTE: This function strictly applies the modification function
-- and forces the result.
--
-- `plusUFM_CD2 f m1 m2` is the same as `plusUFM_CD f (mapUFM Just m1) Nothing
-- (mapUFM Just m2) Nothing`.
plusUFM_CD2
  :: (Maybe elta -> Maybe eltb -> eltc)
  -> UniqFM key elta -- map X
  -> UniqFM key eltb -- map Y
  -> UniqFM key eltc
plusUFM_CD2 f (UFM xm) (UFM ym)
  = UFM $ MS.mergeWithKey
      (\_ x y -> Just (Just x `f` Just y)) -- key present in both maps
      (MS.map (\x -> Just x `f` Nothing))  -- key only in X
      (MS.map (\y -> Nothing `f` Just y))  -- key only in Y
      xm ym

-- | General merge: @f@ combines bindings present in both maps (returning
-- 'Nothing' drops the key), while @g@ resp. @h@ transform the submaps of
-- bindings exclusive to the left resp. right argument.
mergeUFM
  :: (elta -> eltb -> Maybe eltc)
  -> (UniqFM key elta -> UniqFM key eltc) -- map X
  -> (UniqFM key eltb -> UniqFM key eltc) -- map Y
  -> UniqFM key elta
  -> UniqFM key eltb
  -> UniqFM key eltc
mergeUFM f g h (UFM xm) (UFM ym)
  = UFM $ MS.mergeWithKey
      (\_ x y -> x `f` y)
      (coerce g) -- 'coerce' unwraps/rewraps the UFM newtype for free
      (coerce h)
      xm ym

-- | Union with a combining function that may drop a clashing binding
-- altogether by returning 'Nothing'.
plusMaybeUFM_C
  :: (elt -> elt -> Maybe elt)
  -> UniqFM key elt -> UniqFM key elt -> UniqFM key elt
plusMaybeUFM_C f (UFM xm) (UFM ym)
  = UFM $ M.mergeWithKey (\_ x y -> x `f` y) id id xm ym

-- | Right-biased union of a list of maps (later maps shadow earlier ones).
plusUFMList :: [UniqFM key elt] -> UniqFM key elt
plusUFMList = foldl' plusUFM emptyUFM

-- | For every key occurring in any of the maps, collect the list of all
-- values bound to it, in the order in which the maps are given.
sequenceUFMList :: forall key elt. [UniqFM key elt] -> UniqFM key [elt]
sequenceUFMList = foldr (plusUFM_CD2 cons) emptyUFM
  where
    cons :: Maybe elt -> Maybe [elt] -> [elt]
    cons (Just x) (Just ys) = x : ys
    cons Nothing  (Just ys) = ys
    cons (Just x) Nothing   = [x]
    cons Nothing  Nothing   = []

-- | Remove from the first map every key bound in the second.
minusUFM :: UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt1
minusUFM (UFM x) (UFM y) = UFM (M.difference x y)

-- | @minusUFM_C f map1 map2@ returns @map1@, except that every mapping @key
-- |-> value1@ in @map1@ that shares a key with a mapping @key |-> value2@ in
-- @map2@ is altered by @f@: @value1@ is replaced by @f value1 value2@, where
-- 'Just' means that the new value is used and 'Nothing' means that the mapping
-- is deleted.
minusUFM_C
  :: (elt1 -> elt2 -> Maybe elt1)
  -> UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt1
minusUFM_C f (UFM xm) (UFM ym) = UFM (M.differenceWith f xm ym)

-- | Restrict the first map to the keys of the second, keeping the first
-- map's values.
intersectUFM :: UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt1
intersectUFM (UFM xm) (UFM ym) = UFM (M.intersection xm ym)

intersectUFM_C
  :: (elt1 -> elt2 -> elt3)
  -> UniqFM key elt1 -> UniqFM key elt2 -> UniqFM key elt3
intersectUFM_C f (UFM xm) (UFM ym) = UFM (M.intersectionWith f xm ym)

-- | Do the two maps share no key?
disjointUFM :: UniqFM key elt1 -> UniqFM key elt2 -> Bool
disjointUFM (UFM xm) (UFM ym) = M.disjoint xm ym

-- | Lazy right fold over the elements.
foldUFM :: (elt -> a -> a) -> a -> UniqFM key elt -> a
foldUFM k z (UFM im) = M.foldr k z im

mapUFM :: (elt1 -> elt2) -> UniqFM key elt1 -> UniqFM key elt2
mapUFM f (UFM im) = UFM (M.map f im)

-- | Map over the elements, dropping those mapped to 'Nothing'.
mapMaybeUFM :: (elt1 -> Maybe elt2) -> UniqFM key elt1 -> UniqFM key elt2
mapMaybeUFM f (UFM im) = UFM (M.mapMaybe f im)

-- | Map with access to each binding's 'Unique'.
mapUFM_Directly :: (Unique -> elt1 -> elt2) -> UniqFM key elt1 -> UniqFM key elt2
mapUFM_Directly f (UFM im) = UFM (M.mapWithKey (f . getUnique) im)

filterUFM :: (elt -> Bool) -> UniqFM key elt -> UniqFM key elt
filterUFM p (UFM im) = UFM (M.filter p im)

-- | Filter with access to each binding's 'Unique'.
filterUFM_Directly :: (Unique -> elt -> Bool) -> UniqFM key elt -> UniqFM key elt
filterUFM_Directly p (UFM im) = UFM (M.filterWithKey (p . getUnique) im)

-- | Split into the bindings that satisfy the predicate and those that don't.
partitionUFM :: (elt -> Bool) -> UniqFM key elt -> (UniqFM key elt, UniqFM key elt)
partitionUFM p (UFM im) =
  case M.partition p im of
    (yes, no) -> (UFM yes, UFM no)

sizeUFM :: UniqFM key elt -> Int
sizeUFM (UFM im) = M.size im

elemUFM :: Uniquable key => key -> UniqFM key elt -> Bool
elemUFM k (UFM im) = M.member (getKey (getUnique k)) im

elemUFM_Directly :: Unique -> UniqFM key elt -> Bool
elemUFM_Directly u (UFM im) = M.member (getKey u) im

lookupUFM :: Uniquable key => UniqFM key elt -> key -> Maybe elt
lookupUFM (UFM im) k = M.lookup (getKey (getUnique k)) im

-- when you've got the Unique already
lookupUFM_Directly :: UniqFM key elt -> Unique -> Maybe elt
lookupUFM_Directly (UFM im) u = M.lookup (getKey u) im

lookupWithDefaultUFM :: Uniquable key => UniqFM key elt -> elt -> key -> elt
lookupWithDefaultUFM (UFM im) v k = M.findWithDefault v (getKey (getUnique k)) im

lookupWithDefaultUFM_Directly :: UniqFM key elt -> elt -> Unique -> elt
lookupWithDefaultUFM_Directly (UFM im) v u = M.findWithDefault v (getKey u) im

-- | The key set, as raw 'Int's.
ufmToSet_Directly :: UniqFM key elt -> S.IntSet
ufmToSet_Directly (UFM im) = M.keysSet im

anyUFM :: (elt -> Bool) -> UniqFM key elt -> Bool
anyUFM p (UFM im) = M.foldr ((||) . p) False im

allUFM :: (elt -> Bool) -> UniqFM key elt -> Bool
allUFM p (UFM im) = M.foldr ((&&) . p) True im

-- | Force every element with the given projection to @()@.
seqEltsUFM :: (elt -> ()) -> UniqFM key elt -> ()
seqEltsUFM seqElt = foldUFM (\v rest -> seqElt v `seq` rest) ()

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetEltsUFM :: UniqFM key elt -> [elt]
nonDetEltsUFM (UFM im) = M.elems im

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetKeysUFM :: UniqFM key elt -> [Unique]
nonDetKeysUFM (UFM im) = map getUnique (M.keys im)

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldUFM :: (elt -> a -> a) -> a -> UniqFM key elt -> a
nonDetStrictFoldUFM k z (UFM im) = M.foldl' (flip k) z im
{-# INLINE nonDetStrictFoldUFM #-}

-- | In essence foldM
-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
{-# INLINE nonDetStrictFoldUFM_DirectlyM #-} -- Allow specialization
nonDetStrictFoldUFM_DirectlyM :: (Monad m) => (Unique -> b -> elt -> m b) -> b -> UniqFM key elt -> m b
nonDetStrictFoldUFM_DirectlyM f z0 (UFM xs) = M.foldrWithKey c return xs z0
  -- See Note [List fusion and continuations in 'c']
  where
    c u x k z = f (getUnique u) z x >>= k
    {-# INLINE c #-}

-- | Strict left fold with access to each binding's 'Unique'.
nonDetStrictFoldUFM_Directly:: (Unique -> elt -> a -> a) -> a -> UniqFM key elt -> a
nonDetStrictFoldUFM_Directly k z (UFM im) =
  M.foldlWithKey' (\acc i x -> k (getUnique i) x acc) z im
{-# INLINE nonDetStrictFoldUFM_Directly #-}

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetUFMToList :: UniqFM key elt -> [(Unique, elt)]
nonDetUFMToList (UFM im) = [ (getUnique k, v) | (k, v) <- M.toList im ]

-- | A wrapper around 'UniqFM' with the sole purpose of informing call sites
-- that the provided 'Foldable' and 'Traversable' instances are
-- nondeterministic.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" to learn about determinism.
newtype NonDetUniqFM key ele = NonDetUniqFM { getNonDet :: UniqFM key ele }
  deriving (Functor)

-- | Inherently nondeterministic.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" to learn about determinism.
instance forall key. Foldable (NonDetUniqFM key) where
  foldr f z (NonDetUniqFM (UFM im)) = foldr f z im

-- | Inherently nondeterministic.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
-- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" to learn about determinism.
instance forall key. Traversable (NonDetUniqFM key) where
  traverse f (NonDetUniqFM (UFM im)) = NonDetUniqFM . UFM <$> traverse f im

-- | Expose the underlying 'M.IntMap'.
ufmToIntMap :: UniqFM key elt -> M.IntMap elt
ufmToIntMap (UFM im) = im

-- | Wrap an 'M.IntMap' without checking that the keys are valid uniques
-- of the phantom key type.
unsafeIntMapToUFM :: M.IntMap elt -> UniqFM key elt
unsafeIntMapToUFM = UFM

-- | Cast the key domain of a UniqFM.
--
-- As long as the domains don't overlap in their uniques
-- this is safe.
unsafeCastUFMKey :: UniqFM key1 elt -> UniqFM key2 elt
unsafeCastUFMKey (UFM im) = UFM im

-- Determines whether two 'UniqFM's contain the same keys.
equalKeysUFM :: UniqFM key a -> UniqFM key b -> Bool
equalKeysUFM (UFM m1) (UFM m2) = liftEq (\_ _ -> True) m1 m2

-- Instances

instance Semi.Semigroup (UniqFM key a) where
  (<>) = plusUFM

instance Monoid (UniqFM key a) where
  mempty  = emptyUFM
  mappend = (Semi.<>)

-- Output-ery

instance Outputable a => Outputable (UniqFM key a) where
  ppr = pprUniqFM ppr

pprUniqFM :: (a -> SDoc) -> UniqFM key a -> SDoc
-- It's OK to use nonDetUFMToList here because we only use it for
-- pretty-printing.
pprUniqFM ppr_elt ufm
  = brackets $ fsep $ punctuate comma $
    [ ppr uq <+> text ":->" <+> ppr_elt elt
    | (uq, elt) <- nonDetUFMToList ufm ]

-- | Pretty-print a non-deterministic set.
-- The order of variables is non-deterministic and for pretty-printing that
-- shouldn't be a problem.
-- Having this function helps contain the non-determinism created with
-- nonDetEltsUFM.
pprUFM :: UniqFM key a    -- ^ The things to be pretty printed
       -> ([a] -> SDoc)   -- ^ The pretty printing function to use on the elements
       -> SDoc            -- ^ 'SDoc' where the things have been pretty printed
pprUFM ufm pp = pp (nonDetEltsUFM ufm)

-- | Pretty-print a non-deterministic set.
-- The order of variables is non-deterministic and for pretty-printing that -- shouldn't be a problem. -- Having this function helps contain the non-determinism created with -- nonDetUFMToList. pprUFMWithKeys :: UniqFM key a -- ^ The things to be pretty printed -> ([(Unique, a)] -> SDoc) -- ^ The pretty printing function to use on the elements -> SDoc -- ^ 'SDoc' where the things have been pretty -- printed pprUFMWithKeys ufm pp = pp (nonDetUFMToList ufm) -- | Determines the pluralisation suffix appropriate for the length of a set -- in the same way that plural from Outputable does for lists. pluralUFM :: UniqFM key a -> SDoc pluralUFM ufm | sizeUFM ufm == 1 = empty | otherwise = char 's' ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/Map.hs0000644000000000000000000001453514472400113021267 0ustar0000000000000000{-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveFunctor #-} {-# OPTIONS_GHC -Wall #-} -- Like 'UniqFM', these are maps for keys which are Uniquable. -- Unlike 'UniqFM', these maps also remember their keys, which -- makes them a much better drop in replacement for 'Data.Map.Map'. -- -- Key preservation is right-biased. module GHC.Types.Unique.Map ( UniqMap(..), emptyUniqMap, isNullUniqMap, unitUniqMap, listToUniqMap, listToUniqMap_C, addToUniqMap, addListToUniqMap, addToUniqMap_C, addToUniqMap_Acc, alterUniqMap, addListToUniqMap_C, adjustUniqMap, delFromUniqMap, delListFromUniqMap, plusUniqMap, plusUniqMap_C, plusMaybeUniqMap_C, plusUniqMapList, minusUniqMap, intersectUniqMap, disjointUniqMap, mapUniqMap, filterUniqMap, partitionUniqMap, sizeUniqMap, elemUniqMap, lookupUniqMap, lookupWithDefaultUniqMap, anyUniqMap, allUniqMap, nonDetEltsUniqMap, nonDetFoldUniqMap -- Non-deterministic functions omitted ) where import GHC.Prelude import GHC.Types.Unique.FM import GHC.Types.Unique import GHC.Utils.Outputable import Data.Semigroup as Semi ( Semigroup(..) 
) import Data.Coerce import Data.Maybe import Data.Data -- | Maps indexed by 'Uniquable' keys newtype UniqMap k a = UniqMap (UniqFM k (k, a)) deriving (Data, Eq, Functor) type role UniqMap nominal representational instance Semigroup (UniqMap k a) where (<>) = plusUniqMap instance Monoid (UniqMap k a) where mempty = emptyUniqMap mappend = (Semi.<>) instance (Outputable k, Outputable a) => Outputable (UniqMap k a) where ppr (UniqMap m) = brackets $ fsep $ punctuate comma $ [ ppr k <+> text "->" <+> ppr v | (k, v) <- nonDetEltsUFM m ] liftC :: (a -> a -> a) -> (k, a) -> (k, a) -> (k, a) liftC f (_, v) (k', v') = (k', f v v') emptyUniqMap :: UniqMap k a emptyUniqMap = UniqMap emptyUFM isNullUniqMap :: UniqMap k a -> Bool isNullUniqMap (UniqMap m) = isNullUFM m unitUniqMap :: Uniquable k => k -> a -> UniqMap k a unitUniqMap k v = UniqMap (unitUFM k (k, v)) listToUniqMap :: Uniquable k => [(k,a)] -> UniqMap k a listToUniqMap kvs = UniqMap (listToUFM [ (k,(k,v)) | (k,v) <- kvs]) listToUniqMap_C :: Uniquable k => (a -> a -> a) -> [(k,a)] -> UniqMap k a listToUniqMap_C f kvs = UniqMap $ listToUFM_C (liftC f) [ (k,(k,v)) | (k,v) <- kvs] addToUniqMap :: Uniquable k => UniqMap k a -> k -> a -> UniqMap k a addToUniqMap (UniqMap m) k v = UniqMap $ addToUFM m k (k, v) addListToUniqMap :: Uniquable k => UniqMap k a -> [(k,a)] -> UniqMap k a addListToUniqMap (UniqMap m) kvs = UniqMap $ addListToUFM m [(k,(k,v)) | (k,v) <- kvs] addToUniqMap_C :: Uniquable k => (a -> a -> a) -> UniqMap k a -> k -> a -> UniqMap k a addToUniqMap_C f (UniqMap m) k v = UniqMap $ addToUFM_C (liftC f) m k (k, v) addToUniqMap_Acc :: Uniquable k => (b -> a -> a) -> (b -> a) -> UniqMap k a -> k -> b -> UniqMap k a addToUniqMap_Acc exi new (UniqMap m) k0 v0 = UniqMap $ addToUFM_Acc (\b (k, v) -> (k, exi b v)) (\b -> (k0, new b)) m k0 v0 alterUniqMap :: Uniquable k => (Maybe a -> Maybe a) -> UniqMap k a -> k -> UniqMap k a alterUniqMap f (UniqMap m) k = UniqMap $ alterUFM (fmap (k,) . f . 
fmap snd) m k addListToUniqMap_C :: Uniquable k => (a -> a -> a) -> UniqMap k a -> [(k, a)] -> UniqMap k a addListToUniqMap_C f (UniqMap m) kvs = UniqMap $ addListToUFM_C (liftC f) m [(k,(k,v)) | (k,v) <- kvs] adjustUniqMap :: Uniquable k => (a -> a) -> UniqMap k a -> k -> UniqMap k a adjustUniqMap f (UniqMap m) k = UniqMap $ adjustUFM (\(_,v) -> (k,f v)) m k delFromUniqMap :: Uniquable k => UniqMap k a -> k -> UniqMap k a delFromUniqMap (UniqMap m) k = UniqMap $ delFromUFM m k delListFromUniqMap :: Uniquable k => UniqMap k a -> [k] -> UniqMap k a delListFromUniqMap (UniqMap m) ks = UniqMap $ delListFromUFM m ks plusUniqMap :: UniqMap k a -> UniqMap k a -> UniqMap k a plusUniqMap (UniqMap m1) (UniqMap m2) = UniqMap $ plusUFM m1 m2 plusUniqMap_C :: (a -> a -> a) -> UniqMap k a -> UniqMap k a -> UniqMap k a plusUniqMap_C f (UniqMap m1) (UniqMap m2) = UniqMap $ plusUFM_C (liftC f) m1 m2 plusMaybeUniqMap_C :: (a -> a -> Maybe a) -> UniqMap k a -> UniqMap k a -> UniqMap k a plusMaybeUniqMap_C f (UniqMap m1) (UniqMap m2) = UniqMap $ plusMaybeUFM_C (\(_, v) (k', v') -> fmap (k',) (f v v')) m1 m2 plusUniqMapList :: [UniqMap k a] -> UniqMap k a plusUniqMapList xs = UniqMap $ plusUFMList (coerce xs) minusUniqMap :: UniqMap k a -> UniqMap k b -> UniqMap k a minusUniqMap (UniqMap m1) (UniqMap m2) = UniqMap $ minusUFM m1 m2 intersectUniqMap :: UniqMap k a -> UniqMap k b -> UniqMap k a intersectUniqMap (UniqMap m1) (UniqMap m2) = UniqMap $ intersectUFM m1 m2 disjointUniqMap :: UniqMap k a -> UniqMap k b -> Bool disjointUniqMap (UniqMap m1) (UniqMap m2) = disjointUFM m1 m2 mapUniqMap :: (a -> b) -> UniqMap k a -> UniqMap k b mapUniqMap f (UniqMap m) = UniqMap $ mapUFM (fmap f) m -- (,) k instance filterUniqMap :: (a -> Bool) -> UniqMap k a -> UniqMap k a filterUniqMap f (UniqMap m) = UniqMap $ filterUFM (f . snd) m partitionUniqMap :: (a -> Bool) -> UniqMap k a -> (UniqMap k a, UniqMap k a) partitionUniqMap f (UniqMap m) = coerce $ partitionUFM (f . 
snd) m sizeUniqMap :: UniqMap k a -> Int sizeUniqMap (UniqMap m) = sizeUFM m elemUniqMap :: Uniquable k => k -> UniqMap k a -> Bool elemUniqMap k (UniqMap m) = elemUFM k m lookupUniqMap :: Uniquable k => UniqMap k a -> k -> Maybe a lookupUniqMap (UniqMap m) k = fmap snd (lookupUFM m k) lookupWithDefaultUniqMap :: Uniquable k => UniqMap k a -> a -> k -> a lookupWithDefaultUniqMap (UniqMap m) a k = fromMaybe a (fmap snd (lookupUFM m k)) anyUniqMap :: (a -> Bool) -> UniqMap k a -> Bool anyUniqMap f (UniqMap m) = anyUFM (f . snd) m allUniqMap :: (a -> Bool) -> UniqMap k a -> Bool allUniqMap f (UniqMap m) = allUFM (f . snd) m nonDetEltsUniqMap :: UniqMap k a -> [(k, a)] nonDetEltsUniqMap (UniqMap m) = nonDetEltsUFM m nonDetFoldUniqMap :: ((k, a) -> b -> b) -> b -> UniqMap k a -> b nonDetFoldUniqMap go z (UniqMap m) = foldUFM go z m ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/SDFM.hs0000644000000000000000000001161114472400113021273 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE ApplicativeDo #-} {-# OPTIONS_GHC -Wall #-} -- | Like a 'UniqDFM', but maintains equivalence classes of keys sharing the -- same entry. See 'UniqSDFM'. module GHC.Types.Unique.SDFM ( -- * Unique-keyed, /shared/, deterministic mappings UniqSDFM, emptyUSDFM, lookupUSDFM, equateUSDFM, addToUSDFM, traverseUSDFM ) where import GHC.Prelude import GHC.Types.Unique import GHC.Types.Unique.DFM import GHC.Utils.Outputable -- | Either @Indirect x@, meaning the value is represented by that of @x@, or -- an @Entry@ containing containing the actual value it represents. data Shared key ele = Indirect !key | Entry !ele -- | A 'UniqDFM' whose domain is /sets/ of 'Unique's, each of which share a -- common value of type @ele@. -- Every such set (\"equivalence class\") has a distinct representative -- 'Unique'. Supports merging the entries of multiple such sets in a union-find -- like fashion. 
--
-- An accurate model is that of @[(Set key, Maybe ele)]@: A finite mapping from
-- sets of @key@s to possibly absent entries @ele@, where the sets don't overlap.
-- Example:
-- @
--   m = [({u1,u3}, Just ele1), ({u2}, Just ele2), ({u4,u7}, Nothing)]
-- @
-- On this model we support the following main operations:
--
--   * @'lookupUSDFM' m u3 == Just ele1@, @'lookupUSDFM' m u4 == Nothing@,
--     @'lookupUSDFM' m u5 == Nothing@.
--   * @'equateUSDFM' m u1 u3@ is a no-op, but
--     @'equateUSDFM' m u1 u2@ merges @{u1,u3}@ and @{u2}@ to point to
--     @Just ele2@ and returns the old entry of @{u1,u3}@, @Just ele1@.
--   * @'addToUSDFM' m u3 ele4@ sets the entry of @{u1,u3}@ to @Just ele4@.
--
-- As well as a few means for traversal/conversion to list.
newtype UniqSDFM key ele = USDFM { unUSDFM :: UniqDFM key (Shared key ele) }

emptyUSDFM :: UniqSDFM key ele
emptyUSDFM = USDFM emptyUDFM

-- | Follow the 'Indirect' chain starting at the given key until the class
-- representative is reached, returning it together with the class's entry
-- (if any).
lookupReprAndEntryUSDFM :: Uniquable key => UniqSDFM key ele -> key -> (key, Maybe ele)
lookupReprAndEntryUSDFM (USDFM env) = chase
  where
    chase x = case lookupUDFM env x of
      Nothing           -> (x, Nothing)   -- key not in the map at all
      Just (Indirect y) -> chase y        -- follow the union-find link
      Just (Entry ele)  -> (x, Just ele)  -- reached the representative

-- | @lookupSUDFM env x@ looks up an entry for @x@, looking through all
-- 'Indirect's until it finds a shared 'Entry'.
--
-- Examples in terms of the model (see 'UniqSDFM'):
-- >>> lookupUSDFM [({u1,u3}, Just ele1), ({u2}, Just ele2)] u3 == Just ele1
-- >>> lookupUSDFM [({u1,u3}, Just ele1), ({u2}, Just ele2)] u4 == Nothing
-- >>> lookupUSDFM [({u1,u3}, Just ele1), ({u2}, Nothing)] u2 == Nothing
lookupUSDFM :: Uniquable key => UniqSDFM key ele -> key -> Maybe ele
lookupUSDFM usdfm = snd . lookupReprAndEntryUSDFM usdfm

-- | @equateUSDFM env x y@ makes @x@ and @y@ point to the same entry,
-- thereby merging @x@'s class with @y@'s.
-- If both @x@ and @y@ are in the domain of the map, then @y@'s entry will be
-- chosen as the new entry and @x@'s old entry will be returned.
--
-- Examples in terms of the model (see 'UniqSDFM'):
-- >>> equateUSDFM [] u1 u2 == (Nothing, [({u1,u2}, Nothing)])
-- >>> equateUSDFM [({u1,u3}, Just ele1)] u3 u4 == (Nothing, [({u1,u3,u4}, Just ele1)])
-- >>> equateUSDFM [({u1,u3}, Just ele1)] u4 u3 == (Nothing, [({u1,u3,u4}, Just ele1)])
-- >>> equateUSDFM [({u1,u3}, Just ele1), ({u2}, Just ele2)] u3 u2 == (Just ele1, [({u2,u1,u3}, Just ele2)])
equateUSDFM
  :: Uniquable key
  => UniqSDFM key ele -> key -> key -> (Maybe ele, UniqSDFM key ele)
-- We compare the class representatives: same representative means the keys
-- are already equated; otherwise the class without an entry is pointed at
-- the one that (possibly) has one, so no entry is lost silently.
equateUSDFM usdfm@(USDFM env) x y = case (lu x, lu y) of
  ((x', _) , (y', _)) | getUnique x' == getUnique y' -> (Nothing, usdfm) -- nothing to do
  ((x', _) , (y', Nothing)) -> (Nothing, set_indirect y' x')
  ((x', mb_ex), (y', _)) -> (mb_ex, set_indirect x' y')
  where
    lu = lookupReprAndEntryUSDFM usdfm
    set_indirect a b = USDFM $ addToUDFM env a (Indirect b)

-- | @addToUSDFM env x a@ sets the entry @x@ is associated with to @a@,
-- thereby modifying its whole equivalence class.
--
-- Examples in terms of the model (see 'UniqSDFM'):
-- >>> addToUSDFM [] u1 ele1 == [({u1}, Just ele1)]
-- >>> addToUSDFM [({u1,u3}, Just ele1)] u3 ele2 == [({u1,u3}, Just ele2)]
addToUSDFM :: Uniquable key => UniqSDFM key ele -> key -> ele -> UniqSDFM key ele
-- The entry is installed on the class representative, so every member of
-- @x@'s equivalence class observes the new value.
addToUSDFM usdfm@(USDFM env) x v =
  USDFM $ addToUDFM env (fst (lookupReprAndEntryUSDFM usdfm x)) (Entry v)

traverseUSDFM :: forall key a b f. Applicative f => (a -> f b) -> UniqSDFM key a -> f (UniqSDFM key b)
traverseUSDFM f = fmap (USDFM . listToUDFM_Directly) . traverse g . udfmToList .
unUSDFM where g :: (Unique, Shared key a) -> f (Unique, Shared key b) g (u, Indirect y) = pure (u,Indirect y) g (u, Entry a) = do a' <- f a pure (u,Entry a') instance (Outputable key, Outputable ele) => Outputable (Shared key ele) where ppr (Indirect x) = ppr x ppr (Entry a) = ppr a instance (Outputable key, Outputable ele) => Outputable (UniqSDFM key ele) where ppr (USDFM env) = ppr env ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/Set.hs0000644000000000000000000001535314472400113021304 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The AQUA Project, Glasgow University, 1994-1998 \section[UniqSet]{Specialised sets, for things with @Uniques@} Based on @UniqFMs@ (as you would expect). Basically, the things need to be in class @Uniquable@. -} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE DeriveDataTypeable #-} module GHC.Types.Unique.Set ( -- * Unique set type UniqSet, -- type synonym for UniqFM a getUniqSet, pprUniqSet, -- ** Manipulating these sets emptyUniqSet, unitUniqSet, mkUniqSet, addOneToUniqSet, addListToUniqSet, delOneFromUniqSet, delOneFromUniqSet_Directly, delListFromUniqSet, delListFromUniqSet_Directly, unionUniqSets, unionManyUniqSets, minusUniqSet, uniqSetMinusUFM, uniqSetMinusUDFM, intersectUniqSets, disjointUniqSets, restrictUniqSetToUFM, uniqSetAny, uniqSetAll, elementOfUniqSet, elemUniqSet_Directly, filterUniqSet, filterUniqSet_Directly, sizeUniqSet, isEmptyUniqSet, lookupUniqSet, lookupUniqSet_Directly, partitionUniqSet, mapUniqSet, unsafeUFMToUniqSet, nonDetEltsUniqSet, nonDetKeysUniqSet, nonDetStrictFoldUniqSet, ) where import GHC.Prelude import GHC.Types.Unique.DFM import GHC.Types.Unique.FM import GHC.Types.Unique import Data.Coerce import GHC.Utils.Outputable import Data.Data import qualified Data.Semigroup as Semi -- Note [UniqSet invariant] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- UniqSet has the following invariant: -- The keys in the map are the uniques of the values -- It means that to implement mapUniqSet 
you have to update
-- both the keys and the values.

newtype UniqSet a = UniqSet {getUniqSet' :: UniqFM a a}
                  deriving (Data, Semi.Semigroup, Monoid)

emptyUniqSet :: UniqSet a
emptyUniqSet = UniqSet emptyUFM

-- | The set containing a single element.
unitUniqSet :: Uniquable a => a -> UniqSet a
unitUniqSet x = UniqSet (unitUFM x x)

-- | Build a set from a list of elements.
mkUniqSet :: Uniquable a => [a] -> UniqSet a
mkUniqSet = foldl' addOneToUniqSet emptyUniqSet

-- | Insert an element, keyed on its own unique
-- (see Note [UniqSet invariant]).
addOneToUniqSet :: Uniquable a => UniqSet a -> a -> UniqSet a
addOneToUniqSet (UniqSet fm) x = UniqSet (addToUFM fm x x)

addListToUniqSet :: Uniquable a => UniqSet a -> [a] -> UniqSet a
addListToUniqSet = foldl' addOneToUniqSet

delOneFromUniqSet :: Uniquable a => UniqSet a -> a -> UniqSet a
delOneFromUniqSet (UniqSet fm) x = UniqSet (delFromUFM fm x)

delOneFromUniqSet_Directly :: UniqSet a -> Unique -> UniqSet a
delOneFromUniqSet_Directly (UniqSet fm) u = UniqSet (delFromUFM_Directly fm u)

delListFromUniqSet :: Uniquable a => UniqSet a -> [a] -> UniqSet a
delListFromUniqSet (UniqSet fm) xs = UniqSet (delListFromUFM fm xs)

delListFromUniqSet_Directly :: UniqSet a -> [Unique] -> UniqSet a
delListFromUniqSet_Directly (UniqSet fm) us = UniqSet (delListFromUFM_Directly fm us)

unionUniqSets :: UniqSet a -> UniqSet a -> UniqSet a
unionUniqSets (UniqSet fm1) (UniqSet fm2) = UniqSet (plusUFM fm1 fm2)

unionManyUniqSets :: [UniqSet a] -> UniqSet a
unionManyUniqSets = foldl' (flip unionUniqSets) emptyUniqSet

minusUniqSet :: UniqSet a -> UniqSet a -> UniqSet a
minusUniqSet (UniqSet fm1) (UniqSet fm2) = UniqSet (minusUFM fm1 fm2)

intersectUniqSets :: UniqSet a -> UniqSet a -> UniqSet a
intersectUniqSets (UniqSet fm1) (UniqSet fm2) = UniqSet (intersectUFM fm1 fm2)

disjointUniqSets :: UniqSet a -> UniqSet a -> Bool
disjointUniqSets (UniqSet fm1) (UniqSet fm2) = disjointUFM fm1 fm2

-- | Keep only the set elements whose uniques are bound in the map.
restrictUniqSetToUFM :: UniqSet key -> UniqFM key b -> UniqSet key
restrictUniqSetToUFM (UniqSet fm) m = UniqSet (intersectUFM fm m)

-- | Drop the set elements whose uniques are bound in the map.
uniqSetMinusUFM :: UniqSet key -> UniqFM key b -> UniqSet key
uniqSetMinusUFM (UniqSet fm) m = UniqSet (minusUFM fm m)

uniqSetMinusUDFM :: UniqSet key -> UniqDFM key b -> UniqSet key
uniqSetMinusUDFM (UniqSet fm) m = UniqSet (ufmMinusUDFM fm m)

elementOfUniqSet :: Uniquable a => a -> UniqSet a -> Bool
elementOfUniqSet x (UniqSet fm) = elemUFM x fm

elemUniqSet_Directly :: Unique -> UniqSet a -> Bool
elemUniqSet_Directly u (UniqSet fm) = elemUFM_Directly u fm

filterUniqSet :: (a -> Bool) -> UniqSet a -> UniqSet a
filterUniqSet p (UniqSet fm) = UniqSet (filterUFM p fm)

filterUniqSet_Directly :: (Unique -> elt -> Bool) -> UniqSet elt -> UniqSet elt
filterUniqSet_Directly p (UniqSet fm) = UniqSet (filterUFM_Directly p fm)

partitionUniqSet :: (a -> Bool) -> UniqSet a -> (UniqSet a, UniqSet a)
partitionUniqSet p (UniqSet fm) = coerce (partitionUFM p fm)

uniqSetAny :: (a -> Bool) -> UniqSet a -> Bool
uniqSetAny p (UniqSet fm) = anyUFM p fm

uniqSetAll :: (a -> Bool) -> UniqSet a -> Bool
uniqSetAll p (UniqSet fm) = allUFM p fm

sizeUniqSet :: UniqSet a -> Int
sizeUniqSet (UniqSet fm) = sizeUFM fm

isEmptyUniqSet :: UniqSet a -> Bool
isEmptyUniqSet (UniqSet fm) = isNullUFM fm

-- | What's the point you might ask? We might have changed an object
-- without its key changing. In which case this lookup makes sense.
lookupUniqSet :: Uniquable key => UniqSet key -> key -> Maybe key
lookupUniqSet (UniqSet fm) k = lookupUFM fm k

lookupUniqSet_Directly :: UniqSet a -> Unique -> Maybe a
lookupUniqSet_Directly (UniqSet fm) u = lookupUFM_Directly fm u

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetEltsUniqSet :: UniqSet elt -> [elt]
nonDetEltsUniqSet = nonDetEltsUFM . getUniqSet'

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetKeysUniqSet :: UniqSet elt -> [Unique]
nonDetKeysUniqSet = nonDetKeysUFM . getUniqSet'

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce -- nondeterminism. nonDetStrictFoldUniqSet :: (elt -> a -> a) -> a -> UniqSet elt -> a nonDetStrictFoldUniqSet c n (UniqSet s) = nonDetStrictFoldUFM c n s -- See Note [UniqSet invariant] mapUniqSet :: Uniquable b => (a -> b) -> UniqSet a -> UniqSet b mapUniqSet f = mkUniqSet . map f . nonDetEltsUniqSet -- Two 'UniqSet's are considered equal if they contain the same -- uniques. instance Eq (UniqSet a) where UniqSet a == UniqSet b = equalKeysUFM a b getUniqSet :: UniqSet a -> UniqFM a a getUniqSet = getUniqSet' -- | 'unsafeUFMToUniqSet' converts a @'UniqFM' a@ into a @'UniqSet' a@ -- assuming, without checking, that it maps each 'Unique' to a value -- that has that 'Unique'. See Note [UniqSet invariant]. unsafeUFMToUniqSet :: UniqFM a a -> UniqSet a unsafeUFMToUniqSet = UniqSet instance Outputable a => Outputable (UniqSet a) where ppr = pprUniqSet ppr pprUniqSet :: (a -> SDoc) -> UniqSet a -> SDoc -- It's OK to use nonDetUFMToList here because we only use it for -- pretty-printing. pprUniqSet f = braces . pprWithCommas f . 
nonDetEltsUniqSet ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Unique/Supply.hs0000644000000000000000000003500614472400113022042 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE UnboxedTuples #-} module GHC.Types.Unique.Supply ( -- * Main data type UniqSupply, -- Abstractly -- ** Operations on supplies uniqFromSupply, uniqsFromSupply, -- basic ops takeUniqFromSupply, uniqFromMask, mkSplitUniqSupply, splitUniqSupply, listSplitUniqSupply, -- * Unique supply monad and its abstraction UniqSM, MonadUnique(..), -- ** Operations on the monad initUs, initUs_, -- * Set supply strategy initUniqSupply ) where import GHC.Prelude import GHC.Types.Unique import GHC.Utils.Panic.Plain import GHC.IO import GHC.Utils.Monad import Control.Monad import Data.Char import GHC.Exts( Ptr(..), noDuplicate#, oneShot ) #if MIN_VERSION_GLASGOW_HASKELL(9,1,0,0) import GHC.Exts( Int(..), word2Int#, fetchAddWordAddr#, plusWord#, readWordOffAddr# ) #endif import Foreign.Storable #include "Unique.h" {- ************************************************************************ * * \subsection{Splittable Unique supply: @UniqSupply@} * * ************************************************************************ -} {- Note [How the unique supply works] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The basic idea (due to Lennart Augustsson) is that a UniqSupply is lazily-evaluated infinite tree. 
* At each MkSplitUniqSupply node is a unique Int, and two
  sub-trees (see data UniqSupply)

* takeUniqFromSupply :: UniqSupply -> (Unique, UniqSupply)
  returns the unique Int and one of the sub-trees

* splitUniqSupply :: UniqSupply -> (UniqSupply, UniqSupply)
  returns the two sub-trees

* When you poke on one of the thunks, it does a foreign call
  to get a fresh Int from a thread-safe counter, and returns
  a fresh MkSplitUniqSupply node.

This has to be as efficient as possible: it should allocate only
 * The fresh node
 * A thunk for each sub-tree

Note [How unique supplies are used]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The general design (used throughout GHC) is to:

* For creating new uniques either a UniqSupply is used and threaded through
  or for monadic code a MonadUnique instance might conjure up uniques using
  `uniqFromMask`.

* Different parts of the compiler will use a UniqSupply or MonadUnique instance
  with a specific mask. This way the different parts of the compiler will
  generate uniques with different masks.

If different code shares the same mask then care has to be taken that all
uniques still get distinct numbers. Usually this is done by relying on genSym
which has *one* counter per GHC invocation that is relied on by all calls to
it. But using something like the address for pinned objects works as well
and in fact is done for fast strings.

This is important for example in the simplifier. Most passes of the simplifier
use the same mask 's'. However in some places we create a unique supply using
`mkSplitUniqSupply` and thread it through the code, while in
GHC.Core.Opt.Simplify.Monad we use the `instance MonadUnique SimplM`, which
uses `mkSplitUniqSupply` in getUniqueSupplyM and `uniqFromMask` in getUniqueM.

Ultimately all these boil down to each new unique consisting of the mask and
the result from a call to `genSym`. The latter produces a distinct number for
each invocation, ensuring uniques are distinct.
Note [Optimising the unique supply] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The inner loop of mkSplitUniqSupply is a function closure mk_supply s0 = case noDuplicate# s0 of { s1 -> case unIO genSym s1 of { (# s2, u #) -> case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s2 of { (# s3, x #) -> case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s3 of { (# s4, y #) -> (# s4, MkSplitUniqSupply (mask .|. u) x y #) }}}} It's a classic example of an IO action that is captured and then called repeatedly (see #18238 for some discussion). It mustn't allocate! The test perf/should_run/UniqLoop keeps track of this loop. Watch it carefully. We used to write it as: mk_supply :: IO UniqSupply mk_supply = unsafeInterleaveIO $ genSym >>= \ u -> mk_supply >>= \ s1 -> mk_supply >>= \ s2 -> return (MkSplitUniqSupply (mask .|. u) s1 s2) and to rely on -fno-state-hack, full laziness and inlining to get the same result. It was very brittle and required enabling -fno-state-hack globally. So it has been rewritten using lower level constructs to explicitly state what we want. Note [Optimising use of unique supplies] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When it comes to having a way to generate new Uniques there are generally three ways to deal with this: For pure code the only good approach is to take an UniqSupply as argument. Then thread it through the code splitting it for sub-passes or when creating uniques. The code for this is about as optimized as it gets, but we can't get around the need to allocate one `UniqSupply` for each Unique we need. For code in IO we can improve on this by threading only the *mask* we are going to use for Uniques. Using `uniqFromMask` to generate uniques as needed. This gets rid of the overhead of allocating a new UniqSupply for each unique generated. It also avoids frequent state updates when the Unique/Mask is part of the state in a state monad. 
For monadic code in IO which always uses the same mask we can go further and hardcode the mask into the MonadUnique instance. On top of all the benefits of threading the mask this *also* has the benefit of avoiding the mask getting captured in thunks, or being passed around at runtime. It does however come at the cost of having to use a fixed Mask for all code run in this Monad. But rememeber, the Mask is purely cosmetic: See Note [Uniques and masks]. NB: It's *not* an optimization to pass around the UniqSupply inside an IORef instead of the mask. While this would avoid frequent state updates it still requires allocating one UniqSupply per Unique. On top of some overhead for reading/writing to/from the IORef. All of this hinges on the assumption that UniqSupply and uniqFromMask use the same source of distinct numbers (`genSym`) which allows both to be used at the same time, with the same mask, while still ensuring distinct uniques. One might consider this fact to be an "accident". But GHC worked like this as far back as source control history goes. It also allows the later two optimizations to be used. So it seems safe to depend on this fact. -} -- | Unique Supply -- -- A value of type 'UniqSupply' is unique, and it can -- supply /one/ distinct 'Unique'. Also, from the supply, one can -- also manufacture an arbitrary number of further 'UniqueSupply' values, -- which will be distinct from the first and from all others. data UniqSupply = MkSplitUniqSupply {-# UNPACK #-} !Int -- make the Unique with this UniqSupply UniqSupply -- when split => these two supplies mkSplitUniqSupply :: Char -> IO UniqSupply -- ^ Create a unique supply out of thin air. -- The "mask" (Char) supplied is purely cosmetic, making it easier -- to figure out where a Unique was born. See -- Note [Uniques and masks]. -- -- The payload part of the Uniques allocated from this UniqSupply are -- guaranteed distinct wrt all other supplies, regardless of their "mask". 
-- This is achieved by allocating the payload part from -- a single source of Uniques, namely `genSym`, shared across -- all UniqSupply's. -- See Note [How the unique supply works] -- See Note [Optimising the unique supply] mkSplitUniqSupply c = unsafeDupableInterleaveIO (IO mk_supply) where !mask = ord c `unsafeShiftL` uNIQUE_BITS -- Here comes THE MAGIC: see Note [How the unique supply works] -- This is one of the most hammered bits in the whole compiler -- See Note [Optimising the unique supply] -- NB: Use noDuplicate# for thread-safety. mk_supply s0 = case noDuplicate# s0 of { s1 -> case unIO genSym s1 of { (# s2, u #) -> -- deferred IO computations case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s2 of { (# s3, x #) -> case unIO (unsafeDupableInterleaveIO (IO mk_supply)) s3 of { (# s4, y #) -> (# s4, MkSplitUniqSupply (mask .|. u) x y #) }}}} #if !MIN_VERSION_GLASGOW_HASKELL(9,1,0,0) foreign import ccall unsafe "ghc_lib_parser_genSym" genSym :: IO Int #else genSym :: IO Int genSym = do let !mask = (1 `unsafeShiftL` uNIQUE_BITS) - 1 let !(Ptr counter) = ghc_unique_counter let !(Ptr inc_ptr) = ghc_unique_inc u <- IO $ \s0 -> case readWordOffAddr# inc_ptr 0# s0 of (# s1, inc #) -> case fetchAddWordAddr# counter inc s1 of (# s2, val #) -> let !u = I# (word2Int# (val `plusWord#` inc)) .&. mask in (# s2, u #) #if defined(DEBUG) -- Uh oh! We will overflow next time a unique is requested. -- (Note that if the increment isn't 1 we may miss this check) massert (u /= mask) #endif return u #endif foreign import ccall unsafe "&ghc_unique_counter" ghc_unique_counter :: Ptr Word foreign import ccall unsafe "&ghc_unique_inc" ghc_unique_inc :: Ptr Int initUniqSupply :: Word -> Int -> IO () initUniqSupply counter inc = do poke ghc_unique_counter counter poke ghc_unique_inc inc uniqFromMask :: Char -> IO Unique uniqFromMask !mask = do { uqNum <- genSym ; return $! 
mkUnique mask uqNum } {-# NOINLINE uniqFromMask #-} -- We'll unbox everything, but we don't want to inline it splitUniqSupply :: UniqSupply -> (UniqSupply, UniqSupply) -- ^ Build two 'UniqSupply' from a single one, each of which -- can supply its own 'Unique'. listSplitUniqSupply :: UniqSupply -> [UniqSupply] -- ^ Create an infinite list of 'UniqSupply' from a single one uniqFromSupply :: UniqSupply -> Unique -- ^ Obtain the 'Unique' from this particular 'UniqSupply' uniqsFromSupply :: UniqSupply -> [Unique] -- Infinite -- ^ Obtain an infinite list of 'Unique' that can be generated by constant splitting of the supply takeUniqFromSupply :: UniqSupply -> (Unique, UniqSupply) -- ^ Obtain the 'Unique' from this particular 'UniqSupply', and a new supply splitUniqSupply (MkSplitUniqSupply _ s1 s2) = (s1, s2) listSplitUniqSupply (MkSplitUniqSupply _ s1 s2) = s1 : listSplitUniqSupply s2 uniqFromSupply (MkSplitUniqSupply n _ _) = mkUniqueGrimily n uniqsFromSupply (MkSplitUniqSupply n _ s2) = mkUniqueGrimily n : uniqsFromSupply s2 takeUniqFromSupply (MkSplitUniqSupply n s1 _) = (mkUniqueGrimily n, s1) {- ************************************************************************ * * \subsubsection[UniqSupply-monad]{@UniqSupply@ monad: @UniqSM@} * * ************************************************************************ -} type UniqResult result = (# result, UniqSupply #) pattern UniqResult :: a -> b -> (# a, b #) pattern UniqResult x y = (# x, y #) {-# COMPLETE UniqResult #-} -- | A monad which just gives the ability to obtain 'Unique's newtype UniqSM result = USM { unUSM :: UniqSupply -> UniqResult result } -- See Note [The one-shot state monad trick] for why we don't derive this. instance Functor UniqSM where fmap f (USM m) = mkUniqSM $ \us -> case m us of (# r, us' #) -> UniqResult (f r) us' -- | Smart constructor for 'UniqSM', as described in Note [The one-shot state -- monad trick]. 
mkUniqSM :: (UniqSupply -> UniqResult a) -> UniqSM a
-- 'oneShot' marks the state continuation as applied at most once, which
-- lets the simplifier eta-expand through it.
-- See Note [The one-shot state monad trick].
mkUniqSM k = USM (oneShot k)
{-# INLINE mkUniqSM #-}

instance Monad UniqSM where
  (>>=) = thenUs
  (>>)  = (*>)

instance Applicative UniqSM where
  pure = returnUs
  USM mf <*> USM mx = mkUniqSM $ \s0 ->
    case mf s0 of
      UniqResult f s1 ->
        case mx s1 of
          UniqResult x s2 -> UniqResult (f x) s2
  (*>) = thenUs_

-- TODO: try to get rid of this instance
instance MonadFail UniqSM where
  fail = panic

-- | Run the 'UniqSM' action, returning the final 'UniqSupply'
initUs :: UniqSupply -> UniqSM a -> (a, UniqSupply)
initUs init_us m = case unUSM m init_us of
  UniqResult r us -> (r, us)

-- | Run the 'UniqSM' action, discarding the final 'UniqSupply'
initUs_ :: UniqSupply -> UniqSM a -> a
initUs_ init_us m = case unUSM m init_us of
  UniqResult r _ -> r

{-# INLINE thenUs #-}
{-# INLINE returnUs #-}
{-# INLINE splitUniqSupply #-}

-- @thenUs@ is where we split the @UniqSupply@.

-- | Unwrap a 'UniqSM' action into an ordinary state-passing function,
-- boxing the unboxed 'UniqResult' pair.
liftUSM :: UniqSM a -> UniqSupply -> (a, UniqSupply)
liftUSM (USM m) us = case m us of
  UniqResult a us' -> (a, us')

instance MonadFix UniqSM where
  -- NB: the 'let' must stay lazy: the result 'r' is fed back into 'm'.
  mfix m = mkUniqSM $ \us ->
             let (r, us') = liftUSM (m r) us
             in UniqResult r us'

-- | Bind: run the first action, feed its result to the continuation,
-- threading the supply through both.
thenUs :: UniqSM a -> (a -> UniqSM b) -> UniqSM b
thenUs (USM m) k
  = mkUniqSM $ \us ->
      case m us of
        UniqResult a us' -> unUSM (k a) us'

-- | Sequence two actions, discarding the first result.
thenUs_ :: UniqSM a -> UniqSM b -> UniqSM b
thenUs_ (USM m) (USM n)
  = mkUniqSM $ \us ->
      case m us of
        UniqResult _ us' -> n us'

returnUs :: a -> UniqSM a
returnUs a = mkUniqSM (\us -> UniqResult a us)

-- | Split the current supply: yield one half, continue with the other.
getUs :: UniqSM UniqSupply
getUs = mkUniqSM $ \us ->
  case splitUniqSupply us of
    (us1, us2) -> UniqResult us1 us2

-- | A monad for generating unique identifiers
class Monad m => MonadUnique m where
    -- | Get a new UniqueSupply
    getUniqueSupplyM :: m UniqSupply
    -- | Get a new unique identifier
    getUniqueM  :: m Unique
    -- | Get an infinite list of new unique identifiers
    getUniquesM :: m [Unique]

    -- This default definition of getUniqueM, while
correct, is not as -- efficient as it could be since it needlessly generates and throws away -- an extra Unique. For your instances consider providing an explicit -- definition for 'getUniqueM' which uses 'takeUniqFromSupply' directly. getUniqueM = liftM uniqFromSupply getUniqueSupplyM getUniquesM = liftM uniqsFromSupply getUniqueSupplyM instance MonadUnique UniqSM where getUniqueSupplyM = getUs getUniqueM = getUniqueUs getUniquesM = getUniquesUs getUniqueUs :: UniqSM Unique getUniqueUs = mkUniqSM (\us0 -> case takeUniqFromSupply us0 of (u,us1) -> UniqResult u us1) getUniquesUs :: UniqSM [Unique] getUniquesUs = mkUniqSM (\us0 -> case splitUniqSupply us0 of (us1,us2) -> UniqResult (uniqsFromSupply us1) us2) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Var.hs0000644000000000000000000010310314472400113020022 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section{@Vars@: Variables} -} {-# LANGUAGE FlexibleContexts, MultiWayIf, FlexibleInstances, DeriveDataTypeable, PatternSynonyms, BangPatterns #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- | -- #name_types# -- GHC uses several kinds of name internally: -- -- * 'GHC.Types.Name.Occurrence.OccName': see "GHC.Types.Name.Occurrence#name_types" -- -- * 'GHC.Types.Name.Reader.RdrName': see "GHC.Types.Name.Reader#name_types" -- -- * 'GHC.Types.Name.Name': see "GHC.Types.Name#name_types" -- -- * 'GHC.Types.Id.Id': see "GHC.Types.Id#name_types" -- -- * 'GHC.Types.Var.Var' is a synonym for the 'GHC.Types.Id.Id' type but it may additionally -- potentially contain type variables, which have a 'GHC.Core.TyCo.Rep.Kind' -- rather than a 'GHC.Core.TyCo.Rep.Type' and only contain some extra -- details during typechecking. 
-- -- These 'Var' names may either be global or local, see "GHC.Types.Var#globalvslocal" -- -- #globalvslocal# -- Global 'Id's and 'Var's are those that are imported or correspond -- to a data constructor, primitive operation, or record selectors. -- Local 'Id's and 'Var's are those bound within an expression -- (e.g. by a lambda) or at the top level of the module being compiled. module GHC.Types.Var ( -- * The main data type and synonyms Var, CoVar, Id, NcId, DictId, DFunId, EvVar, EqVar, EvId, IpId, JoinId, TyVar, TcTyVar, TypeVar, KindVar, TKVar, TyCoVar, -- * In and Out variants InVar, InCoVar, InId, InTyVar, OutVar, OutCoVar, OutId, OutTyVar, -- ** Taking 'Var's apart varName, varUnique, varType, varMult, varMultMaybe, -- ** Modifying 'Var's setVarName, setVarUnique, setVarType, updateVarType, updateVarTypeM, -- ** Constructing, taking apart, modifying 'Id's mkGlobalVar, mkLocalVar, mkExportedLocalVar, mkCoVar, idInfo, idDetails, lazySetIdInfo, setIdDetails, globaliseId, setIdExported, setIdNotExported, setIdMult, updateIdTypeButNotMult, updateIdTypeAndMult, updateIdTypeAndMultM, -- ** Predicates isId, isTyVar, isTcTyVar, isLocalVar, isLocalId, isCoVar, isNonCoVarId, isTyCoVar, isGlobalId, isExportedId, mustHaveLocalBinding, -- * ArgFlags ArgFlag(Invisible,Required,Specified,Inferred), AnonArgFlag(..), Specificity(..), isVisibleArgFlag, isInvisibleArgFlag, isInferredArgFlag, sameVis, -- * TyVar's VarBndr(..), TyCoVarBinder, TyVarBinder, InvisTVBinder, ReqTVBinder, binderVar, binderVars, binderArgFlag, binderType, mkTyCoVarBinder, mkTyCoVarBinders, mkTyVarBinder, mkTyVarBinders, isTyVarBinder, tyVarSpecToBinder, tyVarSpecToBinders, tyVarReqToBinder, tyVarReqToBinders, mapVarBndr, mapVarBndrs, lookupVarBndr, -- ** Constructing TyVar's mkTyVar, mkTcTyVar, -- ** Taking 'TyVar's apart tyVarName, tyVarKind, tcTyVarDetails, setTcTyVarDetails, -- ** Modifying 'TyVar's setTyVarName, setTyVarUnique, setTyVarKind, updateTyVarKind, updateTyVarKindM, nonDetCmpVar ) where 
import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCo.Rep( Type, Kind, Mult ) import {-# SOURCE #-} GHC.Core.TyCo.Ppr( pprKind ) import {-# SOURCE #-} GHC.Tc.Utils.TcType( TcTyVarDetails, pprTcTyVarDetails, vanillaSkolemTvUnk ) import {-# SOURCE #-} GHC.Types.Id.Info( IdDetails, IdInfo, coVarDetails, isCoVarDetails, vanillaIdInfo, pprIdDetails ) import {-# SOURCE #-} GHC.Builtin.Types ( manyDataConTy ) import GHC.Types.Name hiding (varName) import GHC.Types.Unique ( Uniquable, Unique, getKey, getUnique , mkUniqueGrimily, nonDetCmpUnique ) import GHC.Utils.Misc import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import Data.Data {- ************************************************************************ * * Synonyms * * ************************************************************************ -- These synonyms are here and not in Id because otherwise we need a very -- large number of SOURCE imports of "GHC.Types.Id" :-( -} -- | Identifier type Id = Var -- A term-level identifier -- predicate: isId -- | Coercion Variable type CoVar = Id -- See Note [Evidence: EvIds and CoVars] -- predicate: isCoVar -- | type NcId = Id -- A term-level (value) variable that is -- /not/ an (unlifted) coercion -- predicate: isNonCoVarId -- | Type or kind Variable type TyVar = Var -- Type *or* kind variable (historical) -- | Type or Kind Variable type TKVar = Var -- Type *or* kind variable (historical) -- | Type variable that might be a metavariable type TcTyVar = Var -- | Type Variable type TypeVar = Var -- Definitely a type variable -- | Kind Variable type KindVar = Var -- Definitely a kind variable -- See Note [Kind and type variables] -- See Note [Evidence: EvIds and CoVars] -- | Evidence Identifier type EvId = Id -- Term-level evidence: DictId, IpId, or EqVar -- | Evidence Variable type EvVar = EvId -- ...historical name for EvId -- | Dictionary Function Identifier type DFunId = Id -- A dictionary function -- | Dictionary Identifier 
type DictId = EvId -- A dictionary variable -- | Implicit parameter Identifier type IpId = EvId -- A term-level implicit parameter -- | Equality Variable type EqVar = EvId -- Boxed equality evidence type JoinId = Id -- A join variable -- | Type or Coercion Variable type TyCoVar = Id -- Type, *or* coercion variable -- predicate: isTyCoVar {- Many passes apply a substitution, and it's very handy to have type synonyms to remind us whether or not the substitution has been applied -} type InVar = Var type InTyVar = TyVar type InCoVar = CoVar type InId = Id type OutVar = Var type OutTyVar = TyVar type OutCoVar = CoVar type OutId = Id {- Note [Evidence: EvIds and CoVars] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * An EvId (evidence Id) is a term-level evidence variable (dictionary, implicit parameter, or equality). Could be boxed or unboxed. * DictId, IpId, and EqVar are synonyms when we know what kind of evidence we are talking about. For example, an EqVar has type (t1 ~ t2). * A CoVar is always an un-lifted coercion, of type (t1 ~# t2) or (t1 ~R# t2) Note [Kind and type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before kind polymorphism, TyVar were used to mean type variables. Now they are used to mean kind *or* type variables. KindVar is used when we know for sure that it is a kind variable. In future, we might want to go over the whole compiler code to use: - TKVar to mean kind or type variables - TypeVar to mean type variables only - KindVar to mean kind variables ************************************************************************ * * \subsection{The main data type declarations} * * ************************************************************************ Every @Var@ has a @Unique@, to uniquify it and for fast comparison, a @Type@, and an @IdInfo@ (non-essential info about it, e.g., strictness). The essential info about different kinds of @Vars@ is in its @VarDetails@. 
-} -- | Variable -- -- Essentially a typed 'Name', that may also contain some additional information -- about the 'Var' and its use sites. data Var = TyVar { -- Type and kind variables -- see Note [Kind and type variables] varName :: !Name, realUnique :: {-# UNPACK #-} !Int, -- ^ Key for fast comparison -- Identical to the Unique in the name, -- cached here for speed varType :: Kind -- ^ The type or kind of the 'Var' in question } | TcTyVar { -- Used only during type inference -- Used for kind variables during -- inference, as well varName :: !Name, realUnique :: {-# UNPACK #-} !Int, varType :: Kind, tc_tv_details :: TcTyVarDetails } | Id { varName :: !Name, realUnique :: {-# UNPACK #-} !Int, varType :: Type, varMult :: Mult, -- See Note [Multiplicity of let binders] idScope :: IdScope, id_details :: IdDetails, -- Stable, doesn't change id_info :: IdInfo } -- Unstable, updated by simplifier -- | Identifier Scope data IdScope -- See Note [GlobalId/LocalId] = GlobalId | LocalId ExportFlag data ExportFlag -- See Note [ExportFlag on binders] = NotExported -- ^ Not exported: may be discarded as dead code. | Exported -- ^ Exported: kept alive {- Note [ExportFlag on binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An ExportFlag of "Exported" on a top-level binder says "keep this binding alive; do not drop it as dead code". This transitively keeps alive all the other top-level bindings that this binding refers to. This property is persisted all the way down the pipeline, so that the binding will be compiled all the way to object code, and its symbols will appear in the linker symbol table. However, note that this use of "exported" is quite different to the export list on a Haskell module. Setting the ExportFlag on an Id does /not/ mean that if you import the module (in Haskell source code) you will see this Id. Of course, things that appear in the export list of the source Haskell module do indeed have their ExportFlag set. 
But many other things, such as dictionary functions, are kept alive by having their ExportFlag set, even though they are not exported in the source-code sense. We should probably use a different term for ExportFlag, like KeepAlive. Note [GlobalId/LocalId] ~~~~~~~~~~~~~~~~~~~~~~~ A GlobalId is * always a constant (top-level) * imported, or data constructor, or primop, or record selector * has a Unique that is globally unique across the whole GHC invocation (a single invocation may compile multiple modules) * never treated as a candidate by the free-variable finder; it's a constant! A LocalId is * bound within an expression (lambda, case, local let(rec)) * or defined at top level in the module being compiled * always treated as a candidate by the free-variable finder After CoreTidy, top-level LocalIds are turned into GlobalIds Note [Multiplicity of let binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In Core, let-binders' multiplicity is always completely determined by syntax: a recursive let will always have multiplicity Many (it's a prerequisite for being recursive), and non-recursive let doesn't have a conventional multiplicity, instead they act, for the purpose of multiplicity, as an alias for their right-hand side. Therefore, the `varMult` field of identifier is only used by binders in lambda and case expressions. In a let expression the `varMult` field holds an arbitrary value which will (and must!) be ignored. 
-} instance Outputable Var where ppr var = sdocOption sdocSuppressVarKinds $ \supp_var_kinds -> getPprDebug $ \debug -> getPprStyle $ \sty -> let ppr_var = case var of (TyVar {}) | debug -> brackets (text "tv") (TcTyVar {tc_tv_details = d}) | dumpStyle sty || debug -> brackets (pprTcTyVarDetails d) (Id { idScope = s, id_details = d }) | debug -> brackets (ppr_id_scope s <> pprIdDetails d) _ -> empty in if | debug && (not supp_var_kinds) -> parens (ppr (varName var) <+> ppr (varMultMaybe var) <+> ppr_var <+> dcolon <+> pprKind (tyVarKind var)) | otherwise -> ppr (varName var) <> ppr_var ppr_id_scope :: IdScope -> SDoc ppr_id_scope GlobalId = text "gid" ppr_id_scope (LocalId Exported) = text "lidx" ppr_id_scope (LocalId NotExported) = text "lid" instance NamedThing Var where getName = varName instance Uniquable Var where getUnique = varUnique instance Eq Var where a == b = realUnique a == realUnique b instance Ord Var where a <= b = realUnique a <= realUnique b a < b = realUnique a < realUnique b a >= b = realUnique a >= realUnique b a > b = realUnique a > realUnique b a `compare` b = a `nonDetCmpVar` b -- | Compare Vars by their Uniques. -- This is what Ord Var does, provided here to make it explicit at the -- call-site that it can introduce non-determinism. -- See Note [Unique Determinism] nonDetCmpVar :: Var -> Var -> Ordering nonDetCmpVar a b = varUnique a `nonDetCmpUnique` varUnique b instance Data Var where -- don't traverse? toConstr _ = abstractConstr "Var" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "Var" instance HasOccName Var where occName = nameOccName . 
varName varUnique :: Var -> Unique varUnique var = mkUniqueGrimily (realUnique var) varMultMaybe :: Id -> Maybe Mult varMultMaybe (Id { varMult = mult }) = Just mult varMultMaybe _ = Nothing setVarUnique :: Var -> Unique -> Var setVarUnique var uniq = var { realUnique = getKey uniq, varName = setNameUnique (varName var) uniq } setVarName :: Var -> Name -> Var setVarName var new_name = var { realUnique = getKey (getUnique new_name), varName = new_name } setVarType :: Var -> Type -> Var setVarType id ty = id { varType = ty } -- | Update a 'Var's type. Does not update the /multiplicity/ -- stored in an 'Id', if any. Because of the possibility for -- abuse, ASSERTs that there is no multiplicity to update. updateVarType :: (Type -> Type) -> Var -> Var updateVarType upd var = case var of Id { id_details = details } -> assert (isCoVarDetails details) $ result _ -> result where result = var { varType = upd (varType var) } -- | Update a 'Var's type monadically. Does not update the /multiplicity/ -- stored in an 'Id', if any. Because of the possibility for -- abuse, ASSERTs that there is no multiplicity to update. updateVarTypeM :: Monad m => (Type -> m Type) -> Var -> m Var updateVarTypeM upd var = case var of Id { id_details = details } -> assert (isCoVarDetails details) $ result _ -> result where result = do { ty' <- upd (varType var) ; return (var { varType = ty' }) } {- ********************************************************************* * * * ArgFlag * * ********************************************************************* -} -- | Argument Flag -- -- Is something required to appear in source Haskell ('Required'), -- permitted by request ('Specified') (visible type application), or -- prohibited entirely from appearing in source Haskell ('Inferred')? 
-- See Note [VarBndrs, TyCoVarBinders, TyConBinders, and visibility] in "GHC.Core.TyCo.Rep"
data ArgFlag = Invisible Specificity
             | Required
  deriving (Eq, Ord, Data)
  -- (<) on ArgFlag means "is less visible than"

-- | Whether an 'Invisible' argument may appear in source Haskell.
data Specificity = InferredSpec
                   -- ^ the argument may not appear in source Haskell, it is
                   -- only inferred.
                 | SpecifiedSpec
                   -- ^ the argument may appear in source Haskell, but isn't
                   -- required.
  deriving (Eq, Ord, Data)

pattern Inferred, Specified :: ArgFlag
pattern Inferred  = Invisible InferredSpec
pattern Specified = Invisible SpecifiedSpec

{-# COMPLETE Required, Specified, Inferred #-}

-- | Does this 'ArgFlag' classify an argument that is written in Haskell?
isVisibleArgFlag :: ArgFlag -> Bool
isVisibleArgFlag Required       = True
isVisibleArgFlag (Invisible {}) = False

-- | Does this 'ArgFlag' classify an argument that is not written in Haskell?
isInvisibleArgFlag :: ArgFlag -> Bool
isInvisibleArgFlag = not . isVisibleArgFlag

-- | Is this argument 'Inferred'?  More restrictive than 'isInvisibleArgFlag':
-- a 'Specified' argument is invisible but not inferred.
isInferredArgFlag :: ArgFlag -> Bool
isInferredArgFlag af = case af of
  Invisible InferredSpec -> True
  _                      -> False

-- | Do these denote the same level of visibility? 'Required'
-- arguments are visible, others are not. So this function
-- equates 'Specified' and 'Inferred'. Used for printing.
sameVis :: ArgFlag -> ArgFlag -> Bool
sameVis af1 af2 = case (af1, af2) of
  (Required,    Required)    -> True
  (Invisible _, Invisible _) -> True
  _                          -> False

instance Outputable ArgFlag where
  ppr af = case af of
    Required  -> text "[req]"
    Specified -> text "[spec]"
    Inferred  -> text "[infrd]"

instance Binary Specificity where
  put_ bh SpecifiedSpec = putByte bh 0
  put_ bh InferredSpec  = putByte bh 1

  get bh = do
    tag <- getByte bh
    if tag == 0 then pure SpecifiedSpec
                else pure InferredSpec

instance Binary ArgFlag where
  put_ bh af = putByte bh $ case af of
    Required  -> 0
    Specified -> 1
    Inferred  -> 2

  get bh = do
    tag <- getByte bh
    pure $ case tag of
      0 -> Required
      1 -> Specified
      _ -> Inferred

-- | The non-dependent version of 'ArgFlag'.
-- See Note [AnonArgFlag]
-- Appears here partly so that it's together with its friends ArgFlag
-- and ForallVisFlag, but also because it is used in IfaceType, rather
-- early in the compilation chain
data AnonArgFlag
  = VisArg    -- ^ Used for @(->)@: an ordinary non-dependent arrow.
              --   The argument is visible in source code.
  | InvisArg  -- ^ Used for @(=>)@: a non-dependent predicate arrow.
              --   The argument is invisible in source code.
  deriving (Eq, Ord, Data)

instance Outputable AnonArgFlag where
  ppr VisArg   = text "[vis]"
  ppr InvisArg = text "[invis]"

instance Binary AnonArgFlag where
  put_ bh VisArg   = putByte bh 0
  put_ bh InvisArg = putByte bh 1

  get bh = do
    tag <- getByte bh
    pure $ if tag == 0 then VisArg else InvisArg

{- Note [AnonArgFlag]
~~~~~~~~~~~~~~~~~~~~~
AnonArgFlag is used principally in the FunTy constructor of Type.
  FunTy VisArg   t1 t2 means t1 -> t2
  FunTy InvisArg t1 t2 means t1 => t2

However, the AnonArgFlag in a FunTy is just redundant, cached
information.  In (FunTy { ft_af = af, ft_arg = t1, ft_res = t2 })
  * if (isPredTy t1 = True)  then af = InvisArg
  * if (isPredTy t1 = False) then af = VisArg
where isPredTy is defined in GHC.Core.Type, and sees if t1's kind
is Constraint.
See GHC.Core.TyCo.Rep Note [Types for coercions, predicates, and evidence] GHC.Core.Utils.mkFunctionType :: Mult -> Type -> Type -> Type uses isPredTy to decide the AnonArgFlag for the FunTy. The term (Lam b e), and coercion (FunCo co1 co2) don't carry AnonArgFlags; instead they use mkFunctionType when we want to get their types; see mkLamType and coercionLKind/RKind resp. This is just an engineering choice; we could cache here too if we wanted. Why bother with all this? After all, we are in Core, where (=>) and (->) behave the same. We maintain this distinction throughout Core so that we can cheaply and conveniently determine * How to print a type * How to split up a type: tcSplitSigmaTy * How to specialise it (over type classes; GHC.Core.Opt.Specialise) For the specialisation point, consider (\ (d :: Ord a). blah). We want to give it type (Ord a => blah_ty) with a fat arrow; that is, using mkInvisFunTy, not mkVisFunTy. Why? Because the /specialiser/ treats dictionary arguments specially. Suppose we do w/w on 'foo', thus (#11272, #6056) foo :: Ord a => Int -> blah foo a d x = case x of I# x' -> $wfoo @a d x' $wfoo :: Ord a => Int# -> blah Now, at a call we see (foo @Int dOrdInt). The specialiser will specialise this to $sfoo, where $sfoo :: Int -> blah $sfoo x = case x of I# x' -> $wfoo @Int dOrdInt x' Now we /must/ also specialise $wfoo! But it wasn't user-written, and has a type built with mkLamTypes. Conclusion: the easiest thing is to make mkLamType build (c => ty) when the argument is a predicate type. See GHC.Core.TyCo.Rep Note [Types for coercions, predicates, and evidence] -} {- ********************************************************************* * * * VarBndr, TyCoVarBinder * * ********************************************************************* -} {- Note [The VarBndr type and its uses] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ VarBndr is polymorphic in both var and visibility fields. 
Currently there are nine different uses of 'VarBndr': * Var.TyCoVarBinder = VarBndr TyCoVar ArgFlag Binder of a forall-type; see ForAllTy in GHC.Core.TyCo.Rep * Var.TyVarBinder = VarBndr TyVar ArgFlag Subset of TyCoVarBinder when we are sure the binder is a TyVar * Var.InvisTVBinder = VarBndr TyVar Specificity Specialised form of TyVarBinder, when ArgFlag = Invisible s See GHC.Core.Type.splitForAllInvisTVBinders * Var.ReqTVBinder = VarBndr TyVar () Specialised form of TyVarBinder, when ArgFlag = Required See GHC.Core.Type.splitForAllReqTVBinders This one is barely used * TyCon.TyConBinder = VarBndr TyVar TyConBndrVis Binders of a TyCon; see TyCon in GHC.Core.TyCon * TyCon.TyConTyCoBinder = VarBndr TyCoVar TyConBndrVis Binders of a PromotedDataCon See Note [Promoted GADT data constructors] in GHC.Core.TyCon * IfaceType.IfaceForAllBndr = VarBndr IfaceBndr ArgFlag * IfaceType.IfaceForAllSpecBndr = VarBndr IfaceBndr Specificity * IfaceType.IfaceTyConBinder = VarBndr IfaceBndr TyConBndrVis -} data VarBndr var argf = Bndr var argf -- See Note [The VarBndr type and its uses] deriving( Data ) -- | Variable Binder -- -- A 'TyCoVarBinder' is the binder of a ForAllTy -- It's convenient to define this synonym here rather its natural -- home in "GHC.Core.TyCo.Rep", because it's used in GHC.Core.DataCon.hs-boot -- -- A 'TyVarBinder' is a binder with only TyVar type TyCoVarBinder = VarBndr TyCoVar ArgFlag type TyVarBinder = VarBndr TyVar ArgFlag type InvisTVBinder = VarBndr TyVar Specificity type ReqTVBinder = VarBndr TyVar () tyVarSpecToBinders :: [VarBndr a Specificity] -> [VarBndr a ArgFlag] tyVarSpecToBinders = map tyVarSpecToBinder tyVarSpecToBinder :: VarBndr a Specificity -> VarBndr a ArgFlag tyVarSpecToBinder (Bndr tv vis) = Bndr tv (Invisible vis) tyVarReqToBinders :: [VarBndr a ()] -> [VarBndr a ArgFlag] tyVarReqToBinders = map tyVarReqToBinder tyVarReqToBinder :: VarBndr a () -> VarBndr a ArgFlag tyVarReqToBinder (Bndr tv _) = Bndr tv Required binderVar :: VarBndr tv 
argf -> tv binderVar (Bndr v _) = v binderVars :: [VarBndr tv argf] -> [tv] binderVars tvbs = map binderVar tvbs binderArgFlag :: VarBndr tv argf -> argf binderArgFlag (Bndr _ argf) = argf binderType :: VarBndr TyCoVar argf -> Type binderType (Bndr tv _) = varType tv -- | Make a named binder mkTyCoVarBinder :: vis -> TyCoVar -> VarBndr TyCoVar vis mkTyCoVarBinder vis var = Bndr var vis -- | Make a named binder -- 'var' should be a type variable mkTyVarBinder :: vis -> TyVar -> VarBndr TyVar vis mkTyVarBinder vis var = assert (isTyVar var) $ Bndr var vis -- | Make many named binders mkTyCoVarBinders :: vis -> [TyCoVar] -> [VarBndr TyCoVar vis] mkTyCoVarBinders vis = map (mkTyCoVarBinder vis) -- | Make many named binders -- Input vars should be type variables mkTyVarBinders :: vis -> [TyVar] -> [VarBndr TyVar vis] mkTyVarBinders vis = map (mkTyVarBinder vis) isTyVarBinder :: TyCoVarBinder -> Bool isTyVarBinder (Bndr v _) = isTyVar v mapVarBndr :: (var -> var') -> (VarBndr var flag) -> (VarBndr var' flag) mapVarBndr f (Bndr v fl) = Bndr (f v) fl mapVarBndrs :: (var -> var') -> [VarBndr var flag] -> [VarBndr var' flag] mapVarBndrs f = map (mapVarBndr f) lookupVarBndr :: Eq var => var -> [VarBndr var flag] -> Maybe flag lookupVarBndr var bndrs = lookup var zipped_bndrs where zipped_bndrs = map (\(Bndr v f) -> (v,f)) bndrs instance Outputable tv => Outputable (VarBndr tv ArgFlag) where ppr (Bndr v Required) = ppr v ppr (Bndr v Specified) = char '@' <> ppr v ppr (Bndr v Inferred) = braces (ppr v) instance Outputable tv => Outputable (VarBndr tv Specificity) where ppr = ppr . 
tyVarSpecToBinder instance (Binary tv, Binary vis) => Binary (VarBndr tv vis) where put_ bh (Bndr tv vis) = do { put_ bh tv; put_ bh vis } get bh = do { tv <- get bh; vis <- get bh; return (Bndr tv vis) } instance NamedThing tv => NamedThing (VarBndr tv flag) where getName (Bndr tv _) = getName tv {- ************************************************************************ * * * Type and kind variables * * * ************************************************************************ -} tyVarName :: TyVar -> Name tyVarName = varName tyVarKind :: TyVar -> Kind tyVarKind = varType setTyVarUnique :: TyVar -> Unique -> TyVar setTyVarUnique = setVarUnique setTyVarName :: TyVar -> Name -> TyVar setTyVarName = setVarName setTyVarKind :: TyVar -> Kind -> TyVar setTyVarKind tv k = tv {varType = k} updateTyVarKind :: (Kind -> Kind) -> TyVar -> TyVar updateTyVarKind update tv = tv {varType = update (tyVarKind tv)} updateTyVarKindM :: (Monad m) => (Kind -> m Kind) -> TyVar -> m TyVar updateTyVarKindM update tv = do { k' <- update (tyVarKind tv) ; return $ tv {varType = k'} } mkTyVar :: Name -> Kind -> TyVar mkTyVar name kind = TyVar { varName = name , realUnique = getKey (nameUnique name) , varType = kind } mkTcTyVar :: Name -> Kind -> TcTyVarDetails -> TyVar mkTcTyVar name kind details = -- NB: 'kind' may be a coercion kind; cf, 'GHC.Tc.Utils.TcMType.newMetaCoVar' TcTyVar { varName = name, realUnique = getKey (nameUnique name), varType = kind, tc_tv_details = details } tcTyVarDetails :: TyVar -> TcTyVarDetails -- See Note [TcTyVars and TyVars in the typechecker] in GHC.Tc.Utils.TcType tcTyVarDetails (TcTyVar { tc_tv_details = details }) = details -- MP: This should never happen, but it does. Future work is to turn this into a panic. 
tcTyVarDetails (TyVar {}) = vanillaSkolemTvUnk tcTyVarDetails var = pprPanic "tcTyVarDetails" (ppr var <+> dcolon <+> pprKind (tyVarKind var)) setTcTyVarDetails :: TyVar -> TcTyVarDetails -> TyVar setTcTyVarDetails tv details = tv { tc_tv_details = details } {- %************************************************************************ %* * \subsection{Ids} * * ************************************************************************ -} idInfo :: HasDebugCallStack => Id -> IdInfo idInfo (Id { id_info = info }) = info idInfo other = pprPanic "idInfo" (ppr other) idDetails :: Id -> IdDetails idDetails (Id { id_details = details }) = details idDetails other = pprPanic "idDetails" (ppr other) -- The next three have a 'Var' suffix even though they always build -- Ids, because "GHC.Types.Id" uses 'mkGlobalId' etc with different types mkGlobalVar :: IdDetails -> Name -> Type -> IdInfo -> Id mkGlobalVar details name ty info = mk_id name manyDataConTy ty GlobalId details info -- There is no support for linear global variables yet. They would require -- being checked at link-time, which can be useful, but is not a priority. mkLocalVar :: IdDetails -> Name -> Mult -> Type -> IdInfo -> Id mkLocalVar details name w ty info = mk_id name w ty (LocalId NotExported) details info mkCoVar :: Name -> Type -> CoVar -- Coercion variables have no IdInfo mkCoVar name ty = mk_id name manyDataConTy ty (LocalId NotExported) coVarDetails vanillaIdInfo -- | Exported 'Var's will not be removed as dead code mkExportedLocalVar :: IdDetails -> Name -> Type -> IdInfo -> Id mkExportedLocalVar details name ty info = mk_id name manyDataConTy ty (LocalId Exported) details info -- There is no support for exporting linear variables. 
See also [mkGlobalVar] mk_id :: Name -> Mult -> Type -> IdScope -> IdDetails -> IdInfo -> Id mk_id name !w ty scope details info = Id { varName = name, realUnique = getKey (nameUnique name), varMult = w, varType = ty, idScope = scope, id_details = details, id_info = info } ------------------- lazySetIdInfo :: Id -> IdInfo -> Var lazySetIdInfo id info = id { id_info = info } setIdDetails :: Id -> IdDetails -> Id setIdDetails id details = id { id_details = details } globaliseId :: Id -> Id -- ^ If it's a local, make it global globaliseId id = id { idScope = GlobalId } setIdExported :: Id -> Id -- ^ Exports the given local 'Id'. Can also be called on global 'Id's, such as data constructors -- and class operations, which are born as global 'Id's and automatically exported setIdExported id@(Id { idScope = LocalId {} }) = id { idScope = LocalId Exported } setIdExported id@(Id { idScope = GlobalId }) = id setIdExported tv = pprPanic "setIdExported" (ppr tv) setIdNotExported :: Id -> Id -- ^ We can only do this to LocalIds setIdNotExported id = assert (isLocalId id) $ id { idScope = LocalId NotExported } ----------------------- updateIdTypeButNotMult :: (Type -> Type) -> Id -> Id updateIdTypeButNotMult f id = id { varType = f (varType id) } updateIdTypeAndMult :: (Type -> Type) -> Id -> Id updateIdTypeAndMult f id@(Id { varType = ty , varMult = mult }) = id { varType = ty' , varMult = mult' } where !ty' = f ty !mult' = f mult updateIdTypeAndMult _ other = pprPanic "updateIdTypeAndMult" (ppr other) updateIdTypeAndMultM :: Monad m => (Type -> m Type) -> Id -> m Id updateIdTypeAndMultM f id@(Id { varType = ty , varMult = mult }) = do { !ty' <- f ty ; !mult' <- f mult ; return (id { varType = ty', varMult = mult' }) } updateIdTypeAndMultM _ other = pprPanic "updateIdTypeAndMultM" (ppr other) setIdMult :: Id -> Mult -> Id setIdMult id !r | isId id = id { varMult = r } | otherwise = pprPanic "setIdMult" (ppr id <+> ppr r) {- 
************************************************************************ * * \subsection{Predicates over variables} * * ************************************************************************ -} -- | Is this a type-level (i.e., computationally irrelevant, thus erasable) -- variable? Satisfies @isTyVar = not . isId@. isTyVar :: Var -> Bool -- True of both TyVar and TcTyVar isTyVar (TyVar {}) = True isTyVar (TcTyVar {}) = True isTyVar _ = False isTcTyVar :: Var -> Bool -- True of TcTyVar only isTcTyVar (TcTyVar {}) = True isTcTyVar _ = False isTyCoVar :: Var -> Bool isTyCoVar v = isTyVar v || isCoVar v -- | Is this a value-level (i.e., computationally relevant) 'Id'entifier? -- Satisfies @isId = not . isTyVar@. isId :: Var -> Bool isId (Id {}) = True isId _ = False -- | Is this a coercion variable? -- Satisfies @'isId' v ==> 'isCoVar' v == not ('isNonCoVarId' v)@. isCoVar :: Var -> Bool isCoVar (Id { id_details = details }) = isCoVarDetails details isCoVar _ = False -- | Is this a term variable ('Id') that is /not/ a coercion variable? -- Satisfies @'isId' v ==> 'isCoVar' v == not ('isNonCoVarId' v)@. isNonCoVarId :: Var -> Bool isNonCoVarId (Id { id_details = details }) = not (isCoVarDetails details) isNonCoVarId _ = False isLocalId :: Var -> Bool isLocalId (Id { idScope = LocalId _ }) = True isLocalId _ = False -- | 'isLocalVar' returns @True@ for type variables as well as local 'Id's -- These are the variables that we need to pay attention to when finding free -- variables, or doing dependency analysis. isLocalVar :: Var -> Bool isLocalVar v = not (isGlobalId v) isGlobalId :: Var -> Bool isGlobalId (Id { idScope = GlobalId }) = True isGlobalId _ = False -- | 'mustHaveLocalBinding' returns @True@ of 'Id's and 'TyVar's -- that must have a binding in this module. The converse -- is not quite right: there are some global 'Id's that must have -- bindings, such as record selectors. 
But that doesn't matter, -- because it's only used for assertions mustHaveLocalBinding :: Var -> Bool mustHaveLocalBinding var = isLocalVar var -- | 'isExportedIdVar' means \"don't throw this away\" isExportedId :: Var -> Bool isExportedId (Id { idScope = GlobalId }) = True isExportedId (Id { idScope = LocalId Exported}) = True isExportedId _ = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Var/Env.hs0000644000000000000000000006060714472400113020565 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Types.Var.Env ( -- * Var, Id and TyVar environments (maps) VarEnv, IdEnv, TyVarEnv, CoVarEnv, TyCoVarEnv, -- ** Manipulating these environments emptyVarEnv, unitVarEnv, mkVarEnv, mkVarEnv_Directly, elemVarEnv, disjointVarEnv, extendVarEnv, extendVarEnv_C, extendVarEnv_Acc, extendVarEnvList, plusVarEnv, plusVarEnv_C, plusVarEnv_CD, plusMaybeVarEnv_C, plusVarEnvList, alterVarEnv, delVarEnvList, delVarEnv, minusVarEnv, lookupVarEnv, lookupVarEnv_NF, lookupWithDefaultVarEnv, lookupVarEnv_Directly, mapVarEnv, zipVarEnv, modifyVarEnv, modifyVarEnv_Directly, isEmptyVarEnv, elemVarEnvByKey, filterVarEnv, restrictVarEnv, partitionVarEnv, -- * Deterministic Var environments (maps) DVarEnv, DIdEnv, DTyVarEnv, -- ** Manipulating these environments emptyDVarEnv, mkDVarEnv, dVarEnvElts, extendDVarEnv, extendDVarEnv_C, extendDVarEnvList, lookupDVarEnv, elemDVarEnv, isEmptyDVarEnv, foldDVarEnv, nonDetStrictFoldDVarEnv, mapDVarEnv, filterDVarEnv, modifyDVarEnv, alterDVarEnv, plusDVarEnv, plusDVarEnv_C, unitDVarEnv, delDVarEnv, delDVarEnvList, minusDVarEnv, partitionDVarEnv, anyDVarEnv, -- * The InScopeSet type InScopeSet, -- ** Operations on InScopeSets emptyInScopeSet, mkInScopeSet, delInScopeSet, extendInScopeSet, extendInScopeSetList, extendInScopeSetSet, getInScopeVars, lookupInScope, lookupInScope_Directly, unionInScope, elemInScopeSet, uniqAway, varSetInScope, unsafeGetFreshLocalUnique, -- * 
The RnEnv2 type RnEnv2, -- ** Operations on RnEnv2s mkRnEnv2, rnBndr2, rnBndrs2, rnBndr2_var, rnOccL, rnOccR, inRnEnvL, inRnEnvR, rnOccL_maybe, rnOccR_maybe, rnBndrL, rnBndrR, nukeRnEnvL, nukeRnEnvR, rnSwap, delBndrL, delBndrR, delBndrsL, delBndrsR, extendRnInScopeSetList, rnEtaL, rnEtaR, rnInScope, rnInScopeSet, lookupRnInScope, rnEnvL, rnEnvR, -- * TidyEnv and its operation TidyEnv, emptyTidyEnv, mkEmptyTidyEnv, delTidyEnvList ) where import GHC.Prelude import qualified Data.IntMap.Strict as IntMap -- TODO: Move this to UniqFM import GHC.Types.Name.Occurrence import GHC.Types.Name import GHC.Types.Var as Var import GHC.Types.Var.Set import GHC.Types.Unique.Set import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Types.Unique import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Data.Maybe import GHC.Utils.Outputable {- ************************************************************************ * * In-scope sets * * ************************************************************************ -} -- | A set of variables that are in scope at some point. -- -- Note that this is a /superset/ of the variables that are currently in scope. -- See Note [The InScopeSet invariant]. -- -- "Secrets of the Glasgow Haskell Compiler inliner" Section 3.2 provides -- the motivation for this abstraction. newtype InScopeSet = InScope VarSet -- Note [Lookups in in-scope set] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- We store a VarSet here, but we use this for lookups rather than just -- membership tests. Typically the InScopeSet contains the canonical -- version of the variable (e.g. with an informative unfolding), so this -- lookup is useful (see, for instance, Note [In-scope set as a -- substitution]). -- Note [The InScopeSet invariant] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The InScopeSet must include every in-scope variable, but it may also -- include other variables. 
-- Its principal purpose is to provide a set of variables to be avoided
-- when creating a fresh identifier (fresh in the sense that it does not
-- "shadow" any in-scope binding). To do this we simply have to find one that
-- does not appear in the InScopeSet. This is done by the key function
-- GHC.Types.Var.Env.uniqAway.
--
-- See "Secrets of the Glasgow Haskell Compiler inliner" Section 3.2
-- for more detailed motivation. #20419 has further discussion.

instance Outputable InScopeSet where
  ppr (InScope s) =
    text "InScope" <+>
    braces (fsep (map (ppr . Var.varName) (nonDetEltsUniqSet s)))
      -- It's OK to use nonDetEltsUniqSet here because it's
      -- only for pretty printing
      -- In-scope sets get big, and with -dppr-debug
      -- the output is overwhelming

emptyInScopeSet :: InScopeSet
emptyInScopeSet = InScope emptyVarSet

getInScopeVars :: InScopeSet -> VarSet
getInScopeVars (InScope vs) = vs

mkInScopeSet :: VarSet -> InScopeSet
mkInScopeSet in_scope = InScope in_scope

extendInScopeSet :: InScopeSet -> Var -> InScopeSet
extendInScopeSet (InScope in_scope) v
  = InScope (extendVarSet in_scope v)

extendInScopeSetList :: InScopeSet -> [Var] -> InScopeSet
extendInScopeSetList (InScope in_scope) vs
  = InScope $ foldl' extendVarSet in_scope vs

extendInScopeSetSet :: InScopeSet -> VarSet -> InScopeSet
extendInScopeSetSet (InScope in_scope) vs
  = InScope (in_scope `unionVarSet` vs)

delInScopeSet :: InScopeSet -> Var -> InScopeSet
delInScopeSet (InScope in_scope) v = InScope (in_scope `delVarSet` v)

elemInScopeSet :: Var -> InScopeSet -> Bool
elemInScopeSet v (InScope in_scope) = v `elemVarSet` in_scope

-- | Look up a variable in the 'InScopeSet'.  This lets you map from
-- the variable's identity (unique) to its full value.
lookupInScope :: InScopeSet -> Var -> Maybe Var
lookupInScope (InScope in_scope) v = lookupVarSet in_scope v

lookupInScope_Directly :: InScopeSet -> Unique -> Maybe Var
lookupInScope_Directly (InScope in_scope) uniq
  = lookupVarSet_Directly in_scope uniq

unionInScope :: InScopeSet -> InScopeSet -> InScopeSet
unionInScope (InScope s1) (InScope s2) = InScope (s1 `unionVarSet` s2)

varSetInScope :: VarSet -> InScopeSet -> Bool
varSetInScope vars (InScope s1) = vars `subVarSet` s1

{- Note [Local uniques]
~~~~~~~~~~~~~~~~~~~~
Sometimes one must conjure up a unique which is unique in a particular
context (but not necessarily globally unique). For instance, one might
need to create a fresh local identifier which does not shadow any of the
locally in-scope variables. For this purpose we provide 'uniqAway'.

'uniqAway' is implemented in terms of the 'unsafeGetFreshLocalUnique'
operation, which generates an unclaimed 'Unique' from an 'InScopeSet'. To
ensure that we do not conflict with uniques allocated by future allocations
from 'UniqSupply's, Uniques generated by 'unsafeGetFreshLocalUnique' are
allocated into a dedicated region of the unique space (namely the X tag).

Note that one must be quite careful when using uniques generated in this
way since they are only locally unique. In particular, two successive calls
to 'uniqAway' on the same 'InScopeSet' will produce the same unique.
-}

-- | @uniqAway in_scope v@ finds a unique that is not used in the
-- in-scope set, and gives that to v. See Note [Local uniques] and
-- Note [The InScopeSet invariant].
uniqAway :: InScopeSet -> Var -> Var
-- It starts with v's current unique, of course, in the hope that it won't
-- have to change, and thereafter uses the successor to the last derived unique
-- found in the in-scope set.
uniqAway in_scope var | var `elemInScopeSet` in_scope = uniqAway' in_scope var -- Make a new one | otherwise = var -- Nothing to do uniqAway' :: InScopeSet -> Var -> Var -- This one *always* makes up a new variable uniqAway' in_scope var = setVarUnique var (unsafeGetFreshLocalUnique in_scope) -- | @unsafeGetFreshUnique in_scope@ finds a unique that is not in-scope in the -- given 'InScopeSet'. This must be used very carefully since one can very easily -- introduce non-unique 'Unique's this way. See Note [Local uniques]. unsafeGetFreshLocalUnique :: InScopeSet -> Unique unsafeGetFreshLocalUnique (InScope set) | Just (uniq,_) <- IntMap.lookupLT (getKey maxLocalUnique) (ufmToIntMap $ getUniqSet set) , let uniq' = mkLocalUnique uniq , not $ uniq' `ltUnique` minLocalUnique = incrUnique uniq' | otherwise = minLocalUnique {- ************************************************************************ * * Dual renaming * * ************************************************************************ -} -- | Rename Environment 2 -- -- When we are comparing (or matching) types or terms, we are faced with -- \"going under\" corresponding binders. E.g. when comparing: -- -- > \x. e1 ~ \y. e2 -- -- Basically we want to rename [@x@ -> @y@] or [@y@ -> @x@], but there are lots of -- things we must be careful of. In particular, @x@ might be free in @e2@, or -- y in @e1@. So the idea is that we come up with a fresh binder that is free -- in neither, and rename @x@ and @y@ respectively. That means we must maintain: -- -- 1. A renaming for the left-hand expression -- -- 2. A renaming for the right-hand expressions -- -- 3. An in-scope set -- -- Furthermore, when matching, we want to be able to have an 'occurs check', -- to prevent: -- -- > \x. f ~ \y. y -- -- matching with [@f@ -> @y@]. So for each expression we want to know that set of -- locally-bound variables. That is precisely the domain of the mappings 1. -- and 2., but we must ensure that we always extend the mappings as we go in. 
-- -- All of this information is bundled up in the 'RnEnv2' data RnEnv2 = RV2 { envL :: VarEnv Var -- Renaming for Left term , envR :: VarEnv Var -- Renaming for Right term , in_scope :: InScopeSet } -- In scope in left or right terms -- The renamings envL and envR are *guaranteed* to contain a binding -- for every variable bound as we go into the term, even if it is not -- renamed. That way we can ask what variables are locally bound -- (inRnEnvL, inRnEnvR) mkRnEnv2 :: InScopeSet -> RnEnv2 mkRnEnv2 vars = RV2 { envL = emptyVarEnv , envR = emptyVarEnv , in_scope = vars } extendRnInScopeSetList :: RnEnv2 -> [Var] -> RnEnv2 extendRnInScopeSetList env vs | null vs = env | otherwise = env { in_scope = extendInScopeSetList (in_scope env) vs } rnInScope :: Var -> RnEnv2 -> Bool rnInScope x env = x `elemInScopeSet` in_scope env rnInScopeSet :: RnEnv2 -> InScopeSet rnInScopeSet = in_scope -- | Retrieve the left mapping rnEnvL :: RnEnv2 -> VarEnv Var rnEnvL = envL -- | Retrieve the right mapping rnEnvR :: RnEnv2 -> VarEnv Var rnEnvR = envR rnBndrs2 :: RnEnv2 -> [Var] -> [Var] -> RnEnv2 -- ^ Applies 'rnBndr2' to several variables: the two variable lists must be of equal length rnBndrs2 env bsL bsR = foldl2 rnBndr2 env bsL bsR rnBndr2 :: RnEnv2 -> Var -> Var -> RnEnv2 -- ^ @rnBndr2 env bL bR@ goes under a binder @bL@ in the Left term, -- and binder @bR@ in the Right term. 
-- It finds a new binder, @new_b@, -- and returns an environment mapping @bL -> new_b@ and @bR -> new_b@ rnBndr2 env bL bR = fst $ rnBndr2_var env bL bR rnBndr2_var :: RnEnv2 -> Var -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but returns the new variable as well as the -- new environment rnBndr2_var (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL bR = (RV2 { envL = extendVarEnv envL bL new_b -- See Note , envR = extendVarEnv envR bR new_b -- [Rebinding] , in_scope = extendInScopeSet in_scope new_b }, new_b) where -- Find a new binder not in scope in either term new_b | not (bL `elemInScopeSet` in_scope) = bL | not (bR `elemInScopeSet` in_scope) = bR | otherwise = uniqAway' in_scope bL -- Note [Rebinding] -- ~~~~~~~~~~~~~~~~ -- If the new var is the same as the old one, note that -- the extendVarEnv *deletes* any current renaming -- E.g. (\x. \x. ...) ~ (\y. \z. ...) -- -- Inside \x \y { [x->y], [y->y], {y} } -- \x \z { [x->x], [y->y, z->x], {y,x} } rnBndrL :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but used when there's a binder on the left -- side only. rnBndrL (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL = (RV2 { envL = extendVarEnv envL bL new_b , envR = envR , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bL rnBndrR :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but used when there's a binder on the right -- side only. 
rnBndrR (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bR = (RV2 { envR = extendVarEnv envR bR new_b , envL = envL , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bR rnEtaL :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndrL' but used for eta expansion -- See Note [Eta expansion] rnEtaL (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bL = (RV2 { envL = extendVarEnv envL bL new_b , envR = extendVarEnv envR new_b new_b -- Note [Eta expansion] , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bL rnEtaR :: RnEnv2 -> Var -> (RnEnv2, Var) -- ^ Similar to 'rnBndr2' but used for eta expansion -- See Note [Eta expansion] rnEtaR (RV2 { envL = envL, envR = envR, in_scope = in_scope }) bR = (RV2 { envL = extendVarEnv envL new_b new_b -- Note [Eta expansion] , envR = extendVarEnv envR bR new_b , in_scope = extendInScopeSet in_scope new_b }, new_b) where new_b = uniqAway in_scope bR delBndrL, delBndrR :: RnEnv2 -> Var -> RnEnv2 delBndrL rn@(RV2 { envL = env, in_scope = in_scope }) v = rn { envL = env `delVarEnv` v, in_scope = in_scope `extendInScopeSet` v } delBndrR rn@(RV2 { envR = env, in_scope = in_scope }) v = rn { envR = env `delVarEnv` v, in_scope = in_scope `extendInScopeSet` v } delBndrsL, delBndrsR :: RnEnv2 -> [Var] -> RnEnv2 delBndrsL rn@(RV2 { envL = env, in_scope = in_scope }) v = rn { envL = env `delVarEnvList` v, in_scope = in_scope `extendInScopeSetList` v } delBndrsR rn@(RV2 { envR = env, in_scope = in_scope }) v = rn { envR = env `delVarEnvList` v, in_scope = in_scope `extendInScopeSetList` v } rnOccL, rnOccR :: RnEnv2 -> Var -> Var -- ^ Look up the renaming of an occurrence in the left or right term rnOccL (RV2 { envL = env }) v = lookupVarEnv env v `orElse` v rnOccR (RV2 { envR = env }) v = lookupVarEnv env v `orElse` v rnOccL_maybe, rnOccR_maybe :: RnEnv2 -> Var -> Maybe Var -- ^ Look up the renaming of an occurrence in the left or right term rnOccL_maybe (RV2 
{ envL = env }) v = lookupVarEnv env v rnOccR_maybe (RV2 { envR = env }) v = lookupVarEnv env v inRnEnvL, inRnEnvR :: RnEnv2 -> Var -> Bool -- ^ Tells whether a variable is locally bound inRnEnvL (RV2 { envL = env }) v = v `elemVarEnv` env inRnEnvR (RV2 { envR = env }) v = v `elemVarEnv` env lookupRnInScope :: RnEnv2 -> Var -> Var lookupRnInScope env v = lookupInScope (in_scope env) v `orElse` v nukeRnEnvL, nukeRnEnvR :: RnEnv2 -> RnEnv2 -- ^ Wipe the left or right side renaming nukeRnEnvL env = env { envL = emptyVarEnv } nukeRnEnvR env = env { envR = emptyVarEnv } rnSwap :: RnEnv2 -> RnEnv2 -- ^ swap the meaning of left and right rnSwap (RV2 { envL = envL, envR = envR, in_scope = in_scope }) = RV2 { envL = envR, envR = envL, in_scope = in_scope } {- Note [Eta expansion] ~~~~~~~~~~~~~~~~~~~~ When matching (\x.M) ~ N we rename x to x' with, where x' is not in scope in either term. Then we want to behave as if we'd seen (\x'.M) ~ (\x'.N x') Since x' isn't in scope in N, the form (\x'. N x') doesn't capture any variables in N. But we must nevertheless extend the envR with a binding [x' -> x'], to support the occurs check. For example, if we don't do this, we can get silly matches like forall a. (\y.a) ~ v succeeding with [a -> v y], which is bogus of course. ************************************************************************ * * Tidying * * ************************************************************************ -} -- | Tidy Environment -- -- When tidying up print names, we keep a mapping of in-scope occ-names -- (the 'TidyOccEnv') and a Var-to-Var of the current renamings type TidyEnv = (TidyOccEnv, VarEnv Var) emptyTidyEnv :: TidyEnv emptyTidyEnv = (emptyTidyOccEnv, emptyVarEnv) mkEmptyTidyEnv :: TidyOccEnv -> TidyEnv mkEmptyTidyEnv occ_env = (occ_env, emptyVarEnv) delTidyEnvList :: TidyEnv -> [Var] -> TidyEnv delTidyEnvList (occ_env, var_env) vs = (occ_env', var_env') where occ_env' = occ_env `delTidyOccEnvList` map (occNameFS . 
getOccName) vs var_env' = var_env `delVarEnvList` vs {- ************************************************************************ * * VarEnv * * ************************************************************************ -} -- We would like this to be `UniqFM Var elt` -- but the code uses various key types. -- So for now make it explicitly untyped -- | Variable Environment type VarEnv elt = UniqFM Var elt -- | Identifier Environment type IdEnv elt = UniqFM Id elt -- | Type Variable Environment type TyVarEnv elt = UniqFM Var elt -- | Type or Coercion Variable Environment type TyCoVarEnv elt = UniqFM TyCoVar elt -- | Coercion Variable Environment type CoVarEnv elt = UniqFM CoVar elt emptyVarEnv :: VarEnv a mkVarEnv :: [(Var, a)] -> VarEnv a mkVarEnv_Directly :: [(Unique, a)] -> VarEnv a zipVarEnv :: [Var] -> [a] -> VarEnv a unitVarEnv :: Var -> a -> VarEnv a alterVarEnv :: (Maybe a -> Maybe a) -> VarEnv a -> Var -> VarEnv a extendVarEnv :: VarEnv a -> Var -> a -> VarEnv a extendVarEnv_C :: (a->a->a) -> VarEnv a -> Var -> a -> VarEnv a extendVarEnv_Acc :: (a->b->b) -> (a->b) -> VarEnv b -> Var -> a -> VarEnv b plusVarEnv :: VarEnv a -> VarEnv a -> VarEnv a plusVarEnvList :: [VarEnv a] -> VarEnv a extendVarEnvList :: VarEnv a -> [(Var, a)] -> VarEnv a partitionVarEnv :: (a -> Bool) -> VarEnv a -> (VarEnv a, VarEnv a) restrictVarEnv :: VarEnv a -> VarSet -> VarEnv a delVarEnvList :: VarEnv a -> [Var] -> VarEnv a delVarEnv :: VarEnv a -> Var -> VarEnv a minusVarEnv :: VarEnv a -> VarEnv b -> VarEnv a plusVarEnv_C :: (a -> a -> a) -> VarEnv a -> VarEnv a -> VarEnv a plusVarEnv_CD :: (a -> a -> a) -> VarEnv a -> a -> VarEnv a -> a -> VarEnv a plusMaybeVarEnv_C :: (a -> a -> Maybe a) -> VarEnv a -> VarEnv a -> VarEnv a mapVarEnv :: (a -> b) -> VarEnv a -> VarEnv b modifyVarEnv :: (a -> a) -> VarEnv a -> Var -> VarEnv a isEmptyVarEnv :: VarEnv a -> Bool lookupVarEnv :: VarEnv a -> Var -> Maybe a lookupVarEnv_Directly :: VarEnv a -> Unique -> Maybe a filterVarEnv :: (a -> Bool) 
-> VarEnv a -> VarEnv a lookupVarEnv_NF :: VarEnv a -> Var -> a lookupWithDefaultVarEnv :: VarEnv a -> a -> Var -> a elemVarEnv :: Var -> VarEnv a -> Bool elemVarEnvByKey :: Unique -> VarEnv a -> Bool disjointVarEnv :: VarEnv a -> VarEnv a -> Bool elemVarEnv = elemUFM elemVarEnvByKey = elemUFM_Directly disjointVarEnv = disjointUFM alterVarEnv = alterUFM extendVarEnv = addToUFM extendVarEnv_C = addToUFM_C extendVarEnv_Acc = addToUFM_Acc extendVarEnvList = addListToUFM plusVarEnv_C = plusUFM_C plusVarEnv_CD = plusUFM_CD plusMaybeVarEnv_C = plusMaybeUFM_C delVarEnvList = delListFromUFM delVarEnv = delFromUFM minusVarEnv = minusUFM plusVarEnv = plusUFM plusVarEnvList = plusUFMList -- lookupVarEnv is very hot (in part due to being called by substTyVar), -- if it's not inlined than the mere allocation of the Just constructor causes -- perf benchmarks to regress by 2% in some cases. See #21159, !7638 and containers#821 -- for some more explanation about what exactly went wrong. {-# INLINE lookupVarEnv #-} lookupVarEnv = lookupUFM lookupVarEnv_Directly = lookupUFM_Directly filterVarEnv = filterUFM lookupWithDefaultVarEnv = lookupWithDefaultUFM mapVarEnv = mapUFM mkVarEnv = listToUFM mkVarEnv_Directly= listToUFM_Directly emptyVarEnv = emptyUFM unitVarEnv = unitUFM isEmptyVarEnv = isNullUFM partitionVarEnv = partitionUFM restrictVarEnv env vs = filterUFM_Directly keep env where keep u _ = u `elemVarSetByKey` vs zipVarEnv tyvars tys = mkVarEnv (zipEqual "zipVarEnv" tyvars tys) lookupVarEnv_NF env id = case lookupVarEnv env id of Just xx -> xx Nothing -> panic "lookupVarEnv_NF: Nothing" {- @modifyVarEnv@: Look up a thing in the VarEnv, then mash it with the modify function, and put it back. 
-} modifyVarEnv mangle_fn env key = case (lookupVarEnv env key) of Nothing -> env Just xx -> extendVarEnv env key (mangle_fn xx) modifyVarEnv_Directly :: (a -> a) -> UniqFM key a -> Unique -> UniqFM key a modifyVarEnv_Directly mangle_fn env key = case (lookupUFM_Directly env key) of Nothing -> env Just xx -> addToUFM_Directly env key (mangle_fn xx) {- ************************************************************************ * * Deterministic VarEnv (DVarEnv) * * ************************************************************************ -} -- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why we need -- DVarEnv. -- | Deterministic Variable Environment type DVarEnv elt = UniqDFM Var elt -- | Deterministic Identifier Environment -- Sadly not always indexed by Id, but it is in the common case. type DIdEnv elt = UniqDFM Var elt -- | Deterministic Type Variable Environment type DTyVarEnv elt = UniqDFM TyVar elt emptyDVarEnv :: DVarEnv a emptyDVarEnv = emptyUDFM dVarEnvElts :: DVarEnv a -> [a] dVarEnvElts = eltsUDFM mkDVarEnv :: [(Var, a)] -> DVarEnv a mkDVarEnv = listToUDFM extendDVarEnv :: DVarEnv a -> Var -> a -> DVarEnv a extendDVarEnv = addToUDFM minusDVarEnv :: DVarEnv a -> DVarEnv a' -> DVarEnv a minusDVarEnv = minusUDFM lookupDVarEnv :: DVarEnv a -> Var -> Maybe a lookupDVarEnv = lookupUDFM foldDVarEnv :: (a -> b -> b) -> b -> DVarEnv a -> b foldDVarEnv = foldUDFM -- See Note [Deterministic UniqFM] to learn about nondeterminism. -- If you use this please provide a justification why it doesn't introduce -- nondeterminism. 
nonDetStrictFoldDVarEnv :: (a -> b -> b) -> b -> DVarEnv a -> b nonDetStrictFoldDVarEnv = nonDetStrictFoldUDFM mapDVarEnv :: (a -> b) -> DVarEnv a -> DVarEnv b mapDVarEnv = mapUDFM filterDVarEnv :: (a -> Bool) -> DVarEnv a -> DVarEnv a filterDVarEnv = filterUDFM alterDVarEnv :: (Maybe a -> Maybe a) -> DVarEnv a -> Var -> DVarEnv a alterDVarEnv = alterUDFM plusDVarEnv :: DVarEnv a -> DVarEnv a -> DVarEnv a plusDVarEnv = plusUDFM plusDVarEnv_C :: (a -> a -> a) -> DVarEnv a -> DVarEnv a -> DVarEnv a plusDVarEnv_C = plusUDFM_C unitDVarEnv :: Var -> a -> DVarEnv a unitDVarEnv = unitUDFM delDVarEnv :: DVarEnv a -> Var -> DVarEnv a delDVarEnv = delFromUDFM delDVarEnvList :: DVarEnv a -> [Var] -> DVarEnv a delDVarEnvList = delListFromUDFM isEmptyDVarEnv :: DVarEnv a -> Bool isEmptyDVarEnv = isNullUDFM elemDVarEnv :: Var -> DVarEnv a -> Bool elemDVarEnv = elemUDFM extendDVarEnv_C :: (a -> a -> a) -> DVarEnv a -> Var -> a -> DVarEnv a extendDVarEnv_C = addToUDFM_C modifyDVarEnv :: (a -> a) -> DVarEnv a -> Var -> DVarEnv a modifyDVarEnv mangle_fn env key = case (lookupDVarEnv env key) of Nothing -> env Just xx -> extendDVarEnv env key (mangle_fn xx) partitionDVarEnv :: (a -> Bool) -> DVarEnv a -> (DVarEnv a, DVarEnv a) partitionDVarEnv = partitionUDFM extendDVarEnvList :: DVarEnv a -> [(Var, a)] -> DVarEnv a extendDVarEnvList = addListToUDFM anyDVarEnv :: (a -> Bool) -> DVarEnv a -> Bool anyDVarEnv = anyUDFM ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Var/Set.hs0000644000000000000000000003073314472400113020565 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} module GHC.Types.Var.Set ( -- * Var, Id and TyVar set types VarSet, IdSet, TyVarSet, CoVarSet, TyCoVarSet, -- ** Manipulating these sets emptyVarSet, unitVarSet, mkVarSet, extendVarSet, extendVarSetList, elemVarSet, subVarSet, unionVarSet, unionVarSets, mapUnionVarSet, intersectVarSet, intersectsVarSet, disjointVarSet, isEmptyVarSet, delVarSet, 
delVarSetList, delVarSetByKey, minusVarSet, filterVarSet, mapVarSet, anyVarSet, allVarSet, transCloVarSet, fixVarSet, lookupVarSet_Directly, lookupVarSet, lookupVarSetByName, sizeVarSet, seqVarSet, elemVarSetByKey, partitionVarSet, pluralVarSet, pprVarSet, nonDetStrictFoldVarSet, -- * Deterministic Var set types DVarSet, DIdSet, DTyVarSet, DTyCoVarSet, -- ** Manipulating these sets emptyDVarSet, unitDVarSet, mkDVarSet, extendDVarSet, extendDVarSetList, elemDVarSet, dVarSetElems, subDVarSet, unionDVarSet, unionDVarSets, mapUnionDVarSet, intersectDVarSet, dVarSetIntersectVarSet, intersectsDVarSet, disjointDVarSet, isEmptyDVarSet, delDVarSet, delDVarSetList, minusDVarSet, nonDetStrictFoldDVarSet, filterDVarSet, mapDVarSet, dVarSetMinusVarSet, anyDVarSet, allDVarSet, transCloDVarSet, sizeDVarSet, seqDVarSet, partitionDVarSet, dVarSetToVarSet, ) where import GHC.Prelude import GHC.Types.Var ( Var, TyVar, CoVar, TyCoVar, Id ) import GHC.Types.Unique import GHC.Types.Name ( Name ) import GHC.Types.Unique.Set import GHC.Types.Unique.DSet import GHC.Types.Unique.FM( disjointUFM, pluralUFM, pprUFM ) import GHC.Types.Unique.DFM( disjointUDFM, udfmToUfm, anyUDFM, allUDFM ) import GHC.Utils.Outputable (SDoc) -- | A non-deterministic Variable Set -- -- A non-deterministic set of variables. -- See Note [Deterministic UniqFM] in "GHC.Types.Unique.DFM" for explanation why it's not -- deterministic and why it matters. Use DVarSet if the set eventually -- gets converted into a list or folded over in a way where the order -- changes the generated code, for example when abstracting variables. 
type VarSet = UniqSet Var

-- | Identifier Set
type IdSet = UniqSet Id

-- | Type Variable Set
type TyVarSet = UniqSet TyVar

-- | Coercion Variable Set
type CoVarSet = UniqSet CoVar

-- | Type or Coercion Variable Set
type TyCoVarSet = UniqSet TyCoVar

-- Type signatures for the basic VarSet operations.  The bindings that follow
-- are straight aliases for the corresponding UniqSet operations.
emptyVarSet     :: VarSet
intersectVarSet :: VarSet -> VarSet -> VarSet
unionVarSet     :: VarSet -> VarSet -> VarSet
unionVarSets    :: [VarSet] -> VarSet

mapUnionVarSet  :: (a -> VarSet) -> [a] -> VarSet
-- ^ map the function over the list, and union the results

unitVarSet      :: Var -> VarSet
extendVarSet    :: VarSet -> Var -> VarSet
extendVarSetList:: VarSet -> [Var] -> VarSet
elemVarSet      :: Var -> VarSet -> Bool
delVarSet       :: VarSet -> Var -> VarSet
delVarSetList   :: VarSet -> [Var] -> VarSet
minusVarSet     :: VarSet -> VarSet -> VarSet
isEmptyVarSet   :: VarSet -> Bool
mkVarSet        :: [Var] -> VarSet
lookupVarSet_Directly :: VarSet -> Unique -> Maybe Var
lookupVarSet    :: VarSet -> Var -> Maybe Var
                        -- Returns the set element, which may be
                        -- (==) to the argument, but not the same as
lookupVarSetByName :: VarSet -> Name -> Maybe Var
sizeVarSet      :: VarSet -> Int
filterVarSet    :: (Var -> Bool) -> VarSet -> VarSet
delVarSetByKey  :: VarSet -> Unique -> VarSet
elemVarSetByKey :: Unique -> VarSet -> Bool
partitionVarSet :: (Var -> Bool) -> VarSet -> (VarSet, VarSet)

emptyVarSet     = emptyUniqSet
unitVarSet      = unitUniqSet
extendVarSet    = addOneToUniqSet
extendVarSetList= addListToUniqSet
intersectVarSet = intersectUniqSets

intersectsVarSet:: VarSet -> VarSet -> Bool     -- True if non-empty intersection
disjointVarSet  :: VarSet -> VarSet -> Bool     -- True if empty intersection
subVarSet       :: VarSet -> VarSet -> Bool     -- True if first arg is subset of second
        -- (s1 `intersectsVarSet` s2) doesn't compute s2 if s1 is empty;
        -- ditto disjointVarSet, subVarSet

unionVarSet     = unionUniqSets
unionVarSets    = unionManyUniqSets
elemVarSet      = elementOfUniqSet
minusVarSet     = minusUniqSet
delVarSet       = delOneFromUniqSet
delVarSetList   = delListFromUniqSet
isEmptyVarSet   = isEmptyUniqSet
mkVarSet        = mkUniqSet
lookupVarSet_Directly = lookupUniqSet_Directly
lookupVarSet    = lookupUniqSet
lookupVarSetByName set name = lookupUniqSet_Directly set (getUnique name)
sizeVarSet      = sizeUniqSet
filterVarSet    = filterUniqSet
delVarSetByKey  = delOneFromUniqSet_Directly
elemVarSetByKey = elemUniqSet_Directly
partitionVarSet = partitionUniqSet

mapUnionVarSet get_set xs = foldr (unionVarSet . get_set) emptyVarSet xs

-- See comments with type signatures
intersectsVarSet s1 s2 = not (s1 `disjointVarSet` s2)
-- disjointVarSet works on the underlying UniqFMs directly, via getUniqSet
disjointVarSet   s1 s2 = disjointUFM (getUniqSet s1) (getUniqSet s2)
subVarSet        s1 s2 = isEmptyVarSet (s1 `minusVarSet` s2)

anyVarSet :: (Var -> Bool) -> VarSet -> Bool
anyVarSet = uniqSetAny

allVarSet :: (Var -> Bool) -> VarSet -> Bool
allVarSet = uniqSetAll

mapVarSet :: Uniquable b => (a -> b) -> UniqSet a -> UniqSet b
mapVarSet = mapUniqSet

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldVarSet :: (Var -> a -> a) -> a -> VarSet -> a
nonDetStrictFoldVarSet = nonDetStrictFoldUniqSet

fixVarSet :: (VarSet -> VarSet)   -- Map the current set to a new set
          -> VarSet -> VarSet
-- (fixVarSet f s) repeatedly applies f to the set s,
-- until it reaches a fixed point.
fixVarSet fn vars
  | new_vars `subVarSet` vars = vars
  | otherwise                 = fixVarSet fn new_vars
  where
    new_vars = fn vars

transCloVarSet :: (VarSet -> VarSet)
                  -- Map some variables in the set to
                  -- extra variables that should be in it
               -> VarSet -> VarSet
-- (transCloVarSet f s) repeatedly applies f to new candidates, adding any
-- new variables to s that it finds thereby, until it reaches a fixed point.
--
-- The function fn could be (Var -> VarSet), but we use (VarSet -> VarSet)
-- for efficiency, so that the test can be batched up.
-- It's essential that fn will work fine if given new candidates
-- one at a time; ie  fn {v1,v2} = fn v1 `union` fn v2
-- Use fixVarSet if the function needs to see the whole set all at once
transCloVarSet fn seeds
  = go seeds seeds
  where
    go :: VarSet  -- Accumulating result
       -> VarSet  -- Work-list; un-processed subset of accumulating result
       -> VarSet
    -- Specification: go acc vs = acc `union` transClo fn vs
    go acc candidates
       | isEmptyVarSet new_vs = acc
       | otherwise            = go (acc `unionVarSet` new_vs) new_vs
       where
         -- new_vs: variables suggested by fn that we have not already seen
         new_vs = fn candidates `minusVarSet` acc

seqVarSet :: VarSet -> ()
seqVarSet s = s `seq` ()

-- | Determines the pluralisation suffix appropriate for the length of a set
-- in the same way that plural from Outputable does for lists.
pluralVarSet :: VarSet -> SDoc
pluralVarSet = pluralUFM . getUniqSet

-- | Pretty-print a non-deterministic set.
-- The order of variables is non-deterministic and for pretty-printing that
-- shouldn't be a problem.
-- Having this function helps contain the non-determinism created with
-- nonDetEltsUFM.
-- Passing a list to the pretty-printing function allows the caller
-- to decide on the order of Vars (eg. toposort them) without them having
-- to use nonDetEltsUFM at the call site. This prevents from let-binding
-- non-deterministically ordered lists and reusing them where determinism
-- matters.
pprVarSet :: VarSet          -- ^ The things to be pretty printed
          -> ([Var] -> SDoc) -- ^ The pretty printing function to use on the
                             -- elements
          -> SDoc            -- ^ 'SDoc' where the things have been pretty
                             -- printed
pprVarSet = pprUFM . getUniqSet

-- Deterministic VarSet

-- See Note [Deterministic UniqFM] in GHC.Types.Unique.DFM for explanation why we need
-- DVarSet.
-- | Deterministic Variable Set
type DVarSet = UniqDSet Var

-- | Deterministic Identifier Set
type DIdSet = UniqDSet Id

-- | Deterministic Type Variable Set
type DTyVarSet = UniqDSet TyVar

-- | Deterministic Type or Coercion Variable Set
type DTyCoVarSet = UniqDSet TyCoVar

-- The DVarSet operations below are straight aliases for the corresponding
-- deterministic UniqDSet operations.

emptyDVarSet :: DVarSet
emptyDVarSet = emptyUniqDSet

unitDVarSet :: Var -> DVarSet
unitDVarSet = unitUniqDSet

mkDVarSet :: [Var] -> DVarSet
mkDVarSet = mkUniqDSet

-- The new element always goes to the right of existing ones.
extendDVarSet :: DVarSet -> Var -> DVarSet
extendDVarSet = addOneToUniqDSet

elemDVarSet :: Var -> DVarSet -> Bool
elemDVarSet = elementOfUniqDSet

dVarSetElems :: DVarSet -> [Var]
dVarSetElems = uniqDSetToList

-- Subset test via set difference; mirrors subVarSet above.
subDVarSet :: DVarSet -> DVarSet -> Bool
subDVarSet s1 s2 = isEmptyDVarSet (s1 `minusDVarSet` s2)

unionDVarSet :: DVarSet -> DVarSet -> DVarSet
unionDVarSet = unionUniqDSets

unionDVarSets :: [DVarSet] -> DVarSet
unionDVarSets = unionManyUniqDSets

-- | Map the function over the list, and union the results
mapUnionDVarSet :: (a -> DVarSet) -> [a] -> DVarSet
mapUnionDVarSet get_set xs = foldr (unionDVarSet . get_set) emptyDVarSet xs

intersectDVarSet :: DVarSet -> DVarSet -> DVarSet
intersectDVarSet = intersectUniqDSets

dVarSetIntersectVarSet :: DVarSet -> VarSet -> DVarSet
dVarSetIntersectVarSet = uniqDSetIntersectUniqSet

-- | True if empty intersection
disjointDVarSet :: DVarSet -> DVarSet -> Bool
disjointDVarSet s1 s2 = disjointUDFM (getUniqDSet s1) (getUniqDSet s2)

-- | True if non-empty intersection
intersectsDVarSet :: DVarSet -> DVarSet -> Bool
intersectsDVarSet s1 s2 = not (s1 `disjointDVarSet` s2)

isEmptyDVarSet :: DVarSet -> Bool
isEmptyDVarSet = isEmptyUniqDSet

delDVarSet :: DVarSet -> Var -> DVarSet
delDVarSet = delOneFromUniqDSet

minusDVarSet :: DVarSet -> DVarSet -> DVarSet
minusDVarSet = minusUniqDSet

dVarSetMinusVarSet :: DVarSet -> VarSet -> DVarSet
dVarSetMinusVarSet = uniqDSetMinusUniqSet

-- See Note [Deterministic UniqFM] to learn about nondeterminism.
-- If you use this please provide a justification why it doesn't introduce
-- nondeterminism.
nonDetStrictFoldDVarSet :: (Var -> a -> a) -> a -> DVarSet -> a
nonDetStrictFoldDVarSet = nonDetStrictFoldUniqDSet

-- anyDVarSet/allDVarSet go through the underlying UDFM, via getUniqDSet
anyDVarSet :: (Var -> Bool) -> DVarSet -> Bool
anyDVarSet p = anyUDFM p . getUniqDSet

allDVarSet :: (Var -> Bool) -> DVarSet -> Bool
allDVarSet p = allUDFM p . getUniqDSet

mapDVarSet :: Uniquable b => (a -> b) -> UniqDSet a -> UniqDSet b
mapDVarSet = mapUniqDSet

filterDVarSet :: (Var -> Bool) -> DVarSet -> DVarSet
filterDVarSet = filterUniqDSet

sizeDVarSet :: DVarSet -> Int
sizeDVarSet = sizeUniqDSet

-- | Partition DVarSet according to the predicate given
partitionDVarSet :: (Var -> Bool) -> DVarSet -> (DVarSet, DVarSet)
partitionDVarSet = partitionUniqDSet

-- | Delete a list of variables from DVarSet
delDVarSetList :: DVarSet -> [Var] -> DVarSet
delDVarSetList = delListFromUniqDSet

seqDVarSet :: DVarSet -> ()
seqDVarSet s = s `seq` ()

-- | Add a list of variables to DVarSet
extendDVarSetList :: DVarSet -> [Var] -> DVarSet
extendDVarSetList = addListToUniqDSet

-- | Convert a DVarSet to a VarSet by forgetting the order of insertion
dVarSetToVarSet :: DVarSet -> VarSet
dVarSetToVarSet = unsafeUFMToUniqSet . udfmToUfm . getUniqDSet

-- | transCloVarSet for DVarSet
transCloDVarSet :: (DVarSet -> DVarSet)
                   -- Map some variables in the set to
                   -- extra variables that should be in it
                -> DVarSet -> DVarSet
-- (transCloDVarSet f s) repeatedly applies f to new candidates, adding any
-- new variables to s that it finds thereby, until it reaches a fixed point.
--
-- The function fn could be (Var -> DVarSet), but we use (DVarSet -> DVarSet)
-- for efficiency, so that the test can be batched up.
-- It's essential that fn will work fine if given new candidates -- one at a time; ie fn {v1,v2} = fn v1 `union` fn v2 transCloDVarSet fn seeds = go seeds seeds where go :: DVarSet -- Accumulating result -> DVarSet -- Work-list; un-processed subset of accumulating result -> DVarSet -- Specification: go acc vs = acc `union` transClo fn vs go acc candidates | isEmptyDVarSet new_vs = acc | otherwise = go (acc `unionDVarSet` new_vs) new_vs where new_vs = fn candidates `minusDVarSet` acc ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/UniqueSubdir.hs0000644000000000000000000000125014470055371022463 0ustar0000000000000000module GHC.UniqueSubdir ( uniqueSubdir ) where import Prelude -- See Note [Why do we import Prelude here?] import Data.List (intercalate) import GHC.Platform.ArchOS import GHC.Version (cProjectVersion) -- | A filepath like @x86_64-linux-7.6.3@ with the platform string to use when -- constructing platform-version-dependent files that need to co-exist. uniqueSubdir :: ArchOS -> FilePath uniqueSubdir (ArchOS arch os) = intercalate "-" [ stringEncodeArch arch , stringEncodeOS os , cProjectVersion ] -- NB: This functionality is reimplemented in Cabal, so if you -- change it, be sure to update Cabal. -- TODO make Cabal use this now that it is in ghc-boot. ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit.hs0000644000000000000000000003707014472400112017115 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} -- | Units are library components from Cabal packages compiled and installed in -- a database module GHC.Unit ( module GHC.Unit.Types , module GHC.Unit.Info , module GHC.Unit.Parser , module GHC.Unit.State , module GHC.Unit.Module , module GHC.Unit.Home ) where import GHC.Unit.Types import GHC.Unit.Info import GHC.Unit.Parser import GHC.Unit.Module import GHC.Unit.Home import GHC.Unit.State {- Note [About units] ~~~~~~~~~~~~~~~~~~ Haskell users are used to manipulating Cabal packages. 
These packages are identified by: - a package name :: String - a package version :: Version - (a revision number, when they are registered on Hackage) Cabal packages may contain several components (libraries, programs, testsuites). In GHC we are mostly interested in libraries because those are the components that can be depended upon by other components. Components in a package are identified by their component name. Historically only one library component was allowed per package, hence it didn't need a name. For this reason, component name may be empty for one library component in each package: - a component name :: Maybe String UnitId ------ Cabal libraries can be compiled in various ways (different compiler options or Cabal flags, different dependencies, etc.), hence using package name, package version and component name isn't enough to identify a built library. We use another identifier called UnitId: package name \ package version | ________ component name | hash of all this ==> | UnitId | Cabal flags | -------- compiler options | dependencies' UnitId / Fortunately GHC doesn't have to generate these UnitId: they are provided by external build tools (e.g. Cabal) with `-this-unit-id` command-line parameter. UnitIds are important because they are used to generate internal names (symbols, etc.). Wired-in units -------------- Certain libraries (ghc-prim, base, etc.) are known to the compiler and to the RTS as they provide some basic primitives. Hence UnitIds of wired-in libraries are fixed. Instead of letting Cabal chose the UnitId for these libraries, their .cabal file uses the following stanza to force it to a specific value: ghc-options: -this-unit-id ghc-prim -- taken from ghc-prim.cabal The RTS also uses entities of wired-in units by directly referring to symbols such as "base_GHCziIOziException_heapOverflow_closure" where the prefix is the UnitId of "base" unit. 
Unit databases -------------- Units are stored in databases in order to be reused by other codes: UnitKey ---> UnitInfo { exposed modules, package name, package version component name, various file paths, dependencies :: [UnitKey], etc. } Because of the wired-in units described above, we can't exactly use UnitIds as UnitKeys in the database: if we did this, we could only have a single unit (compiled library) in the database for each wired-in library. As we want to support databases containing several different units for the same wired-in library, we do this: * for non wired-in units: * UnitId = UnitKey = Identifier (hash) computed by Cabal * for wired-in units: * UnitKey = Identifier computed by Cabal (just like for non wired-in units) * UnitId = unit-id specified with -this-unit-id command-line flag We can expose several units to GHC via the `package-id ` command-line parameter. We must use the UnitKeys of the units so that GHC can find them in the database. During unit loading, GHC replaces UnitKeys with UnitIds. It identifies wired units by their package name (stored in their UnitInfo) and uses wired-in UnitIds for them. For example, knowing that "base", "ghc-prim" and "rts" are wired-in units, the following dependency graph expressed with database UnitKeys will be transformed into a similar graph expressed with UnitIds: UnitKeys ~~~~~~~~ ----------> rts-1.0-hashABC <-- | | | | foo-2.0-hash123 --> base-4.1-hashXYZ ---> ghc-prim-0.5.3-hashUVW UnitIds ~~~~~~~ ---------------> rts <-- | | | | foo-2.0-hash123 --> base ---------------> ghc-prim Note that "foo-2.0-hash123" isn't wired-in so its UnitId is the same as its UnitKey. Module signatures / indefinite units / instantiated units --------------------------------------------------------- GHC distinguishes two kinds of units: * definite units: * units without module holes and with definite dependencies * can be compiled into machine code (.o/.a/.so/.dll/...) 
* indefinite units: * units with some module holes or with some indefinite dependencies * can only be type-checked Module holes are constrained by module signatures (.hsig files). Module signatures are a kind of interface (similar to .hs-boot files). They are used in place of some real code. GHC allows modules from other units to be used to fill these module holes: the process is called "unit/module instantiation". The instantiating module may either be a concrete module or a module signature. In the latter case, the signatures are merged to form a new one. You can think of this as polymorphism at the module level: module signatures give constraints on the "type" of module that can be used to fill the hole (where "type" means types of the exported module entitites, etc.). Module signatures contain enough information (datatypes, abstract types, type synonyms, classes, etc.) to typecheck modules depending on them but not enough to compile them. As such, indefinite units found in databases only provide module interfaces (the .hi ones this time), not object code. Unit instantiation / on-the-fly instantiation --------------------------------------------- Indefinite units can be instantiated with modules from other units. The instantiating units can also be instantiated themselves (if there are indefinite) and so on. On-the-fly unit instantiation is a tricky optimization explained in http://blog.ezyang.com/2016/08/optimizing-incremental-compilation Here is a summary: 1. Indefinite units can only be type-checked, not compiled into real code. Type-checking produces interface files (.hi) which are incomplete for code generation (they lack unfoldings, etc.) but enough to perform type-checking of units depending on them. 2. Type-checking an instantiated unit is cheap as we only have to merge interface files (.hi) of the instantiated unit and of the instantiating units, hence it can be done on-the-fly. 
Interface files of the dependencies can be concrete or produced on-the-fly recursively. 3. When we compile a unit, we mustn't use interfaces produced by the type-checker (on-the-fly or not) for the instantiated unit dependencies because they lack some information. 4. When we type-check an indefinite unit, we must be consistent about the interfaces we use for each dependency: only those produced by the type-checker (on-the-fly or not) or only those produced after a full compilation, but not both at the same time. It can be tricky if we have the following kind of dependency graph: X (indefinite) ------> D (definite, compiled) -----> I (instantiated, definite, compiled) |----------------------------------------------------^ Suppose we want to type-check unit X which depends on unit I and D: * I is definite and compiled: we have compiled .hi files for its modules on disk * I is instantiated: it is cheap to produce type-checker .hi files for its modules on-the-fly But we must not do: X (indefinite) ------> D (definite, compiled) -----> I (instantiated, definite, compiled) |--------------------------------------------------> I (instantiated on-the-fly) ==> inconsistent module interfaces for I Nor: X (indefinite) ------> D (definite, compiled) -------v |--------------------------------------------------> I (instantiated on-the-fly) ==> D's interfaces may refer to things that only exist in I's *compiled* interfaces An alternative would be to store both type-checked and compiled interfaces for every compiled non-instantiated unit (instantiated unit can be done on-the-fly) so that we could use type-checked interfaces of D in the example above. But it would increase compilation time and unit size. 
The 'Unit' datatype represents a unit which may have been instantiated on-the-fly: data Unit = RealUnit DefUnitId -- use compiled interfaces on disk | VirtUnit InstantiatedUnit -- use on-the-fly instantiation 'InstantiatedUnit' has two interesting fields: * instUnitInstanceOf :: UnitId -- ^ the indefinite unit that is instantiated * instUnitInsts :: [(ModuleName,(Unit,ModuleName)] -- ^ a list of instantiations, where an instantiation is: (module hole name, (instantiating unit, instantiating module name)) A 'VirtUnit' may be indefinite or definite, it depends on whether some holes remain in the instantiated unit OR in the instantiating units (recursively). Having a fully instantiated (i.e. definite) virtual unit can lead to some issues if there is a matching compiled unit in the preload closure. See Note [VirtUnit to RealUnit improvement] Unit database and indefinite units ---------------------------------- We don't store partially instantiated units in the unit database. Units in the database are either: * definite (fully instantiated or without holes): in this case we have *compiled* module interfaces (.hi) and object codes (.o/.a/.so/.dll/...). * fully indefinite (not instantiated at all): in this case we only have *type-checked* module interfaces (.hi). Note that indefinite units are stored as an instantiation of themselves where each instantiating module is a module variable (see Note [Representation of module/name variables]). E.g. "xyz" (UnitKey) ---> UnitInfo { instanceOf = "xyz" , instantiatedWith = [A=,B=...] , ... } Note that non-instantiated units are also stored as an instantiation of themselves. It is a reminiscence of previous terminology (when "instanceOf" was "componentId"). E.g. "xyz" (UnitKey) ---> UnitInfo { instanceOf = "xyz" , instantiatedWith = [] , ... } TODO: We should probably have `instanceOf :: Maybe UnitId` instead. 
Note [Pretty-printing UnitId] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we pretty-print a UnitId for the user, we try to map it back to its origin package name, version and component to print "package-version:component" instead of some hash. How to retrieve these information from a UnitId? Solution 0: ask for a UnitState to be passed each time we want to pretty-print a SDoc so that the Outputable instance for UnitId could retrieve the information from it. That what we used in the past: a DynFlags was passed and the UnitState was retrieved from it. This is wrong for several reasons: 1. The UnitState is accessed when the message is printed, not when it is generated. So we could imagine that the UnitState could have changed in-between. Especially if we want to allow unit unloading. 2. We want GHC to support several independent sessions at once, hence several UnitState. This approach supposes there is a unique UnitState (the one given at printing-time), moreover a UnitId doesn't indicate which UnitState it comes from (think about statically defined UnitId for wired-in units). Solution 1: an obvious approach would be to store the required information in the UnitId itself. However it doesn't work because some UnitId are defined statically for wired-in units and the same UnitId can map to different units in different contexts. This solution would make wired-in units harder to deal with. Solution 2: another approach would be to thread the UnitState to all places where a UnitId is pretty-printed and to retrieve the information from the UnitState only when needed. It would mean that UnitId couldn't have an Outputable instance as it would need an additional UnitState parameter to be printed. It means that many other types couldn't have an Outputable instance either: Unit, Module, Name, InstEnv, etc. Too many to make this solution feasible. 
Solution 3: the approach we use is a compromise between solutions 0 and 2: the appropriate UnitState has to be threaded close enough to the function generating the SDoc so that the latter can use `pprWithUnitState` to set the UnitState to fetch information from. However the UnitState doesn't have to be threaded explicitly all the way down to the point where the UnitId itself is printed: instead the Outputable instance of UnitId fetches the "sdocUnitIdForUser" field in the SDocContext to pretty-print. 1. We can still have Outputable instances for common types (Module, Unit, Name, etc.) 2. End-users don't have to pass a UnitState (via a DynFlags) to print a SDoc. 3. By default "sdocUnitIdForUser" prints the UnitId hash. In case of a bug (i.e. GHC doesn't correctly call `pprWithUnitState` before pretty-printing a UnitId), that's what will be shown to the user so it's no big deal. Note [VirtUnit to RealUnit improvement] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Over the course of instantiating VirtUnits on the fly while typechecking an indefinite library, we may end up with a fully instantiated VirtUnit. I.e. one that could be compiled and installed in the database. During type-checking we generate a virtual UnitId for it, say "abc". Now the question is: do we have a matching installed unit in the database? Suppose we have one with UnitId "xyz" (provided by Cabal so we don't know how to generate it). The trouble is that if both units end up being used in the same type-checking session, their names won't match (e.g. "abc:M.X" vs "xyz:M.X"). As we want them to match we just replace the virtual unit with the installed one: for some reason this is called "improvement". There is one last niggle: improvement based on the unit database means that we might end up developing on a unit that is not transitively depended upon by the units the user specified directly via command line flags. 
This could lead to strange and difficult to understand bugs if those instantiations are out of date. The solution is to only improve a unit id if the new unit id is part of the 'preloadClosure'; i.e., the closure of all the units which were explicitly specified. Note [Representation of module/name variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In our ICFP'16, we use to represent module holes, and {A.T} to represent name holes. This could have been represented by adding some new cases to the core data types, but this would have made the existing 'moduleName' and 'moduleUnit' partial, which would have required a lot of modifications to existing code. Instead, we use a fake "hole" unit: ===> hole:A {A.T} ===> hole:A.T This encoding is quite convenient, but it is also a bit dangerous too, because if you have a 'hole:A' you need to know if it's actually a 'Module' or just a module stored in a 'Name'; these two cases must be treated differently when doing substitutions. 'renameHoleModule' and 'renameHoleUnit' assume they are NOT operating on a 'Name'; 'NameShape' handles name substitutions exclusively. 
-} ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Unit/Database.hs0000644000000000000000000006502414470055371022500 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-name-shadowing #-} {-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeSynonymInstances #-} {-# LANGUAGE ExplicitNamespaces #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE OverloadedStrings #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.Unit.Database -- Copyright : (c) The University of Glasgow 2009, Duncan Coutts 2014 -- -- Maintainer : ghc-devs@haskell.org -- Portability : portable -- -- This module provides the view of GHC's database of registered packages that -- is shared between GHC the compiler\/library, and the ghc-pkg program. It -- defines the database format that is shared between GHC and ghc-pkg. -- -- The database format, and this library are constructed so that GHC does not -- have to depend on the Cabal library. The ghc-pkg program acts as the -- gateway between the external package format (which is defined by Cabal) and -- the internal package format which is specialised just for GHC. -- -- GHC the compiler only needs some of the information which is kept about -- registered packages, such as module names, various paths etc. On the other -- hand ghc-pkg has to keep all the information from Cabal packages and be able -- to regurgitate it for users and other tools. -- -- The first trick is that we duplicate some of the information in the package -- database. 
We essentially keep two versions of the database in one file, one -- version used only by ghc-pkg which keeps the full information (using the -- serialised form of the 'InstalledPackageInfo' type defined by the Cabal -- library); and a second version written by ghc-pkg and read by GHC which has -- just the subset of information that GHC needs. -- -- The second trick is that this module only defines in detail the format of -- the second version -- the bit GHC uses -- and the part managed by ghc-pkg -- is kept in the file but here we treat it as an opaque blob of data. That way -- this library avoids depending on Cabal. -- module GHC.Unit.Database ( GenericUnitInfo(..) , type DbUnitInfo , DbModule (..) , DbInstUnitId (..) , mapGenericUnitInfo -- * Read and write , DbMode(..) , DbOpenMode(..) , isDbOpenReadMode , readPackageDbForGhc , readPackageDbForGhcPkg , writePackageDb -- * Locking , PackageDbLock , lockPackageDb , unlockPackageDb -- * Misc , mkMungePathUrl , mungeUnitInfoPaths ) where import Prelude -- See note [Why do we import Prelude here?] import Data.Version (Version(..)) import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BS.Char8 import qualified Data.ByteString.Lazy as BS.Lazy import qualified Data.ByteString.Lazy.Internal as BS.Lazy (defaultChunkSize) import qualified Data.Foldable as F import qualified Data.Traversable as F import Data.Bifunctor import Data.Binary as Bin import Data.Binary.Put as Bin import Data.Binary.Get as Bin import Data.List (intersperse) import Control.Exception as Exception import Control.Monad (when) import System.FilePath as FilePath #if !defined(mingw32_HOST_OS) import Data.Bits ((.|.)) import System.Posix.Files import System.Posix.Types (FileMode) #endif import System.IO import System.IO.Error import GHC.IO.Exception (IOErrorType(InappropriateType)) import qualified GHC.Data.ShortText as ST import GHC.IO.Handle.Lock import System.Directory -- | @ghc-boot@'s UnitInfo, serialized to the database. 
type DbUnitInfo = GenericUnitInfo BS.ByteString BS.ByteString BS.ByteString BS.ByteString DbModule

-- | Information about a unit (a unit is an installed module library).
--
-- This is a subset of Cabal's 'InstalledPackageInfo', with just the bits
-- that GHC is interested in.
--
-- Some types are left as parameters to be instantiated differently in ghc-pkg
-- and in ghc itself.
--
data GenericUnitInfo srcpkgid srcpkgname uid modulename mod = GenericUnitInfo
   { unitId :: uid
      -- ^ Unique unit identifier that is used during compilation (e.g. to
      -- generate symbols).

   , unitInstanceOf :: uid
      -- ^ Identifier of an indefinite unit (i.e. with module holes) that this
      -- unit is an instance of.
      --
      -- For non instantiated units, unitInstanceOf=unitId

   , unitInstantiations :: [(modulename, mod)]
      -- ^ How this unit instantiates some of its module holes. Map hole module
      -- names to actual module

   , unitPackageId :: srcpkgid
      -- ^ Source package identifier.
      --
      -- Cabal instantiates this with Distribution.Types.PackageId.PackageId
      -- type which only contains the source package name and version. Notice
      -- that it doesn't contain the Hackage revision, nor any kind of hash.

   , unitPackageName :: srcpkgname
      -- ^ Source package name

   , unitPackageVersion :: Version
      -- ^ Source package version

   , unitComponentName :: Maybe srcpkgname
      -- ^ Name of the component.
      --
      -- Cabal supports more than one component (libraries, executables,
      -- testsuites) in the same package. Each component has a name except the
      -- default one (that can only be a library component) for which we use
      -- "Nothing".
      --
      -- GHC only deals with "library" components as they are the only kind of
      -- components that can be registered in a database and used by other
      -- modules.

   , unitAbiHash :: ST.ShortText
      -- ^ ABI hash used to avoid mixing up units compiled with different
      -- dependencies, compiler, options, etc.

   , unitDepends :: [uid]
      -- ^ Identifiers of the units this one depends on

   , unitAbiDepends :: [(uid, ST.ShortText)]
      -- ^ Like 'unitDepends', but each dependency is annotated with the ABI hash
      -- we expect the dependency to respect.

   , unitImportDirs :: [FilePathST]
      -- ^ Directories containing module interfaces

   , unitLibraries :: [ST.ShortText]
      -- ^ Names of the Haskell libraries provided by this unit

   , unitExtDepLibsSys :: [ST.ShortText]
      -- ^ Names of the external system libraries that this unit depends on. See
      -- also `unitExtDepLibsGhc` field.

   , unitExtDepLibsGhc :: [ST.ShortText]
      -- ^ Because of slight differences between the GHC dynamic linker (in
      -- GHC.Runtime.Linker) and the native system linker, some packages have to
      -- link with a different list of libraries when using GHC's. Examples
      -- include: libs that are actually gnu ld scripts, and the possibility
      -- that the .a libs do not exactly match the .so/.dll equivalents.
      --
      -- If this field is set, then we use that instead of the
      -- `unitExtDepLibsSys` field.

   , unitLibraryDirs :: [FilePathST]
      -- ^ Directories containing libraries provided by this unit. See also
      -- `unitLibraryDynDirs`.
      --
      -- It seems to be used to store paths to external library dependencies
      -- too.

   , unitLibraryDynDirs :: [FilePathST]
      -- ^ Directories containing the dynamic libraries provided by this unit.
      -- See also `unitLibraryDirs`.
      --
      -- It seems to be used to store paths to external dynamic library
      -- dependencies too.

   , unitExtDepFrameworks :: [ST.ShortText]
      -- ^ Names of the external MacOS frameworks that this unit depends on.

   , unitExtDepFrameworkDirs :: [FilePathST]
      -- ^ Directories containing MacOS frameworks that this unit depends
      -- on.

   , unitLinkerOptions :: [ST.ShortText]
      -- ^ Linker (e.g. ld) command line options

   , unitCcOptions :: [ST.ShortText]
      -- ^ C compiler options that needs to be passed to the C compiler when we
      -- compile some C code against this unit.

   , unitIncludes :: [ST.ShortText]
      -- ^ C header files that are required by this unit (provided by this unit
      -- or external)

   , unitIncludeDirs :: [FilePathST]
      -- ^ Directories containing C header files that this unit depends
      -- on.

   , unitHaddockInterfaces :: [FilePathST]
      -- ^ Paths to Haddock interface files for this unit

   , unitHaddockHTMLs :: [FilePathST]
      -- ^ Paths to Haddock directories containing HTML files

   , unitExposedModules :: [(modulename, Maybe mod)]
      -- ^ Modules exposed by the unit.
      --
      -- A module can be re-exported from another package. In this case, we
      -- indicate the module origin in the second parameter.

   , unitHiddenModules :: [modulename]
      -- ^ Hidden modules.
      --
      -- These are useful for error reporting (e.g. if a hidden module is
      -- imported)

   , unitIsIndefinite :: Bool
      -- ^ True if this unit has some module holes that need to be instantiated
      -- with real modules to make the unit usable (a.k.a. Backpack).

   , unitIsExposed :: Bool
      -- ^ True if the unit is exposed. A unit could be installed in a database
      -- but "disabled" by not being exposed.

   , unitIsTrusted :: Bool
      -- ^ True if the unit is trusted (cf Safe Haskell)
   }
   deriving (Eq, Show)

-- | File paths are stored as 'ShortText' in the database types.
type FilePathST = ST.ShortText

-- | Convert between GenericUnitInfo instances
mapGenericUnitInfo
   :: (uid1 -> uid2)
   -> (srcpkg1 -> srcpkg2)
   -> (srcpkgname1 -> srcpkgname2)
   -> (modname1 -> modname2)
   -> (mod1 -> mod2)
   -> (GenericUnitInfo srcpkg1 srcpkgname1 uid1 modname1 mod1
       -> GenericUnitInfo srcpkg2 srcpkgname2 uid2 modname2 mod2)
mapGenericUnitInfo fuid fsrcpkg fsrcpkgname fmodname fmod g@(GenericUnitInfo {..}) =
   -- Only the fields whose type mentions a type parameter need updating;
   -- the record-update syntax leaves the monomorphic fields untouched.
   g { unitId              = fuid unitId
     , unitInstanceOf      = fuid unitInstanceOf
     , unitInstantiations  = fmap (bimap fmodname fmod) unitInstantiations
     , unitPackageId       = fsrcpkg unitPackageId
     , unitPackageName     = fsrcpkgname unitPackageName
     , unitComponentName   = fmap fsrcpkgname unitComponentName
     , unitDepends         = fmap fuid unitDepends
     , unitAbiDepends      = fmap (first fuid) unitAbiDepends
     , unitExposedModules  = fmap (bimap fmodname (fmap fmod)) unitExposedModules
     , unitHiddenModules   = fmap fmodname unitHiddenModules
     }

-- | @ghc-boot@'s 'Module', serialized to the database.
data DbModule
   = DbModule
      { dbModuleUnitId :: DbInstUnitId
      , dbModuleName   :: BS.ByteString
      }
   | DbModuleVar
      { dbModuleVarName :: BS.ByteString
      }
   deriving (Eq, Show)

-- | @ghc-boot@'s instantiated unit id, serialized to the database.
data DbInstUnitId
   -- | Instantiated unit
   = DbInstUnitId
      BS.ByteString               -- component id
      [(BS.ByteString, DbModule)] -- instantiations: [(modulename,module)]

   -- | Uninstantiated unit
   | DbUnitId
      BS.ByteString               -- unit id
   deriving (Eq, Show)

-- | Represents a lock of a package db.
newtype PackageDbLock = PackageDbLock Handle

-- | Acquire an exclusive lock related to package DB under given location.
lockPackageDb :: FilePath -> IO PackageDbLock

-- | Release the lock related to package DB.
unlockPackageDb :: PackageDbLock -> IO ()

-- | Acquire a lock of given type related to package DB under given location.
lockPackageDbWith :: LockMode -> FilePath -> IO PackageDbLock
lockPackageDbWith mode file = do
  -- We are trying to open the lock file and then lock it. Thus the lock file
  -- needs to either exist or we need to be able to create it. Ideally we
  -- would not assume that the lock file always exists in advance. When we are
  -- dealing with a package DB where we have write access then if the lock
  -- file does not exist then we can create it by opening the file in
  -- read/write mode. On the other hand if we are dealing with a package DB
  -- where we do not have write access (e.g. a global DB) then we can only
  -- open in read mode, and the lock file had better exist already or we're in
  -- trouble. So for global read-only DBs on platforms where we must lock the
  -- DB for reading then we will require that the installer/packaging has
  -- included the lock file.
  --
  -- Thus the logic here is to first try opening in read-write mode
  -- and if that fails we try read-only (to handle global read-only DBs).
  -- If either succeed then lock the file. IO exceptions (other than the first
  -- open attempt failing due to the file not existing) simply propagate.
  --
  -- Note that there is a complexity here which was discovered in #13945: some
  -- filesystems (e.g. NFS) will only allow exclusive locking if the fd was
  -- opened for write access. We would previously try opening the lockfile for
  -- read-only access first, however this failed when run on such filesystems.
  -- Consequently, we now try read-write access first, falling back to read-only
  -- if we are denied permission (e.g. in the case of a global database).
  catchJust
    (\e -> if isPermissionError e then Just () else Nothing)
    (lockFileOpenIn ReadWriteMode)
    (const $ lockFileOpenIn ReadMode)
  where
    -- The lock file lives next to the database, with a ".lock" extension.
    lock = file <.> "lock"

    lockFileOpenIn io_mode = bracketOnError
      (openBinaryFile lock io_mode)
      hClose
      -- If file locking support is not available, ignore the error and proceed
      -- normally. Without it the only thing we lose on non-Windows platforms is
      -- the ability to safely issue concurrent updates to the same package db.
      $ \hnd -> do
          hLock hnd mode `catch` \FileLockingNotSupported -> return ()
          return $ PackageDbLock hnd

lockPackageDb = lockPackageDbWith ExclusiveLock
unlockPackageDb (PackageDbLock hnd) = do
  hUnlock hnd
  hClose hnd

-- | Mode to open a package db in.
data DbMode = DbReadOnly | DbReadWrite

-- | 'DbOpenMode' holds a value of type @t@ but only in 'DbReadWrite' mode. So
-- it is like 'Maybe' but with a type argument for the mode to enforce that the
-- mode is used consistently.
data DbOpenMode (mode :: DbMode) t where
  DbOpenReadOnly  ::      DbOpenMode 'DbReadOnly t
  DbOpenReadWrite :: t -> DbOpenMode 'DbReadWrite t

deriving instance Functor (DbOpenMode mode)
deriving instance F.Foldable (DbOpenMode mode)
deriving instance F.Traversable (DbOpenMode mode)

isDbOpenReadMode :: DbOpenMode mode t -> Bool
isDbOpenReadMode = \case
  DbOpenReadOnly    -> True
  DbOpenReadWrite{} -> False

-- | Read the part of the package DB that GHC is interested in.
--
readPackageDbForGhc :: FilePath -> IO [DbUnitInfo]
readPackageDbForGhc file =
  decodeFromFile file DbOpenReadOnly getDbForGhc >>= \case
    (pkgs, DbOpenReadOnly) -> return pkgs
  where
    getDbForGhc = do
      _version    <- getHeader
      _ghcPartLen <- get :: Get Word32
      ghcPart     <- get
      -- the next part is for ghc-pkg, but we stop here.
      return ghcPart

-- | Read the part of the package DB that ghc-pkg is interested in
--
-- Note that the Binary instance for ghc-pkg's representation of packages
-- is not defined in this package. This is because ghc-pkg uses Cabal types
-- (and Binary instances for these) which this package does not depend on.
--
-- If we open the package db in read only mode, we get its contents. Otherwise
-- we additionally receive a PackageDbLock that represents a lock on the
-- database, so that we can safely update it later.
--
readPackageDbForGhcPkg :: Binary pkgs => FilePath -> DbOpenMode mode t ->
                          IO (pkgs, DbOpenMode mode PackageDbLock)
readPackageDbForGhcPkg file mode =
    decodeFromFile file mode getDbForGhcPkg
  where
    getDbForGhcPkg = do
      _version <- getHeader
      -- skip over the ghc part
      ghcPartLen <- get :: Get Word32
      _ghcPart   <- skip (fromIntegral ghcPartLen)
      -- the next part is for ghc-pkg
      ghcPkgPart <- get
      return ghcPkgPart

-- | Write the whole of the package DB, both parts.
--
writePackageDb :: Binary pkgs => FilePath -> [DbUnitInfo] -> pkgs -> IO ()
writePackageDb file ghcPkgs ghcPkgPart = do
  writeFileAtomic file (runPut putDbForGhcPkg)
#if !defined(mingw32_HOST_OS)
  -- In case the current umask is too restrictive force all read bits to
  -- allow access.
  addFileMode file 0o444
#endif
  return ()
  where
    -- Layout: header, length of the GHC part, GHC part, ghc-pkg part.
    -- The length prefix lets readers skip the GHC part (see
    -- 'readPackageDbForGhcPkg') or stop after it (see 'readPackageDbForGhc').
    putDbForGhcPkg = do
        putHeader
        put               ghcPartLen
        putLazyByteString ghcPart
        put               ghcPkgPart
      where
        ghcPartLen :: Word32
        ghcPartLen = fromIntegral (BS.Lazy.length ghcPart)
        ghcPart    = encode ghcPkgs

#if !defined(mingw32_HOST_OS)
-- | Add the given permission bits to the file's current mode (POSIX only).
addFileMode :: FilePath -> FileMode -> IO ()
addFileMode file m = do
  o <- fileMode <$> getFileStatus file
  setFileMode file (m .|. o)
#endif

-- | Parse the database header, returning (major, minor) format versions.
-- Fails the 'Get' monad on a bad magic number or unsupported major version.
getHeader :: Get (Word32, Word32)
getHeader = do
    magic <- getByteString (BS.length headerMagic)
    when (magic /= headerMagic) $
      fail "not a ghc-pkg db file, wrong file magic number"

    majorVersion <- get :: Get Word32
    -- The major version is for incompatible changes

    minorVersion <- get :: Get Word32
    -- The minor version is for compatible extensions

    when (majorVersion /= 1) $
      fail "unsupported ghc-pkg db format version"
    -- If we ever support multiple major versions then we'll have to change
    -- this code

    -- The header can be extended without incrementing the major version,
    -- we ignore fields we don't know about (currently all).
    headerExtraLen <- get :: Get Word32
    skip (fromIntegral headerExtraLen)

    return (majorVersion, minorVersion)

-- | Serialize the database header (magic, versions, extension length).
putHeader :: Put
putHeader = do
    putByteString headerMagic
    put majorVersion
    put minorVersion
    put headerExtraLen
  where
    majorVersion   = 1 :: Word32
    minorVersion   = 0 :: Word32
    headerExtraLen = 0 :: Word32

headerMagic :: BS.ByteString
headerMagic = BS.Char8.pack "\0ghcpkg\0"

-- TODO: we may be able to replace the following with utils from the binary
-- package in future.

-- | Feed a 'Get' decoder with data chunks from a file.
--
decodeFromFile :: FilePath -> DbOpenMode mode t -> Get pkgs ->
                  IO (pkgs, DbOpenMode mode PackageDbLock)
decodeFromFile file mode decoder = case mode of
  DbOpenReadOnly -> do
  -- Note [Locking package database on Windows]
  -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  -- When we open the package db in read only mode, there is no need to acquire
  -- shared lock on non-Windows platform because we update the database with an
  -- atomic rename, so readers will always see the database in a consistent
  -- state.
#if defined(mingw32_HOST_OS)
    bracket (lockPackageDbWith SharedLock file) unlockPackageDb $ \_ -> do
#endif
      (, DbOpenReadOnly) <$> decodeFileContents
  DbOpenReadWrite{} -> do
    -- When we open the package db in read/write mode, acquire an exclusive lock
    -- on the database and return it so we can keep it for the duration of the
    -- update.
    bracketOnError (lockPackageDb file) unlockPackageDb $ \lock -> do
      (, DbOpenReadWrite lock) <$> decodeFileContents
  where
    decodeFileContents = withBinaryFile file ReadMode $ \hnd ->
      feed hnd (runGetIncremental decoder)

    -- Pump fixed-size chunks into the incremental decoder; an empty chunk
    -- (EOF) is signalled to the decoder as 'Nothing'.
    feed hnd (Partial k)  = do chunk <- BS.hGet hnd BS.Lazy.defaultChunkSize
                               if BS.null chunk
                                 then feed hnd (k Nothing)
                                 else feed hnd (k (Just chunk))
    feed _ (Done _ _ res) = return res
    feed _ (Fail _ _ msg) = ioError err
      where
        err = mkIOError InappropriateType loc Nothing (Just file)
              `ioeSetErrorString` msg
        loc = "GHC.Unit.Database.readPackageDb"

-- Copied from Cabal's Distribution.Simple.Utils.
-- | Write a file atomically: write to a temporary file in the target
-- directory, then rename it over the target. On error the temporary file is
-- removed and the original target is left untouched.
writeFileAtomic :: FilePath -> BS.Lazy.ByteString -> IO ()
writeFileAtomic targetPath content = do
  let (targetDir, targetFile) = splitFileName targetPath
  Exception.bracketOnError
    (openBinaryTempFileWithDefaultPermissions targetDir $ targetFile <.> "tmp")
    (\(tmpPath, handle) -> hClose handle >> removeFile tmpPath)
    (\(tmpPath, handle) -> do
        BS.Lazy.hPut handle content
        hClose handle
        renameFile tmpPath targetPath)

-- NOTE: the field order written by 'put' below IS the on-disk database
-- format. 'get' must read fields in exactly the same order, and any change
-- here is a format change requiring a header version bump.
instance Binary DbUnitInfo where
  put (GenericUnitInfo
         unitId unitInstanceOf unitInstantiations
         unitPackageId unitPackageName unitPackageVersion
         unitComponentName
         unitAbiHash unitDepends unitAbiDepends
         unitImportDirs unitLibraries unitExtDepLibsSys unitExtDepLibsGhc
         unitLibraryDirs unitLibraryDynDirs
         unitExtDepFrameworks unitExtDepFrameworkDirs
         unitLinkerOptions unitCcOptions
         unitIncludes unitIncludeDirs
         unitHaddockInterfaces unitHaddockHTMLs
         unitExposedModules unitHiddenModules
         unitIsIndefinite unitIsExposed unitIsTrusted) = do
    put unitPackageId
    put unitPackageName
    put unitPackageVersion
    put unitComponentName
    put unitId
    put unitInstanceOf
    put unitInstantiations
    put unitAbiHash
    put unitDepends
    put unitAbiDepends
    put unitImportDirs
    put unitLibraries
    put unitExtDepLibsSys
    put unitExtDepLibsGhc
    put unitLibraryDirs
    put unitLibraryDynDirs
    put unitExtDepFrameworks
    put unitExtDepFrameworkDirs
    put unitLinkerOptions
    put unitCcOptions
    put unitIncludes
    put unitIncludeDirs
    put unitHaddockInterfaces
    put unitHaddockHTMLs
    put unitExposedModules
    put unitHiddenModules
    put unitIsIndefinite
    put unitIsExposed
    put unitIsTrusted

  get = do
    unitPackageId      <- get
    unitPackageName    <- get
    unitPackageVersion <- get
    unitComponentName  <- get
    unitId             <- get
    unitInstanceOf     <- get
    unitInstantiations <- get
    unitAbiHash        <- get
    unitDepends        <- get
    unitAbiDepends     <- get
    unitImportDirs     <- get
    unitLibraries      <- get
    unitExtDepLibsSys  <- get
    unitExtDepLibsGhc  <- get
    libraryDirs        <- get
    libraryDynDirs     <- get
    frameworks         <- get
    frameworkDirs      <- get
    unitLinkerOptions  <- get
    unitCcOptions      <- get
    unitIncludes       <- get
    unitIncludeDirs    <- get
    unitHaddockInterfaces <- get
    unitHaddockHTMLs   <- get
    unitExposedModules <- get
    unitHiddenModules  <- get
    unitIsIndefinite   <- get
    unitIsExposed      <- get
    unitIsTrusted      <- get
    return (GenericUnitInfo
              unitId unitInstanceOf unitInstantiations
              unitPackageId unitPackageName unitPackageVersion
              unitComponentName
              unitAbiHash unitDepends unitAbiDepends
              unitImportDirs unitLibraries unitExtDepLibsSys unitExtDepLibsGhc
              libraryDirs libraryDynDirs
              frameworks frameworkDirs
              unitLinkerOptions unitCcOptions
              unitIncludes unitIncludeDirs
              unitHaddockInterfaces unitHaddockHTMLs
              unitExposedModules unitHiddenModules
              unitIsIndefinite unitIsExposed unitIsTrusted)

-- Constructors are tagged with a leading Word8 (0 or 1); 'get' treats any
-- non-zero tag as the second constructor.
instance Binary DbModule where
  put (DbModule dbModuleUnitId dbModuleName) = do
    putWord8 0
    put dbModuleUnitId
    put dbModuleName
  put (DbModuleVar dbModuleVarName) = do
    putWord8 1
    put dbModuleVarName
  get = do
    b <- getWord8
    case b of
      0 -> DbModule <$> get <*> get
      _ -> DbModuleVar <$> get

instance Binary DbInstUnitId where
  put (DbUnitId uid) = do
    putWord8 0
    put uid
  put (DbInstUnitId dbUnitIdComponentId dbUnitIdInsts) = do
    putWord8 1
    put dbUnitIdComponentId
    put dbUnitIdInsts

  get = do
    b <- getWord8
    case b of
      0 -> DbUnitId <$> get
      _ -> DbInstUnitId <$> get <*> get

-- | Return functions to perform path/URL variable substitution as per the Cabal
-- ${pkgroot} spec
-- (http://www.haskell.org/pipermail/libraries/2009-May/011772.html)
--
-- Paths/URLs can be relative to ${pkgroot} or ${pkgrooturl}.
-- The "pkgroot" is the directory containing the package database.
--
-- Also perform a similar substitution for the older GHC-specific
-- "$topdir" variable. The "topdir" is the location of the ghc
-- installation (obtained from the -B option).
mkMungePathUrl :: FilePathST -> FilePathST -> (FilePathST -> FilePathST, FilePathST -> FilePathST)
mkMungePathUrl top_dir pkgroot = (munge_path, munge_url)
  where
    -- Substitute ${pkgroot}/$topdir at the front of a filesystem path.
    -- A path without a recognized variable prefix is returned unchanged.
    munge_path p
      | Just p' <- stripVarPrefix "${pkgroot}" p = mappend pkgroot p'
      | Just p' <- stripVarPrefix "$topdir"    p = mappend top_dir p'
      | otherwise                                = p

    -- Substitute ${pkgrooturl}/$httptopdir at the front of a URL.
    munge_url p
      | Just p' <- stripVarPrefix "${pkgrooturl}" p = toUrlPath pkgroot p'
      | Just p' <- stripVarPrefix "$httptopdir"   p = toUrlPath top_dir p'
      | otherwise                                   = p

    toUrlPath r p = mconcat $ "file:///" : (intersperse "/" (r : (splitDirectories p)))
                    -- URLs always use posix style '/' separators

    -- We need to drop a leading "/" or "\\" if there is one:
    splitDirectories :: FilePathST -> [FilePathST]
    splitDirectories p = filter (not . ST.null) $ ST.splitFilePath p

    -- We could drop the separator here, and then use </> above. However,
    -- by leaving it in and using ++ we keep the same path separator
    -- rather than letting FilePath change it to use \ as the separator
    stripVarPrefix var path = case ST.stripPrefix var path of
      Just "" -> Just ""
      Just cs | isPathSeparator (ST.head cs) -> Just cs
      _ -> Nothing

-- | Perform path/URL variable substitution as per the Cabal ${pkgroot} spec
-- (http://www.haskell.org/pipermail/libraries/2009-May/011772.html)
-- Paths/URLs can be relative to ${pkgroot} or ${pkgrooturl}.
-- The "pkgroot" is the directory containing the package database.
--
-- Also perform a similar substitution for the older GHC-specific
-- "$topdir" variable. The "topdir" is the location of the ghc
-- installation (obtained from the -B option).
mungeUnitInfoPaths :: FilePathST -> FilePathST -> GenericUnitInfo a b c d e -> GenericUnitInfo a b c d e mungeUnitInfoPaths top_dir pkgroot pkg = -- TODO: similar code is duplicated in utils/ghc-pkg/Main.hs pkg { unitImportDirs = munge_paths (unitImportDirs pkg) , unitIncludeDirs = munge_paths (unitIncludeDirs pkg) , unitLibraryDirs = munge_paths (unitLibraryDirs pkg) , unitLibraryDynDirs = munge_paths (unitLibraryDynDirs pkg) , unitExtDepFrameworkDirs = munge_paths (unitExtDepFrameworkDirs pkg) , unitHaddockInterfaces = munge_paths (unitHaddockInterfaces pkg) -- haddock-html is allowed to be either a URL or a file , unitHaddockHTMLs = munge_paths (munge_urls (unitHaddockHTMLs pkg)) } where munge_paths = map munge_path munge_urls = map munge_url (munge_path,munge_url) = mkMungePathUrl top_dir pkgroot ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Env.hs0000644000000000000000000005064114472400113017645 0ustar0000000000000000{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleInstances #-} module GHC.Unit.Env ( UnitEnv (..) , initUnitEnv , unsafeGetHomeUnit , updateHug , updateHpt -- * Unit Env helper functions , ue_units , ue_currentHomeUnitEnv , ue_setUnits , ue_setUnitFlags , ue_unit_dbs , ue_all_home_unit_ids , ue_setUnitDbs , ue_hpt , ue_homeUnit , ue_unsafeHomeUnit , ue_setFlags , ue_setActiveUnit , ue_currentUnit , ue_findHomeUnitEnv , ue_updateHomeUnitEnv , ue_unitHomeUnit , ue_unitFlags , ue_renameUnitId , ue_transitiveHomeDeps -- * HomeUnitEnv , HomeUnitGraph , HomeUnitEnv (..) , mkHomeUnitEnv , lookupHugByModule , hugElts , lookupHug , addHomeModInfoToHug -- * UnitEnvGraph , UnitEnvGraph (..) 
, unitEnv_insert , unitEnv_delete , unitEnv_adjust , unitEnv_new , unitEnv_singleton , unitEnv_map , unitEnv_member , unitEnv_lookup_maybe , unitEnv_lookup , unitEnv_keys , unitEnv_elts , unitEnv_hpts , unitEnv_foldWithKey , unitEnv_mapWithKey -- * Invariants , assertUnitEnvInvariant -- * Preload units info , preloadUnitsInfo , preloadUnitsInfo' -- * Home Module functions , isUnitEnvInstalledModule ) where import GHC.Prelude import GHC.Unit.External import GHC.Unit.State import GHC.Unit.Home import GHC.Unit.Types import GHC.Unit.Home.ModInfo import GHC.Platform import GHC.Settings import GHC.Data.Maybe import GHC.Utils.Panic.Plain import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import GHC.Utils.Misc (HasDebugCallStack) import GHC.Driver.Session import GHC.Utils.Outputable import GHC.Utils.Panic (pprPanic) import GHC.Unit.Module.ModIface import GHC.Unit.Module import qualified Data.Set as Set data UnitEnv = UnitEnv { ue_eps :: {-# UNPACK #-} !ExternalUnitCache -- ^ Information about the currently loaded external packages. -- This is mutable because packages will be demand-loaded during -- a compilation run as required. 
, ue_current_unit :: UnitId , ue_home_unit_graph :: !HomeUnitGraph -- See Note [Multiple Home Units] , ue_platform :: !Platform -- ^ Platform , ue_namever :: !GhcNameVersion -- ^ GHC name/version (used for dynamic library suffix) } initUnitEnv :: UnitId -> HomeUnitGraph -> GhcNameVersion -> Platform -> IO UnitEnv initUnitEnv cur_unit hug namever platform = do eps <- initExternalUnitCache return $ UnitEnv { ue_eps = eps , ue_home_unit_graph = hug , ue_current_unit = cur_unit , ue_platform = platform , ue_namever = namever } -- | Get home-unit -- -- Unsafe because the home-unit may not be set unsafeGetHomeUnit :: UnitEnv -> HomeUnit unsafeGetHomeUnit ue = ue_unsafeHomeUnit ue updateHpt :: (HomePackageTable -> HomePackageTable) -> UnitEnv -> UnitEnv updateHpt = ue_updateHPT updateHug :: (HomeUnitGraph -> HomeUnitGraph) -> UnitEnv -> UnitEnv updateHug = ue_updateHUG ue_transitiveHomeDeps :: UnitId -> UnitEnv -> [UnitId] ue_transitiveHomeDeps uid unit_env = Set.toList (loop Set.empty [uid]) where loop acc [] = acc loop acc (uid:uids) | uid `Set.member` acc = loop acc uids | otherwise = let hue = homeUnitDepends (homeUnitEnv_units (ue_findHomeUnitEnv uid unit_env)) in loop (Set.insert uid acc) (hue ++ uids) -- ----------------------------------------------------------------------------- -- Extracting information from the packages in scope -- Many of these functions take a list of packages: in those cases, -- the list is expected to contain the "dependent packages", -- i.e. those packages that were found to be depended on by the -- current module/program. These can be auto or non-auto packages, it -- doesn't really matter. The list is always combined with the list -- of preload (command-line) packages to determine which packages to -- use. -- | Lookup 'UnitInfo' for every preload unit from the UnitState, for every unit -- used to instantiate the home unit, and for every unit explicitly passed in -- the given list of UnitId. 
preloadUnitsInfo' :: UnitEnv -> [UnitId] -> MaybeErr UnitErr [UnitInfo]
preloadUnitsInfo' unit_env ids0 = all_infos
  where
    unit_state = ue_units unit_env
    ids        = ids0 ++ inst_ids
    inst_ids   = case ue_homeUnit unit_env of
      Nothing -> []
      Just home_unit
        -- An indefinite package will have insts to HOLE,
        -- which is not a real package. Don't look it up.
        -- Fixes #14525
        | isHomeUnitIndefinite home_unit -> []
        | otherwise -> map (toUnitId . moduleUnit . snd) (homeUnitInstantiations home_unit)
    pkg_map = unitInfoMap unit_state
    preload = preloadUnits unit_state

    -- Close the requested set over dependencies before resolving each id.
    all_pkgs  = closeUnitDeps' pkg_map preload (ids `zip` repeat Nothing)
    all_infos = map (unsafeLookupUnitId unit_state) <$> all_pkgs

-- | Lookup 'UnitInfo' for every preload unit from the UnitState and for every
-- unit used to instantiate the home unit.
preloadUnitsInfo :: UnitEnv -> MaybeErr UnitErr [UnitInfo]
preloadUnitsInfo unit_env = preloadUnitsInfo' unit_env []

-- -----------------------------------------------------------------------------

-- | The environment of a single home unit: its external unit state, unit
-- databases, dynamic flags, home package table, and (optional) home unit.
data HomeUnitEnv = HomeUnitEnv
  { homeUnitEnv_units :: !UnitState
      -- ^ External units

  , homeUnitEnv_unit_dbs :: !(Maybe [UnitDatabase UnitId])
      -- ^ Stack of unit databases for the target platform.
      --
      -- This field is populated with the result of `initUnits`.
      --
      -- 'Nothing' means the databases have never been read from disk.
      --
      -- Usually we don't reload the databases from disk if they are
      -- cached, even if the database flags changed!

  , homeUnitEnv_dflags :: DynFlags
      -- ^ The dynamic flag settings

  , homeUnitEnv_hpt :: HomePackageTable
      -- ^ The home package table describes already-compiled
      -- home-package modules, /excluding/ the module we
      -- are compiling right now.
      -- (In one-shot mode the current module is the only
      -- home-package module, so homeUnitEnv_hpt is empty. All other
      -- modules count as \"external-package\" modules.
      -- However, even in GHCi mode, hi-boot interfaces are
      -- demand-loaded into the external-package table.)
      --
      -- 'homeUnitEnv_hpt' is not mutable because we only demand-load
      -- external packages; the home package is eagerly
      -- loaded, module by module, by the compilation manager.
      --
      -- The HPT may contain modules compiled earlier by @--make@
      -- but not actually below the current module in the dependency
      -- graph.
      --
      -- (This changes a previous invariant: changed Jan 05.)

  , homeUnitEnv_home_unit :: !(Maybe HomeUnit)
      -- ^ Home-unit
  }

instance Outputable HomeUnitEnv where
  ppr hug = pprHPT (homeUnitEnv_hpt hug)

-- | Extract the home unit, panicking if it is not set.
homeUnitEnv_unsafeHomeUnit :: HomeUnitEnv -> HomeUnit
homeUnitEnv_unsafeHomeUnit hue = case homeUnitEnv_home_unit hue of
  Nothing -> panic "homeUnitEnv_unsafeHomeUnit: No home unit"
  Just h  -> h

-- | Build a 'HomeUnitEnv' with an empty unit state and no cached databases.
mkHomeUnitEnv :: DynFlags -> HomePackageTable -> Maybe HomeUnit -> HomeUnitEnv
mkHomeUnitEnv dflags hpt home_unit = HomeUnitEnv
  { homeUnitEnv_units = emptyUnitState
  , homeUnitEnv_unit_dbs = Nothing
  , homeUnitEnv_dflags = dflags
  , homeUnitEnv_hpt = hpt
  , homeUnitEnv_home_unit = home_unit
  }

-- | Test if the module comes from the home unit
isUnitEnvInstalledModule :: UnitEnv -> InstalledModule -> Bool
isUnitEnvInstalledModule ue m = maybe False (`isHomeInstalledModule` m) hu
  where
    hu = ue_unitHomeUnit_maybe (moduleUnit m) ue

type HomeUnitGraph = UnitEnvGraph HomeUnitEnv

lookupHugByModule :: Module -> HomeUnitGraph -> Maybe HomeModInfo
lookupHugByModule mod hug
  -- NOTE(review): a lone '| otherwise' guard is equivalent to a plain
  -- equation; it looks like a leftover from a removed guard.
  | otherwise = do
      env <- (unitEnv_lookup_maybe (toUnitId $ moduleUnit mod) hug)
      lookupHptByModule (homeUnitEnv_hpt env) mod

hugElts :: HomeUnitGraph -> [(UnitId, HomeUnitEnv)]
hugElts hug = unitEnv_elts hug

-- | Record a freshly compiled module in the HPT of its owning home unit.
-- Panics if the module's unit is not part of the home unit graph.
addHomeModInfoToHug :: HomeModInfo -> HomeUnitGraph -> HomeUnitGraph
addHomeModInfoToHug hmi hug = unitEnv_alter go hmi_unit hug
  where
    hmi_mod :: Module
    hmi_mod  = mi_module (hm_iface hmi)
    hmi_unit = toUnitId (moduleUnit hmi_mod)
    _hmi_mn  = moduleName hmi_mod

    go :: Maybe HomeUnitEnv -> Maybe HomeUnitEnv
    go Nothing    = pprPanic "addHomeInfoToHug" (ppr hmi_mod)
    go (Just hue) = Just (updateHueHpt (addHomeModInfoToHpt hmi) hue)
-- | Apply a transformation to the home package table of a 'HomeUnitEnv'.
updateHueHpt :: (HomePackageTable -> HomePackageTable) -> HomeUnitEnv -> HomeUnitEnv
updateHueHpt f hue =
  let hpt' = f (homeUnitEnv_hpt hue)
  in hue { homeUnitEnv_hpt = hpt' }

-- | Look up a module (by owning unit id and module name) in the home unit
-- graph. Returns 'Nothing' if the unit or the module is unknown.
lookupHug :: HomeUnitGraph -> UnitId -> ModuleName -> Maybe HomeModInfo
lookupHug hug uid mod = do
  hue <- unitEnv_lookup_maybe uid hug
  lookupHpt (homeUnitEnv_hpt hue) mod

instance Outputable (UnitEnvGraph HomeUnitEnv) where
  -- Render each home unit as (unit id, size of its HPT).
  ppr g = ppr (map summarize (unitEnv_elts g))
    where
      summarize (uid, hue) = (uid, length (homeUnitEnv_hpt hue))

type UnitEnvGraphKey = UnitId

newtype UnitEnvGraph v = UnitEnvGraph
  { unitEnv_graph :: Map UnitEnvGraphKey v
  } deriving (Functor, Foldable, Traversable)

-- | Lift a transformation of the underlying 'Map' onto 'UnitEnvGraph'.
onGraph :: (Map UnitEnvGraphKey v -> Map UnitEnvGraphKey w)
        -> UnitEnvGraph v -> UnitEnvGraph w
onGraph f (UnitEnvGraph m) = UnitEnvGraph (f m)

-- | Insert (or replace) the entry for a unit.
unitEnv_insert :: UnitEnvGraphKey -> v -> UnitEnvGraph v -> UnitEnvGraph v
unitEnv_insert k v = onGraph (Map.insert k v)

-- | Remove the entry for a unit, if present.
unitEnv_delete :: UnitEnvGraphKey -> UnitEnvGraph v -> UnitEnvGraph v
unitEnv_delete k = onGraph (Map.delete k)

-- | Update the entry for a unit in place; no-op if the key is absent.
unitEnv_adjust :: (v -> v) -> UnitEnvGraphKey -> UnitEnvGraph v -> UnitEnvGraph v
unitEnv_adjust f k = onGraph (Map.adjust f k)

-- | General insert/update/delete at a key, 'Map.alter'-style.
unitEnv_alter :: (Maybe v -> Maybe v) -> UnitEnvGraphKey -> UnitEnvGraph v -> UnitEnvGraph v
unitEnv_alter f k = onGraph (Map.alter f k)

-- | Map over all entries with access to their keys.
unitEnv_mapWithKey :: (UnitEnvGraphKey -> v -> b) -> UnitEnvGraph v -> UnitEnvGraph b
unitEnv_mapWithKey f = onGraph (Map.mapWithKey f)

-- | Build a graph from an existing 'Map'.
unitEnv_new :: Map UnitEnvGraphKey v -> UnitEnvGraph v
unitEnv_new = UnitEnvGraph

-- | A graph containing a single unit.
unitEnv_singleton :: UnitEnvGraphKey -> v -> UnitEnvGraph v
unitEnv_singleton k v = UnitEnvGraph (Map.singleton k v)

-- | Map over all entries (type-preserving).
unitEnv_map :: (v -> v) -> UnitEnvGraph v -> UnitEnvGraph v
unitEnv_map f = onGraph (Map.map f)

-- | Is the unit present in the graph?
unitEnv_member :: UnitEnvGraphKey -> UnitEnvGraph v -> Bool
unitEnv_member k g = Map.member k (unitEnv_graph g)
-- | Look up the value for the given key, if present.
unitEnv_lookup_maybe :: UnitEnvGraphKey -> UnitEnvGraph v -> Maybe v
unitEnv_lookup_maybe u env = Map.lookup u (unitEnv_graph env)

-- | Partial version of 'unitEnv_lookup_maybe': errors (via 'fromJust') if
-- the key is missing.
unitEnv_lookup :: UnitEnvGraphKey -> UnitEnvGraph v -> v
unitEnv_lookup u env = fromJust $ unitEnv_lookup_maybe u env

-- | All keys of the graph.
unitEnv_keys :: UnitEnvGraph v -> Set.Set UnitEnvGraphKey
unitEnv_keys env = Map.keysSet (unitEnv_graph env)

-- | All (key, value) pairs, in ascending key order (via 'Map.toList').
unitEnv_elts :: UnitEnvGraph v -> [(UnitEnvGraphKey, v)]
unitEnv_elts env = Map.toList (unitEnv_graph env)

-- | The home package tables of all home units in the graph.
unitEnv_hpts :: UnitEnvGraph HomeUnitEnv -> [HomePackageTable]
unitEnv_hpts env = map homeUnitEnv_hpt (Map.elems (unitEnv_graph env))

-- | Strict left fold over the graph (via 'Map.foldlWithKey'').
unitEnv_foldWithKey :: (b -> UnitEnvGraphKey -> a -> b) -> b -> UnitEnvGraph a -> b
unitEnv_foldWithKey f z (UnitEnvGraph g) = Map.foldlWithKey' f z g

-- -------------------------------------------------------
-- Query and modify UnitState in HomeUnitEnv
-- -------------------------------------------------------

-- | The 'UnitState' of the currently active home unit.
ue_units :: HasDebugCallStack => UnitEnv -> UnitState
ue_units = homeUnitEnv_units . ue_currentHomeUnitEnv

-- | Set the 'UnitState' of the currently active home unit.
ue_setUnits :: UnitState -> UnitEnv -> UnitEnv
ue_setUnits units ue = ue_updateHomeUnitEnv f (ue_currentUnit ue) ue
  where
    f hue = hue { homeUnitEnv_units = units }

-- | The unit databases of the currently active home unit.
ue_unit_dbs :: UnitEnv -> Maybe [UnitDatabase UnitId]
ue_unit_dbs = homeUnitEnv_unit_dbs . ue_currentHomeUnitEnv

-- | Set the unit databases of the currently active home unit.
ue_setUnitDbs :: Maybe [UnitDatabase UnitId] -> UnitEnv -> UnitEnv
ue_setUnitDbs unit_dbs ue = ue_updateHomeUnitEnv f (ue_currentUnit ue) ue
  where
    f hue = hue { homeUnitEnv_unit_dbs = unit_dbs }

-- -------------------------------------------------------
-- Query and modify Home Package Table in HomeUnitEnv
-- -------------------------------------------------------

-- | The home package table of the currently active home unit.
ue_hpt :: HasDebugCallStack => UnitEnv -> HomePackageTable
ue_hpt = homeUnitEnv_hpt . ue_currentHomeUnitEnv

-- | Update the HPT of the currently active home unit.
ue_updateHPT :: HasDebugCallStack => (HomePackageTable -> HomePackageTable) -> UnitEnv -> UnitEnv
ue_updateHPT f e = ue_updateUnitHPT f (ue_currentUnit e) e

-- | Update the whole home unit graph.
ue_updateHUG :: HasDebugCallStack => (HomeUnitGraph -> HomeUnitGraph) -> UnitEnv -> UnitEnv
ue_updateHUG f e = ue_updateUnitHUG f e

-- | Update the HPT of the given home unit.
ue_updateUnitHPT :: HasDebugCallStack => (HomePackageTable -> HomePackageTable) -> UnitId -> UnitEnv -> UnitEnv
ue_updateUnitHPT f uid ue_env = ue_updateHomeUnitEnv update uid ue_env
  where
    update unitEnv = unitEnv { homeUnitEnv_hpt = f $ homeUnitEnv_hpt unitEnv }

-- | Update the home unit graph stored in the 'UnitEnv'.
ue_updateUnitHUG :: HasDebugCallStack => (HomeUnitGraph -> HomeUnitGraph) -> UnitEnv -> UnitEnv
ue_updateUnitHUG f ue_env = ue_env { ue_home_unit_graph = f (ue_home_unit_graph ue_env)}

-- -------------------------------------------------------
-- Query and modify DynFlags in HomeUnitEnv
-- -------------------------------------------------------

-- | Set the 'DynFlags' of the currently active home unit.
ue_setFlags :: HasDebugCallStack => DynFlags -> UnitEnv -> UnitEnv
ue_setFlags dflags ue_env = ue_setUnitFlags (ue_currentUnit ue_env) dflags ue_env

-- | Set the 'DynFlags' of the given home unit.
ue_setUnitFlags :: HasDebugCallStack => UnitId -> DynFlags -> UnitEnv -> UnitEnv
ue_setUnitFlags uid dflags e =
  ue_updateUnitFlags (const dflags) uid e

-- | The 'DynFlags' of the given home unit (panics if the unit is unknown).
ue_unitFlags :: HasDebugCallStack => UnitId -> UnitEnv -> DynFlags
ue_unitFlags uid ue_env = homeUnitEnv_dflags $ ue_findHomeUnitEnv uid ue_env

-- | Update the 'DynFlags' of the given home unit.
ue_updateUnitFlags :: HasDebugCallStack => (DynFlags -> DynFlags) -> UnitId -> UnitEnv -> UnitEnv
ue_updateUnitFlags f uid e = ue_updateHomeUnitEnv update uid e
  where
    update hue = hue { homeUnitEnv_dflags = f $ homeUnitEnv_dflags hue }

-- -------------------------------------------------------
-- Query and modify home units in HomeUnitEnv
-- -------------------------------------------------------

-- | The 'HomeUnit' of the currently active home unit, if any.
ue_homeUnit :: UnitEnv -> Maybe HomeUnit
ue_homeUnit = homeUnitEnv_home_unit . ue_currentHomeUnitEnv

-- | Partial version of 'ue_homeUnit': panics when there is no home unit.
ue_unsafeHomeUnit :: UnitEnv -> HomeUnit
ue_unsafeHomeUnit ue = case ue_homeUnit ue of
  Nothing -> panic "unsafeGetHomeUnit: No home unit"
  Just h -> h

-- | The 'HomeUnit' of the given unit-id, if that unit is in the graph.
ue_unitHomeUnit_maybe :: UnitId -> UnitEnv -> Maybe HomeUnit
ue_unitHomeUnit_maybe uid ue_env =
  homeUnitEnv_unsafeHomeUnit <$> (ue_findHomeUnitEnv_maybe uid ue_env)

-- | The 'HomeUnit' of the given unit-id (panics if the unit is unknown).
ue_unitHomeUnit :: UnitId -> UnitEnv -> HomeUnit
ue_unitHomeUnit uid ue_env = homeUnitEnv_unsafeHomeUnit $ ue_findHomeUnitEnv uid ue_env

-- | All unit-ids currently loaded as home units.
ue_all_home_unit_ids :: UnitEnv -> Set.Set UnitId
ue_all_home_unit_ids = unitEnv_keys . ue_home_unit_graph

-- -------------------------------------------------------
-- Query and modify the currently active unit
-- -------------------------------------------------------

-- | The 'HomeUnitEnv' of the currently active unit; panics if the active
-- unit is not present in the home unit graph.
ue_currentHomeUnitEnv :: HasDebugCallStack => UnitEnv -> HomeUnitEnv
ue_currentHomeUnitEnv e =
  case ue_findHomeUnitEnv_maybe (ue_currentUnit e) e of
    Just unitEnv -> unitEnv
    Nothing -> pprPanic "packageNotFound" $
      (ppr $ ue_currentUnit e) $$ ppr (ue_home_unit_graph e)

-- | Make the given unit the active one, checking the 'UnitEnv' invariant
-- (the active unit must be present in the home unit graph).
ue_setActiveUnit :: UnitId -> UnitEnv -> UnitEnv
ue_setActiveUnit u ue_env = assertUnitEnvInvariant $ ue_env
  { ue_current_unit = u
  }

-- | The currently active unit.
ue_currentUnit :: UnitEnv -> UnitId
ue_currentUnit = ue_current_unit

-- -------------------------------------------------------
-- Operations on arbitrary elements of the home unit graph
-- -------------------------------------------------------

-- | Find the 'HomeUnitEnv' of the given unit-id, if it is a home unit.
ue_findHomeUnitEnv_maybe :: UnitId -> UnitEnv -> Maybe HomeUnitEnv
ue_findHomeUnitEnv_maybe uid e =
  unitEnv_lookup_maybe uid (ue_home_unit_graph e)

-- | Find the 'HomeUnitEnv' of the given unit-id; panics if it is unknown.
ue_findHomeUnitEnv :: HasDebugCallStack => UnitId -> UnitEnv -> HomeUnitEnv
ue_findHomeUnitEnv uid e = case unitEnv_lookup_maybe uid (ue_home_unit_graph e) of
  Nothing -> pprPanic "Unit unknown to the internal unit environment"
              $ text "unit (" <> ppr uid <> text ")"
              $$ pprUnitEnvGraph e
  Just hue -> hue

-- | Update the 'HomeUnitEnv' of the given unit-id. No-op if the unit is
-- unknown, since 'unitEnv_adjust' leaves the map unchanged for a missing key.
ue_updateHomeUnitEnv :: (HomeUnitEnv -> HomeUnitEnv) -> UnitId -> UnitEnv -> UnitEnv
ue_updateHomeUnitEnv f uid e = e
  { ue_home_unit_graph = unitEnv_adjust f uid $ ue_home_unit_graph e
  }

-- | Rename a unit id in the internal unit env.
--
-- @'ue_renameUnitId' oldUnit newUnit unitEnv@ assumes that 'oldUnit' exists
-- in the map, otherwise we panic.
-- The 'DynFlags' associated with the home unit will have its field
-- 'homeUnitId' set to 'newUnit'.
ue_renameUnitId :: HasDebugCallStack => UnitId -> UnitId -> UnitEnv -> UnitEnv
ue_renameUnitId oldUnit newUnit unitEnv = case ue_findHomeUnitEnv_maybe oldUnit unitEnv of
  Nothing ->
    pprPanic "Tried to rename unit, but it didn't exist"
              $ text "Rename old unit \"" <> ppr oldUnit <> text "\" to \"" <> ppr newUnit <> text "\""
              $$ nest 2 (pprUnitEnvGraph unitEnv)
  Just oldEnv ->
    let
      -- If the renamed unit was the active one, keep it active under its
      -- new name.
      activeUnit :: UnitId
      !activeUnit = if ue_currentUnit unitEnv == oldUnit
                      then newUnit
                      else ue_currentUnit unitEnv

      newInternalUnitEnv = oldEnv
        { homeUnitEnv_dflags = (homeUnitEnv_dflags oldEnv)
            { homeUnitId_ = newUnit
            }
        }
    in
      unitEnv
        { ue_current_unit = activeUnit
        , ue_home_unit_graph =
            unitEnv_insert newUnit newInternalUnitEnv
            $ unitEnv_delete oldUnit
            $ ue_home_unit_graph unitEnv
        }

-- ---------------------------------------------
-- Asserts to enforce invariants for the UnitEnv
-- ---------------------------------------------

-- | Check that the active unit is present in the home unit graph; panics
-- otherwise.
assertUnitEnvInvariant :: HasDebugCallStack => UnitEnv -> UnitEnv
assertUnitEnvInvariant u =
  if ue_current_unit u `unitEnv_member` ue_home_unit_graph u
    then u
    else pprPanic "invariant" (ppr (ue_current_unit u) $$ ppr (ue_home_unit_graph u))

-- -----------------------------------------------------------------------------
-- Pretty output functions
-- -----------------------------------------------------------------------------

-- | Pretty-print the whole home unit graph of a 'UnitEnv'.
pprUnitEnvGraph :: UnitEnv -> SDoc
pprUnitEnvGraph env = text "pprInternalUnitMap"
  $$ nest 2 (pprHomeUnitGraph $ ue_home_unit_graph env)

-- | Pretty-print every home unit in the graph.
pprHomeUnitGraph :: HomeUnitGraph -> SDoc
pprHomeUnitGraph unitEnv = vcat (map (\(k, v) -> pprHomeUnitEnv k v) $ Map.assocs $ unitEnv_graph unitEnv)

-- | Pretty-print one home unit: its unit-id, flags, home unit and HPT.
pprHomeUnitEnv :: UnitId -> HomeUnitEnv -> SDoc
pprHomeUnitEnv uid env
  = ppr uid <+> text "(flags:" <+> ppr (homeUnitId_ $ homeUnitEnv_dflags env)
    <> text "," <+> ppr (fmap homeUnitId $ homeUnitEnv_home_unit env) <> text ")"
    <+> text "->"
  $$ nest 4 (pprHPT $ homeUnitEnv_hpt env)

{-
Note [Multiple Home Units]
~~~~~~~~~~~~~~~~~~~~~~~~~~
The basic idea of multiple home units is quite simple. Instead of allowing one
home unit, you can load multiple home units.

The flow:

1. Dependencies between units are specified between each other in the normal manner,
   a unit is identified by the -this-unit-id flag and dependencies specified by
   the normal -package-id flag.
2. Downsweep is augmented to know how to look for dependencies in any home unit.
3. The rest of the compiler is modified appropriately to offset paths to the
   right places.
4. --make mode can parallelise between home units and multiple units are
   allowed to produce linkables.

Closure Property
----------------

You must perform a clean cut of the dependency graph.

> Any dependency which is not a home unit must not (transitively) depend on a home unit.

For example, if you have three packages p, q and r, then if p depends on q which
depends on r then it is illegal to load both p and r as home units but not q,
because q is a dependency of the home unit p which depends on another home unit r.

Offsetting Paths
----------------

The main complication to the implementation is to do with offsetting paths
appropriately. For a long time it has been assumed that GHC will execute in the
top-directory for a unit, normally where the .cabal file is and all paths are
interpreted relative to there. When you have multiple home units then it
doesn't make sense to pick one of these units to choose as the base-unit, and
you can't robustly change directories when using parallelism.

Therefore there is an option `-working-directory`, which tells GHC where the
relative paths for each unit should be interpreted relative to.
For example, if you specify `-working-dir a -ib`, then GHC will offset the relative path `b`, by `a`, and look for source files in `a/b`. The same thing happens for any path passed on the command line. A non-exhaustive list is * -i * -I * -odir/-hidir/-outputdir/-stubdir/-hiedir * Target files passed on the command line There is also a template-haskell function, makeRelativeToProject, which uses the `-working-directory` option in order to allow users to offset their own relative paths. -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/External.hs0000644000000000000000000001602114472400113020671 0ustar0000000000000000module GHC.Unit.External ( ExternalUnitCache (..) , initExternalUnitCache , ExternalPackageState (..) , initExternalPackageState , EpsStats(..) , addEpsInStats , PackageTypeEnv , PackageIfaceTable , PackageInstEnv , PackageFamInstEnv , PackageRuleBase , PackageCompleteMatches , emptyPackageIfaceTable ) where import GHC.Prelude import GHC.Unit import GHC.Unit.Module.ModIface import GHC.Core ( RuleBase ) import GHC.Core.FamInstEnv import GHC.Core.InstEnv ( InstEnv, emptyInstEnv ) import GHC.Core.Opt.ConstantFold import GHC.Core.Rules (mkRuleBase) import GHC.Types.Annotations ( AnnEnv, emptyAnnEnv ) import GHC.Types.CompleteMatch import GHC.Types.TypeEnv import GHC.Types.Unique.DSet import Data.IORef type PackageTypeEnv = TypeEnv type PackageRuleBase = RuleBase type PackageInstEnv = InstEnv type PackageFamInstEnv = FamInstEnv type PackageAnnEnv = AnnEnv type PackageCompleteMatches = CompleteMatches -- | Helps us find information about modules in the imported packages type PackageIfaceTable = ModuleEnv ModIface -- Domain = modules in the imported packages -- | Constructs an empty PackageIfaceTable emptyPackageIfaceTable :: PackageIfaceTable emptyPackageIfaceTable = emptyModuleEnv -- | Information about the currently loaded external packages. -- This is mutable because packages will be demand-loaded during -- a compilation run as required. 
newtype ExternalUnitCache = ExternalUnitCache { euc_eps :: IORef ExternalPackageState } initExternalUnitCache :: IO ExternalUnitCache initExternalUnitCache = ExternalUnitCache <$> newIORef initExternalPackageState initExternalPackageState :: ExternalPackageState initExternalPackageState = EPS { eps_is_boot = emptyInstalledModuleEnv , eps_PIT = emptyPackageIfaceTable , eps_free_holes = emptyInstalledModuleEnv , eps_PTE = emptyTypeEnv , eps_inst_env = emptyInstEnv , eps_fam_inst_env = emptyFamInstEnv , eps_rule_base = mkRuleBase builtinRules , -- Initialise the EPS rule pool with the built-in rules eps_mod_fam_inst_env = emptyModuleEnv , eps_complete_matches = [] , eps_ann_env = emptyAnnEnv , eps_stats = EpsStats { n_ifaces_in = 0 , n_decls_in = 0 , n_decls_out = 0 , n_insts_in = 0 , n_insts_out = 0 , n_rules_in = length builtinRules , n_rules_out = 0 } } -- | Information about other packages that we have slurped in by reading -- their interface files data ExternalPackageState = EPS { eps_is_boot :: !(InstalledModuleEnv ModuleNameWithIsBoot), -- ^ In OneShot mode (only), home-package modules -- accumulate in the external package state, and are -- sucked in lazily. For these home-pkg modules -- (only) we need to record which are boot modules. -- We set this field after loading all the -- explicitly-imported interfaces, but before doing -- anything else -- -- The 'ModuleName' part is not necessary, but it's useful for -- debug prints, and it's convenient because this field comes -- direct from 'GHC.Tc.Utils.imp_dep_mods' eps_PIT :: !PackageIfaceTable, -- ^ The 'ModIface's for modules in external packages -- whose interfaces we have opened. -- The declarations in these interface files are held in the -- 'eps_decls', 'eps_inst_env', 'eps_fam_inst_env' and 'eps_rules' -- fields of this record, not in the 'mi_decls' fields of the -- interface we have sucked in. 
-- -- What /is/ in the PIT is: -- -- * The Module -- -- * Fingerprint info -- -- * Its exports -- -- * Fixities -- -- * Deprecations and warnings eps_free_holes :: InstalledModuleEnv (UniqDSet ModuleName), -- ^ Cache for 'mi_free_holes'. Ordinarily, we can rely on -- the 'eps_PIT' for this information, EXCEPT that when -- we do dependency analysis, we need to look at the -- 'Dependencies' of our imports to determine what their -- precise free holes are ('moduleFreeHolesPrecise'). We -- don't want to repeatedly reread in the interface -- for every import, so cache it here. When the PIT -- gets filled in we can drop these entries. eps_PTE :: !PackageTypeEnv, -- ^ Result of typechecking all the external package -- interface files we have sucked in. The domain of -- the mapping is external-package modules eps_inst_env :: !PackageInstEnv, -- ^ The total 'InstEnv' accumulated -- from all the external-package modules eps_fam_inst_env :: !PackageFamInstEnv,-- ^ The total 'FamInstEnv' accumulated -- from all the external-package modules eps_rule_base :: !PackageRuleBase, -- ^ The total 'RuleEnv' accumulated -- from all the external-package modules eps_ann_env :: !PackageAnnEnv, -- ^ The total 'AnnEnv' accumulated -- from all the external-package modules eps_complete_matches :: !PackageCompleteMatches, -- ^ The total 'CompleteMatches' accumulated -- from all the external-package modules eps_mod_fam_inst_env :: !(ModuleEnv FamInstEnv), -- ^ The family instances accumulated from external -- packages, keyed off the module that declared them eps_stats :: !EpsStats -- ^ Stastics about what was loaded from external packages } -- | Accumulated statistics about what we are putting into the 'ExternalPackageState'. 
-- \"In\" means stuff that is just /read/ from interface files, -- \"Out\" means actually sucked in and type-checked data EpsStats = EpsStats { n_ifaces_in , n_decls_in, n_decls_out , n_rules_in, n_rules_out , n_insts_in, n_insts_out :: !Int } addEpsInStats :: EpsStats -> Int -> Int -> Int -> EpsStats -- ^ Add stats for one newly-read interface addEpsInStats stats n_decls n_insts n_rules = stats { n_ifaces_in = n_ifaces_in stats + 1 , n_decls_in = n_decls_in stats + n_decls , n_insts_in = n_insts_in stats + n_insts , n_rules_in = n_rules_in stats + n_rules } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Finder/Types.hs0000644000000000000000000000735014472400113021427 0ustar0000000000000000module GHC.Unit.Finder.Types ( FinderCache (..) , FinderCacheState , FindResult (..) , InstalledFindResult (..) , FinderOpts(..) ) where import GHC.Prelude import GHC.Unit import qualified Data.Map as M import GHC.Fingerprint import GHC.Platform.Ways import Data.IORef import GHC.Data.FastString import qualified Data.Set as Set -- | The 'FinderCache' maps modules to the result of -- searching for that module. It records the results of searching for -- modules along the search path. On @:load@, we flush the entire -- contents of this cache. -- type FinderCacheState = InstalledModuleEnv InstalledFindResult type FileCacheState = M.Map FilePath Fingerprint data FinderCache = FinderCache { fcModuleCache :: (IORef FinderCacheState) , fcFileCache :: (IORef FileCacheState) } data InstalledFindResult = InstalledFound ModLocation InstalledModule | InstalledNoPackage UnitId | InstalledNotFound [FilePath] (Maybe UnitId) -- | The result of searching for an imported module. -- -- NB: FindResult manages both user source-import lookups -- (which can result in 'Module') as well as direct imports -- for interfaces (which always result in 'InstalledModule'). 
data FindResult = Found ModLocation Module -- ^ The module was found | NoPackage Unit -- ^ The requested unit was not found | FoundMultiple [(Module, ModuleOrigin)] -- ^ _Error_: both in multiple packages -- | Not found | NotFound { fr_paths :: [FilePath] -- ^ Places where I looked , fr_pkg :: Maybe Unit -- ^ Just p => module is in this unit's -- manifest, but couldn't find the -- .hi file , fr_mods_hidden :: [Unit] -- ^ Module is in these units, -- but the *module* is hidden , fr_pkgs_hidden :: [Unit] -- ^ Module is in these units, -- but the *unit* is hidden -- | Module is in these units, but it is unusable , fr_unusables :: [(Unit, UnusableUnitReason)] , fr_suggestions :: [ModuleSuggestion] -- ^ Possible mis-spelled modules } -- | Locations and information the finder cares about. -- -- Should be taken from 'DynFlags' via 'initFinderOpts'. data FinderOpts = FinderOpts { finder_importPaths :: [FilePath] -- ^ Where are we allowed to look for Modules and Source files , finder_lookupHomeInterfaces :: Bool -- ^ When looking up a home module: -- -- * 'True': search interface files (e.g. in '-c' mode) -- * 'False': search source files (e.g. in '--make' mode) , finder_bypassHiFileCheck :: Bool -- ^ Don't check that an imported interface file actually exists -- if it can only be at one location. The interface will be reported -- as `InstalledFound` even if the file doesn't exist, so this is -- only useful in specific cases (e.g. to generate dependencies -- with `ghc -M`) , finder_ways :: Ways , finder_enableSuggestions :: Bool -- ^ If we encounter unknown modules, should we suggest modules -- that have a similar name. 
, finder_workingDirectory :: Maybe FilePath , finder_thisPackageName :: Maybe FastString , finder_hiddenModules :: Set.Set ModuleName , finder_reexportedModules :: Set.Set ModuleName , finder_hieDir :: Maybe FilePath , finder_hieSuf :: String , finder_hiDir :: Maybe FilePath , finder_hiSuf :: String , finder_dynHiSuf :: String , finder_objectDir :: Maybe FilePath , finder_objectSuf :: String , finder_dynObjectSuf :: String , finder_stubDir :: Maybe FilePath } deriving Show ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Home.hs0000644000000000000000000002045714472400113020007 0ustar0000000000000000-- | The home unit is the unit (i.e. compiled package) that contains the module -- we are compiling/typechecking. module GHC.Unit.Home ( GenHomeUnit (..) , HomeUnit , homeUnitId , homeUnitInstantiations , homeUnitInstanceOf , homeUnitInstanceOfMaybe , homeUnitAsUnit , homeUnitMap -- * Predicates , isHomeUnitIndefinite , isHomeUnitDefinite , isHomeUnitInstantiating , isHomeUnit , isHomeUnitId , isHomeUnitInstanceOf , isHomeModule , isHomeInstalledModule , notHomeUnitId , notHomeModule , notHomeModuleMaybe , notHomeInstalledModule , notHomeInstalledModuleMaybe -- * Helpers , mkHomeModule , mkHomeInstalledModule , homeModuleInstantiation , homeModuleNameInstantiation ) where import GHC.Prelude import GHC.Unit.Types import GHC.Unit.Module.Name import Data.Maybe -- | Information about the home unit (i.e., the until that will contain the -- modules we are compiling) -- -- The unit identifier of the instantiating units is left open to allow -- switching from UnitKey (what is provided by the user) to UnitId (internal -- unit identifier) with `homeUnitMap`. -- -- TODO: this isn't implemented yet. UnitKeys are still converted too early into -- UnitIds in GHC.Unit.State.readUnitDataBase data GenHomeUnit u = DefiniteHomeUnit UnitId (Maybe (u, GenInstantiations u)) -- ^ Definite home unit (i.e. that we can compile). 
-- -- Nothing: not an instantiated unit -- Just (i,insts): made definite by instantiating "i" with "insts" | IndefiniteHomeUnit UnitId (GenInstantiations u) -- ^ Indefinite home unit (i.e. that we can only typecheck) -- -- All the holes are instantiated with fake modules from the Hole unit. -- See Note [Representation of module/name variables] in "GHC.Unit" type HomeUnit = GenHomeUnit UnitId -- | Return home unit id homeUnitId :: GenHomeUnit u -> UnitId homeUnitId (DefiniteHomeUnit u _) = u homeUnitId (IndefiniteHomeUnit u _) = u -- | Return home unit instantiations homeUnitInstantiations :: GenHomeUnit u -> GenInstantiations u homeUnitInstantiations (DefiniteHomeUnit _ Nothing) = [] homeUnitInstantiations (DefiniteHomeUnit _ (Just (_,is))) = is homeUnitInstantiations (IndefiniteHomeUnit _ is) = is -- | Return the unit id of the unit that is instantiated by the home unit. -- -- E.g. if home unit = q[A=p:B,...] we return q. -- -- If the home unit is not an instance of another unit, we return its own unit -- id (it is an instance of itself if you will). homeUnitInstanceOf :: HomeUnit -> UnitId homeUnitInstanceOf h = fromMaybe (homeUnitId h) (homeUnitInstanceOfMaybe h) -- | Return the unit id of the unit that is instantiated by the home unit. -- -- E.g. if home unit = q[A=p:B,...] we return (Just q). -- -- If the home unit is not an instance of another unit, we return Nothing. homeUnitInstanceOfMaybe :: GenHomeUnit u -> Maybe u homeUnitInstanceOfMaybe (DefiniteHomeUnit _ (Just (u,_))) = Just u homeUnitInstanceOfMaybe _ = Nothing -- | Return the home unit as a normal unit. -- -- We infer from the home unit itself the kind of unit we create: -- 1. If the home unit is definite, we must be compiling so we return a real -- unit. The definite home unit may be the result of a unit instantiation, -- say `p = q[A=r:X]`. 
In this case we could have returned a virtual unit -- `q[A=r:X]` but it's not what the clients of this function expect, -- especially because `p` is lost when we do this. The unit id of a virtual -- unit is made up internally so `unitId(q[A=r:X])` is not equal to `p`. -- -- 2. If the home unit is indefinite we can only create a virtual unit from -- it. It's ok because we must be only typechecking the home unit so we won't -- produce any code object that rely on the unit id of this virtual unit. homeUnitAsUnit :: HomeUnit -> Unit homeUnitAsUnit (DefiniteHomeUnit u _) = RealUnit (Definite u) homeUnitAsUnit (IndefiniteHomeUnit u is) = mkVirtUnit u is -- | Map over the unit identifier for instantiating units homeUnitMap :: IsUnitId v => (u -> v) -> GenHomeUnit u -> GenHomeUnit v homeUnitMap _ (DefiniteHomeUnit u Nothing) = DefiniteHomeUnit u Nothing homeUnitMap f (DefiniteHomeUnit u (Just (i,is))) = DefiniteHomeUnit u (Just (f i, mapInstantiations f is)) homeUnitMap f (IndefiniteHomeUnit u is) = IndefiniteHomeUnit u (mapInstantiations f is) ---------------------------- -- Predicates ---------------------------- -- | Test if we are type-checking an indefinite unit -- -- (if it is not, we should never use on-the-fly renaming) isHomeUnitIndefinite :: GenHomeUnit u -> Bool isHomeUnitIndefinite (DefiniteHomeUnit {}) = False isHomeUnitIndefinite (IndefiniteHomeUnit {}) = True -- | Test if we are compiling a definite unit -- -- (if it is, we should never use on-the-fly renaming) isHomeUnitDefinite :: GenHomeUnit u -> Bool isHomeUnitDefinite (DefiniteHomeUnit {}) = True isHomeUnitDefinite (IndefiniteHomeUnit {}) = False -- | Test if we are compiling by instantiating a definite unit isHomeUnitInstantiating :: GenHomeUnit u -> Bool isHomeUnitInstantiating u = isHomeUnitDefinite u && not (null (homeUnitInstantiations u)) -- | Test if the unit is the home unit isHomeUnit :: HomeUnit -> Unit -> Bool isHomeUnit hu u = u == homeUnitAsUnit hu -- | Test if the unit-id is the home 
unit-id isHomeUnitId :: GenHomeUnit u -> UnitId -> Bool isHomeUnitId hu uid = uid == homeUnitId hu -- | Test if the unit-id is not the home unit-id notHomeUnitId :: Maybe (GenHomeUnit u) -> UnitId -> Bool notHomeUnitId Nothing _ = True notHomeUnitId (Just hu) uid = not (isHomeUnitId hu uid) -- | Test if the home unit is an instance of the given unit-id isHomeUnitInstanceOf :: HomeUnit -> UnitId -> Bool isHomeUnitInstanceOf hu u = homeUnitInstanceOf hu == u -- | Test if the module comes from the home unit isHomeModule :: HomeUnit -> Module -> Bool isHomeModule hu m = isHomeUnit hu (moduleUnit m) -- | Test if the module comes from the home unit isHomeInstalledModule :: GenHomeUnit u -> InstalledModule -> Bool isHomeInstalledModule hu m = isHomeUnitId hu (moduleUnit m) -- | Test if a module doesn't come from the given home unit notHomeInstalledModule :: GenHomeUnit u -> InstalledModule -> Bool notHomeInstalledModule hu m = not (isHomeInstalledModule hu m) -- | Test if a module doesn't come from the given home unit notHomeInstalledModuleMaybe :: Maybe (GenHomeUnit u) -> InstalledModule -> Bool notHomeInstalledModuleMaybe mh m = fromMaybe True $ fmap (`notHomeInstalledModule` m) mh -- | Test if a module doesn't come from the given home unit notHomeModule :: HomeUnit -> Module -> Bool notHomeModule hu m = not (isHomeModule hu m) -- | Test if a module doesn't come from the given home unit notHomeModuleMaybe :: Maybe HomeUnit -> Module -> Bool notHomeModuleMaybe mh m = fromMaybe True $ fmap (`notHomeModule` m) mh ---------------------------- -- helpers ---------------------------- -- | Make a module in home unit mkHomeModule :: HomeUnit -> ModuleName -> Module mkHomeModule hu = mkModule (homeUnitAsUnit hu) -- | Make a module in home unit mkHomeInstalledModule :: GenHomeUnit u -> ModuleName -> InstalledModule mkHomeInstalledModule hu = mkModule (homeUnitId hu) -- | Return the module that is used to instantiate the given home module name. 
-- If the ModuleName doesn't refer to a signature, return the actual home -- module. -- -- E.g., the instantiating module of @A@ in @p[A=q[]:B]@ is @q[]:B@. -- the instantiating module of @A@ in @p@ is @p:A@. homeModuleNameInstantiation :: HomeUnit -> ModuleName -> Module homeModuleNameInstantiation hu mod_name = case lookup mod_name (homeUnitInstantiations hu) of Nothing -> mkHomeModule hu mod_name Just mod -> mod -- | Return the module that is used to instantiate the given home module. -- -- If the given module isn't a module hole, return the actual home module. -- -- E.g., the instantiating module of @p:A@ in @p[A=q[]:B]@ is @q[]:B@. -- the instantiating module of @r:A@ in @p[A=q[]:B]@ is @r:A@. -- the instantiating module of @p:A@ in @p@ is @p:A@. -- the instantiating module of @r:A@ in @p@ is @r:A@. homeModuleInstantiation :: Maybe HomeUnit -> Module -> Module homeModuleInstantiation mhu mod | Just hu <- mhu , isHomeModule hu mod = homeModuleNameInstantiation hu (moduleName mod) | otherwise = mod ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Home/ModInfo.hs0000644000000000000000000001033614472400113021335 0ustar0000000000000000-- | Info about modules in the "home" unit module GHC.Unit.Home.ModInfo ( HomeModInfo (..) , HomePackageTable , emptyHomePackageTable , lookupHpt , eltsHpt , filterHpt , allHpt , anyHpt , mapHpt , delFromHpt , addToHpt , addHomeModInfoToHpt , addListToHpt , lookupHptDirectly , lookupHptByModule , listToHpt , listHMIToHpt , pprHPT ) where import GHC.Prelude import GHC.Unit.Module.ModIface import GHC.Unit.Module.ModDetails import GHC.Unit.Module import GHC.Linker.Types ( Linkable(..) 
) import GHC.Types.Unique import GHC.Types.Unique.DFM import GHC.Utils.Outputable import Data.List (sortOn) import Data.Ord -- | Information about modules in the package being compiled data HomeModInfo = HomeModInfo { hm_iface :: !ModIface -- ^ The basic loaded interface file: every loaded module has one of -- these, even if it is imported from another package , hm_details :: ModDetails -- ^ Extra information that has been created from the 'ModIface' for -- the module, typically during typechecking -- This field is LAZY because a ModDetails is constructed by knot tying. , hm_linkable :: !(Maybe Linkable) -- ^ The actual artifact we would like to link to access things in -- this module. -- -- 'hm_linkable' might be Nothing: -- -- 1. If this is an .hs-boot module -- -- 2. Temporarily during compilation if we pruned away -- the old linkable because it was out of date. -- -- After a complete compilation ('GHC.load'), all 'hm_linkable' fields -- in the 'HomePackageTable' will be @Just@. -- -- When re-linking a module ('GHC.Driver.Main.HscNoRecomp'), we construct the -- 'HomeModInfo' by building a new 'ModDetails' from the old -- 'ModIface' (only). 
} -- | Helps us find information about modules in the home package type HomePackageTable = DModuleNameEnv HomeModInfo -- Domain = modules in the home unit that have been fully compiled -- "home" unit id cached (implicit) here for convenience -- | Constructs an empty HomePackageTable emptyHomePackageTable :: HomePackageTable emptyHomePackageTable = emptyUDFM lookupHpt :: HomePackageTable -> ModuleName -> Maybe HomeModInfo lookupHpt = lookupUDFM lookupHptDirectly :: HomePackageTable -> Unique -> Maybe HomeModInfo lookupHptDirectly = lookupUDFM_Directly eltsHpt :: HomePackageTable -> [HomeModInfo] eltsHpt = eltsUDFM filterHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> HomePackageTable filterHpt = filterUDFM allHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> Bool allHpt = allUDFM anyHpt :: (HomeModInfo -> Bool) -> HomePackageTable -> Bool anyHpt = anyUDFM mapHpt :: (HomeModInfo -> HomeModInfo) -> HomePackageTable -> HomePackageTable mapHpt = mapUDFM delFromHpt :: HomePackageTable -> ModuleName -> HomePackageTable delFromHpt = delFromUDFM addToHpt :: HomePackageTable -> ModuleName -> HomeModInfo -> HomePackageTable addToHpt = addToUDFM addHomeModInfoToHpt :: HomeModInfo -> HomePackageTable -> HomePackageTable addHomeModInfoToHpt hmi hpt = addToHpt hpt (moduleName (mi_module (hm_iface hmi))) hmi addListToHpt :: HomePackageTable -> [(ModuleName, HomeModInfo)] -> HomePackageTable addListToHpt = addListToUDFM listToHpt :: [(ModuleName, HomeModInfo)] -> HomePackageTable listToHpt = listToUDFM listHMIToHpt :: [HomeModInfo] -> HomePackageTable listHMIToHpt hmis = listToHpt [(moduleName (mi_module (hm_iface hmi)), hmi) | hmi <- sorted_hmis] where -- Sort to put Non-boot things last, so they overwrite the boot interfaces -- in the HPT, other than that, the order doesn't matter sorted_hmis = sortOn (Down . mi_boot . 
hm_iface) hmis lookupHptByModule :: HomePackageTable -> Module -> Maybe HomeModInfo -- The HPT is indexed by ModuleName, not Module, -- we must check for a hit on the right Module lookupHptByModule hpt mod = case lookupHpt hpt (moduleName mod) of Just hm | mi_module (hm_iface hm) == mod -> Just hm _otherwise -> Nothing pprHPT :: HomePackageTable -> SDoc -- A bit arbitrary for now pprHPT hpt = pprUDFM hpt $ \hms -> vcat [ ppr (mi_module (hm_iface hm)) | hm <- hms ] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Info.hs0000644000000000000000000002227514472400113020012 0ustar0000000000000000{-# LANGUAGE RecordWildCards, FlexibleInstances, MultiParamTypeClasses #-} -- | Info about installed units (compiled libraries) module GHC.Unit.Info ( GenericUnitInfo (..) , GenUnitInfo , UnitInfo , UnitKey (..) , UnitKeyInfo , mkUnitKeyInfo , mapUnitInfo , mkUnitPprInfo , mkUnit , PackageId(..) , PackageName(..) , Version(..) , unitPackageNameString , unitPackageIdString , pprUnitInfo , collectIncludeDirs , collectExtraCcOpts , collectLibraryDirs , collectFrameworks , collectFrameworksDirs , unitHsLibs ) where import GHC.Prelude import GHC.Platform.Ways import GHC.Utils.Misc import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Types.Unique import GHC.Data.FastString import qualified GHC.Data.ShortText as ST import GHC.Unit.Module as Module import GHC.Unit.Ppr import GHC.Unit.Database import GHC.Settings import Data.Version import Data.Bifunctor import Data.List (isPrefixOf, stripPrefix) -- | Information about an installed unit -- -- We parameterize on the unit identifier: -- * UnitKey: identifier used in the database (cf 'UnitKeyInfo') -- * UnitId: identifier used to generate code (cf 'UnitInfo') -- -- These two identifiers are different for wired-in packages. 
See Note [About -- units] in "GHC.Unit" type GenUnitInfo unit = GenericUnitInfo PackageId PackageName unit ModuleName (GenModule (GenUnit unit)) -- | Information about an installed unit (units are identified by their database -- UnitKey) type UnitKeyInfo = GenUnitInfo UnitKey -- | Information about an installed unit (units are identified by their internal -- UnitId) type UnitInfo = GenUnitInfo UnitId -- | Convert a DbUnitInfo (read from a package database) into `UnitKeyInfo` mkUnitKeyInfo :: DbUnitInfo -> UnitKeyInfo mkUnitKeyInfo = mapGenericUnitInfo mkUnitKey' mkPackageIdentifier' mkPackageName' mkModuleName' mkModule' where mkPackageIdentifier' = PackageId . mkFastStringByteString mkPackageName' = PackageName . mkFastStringByteString mkUnitKey' = UnitKey . mkFastStringByteString mkModuleName' = mkModuleNameFS . mkFastStringByteString mkVirtUnitKey' i = case i of DbInstUnitId cid insts -> mkVirtUnit (mkUnitKey' cid) (fmap (bimap mkModuleName' mkModule') insts) DbUnitId uid -> RealUnit (Definite (mkUnitKey' uid)) mkModule' m = case m of DbModule uid n -> mkModule (mkVirtUnitKey' uid) (mkModuleName' n) DbModuleVar n -> mkHoleModule (mkModuleName' n) -- | Map over the unit parameter mapUnitInfo :: IsUnitId v => (u -> v) -> GenUnitInfo u -> GenUnitInfo v mapUnitInfo f = mapGenericUnitInfo f -- unit identifier id -- package identifier id -- package name id -- module name (fmap (mapGenUnit f)) -- instantiating modules newtype PackageId = PackageId FastString deriving (Eq) newtype PackageName = PackageName { unPackageName :: FastString } deriving (Eq) instance Uniquable PackageId where getUnique (PackageId n) = getUnique n instance Uniquable PackageName where getUnique (PackageName n) = getUnique n instance Outputable PackageId where ppr (PackageId str) = ftext str instance Outputable PackageName where ppr (PackageName str) = ftext str unitPackageIdString :: GenUnitInfo u -> String unitPackageIdString pkg = unpackFS str where PackageId str = unitPackageId pkg 
unitPackageNameString :: GenUnitInfo u -> String unitPackageNameString pkg = unpackFS str where PackageName str = unitPackageName pkg pprUnitInfo :: UnitInfo -> SDoc pprUnitInfo GenericUnitInfo {..} = vcat [ field "name" (ppr unitPackageName), field "version" (text (showVersion unitPackageVersion)), field "id" (ppr unitId), field "exposed" (ppr unitIsExposed), field "exposed-modules" (ppr unitExposedModules), field "hidden-modules" (fsep (map ppr unitHiddenModules)), field "trusted" (ppr unitIsTrusted), field "import-dirs" (fsep (map (text . ST.unpack) unitImportDirs)), field "library-dirs" (fsep (map (text . ST.unpack) unitLibraryDirs)), field "dynamic-library-dirs" (fsep (map (text . ST.unpack) unitLibraryDynDirs)), field "hs-libraries" (fsep (map (text . ST.unpack) unitLibraries)), field "extra-libraries" (fsep (map (text . ST.unpack) unitExtDepLibsSys)), field "extra-ghci-libraries" (fsep (map (text . ST.unpack) unitExtDepLibsGhc)), field "include-dirs" (fsep (map (text . ST.unpack) unitIncludeDirs)), field "includes" (fsep (map (text . ST.unpack) unitIncludes)), field "depends" (fsep (map ppr unitDepends)), field "cc-options" (fsep (map (text . ST.unpack) unitCcOptions)), field "ld-options" (fsep (map (text . ST.unpack) unitLinkerOptions)), field "framework-dirs" (fsep (map (text . ST.unpack) unitExtDepFrameworkDirs)), field "frameworks" (fsep (map (text . ST.unpack) unitExtDepFrameworks)), field "haddock-interfaces" (fsep (map (text . ST.unpack) unitHaddockInterfaces)), field "haddock-html" (fsep (map (text . ST.unpack) unitHaddockHTMLs)) ] where field name body = text name <> colon <+> nest 4 body -- | Make a `Unit` from a `UnitInfo` -- -- If the unit is definite, make a `RealUnit` from `unitId` field. -- -- If the unit is indefinite, make a `VirtUnit` from `unitInstanceOf` and -- `unitInstantiations` fields. Note that in this case we don't keep track of -- `unitId`. It can be retrieved later with "improvement", i.e. 
matching on -- `unitInstanceOf/unitInstantiations` fields (see Note [About units] in -- GHC.Unit). mkUnit :: UnitInfo -> Unit mkUnit p | unitIsIndefinite p = mkVirtUnit (unitInstanceOf p) (unitInstantiations p) | otherwise = RealUnit (Definite (unitId p)) -- | Create a UnitPprInfo from a UnitInfo mkUnitPprInfo :: (u -> FastString) -> GenUnitInfo u -> UnitPprInfo mkUnitPprInfo ufs i = UnitPprInfo (ufs (unitId i)) (unitPackageNameString i) (unitPackageVersion i) ((unpackFS . unPackageName) <$> unitComponentName i) -- | Find all the include directories in the given units collectIncludeDirs :: [UnitInfo] -> [FilePath] collectIncludeDirs ps = map ST.unpack $ ordNub (filter (not . ST.null) (concatMap unitIncludeDirs ps)) -- | Find all the C-compiler options in the given units collectExtraCcOpts :: [UnitInfo] -> [String] collectExtraCcOpts ps = map ST.unpack (concatMap unitCcOptions ps) -- | Find all the library directories in the given units for the given ways collectLibraryDirs :: Ways -> [UnitInfo] -> [FilePath] collectLibraryDirs ws = ordNub . filter notNull . concatMap (libraryDirsForWay ws) -- | Find all the frameworks in the given units collectFrameworks :: [UnitInfo] -> [String] collectFrameworks ps = map ST.unpack (concatMap unitExtDepFrameworks ps) -- | Find all the package framework paths in these and the preload packages collectFrameworksDirs :: [UnitInfo] -> [String] collectFrameworksDirs ps = map ST.unpack (ordNub (filter (not . ST.null) (concatMap unitExtDepFrameworkDirs ps))) -- | Either the 'unitLibraryDirs' or 'unitLibraryDynDirs' as appropriate for the way. libraryDirsForWay :: Ways -> UnitInfo -> [String] libraryDirsForWay ws | hasWay ws WayDyn = map ST.unpack . unitLibraryDynDirs | otherwise = map ST.unpack . unitLibraryDirs unitHsLibs :: GhcNameVersion -> Ways -> UnitInfo -> [String] unitHsLibs namever ways0 p = map (mkDynName . addSuffix . 
ST.unpack) (unitLibraries p) where ways1 = removeWay WayDyn ways0 -- the name of a shared library is libHSfoo-ghc.so -- we leave out the _dyn, because it is superfluous tag = waysTag (fullWays ways1) rts_tag = waysTag ways1 mkDynName x | not (ways0 `hasWay` WayDyn) = x | "HS" `isPrefixOf` x = x ++ dynLibSuffix namever -- For non-Haskell libraries, we use the name "Cfoo". The .a -- file is libCfoo.a, and the .so is libfoo.so. That way the -- linker knows what we mean for the vanilla (-lCfoo) and dyn -- (-lfoo) ways. We therefore need to strip the 'C' off here. | Just x' <- stripPrefix "C" x = x' | otherwise = panic ("Don't understand library name " ++ x) -- Add _thr and other rts suffixes to packages named -- `rts` or `rts-1.0`. Why both? Traditionally the rts -- package is called `rts` only. However the tooling -- usually expects a package name to have a version. -- As such we will gradually move towards the `rts-1.0` -- package name, at which point the `rts` package name -- will eventually be unused. -- -- This change elevates the need to add custom hooks -- and handling specifically for the `rts` package for -- example in ghc-cabal. addSuffix rts@"HSrts" = rts ++ (expandTag rts_tag) addSuffix rts@"HSrts-1.0.2" = rts ++ (expandTag rts_tag) addSuffix other_lib = other_lib ++ (expandTag tag) expandTag t | null t = "" | otherwise = '_':t ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module.hs0000644000000000000000000001041714472400113020337 0ustar0000000000000000{-# LANGUAGE ExplicitNamespaces #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RecordWildCards #-} {- (c) The University of Glasgow, 2004-2006 Module ~~~~~~~~~~ Simply the name of a module, represented as a FastString. These are Uniquable, hence we can build Maps with Modules as the keys. 
-} module GHC.Unit.Module ( module GHC.Unit.Types -- * The ModuleName type , module GHC.Unit.Module.Name -- * The ModLocation type , module GHC.Unit.Module.Location -- * ModuleEnv , module GHC.Unit.Module.Env -- * Generalization , getModuleInstantiation , getUnitInstantiations , uninstantiateInstantiatedUnit , uninstantiateInstantiatedModule -- * The Module type , mkHoleModule , isHoleModule , stableModuleCmp , moduleStableString , moduleIsDefinite , HasModule(..) , ContainsModule(..) , installedModuleEq ) where import GHC.Prelude import GHC.Types.Unique.DSet import GHC.Unit.Types import GHC.Unit.Module.Name import GHC.Unit.Module.Location import GHC.Unit.Module.Env import GHC.Utils.Misc -- | A 'Module' is definite if it has no free holes. moduleIsDefinite :: Module -> Bool moduleIsDefinite = isEmptyUniqDSet . moduleFreeHoles -- | Get a string representation of a 'Module' that's unique and stable -- across recompilations. -- eg. "$aeson_70dylHtv1FFGeai1IoxcQr$Data.Aeson.Types.Internal" moduleStableString :: Module -> String moduleStableString Module{..} = "$" ++ unitString moduleUnit ++ "$" ++ moduleNameString moduleName -- | This gives a stable ordering, as opposed to the Ord instance which -- gives an ordering based on the 'Unique's of the components, which may -- not be stable from run to run of the compiler. stableModuleCmp :: Module -> Module -> Ordering stableModuleCmp (Module p1 n1) (Module p2 n2) = (p1 `stableUnitCmp` p2) `thenCmp` (n1 `stableModuleNameCmp` n2) class ContainsModule t where extractModule :: t -> Module class HasModule m where getModule :: m Module -- | Test if a 'Module' corresponds to a given 'InstalledModule', -- modulo instantiation. 
installedModuleEq :: InstalledModule -> Module -> Bool installedModuleEq imod mod = fst (getModuleInstantiation mod) == imod {- ************************************************************************ * * Hole substitutions * * ************************************************************************ -} -- | Given a possibly on-the-fly instantiated module, split it into -- a 'Module' that we definitely can find on-disk, as well as an -- instantiation if we need to instantiate it on the fly. If the -- instantiation is @Nothing@ no on-the-fly renaming is needed. getModuleInstantiation :: Module -> (InstalledModule, Maybe InstantiatedModule) getModuleInstantiation m = let (uid, mb_iuid) = getUnitInstantiations (moduleUnit m) in (Module uid (moduleName m), fmap (\iuid -> Module iuid (moduleName m)) mb_iuid) -- | Return the unit-id this unit is an instance of and the module instantiations (if any). getUnitInstantiations :: Unit -> (UnitId, Maybe InstantiatedUnit) getUnitInstantiations (VirtUnit iuid) = (instUnitInstanceOf iuid, Just iuid) getUnitInstantiations (RealUnit (Definite uid)) = (uid, Nothing) getUnitInstantiations (HoleUnit {}) = error "Hole unit" -- | Remove instantiations of the given instantiated unit uninstantiateInstantiatedUnit :: InstantiatedUnit -> InstantiatedUnit uninstantiateInstantiatedUnit u = mkInstantiatedUnit (instUnitInstanceOf u) (map (\(m,_) -> (m, mkHoleModule m)) (instUnitInsts u)) -- | Remove instantiations of the given module instantiated unit uninstantiateInstantiatedModule :: InstantiatedModule -> InstantiatedModule uninstantiateInstantiatedModule (Module uid n) = Module (uninstantiateInstantiatedUnit uid) n -- | Test if a Module is not instantiated isHoleModule :: GenModule (GenUnit u) -> Bool isHoleModule (Module HoleUnit _) = True isHoleModule _ = False -- | Create a hole Module mkHoleModule :: ModuleName -> GenModule (GenUnit u) mkHoleModule = Module HoleUnit 
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Deps.hs0000644000000000000000000005150414472400113021234 0ustar0000000000000000-- | Dependencies and Usage of a module module GHC.Unit.Module.Deps ( Dependencies , mkDependencies , noDependencies , dep_direct_mods , dep_direct_pkgs , dep_sig_mods , dep_trusted_pkgs , dep_orphs , dep_plugin_pkgs , dep_finsts , dep_boot_mods , dep_orphs_update , dep_finsts_update , pprDeps , Usage (..) , ImportAvails (..) ) where import GHC.Prelude import GHC.Types.SafeHaskell import GHC.Types.Name import GHC.Unit.Module.Name import GHC.Unit.Module.Imported import GHC.Unit.Module import GHC.Unit.Home import GHC.Unit.State import GHC.Utils.Fingerprint import GHC.Utils.Binary import GHC.Utils.Outputable import Data.List (sortBy, sort, partition) import Data.Set (Set) import qualified Data.Set as Set import Data.Bifunctor -- | Dependency information about ALL modules and packages below this one -- in the import hierarchy. This is the serialisable version of `ImportAvails`. -- -- Invariant: the dependencies of a module @M@ never includes @M@. -- -- Invariant: none of the lists contain duplicates. -- -- Invariant: lists are ordered canonically (e.g. using stableModuleCmp) -- -- See Note [Transitive Information in Dependencies] data Dependencies = Deps { dep_direct_mods :: Set (UnitId, ModuleNameWithIsBoot) -- ^ All home-package modules which are directly imported by this one. -- This may include modules from other units when using multiple home units , dep_direct_pkgs :: Set UnitId -- ^ All packages directly imported by this module -- I.e. packages to which this module's direct imports belong. -- Does not include other home units when using multiple home units. 
-- Modules from these units will go in `dep_direct_mods` , dep_plugin_pkgs :: Set UnitId -- ^ All units needed for plugins ------------------------------------ -- Transitive information below here , dep_sig_mods :: ![ModuleName] -- ^ Transitive closure of hsig files in the home package , dep_trusted_pkgs :: Set UnitId -- Packages which we are required to trust -- when the module is imported as a safe import -- (Safe Haskell). See Note [Tracking Trust Transitively] in GHC.Rename.Names , dep_boot_mods :: Set (UnitId, ModuleNameWithIsBoot) -- ^ All modules which have boot files below this one, and whether we -- should use the boot file or not. -- This information is only used to populate the eps_is_boot field. -- See Note [Structure of dep_boot_mods] , dep_orphs :: [Module] -- ^ Transitive closure of orphan modules (whether -- home or external pkg). -- -- (Possible optimization: don't include family -- instance orphans as they are anyway included in -- 'dep_finsts'. But then be careful about code -- which relies on dep_orphs having the complete list!) -- This does NOT include us, unlike 'imp_orphs'. , dep_finsts :: [Module] -- ^ Transitive closure of depended upon modules which -- contain family instances (whether home or external). -- This is used by 'checkFamInstConsistency'. This -- does NOT include us, unlike 'imp_finsts'. See Note -- [The type family instance consistency story]. } deriving( Eq ) -- Equality used only for old/new comparison in GHC.Iface.Recomp.addFingerprints -- See 'GHC.Tc.Utils.ImportAvails' for details on dependencies. -- | Extract information from the rename and typecheck phases to produce -- a dependencies information for the module being compiled. -- -- The fourth argument is a list of plugin modules. mkDependencies :: HomeUnit -> Module -> ImportAvails -> [Module] -> Dependencies mkDependencies home_unit mod imports plugin_mods = let (home_plugins, external_plugins) = partition (isHomeUnit home_unit . 
moduleUnit) plugin_mods plugin_units = Set.fromList (map (toUnitId . moduleUnit) external_plugins) all_direct_mods = foldr (\mn m -> extendInstalledModuleEnv m mn (GWIB (moduleName mn) NotBoot)) (imp_direct_dep_mods imports) (map (fmap toUnitId) home_plugins) modDepsElts = Set.fromList . installedModuleEnvElts -- It's OK to use nonDetEltsUFM here because sorting by module names -- restores determinism direct_mods = first moduleUnit `Set.map` modDepsElts (delInstalledModuleEnv all_direct_mods (toUnitId <$> mod)) -- M.hi-boot can be in the imp_dep_mods, but we must remove -- it before recording the modules on which this one depends! -- (We want to retain M.hi-boot in imp_dep_mods so that -- loadHiBootInterface can see if M's direct imports depend -- on M.hi-boot, and hence that we should do the hi-boot consistency -- check.) dep_orphs = filter (/= mod) (imp_orphs imports) -- We must also remove self-references from imp_orphs. See -- Note [Module self-dependency] direct_pkgs = imp_dep_direct_pkgs imports -- Set the packages required to be Safe according to Safe Haskell. -- See Note [Tracking Trust Transitively] in GHC.Rename.Names trust_pkgs = imp_trust_pkgs imports -- If there's a non-boot import, then it shadows the boot import -- coming from the dependencies source_mods = first moduleUnit `Set.map` modDepsElts (imp_boot_mods imports) sig_mods = filter (/= (moduleName mod)) $ imp_sig_mods imports in Deps { dep_direct_mods = direct_mods , dep_direct_pkgs = direct_pkgs , dep_plugin_pkgs = plugin_units , dep_sig_mods = sort sig_mods , dep_trusted_pkgs = trust_pkgs , dep_boot_mods = source_mods , dep_orphs = sortBy stableModuleCmp dep_orphs , dep_finsts = sortBy stableModuleCmp (imp_finsts imports) -- sort to get into canonical order -- NB. 
remember to use lexicographic ordering } -- | Update module dependencies containing orphans (used by Backpack) dep_orphs_update :: Monad m => Dependencies -> ([Module] -> m [Module]) -> m Dependencies dep_orphs_update deps f = do r <- f (dep_orphs deps) pure (deps { dep_orphs = sortBy stableModuleCmp r }) -- | Update module dependencies containing family instances (used by Backpack) dep_finsts_update :: Monad m => Dependencies -> ([Module] -> m [Module]) -> m Dependencies dep_finsts_update deps f = do r <- f (dep_finsts deps) pure (deps { dep_finsts = sortBy stableModuleCmp r }) instance Binary Dependencies where put_ bh deps = do put_ bh (dep_direct_mods deps) put_ bh (dep_direct_pkgs deps) put_ bh (dep_plugin_pkgs deps) put_ bh (dep_trusted_pkgs deps) put_ bh (dep_sig_mods deps) put_ bh (dep_boot_mods deps) put_ bh (dep_orphs deps) put_ bh (dep_finsts deps) get bh = do dms <- get bh dps <- get bh plugin_pkgs <- get bh tps <- get bh hsigms <- get bh sms <- get bh os <- get bh fis <- get bh return (Deps { dep_direct_mods = dms , dep_direct_pkgs = dps , dep_plugin_pkgs = plugin_pkgs , dep_sig_mods = hsigms , dep_boot_mods = sms , dep_trusted_pkgs = tps , dep_orphs = os, dep_finsts = fis }) noDependencies :: Dependencies noDependencies = Deps { dep_direct_mods = Set.empty , dep_direct_pkgs = Set.empty , dep_plugin_pkgs = Set.empty , dep_sig_mods = [] , dep_boot_mods = Set.empty , dep_trusted_pkgs = Set.empty , dep_orphs = [] , dep_finsts = [] } -- | Pretty-print unit dependencies pprDeps :: UnitState -> Dependencies -> SDoc pprDeps unit_state (Deps { dep_direct_mods = dmods , dep_boot_mods = bmods , dep_plugin_pkgs = plgns , dep_orphs = orphs , dep_direct_pkgs = pkgs , dep_trusted_pkgs = tps , dep_finsts = finsts }) = pprWithUnitState unit_state $ vcat [text "direct module dependencies:" <+> ppr_set ppr_mod dmods, text "boot module dependencies:" <+> ppr_set ppr bmods, text "direct package dependencies:" <+> ppr_set ppr pkgs, text "plugin package dependencies:" <+> 
ppr_set ppr plgns, if null tps then empty else text "trusted package dependencies:" <+> ppr_set ppr tps, text "orphans:" <+> fsep (map ppr orphs), text "family instance modules:" <+> fsep (map ppr finsts) ] where ppr_mod (uid, (GWIB mod IsBoot)) = ppr uid <> colon <> ppr mod <+> text "[boot]" ppr_mod (uid, (GWIB mod NotBoot)) = ppr uid <> colon <> ppr mod ppr_set :: Outputable a => (a -> SDoc) -> Set a -> SDoc ppr_set w = fsep . fmap w . Set.toAscList -- | Records modules for which changes may force recompilation of this module -- See wiki: https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/recompilation-avoidance -- -- This differs from Dependencies. A module X may be in the dep_mods of this -- module (via an import chain) but if we don't use anything from X it won't -- appear in our Usage data Usage -- | Module from another package = UsagePackageModule { usg_mod :: Module, -- ^ External package module depended on usg_mod_hash :: Fingerprint, -- ^ Cached module ABI fingerprint (corresponds to mi_mod_hash) usg_safe :: IsSafeImport -- ^ Was this module imported as a safe import } -- | Module from the current package | UsageHomeModule { usg_mod_name :: ModuleName, -- ^ Name of the module usg_unit_id :: UnitId, -- ^ UnitId of the HomeUnit the module is from usg_mod_hash :: Fingerprint, -- ^ Cached module ABI fingerprint (corresponds to mi_mod_hash). -- This may be out dated after recompilation was avoided, but is -- still used as a fast initial check for change during -- recompilation avoidance. usg_entities :: [(OccName,Fingerprint)], -- ^ Entities we depend on, sorted by occurrence name and fingerprinted. -- NB: usages are for parent names only, e.g. type constructors -- but not the associated data constructors. 
usg_exports :: Maybe Fingerprint, -- ^ Fingerprint for the export list of this module, -- if we directly imported it (and hence we depend on its export list) usg_safe :: IsSafeImport -- ^ Was this module imported as a safe import } -- | A file upon which the module depends, e.g. a CPP #include, or using TH's -- 'addDependentFile' | UsageFile { usg_file_path :: FilePath, -- ^ External file dependency. From a CPP #include or TH -- addDependentFile. Should be absolute. usg_file_hash :: Fingerprint, -- ^ 'Fingerprint' of the file contents. usg_file_label :: Maybe String -- ^ An optional string which is used in recompilation messages if -- file in question has changed. -- Note: We don't consider things like modification timestamps -- here, because there's no reason to recompile if the actual -- contents don't change. This previously lead to odd -- recompilation behaviors; see #8114 } | UsageHomeModuleInterface { usg_mod_name :: ModuleName -- ^ Name of the module , usg_unit_id :: UnitId -- ^ UnitId of the HomeUnit the module is from , usg_iface_hash :: Fingerprint -- ^ The *interface* hash of the module, not the ABI hash. -- This changes when anything about the interface (and hence the -- module) has changed. -- UsageHomeModuleInterface is *only* used for recompilation -- checking when using TemplateHaskell in the interpreter (where -- some modules are loaded as BCOs). } -- | A requirement which was merged into this one. | UsageMergedRequirement { usg_mod :: Module, usg_mod_hash :: Fingerprint } deriving( Eq ) -- The export list field is (Just v) if we depend on the export list: -- i.e. we imported the module directly, whether or not we -- enumerated the things we imported, or just imported -- everything -- We need to recompile if M's exports change, because -- if the import was import M, we might now have a name clash -- in the importing module. 
-- if the import was import M(x) M might no longer export x -- The only way we don't depend on the export list is if we have -- import M() -- And of course, for modules that aren't imported directly we don't -- depend on their export lists instance Binary Usage where put_ bh usg@UsagePackageModule{} = do putByte bh 0 put_ bh (usg_mod usg) put_ bh (usg_mod_hash usg) put_ bh (usg_safe usg) put_ bh usg@UsageHomeModule{} = do putByte bh 1 put_ bh (usg_mod_name usg) put_ bh (usg_unit_id usg) put_ bh (usg_mod_hash usg) put_ bh (usg_exports usg) put_ bh (usg_entities usg) put_ bh (usg_safe usg) put_ bh usg@UsageFile{} = do putByte bh 2 put_ bh (usg_file_path usg) put_ bh (usg_file_hash usg) put_ bh (usg_file_label usg) put_ bh usg@UsageMergedRequirement{} = do putByte bh 3 put_ bh (usg_mod usg) put_ bh (usg_mod_hash usg) put_ bh usg@UsageHomeModuleInterface{} = do putByte bh 4 put_ bh (usg_mod_name usg) put_ bh (usg_unit_id usg) put_ bh (usg_iface_hash usg) get bh = do h <- getByte bh case h of 0 -> do nm <- get bh mod <- get bh safe <- get bh return UsagePackageModule { usg_mod = nm, usg_mod_hash = mod, usg_safe = safe } 1 -> do nm <- get bh uid <- get bh mod <- get bh exps <- get bh ents <- get bh safe <- get bh return UsageHomeModule { usg_mod_name = nm, usg_mod_hash = mod, usg_unit_id = uid, usg_exports = exps, usg_entities = ents, usg_safe = safe } 2 -> do fp <- get bh hash <- get bh label <- get bh return UsageFile { usg_file_path = fp, usg_file_hash = hash, usg_file_label = label } 3 -> do mod <- get bh hash <- get bh return UsageMergedRequirement { usg_mod = mod, usg_mod_hash = hash } 4 -> do mod <- get bh uid <- get bh hash <- get bh return UsageHomeModuleInterface { usg_mod_name = mod, usg_unit_id = uid, usg_iface_hash = hash } i -> error ("Binary.get(Usage): " ++ show i) {- Note [Transitive Information in Dependencies] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is important to be careful what information we put in 'Dependencies' because ultimately it ends 
up serialised in an interface file. Interface files must always be kept up-to-date with the state of the world, so if `Dependencies` needs to be updated then the module had to be recompiled just to update `Dependencies`. Before #16885, the dependencies used to contain the transitive closure of all home modules. Therefore, if you added an import somewhere low down in the home package it would recompile nearly every module in your project, just to update this information. Now, we are a bit more careful about what we store and explicitly store transitive information only if it is really needed. ~ Direct Information * dep_direct_mods - Directly imported home package modules * dep_direct_pkgs - Directly imported packages * dep_plgins - Directly used plugins ~ Transitive Information Some features of the compiler require transitive information about what is currently being compiled, so that is explicitly stored separately in the form they need. * dep_trusted_pkgs - Only used for the -fpackage-trust feature * dep_boot_mods - Only used to populate eps_is_boot in -c mode * dep_orphs - Modules with orphan instances * dep_finsts - Modules with type family instances Important note: If you add some transitive information to the interface file then you need to make sure recompilation is triggered when it could be out of date. The correct way to do this is to include the transitive information in the export hash of the module. The export hash is computed in `GHC.Iface.Recomp.addFingerprints`. -} {- Note [Structure of dep_boot_deps] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In `-c` mode we always need to know whether to load the normal or boot version of an interface file, and this can't be determined from just looking at the direct imports. Consider modules with dependencies: ``` A -(S)-> B A -> C -> B -(S)-> B ``` Say when compiling module `A` that we need to load the interface for `B`, do we load `B.hi` or `B.hi-boot`? 
Well, `A` does directly {-# SOURCE #-} import B, so you might think that we would load the `B.hi-boot` file, however this is wrong because `C` imports `B` normally. Therefore in the interface file for `C` we still need to record that there is a hs-boot file for `B` below it but that we now want `B.hi` rather than `B.hi-boot`. When `C` is imported, the fact that it needs `B.hi` clobbers the `{- SOURCE -}` import for `B`. Therefore in mod_boot_deps we store the names of any modules which have hs-boot files, and whether we want to import the .hi or .hi-boot version of the interface file. If you get this wrong, then GHC fails to compile, so there is a test but you might not make it that far if you get this wrong! Question: does this happen even across packages? No: if I need to load the interface for module X from package P I always look for p:X.hi. -} -- | 'ImportAvails' summarises what was imported from where, irrespective of -- whether the imported things are actually used or not. It is used: -- -- * when processing the export list, -- -- * when constructing usage info for the interface file, -- -- * to identify the list of directly imported modules for initialisation -- purposes and for optimised overlap checking of family instances, -- -- * when figuring out what things are really unused -- data ImportAvails = ImportAvails { imp_mods :: ImportedMods, -- = ModuleEnv [ImportedModsVal], -- ^ Domain is all directly-imported modules -- -- See the documentation on ImportedModsVal in -- "GHC.Unit.Module.Imported" for the meaning of the fields. -- -- We need a full ModuleEnv rather than a ModuleNameEnv here, -- because we might be importing modules of the same name from -- different packages. (currently not the case, but might be in the -- future). imp_direct_dep_mods :: InstalledModuleEnv ModuleNameWithIsBoot, -- ^ Home-package modules directly imported by the module being compiled. 
imp_dep_direct_pkgs :: Set UnitId, -- ^ Packages directly needed by the module being compiled imp_trust_own_pkg :: Bool, -- ^ Do we require that our own package is trusted? -- This is to handle efficiently the case where a Safe module imports -- a Trustworthy module that resides in the same package as it. -- See Note [Trust Own Package] in "GHC.Rename.Names" -- Transitive information below here imp_trust_pkgs :: Set UnitId, -- ^ This records the -- packages the current module needs to trust for Safe Haskell -- compilation to succeed. A package is required to be trusted if -- we are dependent on a trustworthy module in that package. -- See Note [Tracking Trust Transitively] in "GHC.Rename.Names" imp_boot_mods :: InstalledModuleEnv ModuleNameWithIsBoot, -- ^ Domain is all modules which have hs-boot files, and whether -- we should import the boot version of interface file. Only used -- in one-shot mode to populate eps_is_boot. imp_sig_mods :: [ModuleName], -- ^ Signature modules below this one imp_orphs :: [Module], -- ^ Orphan modules below us in the import tree (and maybe including -- us for imported modules) imp_finsts :: [Module] -- ^ Family instance modules below us in the import tree (and maybe -- including us for imported modules) } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Env.hs0000644000000000000000000002255214472400113021072 0ustar0000000000000000-- | Module environment module GHC.Unit.Module.Env ( -- * Module mappings ModuleEnv , elemModuleEnv, extendModuleEnv, extendModuleEnvList , extendModuleEnvList_C, plusModuleEnv_C , delModuleEnvList, delModuleEnv, plusModuleEnv, lookupModuleEnv , lookupWithDefaultModuleEnv, mapModuleEnv, mkModuleEnv, emptyModuleEnv , partitionModuleEnv , moduleEnvKeys, moduleEnvElts, moduleEnvToList , unitModuleEnv, isEmptyModuleEnv , extendModuleEnvWith, filterModuleEnv -- * ModuleName mappings , ModuleNameEnv, DModuleNameEnv -- * Sets of Modules , ModuleSet , emptyModuleSet, mkModuleSet, moduleSetElts , 
extendModuleSet, extendModuleSetList, delModuleSet , elemModuleSet, intersectModuleSet, minusModuleSet, unionModuleSet , unitModuleSet, isEmptyModuleSet , unionManyModuleSets -- * InstalledModuleEnv , InstalledModuleEnv , emptyInstalledModuleEnv , lookupInstalledModuleEnv , extendInstalledModuleEnv , filterInstalledModuleEnv , delInstalledModuleEnv , mergeInstalledModuleEnv , plusInstalledModuleEnv , installedModuleEnvElts ) where import GHC.Prelude import GHC.Unit.Module.Name (ModuleName) import GHC.Types.Unique import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Unit.Types import GHC.Utils.Misc import Data.List (sortBy, sort) import Data.Ord import Data.Coerce import Data.Map (Map) import Data.Set (Set) import qualified Data.Map as Map import qualified Data.Set as Set import qualified GHC.Data.FiniteMap as Map import GHC.Utils.Outputable -- | A map keyed off of 'Module's newtype ModuleEnv elt = ModuleEnv (Map NDModule elt) instance Outputable a => Outputable (ModuleEnv a) where ppr (ModuleEnv m) = ppr m {- Note [ModuleEnv performance and determinism] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To prevent accidental reintroduction of nondeterminism the Ord instance for Module was changed to not depend on Unique ordering and to use the lexicographic order. This is potentially expensive, but when measured there was no difference in performance. To be on the safe side and not pessimize ModuleEnv uses nondeterministic ordering on Module and normalizes by doing the lexicographic sort when turning the env to a list. See Note [Unique Determinism] for more information about the source of nondeterminismand and Note [Deterministic UniqFM] for explanation of why it matters for maps. -} newtype NDModule = NDModule { unNDModule :: Module } deriving Eq -- A wrapper for Module with faster nondeterministic Ord. 
-- Don't export, See [ModuleEnv performance and determinism] -- instance Outputable NDModule where ppr (NDModule a) = ppr a instance Ord NDModule where compare (NDModule (Module p1 n1)) (NDModule (Module p2 n2)) = (getUnique p1 `nonDetCmpUnique` getUnique p2) `thenCmp` (getUnique n1 `nonDetCmpUnique` getUnique n2) filterModuleEnv :: (Module -> a -> Bool) -> ModuleEnv a -> ModuleEnv a filterModuleEnv f (ModuleEnv e) = ModuleEnv (Map.filterWithKey (f . unNDModule) e) elemModuleEnv :: Module -> ModuleEnv a -> Bool elemModuleEnv m (ModuleEnv e) = Map.member (NDModule m) e extendModuleEnv :: ModuleEnv a -> Module -> a -> ModuleEnv a extendModuleEnv (ModuleEnv e) m x = ModuleEnv (Map.insert (NDModule m) x e) extendModuleEnvWith :: (a -> a -> a) -> ModuleEnv a -> Module -> a -> ModuleEnv a extendModuleEnvWith f (ModuleEnv e) m x = ModuleEnv (Map.insertWith f (NDModule m) x e) extendModuleEnvList :: ModuleEnv a -> [(Module, a)] -> ModuleEnv a extendModuleEnvList (ModuleEnv e) xs = ModuleEnv (Map.insertList [(NDModule k, v) | (k,v) <- xs] e) extendModuleEnvList_C :: (a -> a -> a) -> ModuleEnv a -> [(Module, a)] -> ModuleEnv a extendModuleEnvList_C f (ModuleEnv e) xs = ModuleEnv (Map.insertListWith f [(NDModule k, v) | (k,v) <- xs] e) plusModuleEnv_C :: (a -> a -> a) -> ModuleEnv a -> ModuleEnv a -> ModuleEnv a plusModuleEnv_C f (ModuleEnv e1) (ModuleEnv e2) = ModuleEnv (Map.unionWith f e1 e2) delModuleEnvList :: ModuleEnv a -> [Module] -> ModuleEnv a delModuleEnvList (ModuleEnv e) ms = ModuleEnv (Map.deleteList (map NDModule ms) e) delModuleEnv :: ModuleEnv a -> Module -> ModuleEnv a delModuleEnv (ModuleEnv e) m = ModuleEnv (Map.delete (NDModule m) e) plusModuleEnv :: ModuleEnv a -> ModuleEnv a -> ModuleEnv a plusModuleEnv (ModuleEnv e1) (ModuleEnv e2) = ModuleEnv (Map.union e1 e2) lookupModuleEnv :: ModuleEnv a -> Module -> Maybe a lookupModuleEnv (ModuleEnv e) m = Map.lookup (NDModule m) e lookupWithDefaultModuleEnv :: ModuleEnv a -> a -> Module -> a 
lookupWithDefaultModuleEnv (ModuleEnv e) x m =
  Map.findWithDefault x (NDModule m) e

-- | Apply @f@ to every element of the environment, keys unchanged.
mapModuleEnv :: (a -> b) -> ModuleEnv a -> ModuleEnv b
mapModuleEnv f (ModuleEnv e) = ModuleEnv (Map.mapWithKey (\_ v -> f v) e)

-- | Split the environment into (entries satisfying @f@, entries that don't).
partitionModuleEnv :: (a -> Bool) -> ModuleEnv a -> (ModuleEnv a, ModuleEnv a)
partitionModuleEnv f (ModuleEnv e) = (ModuleEnv a, ModuleEnv b)
  where (a,b) = Map.partition f e

-- | Build an environment from a list; later entries win on duplicate keys.
mkModuleEnv :: [(Module, a)] -> ModuleEnv a
mkModuleEnv xs = ModuleEnv (Map.fromList [(NDModule k, v) | (k,v) <- xs])

emptyModuleEnv :: ModuleEnv a
emptyModuleEnv = ModuleEnv Map.empty

-- | The keys, lexicographically sorted to restore determinism.
moduleEnvKeys :: ModuleEnv a -> [Module]
moduleEnvKeys (ModuleEnv e) = sort $ map unNDModule $ Map.keys e
  -- See Note [ModuleEnv performance and determinism]

-- | The elements, in the deterministic key order of 'moduleEnvToList'.
moduleEnvElts :: ModuleEnv a -> [a]
moduleEnvElts e = map snd $ moduleEnvToList e
  -- See Note [ModuleEnv performance and determinism]

-- | All bindings, sorted lexicographically by key to restore determinism.
moduleEnvToList :: ModuleEnv a -> [(Module, a)]
moduleEnvToList (ModuleEnv e) =
  sortBy (comparing fst) [(m, v) | (NDModule m, v) <- Map.toList e]
  -- See Note [ModuleEnv performance and determinism]

unitModuleEnv :: Module -> a -> ModuleEnv a
unitModuleEnv m x = ModuleEnv (Map.singleton (NDModule m) x)

isEmptyModuleEnv :: ModuleEnv a -> Bool
isEmptyModuleEnv (ModuleEnv e) = Map.null e

-- | A set of 'Module's
--
-- Like 'ModuleEnv', stored under the nondeterministic 'NDModule'
-- ordering; 'moduleSetElts' sorts to restore determinism.
type ModuleSet = Set NDModule

mkModuleSet :: [Module] -> ModuleSet
mkModuleSet = Set.fromList . coerce

extendModuleSet :: ModuleSet -> Module -> ModuleSet
extendModuleSet s m = Set.insert (NDModule m) s

extendModuleSetList :: ModuleSet -> [Module] -> ModuleSet
extendModuleSetList s ms = foldl' (coerce . flip Set.insert) s ms

emptyModuleSet :: ModuleSet
emptyModuleSet = Set.empty

isEmptyModuleSet :: ModuleSet -> Bool
isEmptyModuleSet = Set.null

-- | The members, lexicographically sorted for determinism.
moduleSetElts :: ModuleSet -> [Module]
moduleSetElts = sort . coerce . Set.toList

elemModuleSet :: Module -> ModuleSet -> Bool
elemModuleSet = Set.member . coerce

intersectModuleSet :: ModuleSet -> ModuleSet -> ModuleSet
intersectModuleSet = coerce Set.intersection

minusModuleSet :: ModuleSet -> ModuleSet -> ModuleSet
minusModuleSet = coerce Set.difference

delModuleSet :: ModuleSet -> Module -> ModuleSet
delModuleSet = coerce (flip Set.delete)

unionModuleSet :: ModuleSet -> ModuleSet -> ModuleSet
unionModuleSet = coerce Set.union

unionManyModuleSets :: [ModuleSet] -> ModuleSet
unionManyModuleSets = coerce (Set.unions :: [Set NDModule] -> Set NDModule)

unitModuleSet :: Module -> ModuleSet
unitModuleSet = coerce Set.singleton

{- A ModuleName has a Unique, so we can build mappings of these using
UniqFM. -}

-- | A map keyed off of 'ModuleName's (actually, their 'Unique's)
type ModuleNameEnv elt = UniqFM ModuleName elt

-- | A map keyed off of 'ModuleName's (actually, their 'Unique's)
-- Has deterministic folds and can be deterministically converted to a list
type DModuleNameEnv elt = UniqDFM ModuleName elt

--------------------------------------------------------------------
-- InstalledModuleEnv
--------------------------------------------------------------------

-- | A map keyed off of 'InstalledModule'
--
-- Uses the (deterministic) 'Ord' instance of 'InstalledModule' directly,
-- unlike 'ModuleEnv' above.
newtype InstalledModuleEnv elt = InstalledModuleEnv (Map InstalledModule elt)

instance Outputable elt => Outputable (InstalledModuleEnv elt) where
  ppr (InstalledModuleEnv env) = ppr env

emptyInstalledModuleEnv :: InstalledModuleEnv a
emptyInstalledModuleEnv = InstalledModuleEnv Map.empty

lookupInstalledModuleEnv :: InstalledModuleEnv a -> InstalledModule -> Maybe a
lookupInstalledModuleEnv (InstalledModuleEnv e) m = Map.lookup m e

-- | Insert, overwriting any existing binding for the key.
extendInstalledModuleEnv :: InstalledModuleEnv a -> InstalledModule -> a
                         -> InstalledModuleEnv a
extendInstalledModuleEnv (InstalledModuleEnv e) m x =
  InstalledModuleEnv (Map.insert m x e)

-- | Keep only the entries whose key and value satisfy the predicate.
filterInstalledModuleEnv :: (InstalledModule -> a -> Bool)
                         -> InstalledModuleEnv a
                         -> InstalledModuleEnv a
filterInstalledModuleEnv f (InstalledModuleEnv e) =
  InstalledModuleEnv (Map.filterWithKey f e)
delInstalledModuleEnv :: InstalledModuleEnv a -> InstalledModule -> InstalledModuleEnv a delInstalledModuleEnv (InstalledModuleEnv e) m = InstalledModuleEnv (Map.delete m e) installedModuleEnvElts :: InstalledModuleEnv a -> [(InstalledModule, a)] installedModuleEnvElts (InstalledModuleEnv e) = Map.assocs e mergeInstalledModuleEnv :: (elta -> eltb -> Maybe eltc) -> (InstalledModuleEnv elta -> InstalledModuleEnv eltc) -- map X -> (InstalledModuleEnv eltb -> InstalledModuleEnv eltc) -- map Y -> InstalledModuleEnv elta -> InstalledModuleEnv eltb -> InstalledModuleEnv eltc mergeInstalledModuleEnv f g h (InstalledModuleEnv xm) (InstalledModuleEnv ym) = InstalledModuleEnv $ Map.mergeWithKey (\_ x y -> (x `f` y)) (coerce g) (coerce h) xm ym plusInstalledModuleEnv :: (elt -> elt -> elt) -> InstalledModuleEnv elt -> InstalledModuleEnv elt -> InstalledModuleEnv elt plusInstalledModuleEnv f (InstalledModuleEnv xm) (InstalledModuleEnv ym) = InstalledModuleEnv $ Map.unionWith f xm ym ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Graph.hs0000644000000000000000000003266514472400113021411 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE DeriveTraversable #-} module GHC.Unit.Module.Graph ( ModuleGraph , ModuleGraphNode(..) , nodeDependencies , emptyMG , mkModuleGraph , extendMG , extendMGInst , extendMG' , unionMG , isTemplateHaskellOrQQNonBoot , filterToposortToModules , mapMG , mgModSummaries , mgModSummaries' , mgLookupModule , mgTransDeps , showModMsg , moduleGraphNodeModule , moduleGraphNodeModSum , moduleGraphNodes , SummaryNode , summaryNodeSummary , NodeKey(..) , nodeKeyUnitId , ModNodeKey , mkNodeKey , msKey , moduleGraphNodeUnitId , ModNodeKeyWithUid(..) 
) where import GHC.Prelude import qualified GHC.LanguageExtensions as LangExt import GHC.Data.Maybe import GHC.Data.Graph.Directed import GHC.Driver.Backend import GHC.Driver.Ppr import GHC.Driver.Session import GHC.Types.SourceFile ( hscSourceString ) import GHC.Unit.Module.ModSummary import GHC.Unit.Types import GHC.Utils.Outputable import System.FilePath import qualified Data.Map as Map import GHC.Types.Unique.DSet import qualified Data.Set as Set import GHC.Unit.Module import GHC.Linker.Static.Utils import Data.Bifunctor import Data.Either import Data.Function import GHC.Data.List.SetOps -- | A '@ModuleGraphNode@' is a node in the '@ModuleGraph@'. -- Edges between nodes mark dependencies arising from module imports -- and dependencies arising from backpack instantiations. data ModuleGraphNode -- | Instantiation nodes track the instantiation of other units -- (backpack dependencies) with the holes (signatures) of the current package. = InstantiationNode UnitId InstantiatedUnit -- | There is a module summary node for each module, signature, and boot module being built. | ModuleNode [NodeKey] ModSummary -- | Link nodes are whether are are creating a linked product (ie executable/shared object etc) for a unit. 
| LinkNode [NodeKey] UnitId

-- | The module name of the node's 'ModSummary', if it has one.
moduleGraphNodeModule :: ModuleGraphNode -> Maybe ModuleName
moduleGraphNodeModule mgn = ms_mod_name <$> (moduleGraphNodeModSum mgn)

-- | The 'ModSummary' carried by a 'ModuleNode'; 'Nothing' for
-- instantiation and link nodes.
moduleGraphNodeModSum :: ModuleGraphNode -> Maybe ModSummary
moduleGraphNodeModSum (InstantiationNode {}) = Nothing
moduleGraphNodeModSum (LinkNode {})          = Nothing
moduleGraphNodeModSum (ModuleNode _ ms)      = Just ms

-- | The unit a node belongs to.
moduleGraphNodeUnitId :: ModuleGraphNode -> UnitId
moduleGraphNodeUnitId mgn =
  case mgn of
    InstantiationNode uid _iud -> uid
    ModuleNode _ ms            -> toUnitId (moduleUnit (ms_mod ms))
    LinkNode _ uid             -> uid

instance Outputable ModuleGraphNode where
  ppr = \case
    InstantiationNode _ iuid -> ppr iuid
    ModuleNode nks ms -> ppr (msKey ms) <+> ppr nks
    -- NOTE(review): this pattern binds the FIRST field of 'LinkNode'
    -- (the '[NodeKey]' dependency list) as 'uid' and prints it; the
    -- name suggests the 'UnitId' (second field) may have been
    -- intended -- confirm before relying on this output.
    LinkNode uid _ -> text "LN:" <+> ppr uid

-- Eq/Ord delegate to the node's 'NodeKey'.
instance Eq ModuleGraphNode where
  (==) = (==) `on` mkNodeKey

instance Ord ModuleGraphNode where
  compare = compare `on` mkNodeKey

-- | Stable identifier for each kind of graph node.
data NodeKey = NodeKey_Unit {-# UNPACK #-} !InstantiatedUnit
             | NodeKey_Module {-# UNPACK #-} !ModNodeKeyWithUid
             | NodeKey_Link !UnitId
  deriving (Eq, Ord)

instance Outputable NodeKey where
  ppr nk = pprNodeKey nk

pprNodeKey :: NodeKey -> SDoc
pprNodeKey (NodeKey_Unit iu)   = ppr iu
pprNodeKey (NodeKey_Module mk) = ppr mk
pprNodeKey (NodeKey_Link uid)  = ppr uid

-- | The unit a 'NodeKey' belongs to.
nodeKeyUnitId :: NodeKey -> UnitId
nodeKeyUnitId (NodeKey_Unit iu)   = instUnitInstanceOf iu
nodeKeyUnitId (NodeKey_Module mk) = mnkUnitId mk
nodeKeyUnitId (NodeKey_Link uid)  = uid

-- | A module name (with boot flag) qualified by its unit.
data ModNodeKeyWithUid = ModNodeKeyWithUid
  { mnkModuleName :: !ModuleNameWithIsBoot
  , mnkUnitId     :: !UnitId
  } deriving (Eq, Ord)

instance Outputable ModNodeKeyWithUid where
  ppr (ModNodeKeyWithUid mnwib uid) = ppr uid <> colon <> ppr mnwib

-- | A '@ModuleGraph@' contains all the nodes from the home package (only). See
-- '@ModuleGraphNode@' for information about the nodes.
--
-- Modules need to be compiled. hs-boots need to be typechecked before
-- the associated "real" module so modules with {-# SOURCE #-} imports can be
-- built. Instantiations also need to be typechecked to ensure that the module
-- fits the signature. Substantiation typechecking is roughly comparable to the
-- check that the module and its hs-boot agree.
--
-- The graph is not necessarily stored in topologically-sorted order.  Use
-- 'GHC.topSortModuleGraph' and 'GHC.Data.Graph.Directed.flattenSCC' to achieve this.
data ModuleGraph = ModuleGraph
  { mg_mss :: [ModuleGraphNode]
  , mg_trans_deps :: Map.Map NodeKey (Set.Set NodeKey)
    -- A cached transitive dependency calculation so that a lot of work is not
    -- repeated whenever the transitive dependencies need to be calculated
    -- (for example, hptInstances)
  }

-- | Map a function 'f' over all the 'ModSummaries'.
-- To preserve invariants 'f' can't change the isBoot status.
-- (mg_trans_deps is reused unchanged -- valid only because 'f' may not
-- change any node's key.)
mapMG :: (ModSummary -> ModSummary) -> ModuleGraph -> ModuleGraph
mapMG f mg@ModuleGraph{..} = mg
  { mg_mss = flip fmap mg_mss $ \case
      InstantiationNode uid iuid -> InstantiationNode uid iuid
      LinkNode uid nks -> LinkNode uid nks
      ModuleNode deps ms -> ModuleNode deps (f ms)
  }

-- | Union of two graphs (duplicates removed); the transitive-dependency
-- cache is recomputed from the combined node list.
unionMG :: ModuleGraph -> ModuleGraph -> ModuleGraph
unionMG a b =
  let new_mss = nubOrdBy compare $ mg_mss a `mappend` mg_mss b
  in ModuleGraph
       { mg_mss = new_mss
       , mg_trans_deps = mkTransDeps new_mss
       }

mgTransDeps :: ModuleGraph -> Map.Map NodeKey (Set.Set NodeKey)
mgTransDeps = mg_trans_deps

mgModSummaries :: ModuleGraph -> [ModSummary]
mgModSummaries mg = [ m | ModuleNode _ m <- mgModSummaries' mg ]

mgModSummaries' :: ModuleGraph -> [ModuleGraphNode]
mgModSummaries' = mg_mss

-- | Look up a ModSummary in the ModuleGraph
-- Looks up the non-boot ModSummary
-- Linear in the size of the module graph
mgLookupModule :: ModuleGraph -> Module -> Maybe ModSummary
mgLookupModule ModuleGraph{..} m = listToMaybe $ mapMaybe go mg_mss
  where
    go (ModuleNode _ ms)
      | NotBoot <- isBootSummary ms
      , ms_mod ms == m
      = Just ms
    go _ = Nothing

emptyMG :: ModuleGraph
emptyMG = ModuleGraph [] Map.empty

-- | Does the module use Template Haskell or QuasiQuotes (and is not a
-- boot module)?
isTemplateHaskellOrQQNonBoot :: ModSummary -> Bool
isTemplateHaskellOrQQNonBoot ms = needs_codegen && not_boot
  where
    -- TH and QQ both force code generation for the module.
    opts          = ms_hspp_opts ms
    needs_codegen = xopt LangExt.TemplateHaskell opts
                 || xopt LangExt.QuasiQuotes opts
    not_boot      = isBootSummary ms == NotBoot

-- | Add an ExtendedModSummary to ModuleGraph. Assumes that the new ModSummary is
-- not an element of the ModuleGraph.
extendMG :: ModuleGraph -> [NodeKey] -> ModSummary -> ModuleGraph
extendMG graph deps ms = ModuleGraph
  { mg_mss        = nodes'
  , mg_trans_deps = mkTransDeps nodes'
  }
  where
    -- New node goes on the front; the transitive-dependency cache is
    -- rebuilt from the extended node list.
    nodes' = ModuleNode deps ms : mg_mss graph

-- | Compute the transitive-dependency cache for a node list.
mkTransDeps :: [ModuleGraphNode] -> Map.Map NodeKey (Set.Set NodeKey)
mkTransDeps nodes = allReachable graph (mkNodeKey . node_payload)
  where
    (graph, _lookup_node) = moduleGraphNodes False nodes

-- | Add an instantiation node.  NOTE(review): unlike 'extendMG', this does
-- not refresh mg_trans_deps -- confirm callers do not rely on the cache
-- covering instantiation nodes.
extendMGInst :: ModuleGraph -> UnitId -> InstantiatedUnit -> ModuleGraph
extendMGInst mg uid depUnitId = mg { mg_mss = node : mg_mss mg }
  where
    node = InstantiationNode uid depUnitId

-- | Add a link node (same caveat about mg_trans_deps as 'extendMGInst').
extendMGLink :: ModuleGraph -> UnitId -> [NodeKey] -> ModuleGraph
extendMGLink mg uid nks = mg { mg_mss = LinkNode nks uid : mg_mss mg }

-- | Add any kind of node, dispatching to the specific extender.
extendMG' :: ModuleGraph -> ModuleGraphNode -> ModuleGraph
extendMG' mg node =
  case node of
    InstantiationNode uid iunit -> extendMGInst mg uid iunit
    ModuleNode deps ms          -> extendMG mg deps ms
    LinkNode deps uid           -> extendMGLink mg uid deps

-- | Build a graph by folding 'extendMG'' over the nodes from the right,
-- so the first list element ends up at the head of mg_mss.
mkModuleGraph :: [ModuleGraphNode] -> ModuleGraph
mkModuleGraph nodes = foldr (flip extendMG') emptyMG nodes

-- | This function filters out all the instantiation nodes from each SCC of a
-- topological sort. Use this with care, as the resulting "strongly connected components"
-- may not really be strongly connected in a direct way, as instantiations have been
-- removed. It would probably be best to eliminate uses of this function where possible.
filterToposortToModules :: [SCC ModuleGraphNode] -> [SCC ModSummary]
filterToposortToModules = mapMaybe $ mapMaybeSCC $ \case
  InstantiationNode _ _ -> Nothing
  LinkNode{} -> Nothing
  ModuleNode _deps node -> Just node
  where
    -- This higher order function is somewhat bogus,
    -- as the definition of "strongly connected component"
    -- is not necessarily respected.
    -- (A cyclic SCC that filters down to one element is demoted to
    -- acyclic; one that filters to nothing is dropped entirely.)
    mapMaybeSCC :: (a -> Maybe b) -> SCC a -> Maybe (SCC b)
    mapMaybeSCC f = \case
      AcyclicSCC a -> AcyclicSCC <$> f a
      CyclicSCC as -> case mapMaybe f as of
        []  -> Nothing
        [a] -> Just $ AcyclicSCC a
        as  -> Just $ CyclicSCC as

-- | One-line description of a graph node for progress messages.
showModMsg :: DynFlags -> Bool -> ModuleGraphNode -> SDoc
showModMsg dflags _ (LinkNode {}) =
  let staticLink = case ghcLink dflags of
                     LinkStaticLib -> True
                     _ -> False
      platform = targetPlatform dflags
      exe_file = exeFileName platform staticLink (outputFile_ dflags)
  in text exe_file
showModMsg _ _ (InstantiationNode _uid indef_unit) =
  ppr $ instUnitInstanceOf indef_unit
showModMsg dflags recomp (ModuleNode _ mod_summary) =
  if gopt Opt_HideSourcePaths dflags
      then text mod_str
      else hsep $
        [ -- Pad the module name to (at least) 16 columns so the
          -- file names line up.
          text (mod_str ++ replicate (max 0 (16 - length mod_str)) ' ')
        , char '('
        , text (op $ msHsFilePath mod_summary) <> char ','
        , message, char ')' ]
  where
    op       = normalise
    mod      = moduleName (ms_mod mod_summary)
    mod_str  = showPpr dflags mod ++ hscSourceString (ms_hsc_src mod_summary)
    dyn_file = op $ msDynObjFilePath mod_summary
    obj_file = op $ msObjFilePath mod_summary
    -- What we are producing: "interpreted", "nothing", or the object
    -- file path(s).  Note: 'Interpreter' with recomp False falls
    -- through the failed guard to the final wildcard case.
    message = case backend dflags of
                Interpreter | recomp -> text "interpreted"
                NoBackend            -> text "nothing"
                _ ->
                  if gopt Opt_BuildDynamicToo dflags
                    then text obj_file <> comma <+> text dyn_file
                    else text obj_file

-- | Node of the graph as consumed by GHC.Data.Graph.Directed: the
-- payload plus an Int key assigned in 'moduleGraphNodes'.
type SummaryNode = Node Int ModuleGraphNode

summaryNodeKey :: SummaryNode -> Int
summaryNodeKey = node_key

summaryNodeSummary :: SummaryNode -> ModuleGraphNode
summaryNodeSummary = node_payload

-- | Collect the immediate dependencies of a ModuleGraphNode,
-- optionally avoiding hs-boot dependencies.
-- If the drop_hs_boot_nodes flag is False, and if this is a .hs and there is -- an equivalent .hs-boot, add a link from the former to the latter. This -- has the effect of detecting bogus cases where the .hs-boot depends on the -- .hs, by introducing a cycle. Additionally, it ensures that we will always -- process the .hs-boot before the .hs, and so the HomePackageTable will always -- have the most up to date information. nodeDependencies :: Bool -> ModuleGraphNode -> [NodeKey] nodeDependencies drop_hs_boot_nodes = \case LinkNode deps _uid -> deps InstantiationNode uid iuid -> NodeKey_Module . (\mod -> ModNodeKeyWithUid (GWIB mod NotBoot) uid) <$> uniqDSetToList (instUnitHoles iuid) ModuleNode deps _ms -> map drop_hs_boot deps where -- Drop hs-boot nodes by using HsSrcFile as the key hs_boot_key | drop_hs_boot_nodes = NotBoot -- is regular mod or signature | otherwise = IsBoot drop_hs_boot (NodeKey_Module (ModNodeKeyWithUid (GWIB mn IsBoot) uid)) = (NodeKey_Module (ModNodeKeyWithUid (GWIB mn hs_boot_key) uid)) drop_hs_boot x = x -- | Turn a list of graph nodes into an efficient queriable graph. -- The first boolean parameter indicates whether nodes corresponding to hs-boot files -- should be collapsed into their relevant hs nodes. moduleGraphNodes :: Bool -> [ModuleGraphNode] -> (Graph SummaryNode, NodeKey -> Maybe SummaryNode) moduleGraphNodes drop_hs_boot_nodes summaries = (graphFromEdgedVerticesUniq nodes, lookup_node) where -- Map from module to extra boot summary dependencies which need to be merged in (boot_summaries, nodes) = bimap Map.fromList id $ partitionEithers (map go numbered_summaries) where go (s, key) = case s of ModuleNode __deps ms | isBootSummary ms == IsBoot, drop_hs_boot_nodes -- Using nodeDependencies here converts dependencies on other -- boot files to dependencies on dependencies on non-boot files. 
-> Left (ms_mod ms, nodeDependencies drop_hs_boot_nodes s) _ -> normal_case where normal_case = let lkup_key = ms_mod <$> moduleGraphNodeModSum s extra = (lkup_key >>= \key -> Map.lookup key boot_summaries) in Right $ DigraphNode s key $ out_edge_keys $ (fromMaybe [] extra ++ nodeDependencies drop_hs_boot_nodes s) numbered_summaries = zip summaries [1..] lookup_node :: NodeKey -> Maybe SummaryNode lookup_node key = Map.lookup key (unNodeMap node_map) lookup_key :: NodeKey -> Maybe Int lookup_key = fmap summaryNodeKey . lookup_node node_map :: NodeMap SummaryNode node_map = NodeMap $ Map.fromList [ (mkNodeKey s, node) | node <- nodes , let s = summaryNodeSummary node ] out_edge_keys :: [NodeKey] -> [Int] out_edge_keys = mapMaybe lookup_key -- If we want keep_hi_boot_nodes, then we do lookup_key with -- IsBoot; else False newtype NodeMap a = NodeMap { unNodeMap :: Map.Map NodeKey a } deriving (Functor, Traversable, Foldable) mkNodeKey :: ModuleGraphNode -> NodeKey mkNodeKey = \case InstantiationNode _ iu -> NodeKey_Unit iu ModuleNode _ x -> NodeKey_Module $ msKey x LinkNode _ uid -> NodeKey_Link uid msKey :: ModSummary -> ModNodeKeyWithUid msKey ms = ModNodeKeyWithUid (ms_mnwib ms) (ms_unitid ms) type ModNodeKey = ModuleNameWithIsBoot ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Imported.hs0000644000000000000000000000276014472400113022124 0ustar0000000000000000module GHC.Unit.Module.Imported ( ImportedMods , ImportedBy (..) , ImportedModsVal (..) , importedByUser ) where import GHC.Prelude import GHC.Unit.Module import GHC.Types.Name.Reader import GHC.Types.SafeHaskell import GHC.Types.SrcLoc -- | Records the modules directly imported by a module for extracting e.g. -- usage information, and also to give better error message type ImportedMods = ModuleEnv [ImportedBy] -- | If a module was "imported" by the user, we associate it with -- more detailed usage information 'ImportedModsVal'; a module -- imported by the system only gets used for usage information. 
data ImportedBy
  = ImportedByUser ImportedModsVal  -- ^ an explicit import written by the user
  | ImportedBySystem                -- ^ an import introduced by the compiler

-- | Keep only the user-written imports, in order, discarding the
-- system-introduced ones.
importedByUser :: [ImportedBy] -> [ImportedModsVal]
importedByUser bys = [ imv | ImportedByUser imv <- bys ]

data ImportedModsVal = ImportedModsVal
  { imv_name        :: ModuleName
    -- ^ The name the module is imported with
  , imv_span        :: SrcSpan
    -- ^ the source span of the whole import
  , imv_is_safe     :: IsSafeImport
    -- ^ whether this is a safe import
  , imv_is_hiding   :: Bool
    -- ^ whether this is an "hiding" import
  , imv_all_exports :: !GlobalRdrEnv
    -- ^ all the things the module could provide.
    --
    -- NB. BangPattern here: otherwise this leaks. (#15111)
  , imv_qualified   :: Bool
    -- ^ whether this is a qualified import
  }
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Location.hs0000644000000000000000000001035214472400113022105 0ustar0000000000000000-- | Module location
module GHC.Unit.Module.Location
   ( ModLocation(..)
   , addBootSuffix
   , addBootSuffix_maybe
   , addBootSuffixLocn_maybe
   , addBootSuffixLocn
   , addBootSuffixLocnOut
   , removeBootSuffix
   )
where

import GHC.Prelude
import GHC.Unit.Types
import GHC.Utils.Outputable

-- | Module Location
--
-- Where a module lives on the file system: the actual locations
-- of the .hs, .hi, .dyn_hi, .o, .dyn_o and .hie files, if we have them.
--
-- For a module in another unit, the ml_hs_file and ml_obj_file components of
-- ModLocation are undefined.
--
-- The locations specified by a ModLocation may or may not
-- correspond to actual files yet: for example, even if the object
-- file doesn't exist, the ModLocation still contains the path to
-- where the object file will reside if/when it is created.
--
-- The paths of anything which can affect recompilation should be placed inside
-- ModLocation.
--
-- When a ModLocation is created none of the filepaths will have -boot suffixes.
-- This is because in --make mode the ModLocation is put in the finder cache which
-- is indexed by ModuleName, when a ModLocation is retrieved from the FinderCache
-- the boot suffixes are appended.
-- The other case is in -c mode, there the ModLocation immediately gets given the
-- boot suffixes in mkOneShotModLocation.

data ModLocation = ModLocation
  { ml_hs_file      :: Maybe FilePath
    -- ^ The source file, if we have one.  Package modules
    -- probably don't have source files.
  , ml_hi_file      :: FilePath
    -- ^ Where the .hi file is, whether or not it exists
    -- yet.  Always of form foo.hi, even if there is an
    -- hi-boot file (we add the -boot suffix later)
  , ml_dyn_hi_file  :: FilePath
    -- ^ Where the .dyn_hi file is, whether or not it exists yet.
  , ml_obj_file     :: FilePath
    -- ^ Where the .o file is, whether or not it exists yet.
    -- (might not exist either because the module hasn't
    -- been compiled yet, or because it is part of a
    -- unit with a .a file)
  , ml_dyn_obj_file :: FilePath
    -- ^ Where the .dyn_o file is, whether or not it exists yet.
  , ml_hie_file     :: FilePath
    -- ^ Where the .hie file is, whether or not it exists yet.
  } deriving Show

instance Outputable ModLocation where
  ppr l = text (show l)

-- | Add the @-boot@ suffix to .hs, .hi and .o files
addBootSuffix :: FilePath -> FilePath
addBootSuffix = (++ "-boot")

-- | Remove the @-boot@ suffix to .hs, .hi and .o files
-- (errors when the path does not end in @-boot@)
removeBootSuffix :: FilePath -> FilePath
removeBootSuffix = go
  where
    go "-boot"  = []
    go (c : cs) = c : go cs
    go []       = error "removeBootSuffix: no -boot suffix"

-- | Add the @-boot@ suffix if the @Bool@ argument is @True@
addBootSuffix_maybe :: IsBootInterface -> FilePath -> FilePath
addBootSuffix_maybe IsBoot  path = addBootSuffix path
addBootSuffix_maybe NotBoot path = path

-- | Suffix the whole location when it refers to a boot interface.
addBootSuffixLocn_maybe :: IsBootInterface -> ModLocation -> ModLocation
addBootSuffixLocn_maybe IsBoot locn = addBootSuffixLocn locn
addBootSuffixLocn_maybe _      locn = locn

-- | Add the @-boot@ suffix to all file paths associated with the module
addBootSuffixLocn :: ModLocation -> ModLocation
addBootSuffixLocn locn =
  -- All output paths, plus the source file itself.
  (addBootSuffixLocnOut locn)
    { ml_hs_file = fmap addBootSuffix (ml_hs_file locn) }

-- | Add the @-boot@ suffix to all output file paths associated with the
-- module, not including the input file itself
addBootSuffixLocnOut :: ModLocation -> ModLocation
addBootSuffixLocnOut locn = locn
  { ml_hi_file      = addBootSuffix (ml_hi_file locn)
  , ml_dyn_hi_file  = addBootSuffix (ml_dyn_hi_file locn)
  , ml_obj_file     = addBootSuffix (ml_obj_file locn)
  , ml_dyn_obj_file = addBootSuffix (ml_dyn_obj_file locn)
  , ml_hie_file     = addBootSuffix (ml_hie_file locn)
  }
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/ModDetails.hs0000644000000000000000000000305314472400113022362 0ustar0000000000000000module GHC.Unit.Module.ModDetails
   ( ModDetails (..)
, emptyModDetails ) where import GHC.Core ( CoreRule ) import GHC.Core.FamInstEnv import GHC.Core.InstEnv ( InstEnv, emptyInstEnv ) import GHC.Types.Avail import GHC.Types.CompleteMatch import GHC.Types.TypeEnv import GHC.Types.Annotations ( Annotation ) -- | The 'ModDetails' is essentially a cache for information in the 'ModIface' -- for home modules only. Information relating to packages will be loaded into -- global environments in 'ExternalPackageState'. data ModDetails = ModDetails { -- The next two fields are created by the typechecker md_exports :: [AvailInfo] , md_types :: !TypeEnv -- ^ Local type environment for this particular module -- Includes Ids, TyCons, PatSyns , md_insts :: InstEnv -- ^ 'DFunId's for the instances in this module , md_fam_insts :: ![FamInst] , md_rules :: ![CoreRule] -- ^ Domain may include 'Id's from other modules , md_anns :: ![Annotation] -- ^ Annotations present in this module: currently -- they only annotate things also declared in this module , md_complete_matches :: [CompleteMatch] -- ^ Complete match pragmas for this module } -- | Constructs an empty ModDetails emptyModDetails :: ModDetails emptyModDetails = ModDetails { md_types = emptyTypeEnv , md_exports = [] , md_insts = emptyInstEnv , md_rules = [] , md_fam_insts = [] , md_anns = [] , md_complete_matches = [] } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/ModGuts.hs0000644000000000000000000001565714472400113021734 0ustar0000000000000000module GHC.Unit.Module.ModGuts ( ModGuts (..) , mg_mnwib , CgGuts (..) ) where import GHC.Prelude import GHC.ByteCode.Types import GHC.ForeignSrcLang import GHC.Hs import GHC.Unit import GHC.Unit.Module.Deps import GHC.Unit.Module.Warnings import GHC.Core.InstEnv ( InstEnv, ClsInst ) import GHC.Core.FamInstEnv import GHC.Core ( CoreProgram, CoreRule ) import GHC.Core.TyCon import GHC.Core.PatSyn import GHC.Linker.Types ( SptEntry(..) 
) import GHC.Types.Annotations ( Annotation ) import GHC.Types.Avail import GHC.Types.CompleteMatch import GHC.Types.Fixity.Env import GHC.Types.ForeignStubs import GHC.Types.HpcInfo import GHC.Types.Name.Reader import GHC.Types.Name.Set (NameSet) import GHC.Types.SafeHaskell import GHC.Types.SourceFile ( HscSource(..), hscSourceToIsBoot ) import GHC.Types.SrcLoc import GHC.Types.CostCentre import Data.Set (Set) -- | A ModGuts is carried through the compiler, accumulating stuff as it goes -- There is only one ModGuts at any time, the one for the module -- being compiled right now. Once it is compiled, a 'ModIface' and -- 'ModDetails' are extracted and the ModGuts is discarded. data ModGuts = ModGuts { mg_module :: !Module, -- ^ Module being compiled mg_hsc_src :: HscSource, -- ^ Whether it's an hs-boot module mg_loc :: SrcSpan, -- ^ For error messages from inner passes mg_exports :: ![AvailInfo], -- ^ What it exports mg_deps :: !Dependencies, -- ^ What it depends on, directly or -- otherwise mg_usages :: ![Usage], -- ^ What was used? Used for interfaces. mg_used_th :: !Bool, -- ^ Did we run a TH splice? mg_rdr_env :: !GlobalRdrEnv, -- ^ Top-level lexical environment -- These fields all describe the things **declared in this module** mg_fix_env :: !FixityEnv, -- ^ Fixities declared in this module. -- Used for creating interface files. 
mg_tcs :: ![TyCon], -- ^ TyCons declared in this module -- (includes TyCons for classes) mg_insts :: ![ClsInst], -- ^ Class instances declared in this module mg_fam_insts :: ![FamInst], -- ^ Family instances declared in this module mg_patsyns :: ![PatSyn], -- ^ Pattern synonyms declared in this module mg_rules :: ![CoreRule], -- ^ Before the core pipeline starts, contains -- See Note [Overall plumbing for rules] in "GHC.Core.Rules" mg_binds :: !CoreProgram, -- ^ Bindings for this module mg_foreign :: !ForeignStubs, -- ^ Foreign exports declared in this module mg_foreign_files :: ![(ForeignSrcLang, FilePath)], -- ^ Files to be compiled with the C compiler mg_warns :: !(Warnings GhcRn), -- ^ Warnings declared in the module mg_anns :: [Annotation], -- ^ Annotations declared in this module mg_complete_matches :: [CompleteMatch], -- ^ Complete Matches mg_hpc_info :: !HpcInfo, -- ^ Coverage tick boxes in the module mg_modBreaks :: !(Maybe ModBreaks), -- ^ Breakpoints for the module -- The next two fields are unusual, because they give instance -- environments for *all* modules in the home package, including -- this module, rather than for *just* this module. -- Reason: when looking up an instance we don't want to have to -- look at each module in the home package in turn mg_inst_env :: InstEnv, -- ^ Class instance environment for -- /home-package/ modules (including this -- one); c.f. 'tcg_inst_env' mg_fam_inst_env :: FamInstEnv, -- ^ Type-family instance environment for -- /home-package/ modules (including this -- one); c.f. 'tcg_fam_inst_env' mg_boot_exports :: !NameSet, -- Things that are also export via hs-boot file mg_safe_haskell :: SafeHaskellMode, -- ^ Safe Haskell mode mg_trust_pkg :: Bool, -- ^ Do we need to trust our -- own package for Safe Haskell? -- See Note [Trust Own Package] -- in "GHC.Rename.Names" mg_docs :: !(Maybe Docs) -- ^ Documentation. 
} mg_mnwib :: ModGuts -> ModuleNameWithIsBoot mg_mnwib mg = GWIB (moduleName (mg_module mg)) (hscSourceToIsBoot (mg_hsc_src mg)) -- The ModGuts takes on several slightly different forms: -- -- After simplification, the following fields change slightly: -- mg_rules Orphan rules only (local ones now attached to binds) -- mg_binds With rules attached --------------------------------------------------------- -- The Tidy pass forks the information about this module: -- * one lot goes to interface file generation (ModIface) -- and later compilations (ModDetails) -- * the other lot goes to code generation (CgGuts) -- | A restricted form of 'ModGuts' for code generation purposes data CgGuts = CgGuts { cg_module :: !Module, -- ^ Module being compiled cg_tycons :: [TyCon], -- ^ Algebraic data types (including ones that started -- life as classes); generate constructors and info -- tables. Includes newtypes, just for the benefit of -- External Core cg_binds :: CoreProgram, -- ^ The tidied main bindings, including -- previously-implicit bindings for record and class -- selectors, and data constructor wrappers. But *not* -- data constructor workers; reason: we regard them -- as part of the code-gen of tycons cg_ccs :: [CostCentre], -- List of cost centres used in bindings and rules cg_foreign :: !ForeignStubs, -- ^ Foreign export stubs cg_foreign_files :: ![(ForeignSrcLang, FilePath)], cg_dep_pkgs :: !(Set UnitId), -- ^ Dependent packages, used to -- generate #includes for C code gen cg_hpc_info :: !HpcInfo, -- ^ Program coverage tick box information cg_modBreaks :: !(Maybe ModBreaks), -- ^ Module breakpoints cg_spt_entries :: [SptEntry] -- ^ Static pointer table entries for static forms defined in -- the module. 
-- See Note [Grand plan for static forms] in "GHC.Iface.Tidy.StaticPtrTable" } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/ModIface.hs0000644000000000000000000005423714472400113022016 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} module GHC.Unit.Module.ModIface ( ModIface , ModIface_ (..) , PartialModIface , ModIfaceBackend (..) , IfaceDeclExts , IfaceBackendExts , IfaceExport , WhetherHasOrphans , WhetherHasFamInst , mi_boot , mi_fix , mi_semantic_module , mi_free_holes , mi_mnwib , renameFreeHoles , emptyPartialModIface , emptyFullModIface , mkIfaceHashCache , emptyIfaceHashCache , forceModIface ) where import GHC.Prelude import GHC.Hs import GHC.Iface.Syntax import GHC.Iface.Ext.Fields import GHC.Unit import GHC.Unit.Module.Deps import GHC.Unit.Module.Warnings import GHC.Types.Avail import GHC.Types.Fixity import GHC.Types.Fixity.Env import GHC.Types.HpcInfo import GHC.Types.Name import GHC.Types.Name.Reader import GHC.Types.SafeHaskell import GHC.Types.SourceFile import GHC.Types.Unique.DSet import GHC.Types.Unique.FM import GHC.Data.Maybe import GHC.Utils.Fingerprint import GHC.Utils.Binary import Control.DeepSeq import Control.Exception {- Note [Interface file stages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Interface files have two possible stages. * A partial stage built from the result of the core pipeline. * A fully instantiated form. Which also includes fingerprints and potentially information provided by backends. We can build a full interface file two ways: * Directly from a partial one: Then we omit backend information and mostly compute fingerprints. * From a partial one + information produced by a backend. Then we store the provided information and fingerprint both. 
-} type PartialModIface = ModIface_ 'ModIfaceCore type ModIface = ModIface_ 'ModIfaceFinal -- | Extends a PartialModIface with information which is either: -- * Computed after codegen -- * Or computed just before writing the iface to disk. (Hashes) -- In order to fully instantiate it. data ModIfaceBackend = ModIfaceBackend { mi_iface_hash :: !Fingerprint -- ^ Hash of the whole interface , mi_mod_hash :: !Fingerprint -- ^ Hash of the ABI only , mi_flag_hash :: !Fingerprint -- ^ Hash of the important flags used when compiling the module, excluding -- optimisation flags , mi_opt_hash :: !Fingerprint -- ^ Hash of optimisation flags , mi_hpc_hash :: !Fingerprint -- ^ Hash of hpc flags , mi_plugin_hash :: !Fingerprint -- ^ Hash of plugins , mi_orphan :: !WhetherHasOrphans -- ^ Whether this module has orphans , mi_finsts :: !WhetherHasFamInst -- ^ Whether this module has family instances. See Note [The type family -- instance consistency story]. , mi_exp_hash :: !Fingerprint -- ^ Hash of export list , mi_orphan_hash :: !Fingerprint -- ^ Hash for orphan rules, class and family instances combined -- Cached environments for easy lookup. These are computed (lazily) from -- other fields and are not put into the interface file. -- Not really produced by the backend but there is no need to create them -- any earlier. , mi_warn_fn :: !(OccName -> Maybe (WarningTxt GhcRn)) -- ^ Cached lookup for 'mi_warns' , mi_fix_fn :: !(OccName -> Maybe Fixity) -- ^ Cached lookup for 'mi_fixities' , mi_hash_fn :: !(OccName -> Maybe (OccName, Fingerprint)) -- ^ Cached lookup for 'mi_decls'. The @Nothing@ in 'mi_hash_fn' means that -- the thing isn't in decls. It's useful to know that when seeing if we are -- up to date wrt. the old interface. The 'OccName' is the parent of the -- name, if it has one. } data ModIfacePhase = ModIfaceCore -- ^ Partial interface built based on output of core pipeline. | ModIfaceFinal -- | Selects a IfaceDecl representation. 
-- For fully instantiated interfaces we also maintain -- a fingerprint, which is used for recompilation checks. type family IfaceDeclExts (phase :: ModIfacePhase) where IfaceDeclExts 'ModIfaceCore = IfaceDecl IfaceDeclExts 'ModIfaceFinal = (Fingerprint, IfaceDecl) type family IfaceBackendExts (phase :: ModIfacePhase) where IfaceBackendExts 'ModIfaceCore = () IfaceBackendExts 'ModIfaceFinal = ModIfaceBackend -- | A 'ModIface' plus a 'ModDetails' summarises everything we know -- about a compiled module. The 'ModIface' is the stuff *before* linking, -- and can be written out to an interface file. The 'ModDetails is after -- linking and can be completely recovered from just the 'ModIface'. -- -- When we read an interface file, we also construct a 'ModIface' from it, -- except that we explicitly make the 'mi_decls' and a few other fields empty; -- as when reading we consolidate the declarations etc. into a number of indexed -- maps and environments in the 'ExternalPackageState'. -- -- See Note [Strictness in ModIface] to learn about why some fields are -- strict and others are not. data ModIface_ (phase :: ModIfacePhase) = ModIface { mi_module :: !Module, -- ^ Name of the module we are for mi_sig_of :: !(Maybe Module), -- ^ Are we a sig of another mod? mi_hsc_src :: !HscSource, -- ^ Boot? Signature? mi_deps :: Dependencies, -- ^ The dependencies of the module. This is -- consulted for directly-imported modules, but not -- for anything else (hence lazy) mi_usages :: [Usage], -- ^ Usages; kept sorted so that it's easy to decide -- whether to write a new iface file (changing usages -- doesn't affect the hash of this module) -- NOT STRICT! 
we read this field lazily from the interface file -- It is *only* consulted by the recompilation checker mi_exports :: ![IfaceExport], -- ^ Exports -- Kept sorted by (mod,occ), to make version comparisons easier -- Records the modules that are the declaration points for things -- exported by this module, and the 'OccName's of those things mi_used_th :: !Bool, -- ^ Module required TH splices when it was compiled. -- This disables recompilation avoidance (see #481). mi_fixities :: [(OccName,Fixity)], -- ^ Fixities -- NOT STRICT! we read this field lazily from the interface file mi_warns :: (Warnings GhcRn), -- ^ Warnings -- NOT STRICT! we read this field lazily from the interface file mi_anns :: [IfaceAnnotation], -- ^ Annotations -- NOT STRICT! we read this field lazily from the interface file mi_decls :: [IfaceDeclExts phase], -- ^ Type, class and variable declarations -- The hash of an Id changes if its fixity or deprecations change -- (as well as its type of course) -- Ditto data constructors, class operations, except that -- the hash of the parent class/tycon changes mi_globals :: !(Maybe GlobalRdrEnv), -- ^ Binds all the things defined at the top level in -- the /original source/ code for this module. which -- is NOT the same as mi_exports, nor mi_decls (which -- may contains declarations for things not actually -- defined by the user). Used for GHCi and for inspecting -- the contents of modules via the GHC API only. -- -- (We need the source file to figure out the -- top-level environment, if we didn't compile this module -- from source then this field contains @Nothing@). -- -- Strictly speaking this field should live in the -- 'HomeModInfo', but that leads to more plumbing. -- Instance declarations and rules mi_insts :: [IfaceClsInst], -- ^ Sorted class instance mi_fam_insts :: [IfaceFamInst], -- ^ Sorted family instances mi_rules :: [IfaceRule], -- ^ Sorted rules mi_hpc :: !AnyHpcUsage, -- ^ True if this program uses Hpc at any point in the program. 
mi_trust :: !IfaceTrustInfo, -- ^ Safe Haskell Trust information for this module. mi_trust_pkg :: !Bool, -- ^ Do we require the package this module resides in be trusted -- to trust this module? This is used for the situation where a -- module is Safe (so doesn't require the package be trusted -- itself) but imports some trustworthy modules from its own -- package (which does require its own package be trusted). -- See Note [Trust Own Package] in GHC.Rename.Names mi_complete_matches :: ![IfaceCompleteMatch], mi_docs :: Maybe Docs, -- ^ Docstrings and related data for use by haddock, the ghci -- @:doc@ command, and other tools. -- -- @Just _@ @<=>@ the module was built with @-haddock@. mi_final_exts :: !(IfaceBackendExts phase), -- ^ Either `()` or `ModIfaceBackend` for -- a fully instantiated interface. mi_ext_fields :: !ExtensibleFields, -- ^ Additional optional fields, where the Map key represents -- the field name, resulting in a (size, serialized data) pair. -- Because the data is intended to be serialized through the -- internal `Binary` class (increasing compatibility with types -- using `Name` and `FastString`, such as HIE), this format is -- chosen over `ByteString`s. -- mi_src_hash :: !Fingerprint -- ^ Hash of the .hs source, used for recompilation checking. } {- Note [Strictness in ModIface] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ModIface is the Haskell representation of an interface (.hi) file. * During compilation we write out ModIface values to disk for files that we have just compiled * For packages that we depend on we load the ModIface from disk. Some fields in the ModIface are deliberately lazy because when we read an interface file we don't always need all the parts. For example, an interface file contains information about documentation which is often not needed during compilation. This is achieved using the lazyPut/lazyGet pair. If the field was strict then we would pointlessly load this information into memory. 
On the other hand, if we create a ModIface but **don't** write it to disk then to avoid space leaks we need to make sure to deepseq all these lazy fields because the ModIface might live for a long time (for instance in a GHCi session). That's why in GHC.Driver.Main.hscMaybeWriteIface there is the call to forceModIface. -} -- | Old-style accessor for whether or not the ModIface came from an hs-boot -- file. mi_boot :: ModIface -> IsBootInterface mi_boot iface = if mi_hsc_src iface == HsBootFile then IsBoot else NotBoot mi_mnwib :: ModIface -> ModuleNameWithIsBoot mi_mnwib iface = GWIB (moduleName $ mi_module iface) (mi_boot iface) -- | Lookups up a (possibly cached) fixity from a 'ModIface'. If one cannot be -- found, 'defaultFixity' is returned instead. mi_fix :: ModIface -> OccName -> Fixity mi_fix iface name = mi_fix_fn (mi_final_exts iface) name `orElse` defaultFixity -- | The semantic module for this interface; e.g., if it's a interface -- for a signature, if 'mi_module' is @p[A=]:A@, 'mi_semantic_module' -- will be @@. mi_semantic_module :: ModIface_ a -> Module mi_semantic_module iface = case mi_sig_of iface of Nothing -> mi_module iface Just mod -> mod -- | The "precise" free holes, e.g., the signatures that this -- 'ModIface' depends on. mi_free_holes :: ModIface -> UniqDSet ModuleName mi_free_holes iface = case getModuleInstantiation (mi_module iface) of (_, Just indef) -- A mini-hack: we rely on the fact that 'renameFreeHoles' -- drops things that aren't holes. -> renameFreeHoles (mkUniqDSet cands) (instUnitInsts (moduleUnit indef)) _ -> emptyUniqDSet where cands = dep_sig_mods $ mi_deps iface -- | Given a set of free holes, and a unit identifier, rename -- the free holes according to the instantiation of the unit -- identifier. For example, if we have A and B free, and -- our unit identity is @p[A=,B=impl:B]@, the renamed free -- holes are just C. 
renameFreeHoles :: UniqDSet ModuleName -> [(ModuleName, Module)] -> UniqDSet ModuleName renameFreeHoles fhs insts = unionManyUniqDSets (map lookup_impl (uniqDSetToList fhs)) where hmap = listToUFM insts lookup_impl mod_name | Just mod <- lookupUFM hmap mod_name = moduleFreeHoles mod -- It wasn't actually a hole | otherwise = emptyUniqDSet -- See Note [Strictness in ModIface] about where we use lazyPut vs put instance Binary ModIface where put_ bh (ModIface { mi_module = mod, mi_sig_of = sig_of, mi_hsc_src = hsc_src, mi_src_hash = _src_hash, -- Don't `put_` this in the instance -- because we are going to write it -- out separately in the actual file mi_deps = deps, mi_usages = usages, mi_exports = exports, mi_used_th = used_th, mi_fixities = fixities, mi_warns = warns, mi_anns = anns, mi_decls = decls, mi_insts = insts, mi_fam_insts = fam_insts, mi_rules = rules, mi_hpc = hpc_info, mi_trust = trust, mi_trust_pkg = trust_pkg, mi_complete_matches = complete_matches, mi_docs = docs, mi_ext_fields = _ext_fields, -- Don't `put_` this in the instance so we -- can deal with it's pointer in the header -- when we write the actual file mi_final_exts = ModIfaceBackend { mi_iface_hash = iface_hash, mi_mod_hash = mod_hash, mi_flag_hash = flag_hash, mi_opt_hash = opt_hash, mi_hpc_hash = hpc_hash, mi_plugin_hash = plugin_hash, mi_orphan = orphan, mi_finsts = hasFamInsts, mi_exp_hash = exp_hash, mi_orphan_hash = orphan_hash }}) = do put_ bh mod put_ bh sig_of put_ bh hsc_src put_ bh iface_hash put_ bh mod_hash put_ bh flag_hash put_ bh opt_hash put_ bh hpc_hash put_ bh plugin_hash put_ bh orphan put_ bh hasFamInsts lazyPut bh deps lazyPut bh usages put_ bh exports put_ bh exp_hash put_ bh used_th put_ bh fixities lazyPut bh warns lazyPut bh anns put_ bh decls put_ bh insts put_ bh fam_insts lazyPut bh rules put_ bh orphan_hash put_ bh hpc_info put_ bh trust put_ bh trust_pkg put_ bh complete_matches lazyPutMaybe bh docs get bh = do mod <- get bh sig_of <- get bh hsc_src <- get bh 
iface_hash <- get bh mod_hash <- get bh flag_hash <- get bh opt_hash <- get bh hpc_hash <- get bh plugin_hash <- get bh orphan <- get bh hasFamInsts <- get bh deps <- lazyGet bh usages <- {-# SCC "bin_usages" #-} lazyGet bh exports <- {-# SCC "bin_exports" #-} get bh exp_hash <- get bh used_th <- get bh fixities <- {-# SCC "bin_fixities" #-} get bh warns <- {-# SCC "bin_warns" #-} lazyGet bh anns <- {-# SCC "bin_anns" #-} lazyGet bh decls <- {-# SCC "bin_tycldecls" #-} get bh insts <- {-# SCC "bin_insts" #-} get bh fam_insts <- {-# SCC "bin_fam_insts" #-} get bh rules <- {-# SCC "bin_rules" #-} lazyGet bh orphan_hash <- get bh hpc_info <- get bh trust <- get bh trust_pkg <- get bh complete_matches <- get bh docs <- lazyGetMaybe bh return (ModIface { mi_module = mod, mi_sig_of = sig_of, mi_hsc_src = hsc_src, mi_src_hash = fingerprint0, -- placeholder because this is dealt -- with specially when the file is read mi_deps = deps, mi_usages = usages, mi_exports = exports, mi_used_th = used_th, mi_anns = anns, mi_fixities = fixities, mi_warns = warns, mi_decls = decls, mi_globals = Nothing, mi_insts = insts, mi_fam_insts = fam_insts, mi_rules = rules, mi_hpc = hpc_info, mi_trust = trust, mi_trust_pkg = trust_pkg, -- And build the cached values mi_complete_matches = complete_matches, mi_docs = docs, mi_ext_fields = emptyExtensibleFields, -- placeholder because this is dealt -- with specially when the file is read mi_final_exts = ModIfaceBackend { mi_iface_hash = iface_hash, mi_mod_hash = mod_hash, mi_flag_hash = flag_hash, mi_opt_hash = opt_hash, mi_hpc_hash = hpc_hash, mi_plugin_hash = plugin_hash, mi_orphan = orphan, mi_finsts = hasFamInsts, mi_exp_hash = exp_hash, mi_orphan_hash = orphan_hash, mi_warn_fn = mkIfaceWarnCache warns, mi_fix_fn = mkIfaceFixCache fixities, mi_hash_fn = mkIfaceHashCache decls }}) -- | The original names declared of a certain module that are exported type IfaceExport = AvailInfo emptyPartialModIface :: Module -> PartialModIface 
emptyPartialModIface mod = ModIface { mi_module = mod, mi_sig_of = Nothing, mi_hsc_src = HsSrcFile, mi_src_hash = fingerprint0, mi_deps = noDependencies, mi_usages = [], mi_exports = [], mi_used_th = False, mi_fixities = [], mi_warns = NoWarnings, mi_anns = [], mi_insts = [], mi_fam_insts = [], mi_rules = [], mi_decls = [], mi_globals = Nothing, mi_hpc = False, mi_trust = noIfaceTrustInfo, mi_trust_pkg = False, mi_complete_matches = [], mi_docs = Nothing, mi_final_exts = (), mi_ext_fields = emptyExtensibleFields } emptyFullModIface :: Module -> ModIface emptyFullModIface mod = (emptyPartialModIface mod) { mi_decls = [] , mi_final_exts = ModIfaceBackend { mi_iface_hash = fingerprint0, mi_mod_hash = fingerprint0, mi_flag_hash = fingerprint0, mi_opt_hash = fingerprint0, mi_hpc_hash = fingerprint0, mi_plugin_hash = fingerprint0, mi_orphan = False, mi_finsts = False, mi_exp_hash = fingerprint0, mi_orphan_hash = fingerprint0, mi_warn_fn = emptyIfaceWarnCache, mi_fix_fn = emptyIfaceFixCache, mi_hash_fn = emptyIfaceHashCache } } -- | Constructs cache for the 'mi_hash_fn' field of a 'ModIface' mkIfaceHashCache :: [(Fingerprint,IfaceDecl)] -> (OccName -> Maybe (OccName, Fingerprint)) mkIfaceHashCache pairs = \occ -> lookupOccEnv env occ where env = foldl' add_decl emptyOccEnv pairs add_decl env0 (v,d) = foldl' add env0 (ifaceDeclFingerprints v d) where add env0 (occ,hash) = extendOccEnv env0 occ (occ,hash) emptyIfaceHashCache :: OccName -> Maybe (OccName, Fingerprint) emptyIfaceHashCache _occ = Nothing -- Take care, this instance only forces to the degree necessary to -- avoid major space leaks. 
instance (NFData (IfaceBackendExts (phase :: ModIfacePhase)), NFData (IfaceDeclExts (phase :: ModIfacePhase))) => NFData (ModIface_ phase) where rnf (ModIface f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23) = rnf f1 `seq` rnf f2 `seq` f3 `seq` f4 `seq` f5 `seq` f6 `seq` rnf f7 `seq` f8 `seq` f9 `seq` rnf f10 `seq` rnf f11 `seq` f12 `seq` rnf f13 `seq` rnf f14 `seq` rnf f15 `seq` rnf f16 `seq` f17 `seq` rnf f18 `seq` rnf f19 `seq` f20 `seq` f21 `seq` f22 `seq` rnf f23 `seq` () instance NFData (ModIfaceBackend) where rnf (ModIfaceBackend f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13) = rnf f1 `seq` rnf f2 `seq` rnf f3 `seq` rnf f4 `seq` rnf f5 `seq` rnf f6 `seq` rnf f7 `seq` rnf f8 `seq` rnf f9 `seq` rnf f10 `seq` rnf f11 `seq` rnf f12 `seq` rnf f13 forceModIface :: ModIface -> IO () forceModIface iface = () <$ (evaluate $ force iface) -- | Records whether a module has orphans. An \"orphan\" is one of: -- -- * An instance declaration in a module other than the definition -- module for one of the type constructors or classes in the instance head -- -- * A rewrite rule in a module other than the one defining -- the function in the head of the rule -- type WhetherHasOrphans = Bool -- | Does this module define family instances? type WhetherHasFamInst = Bool ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/ModSummary.hs0000644000000000000000000001643114472400113022436 0ustar0000000000000000{-# LANGUAGE TupleSections #-} -- | A ModSummary is a node in the compilation manager's dependency graph -- (ModuleGraph) module GHC.Unit.Module.ModSummary ( ModSummary (..) 
, ms_unitid , ms_installed_mod , ms_mod_name , ms_imps , ms_plugin_imps , ms_mnwib , ms_home_srcimps , ms_home_imps , msHiFilePath , msDynHiFilePath , msHsFilePath , msObjFilePath , msDynObjFilePath , msDeps , isBootSummary , findTarget ) where import GHC.Prelude import GHC.Hs import GHC.Driver.Session import GHC.Unit.Types import GHC.Unit.Module import GHC.Types.SourceFile ( HscSource(..), hscSourceString ) import GHC.Types.SrcLoc import GHC.Types.Target import GHC.Types.PkgQual import GHC.Data.Maybe import GHC.Data.StringBuffer ( StringBuffer ) import GHC.Utils.Fingerprint import GHC.Utils.Outputable import Data.Time -- | Data for a module node in a 'ModuleGraph'. Module nodes of the module graph -- are one of: -- -- * A regular Haskell source module -- * A hi-boot source module -- data ModSummary = ModSummary { ms_mod :: Module, -- ^ Identity of the module ms_hsc_src :: HscSource, -- ^ The module source either plain Haskell, hs-boot, or hsig ms_location :: ModLocation, -- ^ Location of the various files belonging to the module ms_hs_hash :: Fingerprint, -- ^ Content hash of source file ms_obj_date :: Maybe UTCTime, -- ^ Timestamp of object, if we have one ms_dyn_obj_date :: !(Maybe UTCTime), -- ^ Timestamp of dynamic object, if we have one ms_iface_date :: Maybe UTCTime, -- ^ Timestamp of hi file, if we have one -- See Note [When source is considered modified] and #9243 ms_hie_date :: Maybe UTCTime, -- ^ Timestamp of hie file, if we have one ms_srcimps :: [(PkgQual, Located ModuleName)], -- FIXME: source imports are never from an external package, why do we allow PkgQual? -- ^ Source imports of the module ms_textual_imps :: [(PkgQual, Located ModuleName)], -- ^ Non-source imports of the module from the module *text* ms_ghc_prim_import :: !Bool, -- ^ Whether the special module GHC.Prim was imported explicitliy ms_parsed_mod :: Maybe HsParsedModule, -- ^ The parsed, nonrenamed source, if we have it. 
This is also -- used to support "inline module syntax" in Backpack files. ms_hspp_file :: FilePath, -- ^ Filename of preprocessed source file ms_hspp_opts :: DynFlags, -- ^ Cached flags from @OPTIONS@, @INCLUDE@ and @LANGUAGE@ -- pragmas in the modules source code ms_hspp_buf :: Maybe StringBuffer -- ^ The actual preprocessed source, if we have it } ms_unitid :: ModSummary -> UnitId ms_unitid = toUnitId . moduleUnit . ms_mod ms_installed_mod :: ModSummary -> InstalledModule ms_installed_mod = fst . getModuleInstantiation . ms_mod ms_mod_name :: ModSummary -> ModuleName ms_mod_name = moduleName . ms_mod -- | Textual imports, plus plugin imports but not SOURCE imports. ms_imps :: ModSummary -> [(PkgQual, Located ModuleName)] ms_imps ms = ms_textual_imps ms ++ ms_plugin_imps ms -- | Plugin imports ms_plugin_imps :: ModSummary -> [(PkgQual, Located ModuleName)] ms_plugin_imps ms = map ((NoPkgQual,) . noLoc) (pluginModNames (ms_hspp_opts ms)) -- | All of the (possibly) home module imports from the given list that is to -- say, each of these module names could be a home import if an appropriately -- named file existed. (This is in contrast to package qualified imports, which -- are guaranteed not to be home imports.) home_imps :: [(PkgQual, Located ModuleName)] -> [(PkgQual, Located ModuleName)] home_imps imps = filter (maybe_home . fst) imps where maybe_home NoPkgQual = True maybe_home (ThisPkg _) = True maybe_home (OtherPkg _) = False -- | Like 'ms_home_imps', but for SOURCE imports. ms_home_srcimps :: ModSummary -> ([Located ModuleName]) -- [] here because source imports can only refer to the current package. ms_home_srcimps = map snd . home_imps . ms_srcimps -- | All of the (possibly) home module imports from a -- 'ModSummary'; that is to say, each of these module names -- could be a home import if an appropriately named file -- existed. (This is in contrast to package qualified -- imports, which are guaranteed not to be home imports.) 
ms_home_imps :: ModSummary -> ([(PkgQual, Located ModuleName)]) ms_home_imps = home_imps . ms_imps -- The ModLocation contains both the original source filename and the -- filename of the cleaned-up source file after all preprocessing has been -- done. The point is that the summariser will have to cpp/unlit/whatever -- all files anyway, and there's no point in doing this twice -- just -- park the result in a temp file, put the name of it in the location, -- and let @compile@ read from that file on the way back up. -- The ModLocation is stable over successive up-sweeps in GHCi, wheres -- the ms_hs_hash and imports can, of course, change msHsFilePath, msDynHiFilePath, msHiFilePath, msObjFilePath, msDynObjFilePath :: ModSummary -> FilePath msHsFilePath ms = expectJust "msHsFilePath" (ml_hs_file (ms_location ms)) msHiFilePath ms = ml_hi_file (ms_location ms) msDynHiFilePath ms = ml_dyn_hi_file (ms_location ms) msObjFilePath ms = ml_obj_file (ms_location ms) msDynObjFilePath ms = ml_dyn_obj_file (ms_location ms) -- | Did this 'ModSummary' originate from a hs-boot file? isBootSummary :: ModSummary -> IsBootInterface isBootSummary ms = if ms_hsc_src ms == HsBootFile then IsBoot else NotBoot ms_mnwib :: ModSummary -> ModuleNameWithIsBoot ms_mnwib ms = GWIB (ms_mod_name ms) (isBootSummary ms) -- | Returns the dependencies of the ModSummary s. 
msDeps :: ModSummary -> ([(PkgQual, GenWithIsBoot (Located ModuleName))]) msDeps s = [ (NoPkgQual, d) | m <- ms_home_srcimps s , d <- [ GWIB { gwib_mod = m, gwib_isBoot = IsBoot } ] ] ++ [ (pkg, (GWIB { gwib_mod = m, gwib_isBoot = NotBoot })) | (pkg, m) <- ms_imps s ] instance Outputable ModSummary where ppr ms = sep [text "ModSummary {", nest 3 (sep [text "ms_hs_hash = " <> text (show (ms_hs_hash ms)), text "ms_mod =" <+> ppr (ms_mod ms) <> text (hscSourceString (ms_hsc_src ms)) <> comma, text "unit =" <+> ppr (ms_unitid ms), text "ms_textual_imps =" <+> ppr (ms_textual_imps ms), text "ms_srcimps =" <+> ppr (ms_srcimps ms)]), char '}' ] -- | Find the first target in the provided list which matches the specified -- 'ModSummary'. findTarget :: ModSummary -> [Target] -> Maybe Target findTarget ms ts = case filter (matches ms) ts of [] -> Nothing (t:_) -> Just t where summary `matches` Target { targetId = TargetModule m, targetUnitId = unitId } = ms_mod_name summary == m && ms_unitid summary == unitId summary `matches` Target { targetId = TargetFile f _, targetUnitId = unitid } | Just f' <- ml_hs_file (ms_location summary) = f == f' && ms_unitid summary == unitid _ `matches` _ = False ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Name.hs0000644000000000000000000000527214472400113021222 0ustar0000000000000000 -- | The ModuleName type module GHC.Unit.Module.Name ( ModuleName , pprModuleName , moduleNameFS , moduleNameString , moduleNameSlashes, moduleNameColons , mkModuleName , mkModuleNameFS , stableModuleNameCmp , parseModuleName ) where import GHC.Prelude import GHC.Utils.Outputable import GHC.Types.Unique import GHC.Data.FastString import GHC.Utils.Binary import GHC.Utils.Misc import Control.DeepSeq import Data.Data import System.FilePath import qualified Text.ParserCombinators.ReadP as Parse import Text.ParserCombinators.ReadP (ReadP) import Data.Char (isAlphaNum) -- | A ModuleName is essentially a simple string, e.g. @Data.List@. 
newtype ModuleName = ModuleName FastString deriving Show instance Uniquable ModuleName where getUnique (ModuleName nm) = getUnique nm instance Eq ModuleName where nm1 == nm2 = getUnique nm1 == getUnique nm2 instance Ord ModuleName where nm1 `compare` nm2 = stableModuleNameCmp nm1 nm2 instance Outputable ModuleName where ppr = pprModuleName instance Binary ModuleName where put_ bh (ModuleName fs) = put_ bh fs get bh = do fs <- get bh; return (ModuleName fs) instance Data ModuleName where -- don't traverse? toConstr _ = abstractConstr "ModuleName" gunfold _ _ = error "gunfold" dataTypeOf _ = mkNoRepType "ModuleName" instance NFData ModuleName where rnf x = x `seq` () stableModuleNameCmp :: ModuleName -> ModuleName -> Ordering -- ^ Compares module names lexically, rather than by their 'Unique's stableModuleNameCmp n1 n2 = moduleNameFS n1 `lexicalCompareFS` moduleNameFS n2 pprModuleName :: ModuleName -> SDoc pprModuleName (ModuleName nm) = getPprStyle $ \ sty -> if codeStyle sty then ztext (zEncodeFS nm) else ftext nm moduleNameFS :: ModuleName -> FastString moduleNameFS (ModuleName mod) = mod moduleNameString :: ModuleName -> String moduleNameString (ModuleName mod) = unpackFS mod mkModuleName :: String -> ModuleName mkModuleName s = ModuleName (mkFastString s) mkModuleNameFS :: FastString -> ModuleName mkModuleNameFS s = ModuleName s -- |Returns the string version of the module name, with dots replaced by slashes. -- moduleNameSlashes :: ModuleName -> String moduleNameSlashes = dots_to_slashes . moduleNameString where dots_to_slashes = map (\c -> if c == '.' then pathSeparator else c) -- |Returns the string version of the module name, with dots replaced by colons. -- moduleNameColons :: ModuleName -> String moduleNameColons = dots_to_colons . moduleNameString where dots_to_colons = map (\c -> if c == '.' 
then ':' else c) parseModuleName :: ReadP ModuleName parseModuleName = fmap mkModuleName $ Parse.munch1 (\c -> isAlphaNum c || c `elem` "_.") ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Status.hs0000644000000000000000000000310314472400113021614 0ustar0000000000000000module GHC.Unit.Module.Status ( HscBackendAction(..), HscRecompStatus (..) ) where import GHC.Prelude import GHC.Unit import GHC.Unit.Module.ModGuts import GHC.Unit.Module.ModIface import GHC.Utils.Fingerprint import GHC.Linker.Types import GHC.Utils.Outputable -- | Status of a module in incremental compilation data HscRecompStatus -- | Nothing to do because code already exists. = HscUpToDate ModIface (Maybe Linkable) -- | Recompilation of module, or update of interface is required. Optionally -- pass the old interface hash to avoid updating the existing interface when -- it has not changed. | HscRecompNeeded (Maybe Fingerprint) -- | Action to perform in backend compilation data HscBackendAction -- | Update the boot and signature file results. = HscUpdate ModIface -- | Recompile this module. | HscRecomp { hscs_guts :: CgGuts -- ^ Information for the code generator. , hscs_mod_location :: !ModLocation -- ^ Module info , hscs_partial_iface :: !PartialModIface -- ^ Partial interface , hscs_old_iface_hash :: !(Maybe Fingerprint) -- ^ Old interface hash for this compilation, if an old interface file -- exists. Pass to `hscMaybeWriteIface` when writing the interface to -- avoid updating the existing interface when the interface isn't -- changed. 
} instance Outputable HscBackendAction where ppr (HscUpdate mi) = text "Update:" <+> (ppr (mi_module mi)) ppr (HscRecomp _ ml _mi _mf) = text "Recomp:" <+> ppr ml ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Warnings.hs0000644000000000000000000001152514472400113022130 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} {-# LANGUAGE StandaloneDeriving #-} -- | Warnings for a module module GHC.Unit.Module.Warnings ( Warnings (..) , WarningTxt (..) , pprWarningTxtForMsg , mkIfaceWarnCache , emptyIfaceWarnCache , plusWarns ) where import GHC.Prelude import GHC.Types.SourceText import GHC.Types.Name.Occurrence import GHC.Types.SrcLoc import GHC.Hs.Doc import GHC.Hs.Extension import GHC.Utils.Outputable import GHC.Utils.Binary import Language.Haskell.Syntax.Extension import Data.Data -- | Warning Text -- -- reason/explanation from a WARNING or DEPRECATED pragma data WarningTxt pass = WarningTxt (Located SourceText) [Located (WithHsDocIdentifiers StringLiteral pass)] | DeprecatedTxt (Located SourceText) [Located (WithHsDocIdentifiers StringLiteral pass)] deriving instance Eq (IdP pass) => Eq (WarningTxt pass) deriving instance (Data pass, Data (IdP pass)) => Data (WarningTxt pass) instance Outputable (WarningTxt pass) where ppr (WarningTxt lsrc ws) = case unLoc lsrc of NoSourceText -> pp_ws ws SourceText src -> text src <+> pp_ws ws <+> text "#-}" ppr (DeprecatedTxt lsrc ds) = case unLoc lsrc of NoSourceText -> pp_ws ds SourceText src -> text src <+> pp_ws ds <+> text "#-}" instance Binary (WarningTxt GhcRn) where put_ bh (WarningTxt s w) = do putByte bh 0 put_ bh s put_ bh w put_ bh (DeprecatedTxt s d) = do putByte bh 1 put_ bh s put_ bh d get bh = do h <- getByte bh case h of 0 -> do s <- get bh w <- get bh return (WarningTxt s w) _ -> do s <- get bh d <- get bh return (DeprecatedTxt s d) pp_ws :: [Located (WithHsDocIdentifiers StringLiteral pass)] 
-> SDoc pp_ws [l] = ppr $ unLoc l pp_ws ws = text "[" <+> vcat (punctuate comma (map (ppr . unLoc) ws)) <+> text "]" pprWarningTxtForMsg :: WarningTxt p -> SDoc pprWarningTxtForMsg (WarningTxt _ ws) = doubleQuotes (vcat (map (ftext . sl_fs . hsDocString . unLoc) ws)) pprWarningTxtForMsg (DeprecatedTxt _ ds) = text "Deprecated:" <+> doubleQuotes (vcat (map (ftext . sl_fs . hsDocString . unLoc) ds)) -- | Warning information for a module data Warnings pass = NoWarnings -- ^ Nothing deprecated | WarnAll (WarningTxt pass) -- ^ Whole module deprecated | WarnSome [(OccName,WarningTxt pass)] -- ^ Some specific things deprecated -- Only an OccName is needed because -- (1) a deprecation always applies to a binding -- defined in the module in which the deprecation appears. -- (2) deprecations are only reported outside the defining module. -- this is important because, otherwise, if we saw something like -- -- {-# DEPRECATED f "" #-} -- f = ... -- h = f -- g = let f = undefined in f -- -- we'd need more information than an OccName to know to say something -- about the use of f in h but not the use of the locally bound f in g -- -- however, because we only report about deprecations from the outside, -- and a module can only export one value called f, -- an OccName suffices. -- -- this is in contrast with fixity declarations, where we need to map -- a Name to its fixity declaration. 
deriving instance Eq (IdP pass) => Eq (Warnings pass) instance Binary (Warnings GhcRn) where put_ bh NoWarnings = putByte bh 0 put_ bh (WarnAll t) = do putByte bh 1 put_ bh t put_ bh (WarnSome ts) = do putByte bh 2 put_ bh ts get bh = do h <- getByte bh case h of 0 -> return NoWarnings 1 -> do aa <- get bh return (WarnAll aa) _ -> do aa <- get bh return (WarnSome aa) -- | Constructs the cache for the 'mi_warn_fn' field of a 'ModIface' mkIfaceWarnCache :: Warnings p -> OccName -> Maybe (WarningTxt p) mkIfaceWarnCache NoWarnings = \_ -> Nothing mkIfaceWarnCache (WarnAll t) = \_ -> Just t mkIfaceWarnCache (WarnSome pairs) = lookupOccEnv (mkOccEnv pairs) emptyIfaceWarnCache :: OccName -> Maybe (WarningTxt p) emptyIfaceWarnCache _ = Nothing plusWarns :: Warnings p -> Warnings p -> Warnings p plusWarns d NoWarnings = d plusWarns NoWarnings d = d plusWarns _ (WarnAll t) = WarnAll t plusWarns (WarnAll t) _ = WarnAll t plusWarns (WarnSome v1) (WarnSome v2) = WarnSome (v1 ++ v2) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Parser.hs0000644000000000000000000000272414472400113020350 0ustar0000000000000000-- | Parsers for unit/module identifiers module GHC.Unit.Parser ( parseUnit , parseUnitId , parseHoleyModule , parseModSubst ) where import GHC.Prelude import GHC.Unit.Types import GHC.Unit.Module.Name import GHC.Data.FastString import qualified Text.ParserCombinators.ReadP as Parse import Text.ParserCombinators.ReadP (ReadP, (<++)) import Data.Char (isAlphaNum) parseUnit :: ReadP Unit parseUnit = parseVirtUnitId <++ parseDefUnitId where parseVirtUnitId = do uid <- parseUnitId insts <- parseModSubst return (mkVirtUnit uid insts) parseDefUnitId = do s <- parseUnitId return (RealUnit (Definite s)) parseUnitId :: ReadP UnitId parseUnitId = do s <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "-_.+") return (UnitId (mkFastString s)) parseHoleyModule :: ReadP Module parseHoleyModule = parseModuleVar <++ parseModule where parseModuleVar = do _ <- Parse.char '<' modname <- 
parseModuleName _ <- Parse.char '>' return (Module HoleUnit modname) parseModule = do uid <- parseUnit _ <- Parse.char ':' modname <- parseModuleName return (Module uid modname) parseModSubst :: ReadP [(ModuleName, Module)] parseModSubst = Parse.between (Parse.char '[') (Parse.char ']') . flip Parse.sepBy (Parse.char ',') $ do k <- parseModuleName _ <- Parse.char '=' v <- parseHoleyModule return (k, v) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Ppr.hs0000644000000000000000000000213614472400113017652 0ustar0000000000000000-- | Unit identifier pretty-printing module GHC.Unit.Ppr ( UnitPprInfo (..) ) where import GHC.Prelude import GHC.Data.FastString import GHC.Utils.Outputable import Data.Version -- | Subset of UnitInfo: just enough to pretty-print a unit-id -- -- Instead of printing the unit-id which may contain a hash, we print: -- package-version:componentname -- data UnitPprInfo = UnitPprInfo { unitPprId :: FastString -- ^ Identifier , unitPprPackageName :: String -- ^ Source package name , unitPprPackageVersion :: Version -- ^ Source package version , unitPprComponentName :: Maybe String -- ^ Component name } instance Outputable UnitPprInfo where ppr pprinfo = getPprDebug $ \debug -> if debug then ftext (unitPprId pprinfo) else text $ mconcat [ unitPprPackageName pprinfo , case unitPprPackageVersion pprinfo of Version [] [] -> "" version -> "-" ++ showVersion version , case unitPprComponentName pprinfo of Nothing -> "" Just cname -> ":" ++ cname ] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/State.hs0000644000000000000000000027261514472400113020204 0ustar0000000000000000-- (c) The University of Glasgow, 2006 {-# LANGUAGE ScopedTypeVariables, BangPatterns, FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} -- | Unit manipulation module GHC.Unit.State ( module GHC.Unit.Info, -- * Reading the package config, and processing cmdline args UnitState(..), PreloadUnitClosure, UnitDatabase (..), UnitErr (..), emptyUnitState, 
initUnits, readUnitDatabases, readUnitDatabase, getUnitDbRefs, resolveUnitDatabase, listUnitInfo, -- * Querying the package config UnitInfoMap, lookupUnit, lookupUnit', unsafeLookupUnit, lookupUnitId, lookupUnitId', unsafeLookupUnitId, lookupPackageName, resolvePackageImport, improveUnit, searchPackageId, listVisibleModuleNames, lookupModuleInAllUnits, lookupModuleWithSuggestions, lookupModulePackage, lookupPluginModuleWithSuggestions, requirementMerges, LookupResult(..), ModuleSuggestion(..), ModuleOrigin(..), UnusableUnitReason(..), pprReason, closeUnitDeps, closeUnitDeps', mayThrowUnitErr, -- * Module hole substitution ShHoleSubst, renameHoleUnit, renameHoleModule, renameHoleUnit', renameHoleModule', instUnitToUnit, instModuleToModule, -- * Pretty-printing pprFlag, pprUnits, pprUnitsSimple, pprUnitIdForUser, pprUnitInfoForUser, pprModuleMap, pprWithUnitState, -- * Utils unwireUnit, implicitPackageDeps) where import GHC.Prelude import GHC.Driver.Session import GHC.Platform import GHC.Platform.Ways import GHC.Unit.Database import GHC.Unit.Info import GHC.Unit.Ppr import GHC.Unit.Types import GHC.Unit.Module import GHC.Unit.Home import GHC.Types.Unique.FM import GHC.Types.Unique.DFM import GHC.Types.Unique.Set import GHC.Types.Unique.DSet import GHC.Types.PkgQual import GHC.Utils.Misc import GHC.Utils.Panic import GHC.Utils.Outputable as Outputable import GHC.Data.Maybe import System.Environment ( getEnv ) import GHC.Data.FastString import qualified GHC.Data.ShortText as ST import GHC.Utils.Logger import GHC.Utils.Error import GHC.Utils.Exception import System.Directory import System.FilePath as FilePath import Control.Monad import Data.Graph (stronglyConnComp, SCC(..)) import Data.Char ( toUpper ) import Data.List ( intersperse, partition, sortBy, isSuffixOf ) import Data.Map (Map) import Data.Set (Set) import Data.Monoid (First(..)) import qualified Data.Semigroup as Semigroup import qualified Data.Map as Map import qualified Data.Map.Strict as MapStrict import 
qualified Data.Set as Set import GHC.LanguageExtensions import Control.Applicative -- --------------------------------------------------------------------------- -- The Unit state -- The unit state is computed by 'initUnits', and kept in HscEnv. -- It is influenced by various command-line flags: -- -- * @-package \@ and @-package-id \@ cause @\@ to become exposed. -- If @-hide-all-packages@ was not specified, these commands also cause -- all other packages with the same name to become hidden. -- -- * @-hide-package \@ causes @\@ to become hidden. -- -- * (there are a few more flags, check below for their semantics) -- -- The unit state has the following properties. -- -- * Let @exposedUnits@ be the set of packages thus exposed. -- Let @depExposedUnits@ be the transitive closure from @exposedUnits@ of -- their dependencies. -- -- * When searching for a module from a preload import declaration, -- only the exposed modules in @exposedUnits@ are valid. -- -- * When searching for a module from an implicit import, all modules -- from @depExposedUnits@ are valid. -- -- * When linking in a compilation manager mode, we link in packages the -- program depends on (the compiler knows this list by the -- time it gets to the link step). Also, we link in all packages -- which were mentioned with preload @-package@ flags on the command-line, -- or are a transitive dependency of same, or are \"base\"\/\"rts\". -- The reason for this is that we might need packages which don't -- contain any Haskell modules, and therefore won't be discovered -- by the normal mechanism of dependency tracking. -- Notes on DLLs -- ~~~~~~~~~~~~~ -- When compiling module A, which imports module B, we need to -- know whether B will be in the same DLL as A. -- If it's in the same DLL, we refer to B_f_closure -- If it isn't, we refer to _imp__B_f_closure -- When compiling A, we record in B's Module value whether it's -- in a different DLL, by setting the DLL flag. 
-- | Given a module name, there may be multiple ways it came into scope, -- possibly simultaneously. This data type tracks all the possible ways -- it could have come into scope. Warning: don't use the record functions, -- they're partial! data ModuleOrigin = -- | Module is hidden, and thus never will be available for import. -- (But maybe the user didn't realize), so we'll still keep track -- of these modules.) ModHidden -- | Module is unavailable because the package is unusable. | ModUnusable UnusableUnitReason -- | Module is public, and could have come from some places. | ModOrigin { -- | @Just False@ means that this module is in -- someone's @exported-modules@ list, but that package is hidden; -- @Just True@ means that it is available; @Nothing@ means neither -- applies. fromOrigUnit :: Maybe Bool -- | Is the module available from a reexport of an exposed package? -- There could be multiple. , fromExposedReexport :: [UnitInfo] -- | Is the module available from a reexport of a hidden package? , fromHiddenReexport :: [UnitInfo] -- | Did the module export come from a package flag? (ToDo: track -- more information. , fromPackageFlag :: Bool } instance Outputable ModuleOrigin where ppr ModHidden = text "hidden module" ppr (ModUnusable _) = text "unusable module" ppr (ModOrigin e res rhs f) = sep (punctuate comma ( (case e of Nothing -> [] Just False -> [text "hidden package"] Just True -> [text "exposed package"]) ++ (if null res then [] else [text "reexport by" <+> sep (map (ppr . mkUnit) res)]) ++ (if null rhs then [] else [text "hidden reexport by" <+> sep (map (ppr . mkUnit) res)]) ++ (if f then [text "package flag"] else []) )) -- | Smart constructor for a module which is in @exposed-modules@. Takes -- as an argument whether or not the defining package is exposed. fromExposedModules :: Bool -> ModuleOrigin fromExposedModules e = ModOrigin (Just e) [] [] False -- | Smart constructor for a module which is in @reexported-modules@. 
Takes -- as an argument whether or not the reexporting package is exposed, and -- also its 'UnitInfo'. fromReexportedModules :: Bool -> UnitInfo -> ModuleOrigin fromReexportedModules True pkg = ModOrigin Nothing [pkg] [] False fromReexportedModules False pkg = ModOrigin Nothing [] [pkg] False -- | Smart constructor for a module which was bound by a package flag. fromFlag :: ModuleOrigin fromFlag = ModOrigin Nothing [] [] True instance Semigroup ModuleOrigin where x@(ModOrigin e res rhs f) <> y@(ModOrigin e' res' rhs' f') = ModOrigin (g e e') (res ++ res') (rhs ++ rhs') (f || f') where g (Just b) (Just b') | b == b' = Just b | otherwise = pprPanic "ModOrigin: package both exposed/hidden" $ text "x: " <> ppr x $$ text "y: " <> ppr y g Nothing x = x g x Nothing = x x <> y = pprPanic "ModOrigin: hidden module redefined" $ text "x: " <> ppr x $$ text "y: " <> ppr y instance Monoid ModuleOrigin where mempty = ModOrigin Nothing [] [] False mappend = (Semigroup.<>) -- | Is the name from the import actually visible? (i.e. does it cause -- ambiguity, or is it only relevant when we're making suggestions?) originVisible :: ModuleOrigin -> Bool originVisible ModHidden = False originVisible (ModUnusable _) = False originVisible (ModOrigin b res _ f) = b == Just True || not (null res) || f -- | Are there actually no providers for this module? This will never occur -- except when we're filtering based on package imports. originEmpty :: ModuleOrigin -> Bool originEmpty (ModOrigin Nothing [] [] False) = True originEmpty _ = False type PreloadUnitClosure = UniqSet UnitId -- | 'UniqFM' map from 'Unit' to a 'UnitVisibility'. type VisibilityMap = Map Unit UnitVisibility -- | 'UnitVisibility' records the various aspects of visibility of a particular -- 'Unit'. data UnitVisibility = UnitVisibility { uv_expose_all :: Bool -- ^ Should all modules in exposed-modules should be dumped into scope? 
, uv_renamings :: [(ModuleName, ModuleName)] -- ^ Any custom renamings that should bring extra 'ModuleName's into -- scope. , uv_package_name :: First FastString -- ^ The package name associated with the 'Unit'. This is used -- to implement legacy behavior where @-package foo-0.1@ implicitly -- hides any packages named @foo@ , uv_requirements :: Map ModuleName (Set InstantiatedModule) -- ^ The signatures which are contributed to the requirements context -- from this unit ID. , uv_explicit :: Maybe PackageArg -- ^ Whether or not this unit was explicitly brought into scope, -- as opposed to implicitly via the 'exposed' fields in the -- package database (when @-hide-all-packages@ is not passed.) } instance Outputable UnitVisibility where ppr (UnitVisibility { uv_expose_all = b, uv_renamings = rns, uv_package_name = First mb_pn, uv_requirements = reqs, uv_explicit = explicit }) = ppr (b, rns, mb_pn, reqs, explicit) instance Semigroup UnitVisibility where uv1 <> uv2 = UnitVisibility { uv_expose_all = uv_expose_all uv1 || uv_expose_all uv2 , uv_renamings = uv_renamings uv1 ++ uv_renamings uv2 , uv_package_name = mappend (uv_package_name uv1) (uv_package_name uv2) , uv_requirements = Map.unionWith Set.union (uv_requirements uv1) (uv_requirements uv2) , uv_explicit = uv_explicit uv1 <|> uv_explicit uv2 } instance Monoid UnitVisibility where mempty = UnitVisibility { uv_expose_all = False , uv_renamings = [] , uv_package_name = First Nothing , uv_requirements = Map.empty , uv_explicit = Nothing } mappend = (Semigroup.<>) -- | Unit configuration data UnitConfig = UnitConfig { unitConfigPlatformArchOS :: !ArchOS -- ^ Platform arch and OS , unitConfigWays :: !Ways -- ^ Ways to use , unitConfigAllowVirtual :: !Bool -- ^ Allow virtual units -- ^ Do we allow the use of virtual units instantiated on-the-fly (see -- Note [About units] in GHC.Unit). This should only be true when we are -- type-checking an indefinite unit (not producing any code). 
, unitConfigProgramName :: !String -- ^ Name of the compiler (e.g. "GHC", "GHCJS"). Used to fetch environment -- variables such as "GHC[JS]_PACKAGE_PATH". , unitConfigGlobalDB :: !FilePath -- ^ Path to global DB , unitConfigGHCDir :: !FilePath -- ^ Main GHC dir: contains settings, etc. , unitConfigDBName :: !String -- ^ User DB name (e.g. "package.conf.d") , unitConfigAutoLink :: ![UnitId] -- ^ Units to link automatically (e.g. base, rts) , unitConfigDistrustAll :: !Bool -- ^ Distrust all units by default , unitConfigHideAll :: !Bool -- ^ Hide all units by default , unitConfigHideAllPlugins :: !Bool -- ^ Hide all plugins units by default , unitConfigDBCache :: Maybe [UnitDatabase UnitId] -- ^ Cache of databases to use, in the order they were specified on the -- command line (later databases shadow earlier ones). -- If Nothing, databases will be found using `unitConfigFlagsDB`. -- command-line flags , unitConfigFlagsDB :: [PackageDBFlag] -- ^ Unit databases flags , unitConfigFlagsExposed :: [PackageFlag] -- ^ Exposed units , unitConfigFlagsIgnored :: [IgnorePackageFlag] -- ^ Ignored units , unitConfigFlagsTrusted :: [TrustFlag] -- ^ Trusted units , unitConfigFlagsPlugins :: [PackageFlag] -- ^ Plugins exposed units , unitConfigHomeUnits :: Set.Set UnitId } initUnitConfig :: DynFlags -> Maybe [UnitDatabase UnitId] -> Set.Set UnitId -> UnitConfig initUnitConfig dflags cached_dbs home_units = let !hu_id = homeUnitId_ dflags !hu_instanceof = homeUnitInstanceOf_ dflags !hu_instantiations = homeUnitInstantiations_ dflags autoLink | not (gopt Opt_AutoLinkPackages dflags) = [] -- By default we add base & rts to the preload units (when they are -- found in the unit database) except when we are building them | otherwise = filter (hu_id /=) [baseUnitId, rtsUnitId] -- if the home unit is indefinite, it means we are type-checking it only -- (not producing any code). Hence we can use virtual units instantiated -- on-the-fly. 
See Note [About units] in GHC.Unit allow_virtual_units = case (hu_instanceof, hu_instantiations) of (Just u, is) -> u == hu_id && any (isHoleModule . snd) is _ -> False in UnitConfig { unitConfigPlatformArchOS = platformArchOS (targetPlatform dflags) , unitConfigProgramName = programName dflags , unitConfigWays = ways dflags , unitConfigAllowVirtual = allow_virtual_units , unitConfigGlobalDB = globalPackageDatabasePath dflags , unitConfigGHCDir = topDir dflags , unitConfigDBName = "package.conf.d" , unitConfigAutoLink = autoLink , unitConfigDistrustAll = gopt Opt_DistrustAllPackages dflags , unitConfigHideAll = gopt Opt_HideAllPackages dflags , unitConfigHideAllPlugins = gopt Opt_HideAllPluginPackages dflags , unitConfigDBCache = cached_dbs , unitConfigFlagsDB = map (offsetPackageDb (workingDirectory dflags)) $ packageDBFlags dflags , unitConfigFlagsExposed = packageFlags dflags , unitConfigFlagsIgnored = ignorePackageFlags dflags , unitConfigFlagsTrusted = trustFlags dflags , unitConfigFlagsPlugins = pluginPackageFlags dflags , unitConfigHomeUnits = home_units } where offsetPackageDb :: Maybe FilePath -> PackageDBFlag -> PackageDBFlag offsetPackageDb (Just offset) (PackageDB (PkgDbPath p)) | isRelative p = PackageDB (PkgDbPath (offset p)) offsetPackageDb _ p = p -- | Map from 'ModuleName' to a set of module providers (i.e. a 'Module' and -- its 'ModuleOrigin'). -- -- NB: the set is in fact a 'Map Module ModuleOrigin', probably to keep only one -- origin for a given 'Module' type ModuleNameProvidersMap = Map ModuleName (Map Module ModuleOrigin) data UnitState = UnitState { -- | A mapping of 'Unit' to 'UnitInfo'. This list is adjusted -- so that only valid units are here. 'UnitInfo' reflects -- what was stored *on disk*, except for the 'trusted' flag, which -- is adjusted at runtime. (In particular, some units in this map -- may have the 'exposed' flag be 'False'.) 
unitInfoMap :: UnitInfoMap, -- | The set of transitively reachable units according -- to the explicitly provided command line arguments. -- A fully instantiated VirtUnit may only be replaced by a RealUnit from -- this set. -- See Note [VirtUnit to RealUnit improvement] preloadClosure :: PreloadUnitClosure, -- | A mapping of 'PackageName' to 'UnitId'. If several units have the same -- package name (e.g. different instantiations), then we return one of them... -- This is used when users refer to packages in Backpack includes. -- And also to resolve package qualifiers with the PackageImports extension. packageNameMap :: UniqFM PackageName UnitId, -- | A mapping from database unit keys to wired in unit ids. wireMap :: Map UnitId UnitId, -- | A mapping from wired in unit ids to unit keys from the database. unwireMap :: Map UnitId UnitId, -- | The units we're going to link in eagerly. This list -- should be in reverse dependency order; that is, a unit -- is always mentioned before the units it depends on. preloadUnits :: [UnitId], -- | Units which we explicitly depend on (from a command line flag). -- We'll use this to generate version macros and the unused packages warning. The -- original flag which was used to bring the unit into scope is recorded for the -- -Wunused-packages warning. explicitUnits :: [(Unit, Maybe PackageArg)], homeUnitDepends :: [UnitId], -- | This is a full map from 'ModuleName' to all modules which may possibly -- be providing it. These providers may be hidden (but we'll still want -- to report them in error messages), or it may be an ambiguous import. moduleNameProvidersMap :: !ModuleNameProvidersMap, -- | A map, like 'moduleNameProvidersMap', but controlling plugin visibility. pluginModuleNameProvidersMap :: !ModuleNameProvidersMap, -- | A map saying, for each requirement, what interfaces must be merged -- together when we use them. 
For example, if our dependencies -- are @p[A=\]@ and @q[A=\,B=r[C=\]:B]@, then the interfaces -- to merge for A are @p[A=\]:A@, @q[A=\,B=r[C=\]:B]:A@ -- and @r[C=\]:C@. -- -- There's an entry in this map for each hole in our home library. requirementContext :: Map ModuleName [InstantiatedModule], -- | Indicate if we can instantiate units on-the-fly. -- -- This should only be true when we are type-checking an indefinite unit. -- See Note [About units] in GHC.Unit. allowVirtualUnits :: !Bool } emptyUnitState :: UnitState emptyUnitState = UnitState { unitInfoMap = Map.empty, preloadClosure = emptyUniqSet, packageNameMap = emptyUFM, wireMap = Map.empty, unwireMap = Map.empty, preloadUnits = [], explicitUnits = [], homeUnitDepends = [], moduleNameProvidersMap = Map.empty, pluginModuleNameProvidersMap = Map.empty, requirementContext = Map.empty, allowVirtualUnits = False } -- | Unit database data UnitDatabase unit = UnitDatabase { unitDatabasePath :: FilePath , unitDatabaseUnits :: [GenUnitInfo unit] } instance Outputable u => Outputable (UnitDatabase u) where ppr (UnitDatabase fp _u) = text "DB:" <+> text fp type UnitInfoMap = Map UnitId UnitInfo -- | Find the unit we know about with the given unit, if any lookupUnit :: UnitState -> Unit -> Maybe UnitInfo lookupUnit pkgs = lookupUnit' (allowVirtualUnits pkgs) (unitInfoMap pkgs) (preloadClosure pkgs) -- | A more specialized interface, which doesn't require a 'UnitState' (so it -- can be used while we're initializing 'DynFlags') -- -- Parameters: -- * a boolean specifying whether or not to look for on-the-fly renamed interfaces -- * a 'UnitInfoMap' -- * a 'PreloadUnitClosure' lookupUnit' :: Bool -> UnitInfoMap -> PreloadUnitClosure -> Unit -> Maybe UnitInfo lookupUnit' allowOnTheFlyInst pkg_map closure u = case u of HoleUnit -> error "Hole unit" RealUnit i -> Map.lookup (unDefinite i) pkg_map VirtUnit i | allowOnTheFlyInst -> -- lookup UnitInfo of the indefinite unit to be instantiated and -- instantiate it on-the-fly 
fmap (renameUnitInfo pkg_map closure (instUnitInsts i)) (Map.lookup (instUnitInstanceOf i) pkg_map) | otherwise -> -- lookup UnitInfo by virtual UnitId. This is used to find indefinite -- units. Even if they are real, installed units, they can't use the -- `RealUnit` constructor (it is reserved for definite units) so we use -- the `VirtUnit` constructor. Map.lookup (virtualUnitId i) pkg_map -- | Find the unit we know about with the given unit id, if any lookupUnitId :: UnitState -> UnitId -> Maybe UnitInfo lookupUnitId state uid = lookupUnitId' (unitInfoMap state) uid -- | Find the unit we know about with the given unit id, if any lookupUnitId' :: UnitInfoMap -> UnitId -> Maybe UnitInfo lookupUnitId' db uid = Map.lookup uid db -- | Looks up the given unit in the unit state, panicing if it is not found unsafeLookupUnit :: HasDebugCallStack => UnitState -> Unit -> UnitInfo unsafeLookupUnit state u = case lookupUnit state u of Just info -> info Nothing -> pprPanic "unsafeLookupUnit" (ppr u) -- | Looks up the given unit id in the unit state, panicing if it is not found unsafeLookupUnitId :: HasDebugCallStack => UnitState -> UnitId -> UnitInfo unsafeLookupUnitId state uid = case lookupUnitId state uid of Just info -> info Nothing -> pprPanic "unsafeLookupUnitId" (ppr uid) -- | Find the unit we know about with the given package name (e.g. @foo@), if any -- (NB: there might be a locally defined unit name which overrides this) -- This function is unsafe to use in general because it doesn't respect package -- visibility. lookupPackageName :: UnitState -> PackageName -> Maybe UnitId lookupPackageName pkgstate n = lookupUFM (packageNameMap pkgstate) n -- | Search for units with a given package ID (e.g. \"foo-0.1\") searchPackageId :: UnitState -> PackageId -> [UnitInfo] searchPackageId pkgstate pid = filter ((pid ==) . unitPackageId) (listUnitInfo pkgstate) -- | Find the UnitId which an import qualified by a package import comes from. 
-- Compared to 'lookupPackageName', this function correctly accounts for visibility, -- renaming and thinning. resolvePackageImport :: UnitState -> ModuleName -> PackageName -> Maybe UnitId resolvePackageImport unit_st mn pn = do -- 1. Find all modules providing the ModuleName (this accounts for visibility/thinning etc) providers <- Map.filter originVisible <$> Map.lookup mn (moduleNameProvidersMap unit_st) -- 2. Get the UnitIds of the candidates let candidates_uid = concatMap to_uid $ Map.assocs providers -- 3. Get the package names of the candidates let candidates_units = map (\ui -> ((unitPackageName ui), unitId ui)) $ mapMaybe (\uid -> Map.lookup uid (unitInfoMap unit_st)) candidates_uid -- 4. Check to see if the PackageName helps us disambiguate any candidates. lookup pn candidates_units where -- Get the UnitId from which a visible identifier is from to_uid :: (Module, ModuleOrigin) -> [UnitId] to_uid (mod, ModOrigin mo re_exps _ _) = case mo of -- Available directly, but also potentially from re-exports Just True -> (toUnitId (moduleUnit mod)) : map unitId re_exps -- Just available from these re-exports _ -> map unitId re_exps to_uid _ = [] -- | Create a Map UnitId UnitInfo -- -- For each instantiated unit, we add two map keys: -- * the real unit id -- * the virtual unit id made from its instantiation -- -- We do the same thing for fully indefinite units (which are "instantiated" -- with module holes). -- mkUnitInfoMap :: [UnitInfo] -> UnitInfoMap mkUnitInfoMap infos = foldl' add Map.empty infos where mkVirt p = virtualUnitId (mkInstantiatedUnit (unitInstanceOf p) (unitInstantiations p)) add pkg_map p | not (null (unitInstantiations p)) = Map.insert (mkVirt p) p $ Map.insert (unitId p) p $ pkg_map | otherwise = Map.insert (unitId p) p pkg_map -- | Get a list of entries from the unit database. 
NB: be careful with -- this function, although all units in this map are "visible", this -- does not imply that the exposed-modules of the unit are available -- (they may have been thinned or renamed). listUnitInfo :: UnitState -> [UnitInfo] listUnitInfo state = Map.elems (unitInfoMap state) -- ---------------------------------------------------------------------------- -- Loading the unit db files and building up the unit state -- | Read the unit database files, and sets up various internal tables of -- unit information, according to the unit-related flags on the -- command-line (@-package@, @-hide-package@ etc.) -- -- 'initUnits' can be called again subsequently after updating the -- 'packageFlags' field of the 'DynFlags', and it will update the -- 'unitState' in 'DynFlags'. initUnits :: Logger -> DynFlags -> Maybe [UnitDatabase UnitId] -> Set.Set UnitId -> IO ([UnitDatabase UnitId], UnitState, HomeUnit, Maybe PlatformConstants) initUnits logger dflags cached_dbs home_units = do let forceUnitInfoMap (state, _) = unitInfoMap state `seq` () (unit_state,dbs) <- withTiming logger (text "initializing unit database") forceUnitInfoMap $ mkUnitState logger (initUnitConfig dflags cached_dbs home_units) putDumpFileMaybe logger Opt_D_dump_mod_map "Module Map" FormatText (updSDocContext (\ctx -> ctx {sdocLineLength = 200}) $ pprModuleMap (moduleNameProvidersMap unit_state)) let home_unit = mkHomeUnit unit_state (homeUnitId_ dflags) (homeUnitInstanceOf_ dflags) (homeUnitInstantiations_ dflags) -- Try to find platform constants -- -- See Note [Platform constants] in GHC.Platform mconstants <- if homeUnitId_ dflags == rtsUnitId then do -- we're building the RTS! Lookup GhclibDerivedConstants.h in the include paths lookupPlatformConstants (includePathsGlobal (includePaths dflags)) else -- lookup the GhclibDerivedConstants.h header bundled with the RTS unit. We -- don't fail if we can't find the RTS unit as it can be a valid (but -- uncommon) case, e.g. 
building a C utility program (not depending on the -- RTS) before building the RTS. In any case, we will fail later on if we -- really need to use the platform constants but they have not been loaded. case lookupUnitId unit_state rtsUnitId of Nothing -> return Nothing Just info -> lookupPlatformConstants (fmap ST.unpack (unitIncludeDirs info)) return (dbs,unit_state,home_unit,mconstants) mkHomeUnit :: UnitState -> UnitId -- ^ Home unit id -> Maybe UnitId -- ^ Home unit instance of -> [(ModuleName, Module)] -- ^ Home unit instantiations -> HomeUnit mkHomeUnit unit_state hu_id hu_instanceof hu_instantiations_ = let -- Some wired units can be used to instantiate the home unit. We need to -- replace their unit keys with their wired unit ids. wmap = wireMap unit_state hu_instantiations = map (fmap (upd_wired_in_mod wmap)) hu_instantiations_ in case (hu_instanceof, hu_instantiations) of (Nothing,[]) -> DefiniteHomeUnit hu_id Nothing (Nothing, _) -> throwGhcException $ CmdLineError ("Use of -instantiated-with requires -this-component-id") (Just _, []) -> throwGhcException $ CmdLineError ("Use of -this-component-id requires -instantiated-with") (Just u, is) -- detect fully indefinite units: all their instantiations are hole -- modules and the home unit id is the same as the instantiating unit -- id (see Note [About units] in GHC.Unit) | all (isHoleModule . snd) is && u == hu_id -> IndefiniteHomeUnit u is -- otherwise it must be that we (fully) instantiate an indefinite unit -- to make it definite. -- TODO: error when the unit is partially instantiated?? 
| otherwise -> DefiniteHomeUnit hu_id (Just (u, is)) -- ----------------------------------------------------------------------------- -- Reading the unit database(s) readUnitDatabases :: Logger -> UnitConfig -> IO [UnitDatabase UnitId] readUnitDatabases logger cfg = do conf_refs <- getUnitDbRefs cfg confs <- liftM catMaybes $ mapM (resolveUnitDatabase cfg) conf_refs mapM (readUnitDatabase logger cfg) confs getUnitDbRefs :: UnitConfig -> IO [PkgDbRef] getUnitDbRefs cfg = do let system_conf_refs = [UserPkgDb, GlobalPkgDb] e_pkg_path <- tryIO (getEnv $ map toUpper (unitConfigProgramName cfg) ++ "_PACKAGE_PATH") let base_conf_refs = case e_pkg_path of Left _ -> system_conf_refs Right path | not (null path) && isSearchPathSeparator (last path) -> map PkgDbPath (splitSearchPath (init path)) ++ system_conf_refs | otherwise -> map PkgDbPath (splitSearchPath path) -- Apply the package DB-related flags from the command line to get the -- final list of package DBs. -- -- Notes on ordering: -- * The list of flags is reversed (later ones first) -- * We work with the package DB list in "left shadows right" order -- * and finally reverse it at the end, to get "right shadows left" -- return $ reverse (foldr doFlag base_conf_refs (unitConfigFlagsDB cfg)) where doFlag (PackageDB p) dbs = p : dbs doFlag NoUserPackageDB dbs = filter isNotUser dbs doFlag NoGlobalPackageDB dbs = filter isNotGlobal dbs doFlag ClearPackageDBs _ = [] isNotUser UserPkgDb = False isNotUser _ = True isNotGlobal GlobalPkgDb = False isNotGlobal _ = True -- | Return the path of a package database from a 'PkgDbRef'. Return 'Nothing' -- when the user database filepath is expected but the latter doesn't exist. -- -- NB: This logic is reimplemented in Cabal, so if you change it, -- make sure you update Cabal. (Or, better yet, dump it in the -- compiler info so Cabal can use the info.) 
resolveUnitDatabase :: UnitConfig -> PkgDbRef -> IO (Maybe FilePath) resolveUnitDatabase cfg GlobalPkgDb = return $ Just (unitConfigGlobalDB cfg) resolveUnitDatabase cfg UserPkgDb = runMaybeT $ do dir <- versionedAppDir (unitConfigProgramName cfg) (unitConfigPlatformArchOS cfg) let pkgconf = dir unitConfigDBName cfg exist <- tryMaybeT $ doesDirectoryExist pkgconf if exist then return pkgconf else mzero resolveUnitDatabase _ (PkgDbPath name) = return $ Just name readUnitDatabase :: Logger -> UnitConfig -> FilePath -> IO (UnitDatabase UnitId) readUnitDatabase logger cfg conf_file = do isdir <- doesDirectoryExist conf_file proto_pkg_configs <- if isdir then readDirStyleUnitInfo conf_file else do isfile <- doesFileExist conf_file if isfile then do mpkgs <- tryReadOldFileStyleUnitInfo case mpkgs of Just pkgs -> return pkgs Nothing -> throwGhcExceptionIO $ InstallationError $ "ghc no longer supports single-file style package " ++ "databases (" ++ conf_file ++ ") use 'ghc-pkg init' to create the database with " ++ "the correct format." else throwGhcExceptionIO $ InstallationError $ "can't find a package database at " ++ conf_file let -- Fix #16360: remove trailing slash from conf_file before calculating pkgroot conf_file' = dropTrailingPathSeparator conf_file top_dir = unitConfigGHCDir cfg pkgroot = takeDirectory conf_file' pkg_configs1 = map (mungeUnitInfo top_dir pkgroot . mapUnitInfo (\(UnitKey x) -> UnitId x) . mkUnitKeyInfo) proto_pkg_configs -- return $ UnitDatabase conf_file' pkg_configs1 where readDirStyleUnitInfo conf_dir = do let filename = conf_dir "package.cache" cache_exists <- doesFileExist filename if cache_exists then do debugTraceMsg logger 2 $ text "Using binary package database:" <+> text filename readPackageDbForGhc filename else do -- If there is no package.cache file, we check if the database is not -- empty by inspecting if the directory contains any .conf file. If it -- does, something is wrong and we fail. 
Otherwise we assume that the -- database is empty. debugTraceMsg logger 2 $ text "There is no package.cache in" <+> text conf_dir <> text ", checking if the database is empty" db_empty <- all (not . isSuffixOf ".conf") <$> getDirectoryContents conf_dir if db_empty then do debugTraceMsg logger 3 $ text "There are no .conf files in" <+> text conf_dir <> text ", treating" <+> text "package database as empty" return [] else throwGhcExceptionIO $ InstallationError $ "there is no package.cache in " ++ conf_dir ++ " even though package database is not empty" -- Single-file style package dbs have been deprecated for some time, but -- it turns out that Cabal was using them in one place. So this is a -- workaround to allow older Cabal versions to use this newer ghc. -- We check if the file db contains just "[]" and if so, we look for a new -- dir-style db in conf_file.d/, ie in a dir next to the given file. -- We cannot just replace the file with a new dir style since Cabal still -- assumes it's a file and tries to overwrite with 'writeFile'. -- ghc-pkg also cooperates with this workaround. tryReadOldFileStyleUnitInfo = do content <- readFile conf_file `catchIO` \_ -> return "" if take 2 content == "[]" then do let conf_dir = conf_file <.> "d" direxists <- doesDirectoryExist conf_dir if direxists then do debugTraceMsg logger 2 (text "Ignoring old file-style db and trying:" <+> text conf_dir) liftM Just (readDirStyleUnitInfo conf_dir) else return (Just []) -- ghc-pkg will create it when it's updated else return Nothing distrustAllUnits :: [UnitInfo] -> [UnitInfo] distrustAllUnits pkgs = map distrust pkgs where distrust pkg = pkg{ unitIsTrusted = False } mungeUnitInfo :: FilePath -> FilePath -> UnitInfo -> UnitInfo mungeUnitInfo top_dir pkgroot = mungeDynLibFields . 
mungeUnitInfoPaths (ST.pack top_dir) (ST.pack pkgroot) mungeDynLibFields :: UnitInfo -> UnitInfo mungeDynLibFields pkg = pkg { unitLibraryDynDirs = case unitLibraryDynDirs pkg of [] -> unitLibraryDirs pkg ds -> ds } -- ----------------------------------------------------------------------------- -- Modify our copy of the unit database based on trust flags, -- -trust and -distrust. applyTrustFlag :: UnitPrecedenceMap -> UnusableUnits -> [UnitInfo] -> TrustFlag -> MaybeErr UnitErr [UnitInfo] applyTrustFlag prec_map unusable pkgs flag = case flag of -- we trust all matching packages. Maybe should only trust first one? -- and leave others the same or set them untrusted TrustPackage str -> case selectPackages prec_map (PackageArg str) pkgs unusable of Left ps -> Failed (TrustFlagErr flag ps) Right (ps,qs) -> Succeeded (map trust ps ++ qs) where trust p = p {unitIsTrusted=True} DistrustPackage str -> case selectPackages prec_map (PackageArg str) pkgs unusable of Left ps -> Failed (TrustFlagErr flag ps) Right (ps,qs) -> Succeeded (distrustAllUnits ps ++ qs) applyPackageFlag :: UnitPrecedenceMap -> UnitInfoMap -> PreloadUnitClosure -> UnusableUnits -> Bool -- if False, if you expose a package, it implicitly hides -- any previously exposed packages with the same name -> [UnitInfo] -> VisibilityMap -- Initially exposed -> PackageFlag -- flag to apply -> MaybeErr UnitErr VisibilityMap -- Now exposed applyPackageFlag prec_map pkg_map closure unusable no_hide_others pkgs vm flag = case flag of ExposePackage _ arg (ModRenaming b rns) -> case findPackages prec_map pkg_map closure arg pkgs unusable of Left ps -> Failed (PackageFlagErr flag ps) Right (p:_) -> Succeeded vm' where n = fsPackageName p -- If a user says @-unit-id p[A=]@, this imposes -- a requirement on us: whatever our signature A is, -- it must fulfill all of p[A=]:A's requirements. -- This method is responsible for computing what our -- inherited requirements are. 
reqs | UnitIdArg orig_uid <- arg = collectHoles orig_uid | otherwise = Map.empty collectHoles uid = case uid of HoleUnit -> Map.empty RealUnit {} -> Map.empty -- definite units don't have holes VirtUnit indef -> let local = [ Map.singleton (moduleName mod) (Set.singleton $ Module indef mod_name) | (mod_name, mod) <- instUnitInsts indef , isHoleModule mod ] recurse = [ collectHoles (moduleUnit mod) | (_, mod) <- instUnitInsts indef ] in Map.unionsWith Set.union $ local ++ recurse uv = UnitVisibility { uv_expose_all = b , uv_renamings = rns , uv_package_name = First (Just n) , uv_requirements = reqs , uv_explicit = Just arg } vm' = Map.insertWith mappend (mkUnit p) uv vm_cleared -- In the old days, if you said `ghc -package p-0.1 -package p-0.2` -- (or if p-0.1 was registered in the pkgdb as exposed: True), -- the second package flag would override the first one and you -- would only see p-0.2 in exposed modules. This is good for -- usability. -- -- However, with thinning and renaming (or Backpack), there might be -- situations where you legitimately want to see two versions of a -- package at the same time, and this behavior would make it -- impossible to do so. So we decided that if you pass -- -hide-all-packages, this should turn OFF the overriding behavior -- where an exposed package hides all other packages with the same -- name. This should not affect Cabal at all, which only ever -- exposes one package at a time. -- -- NB: Why a variable no_hide_others? We have to apply this logic to -- -plugin-package too, and it's more consistent if the switch in -- behavior is based off of -- -hide-all-packages/-hide-all-plugin-packages depending on what -- flag is in question. 
vm_cleared | no_hide_others = vm -- NB: renamings never clear | (_:_) <- rns = vm | otherwise = Map.filterWithKey (\k uv -> k == mkUnit p || First (Just n) /= uv_package_name uv) vm _ -> panic "applyPackageFlag" HidePackage str -> case findPackages prec_map pkg_map closure (PackageArg str) pkgs unusable of Left ps -> Failed (PackageFlagErr flag ps) Right ps -> Succeeded $ foldl' (flip Map.delete) vm (map mkUnit ps) -- | Like 'selectPackages', but doesn't return a list of unmatched -- packages. Furthermore, any packages it returns are *renamed* -- if the 'UnitArg' has a renaming associated with it. findPackages :: UnitPrecedenceMap -> UnitInfoMap -> PreloadUnitClosure -> PackageArg -> [UnitInfo] -> UnusableUnits -> Either [(UnitInfo, UnusableUnitReason)] [UnitInfo] findPackages prec_map pkg_map closure arg pkgs unusable = let ps = mapMaybe (finder arg) pkgs in if null ps then Left (mapMaybe (\(x,y) -> finder arg x >>= \x' -> return (x',y)) (Map.elems unusable)) else Right (sortByPreference prec_map ps) where finder (PackageArg str) p = if matchingStr str p then Just p else Nothing finder (UnitIdArg uid) p = case uid of RealUnit (Definite iuid) | iuid == unitId p -> Just p VirtUnit inst | instUnitInstanceOf inst == unitId p -> Just (renameUnitInfo pkg_map closure (instUnitInsts inst) p) _ -> Nothing selectPackages :: UnitPrecedenceMap -> PackageArg -> [UnitInfo] -> UnusableUnits -> Either [(UnitInfo, UnusableUnitReason)] ([UnitInfo], [UnitInfo]) selectPackages prec_map arg pkgs unusable = let matches = matching arg (ps,rest) = partition matches pkgs in if null ps then Left (filter (matches.fst) (Map.elems unusable)) else Right (sortByPreference prec_map ps, rest) -- | Rename a 'UnitInfo' according to some module instantiation. 
renameUnitInfo :: UnitInfoMap -> PreloadUnitClosure -> [(ModuleName, Module)] -> UnitInfo -> UnitInfo
renameUnitInfo pkg_map closure insts conf =
    conf { unitInstantiations = insts'
         , unitExposedModules = [ (m, fmap subst_mod mb_mod)
                                | (m, mb_mod) <- unitExposedModules conf ]
         }
  where
    -- Substitution environment: hole name |-> concrete module.
    subst_env = listToUFM insts
    -- Substitute holes occurring inside one module.
    subst_mod = renameHoleModule' pkg_map closure subst_env
    -- Instantiations of the unit, with every hole substituted.
    insts'    = [ (k, subst_mod v) | (k, v) <- unitInstantiations conf ]

-- A package named on the command line can either include the
-- version, or just the name if it is unambiguous.
matchingStr :: String -> UnitInfo -> Bool
matchingStr str info
  | str == unitPackageIdString info   = True  -- e.g. "foo-1.0"
  | str == unitPackageNameString info = True  -- e.g. "foo"
  | otherwise                         = False

-- | Does the given 'UnitId' name this unit exactly?
matchingId :: UnitId -> UnitInfo -> Bool
matchingId uid info = unitId info == uid

-- | Interpret a 'PackageArg' as a predicate over units.
matching :: PackageArg -> UnitInfo -> Bool
matching (PackageArg str)                      = matchingStr str
matching (UnitIdArg (RealUnit (Definite uid))) = matchingId uid
matching (UnitIdArg _)                         = \_ -> False -- TODO: warn in this case

-- | This sorts a list of packages, putting "preferred" packages first.
-- See 'compareByPreference' for the semantics of "preference".
sortByPreference :: UnitPrecedenceMap -> [UnitInfo] -> [UnitInfo]
sortByPreference prec_map = sortBy (\p q -> compareByPreference prec_map q p)

-- | Returns 'GT' if @pkg@ should be preferred over @pkg'@ when picking
-- which should be "active".  Here is the order of preference:
--
--    1. First, prefer the latest version
--    2. If the versions are the same, prefer the package that
--    came in the latest package database.
--
-- Pursuant to #12518, we could change this policy to, for example, remove
-- the version preference, meaning that we would always prefer the units
-- in later unit database.
compareByPreference
    :: UnitPrecedenceMap
    -> UnitInfo
    -> UnitInfo
    -> Ordering
compareByPreference prec_map pkg pkg' =
  case comparing unitPackageVersion pkg pkg' of
    GT -> GT
    EQ | Just prec  <- Map.lookup (unitId pkg)  prec_map
       , Just prec' <- Map.lookup (unitId pkg') prec_map
       -- Prefer the unit from the later DB flag (i.e., higher
       -- precedence)
       -> compare prec prec'
       | otherwise
       -> EQ
    LT -> LT

-- Same as 'Data.Ord.comparing': compare two values on a projection.
comparing :: Ord a => (t -> a) -> t -> t -> Ordering
comparing f a b = f a `compare` f b

-- | Render a package flag back in its command-line form (for messages).
pprFlag :: PackageFlag -> SDoc
pprFlag flag = case flag of
    HidePackage p   -> text "-hide-package " <> text p
    ExposePackage doc _ _ -> text doc

-- | Render a trust flag back in its command-line form (for messages).
pprTrustFlag :: TrustFlag -> SDoc
pprTrustFlag flag = case flag of
    TrustPackage p    -> text "-trust " <> text p
    DistrustPackage p -> text "-distrust " <> text p

-- -----------------------------------------------------------------------------
-- Wired-in units
--
-- See Note [Wired-in units] in GHC.Unit.Module

-- Maps the unit id found in the database to the canonical wired-in unit id.
type WiringMap = Map UnitId UnitId

findWiredInUnits
   :: Logger
   -> UnitPrecedenceMap
   -> [UnitInfo]           -- database
   -> VisibilityMap             -- info on what units are visible
                                -- for wired in selection
   -> IO ([UnitInfo],  -- unit database updated for wired in
          WiringMap)   -- map from unit id to wired identity
findWiredInUnits logger prec_map pkgs vis_map = do
  -- Now we must find our wired-in units, and rename them to
  -- their canonical names (eg. base-1.0 ==> base), as described
  -- in Note [Wired-in units] in GHC.Unit.Module
  let
        -- A unit "matches" a wired-in id when its package name equals it.
        matches :: UnitInfo -> UnitId -> Bool
        pc `matches` pid = unitPackageName pc == PackageName (unitIdFS pid)

        -- find which package corresponds to each wired-in package
        -- delete any other packages with the same name
        -- update the package and any dependencies to point to the new
        -- one.
        --
        -- When choosing which package to map to a wired-in package
        -- name, we try to pick the latest version of exposed packages.
        -- However, if there are no exposed wired in packages available
        -- (e.g. -hide-all-packages was used), we can't bail: we *have*
        -- to assign a package for the wired-in package: so we try again
        -- with hidden packages included too (and pick the latest
        -- version).
        --
        -- You can also override the default choice by using -ignore-package:
        -- this works even when there is no exposed wired in package
        -- available.
        --
        findWiredInUnit :: [UnitInfo] -> UnitId -> IO (Maybe (UnitId, UnitInfo))
        findWiredInUnit pkgs wired_pkg =
           let all_ps = [ p | p <- pkgs, p `matches` wired_pkg ]
               all_exposed_ps = [ p | p <- all_ps
                                    , Map.member (mkUnit p) vis_map ]
           in case all_exposed_ps of
                [] -> case all_ps of
                        []   -> notfound
                        many -> pick (head (sortByPreference prec_map many))
                many -> pick (head (sortByPreference prec_map many))
          where
                -- No candidate at all: trace (at -v2) and map nothing.
                notfound = do
                          debugTraceMsg logger 2 $
                            text "wired-in package "
                                 <> ftext (unitIdFS wired_pkg)
                                 <> text " not found."
                          return Nothing
                -- Commit to this unit as the wired-in choice, with a trace.
                pick :: UnitInfo -> IO (Maybe (UnitId, UnitInfo))
                pick pkg = do
                        debugTraceMsg logger 2 $
                            text "wired-in package "
                                 <> ftext (unitIdFS wired_pkg)
                                 <> text " mapped to "
                                 <> ppr (unitId pkg)
                        return (Just (wired_pkg, pkg))

  mb_wired_in_pkgs <- mapM (findWiredInUnit pkgs) wiredInUnitIds

  let
        wired_in_pkgs = catMaybes mb_wired_in_pkgs

        -- Database unit id |-> canonical wired-in unit id.
        -- NOTE(review): indefinite units are excluded from the wiring map;
        -- presumably because wired-in ids must denote definite units — confirm.
        wiredInMap :: Map UnitId UnitId
        wiredInMap = Map.fromList
          [ (unitId realUnitInfo, wiredInUnitId)
          | (wiredInUnitId, realUnitInfo) <- wired_in_pkgs
          , not (unitIsIndefinite realUnitInfo)
          ]

        -- Rewrite every unit: first its own id (if wired in), then the
        -- ids appearing in its dependencies and exposed (re-)modules.
        updateWiredInDependencies pkgs = map (upd_deps . upd_pkg) pkgs
          where upd_pkg pkg
                  | Just wiredInUnitId <- Map.lookup (unitId pkg) wiredInMap
                  = pkg { unitId         = wiredInUnitId
                        , unitInstanceOf = wiredInUnitId
                           -- every non instantiated unit is an instance of
                           -- itself (required by Backpack...)
                           --
                           -- See Note [About units] in GHC.Unit
                        }
                  | otherwise
                  = pkg
                upd_deps pkg = pkg {
                      unitDepends = map (upd_wired_in wiredInMap) (unitDepends pkg),
                      unitExposedModules
                        = map (\(k,v) -> (k, fmap (upd_wired_in_mod wiredInMap) v))
                              (unitExposedModules pkg)
                    }

  return (updateWiredInDependencies pkgs, wiredInMap)

-- Helper functions for rewiring Module and Unit.  These
-- rewrite Units of modules in wired-in packages to the form known to the
-- compiler, as described in Note [Wired-in units] in GHC.Unit.Module.
--
-- For instance, base-4.9.0.0 will be rewritten to just base, to match
-- what appears in GHC.Builtin.Names.

-- | Rewire the unit of a 'Module' (module name itself is untouched).
upd_wired_in_mod :: WiringMap -> Module -> Module
upd_wired_in_mod wiredInMap (Module uid m) = Module (upd_wired_in_uid wiredInMap uid) m

-- | Rewire a 'Unit': holes are left alone, definite units are rewired
-- directly, and virtual (instantiated) units are rebuilt with every
-- instantiation argument rewired recursively.
upd_wired_in_uid :: WiringMap -> Unit -> Unit
upd_wired_in_uid wiredInMap u = case u of
   HoleUnit                -> HoleUnit
   RealUnit (Definite uid) -> RealUnit (Definite (upd_wired_in wiredInMap uid))
   VirtUnit indef_uid ->
      VirtUnit $ mkInstantiatedUnit
        (instUnitInstanceOf indef_uid)
        (map (\(x,y) -> (x, upd_wired_in_mod wiredInMap y)) (instUnitInsts indef_uid))

-- | Rewire a raw 'UnitId' via the 'WiringMap'; identity when unmapped.
upd_wired_in :: WiringMap -> UnitId -> UnitId
upd_wired_in wiredInMap key
    | Just key' <- Map.lookup key wiredInMap = key'
    | otherwise                              = key

-- | Re-key the visibility map so that wired-in units are looked up
-- under their canonical (rewired) ids rather than their database ids.
updateVisibilityMap :: WiringMap -> VisibilityMap -> VisibilityMap
updateVisibilityMap wiredInMap vis_map = foldl' f vis_map (Map.toList wiredInMap)
  where f vm (from, to) = case Map.lookup (RealUnit (Definite from)) vis_map of
                    Nothing -> vm
                    Just r  -> Map.insert (RealUnit (Definite to)) r
                                (Map.delete (RealUnit (Definite from)) vm)

-- ----------------------------------------------------------------------------

-- | The reason why a unit is unusable.
data UnusableUnitReason
  = -- | We ignored it explicitly using @-ignore-package@.
    IgnoredWithFlag
    -- | This unit transitively depends on a unit that was never present
    -- in any of the provided databases.
  | BrokenDependencies [UnitId]
    -- | This unit transitively depends on a unit involved in a cycle.
-- Note that the list of 'UnitId' reports the direct dependencies -- of this unit that (transitively) depended on the cycle, and not -- the actual cycle itself (which we report separately at high verbosity.) | CyclicDependencies [UnitId] -- | This unit transitively depends on a unit which was ignored. | IgnoredDependencies [UnitId] -- | This unit transitively depends on a unit which was -- shadowed by an ABI-incompatible unit. | ShadowedDependencies [UnitId] instance Outputable UnusableUnitReason where ppr IgnoredWithFlag = text "[ignored with flag]" ppr (BrokenDependencies uids) = brackets (text "broken" <+> ppr uids) ppr (CyclicDependencies uids) = brackets (text "cyclic" <+> ppr uids) ppr (IgnoredDependencies uids) = brackets (text "ignored" <+> ppr uids) ppr (ShadowedDependencies uids) = brackets (text "shadowed" <+> ppr uids) type UnusableUnits = Map UnitId (UnitInfo, UnusableUnitReason) pprReason :: SDoc -> UnusableUnitReason -> SDoc pprReason pref reason = case reason of IgnoredWithFlag -> pref <+> text "ignored due to an -ignore-package flag" BrokenDependencies deps -> pref <+> text "unusable due to missing dependencies:" $$ nest 2 (hsep (map ppr deps)) CyclicDependencies deps -> pref <+> text "unusable due to cyclic dependencies:" $$ nest 2 (hsep (map ppr deps)) IgnoredDependencies deps -> pref <+> text ("unusable because the -ignore-package flag was used to " ++ "ignore at least one of its dependencies:") $$ nest 2 (hsep (map ppr deps)) ShadowedDependencies deps -> pref <+> text "unusable due to shadowed dependencies:" $$ nest 2 (hsep (map ppr deps)) reportCycles :: Logger -> [SCC UnitInfo] -> IO () reportCycles logger sccs = mapM_ report sccs where report (AcyclicSCC _) = return () report (CyclicSCC vs) = debugTraceMsg logger 2 $ text "these packages are involved in a cycle:" $$ nest 2 (hsep (map (ppr . 
unitId) vs)) reportUnusable :: Logger -> UnusableUnits -> IO () reportUnusable logger pkgs = mapM_ report (Map.toList pkgs) where report (ipid, (_, reason)) = debugTraceMsg logger 2 $ pprReason (text "package" <+> ppr ipid <+> text "is") reason -- ---------------------------------------------------------------------------- -- -- Utilities on the database -- -- | A reverse dependency index, mapping an 'UnitId' to -- the 'UnitId's which have a dependency on it. type RevIndex = Map UnitId [UnitId] -- | Compute the reverse dependency index of a unit database. reverseDeps :: UnitInfoMap -> RevIndex reverseDeps db = Map.foldl' go Map.empty db where go r pkg = foldl' (go' (unitId pkg)) r (unitDepends pkg) go' from r to = Map.insertWith (++) to [from] r -- | Given a list of 'UnitId's to remove, a database, -- and a reverse dependency index (as computed by 'reverseDeps'), -- remove those units, plus any units which depend on them. -- Returns the pruned database, as well as a list of 'UnitInfo's -- that was removed. removeUnits :: [UnitId] -> RevIndex -> UnitInfoMap -> (UnitInfoMap, [UnitInfo]) removeUnits uids index m = go uids (m,[]) where go [] (m,pkgs) = (m,pkgs) go (uid:uids) (m,pkgs) | Just pkg <- Map.lookup uid m = case Map.lookup uid index of Nothing -> go uids (Map.delete uid m, pkg:pkgs) Just rdeps -> go (rdeps ++ uids) (Map.delete uid m, pkg:pkgs) | otherwise = go uids (m,pkgs) -- | Given a 'UnitInfo' from some 'UnitInfoMap', return all entries in 'depends' -- which correspond to units that do not exist in the index. depsNotAvailable :: UnitInfoMap -> UnitInfo -> [UnitId] depsNotAvailable pkg_map pkg = filter (not . (`Map.member` pkg_map)) (unitDepends pkg) -- | Given a 'UnitInfo' from some 'UnitInfoMap' return all entries in -- 'unitAbiDepends' which correspond to units that do not exist, OR have -- mismatching ABIs. depsAbiMismatch :: UnitInfoMap -> UnitInfo -> [UnitId] depsAbiMismatch pkg_map pkg = map fst . filter (not . 
abiMatch) $ unitAbiDepends pkg where abiMatch (dep_uid, abi) | Just dep_pkg <- Map.lookup dep_uid pkg_map = unitAbiHash dep_pkg == abi | otherwise = False -- ----------------------------------------------------------------------------- -- Ignore units ignoreUnits :: [IgnorePackageFlag] -> [UnitInfo] -> UnusableUnits ignoreUnits flags pkgs = Map.fromList (concatMap doit flags) where doit (IgnorePackage str) = case partition (matchingStr str) pkgs of (ps, _) -> [ (unitId p, (p, IgnoredWithFlag)) | p <- ps ] -- missing unit is not an error for -ignore-package, -- because a common usage is to -ignore-package P as -- a preventative measure just in case P exists. -- ---------------------------------------------------------------------------- -- -- Merging databases -- -- | For each unit, a mapping from uid -> i indicates that this -- unit was brought into GHC by the ith @-package-db@ flag on -- the command line. We use this mapping to make sure we prefer -- units that were defined later on the command line, if there -- is an ambiguity. type UnitPrecedenceMap = Map UnitId Int -- | Given a list of databases, merge them together, where -- units with the same unit id in later databases override -- earlier ones. This does NOT check if the resulting database -- makes sense (that's done by 'validateDatabase'). mergeDatabases :: Logger -> [UnitDatabase UnitId] -> IO (UnitInfoMap, UnitPrecedenceMap) mergeDatabases logger = foldM merge (Map.empty, Map.empty) . zip [1..] where merge (pkg_map, prec_map) (i, UnitDatabase db_path db) = do debugTraceMsg logger 2 $ text "loading package database" <+> text db_path forM_ (Set.toList override_set) $ \pkg -> debugTraceMsg logger 2 $ text "package" <+> ppr pkg <+> text "overrides a previously defined package" return (pkg_map', prec_map') where db_map = mk_pkg_map db mk_pkg_map = Map.fromList . map (\p -> (unitId p, p)) -- The set of UnitIds which appear in both db and pkgs. These are the -- ones that get overridden. 
Compute this just to give some -- helpful debug messages at -v2 override_set :: Set UnitId override_set = Set.intersection (Map.keysSet db_map) (Map.keysSet pkg_map) -- Now merge the sets together (NB: in case of duplicate, -- first argument preferred) pkg_map' :: UnitInfoMap pkg_map' = Map.union db_map pkg_map prec_map' :: UnitPrecedenceMap prec_map' = Map.union (Map.map (const i) db_map) prec_map -- | Validates a database, removing unusable units from it -- (this includes removing units that the user has explicitly -- ignored.) Our general strategy: -- -- 1. Remove all broken units (dangling dependencies) -- 2. Remove all units that are cyclic -- 3. Apply ignore flags -- 4. Remove all units which have deps with mismatching ABIs -- validateDatabase :: UnitConfig -> UnitInfoMap -> (UnitInfoMap, UnusableUnits, [SCC UnitInfo]) validateDatabase cfg pkg_map1 = (pkg_map5, unusable, sccs) where ignore_flags = reverse (unitConfigFlagsIgnored cfg) -- Compute the reverse dependency index index = reverseDeps pkg_map1 -- Helper function mk_unusable mk_err dep_matcher m uids = Map.fromList [ (unitId pkg, (pkg, mk_err (dep_matcher m pkg))) | pkg <- uids ] -- Find broken units directly_broken = filter (not . null . 
depsNotAvailable pkg_map1) (Map.elems pkg_map1) (pkg_map2, broken) = removeUnits (map unitId directly_broken) index pkg_map1 unusable_broken = mk_unusable BrokenDependencies depsNotAvailable pkg_map2 broken -- Find recursive units sccs = stronglyConnComp [ (pkg, unitId pkg, unitDepends pkg) | pkg <- Map.elems pkg_map2 ] getCyclicSCC (CyclicSCC vs) = map unitId vs getCyclicSCC (AcyclicSCC _) = [] (pkg_map3, cyclic) = removeUnits (concatMap getCyclicSCC sccs) index pkg_map2 unusable_cyclic = mk_unusable CyclicDependencies depsNotAvailable pkg_map3 cyclic -- Apply ignore flags directly_ignored = ignoreUnits ignore_flags (Map.elems pkg_map3) (pkg_map4, ignored) = removeUnits (Map.keys directly_ignored) index pkg_map3 unusable_ignored = mk_unusable IgnoredDependencies depsNotAvailable pkg_map4 ignored -- Knock out units whose dependencies don't agree with ABI -- (i.e., got invalidated due to shadowing) directly_shadowed = filter (not . null . depsAbiMismatch pkg_map4) (Map.elems pkg_map4) (pkg_map5, shadowed) = removeUnits (map unitId directly_shadowed) index pkg_map4 unusable_shadowed = mk_unusable ShadowedDependencies depsAbiMismatch pkg_map5 shadowed unusable = directly_ignored `Map.union` unusable_ignored `Map.union` unusable_broken `Map.union` unusable_cyclic `Map.union` unusable_shadowed -- ----------------------------------------------------------------------------- -- When all the command-line options are in, we can process our unit -- settings and populate the unit state. mkUnitState :: Logger -> UnitConfig -> IO (UnitState,[UnitDatabase UnitId]) mkUnitState logger cfg = do {- Plan. There are two main steps for making the package state: 1. We want to build a single, unified package database based on all of the input databases, which upholds the invariant that there is only one package per any UnitId and there are no dangling dependencies. We'll do this by merging, and then successively filtering out bad dependencies. a) Merge all the databases together. 
If an input database defines unit ID that is already in the unified database, that package SHADOWS the existing package in the current unified database. Note that order is important: packages defined later in the list of command line arguments shadow those defined earlier. b) Remove all packages with missing dependencies, or mutually recursive dependencies. b) Remove packages selected by -ignore-package from input database c) Remove all packages which depended on packages that are now shadowed by an ABI-incompatible package d) report (with -v) any packages that were removed by steps 1-3 2. We want to look at the flags controlling package visibility, and build a mapping of what module names are in scope and where they live. a) on the final, unified database, we apply -trust/-distrust flags directly, modifying the database so that the 'trusted' field has the correct value. b) we use the -package/-hide-package flags to compute a visibility map, stating what packages are "exposed" for the purposes of computing the module map. * if any flag refers to a package which was removed by 1-5, then we can give an error message explaining why * if -hide-all-packages was not specified, this step also hides packages which are superseded by later exposed packages * this step is done TWICE if -plugin-package/-hide-all-plugin-packages are used c) based on the visibility map, we pick wired packages and rewrite them to have the expected unitId. d) finally, using the visibility map and the package database, we build a mapping saying what every in scope module name points to. 
-} -- if databases have not been provided, read the database flags raw_dbs <- case unitConfigDBCache cfg of Nothing -> readUnitDatabases logger cfg Just dbs -> return dbs -- distrust all units if the flag is set let distrust_all db = db { unitDatabaseUnits = distrustAllUnits (unitDatabaseUnits db) } dbs | unitConfigDistrustAll cfg = map distrust_all raw_dbs | otherwise = raw_dbs -- This, and the other reverse's that you will see, are due to the fact that -- packageFlags, pluginPackageFlags, etc. are all specified in *reverse* order -- than they are on the command line. let raw_other_flags = reverse (unitConfigFlagsExposed cfg) (hpt_flags, other_flags) = partition (selectHptFlag (unitConfigHomeUnits cfg)) raw_other_flags debugTraceMsg logger 2 $ text "package flags" <+> ppr other_flags let home_unit_deps = selectHomeUnits (unitConfigHomeUnits cfg) hpt_flags -- Merge databases together, without checking validity (pkg_map1, prec_map) <- mergeDatabases logger dbs -- Now that we've merged everything together, prune out unusable -- packages. let (pkg_map2, unusable, sccs) = validateDatabase cfg pkg_map1 reportCycles logger sccs reportUnusable logger unusable -- Apply trust flags (these flags apply regardless of whether -- or not packages are visible or not) pkgs1 <- mayThrowUnitErr $ foldM (applyTrustFlag prec_map unusable) (Map.elems pkg_map2) (reverse (unitConfigFlagsTrusted cfg)) let prelim_pkg_db = mkUnitInfoMap pkgs1 -- -- Calculate the initial set of units from package databases, prior to any package flags. -- -- Conceptually, we select the latest versions of all valid (not unusable) *packages* -- (not units). This is empty if we have -hide-all-packages. -- -- Then we create an initial visibility map with default visibilities for all -- exposed, definite units which belong to the latest valid packages. 
-- let preferLater unit unit' = case compareByPreference prec_map unit unit' of GT -> unit _ -> unit' addIfMorePreferable m unit = addToUDFM_C preferLater m (fsPackageName unit) unit -- This is the set of maximally preferable packages. In fact, it is a set of -- most preferable *units* keyed by package name, which act as stand-ins in -- for "a package in a database". We use units here because we don't have -- "a package in a database" as a type currently. mostPreferablePackageReps = if unitConfigHideAll cfg then emptyUDFM else foldl' addIfMorePreferable emptyUDFM pkgs1 -- When exposing units, we want to consider all of those in the most preferable -- packages. We can implement that by looking for units that are equi-preferable -- with the most preferable unit for package. Being equi-preferable means that -- they must be in the same database, with the same version, and the same package name. -- -- We must take care to consider all these units and not just the most -- preferable one, otherwise we can end up with problems like #16228. mostPreferable u = case lookupUDFM mostPreferablePackageReps (fsPackageName u) of Nothing -> False Just u' -> compareByPreference prec_map u u' == EQ vis_map1 = foldl' (\vm p -> -- Note: we NEVER expose indefinite packages by -- default, because it's almost assuredly not -- what you want (no mix-in linking has occurred). if unitIsExposed p && unitIsDefinite (mkUnit p) && mostPreferable p then Map.insert (mkUnit p) UnitVisibility { uv_expose_all = True, uv_renamings = [], uv_package_name = First (Just (fsPackageName p)), uv_requirements = Map.empty, uv_explicit = Nothing } vm else vm) Map.empty pkgs1 -- -- Compute a visibility map according to the command-line flags (-package, -- -hide-package). This needs to know about the unusable packages, since if a -- user tries to enable an unusable package, we should let them know. 
-- vis_map2 <- mayThrowUnitErr $ foldM (applyPackageFlag prec_map prelim_pkg_db emptyUniqSet unusable (unitConfigHideAll cfg) pkgs1) vis_map1 other_flags -- -- Sort out which packages are wired in. This has to be done last, since -- it modifies the unit ids of wired in packages, but when we process -- package arguments we need to key against the old versions. -- (pkgs2, wired_map) <- findWiredInUnits logger prec_map pkgs1 vis_map2 let pkg_db = mkUnitInfoMap pkgs2 -- Update the visibility map, so we treat wired packages as visible. let vis_map = updateVisibilityMap wired_map vis_map2 let hide_plugin_pkgs = unitConfigHideAllPlugins cfg plugin_vis_map <- case unitConfigFlagsPlugins cfg of -- common case; try to share the old vis_map [] | not hide_plugin_pkgs -> return vis_map | otherwise -> return Map.empty _ -> do let plugin_vis_map1 | hide_plugin_pkgs = Map.empty -- Use the vis_map PRIOR to wired in, -- because otherwise applyPackageFlag -- won't work. | otherwise = vis_map2 plugin_vis_map2 <- mayThrowUnitErr $ foldM (applyPackageFlag prec_map prelim_pkg_db emptyUniqSet unusable hide_plugin_pkgs pkgs1) plugin_vis_map1 (reverse (unitConfigFlagsPlugins cfg)) -- Updating based on wired in packages is mostly -- good hygiene, because it won't matter: no wired in -- package has a compiler plugin. -- TODO: If a wired in package had a compiler plugin, -- and you tried to pick different wired in packages -- with the plugin flags and the normal flags... what -- would happen? I don't know! But this doesn't seem -- likely to actually happen. return (updateVisibilityMap wired_map plugin_vis_map2) let pkgname_map = listToUFM [ (unitPackageName p, unitInstanceOf p) | p <- pkgs2 ] -- The explicitUnits accurately reflects the set of units we have turned -- on; as such, it also is the only way one can come up with requirements. 
-- The requirement context is directly based off of this: we simply -- look for nested unit IDs that are directly fed holes: the requirements -- of those units are precisely the ones we need to track let explicit_pkgs = [(k, uv_explicit v) | (k, v) <- Map.toList vis_map] req_ctx = Map.map (Set.toList) $ Map.unionsWith Set.union (map uv_requirements (Map.elems vis_map)) -- -- Here we build up a set of the packages mentioned in -package -- flags on the command line; these are called the "preload" -- packages. we link these packages in eagerly. The preload set -- should contain at least rts & base, which is why we pretend that -- the command line contains -package rts & -package base. -- -- NB: preload IS important even for type-checking, because we -- need the correct include path to be set. -- let preload1 = Map.keys (Map.filter (isJust . uv_explicit) vis_map) -- add default preload units if they can be found in the db basicLinkedUnits = fmap (RealUnit . Definite) $ filter (flip Map.member pkg_db) $ unitConfigAutoLink cfg preload3 = ordNub $ (basicLinkedUnits ++ preload1) -- Close the preload packages with their dependencies dep_preload <- mayThrowUnitErr $ closeUnitDeps pkg_db $ zip (map toUnitId preload3) (repeat Nothing) let mod_map1 = mkModuleNameProvidersMap logger cfg pkg_db emptyUniqSet vis_map mod_map2 = mkUnusableModuleNameProvidersMap unusable mod_map = Map.union mod_map1 mod_map2 -- Force the result to avoid leaking input parameters let !state = UnitState { preloadUnits = dep_preload , explicitUnits = explicit_pkgs , homeUnitDepends = Set.toList home_unit_deps , unitInfoMap = pkg_db , preloadClosure = emptyUniqSet , moduleNameProvidersMap = mod_map , pluginModuleNameProvidersMap = mkModuleNameProvidersMap logger cfg pkg_db emptyUniqSet plugin_vis_map , packageNameMap = pkgname_map , wireMap = wired_map , unwireMap = Map.fromList [ (v,k) | (k,v) <- Map.toList wired_map ] , requirementContext = req_ctx , allowVirtualUnits = unitConfigAllowVirtual cfg } 
return (state, raw_dbs) selectHptFlag :: Set.Set UnitId -> PackageFlag -> Bool selectHptFlag home_units (ExposePackage _ (UnitIdArg uid) _) | toUnitId uid `Set.member` home_units = True selectHptFlag _ _ = False selectHomeUnits :: Set.Set UnitId -> [PackageFlag] -> Set.Set UnitId selectHomeUnits home_units flags = foldl' go Set.empty flags where go :: Set.Set UnitId -> PackageFlag -> Set.Set UnitId go cur (ExposePackage _ (UnitIdArg uid) _) | toUnitId uid `Set.member` home_units = Set.insert (toUnitId uid) cur -- MP: This does not yet support thinning/renaming go cur _ = cur -- | Given a wired-in 'Unit', "unwire" it into the 'Unit' -- that it was recorded as in the package database. unwireUnit :: UnitState -> Unit -> Unit unwireUnit state uid@(RealUnit (Definite def_uid)) = maybe uid (RealUnit . Definite) (Map.lookup def_uid (unwireMap state)) unwireUnit _ uid = uid -- ----------------------------------------------------------------------------- -- | Makes the mapping from ModuleName to package info -- Slight irritation: we proceed by leafing through everything -- in the installed package database, which makes handling indefinite -- packages a bit bothersome. mkModuleNameProvidersMap :: Logger -> UnitConfig -> UnitInfoMap -> PreloadUnitClosure -> VisibilityMap -> ModuleNameProvidersMap mkModuleNameProvidersMap logger cfg pkg_map closure vis_map = -- What should we fold on? Both situations are awkward: -- -- * Folding on the visibility map means that we won't create -- entries for packages that aren't mentioned in vis_map -- (e.g., hidden packages, causing #14717) -- -- * Folding on pkg_map is awkward because if we have an -- Backpack instantiation, we need to possibly add a -- package from pkg_map multiple times to the actual -- ModuleNameProvidersMap. Also, we don't really want -- definite package instantiations to show up in the -- list of possibilities. -- -- So what will we do instead? 
We'll extend vis_map with -- entries for every definite (for non-Backpack) and -- indefinite (for Backpack) package, so that we get the -- hidden entries we need. Map.foldlWithKey extend_modmap emptyMap vis_map_extended where vis_map_extended = Map.union vis_map {- preferred -} default_vis default_vis = Map.fromList [ (mkUnit pkg, mempty) | pkg <- Map.elems pkg_map -- Exclude specific instantiations of an indefinite -- package , unitIsIndefinite pkg || null (unitInstantiations pkg) ] emptyMap = Map.empty setOrigins m os = fmap (const os) m extend_modmap modmap uid UnitVisibility { uv_expose_all = b, uv_renamings = rns } = addListTo modmap theBindings where pkg = unit_lookup uid theBindings :: [(ModuleName, Map Module ModuleOrigin)] theBindings = newBindings b rns newBindings :: Bool -> [(ModuleName, ModuleName)] -> [(ModuleName, Map Module ModuleOrigin)] newBindings e rns = es e ++ hiddens ++ map rnBinding rns rnBinding :: (ModuleName, ModuleName) -> (ModuleName, Map Module ModuleOrigin) rnBinding (orig, new) = (new, setOrigins origEntry fromFlag) where origEntry = case lookupUFM esmap orig of Just r -> r Nothing -> throwGhcException (CmdLineError (renderWithContext (log_default_user_context (logFlags logger)) (text "package flag: could not find module name" <+> ppr orig <+> text "in package" <+> ppr pk))) es :: Bool -> [(ModuleName, Map Module ModuleOrigin)] es e = do (m, exposedReexport) <- exposed_mods let (pk', m', origin') = case exposedReexport of Nothing -> (pk, m, fromExposedModules e) Just (Module pk' m') -> (pk', m', fromReexportedModules e pkg) return (m, mkModMap pk' m' origin') esmap :: UniqFM ModuleName (Map Module ModuleOrigin) esmap = listToUFM (es False) -- parameter here doesn't matter, orig will -- be overwritten hiddens = [(m, mkModMap pk m ModHidden) | m <- hidden_mods] pk = mkUnit pkg unit_lookup uid = lookupUnit' (unitConfigAllowVirtual cfg) pkg_map closure uid `orElse` pprPanic "unit_lookup" (ppr uid) exposed_mods = unitExposedModules pkg 
hidden_mods = unitHiddenModules pkg -- | Make a 'ModuleNameProvidersMap' covering a set of unusable packages. mkUnusableModuleNameProvidersMap :: UnusableUnits -> ModuleNameProvidersMap mkUnusableModuleNameProvidersMap unusables = Map.foldl' extend_modmap Map.empty unusables where extend_modmap modmap (pkg, reason) = addListTo modmap bindings where bindings :: [(ModuleName, Map Module ModuleOrigin)] bindings = exposed ++ hidden origin = ModUnusable reason pkg_id = mkUnit pkg exposed = map get_exposed exposed_mods hidden = [(m, mkModMap pkg_id m origin) | m <- hidden_mods] get_exposed (mod, Just mod') = (mod, Map.singleton mod' origin) get_exposed (mod, _) = (mod, mkModMap pkg_id mod origin) exposed_mods = unitExposedModules pkg hidden_mods = unitHiddenModules pkg -- | Add a list of key/value pairs to a nested map. -- -- The outer map is processed with 'Data.Map.Strict' to prevent memory leaks -- when reloading modules in GHCi (see #4029). This ensures that each -- value is forced before installing into the map. addListTo :: (Monoid a, Ord k1, Ord k2) => Map k1 (Map k2 a) -> [(k1, Map k2 a)] -> Map k1 (Map k2 a) addListTo = foldl' merge where merge m (k, v) = MapStrict.insertWith (Map.unionWith mappend) k v m -- | Create a singleton module mapping mkModMap :: Unit -> ModuleName -> ModuleOrigin -> Map Module ModuleOrigin mkModMap pkg mod = Map.singleton (mkModule pkg mod) -- ----------------------------------------------------------------------------- -- Package Utils -- | Takes a 'ModuleName', and if the module is in any package returns -- list of modules which take that name. 
lookupModuleInAllUnits :: UnitState -> ModuleName -> [(Module, UnitInfo)] lookupModuleInAllUnits pkgs m = case lookupModuleWithSuggestions pkgs m NoPkgQual of LookupFound a b -> [(a,fst b)] LookupMultiple rs -> map f rs where f (m,_) = (m, expectJust "lookupModule" (lookupUnit pkgs (moduleUnit m))) _ -> [] -- | The result of performing a lookup data LookupResult = -- | Found the module uniquely, nothing else to do LookupFound Module (UnitInfo, ModuleOrigin) -- | Multiple modules with the same name in scope | LookupMultiple [(Module, ModuleOrigin)] -- | No modules found, but there were some hidden ones with -- an exact name match. First is due to package hidden, second -- is due to module being hidden | LookupHidden [(Module, ModuleOrigin)] [(Module, ModuleOrigin)] -- | No modules found, but there were some unusable ones with -- an exact name match | LookupUnusable [(Module, ModuleOrigin)] -- | Nothing found, here are some suggested different names | LookupNotFound [ModuleSuggestion] -- suggestions data ModuleSuggestion = SuggestVisible ModuleName Module ModuleOrigin | SuggestHidden ModuleName Module ModuleOrigin lookupModuleWithSuggestions :: UnitState -> ModuleName -> PkgQual -> LookupResult lookupModuleWithSuggestions pkgs = lookupModuleWithSuggestions' pkgs (moduleNameProvidersMap pkgs) -- | The package which the module **appears** to come from, this could be -- the one which reexports the module from it's original package. 
This function -- is currently only used for -Wunused-packages lookupModulePackage :: UnitState -> ModuleName -> PkgQual -> Maybe [UnitInfo] lookupModulePackage pkgs mn mfs = case lookupModuleWithSuggestions' pkgs (moduleNameProvidersMap pkgs) mn mfs of LookupFound _ (orig_unit, origin) -> case origin of ModOrigin {fromOrigUnit, fromExposedReexport} -> case fromOrigUnit of -- Just True means, the import is available from its original location Just True -> pure [orig_unit] -- Otherwise, it must be available from a reexport _ -> pure fromExposedReexport _ -> Nothing _ -> Nothing lookupPluginModuleWithSuggestions :: UnitState -> ModuleName -> PkgQual -> LookupResult lookupPluginModuleWithSuggestions pkgs = lookupModuleWithSuggestions' pkgs (pluginModuleNameProvidersMap pkgs) lookupModuleWithSuggestions' :: UnitState -> ModuleNameProvidersMap -> ModuleName -> PkgQual -> LookupResult lookupModuleWithSuggestions' pkgs mod_map m mb_pn = case Map.lookup m mod_map of Nothing -> LookupNotFound suggestions Just xs -> case foldl' classify ([],[],[], []) (Map.toList xs) of ([], [], [], []) -> LookupNotFound suggestions (_, _, _, [(m, o)]) -> LookupFound m (mod_unit m, o) (_, _, _, exposed@(_:_)) -> LookupMultiple exposed ([], [], unusable@(_:_), []) -> LookupUnusable unusable (hidden_pkg, hidden_mod, _, []) -> LookupHidden hidden_pkg hidden_mod where classify (hidden_pkg, hidden_mod, unusable, exposed) (m, origin0) = let origin = filterOrigin mb_pn (mod_unit m) origin0 x = (m, origin) in case origin of ModHidden -> (hidden_pkg, x:hidden_mod, unusable, exposed) ModUnusable _ -> (hidden_pkg, hidden_mod, x:unusable, exposed) _ | originEmpty origin -> (hidden_pkg, hidden_mod, unusable, exposed) | originVisible origin -> (hidden_pkg, hidden_mod, unusable, x:exposed) | otherwise -> (x:hidden_pkg, hidden_mod, unusable, exposed) unit_lookup p = lookupUnit pkgs p `orElse` pprPanic "lookupModuleWithSuggestions" (ppr p <+> ppr m) mod_unit = unit_lookup . 
moduleUnit -- Filters out origins which are not associated with the given package -- qualifier. No-op if there is no package qualifier. Test if this -- excluded all origins with 'originEmpty'. filterOrigin :: PkgQual -> UnitInfo -> ModuleOrigin -> ModuleOrigin filterOrigin NoPkgQual _ o = o filterOrigin (ThisPkg _) _ o = o filterOrigin (OtherPkg u) pkg o = let match_pkg p = u == unitId p in case o of ModHidden | match_pkg pkg -> ModHidden | otherwise -> mempty ModUnusable _ | match_pkg pkg -> o | otherwise -> mempty ModOrigin { fromOrigUnit = e, fromExposedReexport = res, fromHiddenReexport = rhs } -> ModOrigin { fromOrigUnit = if match_pkg pkg then e else Nothing , fromExposedReexport = filter match_pkg res , fromHiddenReexport = filter match_pkg rhs , fromPackageFlag = False -- always excluded } suggestions = fuzzyLookup (moduleNameString m) all_mods all_mods :: [(String, ModuleSuggestion)] -- All modules all_mods = sortBy (comparing fst) $ [ (moduleNameString m, suggestion) | (m, e) <- Map.toList (moduleNameProvidersMap pkgs) , suggestion <- map (getSuggestion m) (Map.toList e) ] getSuggestion name (mod, origin) = (if originVisible origin then SuggestVisible else SuggestHidden) name mod origin listVisibleModuleNames :: UnitState -> [ModuleName] listVisibleModuleNames state = map fst (filter visible (Map.toList (moduleNameProvidersMap state))) where visible (_, ms) = any originVisible (Map.elems ms) -- | Takes a list of UnitIds (and their "parent" dependency, used for error -- messages), and returns the list with dependencies included, in reverse -- dependency order (a units appears before those it depends on). closeUnitDeps :: UnitInfoMap -> [(UnitId,Maybe UnitId)] -> MaybeErr UnitErr [UnitId] closeUnitDeps pkg_map ps = closeUnitDeps' pkg_map [] ps -- | Similar to closeUnitDeps but takes a list of already loaded units as an -- additional argument. 
closeUnitDeps' :: UnitInfoMap -> [UnitId] -> [(UnitId,Maybe UnitId)] -> MaybeErr UnitErr [UnitId] closeUnitDeps' pkg_map current_ids ps = foldM (add_unit pkg_map) current_ids ps -- | Add a UnitId and those it depends on (recursively) to the given list of -- UnitIds if they are not already in it. Return a list in reverse dependency -- order (a unit appears before those it depends on). -- -- The UnitId is looked up in the given UnitInfoMap (to find its dependencies). -- It it's not found, the optional parent unit is used to return a more precise -- error message ("dependency of "). add_unit :: UnitInfoMap -> [UnitId] -> (UnitId,Maybe UnitId) -> MaybeErr UnitErr [UnitId] add_unit pkg_map ps (p, mb_parent) | p `elem` ps = return ps -- Check if we've already added this unit | otherwise = case lookupUnitId' pkg_map p of Nothing -> Failed (CloseUnitErr p mb_parent) Just info -> do -- Add the unit's dependents also ps' <- foldM add_unit_key ps (unitDepends info) return (p : ps') where add_unit_key ps key = add_unit pkg_map ps (key, Just p) data UnitErr = CloseUnitErr !UnitId !(Maybe UnitId) | PackageFlagErr !PackageFlag ![(UnitInfo,UnusableUnitReason)] | TrustFlagErr !TrustFlag ![(UnitInfo,UnusableUnitReason)] mayThrowUnitErr :: MaybeErr UnitErr a -> IO a mayThrowUnitErr = \case Failed e -> throwGhcExceptionIO $ CmdLineError $ renderWithContext defaultSDocContext $ withPprStyle defaultUserStyle $ ppr e Succeeded a -> return a instance Outputable UnitErr where ppr = \case CloseUnitErr p mb_parent -> (ftext (fsLit "unknown unit:") <+> ppr p) <> case mb_parent of Nothing -> Outputable.empty Just parent -> space <> parens (text "dependency of" <+> ftext (unitIdFS parent)) PackageFlagErr flag reasons -> flag_err (pprFlag flag) reasons TrustFlagErr flag reasons -> flag_err (pprTrustFlag flag) reasons where flag_err flag_doc reasons = text "cannot satisfy " <> flag_doc <> (if null reasons then Outputable.empty else text ": ") $$ nest 4 (vcat (map ppr_reason reasons) $$ text "(use 
-v for more information)") ppr_reason (p, reason) = pprReason (ppr (unitId p) <+> text "is") reason -- | Return this list of requirement interfaces that need to be merged -- to form @mod_name@, or @[]@ if this is not a requirement. requirementMerges :: UnitState -> ModuleName -> [InstantiatedModule] requirementMerges pkgstate mod_name = fromMaybe [] (Map.lookup mod_name (requirementContext pkgstate)) -- ----------------------------------------------------------------------------- -- | Pretty-print a UnitId for the user. -- -- Cabal packages may contain several components (programs, libraries, etc.). -- As far as GHC is concerned, installed package components ("units") are -- identified by an opaque UnitId string provided by Cabal. As the string -- contains a hash, we don't want to display it to users so GHC queries the -- database to retrieve some infos about the original source package (name, -- version, component name). -- -- Instead we want to display: packagename-version[:componentname] -- -- Component name is only displayed if it isn't the default library -- -- To do this we need to query a unit database. pprUnitIdForUser :: UnitState -> UnitId -> SDoc pprUnitIdForUser state uid@(UnitId fs) = case lookupUnitPprInfo state uid of Nothing -> ftext fs -- we didn't find the unit at all Just i -> ppr i pprUnitInfoForUser :: UnitInfo -> SDoc pprUnitInfoForUser info = ppr (mkUnitPprInfo unitIdFS info) lookupUnitPprInfo :: UnitState -> UnitId -> Maybe UnitPprInfo lookupUnitPprInfo state uid = fmap (mkUnitPprInfo unitIdFS) (lookupUnitId state uid) -- ----------------------------------------------------------------------------- -- Displaying packages -- | Show (very verbose) package info pprUnits :: UnitState -> SDoc pprUnits = pprUnitsWith pprUnitInfo pprUnitsWith :: (UnitInfo -> SDoc) -> UnitState -> SDoc pprUnitsWith pprIPI pkgstate = vcat (intersperse (text "---") (map pprIPI (listUnitInfo pkgstate))) -- | Show simplified unit info. 
--
-- The idea is to only print package id, and any information that might
-- be different from the package databases (exposure, trust)
pprUnitsSimple :: UnitState -> SDoc
pprUnitsSimple = pprUnitsWith pprIPI
   where
     -- One line per unit: an exposure flag, a trust flag, then the unit-id.
     pprIPI ipi =
       let i = unitIdFS (unitId ipi)
           -- "E" when the unit is exposed, blank otherwise
           e = if unitIsExposed ipi then text "E" else text " "
           -- "T" when the unit is trusted, blank otherwise
           t = if unitIsTrusted ipi then text "T" else text " "
       in e <> t <> text " " <> ftext i

-- | Show the mapping of modules to where they come from.
pprModuleMap :: ModuleNameProvidersMap -> SDoc
pprModuleMap mod_map =
  vcat (map pprLine (Map.toList mod_map))
    where
      -- One line per module name; all of its providers are printed beside
      -- it, nested at column 50 so the provider column lines up.
      pprLine (m, e) = ppr m $$ nest 50 (vcat (map (pprEntry m) (Map.toList e)))

      pprEntry :: Outputable a => ModuleName -> (Module, a) -> SDoc
      pprEntry m (m', o)
        -- Provider exports the module under the same name: print only the
        -- providing unit to avoid repeating the module name.
        | m == moduleName m' = ppr (moduleUnit m') <+> parens (ppr o)
        -- Renamed/reexported module: print the full module it maps to.
        | otherwise          = ppr m' <+> parens (ppr o)

-- | Extract the source package name of a unit as a 'FastString'.
fsPackageName :: UnitInfo -> FastString
fsPackageName info = fs
   where
      PackageName fs = unitPackageName info

-- | Given a fully instantiated 'InstantiatedUnit', improve it into a
-- 'RealUnit' if we can find it in the package database.
improveUnit :: UnitState -> Unit -> Unit
improveUnit state u =
  improveUnit' (unitInfoMap state) (preloadClosure state) u

-- | Given a fully instantiated 'InstantiatedUnit', improve it into a
-- 'RealUnit' if we can find it in the package database.
improveUnit' :: UnitInfoMap -> PreloadUnitClosure -> Unit -> Unit
improveUnit' _ _ uid@(RealUnit _) = uid -- short circuit
improveUnit' pkg_map closure uid =
    -- Do NOT lookup indefinite ones, they won't be useful!
    case lookupUnit' False pkg_map closure uid of
        Nothing  -> uid
        Just pkg ->
            -- Do NOT improve if the indefinite unit id is not
            -- part of the closure unique set. See
            -- Note [VirtUnit to RealUnit improvement]
            if unitId pkg `elementOfUniqSet` closure
                then mkUnit pkg
                else uid

-- | Check the database to see if we already have an installed unit that
-- corresponds to the given 'InstantiatedUnit'.
--
-- Return a `UnitId` which either wraps the `InstantiatedUnit` unchanged or
-- references a matching installed unit.
--
-- See Note [VirtUnit to RealUnit improvement]
instUnitToUnit :: UnitState -> InstantiatedUnit -> Unit
instUnitToUnit state iuid =
    -- NB: suppose that we want to compare the instantiated
    -- unit p[H=impl:H] against p+abcd (where p+abcd
    -- happens to be the existing, installed version of
    -- p[H=impl:H].  If we *only* wrap in p[H=impl:H]
    -- VirtUnit, they won't compare equal; only
    -- after improvement will the equality hold.
    improveUnit state $ VirtUnit iuid

-- | Substitution on module variables, mapping module names to module
-- identifiers.
type ShHoleSubst = ModuleNameEnv Module

-- | Substitutes holes in a 'Module'.  NOT suitable for being called
-- directly on a 'nameModule', see Note [Representation of module/name variables].
-- @p[A=\<A\>]:B@ maps to @p[A=q():A]:B@ with @A=q():A@;
-- similarly, @\<A\>@ maps to @q():A@.
renameHoleModule :: UnitState -> ShHoleSubst -> Module -> Module
renameHoleModule state =
  renameHoleModule' (unitInfoMap state) (preloadClosure state)

-- | Substitutes holes in a 'Unit', suitable for renaming when
-- an include occurs; see Note [Representation of module/name variables].
--
-- @p[A=\<A\>]@ maps to @p[A=\<B\>]@ with @A=\<B\>@.
renameHoleUnit :: UnitState -> ShHoleSubst -> Unit -> Unit
renameHoleUnit state =
  renameHoleUnit' (unitInfoMap state) (preloadClosure state)

-- | Like 'renameHoleModule', but requires only 'ClosureUnitInfoMap'
-- so it can be used by "GHC.Unit.State".
renameHoleModule' :: UnitInfoMap -> PreloadUnitClosure -> ShHoleSubst -> Module -> Module
renameHoleModule' pkg_map closure env m
  | not (isHoleModule m) =
        -- Not a hole itself: rename any holes inside the module's unit
        -- and rebuild the module with the renamed unit.
        let uid = renameHoleUnit' pkg_map closure env (moduleUnit m)
        in mkModule uid (moduleName m)
  | Just m' <- lookupUFM env (moduleName m) = m'
  -- NB m = <m>, that's what's in scope.
  | otherwise = m

-- | Like 'renameHoleUnit', but requires only 'ClosureUnitInfoMap'
-- so it can be used by "GHC.Unit.State".
renameHoleUnit' :: UnitInfoMap -> PreloadUnitClosure -> ShHoleSubst -> Unit -> Unit renameHoleUnit' pkg_map closure env uid = case uid of (VirtUnit InstantiatedUnit{ instUnitInstanceOf = cid , instUnitInsts = insts , instUnitHoles = fh }) -> if isNullUFM (intersectUFM_C const (udfmToUfm (getUniqDSet fh)) env) then uid -- Functorially apply the substitution to the instantiation, -- then check the 'ClosureUnitInfoMap' to see if there is -- a compiled version of this 'InstantiatedUnit' we can improve to. -- See Note [VirtUnit to RealUnit improvement] else improveUnit' pkg_map closure $ mkVirtUnit cid (map (\(k,v) -> (k, renameHoleModule' pkg_map closure env v)) insts) _ -> uid -- | Injects an 'InstantiatedModule' to 'Module' (see also -- 'instUnitToUnit'. instModuleToModule :: UnitState -> InstantiatedModule -> Module instModuleToModule pkgstate (Module iuid mod_name) = mkModule (instUnitToUnit pkgstate iuid) mod_name -- | Print unit-ids with UnitInfo found in the given UnitState pprWithUnitState :: UnitState -> SDoc -> SDoc pprWithUnitState state = updSDocContext (\ctx -> ctx { sdocUnitIdForUser = \fs -> pprUnitIdForUser state (UnitId fs) }) -- | Add package dependencies on the wired-in packages we use implicitPackageDeps :: DynFlags -> [UnitId] implicitPackageDeps dflags = [thUnitId | xopt TemplateHaskellQuotes dflags] -- TODO: Should also include `base` and `ghc-prim` if we use those implicitly, but -- it is possible to not depend on base (for example, see `ghc-prim`) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Types.hs0000644000000000000000000005531714472400113020226 0ustar0000000000000000{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} -- | Unit & Module types -- -- This module is used to resolve the loops between Unit and Module types -- (Module references a Unit and vice-versa). 
module GHC.Unit.Types ( -- * Modules GenModule (..) , Module , InstalledModule , HomeUnitModule , InstantiatedModule , mkModule , moduleUnitId , pprModule , pprInstantiatedModule , moduleFreeHoles -- * Units , IsUnitId , GenUnit (..) , Unit , UnitId (..) , UnitKey (..) , GenInstantiatedUnit (..) , InstantiatedUnit , DefUnitId , Instantiations , GenInstantiations , mkInstantiatedUnit , mkInstantiatedUnitHash , mkVirtUnit , mapGenUnit , mapInstantiations , unitFreeModuleHoles , fsToUnit , unitFS , unitString , toUnitId , virtualUnitId , stringToUnit , stableUnitCmp , unitIsDefinite , isHoleUnit -- * Unit Ids , unitIdString , stringToUnitId -- * Utils , Definite (..) -- * Wired-in units , primUnitId , bignumUnitId , baseUnitId , rtsUnitId , thUnitId , mainUnitId , thisGhcUnitId , interactiveUnitId , primUnit , bignumUnit , baseUnit , rtsUnit , thUnit , mainUnit , thisGhcUnit , interactiveUnit , isInteractiveModule , wiredInUnitIds -- * Boot modules , IsBootInterface (..) , GenWithIsBoot (..) , ModuleNameWithIsBoot , ModuleWithIsBoot ) where import GHC.Prelude import GHC.Types.Unique import GHC.Types.Unique.DSet import GHC.Unit.Module.Name import GHC.Utils.Binary import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Encoding import GHC.Utils.Fingerprint import GHC.Utils.Misc import Control.DeepSeq import Data.Data import Data.List (sortBy ) import Data.Function import Data.Bifunctor import qualified Data.ByteString as BS import qualified Data.ByteString.Char8 as BS.Char8 --------------------------------------------------------------------- -- MODULES --------------------------------------------------------------------- -- | A generic module is a pair of a unit identifier and a 'ModuleName'. data GenModule unit = Module { moduleUnit :: !unit -- ^ Unit the module belongs to , moduleName :: !ModuleName -- ^ Module name (e.g. A.B.C) } deriving (Eq,Ord,Data,Functor) -- | A Module is a pair of a 'Unit' and a 'ModuleName'. 
type Module = GenModule Unit

-- | Extract the 'UnitId' of the unit that a module belongs to.
moduleUnitId :: Module -> UnitId
moduleUnitId = toUnitId . moduleUnit

-- | A 'InstalledModule' is a 'Module' whose unit is identified with an
-- 'UnitId'.
type InstalledModule = GenModule UnitId

-- | A 'HomeUnitModule' is like an 'InstalledModule' but we expect to find it in
-- one of the home units rather than the package database.
type HomeUnitModule = GenModule UnitId

-- | An `InstantiatedModule` is a 'Module' whose unit is identified with an `InstantiatedUnit`.
type InstantiatedModule = GenModule InstantiatedUnit

-- | Construct a module from a unit identifier and a module name.
mkModule :: u -> ModuleName -> GenModule u
mkModule = Module

instance Uniquable Module where
  -- The unique is derived from the concatenation of the unit's
  -- FastString and the module name's FastString.
  getUnique (Module p n) = getUnique (unitFS p `appendFS` moduleNameFS n)

instance Binary a => Binary (GenModule a) where
  -- Serialise the unit first, then the module name.
  put_ bh (Module p n) = put_ bh p >> put_ bh n
  get bh = do p <- get bh; n <- get bh; return (Module p n)

instance NFData (GenModule a) where
  -- Both fields are strict in the data declaration, so forcing them
  -- to WHNF with `seq` is sufficient here.
  rnf (Module unit name) = unit `seq` name `seq` ()

instance Outputable Module where
  ppr = pprModule

instance Outputable InstalledModule where
  ppr (Module p n) =
    ppr p <> char ':' <> pprModuleName n

instance Outputable InstantiatedModule where
  ppr = pprInstantiatedModule

instance Outputable InstantiatedUnit where
  ppr uid =
      -- getPprStyle $ \sty ->
      ppr cid <>
        -- Only print the instantiation brackets when there is at least
        -- one (ModuleName, Module) instantiation to show.
        (if not (null insts) -- pprIf
          then
            brackets (hcat
              (punctuate comma $
                [ ppr modname <> text "=" <> pprModule m
                | (modname, m) <- insts]))
          else empty)
     where
      cid   = instUnitInstanceOf uid
      insts = instUnitInsts uid

-- | Class for types that are used as unit identifiers (UnitKey, UnitId, Unit)
--
-- We need this class because we create new unit ids for virtual units (see
-- VirtUnit) and they have to be made from units with different kinds of
-- identifiers.
class IsUnitId u where unitFS :: u -> FastString instance IsUnitId UnitKey where unitFS (UnitKey fs) = fs instance IsUnitId UnitId where unitFS (UnitId fs) = fs instance IsUnitId u => IsUnitId (GenUnit u) where unitFS (VirtUnit x) = instUnitFS x unitFS (RealUnit (Definite x)) = unitFS x unitFS HoleUnit = holeFS pprModule :: Module -> SDoc pprModule mod@(Module p n) = getPprStyle doc where doc sty | codeStyle sty = (if p == mainUnit then empty -- never qualify the main package in code else ztext (zEncodeFS (unitFS p)) <> char '_') <> pprModuleName n | qualModule sty mod = case p of HoleUnit -> angleBrackets (pprModuleName n) _ -> ppr (moduleUnit mod) <> char ':' <> pprModuleName n | otherwise = pprModuleName n pprInstantiatedModule :: InstantiatedModule -> SDoc pprInstantiatedModule (Module uid m) = ppr uid <> char ':' <> ppr m --------------------------------------------------------------------- -- UNITS --------------------------------------------------------------------- -- | A unit key in the database newtype UnitKey = UnitKey FastString -- | A unit identifier identifies a (possibly partially) instantiated library. -- It is primarily used as part of 'Module', which in turn is used in 'Name', -- which is used to give names to entities when typechecking. -- -- There are two possible forms for a 'Unit': -- -- 1) It can be a 'RealUnit', in which case we just have a 'DefUnitId' that -- uniquely identifies some fully compiled, installed library we have on disk. -- -- 2) It can be an 'VirtUnit'. When we are typechecking a library with missing -- holes, we may need to instantiate a library on the fly (in which case we -- don't have any on-disk representation.) In that case, you have an -- 'InstantiatedUnit', which explicitly records the instantiation, so that we -- can substitute over it. 
data GenUnit uid
    = RealUnit !(Definite uid)
      -- ^ Installed definite unit (either a fully instantiated unit or a closed unit)

    | VirtUnit {-# UNPACK #-} !(GenInstantiatedUnit uid)
      -- ^ Virtual unit instantiated on-the-fly. It may be definite if all the
      -- holes are instantiated but we don't have code objects for it.

    | HoleUnit
      -- ^ Fake hole unit

-- | An instantiated unit.
--
-- It identifies an indefinite library (with holes) that has been instantiated.
--
-- This unit may be indefinite or not (i.e. with remaining holes or not). If it
-- is definite, we don't know if it has already been compiled and installed in a
-- database. Nevertheless, we have a mechanism called "improvement" to try to
-- match a fully instantiated unit with existing compiled and installed units:
-- see Note [VirtUnit to RealUnit improvement].
--
-- An indefinite unit identifier pretty-prints to something like
-- @p[H=\<H\>,A=aimpl:A\>]@ (@p@ is the 'UnitId', and the
-- brackets enclose the module substitution).
data GenInstantiatedUnit unit
    = InstantiatedUnit {
        -- | A private, uniquely identifying representation of
        -- an InstantiatedUnit. This string is completely private to GHC
        -- and is just used to get a unique.
        instUnitFS :: !FastString,
        -- | Cached unique of 'unitFS'.
        instUnitKey :: !Unique,
        -- | The (indefinite) unit being instantiated.
        instUnitInstanceOf :: !unit,
        -- | The sorted (by 'ModuleName') instantiations of this unit.
        instUnitInsts :: !(GenInstantiations unit),
        -- | A cache of the free module holes of 'instUnitInsts'.
        -- This lets us efficiently tell if a 'InstantiatedUnit' has been
        -- fully instantiated (empty set of free module holes)
        -- and whether or not a substitution can have any effect.
        instUnitHoles :: UniqDSet ModuleName
    }

type Unit              = GenUnit UnitId
type InstantiatedUnit  = GenInstantiatedUnit UnitId

type GenInstantiations unit = [(ModuleName,GenModule (GenUnit unit))]
type Instantiations         = GenInstantiations UnitId

holeUnique :: Unique
holeUnique = getUnique holeFS

holeFS :: FastString
-- NOTE(review): restored to "<>" (the distribution text showed an empty
-- string here, consistent with the "<...>" stripping visible in nearby
-- comments). An empty FastString would make 'pprUnit HoleUnit' render as
-- nothing at all, since it prints @ftext holeFS@ directly.
holeFS = fsLit "<>"

isHoleUnit :: GenUnit u -> Bool
isHoleUnit HoleUnit = True
isHoleUnit _        = False

instance Eq (GenInstantiatedUnit unit) where
  -- O(1): compare the cached uniques rather than the FastStrings.
  u1 == u2 = instUnitKey u1 == instUnitKey u2

instance Ord (GenInstantiatedUnit unit) where
  -- Lexical comparison of the internal FastStrings keeps the ordering
  -- deterministic across runs (uniques are not stable).
  u1 `compare` u2 = instUnitFS u1 `lexicalCompareFS` instUnitFS u2

instance Binary InstantiatedUnit where
  put_ bh indef = do
    put_ bh (instUnitInstanceOf indef)
    put_ bh (instUnitInsts indef)
  get bh = do
    cid   <- get bh
    insts <- get bh
    -- The hash, the unique and the free-hole cache are not serialised:
    -- they are recomputed from the instantiations on deserialisation.
    let fs = mkInstantiatedUnitHash cid insts
    return InstantiatedUnit {
            instUnitInstanceOf = cid,
            instUnitInsts = insts,
            instUnitHoles = unionManyUniqDSets (map (moduleFreeHoles.snd) insts),
            instUnitFS = fs,
            instUnitKey = getUnique fs
           }

instance IsUnitId u => Eq (GenUnit u) where
  uid1 == uid2 = unitUnique uid1 == unitUnique uid2

instance IsUnitId u => Uniquable (GenUnit u) where
  getUnique = unitUnique

instance Ord Unit where
  nm1 `compare` nm2 = stableUnitCmp nm1 nm2

instance Data Unit where
  -- don't traverse?
  toConstr _   = abstractConstr "Unit"
  gunfold _ _  = error "gunfold"
  dataTypeOf _ = mkNoRepType "Unit"

instance NFData Unit where
  -- All constructor fields are strict, so WHNF is enough.
  rnf x = x `seq` ()

-- | Compares unit ids lexically, rather than by their 'Unique's
stableUnitCmp :: Unit -> Unit -> Ordering
stableUnitCmp p1 p2 = unitFS p1 `lexicalCompareFS` unitFS p2

instance Outputable Unit where
   ppr pk = pprUnit pk

pprUnit :: Unit -> SDoc
pprUnit (RealUnit uid) = ppr uid
pprUnit (VirtUnit uid) = ppr uid
pprUnit HoleUnit       = ftext holeFS

instance Show Unit where
    show = unitString

-- Performance: would prefer to have a NameCache like thing
instance Binary Unit where
  -- Tag byte: 0 = RealUnit, 1 = VirtUnit, anything else = HoleUnit.
  put_ bh (RealUnit def_uid) = do
    putByte bh 0
    put_ bh def_uid
  put_ bh (VirtUnit indef_uid) = do
    putByte bh 1
    put_ bh indef_uid
  put_ bh HoleUnit =
    putByte bh 2
  get bh = do b <- getByte bh
              case b of
                0 -> fmap RealUnit (get bh)
                1 -> fmap VirtUnit (get bh)
                _ -> pure HoleUnit

-- | Retrieve the set of free module holes of a 'Unit'.
unitFreeModuleHoles :: GenUnit u -> UniqDSet ModuleName
unitFreeModuleHoles (VirtUnit x) = instUnitHoles x
unitFreeModuleHoles (RealUnit _) = emptyUniqDSet
unitFreeModuleHoles HoleUnit     = emptyUniqDSet

-- | Calculate the free holes of a 'Module'.  If this set is non-empty,
-- this module was defined in an indefinite library that had required
-- signatures.
--
-- If a module has free holes, that means that substitutions can operate on it;
-- if it has no free holes, substituting over a module has no effect.
moduleFreeHoles :: GenModule (GenUnit u) -> UniqDSet ModuleName
moduleFreeHoles (Module HoleUnit name) = unitUniqDSet name
moduleFreeHoles (Module u        _  ) = unitFreeModuleHoles u

-- | Create a new 'GenInstantiatedUnit' given an explicit module substitution.
mkInstantiatedUnit :: IsUnitId u => u -> GenInstantiations u -> GenInstantiatedUnit u mkInstantiatedUnit cid insts = InstantiatedUnit { instUnitInstanceOf = cid, instUnitInsts = sorted_insts, instUnitHoles = unionManyUniqDSets (map (moduleFreeHoles.snd) insts), instUnitFS = fs, instUnitKey = getUnique fs } where fs = mkInstantiatedUnitHash cid sorted_insts sorted_insts = sortBy (stableModuleNameCmp `on` fst) insts -- | Smart constructor for instantiated GenUnit mkVirtUnit :: IsUnitId u => u -> [(ModuleName, GenModule (GenUnit u))] -> GenUnit u mkVirtUnit uid [] = RealUnit $ Definite uid mkVirtUnit uid insts = VirtUnit $ mkInstantiatedUnit uid insts -- | Generate a uniquely identifying hash (internal unit-id) for an instantiated -- unit. -- -- This is a one-way function. If the indefinite unit has not been instantiated at all, we return its unit-id. -- -- This hash is completely internal to GHC and is not used for symbol names or -- file paths. It is different from the hash Cabal would produce for the same -- instantiated unit. mkInstantiatedUnitHash :: IsUnitId u => u -> [(ModuleName, GenModule (GenUnit u))] -> FastString mkInstantiatedUnitHash cid sorted_holes = mkFastStringByteString . fingerprintUnitId (bytesFS (unitFS cid)) $ hashInstantiations sorted_holes -- | Generate a hash for a sorted module instantiation. hashInstantiations :: IsUnitId u => [(ModuleName, GenModule (GenUnit u))] -> Fingerprint hashInstantiations sorted_holes = fingerprintByteString . 
BS.concat $ do (m, b) <- sorted_holes [ bytesFS (moduleNameFS m), BS.Char8.singleton ' ', bytesFS (unitFS (moduleUnit b)), BS.Char8.singleton ':', bytesFS (moduleNameFS (moduleName b)), BS.Char8.singleton '\n'] fingerprintUnitId :: BS.ByteString -> Fingerprint -> BS.ByteString fingerprintUnitId prefix (Fingerprint a b) = BS.concat $ [ prefix , BS.Char8.singleton '-' , BS.Char8.pack (toBase62Padded a) , BS.Char8.pack (toBase62Padded b) ] unitUnique :: IsUnitId u => GenUnit u -> Unique unitUnique (VirtUnit x) = instUnitKey x unitUnique (RealUnit (Definite x)) = getUnique (unitFS x) unitUnique HoleUnit = holeUnique -- | Create a new simple unit identifier from a 'FastString'. Internally, -- this is primarily used to specify wired-in unit identifiers. fsToUnit :: FastString -> Unit fsToUnit = RealUnit . Definite . UnitId unitString :: IsUnitId u => u -> String unitString = unpackFS . unitFS stringToUnit :: String -> Unit stringToUnit = fsToUnit . mkFastString -- | Map over the unit type of a 'GenUnit' mapGenUnit :: IsUnitId v => (u -> v) -> GenUnit u -> GenUnit v mapGenUnit f = go where go gu = case gu of HoleUnit -> HoleUnit RealUnit d -> RealUnit (fmap f d) VirtUnit i -> VirtUnit $ mkInstantiatedUnit (f (instUnitInstanceOf i)) (fmap (second (fmap go)) (instUnitInsts i)) -- | Map over the unit identifier of unit instantiations. mapInstantiations :: IsUnitId v => (u -> v) -> GenInstantiations u -> GenInstantiations v mapInstantiations f = map (second (fmap (mapGenUnit f))) -- | Return the UnitId of the Unit. For on-the-fly instantiated units, return -- the UnitId of the indefinite unit this unit is an instance of. toUnitId :: Unit -> UnitId toUnitId (RealUnit (Definite iuid)) = iuid toUnitId (VirtUnit indef) = instUnitInstanceOf indef toUnitId HoleUnit = error "Hole unit" -- | Return the virtual UnitId of an on-the-fly instantiated unit. 
virtualUnitId :: InstantiatedUnit -> UnitId virtualUnitId i = UnitId (instUnitFS i) -- | A 'Unit' is definite if it has no free holes. unitIsDefinite :: Unit -> Bool unitIsDefinite = isEmptyUniqDSet . unitFreeModuleHoles --------------------------------------------------------------------- -- UNIT IDs --------------------------------------------------------------------- -- | A UnitId identifies a built library in a database and is used to generate -- unique symbols, etc. It's usually of the form: -- -- pkgname-1.2:libname+hash -- -- These UnitId are provided to us via the @-this-unit-id@ flag. -- -- The library in question may be definite or indefinite; if it is indefinite, -- none of the holes have been filled (we never install partially instantiated -- libraries as we can cheaply instantiate them on-the-fly, cf VirtUnit). Put -- another way, an installed unit id is either fully instantiated, or not -- instantiated at all. newtype UnitId = UnitId { unitIdFS :: FastString -- ^ The full hashed unit identifier, including the component id -- and the hash. } deriving (Data) instance Binary UnitId where put_ bh (UnitId fs) = put_ bh fs get bh = do fs <- get bh; return (UnitId fs) instance Eq UnitId where uid1 == uid2 = getUnique uid1 == getUnique uid2 instance Ord UnitId where -- we compare lexically to avoid non-deterministic output when sets of -- unit-ids are printed (dependencies, etc.) u1 `compare` u2 = unitIdFS u1 `lexicalCompareFS` unitIdFS u2 instance Uniquable UnitId where getUnique = getUnique . unitIdFS instance Outputable UnitId where ppr (UnitId fs) = sdocOption sdocUnitIdForUser ($ fs) -- see Note [Pretty-printing UnitId] -- in "GHC.Unit" -- | A 'DefUnitId' is an 'UnitId' with the invariant that -- it only refers to a definite library; i.e., one we have generated -- code for. type DefUnitId = Definite UnitId unitIdString :: UnitId -> String unitIdString = unpackFS . unitIdFS stringToUnitId :: String -> UnitId stringToUnitId = UnitId . 
mkFastString --------------------------------------------------------------------- -- UTILS --------------------------------------------------------------------- -- | A definite unit (i.e. without any free module hole) newtype Definite unit = Definite { unDefinite :: unit } deriving (Functor) deriving newtype (Eq, Ord, Outputable, Binary, Uniquable, IsUnitId) --------------------------------------------------------------------- -- WIRED-IN UNITS --------------------------------------------------------------------- {- Note [Wired-in units] ~~~~~~~~~~~~~~~~~~~~~ Certain packages are known to the compiler, in that we know about certain entities that reside in these packages, and the compiler needs to declare static Modules and Names that refer to these packages. Hence the wired-in packages can't include version numbers in their package UnitId, since we don't want to bake the version numbers of these packages into GHC. So here's the plan. Wired-in units are still versioned as normal in the packages database, and you can still have multiple versions of them installed. To the user, everything looks normal. However, for each invocation of GHC, only a single instance of each wired-in package will be recognised (the desired one is selected via @-package@\/@-hide-package@), and GHC will internally pretend that it has the *unversioned* 'UnitId', including in .hi files and object file symbols. Unselected versions of wired-in packages will be ignored, as will any other package that depends directly or indirectly on it (much as if you had used @-ignore-package@). The affected packages are compiled with, e.g., @-this-unit-id base@, so that the symbols in the object files have the unversioned unit id in their name. Make sure you change 'GHC.Unit.State.findWiredInUnits' if you add an entry here. 
-} bignumUnitId, primUnitId, baseUnitId, rtsUnitId, thUnitId, mainUnitId, thisGhcUnitId, interactiveUnitId :: UnitId bignumUnit, primUnit, baseUnit, rtsUnit, thUnit, mainUnit, thisGhcUnit, interactiveUnit :: Unit primUnitId = UnitId (fsLit "ghc-prim") bignumUnitId = UnitId (fsLit "ghc-bignum") baseUnitId = UnitId (fsLit "base") rtsUnitId = UnitId (fsLit "rts") thisGhcUnitId = UnitId (fsLit "ghc") interactiveUnitId = UnitId (fsLit "interactive") thUnitId = UnitId (fsLit "template-haskell") thUnit = RealUnit (Definite thUnitId) primUnit = RealUnit (Definite primUnitId) bignumUnit = RealUnit (Definite bignumUnitId) baseUnit = RealUnit (Definite baseUnitId) rtsUnit = RealUnit (Definite rtsUnitId) thisGhcUnit = RealUnit (Definite thisGhcUnitId) interactiveUnit = RealUnit (Definite interactiveUnitId) -- | This is the package Id for the current program. It is the default -- package Id if you don't specify a package name. We don't add this prefix -- to symbol names, since there can be only one main package per program. mainUnitId = UnitId (fsLit "main") mainUnit = RealUnit (Definite mainUnitId) isInteractiveModule :: Module -> Bool isInteractiveModule mod = moduleUnit mod == interactiveUnit wiredInUnitIds :: [UnitId] wiredInUnitIds = [ primUnitId , bignumUnitId , baseUnitId , rtsUnitId , thUnitId , thisGhcUnitId ] --------------------------------------------------------------------- -- Boot Modules --------------------------------------------------------------------- -- Note [Boot Module Naming] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- Why is this section here? After all, these modules are supposed to be about -- ways of referring to modules, not modules themselves. Well, the "bootness" of -- a module is in a way part of its name, because 'import {-# SOURCE #-} Foo' -- references the boot module in particular while 'import Foo' references the -- regular module. Backpack signatures live in the normal module namespace (no -- special import), so they don't matter here. 
When dealing with the modules -- themselves, however, one should use not 'IsBoot' or conflate signatures and -- modules in opposition to boot interfaces. Instead, one should use -- 'DriverPhases.HscSource'. See Note [HscSource types]. -- | Indicates whether a module name is referring to a boot interface (hs-boot -- file) or regular module (hs file). We need to treat boot modules specially -- when building compilation graphs, since they break cycles. Regular source -- files and signature files are treated equivalently. data IsBootInterface = NotBoot | IsBoot deriving (Eq, Ord, Show, Data) instance Binary IsBootInterface where put_ bh ib = put_ bh $ case ib of NotBoot -> False IsBoot -> True get bh = do b <- get bh return $ case b of False -> NotBoot True -> IsBoot -- | This data type just pairs a value 'mod' with an IsBootInterface flag. In -- practice, 'mod' is usually a @Module@ or @ModuleName@'. data GenWithIsBoot mod = GWIB { gwib_mod :: mod , gwib_isBoot :: IsBootInterface } deriving ( Eq, Ord, Show , Functor, Foldable, Traversable ) -- the Ord instance must ensure that we first sort by Module and then by -- IsBootInterface: this is assumed to perform filtering of non-boot modules, -- e.g. 
in GHC.Driver.Env.hptModulesBelow type ModuleNameWithIsBoot = GenWithIsBoot ModuleName type ModuleWithIsBoot = GenWithIsBoot Module instance Binary a => Binary (GenWithIsBoot a) where put_ bh (GWIB { gwib_mod, gwib_isBoot }) = do put_ bh gwib_mod put_ bh gwib_isBoot get bh = do gwib_mod <- get bh gwib_isBoot <- get bh pure $ GWIB { gwib_mod, gwib_isBoot } instance Outputable a => Outputable (GenWithIsBoot a) where ppr (GWIB { gwib_mod, gwib_isBoot }) = hsep $ ppr gwib_mod : case gwib_isBoot of IsBoot -> [ text "{-# SOURCE #-}" ] NotBoot -> [] ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Binary.hs0000644000000000000000000012670114472400113020523 0ustar0000000000000000 {-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE UnboxedTuples #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} #if MIN_VERSION_base(4,16,0) #define HAS_TYPELITCHAR #endif -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected -- -- (c) The University of Glasgow 2002-2006 -- -- Binary I/O library, with special tweaks for GHC -- -- Based on the nhc98 Binary library, which is copyright -- (c) Malcolm Wallace and Colin Runciman, University of York, 1998. 
-- Under the terms of the license for that software, we must tell you -- where you can obtain the original version of the Binary library, namely -- http://www.cs.york.ac.uk/fp/nhc98/ module GHC.Utils.Binary ( {-type-} Bin, {-class-} Binary(..), {-type-} BinHandle, SymbolTable, Dictionary, BinData(..), dataHandle, handleData, openBinMem, -- closeBin, seekBin, tellBin, castBin, withBinBuffer, foldGet, writeBinMem, readBinMem, putAt, getAt, -- * For writing instances putByte, getByte, -- * Variable length encodings putULEB128, getULEB128, putSLEB128, getSLEB128, -- * Fixed length encoding FixedLengthEncoding(..), -- * Lazy Binary I/O lazyGet, lazyPut, lazyGetMaybe, lazyPutMaybe, -- * User data UserData(..), getUserData, setUserData, newReadState, newWriteState, putDictionary, getDictionary, putFS, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Types.Name (Name) import GHC.Data.FastString import GHC.Utils.Panic.Plain import GHC.Types.Unique.FM import GHC.Data.FastMutInt import GHC.Utils.Fingerprint import GHC.Types.SrcLoc import qualified GHC.Data.Strict as Strict import Control.DeepSeq import Foreign hiding (shiftL, shiftR) import Data.Array import Data.Array.IO import Data.Array.Unsafe import Data.ByteString (ByteString) import qualified Data.ByteString.Internal as BS import qualified Data.ByteString.Unsafe as BS import Data.IORef import Data.Char ( ord, chr ) import Data.List.NonEmpty ( NonEmpty(..)) import qualified Data.List.NonEmpty as NonEmpty import Data.Set ( Set ) import qualified Data.Set as Set import Data.Time import Data.List (unfoldr) import Control.Monad ( when, (<$!>), unless, forM_ ) import System.IO as IO import System.IO.Unsafe ( unsafeInterleaveIO ) import System.IO.Error ( mkIOError, eofErrorType ) import GHC.Real ( Ratio(..) 
) import Data.IntMap (IntMap) import qualified Data.IntMap as IntMap #if MIN_VERSION_base(4,15,0) import GHC.ForeignPtr ( unsafeWithForeignPtr ) #endif type BinArray = ForeignPtr Word8 #if !MIN_VERSION_base(4,15,0) unsafeWithForeignPtr :: ForeignPtr a -> (Ptr a -> IO b) -> IO b unsafeWithForeignPtr = withForeignPtr #endif --------------------------------------------------------------- -- BinData --------------------------------------------------------------- data BinData = BinData Int BinArray instance NFData BinData where rnf (BinData sz _) = rnf sz instance Binary BinData where put_ bh (BinData sz dat) = do put_ bh sz putPrim bh sz $ \dest -> unsafeWithForeignPtr dat $ \orig -> copyBytes dest orig sz -- get bh = do sz <- get bh dat <- mallocForeignPtrBytes sz getPrim bh sz $ \orig -> unsafeWithForeignPtr dat $ \dest -> copyBytes dest orig sz return (BinData sz dat) dataHandle :: BinData -> IO BinHandle dataHandle (BinData size bin) = do ixr <- newFastMutInt 0 szr <- newFastMutInt size binr <- newIORef bin return (BinMem noUserData ixr szr binr) handleData :: BinHandle -> IO BinData handleData (BinMem _ ixr _ binr) = BinData <$> readFastMutInt ixr <*> readIORef binr --------------------------------------------------------------- -- BinHandle --------------------------------------------------------------- data BinHandle = BinMem { -- binary data stored in an unboxed array bh_usr :: UserData, -- sigh, need parameterized modules :-) _off_r :: !FastMutInt, -- the current offset _sz_r :: !FastMutInt, -- size of the array (cached) _arr_r :: !(IORef BinArray) -- the array (bounds: (0,size-1)) } -- XXX: should really store a "high water mark" for dumping out -- the binary data to a file. getUserData :: BinHandle -> UserData getUserData bh = bh_usr bh setUserData :: BinHandle -> UserData -> BinHandle setUserData bh us = bh { bh_usr = us } -- | Get access to the underlying buffer. 
withBinBuffer :: BinHandle -> (ByteString -> IO a) -> IO a withBinBuffer (BinMem _ ix_r _ arr_r) action = do arr <- readIORef arr_r ix <- readFastMutInt ix_r action $ BS.fromForeignPtr arr 0 ix --------------------------------------------------------------- -- Bin --------------------------------------------------------------- newtype Bin a = BinPtr Int deriving (Eq, Ord, Show, Bounded) castBin :: Bin a -> Bin b castBin (BinPtr i) = BinPtr i --------------------------------------------------------------- -- class Binary --------------------------------------------------------------- -- | Do not rely on instance sizes for general types, -- we use variable length encoding for many of them. class Binary a where put_ :: BinHandle -> a -> IO () put :: BinHandle -> a -> IO (Bin a) get :: BinHandle -> IO a -- define one of put_, put. Use of put_ is recommended because it -- is more likely that tail-calls can kick in, and we rarely need the -- position return value. put_ bh a = do _ <- put bh a; return () put bh a = do p <- tellBin bh; put_ bh a; return p putAt :: Binary a => BinHandle -> Bin a -> a -> IO () putAt bh p x = do seekBin bh p; put_ bh x; return () getAt :: Binary a => BinHandle -> Bin a -> IO a getAt bh p = do seekBin bh p; get bh openBinMem :: Int -> IO BinHandle openBinMem size | size <= 0 = error "Data.Binary.openBinMem: size must be >= 0" | otherwise = do arr <- mallocForeignPtrBytes size arr_r <- newIORef arr ix_r <- newFastMutInt 0 sz_r <- newFastMutInt size return (BinMem noUserData ix_r sz_r arr_r) tellBin :: BinHandle -> IO (Bin a) tellBin (BinMem _ r _ _) = do ix <- readFastMutInt r; return (BinPtr ix) seekBin :: BinHandle -> Bin a -> IO () seekBin h@(BinMem _ ix_r sz_r _) (BinPtr !p) = do sz <- readFastMutInt sz_r if (p >= sz) then do expandBin h p; writeFastMutInt ix_r p else writeFastMutInt ix_r p writeBinMem :: BinHandle -> FilePath -> IO () writeBinMem (BinMem _ ix_r _ arr_r) fn = do h <- openBinaryFile fn WriteMode arr <- readIORef arr_r ix <- 
readFastMutInt ix_r unsafeWithForeignPtr arr $ \p -> hPutBuf h p ix hClose h readBinMem :: FilePath -> IO BinHandle -- Return a BinHandle with a totally undefined State readBinMem filename = do h <- openBinaryFile filename ReadMode filesize' <- hFileSize h let filesize = fromIntegral filesize' arr <- mallocForeignPtrBytes filesize count <- unsafeWithForeignPtr arr $ \p -> hGetBuf h p filesize when (count /= filesize) $ error ("Binary.readBinMem: only read " ++ show count ++ " bytes") hClose h arr_r <- newIORef arr ix_r <- newFastMutInt 0 sz_r <- newFastMutInt filesize return (BinMem noUserData ix_r sz_r arr_r) -- expand the size of the array to include a specified offset expandBin :: BinHandle -> Int -> IO () expandBin (BinMem _ _ sz_r arr_r) !off = do !sz <- readFastMutInt sz_r let !sz' = getSize sz arr <- readIORef arr_r arr' <- mallocForeignPtrBytes sz' withForeignPtr arr $ \old -> withForeignPtr arr' $ \new -> copyBytes new old sz writeFastMutInt sz_r sz' writeIORef arr_r arr' where getSize :: Int -> Int getSize !sz | sz > off = sz | otherwise = getSize (sz * 2) foldGet :: Binary a => Word -- n elements -> BinHandle -> b -- initial accumulator -> (Word -> a -> b -> IO b) -> IO b foldGet n bh init_b f = go 0 init_b where go i b | i == n = return b | otherwise = do a <- get bh b' <- f i a b go (i+1) b' -- ----------------------------------------------------------------------------- -- Low-level reading/writing of bytes -- | Takes a size and action writing up to @size@ bytes. -- After the action has run advance the index to the buffer -- by size bytes. putPrim :: BinHandle -> Int -> (Ptr Word8 -> IO ()) -> IO () putPrim h@(BinMem _ ix_r sz_r arr_r) size f = do ix <- readFastMutInt ix_r sz <- readFastMutInt sz_r when (ix + size > sz) $ expandBin h (ix + size) arr <- readIORef arr_r unsafeWithForeignPtr arr $ \op -> f (op `plusPtr` ix) writeFastMutInt ix_r (ix + size) -- -- | Similar to putPrim but advances the index by the actual number of -- -- bytes written. 
-- putPrimMax :: BinHandle -> Int -> (Ptr Word8 -> IO Int) -> IO () -- putPrimMax h@(BinMem _ ix_r sz_r arr_r) size f = do -- ix <- readFastMutInt ix_r -- sz <- readFastMutInt sz_r -- when (ix + size > sz) $ -- expandBin h (ix + size) -- arr <- readIORef arr_r -- written <- withForeignPtr arr $ \op -> f (op `plusPtr` ix) -- writeFastMutInt ix_r (ix + written) getPrim :: BinHandle -> Int -> (Ptr Word8 -> IO a) -> IO a getPrim (BinMem _ ix_r sz_r arr_r) size f = do ix <- readFastMutInt ix_r sz <- readFastMutInt sz_r when (ix + size > sz) $ ioError (mkIOError eofErrorType "Data.Binary.getPrim" Nothing Nothing) arr <- readIORef arr_r w <- unsafeWithForeignPtr arr $ \p -> f (p `plusPtr` ix) -- This is safe WRT #17760 as we we guarantee that the above line doesn't -- diverge writeFastMutInt ix_r (ix + size) return w putWord8 :: BinHandle -> Word8 -> IO () putWord8 h !w = putPrim h 1 (\op -> poke op w) getWord8 :: BinHandle -> IO Word8 getWord8 h = getPrim h 1 peek putWord16 :: BinHandle -> Word16 -> IO () putWord16 h w = putPrim h 2 (\op -> do pokeElemOff op 0 (fromIntegral (w `shiftR` 8)) pokeElemOff op 1 (fromIntegral (w .&. 0xFF)) ) getWord16 :: BinHandle -> IO Word16 getWord16 h = getPrim h 2 (\op -> do w0 <- fromIntegral <$> peekElemOff op 0 w1 <- fromIntegral <$> peekElemOff op 1 return $! w0 `shiftL` 8 .|. w1 ) putWord32 :: BinHandle -> Word32 -> IO () putWord32 h w = putPrim h 4 (\op -> do pokeElemOff op 0 (fromIntegral (w `shiftR` 24)) pokeElemOff op 1 (fromIntegral ((w `shiftR` 16) .&. 0xFF)) pokeElemOff op 2 (fromIntegral ((w `shiftR` 8) .&. 0xFF)) pokeElemOff op 3 (fromIntegral (w .&. 0xFF)) ) getWord32 :: BinHandle -> IO Word32 getWord32 h = getPrim h 4 (\op -> do w0 <- fromIntegral <$> peekElemOff op 0 w1 <- fromIntegral <$> peekElemOff op 1 w2 <- fromIntegral <$> peekElemOff op 2 w3 <- fromIntegral <$> peekElemOff op 3 return $! (w0 `shiftL` 24) .|. (w1 `shiftL` 16) .|. (w2 `shiftL` 8) .|. 
w3 ) putWord64 :: BinHandle -> Word64 -> IO () putWord64 h w = putPrim h 8 (\op -> do pokeElemOff op 0 (fromIntegral (w `shiftR` 56)) pokeElemOff op 1 (fromIntegral ((w `shiftR` 48) .&. 0xFF)) pokeElemOff op 2 (fromIntegral ((w `shiftR` 40) .&. 0xFF)) pokeElemOff op 3 (fromIntegral ((w `shiftR` 32) .&. 0xFF)) pokeElemOff op 4 (fromIntegral ((w `shiftR` 24) .&. 0xFF)) pokeElemOff op 5 (fromIntegral ((w `shiftR` 16) .&. 0xFF)) pokeElemOff op 6 (fromIntegral ((w `shiftR` 8) .&. 0xFF)) pokeElemOff op 7 (fromIntegral (w .&. 0xFF)) ) getWord64 :: BinHandle -> IO Word64 getWord64 h = getPrim h 8 (\op -> do w0 <- fromIntegral <$> peekElemOff op 0 w1 <- fromIntegral <$> peekElemOff op 1 w2 <- fromIntegral <$> peekElemOff op 2 w3 <- fromIntegral <$> peekElemOff op 3 w4 <- fromIntegral <$> peekElemOff op 4 w5 <- fromIntegral <$> peekElemOff op 5 w6 <- fromIntegral <$> peekElemOff op 6 w7 <- fromIntegral <$> peekElemOff op 7 return $! (w0 `shiftL` 56) .|. (w1 `shiftL` 48) .|. (w2 `shiftL` 40) .|. (w3 `shiftL` 32) .|. (w4 `shiftL` 24) .|. (w5 `shiftL` 16) .|. (w6 `shiftL` 8) .|. w7 ) putByte :: BinHandle -> Word8 -> IO () putByte bh !w = putWord8 bh w getByte :: BinHandle -> IO Word8 getByte h = getWord8 h -- ----------------------------------------------------------------------------- -- Encode numbers in LEB128 encoding. -- Requires one byte of space per 7 bits of data. -- -- There are signed and unsigned variants. -- Do NOT use the unsigned one for signed values, at worst it will -- result in wrong results, at best it will lead to bad performance -- when coercing negative values to an unsigned type. -- -- We mark them as SPECIALIZE as it's extremely critical that they get specialized -- to their specific types. -- -- TODO: Each use of putByte performs a bounds check, -- we should use putPrimMax here. However it's quite hard to return -- the number of bytes written into putPrimMax without allocating an -- Int for it, while the code below does not allocate at all. 
-- So we eat the cost of the bounds check instead of increasing allocations -- for now. -- Unsigned numbers {-# SPECIALISE putULEB128 :: BinHandle -> Word -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Word64 -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Word32 -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Word16 -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Int -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Int64 -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Int32 -> IO () #-} {-# SPECIALISE putULEB128 :: BinHandle -> Int16 -> IO () #-} putULEB128 :: forall a. (Integral a, FiniteBits a) => BinHandle -> a -> IO () putULEB128 bh w = #if defined(DEBUG) (if w < 0 then panic "putULEB128: Signed number" else id) $ #endif go w where go :: a -> IO () go w | w <= (127 :: a) = putByte bh (fromIntegral w :: Word8) | otherwise = do -- bit 7 (8th bit) indicates more to come. let !byte = setBit (fromIntegral w) 7 :: Word8 putByte bh byte go (w `unsafeShiftR` 7) {-# SPECIALISE getULEB128 :: BinHandle -> IO Word #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Word64 #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Word32 #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Word16 #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Int #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Int64 #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Int32 #-} {-# SPECIALISE getULEB128 :: BinHandle -> IO Int16 #-} getULEB128 :: forall a. (Integral a, FiniteBits a) => BinHandle -> IO a getULEB128 bh = go 0 0 where go :: Int -> a -> IO a go shift w = do b <- getByte bh let !hasMore = testBit b 7 let !val = w .|. ((clearBit (fromIntegral b) 7) `unsafeShiftL` shift) :: a if hasMore then do go (shift+7) val else return $! 
val -- Signed numbers {-# SPECIALISE putSLEB128 :: BinHandle -> Word -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Word64 -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Word32 -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Word16 -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Int -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Int64 -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Int32 -> IO () #-} {-# SPECIALISE putSLEB128 :: BinHandle -> Int16 -> IO () #-} putSLEB128 :: forall a. (Integral a, Bits a) => BinHandle -> a -> IO () putSLEB128 bh initial = go initial where go :: a -> IO () go val = do let !byte = fromIntegral (clearBit val 7) :: Word8 let !val' = val `unsafeShiftR` 7 let !signBit = testBit byte 6 let !done = -- Unsigned value, val' == 0 and last value can -- be discriminated from a negative number. ((val' == 0 && not signBit) || -- Signed value, (val' == -1 && signBit)) let !byte' = if done then byte else setBit byte 7 putByte bh byte' unless done $ go val' {-# SPECIALISE getSLEB128 :: BinHandle -> IO Word #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Word64 #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Word32 #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Word16 #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Int #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Int64 #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Int32 #-} {-# SPECIALISE getSLEB128 :: BinHandle -> IO Int16 #-} getSLEB128 :: forall a. (Show a, Integral a, FiniteBits a) => BinHandle -> IO a getSLEB128 bh = do (val,shift,signed) <- go 0 0 if signed && (shift < finiteBitSize val ) then return $! ((complement 0 `unsafeShiftL` shift) .|. val) else return val where go :: Int -> a -> IO (a,Int,Bool) go shift val = do byte <- getByte bh let !byteVal = fromIntegral (clearBit byte 7) :: a let !val' = val .|. 
(byteVal `unsafeShiftL` shift) let !more = testBit byte 7 let !shift' = shift+7 if more then go (shift') val' else do let !signed = testBit byte 6 return (val',shift',signed) -- ----------------------------------------------------------------------------- -- Fixed length encoding instances -- Sometimes words are used to represent a certain bit pattern instead -- of a number. Using FixedLengthEncoding we will write the pattern as -- is to the interface file without the variable length encoding we usually -- apply. -- | Encode the argument in it's full length. This is different from many default -- binary instances which make no guarantee about the actual encoding and -- might do things use variable length encoding. newtype FixedLengthEncoding a = FixedLengthEncoding { unFixedLength :: a } instance Binary (FixedLengthEncoding Word8) where put_ h (FixedLengthEncoding x) = putByte h x get h = FixedLengthEncoding <$> getByte h instance Binary (FixedLengthEncoding Word16) where put_ h (FixedLengthEncoding x) = putWord16 h x get h = FixedLengthEncoding <$> getWord16 h instance Binary (FixedLengthEncoding Word32) where put_ h (FixedLengthEncoding x) = putWord32 h x get h = FixedLengthEncoding <$> getWord32 h instance Binary (FixedLengthEncoding Word64) where put_ h (FixedLengthEncoding x) = putWord64 h x get h = FixedLengthEncoding <$> getWord64 h -- ----------------------------------------------------------------------------- -- Primitive Word writes instance Binary Word8 where put_ bh !w = putWord8 bh w get = getWord8 instance Binary Word16 where put_ = putULEB128 get = getULEB128 instance Binary Word32 where put_ = putULEB128 get = getULEB128 instance Binary Word64 where put_ = putULEB128 get = getULEB128 -- ----------------------------------------------------------------------------- -- Primitive Int writes instance Binary Int8 where put_ h w = put_ h (fromIntegral w :: Word8) get h = do w <- get h; return $! 
(fromIntegral (w::Word8)) instance Binary Int16 where put_ = putSLEB128 get = getSLEB128 instance Binary Int32 where put_ = putSLEB128 get = getSLEB128 instance Binary Int64 where put_ h w = putSLEB128 h w get h = getSLEB128 h -- ----------------------------------------------------------------------------- -- Instances for standard types instance Binary () where put_ _ () = return () get _ = return () instance Binary Bool where put_ bh b = putByte bh (fromIntegral (fromEnum b)) get bh = do x <- getWord8 bh; return $! (toEnum (fromIntegral x)) instance Binary Char where put_ bh c = put_ bh (fromIntegral (ord c) :: Word32) get bh = do x <- get bh; return $! (chr (fromIntegral (x :: Word32))) instance Binary Int where put_ bh i = put_ bh (fromIntegral i :: Int64) get bh = do x <- get bh return $! (fromIntegral (x :: Int64)) instance Binary a => Binary [a] where put_ bh l = do let len = length l put_ bh len mapM_ (put_ bh) l get bh = do len <- get bh :: IO Int -- Int is variable length encoded so only -- one byte for small lists. let loop 0 = return [] loop n = do a <- get bh; as <- loop (n-1); return (a:as) loop len -- | This instance doesn't rely on the determinism of the keys' 'Ord' instance, -- so it works e.g. for 'Name's too. instance (Binary a, Ord a) => Binary (Set a) where put_ bh s = put_ bh (Set.toList s) get bh = Set.fromList <$> get bh instance Binary a => Binary (NonEmpty a) where put_ bh = put_ bh . 
NonEmpty.toList get bh = NonEmpty.fromList <$> get bh instance (Ix a, Binary a, Binary b) => Binary (Array a b) where put_ bh arr = do put_ bh $ bounds arr put_ bh $ elems arr get bh = do bounds <- get bh xs <- get bh return $ listArray bounds xs instance (Binary a, Binary b) => Binary (a,b) where put_ bh (a,b) = do put_ bh a; put_ bh b get bh = do a <- get bh b <- get bh return (a,b) instance (Binary a, Binary b, Binary c) => Binary (a,b,c) where put_ bh (a,b,c) = do put_ bh a; put_ bh b; put_ bh c get bh = do a <- get bh b <- get bh c <- get bh return (a,b,c) instance (Binary a, Binary b, Binary c, Binary d) => Binary (a,b,c,d) where put_ bh (a,b,c,d) = do put_ bh a; put_ bh b; put_ bh c; put_ bh d get bh = do a <- get bh b <- get bh c <- get bh d <- get bh return (a,b,c,d) instance (Binary a, Binary b, Binary c, Binary d, Binary e) => Binary (a,b,c,d, e) where put_ bh (a,b,c,d, e) = do put_ bh a; put_ bh b; put_ bh c; put_ bh d; put_ bh e; get bh = do a <- get bh b <- get bh c <- get bh d <- get bh e <- get bh return (a,b,c,d,e) instance (Binary a, Binary b, Binary c, Binary d, Binary e, Binary f) => Binary (a,b,c,d, e, f) where put_ bh (a,b,c,d, e, f) = do put_ bh a; put_ bh b; put_ bh c; put_ bh d; put_ bh e; put_ bh f; get bh = do a <- get bh b <- get bh c <- get bh d <- get bh e <- get bh f <- get bh return (a,b,c,d,e,f) instance (Binary a, Binary b, Binary c, Binary d, Binary e, Binary f, Binary g) => Binary (a,b,c,d,e,f,g) where put_ bh (a,b,c,d,e,f,g) = do put_ bh a; put_ bh b; put_ bh c; put_ bh d; put_ bh e; put_ bh f; put_ bh g get bh = do a <- get bh b <- get bh c <- get bh d <- get bh e <- get bh f <- get bh g <- get bh return (a,b,c,d,e,f,g) instance Binary a => Binary (Maybe a) where put_ bh Nothing = putByte bh 0 put_ bh (Just a) = do putByte bh 1; put_ bh a get bh = do h <- getWord8 bh case h of 0 -> return Nothing _ -> do x <- get bh; return (Just x) instance Binary a => Binary (Strict.Maybe a) where put_ bh Strict.Nothing = putByte bh 0 put_ bh 
(Strict.Just a) = do putByte bh 1; put_ bh a get bh = do h <- getWord8 bh case h of 0 -> return Strict.Nothing _ -> do x <- get bh; return (Strict.Just x) instance (Binary a, Binary b) => Binary (Either a b) where put_ bh (Left a) = do putByte bh 0; put_ bh a put_ bh (Right b) = do putByte bh 1; put_ bh b get bh = do h <- getWord8 bh case h of 0 -> do a <- get bh ; return (Left a) _ -> do b <- get bh ; return (Right b) instance Binary UTCTime where put_ bh u = do put_ bh (utctDay u) put_ bh (utctDayTime u) get bh = do day <- get bh dayTime <- get bh return $ UTCTime { utctDay = day, utctDayTime = dayTime } instance Binary Day where put_ bh d = put_ bh (toModifiedJulianDay d) get bh = do i <- get bh return $ ModifiedJulianDay { toModifiedJulianDay = i } instance Binary DiffTime where put_ bh dt = put_ bh (toRational dt) get bh = do r <- get bh return $ fromRational r {- Finally - a reasonable portable Integer instance. We used to encode values in the Int32 range as such, falling back to a string of all things. In either case we stored a tag byte to discriminate between the two cases. This made some sense as it's highly portable but also not very efficient. However GHC stores a surprisingly large number off large Integer values. In the examples looked at between 25% and 50% of Integers serialized were outside of the Int32 range. Consider a valie like `2724268014499746065`, some sort of hash actually generated by GHC. In the old scheme this was encoded as a list of 19 chars. This gave a size of 77 Bytes, one for the length of the list and 76 since we encode chars as Word32 as well. We can easily do better. The new plan is: * Start with a tag byte * 0 => Int64 (LEB128 encoded) * 1 => Negative large interger * 2 => Positive large integer * Followed by the value: * Int64 is encoded as usual * Large integers are encoded as a list of bytes (Word8). We use Data.Bits which defines a bit order independent of the representation. Values are stored LSB first. 
This means our example value `2724268014499746065` is now only 10 bytes large. * One byte tag * One byte for the length of the [Word8] list. * 8 bytes for the actual date. The new scheme also does not depend in any way on architecture specific details. We still use this scheme even with LEB128 available, as it has less overhead for truly large numbers. (> maxBound :: Int64) The instance is used for in Binary Integer and Binary Rational in GHC.Types.Literal -} instance Binary Integer where put_ bh i | i >= lo64 && i <= hi64 = do putWord8 bh 0 put_ bh (fromIntegral i :: Int64) | otherwise = do if i < 0 then putWord8 bh 1 else putWord8 bh 2 put_ bh (unroll $ abs i) where lo64 = fromIntegral (minBound :: Int64) hi64 = fromIntegral (maxBound :: Int64) get bh = do int_kind <- getWord8 bh case int_kind of 0 -> fromIntegral <$!> (get bh :: IO Int64) -- Large integer 1 -> negate <$!> getInt 2 -> getInt _ -> panic "Binary Integer - Invalid byte" where getInt :: IO Integer getInt = roll <$!> (get bh :: IO [Word8]) unroll :: Integer -> [Word8] unroll = unfoldr step where step 0 = Nothing step i = Just (fromIntegral i, i `shiftR` 8) roll :: [Word8] -> Integer roll = foldl' unstep 0 . reverse where unstep a b = a `shiftL` 8 .|. fromIntegral b {- -- This code is currently commented out. -- See https://gitlab.haskell.org/ghc/ghc/issues/3379#note_104346 for -- discussion. 
put_ bh (S# i#) = do putByte bh 0; put_ bh (I# i#) put_ bh (J# s# a#) = do putByte bh 1 put_ bh (I# s#) let sz# = sizeofByteArray# a# -- in *bytes* put_ bh (I# sz#) -- in *bytes* putByteArray bh a# sz# get bh = do b <- getByte bh case b of 0 -> do (I# i#) <- get bh return (S# i#) _ -> do (I# s#) <- get bh sz <- get bh (BA a#) <- getByteArray bh sz return (J# s# a#) putByteArray :: BinHandle -> ByteArray# -> Int# -> IO () putByteArray bh a s# = loop 0# where loop n# | n# ==# s# = return () | otherwise = do putByte bh (indexByteArray a n#) loop (n# +# 1#) getByteArray :: BinHandle -> Int -> IO ByteArray getByteArray bh (I# sz) = do (MBA arr) <- newByteArray sz let loop n | n ==# sz = return () | otherwise = do w <- getByte bh writeByteArray arr n w loop (n +# 1#) loop 0# freezeByteArray arr -} {- data ByteArray = BA ByteArray# data MBA = MBA (MutableByteArray# RealWorld) newByteArray :: Int# -> IO MBA newByteArray sz = IO $ \s -> case newByteArray# sz s of { (# s, arr #) -> (# s, MBA arr #) } freezeByteArray :: MutableByteArray# RealWorld -> IO ByteArray freezeByteArray arr = IO $ \s -> case unsafeFreezeByteArray# arr s of { (# s, arr #) -> (# s, BA arr #) } writeByteArray :: MutableByteArray# RealWorld -> Int# -> Word8 -> IO () writeByteArray arr i (W8# w) = IO $ \s -> case writeWord8Array# arr i w s of { s -> (# s, () #) } indexByteArray :: ByteArray# -> Int# -> Word8 indexByteArray a# n# = W8# (indexWord8Array# a# n#) -} instance (Binary a) => Binary (Ratio a) where put_ bh (a :% b) = do put_ bh a; put_ bh b get bh = do a <- get bh; b <- get bh; return (a :% b) -- Instance uses fixed-width encoding to allow inserting -- Bin placeholders in the stream. 
-- | 'Bin' pointers are always written as exactly 4 bytes (fixed width),
-- so that a placeholder written by 'lazyPut' can later be overwritten
-- in place via 'putAt' without shifting the rest of the stream.
instance Binary (Bin a) where
  put_ bh (BinPtr i) = putWord32 bh (fromIntegral i :: Word32)
  get bh = do i <- getWord32 bh; return (BinPtr (fromIntegral (i :: Word32)))

-- -----------------------------------------------------------------------------
-- Lazy reading/writing

-- | Write a value prefixed by a (back-patched) pointer to the position
-- just after it, so that a reader can skip over the value without
-- deserializing it (see 'lazyGet').
lazyPut :: Binary a => BinHandle -> a -> IO ()
lazyPut bh a = do
    -- output the obj with a ptr to skip over it:
    pre_a <- tellBin bh
    put_ bh pre_a       -- save a slot for the ptr
    put_ bh a           -- dump the object
    q <- tellBin bh     -- q = ptr to after object
    putAt bh pre_a q    -- fill in slot before a with ptr to q
    seekBin bh q        -- finally carry on writing at q

-- | Counterpart of 'lazyPut': reads the skip pointer eagerly, defers
-- decoding of the payload until it is demanded ('unsafeInterleaveIO'),
-- and leaves the handle positioned just after the payload.
lazyGet :: Binary a => BinHandle -> IO a
lazyGet bh = do
    p <- get bh -- a BinPtr
    p_a <- tellBin bh
    a <- unsafeInterleaveIO $ do
        -- NB: Use a fresh off_r variable in the child thread, for thread
        -- safety.
        off_r <- newFastMutInt 0
        getAt bh { _off_r = off_r } p_a
    seekBin bh p -- skip over the object for now
    return a

-- | Serialize the constructor strictly but lazily serialize a value inside a
-- 'Just'.
--
-- This way we can check for the presence of a value without deserializing the
-- value itself.
lazyPutMaybe :: Binary a => BinHandle -> Maybe a -> IO ()
lazyPutMaybe bh Nothing  = putWord8 bh 0
lazyPutMaybe bh (Just x) = do
  putWord8 bh 1
  lazyPut bh x

-- | Deserialize a value serialized by 'lazyPutMaybe'.
lazyGetMaybe :: Binary a => BinHandle -> IO (Maybe a)
lazyGetMaybe bh = do
  h <- getWord8 bh
  case h of
    0 -> pure Nothing
    _ -> Just <$> lazyGet bh

-- -----------------------------------------------------------------------------
-- UserData
-- -----------------------------------------------------------------------------

-- | Information we keep around during interface file
-- serialization/deserialization. Namely we keep the functions for serializing
-- and deserializing 'Name's and 'FastString's.
We do this because we actually
-- use serialization in two distinct settings,
--
-- * When serializing interface files themselves
--
-- * When computing the fingerprint of an IfaceDecl (which we compute by
--   hashing its Binary serialization)
--
-- These two settings have different needs while serializing Names:
--
-- * Names in interface files are serialized via a symbol table (see Note
--   [Symbol table representation of names] in "GHC.Iface.Binary").
--
-- * During fingerprinting a binding Name is serialized as the OccName and a
--   non-binding Name is serialized as the fingerprint of the thing they
--   represent. See Note [Fingerprinting IfaceDecls] for further discussion.
--
data UserData =
   UserData {
        -- for *deserialising* only:
        ud_get_name :: BinHandle -> IO Name,
        ud_get_fs   :: BinHandle -> IO FastString,

        -- for *serialising* only:
        ud_put_nonbinding_name :: BinHandle -> Name -> IO (),
        -- ^ serialize a non-binding 'Name' (e.g. a reference to another
        -- binding).
        ud_put_binding_name :: BinHandle -> Name -> IO (),
        -- ^ serialize a binding 'Name' (e.g. the name of an IfaceDecl)
        ud_put_fs :: BinHandle -> FastString -> IO ()
   }

-- | Build a 'UserData' usable only for reading: the serialization
-- fields are filled with 'undef' and will 'panic' if used.
newReadState :: (BinHandle -> IO Name)   -- ^ how to deserialize 'Name's
             -> (BinHandle -> IO FastString)
             -> UserData
newReadState get_name get_fs
  = UserData { ud_get_name = get_name,
               ud_get_fs = get_fs,
               ud_put_nonbinding_name = undef "put_nonbinding_name",
               ud_put_binding_name = undef "put_binding_name",
               ud_put_fs = undef "put_fs"
             }

-- | Build a 'UserData' usable only for writing: the deserialization
-- fields are filled with 'undef' and will 'panic' if used.
newWriteState :: (BinHandle -> Name -> IO ())
                 -- ^ how to serialize non-binding 'Name's
              -> (BinHandle -> Name -> IO ())
                 -- ^ how to serialize binding 'Name's
              -> (BinHandle -> FastString -> IO ())
              -> UserData
newWriteState put_nonbinding_name put_binding_name put_fs
  = UserData { ud_get_name = undef "get_name",
               ud_get_fs = undef "get_fs",
               ud_put_nonbinding_name = put_nonbinding_name,
               ud_put_binding_name = put_binding_name,
               ud_put_fs = put_fs
             }

noUserData :: a
noUserData = undef "UserData"

-- | Placeholder for a 'UserData' field that must not be used in the
-- current (read-only or write-only) mode; calling it is a 'panic'.
undef :: String -> a
undef s = panic ("Binary.UserData: no " ++ s)

---------------------------------------------------------
-- The Dictionary
---------------------------------------------------------

type Dictionary = Array Int FastString -- The dictionary
                                       -- Should be 0-indexed

-- | Write the 'FastString' dictionary: first its size, then the strings
-- in index order (the index comes from the 'UniqFM' elements).
putDictionary :: BinHandle -> Int -> UniqFM FastString (Int,FastString) -> IO ()
putDictionary bh sz dict = do
  put_ bh sz
  mapM_ (putFS bh) (elems (array (0,sz-1) (nonDetEltsUFM dict)))
    -- It's OK to use nonDetEltsUFM here because the elements have indices
    -- that array uses to create order

-- | Read back a dictionary written by 'putDictionary': size first,
-- then @sz@ 'FastString's into a 0-indexed array.
getDictionary :: BinHandle -> IO Dictionary
getDictionary bh = do
  sz <- get bh :: IO Int
  mut_arr <- newArray_ (0, sz-1) :: IO (IOArray Int FastString)
  forM_ [0..(sz-1)] $ \i -> do
    fs <- getFS bh
    writeArray mut_arr i fs
  unsafeFreeze mut_arr

---------------------------------------------------------
-- The Symbol Table
---------------------------------------------------------

-- On disk, the symbol table is an array of IfExtName, when
-- reading it in we turn it into a SymbolTable.
type SymbolTable = Array Int Name

---------------------------------------------------------
-- Reading and writing FastStrings
---------------------------------------------------------

-- | Write a 'FastString' directly (no dictionary) as its UTF-8 bytes.
putFS :: BinHandle -> FastString -> IO ()
putFS bh fs = putBS bh $ bytesFS fs

-- | Read a 'FastString' written by 'putFS': a length then that many
-- bytes, turned into a 'FastString' without an extra copy.
getFS :: BinHandle -> IO FastString
getFS bh = do
  l  <- get bh :: IO Int
  getPrim bh l (\src -> pure $! mkFastStringBytes src l )

-- | Write a 'ByteString' as its length followed by its raw bytes.
putBS :: BinHandle -> ByteString -> IO ()
putBS bh bs =
  BS.unsafeUseAsCStringLen bs $ \(ptr, l) -> do
    put_ bh l
    putPrim bh l (\op -> copyBytes op (castPtr ptr) l)

-- | Read back a 'ByteString' written by 'putBS'.
getBS :: BinHandle -> IO ByteString
getBS bh = do
  l <- get bh :: IO Int
  BS.create l $ \dest -> do
    getPrim bh l (\src -> copyBytes dest src l)

instance Binary ByteString where
  put_ bh f = putBS bh f
  get bh = getBS bh

-- | 'FastString's are (de)serialized via the handle's 'UserData', so the
-- representation depends on the context (e.g. dictionary-based in
-- interface files).
instance Binary FastString where
  put_ bh f =
    case getUserData bh of
        UserData { ud_put_fs = put_fs } -> put_fs bh f
  get bh =
    case getUserData bh of
        UserData { ud_get_fs = get_fs } -> get_fs bh

deriving instance Binary NonDetFastString
deriving instance Binary LexicalFastString

-- | A 'Fingerprint' is two 'Word64's, written high word first.
instance Binary Fingerprint where
  put_ h (Fingerprint w1 w2) = do put_ h w1; put_ h w2
  get  h = do w1 <- get h; w2 <- get h; return (Fingerprint w1 w2)

-- instance Binary FunctionOrData where
--     put_ bh IsFunction = putByte bh 0
--     put_ bh IsData     = putByte bh 1
--     get bh = do
--         h <- getByte bh
--         case h of
--           0 -> return IsFunction
--           1 -> return IsData
--           _ -> panic "Binary FunctionOrData"

-- instance Binary TupleSort where
--     put_ bh BoxedTuple      = putByte bh 0
--     put_ bh UnboxedTuple    = putByte bh 1
--     put_ bh ConstraintTuple = putByte bh 2
--     get bh = do
--       h <- getByte bh
--       case h of
--         0 -> do return BoxedTuple
--         1 -> do return UnboxedTuple
--         _ -> do return ConstraintTuple

-- instance Binary Activation where
--     put_ bh NeverActive = do
--             putByte bh 0
--     put_ bh FinalActive = do
--             putByte bh 1
--     put_ bh AlwaysActive = do
--             putByte bh 2
--     put_ bh (ActiveBefore src aa) = do
--             putByte bh 3
--             put_ bh src
--             put_ bh aa
--     put_ bh
(ActiveAfter src ab) = do -- putByte bh 4 -- put_ bh src -- put_ bh ab -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do return NeverActive -- 1 -> do return FinalActive -- 2 -> do return AlwaysActive -- 3 -> do src <- get bh -- aa <- get bh -- return (ActiveBefore src aa) -- _ -> do src <- get bh -- ab <- get bh -- return (ActiveAfter src ab) -- instance Binary InlinePragma where -- put_ bh (InlinePragma s a b c d) = do -- put_ bh s -- put_ bh a -- put_ bh b -- put_ bh c -- put_ bh d -- get bh = do -- s <- get bh -- a <- get bh -- b <- get bh -- c <- get bh -- d <- get bh -- return (InlinePragma s a b c d) -- instance Binary RuleMatchInfo where -- put_ bh FunLike = putByte bh 0 -- put_ bh ConLike = putByte bh 1 -- get bh = do -- h <- getByte bh -- if h == 1 then return ConLike -- else return FunLike -- instance Binary InlineSpec where -- put_ bh NoUserInlinePrag = putByte bh 0 -- put_ bh Inline = putByte bh 1 -- put_ bh Inlinable = putByte bh 2 -- put_ bh NoInline = putByte bh 3 -- get bh = do h <- getByte bh -- case h of -- 0 -> return NoUserInlinePrag -- 1 -> return Inline -- 2 -> return Inlinable -- _ -> return NoInline -- instance Binary RecFlag where -- put_ bh Recursive = do -- putByte bh 0 -- put_ bh NonRecursive = do -- putByte bh 1 -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do return Recursive -- _ -> do return NonRecursive -- instance Binary OverlapMode where -- put_ bh (NoOverlap s) = putByte bh 0 >> put_ bh s -- put_ bh (Overlaps s) = putByte bh 1 >> put_ bh s -- put_ bh (Incoherent s) = putByte bh 2 >> put_ bh s -- put_ bh (Overlapping s) = putByte bh 3 >> put_ bh s -- put_ bh (Overlappable s) = putByte bh 4 >> put_ bh s -- get bh = do -- h <- getByte bh -- case h of -- 0 -> (get bh) >>= \s -> return $ NoOverlap s -- 1 -> (get bh) >>= \s -> return $ Overlaps s -- 2 -> (get bh) >>= \s -> return $ Incoherent s -- 3 -> (get bh) >>= \s -> return $ Overlapping s -- 4 -> (get bh) >>= \s -> return $ Overlappable s -- _ -> panic ("get 
OverlapMode" ++ show h) -- instance Binary OverlapFlag where -- put_ bh flag = do put_ bh (overlapMode flag) -- put_ bh (isSafeOverlap flag) -- get bh = do -- h <- get bh -- b <- get bh -- return OverlapFlag { overlapMode = h, isSafeOverlap = b } -- instance Binary FixityDirection where -- put_ bh InfixL = do -- putByte bh 0 -- put_ bh InfixR = do -- putByte bh 1 -- put_ bh InfixN = do -- putByte bh 2 -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do return InfixL -- 1 -> do return InfixR -- _ -> do return InfixN -- instance Binary Fixity where -- put_ bh (Fixity src aa ab) = do -- put_ bh src -- put_ bh aa -- put_ bh ab -- get bh = do -- src <- get bh -- aa <- get bh -- ab <- get bh -- return (Fixity src aa ab) -- instance Binary WarningTxt where -- put_ bh (WarningTxt s w) = do -- putByte bh 0 -- put_ bh s -- put_ bh w -- put_ bh (DeprecatedTxt s d) = do -- putByte bh 1 -- put_ bh s -- put_ bh d -- get bh = do -- h <- getByte bh -- case h of -- 0 -> do s <- get bh -- w <- get bh -- return (WarningTxt s w) -- _ -> do s <- get bh -- d <- get bh -- return (DeprecatedTxt s d) -- instance Binary StringLiteral where -- put_ bh (StringLiteral st fs _) = do -- put_ bh st -- put_ bh fs -- get bh = do -- st <- get bh -- fs <- get bh -- return (StringLiteral st fs Nothing) instance Binary a => Binary (Located a) where put_ bh (L l x) = do put_ bh l put_ bh x get bh = do l <- get bh x <- get bh return (L l x) instance Binary RealSrcSpan where put_ bh ss = do put_ bh (srcSpanFile ss) put_ bh (srcSpanStartLine ss) put_ bh (srcSpanStartCol ss) put_ bh (srcSpanEndLine ss) put_ bh (srcSpanEndCol ss) get bh = do f <- get bh sl <- get bh sc <- get bh el <- get bh ec <- get bh return (mkRealSrcSpan (mkRealSrcLoc f sl sc) (mkRealSrcLoc f el ec)) instance Binary UnhelpfulSpanReason where put_ bh r = case r of UnhelpfulNoLocationInfo -> putByte bh 0 UnhelpfulWiredIn -> putByte bh 1 UnhelpfulInteractive -> putByte bh 2 UnhelpfulGenerated -> putByte bh 3 UnhelpfulOther fs -> 
putByte bh 4 >> put_ bh fs get bh = do h <- getByte bh case h of 0 -> return UnhelpfulNoLocationInfo 1 -> return UnhelpfulWiredIn 2 -> return UnhelpfulInteractive 3 -> return UnhelpfulGenerated _ -> UnhelpfulOther <$> get bh instance Binary SrcSpan where put_ bh (RealSrcSpan ss _sb) = do putByte bh 0 -- BufSpan doesn't ever get serialised because the positions depend -- on build location. put_ bh ss put_ bh (UnhelpfulSpan s) = do putByte bh 1 put_ bh s get bh = do h <- getByte bh case h of 0 -> do ss <- get bh return (RealSrcSpan ss Strict.Nothing) _ -> do s <- get bh return (UnhelpfulSpan s) -------------------------------------------------------------------------------- -- Instances for the containers package -------------------------------------------------------------------------------- instance (Binary v) => Binary (IntMap v) where put_ bh m = put_ bh (IntMap.toList m) get bh = IntMap.fromList <$> get bh ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Binary/Typeable.hs0000644000000000000000000001616714472400113022274 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE GADTs #-} {-# OPTIONS_GHC -O2 -funbox-strict-fields #-} {-# OPTIONS_GHC -Wno-orphans -Wincomplete-patterns #-} #if MIN_VERSION_base(4,16,0) #define HAS_TYPELITCHAR #endif -- | Orphan Binary instances for Data.Typeable stuff module GHC.Utils.Binary.Typeable ( getSomeTypeRep ) where import GHC.Prelude import GHC.Utils.Binary import GHC.Exts (RuntimeRep(..), VecCount(..), VecElem(..)) #if __GLASGOW_HASKELL__ >= 901 import GHC.Exts (Levity(Lifted, Unlifted)) #endif import GHC.Serialized import Foreign import Type.Reflection import Type.Reflection.Unsafe import Data.Kind (Type) instance Binary TyCon where put_ bh tc = do put_ bh (tyConPackage tc) put_ bh (tyConModule tc) put_ bh (tyConName tc) put_ bh (tyConKindArgs tc) put_ bh (tyConKindRep tc) get bh = mkTyCon <$> get bh <*> get bh <*> get bh <*> get bh <*> get bh getSomeTypeRep :: BinHandle -> IO SomeTypeRep 
getSomeTypeRep bh = do tag <- get bh :: IO Word8 case tag of 0 -> return $ SomeTypeRep (typeRep :: TypeRep Type) 1 -> do con <- get bh :: IO TyCon ks <- get bh :: IO [SomeTypeRep] return $ SomeTypeRep $ mkTrCon con ks 2 -> do SomeTypeRep f <- getSomeTypeRep bh SomeTypeRep x <- getSomeTypeRep bh case typeRepKind f of Fun arg res -> case arg `eqTypeRep` typeRepKind x of Just HRefl -> case typeRepKind res `eqTypeRep` (typeRep :: TypeRep Type) of Just HRefl -> return $ SomeTypeRep $ mkTrApp f x _ -> failure "Kind mismatch in type application" [] _ -> failure "Kind mismatch in type application" [ " Found argument of kind: " ++ show (typeRepKind x) , " Where the constructor: " ++ show f , " Expects kind: " ++ show arg ] _ -> failure "Applied non-arrow" [ " Applied type: " ++ show f , " To argument: " ++ show x ] _ -> failure "Invalid SomeTypeRep" [] where failure description info = fail $ unlines $ [ "Binary.getSomeTypeRep: "++description ] ++ map (" "++) info instance Binary SomeTypeRep where put_ bh (SomeTypeRep rep) = putTypeRep bh rep get = getSomeTypeRep instance Typeable a => Binary (TypeRep (a :: k)) where put_ = putTypeRep get bh = do SomeTypeRep rep <- getSomeTypeRep bh case rep `eqTypeRep` expected of Just HRefl -> pure rep Nothing -> fail $ unlines [ "Binary: Type mismatch" , " Deserialized type: " ++ show rep , " Expected type: " ++ show expected ] where expected = typeRep :: TypeRep a instance Binary VecCount where put_ bh = putByte bh . fromIntegral . fromEnum get bh = toEnum . fromIntegral <$> getByte bh instance Binary VecElem where put_ bh = putByte bh . fromIntegral . fromEnum get bh = toEnum . 
fromIntegral <$> getByte bh instance Binary RuntimeRep where put_ bh (VecRep a b) = putByte bh 0 >> put_ bh a >> put_ bh b put_ bh (TupleRep reps) = putByte bh 1 >> put_ bh reps put_ bh (SumRep reps) = putByte bh 2 >> put_ bh reps #if __GLASGOW_HASKELL__ >= 901 put_ bh (BoxedRep Lifted) = putByte bh 3 put_ bh (BoxedRep Unlifted) = putByte bh 4 #else put_ bh LiftedRep = putByte bh 3 put_ bh UnliftedRep = putByte bh 4 #endif put_ bh IntRep = putByte bh 5 put_ bh WordRep = putByte bh 6 put_ bh Int64Rep = putByte bh 7 put_ bh Word64Rep = putByte bh 8 put_ bh AddrRep = putByte bh 9 put_ bh FloatRep = putByte bh 10 put_ bh DoubleRep = putByte bh 11 put_ bh Int8Rep = putByte bh 12 put_ bh Word8Rep = putByte bh 13 put_ bh Int16Rep = putByte bh 14 put_ bh Word16Rep = putByte bh 15 put_ bh Int32Rep = putByte bh 16 put_ bh Word32Rep = putByte bh 17 get bh = do tag <- getByte bh case tag of 0 -> VecRep <$> get bh <*> get bh 1 -> TupleRep <$> get bh 2 -> SumRep <$> get bh #if __GLASGOW_HASKELL__ >= 901 3 -> pure (BoxedRep Lifted) 4 -> pure (BoxedRep Unlifted) #else 3 -> pure LiftedRep 4 -> pure UnliftedRep #endif 5 -> pure IntRep 6 -> pure WordRep 7 -> pure Int64Rep 8 -> pure Word64Rep 9 -> pure AddrRep 10 -> pure FloatRep 11 -> pure DoubleRep 12 -> pure Int8Rep 13 -> pure Word8Rep 14 -> pure Int16Rep 15 -> pure Word16Rep 16 -> pure Int32Rep 17 -> pure Word32Rep _ -> fail "Binary.putRuntimeRep: invalid tag" instance Binary KindRep where put_ bh (KindRepTyConApp tc k) = putByte bh 0 >> put_ bh tc >> put_ bh k put_ bh (KindRepVar bndr) = putByte bh 1 >> put_ bh bndr put_ bh (KindRepApp a b) = putByte bh 2 >> put_ bh a >> put_ bh b put_ bh (KindRepFun a b) = putByte bh 3 >> put_ bh a >> put_ bh b put_ bh (KindRepTYPE r) = putByte bh 4 >> put_ bh r put_ bh (KindRepTypeLit sort r) = putByte bh 5 >> put_ bh sort >> put_ bh r get bh = do tag <- getByte bh case tag of 0 -> KindRepTyConApp <$> get bh <*> get bh 1 -> KindRepVar <$> get bh 2 -> KindRepApp <$> get bh <*> get bh 3 -> 
KindRepFun <$> get bh <*> get bh 4 -> KindRepTYPE <$> get bh 5 -> KindRepTypeLit <$> get bh <*> get bh _ -> fail "Binary.putKindRep: invalid tag" instance Binary TypeLitSort where put_ bh TypeLitSymbol = putByte bh 0 put_ bh TypeLitNat = putByte bh 1 #if defined(HAS_TYPELITCHAR) put_ bh TypeLitChar = putByte bh 2 #endif get bh = do tag <- getByte bh case tag of 0 -> pure TypeLitSymbol 1 -> pure TypeLitNat #if defined(HAS_TYPELITCHAR) 2 -> pure TypeLitChar #endif _ -> fail "Binary.putTypeLitSort: invalid tag" putTypeRep :: BinHandle -> TypeRep a -> IO () putTypeRep bh rep -- Handle Type specially since it's so common | Just HRefl <- rep `eqTypeRep` (typeRep :: TypeRep Type) = put_ bh (0 :: Word8) putTypeRep bh (Con' con ks) = do put_ bh (1 :: Word8) put_ bh con put_ bh ks putTypeRep bh (App f x) = do put_ bh (2 :: Word8) putTypeRep bh f putTypeRep bh x #if __GLASGOW_HASKELL__ < 903 putTypeRep bh (Fun arg res) = do put_ bh (3 :: Word8) putTypeRep bh arg putTypeRep bh res #endif instance Binary Serialized where put_ bh (Serialized the_type bytes) = do put_ bh the_type put_ bh bytes get bh = do the_type <- get bh bytes <- get bh return (Serialized the_type bytes) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/BufHandle.hs0000644000000000000000000001036514472400113021125 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} ----------------------------------------------------------------------------- -- -- Fast write-buffered Handles -- -- (c) The University of Glasgow 2005-2006 -- -- This is a simple abstraction over Handles that offers very fast write -- buffering, but without the thread safety that Handles provide. It's used -- to save time in GHC.Utils.Ppr.printDoc. 
-- ----------------------------------------------------------------------------- module GHC.Utils.BufHandle ( BufHandle(..), newBufHandle, bPutChar, bPutStr, bPutFS, bPutFZS, bPutPtrString, bPutReplicate, bFlush, ) where import GHC.Prelude import GHC.Data.FastString import GHC.Data.FastMutInt import Control.Monad ( when ) import Data.ByteString (ByteString) import qualified Data.ByteString.Unsafe as BS import Data.Char ( ord ) import Foreign import Foreign.C.String import System.IO -- ----------------------------------------------------------------------------- data BufHandle = BufHandle {-#UNPACK#-}!(Ptr Word8) {-#UNPACK#-}!FastMutInt Handle newBufHandle :: Handle -> IO BufHandle newBufHandle hdl = do ptr <- mallocBytes buf_size r <- newFastMutInt 0 return (BufHandle ptr r hdl) buf_size :: Int buf_size = 8192 bPutChar :: BufHandle -> Char -> IO () bPutChar b@(BufHandle buf r hdl) !c = do i <- readFastMutInt r if (i >= buf_size) then do hPutBuf hdl buf buf_size writeFastMutInt r 0 bPutChar b c else do pokeElemOff buf i (fromIntegral (ord c) :: Word8) writeFastMutInt r (i+1) bPutStr :: BufHandle -> String -> IO () bPutStr (BufHandle buf r hdl) !str = do i <- readFastMutInt r loop str i where loop "" !i = do writeFastMutInt r i; return () loop (c:cs) !i | i >= buf_size = do hPutBuf hdl buf buf_size loop (c:cs) 0 | otherwise = do pokeElemOff buf i (fromIntegral (ord c)) loop cs (i+1) bPutFS :: BufHandle -> FastString -> IO () bPutFS b fs = bPutBS b $ bytesFS fs bPutFZS :: BufHandle -> FastZString -> IO () bPutFZS b fs = bPutBS b $ fastZStringToByteString fs bPutBS :: BufHandle -> ByteString -> IO () bPutBS b bs = BS.unsafeUseAsCStringLen bs $ bPutCStringLen b bPutCStringLen :: BufHandle -> CStringLen -> IO () bPutCStringLen b@(BufHandle buf r hdl) cstr@(ptr, len) = do i <- readFastMutInt r if (i + len) >= buf_size then do hPutBuf hdl buf i writeFastMutInt r 0 if (len >= buf_size) then hPutBuf hdl ptr len else bPutCStringLen b cstr else do copyBytes (buf `plusPtr` i) 
ptr len writeFastMutInt r (i + len) bPutPtrString :: BufHandle -> PtrString -> IO () bPutPtrString b@(BufHandle buf r hdl) l@(PtrString a len) = l `seq` do i <- readFastMutInt r if (i+len) >= buf_size then do hPutBuf hdl buf i writeFastMutInt r 0 if (len >= buf_size) then hPutBuf hdl a len else bPutPtrString b l else do copyBytes (buf `plusPtr` i) a len writeFastMutInt r (i+len) -- | Replicate an 8-bit character bPutReplicate :: BufHandle -> Int -> Char -> IO () bPutReplicate (BufHandle buf r hdl) len c = do i <- readFastMutInt r let oc = fromIntegral (ord c) if (i+len) < buf_size then do fillBytes (buf `plusPtr` i) oc len writeFastMutInt r (i+len) else do -- flush the current buffer when (i /= 0) $ hPutBuf hdl buf i if (len < buf_size) then do fillBytes buf oc len writeFastMutInt r len else do -- fill a full buffer fillBytes buf oc buf_size -- flush it as many times as necessary let go n | n >= buf_size = do hPutBuf hdl buf buf_size go (n-buf_size) | otherwise = writeFastMutInt r n go len bFlush :: BufHandle -> IO () bFlush (BufHandle buf r hdl) = do i <- readFastMutInt r when (i > 0) $ hPutBuf hdl buf i free buf return () ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/CliOption.hs0000644000000000000000000000176214472400113021176 0ustar0000000000000000module GHC.Utils.CliOption ( Option (..) , showOpt ) where import GHC.Prelude -- ----------------------------------------------------------------------------- -- Command-line options -- | When invoking external tools as part of the compilation pipeline, we -- pass these a sequence of options on the command-line. Rather than -- just using a list of Strings, we use a type that allows us to distinguish -- between filepaths and 'other stuff'. The reason for this is that -- this type gives us a handle on transforming filenames, and filenames only, -- to whatever format they're expected to be on a particular platform. data Option = FileOption -- an entry that _contains_ filename(s) / filepaths. 
String -- a non-filepath prefix that shouldn't be -- transformed (e.g., "/out=") String -- the filepath/filename portion | Option String deriving ( Eq ) showOpt :: Option -> String showOpt (FileOption pre f) = pre ++ f showOpt (Option s) = s ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Constants.hs0000644000000000000000000000177414472400113021255 0ustar0000000000000000{-# LANGUAGE CPP #-} module GHC.Utils.Constants ( debugIsOn , ghciSupported , isWindowsHost , isDarwinHost ) where import GHC.Prelude {- These booleans are global constants, set by CPP flags. They allow us to recompile a single module (this one) to change whether or not debug output appears. They sometimes let us avoid even running CPP elsewhere. It's important that the flags are literal constants (True/False). Then, with -0, tests of the flags in other modules will simplify to the correct branch of the conditional, thereby dropping debug code altogether when the flags are off. -} ghciSupported :: Bool #if defined(HAVE_INTERNAL_INTERPRETER) ghciSupported = True #else ghciSupported = False #endif debugIsOn :: Bool #if defined(DEBUG) debugIsOn = True #else debugIsOn = False #endif isWindowsHost :: Bool #if defined(mingw32_HOST_OS) isWindowsHost = True #else isWindowsHost = False #endif isDarwinHost :: Bool #if defined(darwin_HOST_OS) isDarwinHost = True #else isDarwinHost = False #endif ghc-lib-parser-9.4.7.20230826/libraries/ghc-boot/GHC/Utils/Encoding.hs0000644000000000000000000005123214472375231022701 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples, MultiWayIf #-} {-# OPTIONS_GHC -O2 -fno-warn-name-shadowing #-} -- We always optimise this, otherwise performance of a non-optimised -- compiler is severely affected. 
This module used to live in the `ghc` -- package but has been moved to `ghc-boot` because the definition -- of the package database (needed in both ghc and in ghc-pkg) lives in -- `ghc-boot` and uses ShortText, which in turn depends on this module. -- ----------------------------------------------------------------------------- -- -- (c) The University of Glasgow, 1997-2006 -- -- Character encodings -- -- ----------------------------------------------------------------------------- module GHC.Utils.Encoding ( -- * UTF-8 utf8DecodeCharAddr#, utf8PrevChar, utf8CharStart, utf8DecodeChar, utf8DecodeByteString, utf8UnconsByteString, utf8DecodeShortByteString, utf8CompareShortByteString, utf8DecodeStringLazy, utf8EncodeChar, utf8EncodeString, utf8EncodeStringPtr, utf8EncodeShortByteString, utf8EncodedLength, countUTF8Chars, -- * Z-encoding zEncodeString, zDecodeString, -- * Base62-encoding toBase62, toBase62Padded ) where import Prelude import Foreign import Foreign.ForeignPtr.Unsafe (unsafeForeignPtrToPtr) import Data.Char import qualified Data.Char as Char import Numeric import GHC.IO import GHC.ST import Data.ByteString (ByteString) import qualified Data.ByteString.Internal as BS import Data.ByteString.Short.Internal (ShortByteString(..)) import GHC.Exts -- ----------------------------------------------------------------------------- -- UTF-8 -- We can't write the decoder as efficiently as we'd like without -- resorting to unboxed extensions, unfortunately. I tried to write -- an IO version of this function, but GHC can't eliminate boxed -- results from an IO-returning function. -- -- We assume we can ignore overflow when parsing a multibyte character here. -- To make this safe, we add extra sentinel bytes to unparsed UTF-8 sequences -- before decoding them (see "GHC.Data.StringBuffer"). 
{-# INLINE utf8DecodeChar# #-}
-- | Decode one UTF-8 code point.  The argument fetches the byte at a
-- given offset from the current position; the result is the decoded
-- character together with the number of bytes consumed (1-4).
-- Invalid sequences decode to @'\0'@ (see the comment on @fail@ below)
-- and consume up to the first offending byte.
utf8DecodeChar# :: (Int# -> Word#) -> (# Char#, Int# #)
utf8DecodeChar# indexWord8# =
  let !ch0 = word2Int# (indexWord8# 0#) in
  case () of
    -- 1-byte sequence: plain ASCII.
    _ | isTrue# (ch0 <=# 0x7F#) -> (# chr# ch0, 1# #)

      -- 2-byte sequence: leading byte in 0xC0..0xDF, one continuation byte.
      | isTrue# ((ch0 >=# 0xC0#) `andI#` (ch0 <=# 0xDF#)) ->
        let !ch1 = word2Int# (indexWord8# 1#) in
        if isTrue# ((ch1 <# 0x80#) `orI#` (ch1 >=# 0xC0#)) then fail 1# else
        (# chr# (((ch0 -# 0xC0#) `uncheckedIShiftL#` 6#) +#
                  (ch1 -# 0x80#)),
           2# #)

      -- 3-byte sequence: leading byte in 0xE0..0xEF, two continuation bytes.
      | isTrue# ((ch0 >=# 0xE0#) `andI#` (ch0 <=# 0xEF#)) ->
        let !ch1 = word2Int# (indexWord8# 1#) in
        if isTrue# ((ch1 <# 0x80#) `orI#` (ch1 >=# 0xC0#)) then fail 1# else
        let !ch2 = word2Int# (indexWord8# 2#) in
        if isTrue# ((ch2 <# 0x80#) `orI#` (ch2 >=# 0xC0#)) then fail 2# else
        (# chr# (((ch0 -# 0xE0#) `uncheckedIShiftL#` 12#) +#
                 ((ch1 -# 0x80#) `uncheckedIShiftL#` 6#)  +#
                  (ch2 -# 0x80#)),
           3# #)

      -- 4-byte sequence: leading byte in 0xF0..0xF8, three continuation bytes.
      | isTrue# ((ch0 >=# 0xF0#) `andI#` (ch0 <=# 0xF8#)) ->
        let !ch1 = word2Int# (indexWord8# 1#) in
        if isTrue# ((ch1 <# 0x80#) `orI#` (ch1 >=# 0xC0#)) then fail 1# else
        let !ch2 = word2Int# (indexWord8# 2#) in
        if isTrue# ((ch2 <# 0x80#) `orI#` (ch2 >=# 0xC0#)) then fail 2# else
        let !ch3 = word2Int# (indexWord8# 3#) in
        if isTrue# ((ch3 <# 0x80#) `orI#` (ch3 >=# 0xC0#)) then fail 3# else
        (# chr# (((ch0 -# 0xF0#) `uncheckedIShiftL#` 18#) +#
                 ((ch1 -# 0x80#) `uncheckedIShiftL#` 12#) +#
                 ((ch2 -# 0x80#) `uncheckedIShiftL#` 6#)  +#
                  (ch3 -# 0x80#)),
           4# #)

      | otherwise -> fail 1#
  where
        -- all invalid sequences end up here:
        fail :: Int# -> (# Char#, Int# #)
        fail nBytes# = (# '\0'#, nBytes# #)
        -- '\xFFFD' would be the usual replacement character, but
        -- that's a valid symbol in Haskell, so will result in a
        -- confusing parse error later on. Instead we use '\0' which
        -- will signal a lexer error immediately.
utf8DecodeCharAddr# :: Addr# -> Int# -> (# Char#, Int# #) utf8DecodeCharAddr# a# off# = #if !MIN_VERSION_base(4,16,0) utf8DecodeChar# (\i# -> indexWord8OffAddr# a# (i# +# off#)) #else utf8DecodeChar# (\i# -> word8ToWord# (indexWord8OffAddr# a# (i# +# off#))) #endif utf8DecodeCharByteArray# :: ByteArray# -> Int# -> (# Char#, Int# #) utf8DecodeCharByteArray# ba# off# = #if !MIN_VERSION_base(4,16,0) utf8DecodeChar# (\i# -> indexWord8Array# ba# (i# +# off#)) #else utf8DecodeChar# (\i# -> word8ToWord# (indexWord8Array# ba# (i# +# off#))) #endif utf8DecodeChar :: Ptr Word8 -> (Char, Int) utf8DecodeChar !(Ptr a#) = case utf8DecodeCharAddr# a# 0# of (# c#, nBytes# #) -> ( C# c#, I# nBytes# ) -- UTF-8 is cleverly designed so that we can always figure out where -- the start of the current character is, given any position in a -- stream. This function finds the start of the previous character, -- assuming there *is* a previous character. utf8PrevChar :: Ptr Word8 -> IO (Ptr Word8) utf8PrevChar p = utf8CharStart (p `plusPtr` (-1)) utf8CharStart :: Ptr Word8 -> IO (Ptr Word8) utf8CharStart p = go p where go p = do w <- peek p if w >= 0x80 && w < 0xC0 then go (p `plusPtr` (-1)) else return p {-# INLINE utf8DecodeLazy# #-} utf8DecodeLazy# :: (IO ()) -> (Int# -> (# Char#, Int# #)) -> Int# -> IO [Char] utf8DecodeLazy# retain decodeChar# len# = unpack 0# where unpack i# | isTrue# (i# >=# len#) = retain >> return [] | otherwise = case decodeChar# i# of (# c#, nBytes# #) -> do rest <- unsafeDupableInterleaveIO $ unpack (i# +# nBytes#) return (C# c# : rest) utf8DecodeByteString :: ByteString -> [Char] utf8DecodeByteString (BS.PS fptr offset len) = utf8DecodeStringLazy fptr offset len utf8UnconsByteString :: ByteString -> Maybe (Char, ByteString) utf8UnconsByteString (BS.PS _ _ 0) = Nothing utf8UnconsByteString (BS.PS fptr offset len) = unsafeDupablePerformIO $ withForeignPtr fptr $ \ptr -> do let (c,n) = utf8DecodeChar (ptr `plusPtr` offset) return $ Just (c, BS.PS fptr (offset + n) 
(len - n)) utf8DecodeStringLazy :: ForeignPtr Word8 -> Int -> Int -> [Char] utf8DecodeStringLazy fp offset (I# len#) = unsafeDupablePerformIO $ do let !(Ptr a#) = unsafeForeignPtrToPtr fp `plusPtr` offset utf8DecodeLazy# (touchForeignPtr fp) (utf8DecodeCharAddr# a#) len# -- Note that since utf8DecodeLazy# returns a thunk the lifetime of the -- ForeignPtr actually needs to be longer than the lexical lifetime -- withForeignPtr would provide here. That's why we use touchForeignPtr to -- keep the fp alive until the last character has actually been decoded. utf8CompareShortByteString :: ShortByteString -> ShortByteString -> Ordering utf8CompareShortByteString (SBS a1) (SBS a2) = go 0# 0# -- UTF-8 has the property that sorting by bytes values also sorts by -- code-points. -- BUT we use "Modified UTF-8" which encodes \0 as 0xC080 so this property -- doesn't hold and we must explicitly check this case here. -- Note that decoding every code point would also work but it would be much -- more costly. 
where !sz1 = sizeofByteArray# a1 !sz2 = sizeofByteArray# a2 go off1 off2 | isTrue# ((off1 >=# sz1) `andI#` (off2 >=# sz2)) = EQ | isTrue# (off1 >=# sz1) = LT | isTrue# (off2 >=# sz2) = GT | otherwise = #if !MIN_VERSION_base(4,16,0) let !b1_1 = indexWord8Array# a1 off1 !b2_1 = indexWord8Array# a2 off2 #else let !b1_1 = word8ToWord# (indexWord8Array# a1 off1) !b2_1 = word8ToWord# (indexWord8Array# a2 off2) #endif in case b1_1 of 0xC0## -> case b2_1 of 0xC0## -> go (off1 +# 1#) (off2 +# 1#) #if !MIN_VERSION_base(4,16,0) _ -> case indexWord8Array# a1 (off1 +# 1#) of #else _ -> case word8ToWord# (indexWord8Array# a1 (off1 +# 1#)) of #endif 0x80## -> LT _ -> go (off1 +# 1#) (off2 +# 1#) _ -> case b2_1 of #if !MIN_VERSION_base(4,16,0) 0xC0## -> case indexWord8Array# a2 (off2 +# 1#) of #else 0xC0## -> case word8ToWord# (indexWord8Array# a2 (off2 +# 1#)) of #endif 0x80## -> GT _ -> go (off1 +# 1#) (off2 +# 1#) _ | isTrue# (b1_1 `gtWord#` b2_1) -> GT | isTrue# (b1_1 `ltWord#` b2_1) -> LT | otherwise -> go (off1 +# 1#) (off2 +# 1#) utf8DecodeShortByteString :: ShortByteString -> [Char] utf8DecodeShortByteString (SBS ba#) = unsafeDupablePerformIO $ let len# = sizeofByteArray# ba# in utf8DecodeLazy# (return ()) (utf8DecodeCharByteArray# ba#) len# countUTF8Chars :: ShortByteString -> IO Int countUTF8Chars (SBS ba) = go 0# 0# where len# = sizeofByteArray# ba go i# n# | isTrue# (i# >=# len#) = return (I# n#) | otherwise = do case utf8DecodeCharByteArray# ba i# of (# _, nBytes# #) -> go (i# +# nBytes#) (n# +# 1#) {-# INLINE utf8EncodeChar #-} utf8EncodeChar :: (Int# -> Word8# -> State# s -> State# s) -> Char -> ST s Int utf8EncodeChar write# c = let x = fromIntegral (ord c) in case () of _ | x > 0 && x <= 0x007f -> do write 0 x return 1 -- NB. '\0' is encoded as '\xC0\x80', not '\0'. This is so that we -- can have 0-terminated UTF-8 strings (see GHC.Base.unpackCStringUtf8). | x <= 0x07ff -> do write 0 (0xC0 .|. ((x `shiftR` 6) .&. 0x1F)) write 1 (0x80 .|. (x .&. 
0x3F)) return 2 | x <= 0xffff -> do write 0 (0xE0 .|. (x `shiftR` 12) .&. 0x0F) write 1 (0x80 .|. (x `shiftR` 6) .&. 0x3F) write 2 (0x80 .|. (x .&. 0x3F)) return 3 | otherwise -> do write 0 (0xF0 .|. (x `shiftR` 18)) write 1 (0x80 .|. ((x `shiftR` 12) .&. 0x3F)) write 2 (0x80 .|. ((x `shiftR` 6) .&. 0x3F)) write 3 (0x80 .|. (x .&. 0x3F)) return 4 where {-# INLINE write #-} write (I# off#) (W# c#) = ST $ \s -> #if !MIN_VERSION_base(4,16,0) case write# off# (narrowWord8# c#) s of #else case write# off# (wordToWord8# c#) s of #endif s -> (# s, () #) utf8EncodeString :: String -> ByteString utf8EncodeString s = unsafePerformIO $ do let len = utf8EncodedLength s buf <- mallocForeignPtrBytes len withForeignPtr buf $ \ptr -> do utf8EncodeStringPtr ptr s pure (BS.fromForeignPtr buf 0 len) utf8EncodeStringPtr :: Ptr Word8 -> String -> IO () utf8EncodeStringPtr (Ptr a#) str = go a# str where go !_ [] = return () go a# (c:cs) = do #if !MIN_VERSION_base(4,16,0) -- writeWord8OffAddr# was taking a Word# I# off# <- stToIO $ utf8EncodeChar (\i w -> writeWord8OffAddr# a# i (extendWord8# w)) c #else I# off# <- stToIO $ utf8EncodeChar (writeWord8OffAddr# a#) c #endif go (a# `plusAddr#` off#) cs utf8EncodeShortByteString :: String -> IO ShortByteString utf8EncodeShortByteString str = IO $ \s -> case utf8EncodedLength str of { I# len# -> case newByteArray# len# s of { (# s, mba# #) -> case go mba# 0# str of { ST f_go -> case f_go s of { (# s, () #) -> case unsafeFreezeByteArray# mba# s of { (# s, ba# #) -> (# s, SBS ba# #) }}}}} where go _ _ [] = return () go mba# i# (c:cs) = do #if !MIN_VERSION_base(4,16,0) -- writeWord8Array# was taking a Word# I# off# <- utf8EncodeChar (\j# w -> writeWord8Array# mba# (i# +# j#) (extendWord8# w)) c #else I# off# <- utf8EncodeChar (\j# -> writeWord8Array# mba# (i# +# j#)) c #endif go mba# (i# +# off#) cs utf8EncodedLength :: String -> Int utf8EncodedLength str = go 0 str where go !n [] = n go n (c:cs) | ord c > 0 && ord c <= 0x007f = go (n+1) cs | ord 
c <= 0x07ff = go (n+2) cs | ord c <= 0xffff = go (n+3) cs | otherwise = go (n+4) cs -- ----------------------------------------------------------------------------- -- Note [Z-Encoding] -- ~~~~~~~~~~~~~~~~~ {- This is the main name-encoding and decoding function. It encodes any string into a string that is acceptable as a C name. This is done right before we emit a symbol name into the compiled C or asm code. Z-encoding of strings is cached in the FastString interface, so we never encode the same string more than once. The basic encoding scheme is this. * Tuples (,,,) are coded as Z3T * Alphabetic characters (upper and lower) and digits all translate to themselves; except 'Z', which translates to 'ZZ' and 'z', which translates to 'zz' We need both so that we can preserve the variable/tycon distinction * Most other printable characters translate to 'zx' or 'Zx' for some alphabetic character x * The others translate as 'znnnU' where 'nnn' is the decimal number of the character Before After -------------------------- Trak Trak foo_wib foozuwib > zg >1 zg1 foo# foozh foo## foozhzh foo##1 foozhzh1 fooZ fooZZ :+ ZCzp () Z0T 0-tuple (,,,,) Z5T 5-tuple (# #) Z1H unboxed 1-tuple (note the space) (#,,,,#) Z5H unboxed 5-tuple (NB: There is no Z1T nor Z0H.) -} type UserString = String -- As the user typed it type EncodedString = String -- Encoded form zEncodeString :: UserString -> EncodedString zEncodeString cs = case maybe_tuple cs of Just n -> n -- Tuples go to Z2T etc Nothing -> go cs where go [] = [] go (c:cs) = encode_digit_ch c ++ go' cs go' [] = [] go' (c:cs) = encode_ch c ++ go' cs unencodedChar :: Char -> Bool -- True for chars that don't need encoding unencodedChar 'Z' = False unencodedChar 'z' = False unencodedChar c = c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' -- If a digit is at the start of a symbol then we need to encode it. -- Otherwise package names like 9pH-0.1 give linker errors. 
-- | Z-encode a character appearing at the /start/ of a symbol.
-- Identical to 'encode_ch' except that leading digits are escaped
-- (see the comment above: bare leading digits in names such as those
-- derived from package @9pH-0.1@ give linker errors).
encode_digit_ch :: Char -> EncodedString
encode_digit_ch c | c >= '0' && c <= '9' = encode_as_unicode_char c
encode_digit_ch c | otherwise            = encode_ch c

-- | Z-encode a single character occurring anywhere after the first
-- position.  Characters passing 'unencodedChar' map to themselves;
-- a fixed table maps common punctuation to two-character @Z@/@z@ codes
-- (uppercase @Z@ codes for constructor-ish characters, lowercase @z@
-- codes for variable-ish ones); anything else falls through to the
-- numeric escape 'encode_as_unicode_char'.
encode_ch :: Char -> EncodedString
encode_ch c | unencodedChar c = [c]     -- Common case first

-- Constructors
encode_ch '('  = "ZL"   -- Needed for things like (,), and (->)
encode_ch ')'  = "ZR"   -- For symmetry with (
encode_ch '['  = "ZM"
encode_ch ']'  = "ZN"
encode_ch ':'  = "ZC"
encode_ch 'Z'  = "ZZ"

-- Variables
encode_ch 'z'  = "zz"
encode_ch '&'  = "za"
encode_ch '|'  = "zb"
encode_ch '^'  = "zc"
encode_ch '$'  = "zd"
encode_ch '='  = "ze"
encode_ch '>'  = "zg"
encode_ch '#'  = "zh"
encode_ch '.'  = "zi"
encode_ch '<'  = "zl"
encode_ch '-'  = "zm"
encode_ch '!'  = "zn"
encode_ch '+'  = "zp"
encode_ch '\'' = "zq"
encode_ch '\\' = "zr"
encode_ch '/'  = "zs"
encode_ch '*'  = "zt"
encode_ch '_'  = "zu"
encode_ch '%'  = "zv"
encode_ch c    = encode_as_unicode_char c

-- | Escape a character as @z@ followed by its code point in hex and a
-- terminating @U@ (e.g. @z224U@).  When the leading hex character is a
-- letter (a-f) a @0@ is prefixed, so the character after the @z@ is
-- always a digit -- the decoder ('zDecodeString') relies on that digit
-- to tell a numeric escape apart from the two-character @z@ codes above.
encode_as_unicode_char :: Char -> EncodedString
encode_as_unicode_char c = 'z' : if isDigit (head hex_str) then hex_str
                                                           else '0':hex_str
  where hex_str = showHex (ord c) "U"
  -- ToDo: we could improve the encoding here in various ways.
  -- eg. strings of unicode characters come out as 'z1234Uz5678U', we
  -- could remove the 'U' in the middle (the 'z' works as a separator).
zDecodeString :: EncodedString -> UserString zDecodeString [] = [] zDecodeString ('Z' : d : rest) | isDigit d = decode_tuple d rest | otherwise = decode_upper d : zDecodeString rest zDecodeString ('z' : d : rest) | isDigit d = decode_num_esc d rest | otherwise = decode_lower d : zDecodeString rest zDecodeString (c : rest) = c : zDecodeString rest decode_upper, decode_lower :: Char -> Char decode_upper 'L' = '(' decode_upper 'R' = ')' decode_upper 'M' = '[' decode_upper 'N' = ']' decode_upper 'C' = ':' decode_upper 'Z' = 'Z' decode_upper ch = {-pprTrace "decode_upper" (char ch)-} ch decode_lower 'z' = 'z' decode_lower 'a' = '&' decode_lower 'b' = '|' decode_lower 'c' = '^' decode_lower 'd' = '$' decode_lower 'e' = '=' decode_lower 'g' = '>' decode_lower 'h' = '#' decode_lower 'i' = '.' decode_lower 'l' = '<' decode_lower 'm' = '-' decode_lower 'n' = '!' decode_lower 'p' = '+' decode_lower 'q' = '\'' decode_lower 'r' = '\\' decode_lower 's' = '/' decode_lower 't' = '*' decode_lower 'u' = '_' decode_lower 'v' = '%' decode_lower ch = {-pprTrace "decode_lower" (char ch)-} ch -- Characters not having a specific code are coded as z224U (in hex) decode_num_esc :: Char -> EncodedString -> UserString decode_num_esc d rest = go (digitToInt d) rest where go n (c : rest) | isHexDigit c = go (16*n + digitToInt c) rest go n ('U' : rest) = chr n : zDecodeString rest go n other = error ("decode_num_esc: " ++ show n ++ ' ':other) decode_tuple :: Char -> EncodedString -> UserString decode_tuple d rest = go (digitToInt d) rest where -- NB. recurse back to zDecodeString after decoding the tuple, because -- the tuple might be embedded in a longer name. 
go n (c : rest) | isDigit c = go (10*n + digitToInt c) rest go 0 ('T':rest) = "()" ++ zDecodeString rest go n ('T':rest) = '(' : replicate (n-1) ',' ++ ")" ++ zDecodeString rest go 1 ('H':rest) = "(# #)" ++ zDecodeString rest go n ('H':rest) = '(' : '#' : replicate (n-1) ',' ++ "#)" ++ zDecodeString rest go n other = error ("decode_tuple: " ++ show n ++ ' ':other) {- Tuples are encoded as Z3T or Z3H for 3-tuples or unboxed 3-tuples respectively. No other encoding starts Z * "(# #)" is the tycon for an unboxed 1-tuple (not 0-tuple) There are no unboxed 0-tuples. * "()" is the tycon for a boxed 0-tuple. There are no boxed 1-tuples. -} maybe_tuple :: UserString -> Maybe EncodedString maybe_tuple "(# #)" = Just("Z1H") maybe_tuple ('(' : '#' : cs) = case count_commas (0::Int) cs of (n, '#' : ')' : _) -> Just ('Z' : shows (n+1) "H") _ -> Nothing maybe_tuple "()" = Just("Z0T") maybe_tuple ('(' : cs) = case count_commas (0::Int) cs of (n, ')' : _) -> Just ('Z' : shows (n+1) "T") _ -> Nothing maybe_tuple _ = Nothing count_commas :: Int -> String -> (Int, String) count_commas n (',' : cs) = count_commas (n+1) cs count_commas n cs = (n,cs) {- ************************************************************************ * * Base 62 * * ************************************************************************ Note [Base 62 encoding 128-bit integers] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Instead of base-62 encoding a single 128-bit integer (ceil(21.49) characters), we'll base-62 a pair of 64-bit integers (2 * ceil(10.75) characters). Luckily for us, it's the same number of characters! 
-} -------------------------------------------------------------------------- -- Base 62 -- The base-62 code is based off of 'locators' -- ((c) Operational Dynamics Consulting, BSD3 licensed) -- | Size of a 64-bit word when written as a base-62 string word64Base62Len :: Int word64Base62Len = 11 -- | Converts a 64-bit word into a base-62 string toBase62Padded :: Word64 -> String toBase62Padded w = pad ++ str where pad = replicate len '0' len = word64Base62Len - length str -- 11 == ceil(64 / lg 62) str = toBase62 w toBase62 :: Word64 -> String toBase62 w = showIntAtBase 62 represent w "" where represent :: Int -> Char represent x | x < 10 = Char.chr (48 + x) | x < 36 = Char.chr (65 + x - 10) | x < 62 = Char.chr (97 + x - 36) | otherwise = error "represent (base 62): impossible!" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Error.hs0000644000000000000000000004733614472400113020376 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DeriveFunctor #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ViewPatterns #-} {- (c) The AQUA Project, Glasgow University, 1994-1998 \section[ErrsUtils]{Utilities for error reporting} -} module GHC.Utils.Error ( -- * Basic types Validity'(..), Validity, andValid, allValid, getInvalids, Severity(..), -- * Messages Diagnostic(..), MsgEnvelope(..), MessageClass(..), SDoc, DecoratedSDoc(unDecorated), Messages, mkMessages, unionMessages, errorsFound, isEmptyMessages, -- ** Formatting pprMessageBag, pprMsgEnvelopeBagWithLoc, pprMessages, pprLocMsgEnvelope, formatBulleted, -- ** Construction DiagOpts (..), diag_wopt, diag_fatal_wopt, emptyMessages, mkDecorated, mkLocMessage, mkLocMessageAnn, mkMsgEnvelope, mkPlainMsgEnvelope, mkPlainErrorMsgEnvelope, mkErrorMsgEnvelope, mkMCDiagnostic, errorDiagnostic, diagReasonSeverity, mkPlainError, mkPlainDiagnostic, mkDecoratedError, mkDecoratedDiagnostic, noHints, -- * Utilities getCaretDiagnostic, -- * Issuing messages during compilation putMsg, printInfoForUser, printOutputForUser, logInfo, 
logOutput, errorMsg, fatalErrorMsg, compilationProgressMsg, showPass, withTiming, withTimingSilent, debugTraceMsg, ghcExit, prettyPrintGhcErrors, traceCmd, sortMsgBag ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Data.Bag import qualified GHC.Data.EnumSet as EnumSet import GHC.Data.EnumSet (EnumSet) import GHC.Utils.Exception import GHC.Utils.Outputable as Outputable import GHC.Utils.Panic import GHC.Utils.Panic.Plain import GHC.Utils.Logger import GHC.Types.Error import GHC.Types.SrcLoc as SrcLoc import System.Exit ( ExitCode(..), exitWith ) import Data.List ( sortBy ) import Data.Function import Debug.Trace import Control.Monad import Control.Monad.IO.Class import Control.Monad.Catch as MC (handle) import GHC.Conc ( getAllocationCounter ) import System.CPUTime data DiagOpts = DiagOpts { diag_warning_flags :: !(EnumSet WarningFlag) -- ^ Enabled warnings , diag_fatal_warning_flags :: !(EnumSet WarningFlag) -- ^ Fatal warnings , diag_warn_is_error :: !Bool -- ^ Treat warnings as errors , diag_reverse_errors :: !Bool -- ^ Reverse error reporting order , diag_max_errors :: !(Maybe Int) -- ^ Max reported error count , diag_ppr_ctx :: !SDocContext -- ^ Error printing context } diag_wopt :: WarningFlag -> DiagOpts -> Bool diag_wopt wflag opts = wflag `EnumSet.member` diag_warning_flags opts diag_fatal_wopt :: WarningFlag -> DiagOpts -> Bool diag_fatal_wopt wflag opts = wflag `EnumSet.member` diag_fatal_warning_flags opts -- | Computes the /right/ 'Severity' for the input 'DiagnosticReason' out of -- the 'DiagOpts. This function /has/ to be called when a diagnostic is constructed, -- i.e. with a 'DiagOpts \"snapshot\" taken as close as possible to where a -- particular diagnostic message is built, otherwise the computed 'Severity' might -- not be correct, due to the mutable nature of the 'DynFlags' in GHC. 
diagReasonSeverity :: DiagOpts -> DiagnosticReason -> Severity
diagReasonSeverity opts reason = case reason of
  WarningWithFlag wflag
    | not (diag_wopt wflag opts) -> SevIgnore    -- warning flag disabled: drop it
    | diag_fatal_wopt wflag opts -> SevError     -- -Werror=<flag> promotes to error
    | otherwise                  -> SevWarning
  WarningWithoutFlag
    | diag_warn_is_error opts -> SevError        -- plain -Werror promotes to error
    | otherwise               -> SevWarning
  ErrorWithoutFlag -> SevError

-- | Make a 'MessageClass' for a given 'DiagnosticReason', consulting the
-- 'DiagOpts'.
mkMCDiagnostic :: DiagOpts -> DiagnosticReason -> MessageClass
mkMCDiagnostic opts reason = MCDiagnostic (diagReasonSeverity opts reason) reason

-- | Variation of 'mkMCDiagnostic' which can be used when we are /sure/ the
-- input 'DiagnosticReason' /is/ 'ErrorWithoutFlag'.
errorDiagnostic :: MessageClass
errorDiagnostic = MCDiagnostic SevError ErrorWithoutFlag

--
-- Creating MsgEnvelope(s)
--

-- Internal worker shared by the mk*MsgEnvelope functions below: pairs a
-- diagnostic with its location, name-qualification context and severity.
mk_msg_envelope
  :: Diagnostic e
  => Severity
  -> SrcSpan
  -> PrintUnqualified
  -> e
  -> MsgEnvelope e
mk_msg_envelope severity locn print_unqual err
 = MsgEnvelope { errMsgSpan = locn
               , errMsgContext = print_unqual
               , errMsgDiagnostic = err
               , errMsgSeverity = severity
               }

-- | Wrap a 'Diagnostic' in a 'MsgEnvelope', recording its location.
-- If you know your 'Diagnostic' is an error, consider using 'mkErrorMsgEnvelope',
-- which does not require looking at the 'DiagOpts'.
mkMsgEnvelope
  :: Diagnostic e
  => DiagOpts
  -> SrcSpan
  -> PrintUnqualified
  -> e
  -> MsgEnvelope e
mkMsgEnvelope opts locn print_unqual err
 = mk_msg_envelope (diagReasonSeverity opts (diagnosticReason err)) locn print_unqual err

-- | Wrap a 'Diagnostic' in a 'MsgEnvelope', recording its location.
-- Precondition: the diagnostic is, in fact, an error. That is,
-- @diagnosticReason msg == ErrorWithoutFlag@.
mkErrorMsgEnvelope :: Diagnostic e
                   => SrcSpan
                   -> PrintUnqualified
                   -> e
                   -> MsgEnvelope e
mkErrorMsgEnvelope locn unqual msg =
 -- assertion only checked in debug builds; callers promise ErrorWithoutFlag
 assert (diagnosticReason msg == ErrorWithoutFlag) $ mk_msg_envelope SevError locn unqual msg

-- | Variant that doesn't care about qualified/unqualified names.
mkPlainMsgEnvelope :: Diagnostic e => DiagOpts -> SrcSpan -> e -> MsgEnvelope e mkPlainMsgEnvelope opts locn msg = mkMsgEnvelope opts locn alwaysQualify msg -- | Variant of 'mkPlainMsgEnvelope' which can be used when we are /sure/ we -- are constructing a diagnostic with a 'ErrorWithoutFlag' reason. mkPlainErrorMsgEnvelope :: Diagnostic e => SrcSpan -> e -> MsgEnvelope e mkPlainErrorMsgEnvelope locn msg = mk_msg_envelope SevError locn alwaysQualify msg ------------------------- data Validity' a = IsValid -- ^ Everything is fine | NotValid a -- ^ A problem, and some indication of why deriving Functor -- | Monomorphic version of @Validity'@ specialised for 'SDoc's. type Validity = Validity' SDoc andValid :: Validity' a -> Validity' a -> Validity' a andValid IsValid v = v andValid v _ = v -- | If they aren't all valid, return the first allValid :: [Validity' a] -> Validity' a allValid [] = IsValid allValid (v : vs) = v `andValid` allValid vs getInvalids :: [Validity' a] -> [a] getInvalids vs = [d | NotValid d <- vs] -- ----------------------------------------------------------------------------- -- Collecting up messages for later ordering and printing. ---------------- -- | Formats the input list of structured document, where each element of the list gets a bullet. formatBulleted :: SDocContext -> DecoratedSDoc -> SDoc formatBulleted ctx (unDecorated -> docs) = case msgs of [] -> Outputable.empty [msg] -> msg _ -> vcat $ map starred msgs where msgs = filter (not . Outputable.isEmpty ctx) docs starred = (bullet<+>) pprMessages :: Diagnostic e => Messages e -> SDoc pprMessages = vcat . pprMsgEnvelopeBagWithLoc . 
getMessages pprMsgEnvelopeBagWithLoc :: Diagnostic e => Bag (MsgEnvelope e) -> [SDoc] pprMsgEnvelopeBagWithLoc bag = [ pprLocMsgEnvelope item | item <- sortMsgBag Nothing bag ] pprLocMsgEnvelope :: Diagnostic e => MsgEnvelope e -> SDoc pprLocMsgEnvelope (MsgEnvelope { errMsgSpan = s , errMsgDiagnostic = e , errMsgSeverity = sev , errMsgContext = unqual }) = sdocWithContext $ \ctx -> withErrStyle unqual $ mkLocMessage (MCDiagnostic sev (diagnosticReason e)) s (formatBulleted ctx $ diagnosticMessage e) sortMsgBag :: Maybe DiagOpts -> Bag (MsgEnvelope e) -> [MsgEnvelope e] sortMsgBag mopts = maybeLimit . sortBy (cmp `on` errMsgSpan) . bagToList where cmp | Just opts <- mopts , diag_reverse_errors opts = SrcLoc.rightmost_smallest | otherwise = SrcLoc.leftmost_smallest maybeLimit | Just opts <- mopts , Just err_limit <- diag_max_errors opts = take err_limit | otherwise = id ghcExit :: Logger -> Int -> IO () ghcExit logger val | val == 0 = exitWith ExitSuccess | otherwise = do errorMsg logger (text "\nCompilation had errors\n\n") exitWith (ExitFailure val) -- ----------------------------------------------------------------------------- -- Outputting messages from the compiler errorMsg :: Logger -> SDoc -> IO () errorMsg logger msg = logMsg logger errorDiagnostic noSrcSpan $ withPprStyle defaultErrStyle msg fatalErrorMsg :: Logger -> SDoc -> IO () fatalErrorMsg logger msg = logMsg logger MCFatal noSrcSpan $ withPprStyle defaultErrStyle msg compilationProgressMsg :: Logger -> SDoc -> IO () compilationProgressMsg logger msg = do let logflags = logFlags logger let str = renderWithContext (log_default_user_context logflags) (text "GHC progress: " <> msg) traceEventIO str when (logVerbAtLeast logger 1) $ logOutput logger $ withPprStyle defaultUserStyle msg showPass :: Logger -> String -> IO () showPass logger what = when (logVerbAtLeast logger 2) $ logInfo logger $ withPprStyle defaultUserStyle (text "***" <+> text what <> colon) data PrintTimings = PrintTimings | 
DontPrintTimings deriving (Eq, Show) -- | Time a compilation phase. -- -- When timings are enabled (e.g. with the @-v2@ flag), the allocations -- and CPU time used by the phase will be reported to stderr. Consider -- a typical usage: -- @withTiming getDynFlags (text "simplify") force PrintTimings pass@. -- When timings are enabled the following costs are included in the -- produced accounting, -- -- - The cost of executing @pass@ to a result @r@ in WHNF -- - The cost of evaluating @force r@ to WHNF (e.g. @()@) -- -- The choice of the @force@ function depends upon the amount of forcing -- desired; the goal here is to ensure that the cost of evaluating the result -- is, to the greatest extent possible, included in the accounting provided by -- 'withTiming'. Often the pass already sufficiently forces its result during -- construction; in this case @const ()@ is a reasonable choice. -- In other cases, it is necessary to evaluate the result to normal form, in -- which case something like @Control.DeepSeq.rnf@ is appropriate. -- -- To avoid adversely affecting compiler performance when timings are not -- requested, the result is only forced when timings are enabled. -- -- See Note [withTiming] for more. withTiming :: MonadIO m => Logger -> SDoc -- ^ The name of the phase -> (a -> ()) -- ^ A function to force the result -- (often either @const ()@ or 'rnf') -> m a -- ^ The body of the phase to be timed -> m a withTiming logger what force action = withTiming' logger what force PrintTimings action -- | Same as 'withTiming', but doesn't print timings in the -- console (when given @-vN@, @N >= 2@ or @-ddump-timings@). -- -- See Note [withTiming] for more. 
withTimingSilent :: MonadIO m => Logger -> SDoc -- ^ The name of the phase -> (a -> ()) -- ^ A function to force the result -- (often either @const ()@ or 'rnf') -> m a -- ^ The body of the phase to be timed -> m a withTimingSilent logger what force action = withTiming' logger what force DontPrintTimings action -- | Worker for 'withTiming' and 'withTimingSilent'. withTiming' :: MonadIO m => Logger -> SDoc -- ^ The name of the phase -> (a -> ()) -- ^ A function to force the result -- (often either @const ()@ or 'rnf') -> PrintTimings -- ^ Whether to print the timings -> m a -- ^ The body of the phase to be timed -> m a withTiming' logger what force_result prtimings action = if logVerbAtLeast logger 2 || logHasDumpFlag logger Opt_D_dump_timings then do whenPrintTimings $ logInfo logger $ withPprStyle defaultUserStyle $ text "***" <+> what <> colon let ctx = log_default_user_context (logFlags logger) alloc0 <- liftIO getAllocationCounter start <- liftIO getCPUTime eventBegins ctx what recordAllocs alloc0 !r <- action () <- pure $ force_result r eventEnds ctx what end <- liftIO getCPUTime alloc1 <- liftIO getAllocationCounter recordAllocs alloc1 -- recall that allocation counter counts down let alloc = alloc0 - alloc1 time = realToFrac (end - start) * 1e-9 when (logVerbAtLeast logger 2 && prtimings == PrintTimings) $ liftIO $ logInfo logger $ withPprStyle defaultUserStyle (text "!!!" <+> what <> colon <+> text "finished in" <+> doublePrec 2 time <+> text "milliseconds" <> comma <+> text "allocated" <+> doublePrec 3 (realToFrac alloc / 1024 / 1024) <+> text "megabytes") whenPrintTimings $ putDumpFileMaybe logger Opt_D_dump_timings "" FormatText $ text $ showSDocOneLine ctx $ hsep [ what <> colon , text "alloc=" <> ppr alloc , text "time=" <> doublePrec 3 time ] pure r else action where whenPrintTimings = liftIO . 
when (prtimings == PrintTimings) recordAllocs alloc = liftIO $ traceMarkerIO $ "GHC:allocs:" ++ show alloc eventBegins ctx w = do let doc = eventBeginsDoc ctx w whenPrintTimings $ traceMarkerIO doc liftIO $ traceEventIO doc eventEnds ctx w = do let doc = eventEndsDoc ctx w whenPrintTimings $ traceMarkerIO doc liftIO $ traceEventIO doc eventBeginsDoc ctx w = showSDocOneLine ctx $ text "GHC:started:" <+> w eventEndsDoc ctx w = showSDocOneLine ctx $ text "GHC:finished:" <+> w debugTraceMsg :: Logger -> Int -> SDoc -> IO () debugTraceMsg logger val msg = when (log_verbosity (logFlags logger) >= val) $ logInfo logger (withPprStyle defaultDumpStyle msg) {-# INLINE debugTraceMsg #-} -- see Note [INLINE conditional tracing utilities] putMsg :: Logger -> SDoc -> IO () putMsg logger msg = logInfo logger (withPprStyle defaultUserStyle msg) printInfoForUser :: Logger -> PrintUnqualified -> SDoc -> IO () printInfoForUser logger print_unqual msg = logInfo logger (withUserStyle print_unqual AllTheWay msg) printOutputForUser :: Logger -> PrintUnqualified -> SDoc -> IO () printOutputForUser logger print_unqual msg = logOutput logger (withUserStyle print_unqual AllTheWay msg) logInfo :: Logger -> SDoc -> IO () logInfo logger msg = logMsg logger MCInfo noSrcSpan msg -- | Like 'logInfo' but with 'SevOutput' rather then 'SevInfo' logOutput :: Logger -> SDoc -> IO () logOutput logger msg = logMsg logger MCOutput noSrcSpan msg prettyPrintGhcErrors :: ExceptionMonad m => Logger -> m a -> m a prettyPrintGhcErrors logger = do let ctx = log_default_user_context (logFlags logger) MC.handle $ \e -> case e of PprPanic str doc -> pprDebugAndThen ctx panic (text str) doc PprSorry str doc -> pprDebugAndThen ctx sorry (text str) doc PprProgramError str doc -> pprDebugAndThen ctx pgmError (text str) doc _ -> liftIO $ throwIO e -- | Trace a command (when verbosity level >= 3) traceCmd :: Logger -> String -> String -> IO a -> IO a traceCmd logger phase_name cmd_line action = do showPass logger 
phase_name let cmd_doc = text cmd_line handle_exn exn = do debugTraceMsg logger 2 (char '\n') debugTraceMsg logger 2 (text "Failed:" <+> cmd_doc <+> text (show exn)) throwGhcExceptionIO (ProgramError (show exn)) debugTraceMsg logger 3 cmd_doc loggerTraceFlush logger -- And run it! action `catchIO` handle_exn {- Note [withTiming] ~~~~~~~~~~~~~~~~~~~~ For reference: withTiming :: MonadIO => m DynFlags -- how to get the DynFlags -> SDoc -- label for the computation we're timing -> (a -> ()) -- how to evaluate the result -> PrintTimings -- whether to report the timings when passed -- -v2 or -ddump-timings -> m a -- computation we're timing -> m a withTiming lets you run an action while: (1) measuring the CPU time it took and reporting that on stderr (when PrintTimings is passed), (2) emitting start/stop events to GHC's event log, with the label given as an argument. Evaluation of the result ------------------------ 'withTiming' takes as an argument a function of type 'a -> ()', whose purpose is to evaluate the result "sufficiently". A given pass might return an 'm a' for some monad 'm' and result type 'a', but where the 'a' is complex enough that evaluating it to WHNF barely scratches its surface and leaves many complex and time-consuming computations unevaluated. Those would only be forced by the next pass, and the time needed to evaluate them would be mis-attributed to that next pass. A more appropriate function would be one that deeply evaluates the result, so as to assign the time spent doing it to the pass we're timing. Note: as hinted at above, the time spent evaluating the application of the forcing function to the result is included in the timings reported by 'withTiming'. How we use it ------------- We measure the time and allocations of various passes in GHC's pipeline by just wrapping the whole pass with 'withTiming'. 
This also materializes by having a label for each pass in the eventlog, where each pass is executed in one go, during a continuous time window. However, from STG onwards, the pipeline uses streams to emit groups of STG/Cmm/etc declarations one at a time, and process them until we get to assembly code generation. This means that the execution of those last few passes is interleaved and that we cannot measure how long they take by just wrapping the whole thing with 'withTiming'. Instead we wrap the processing of each individual stream element, all along the codegen pipeline, using the appropriate label for the pass to which this processing belongs. That generates a lot more data but allows us to get fine-grained timings about all the passes and we can easily compute totals with tools like ghc-events-analyze (see below). Producing an eventlog for GHC ----------------------------- You can produce an eventlog when compiling, for instance, hello.hs by simply running: If GHC was built by Hadrian: $ _build/stage1/bin/ghc -ddump-timings hello.hs -o hello +RTS -l If GHC was built with Make: $ inplace/bin/ghc-stage2 -ddump-timing hello.hs -o hello +RTS -l You could alternatively use -v (with N >= 2) instead of -ddump-timings, to ask GHC to report timings (on stderr and the eventlog). This will write the eventlog to ./ghc.eventlog in both cases. You can then visualize it or look at the totals for each label by using ghc-events-analyze, threadscope or any other eventlog consumer. Illustrating with ghc-events-analyze: $ ghc-events-analyze --timed --timed-txt --totals \ --start "GHC:started:" --stop "GHC:finished:" \ ghc.eventlog This produces ghc.timed.txt (all event timestamps), ghc.timed.svg (visualisation of the execution through the various labels) and ghc.totals.txt (total time spent in each label). 
-} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Exception.hs0000644000000000000000000000124114472400113021224 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-deprecations #-} {-# LANGUAGE ConstraintKinds #-} module GHC.Utils.Exception ( module CE, module GHC.Utils.Exception ) where import GHC.Prelude import GHC.IO (catchException) import Control.Exception as CE hiding (assert) import Control.Monad.IO.Class import Control.Monad.Catch -- Monomorphised versions of exception-handling utilities catchIO :: IO a -> (IOException -> IO a) -> IO a catchIO = catchException handleIO :: (IOException -> IO a) -> IO a -> IO a handleIO = flip catchIO tryIO :: IO a -> IO (Either IOException a) tryIO = CE.try type ExceptionMonad m = (MonadCatch m, MonadThrow m, MonadMask m, MonadIO m) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/FV.hs0000644000000000000000000001537514472400113017616 0ustar0000000000000000{- (c) Bartosz Nitka, Facebook 2015 -} {-# LANGUAGE BangPatterns #-} -- | Utilities for efficiently and deterministically computing free variables. module GHC.Utils.FV ( -- * Deterministic free vars computations FV, InterestingVarFun, -- * Running the computations fvVarList, fvVarSet, fvDVarSet, -- ** Manipulating those computations unitFV, emptyFV, mkFVs, unionFV, unionsFV, delFV, delFVs, filterFV, mapUnionFV, ) where import GHC.Prelude import GHC.Types.Var import GHC.Types.Var.Set -- | Predicate on possible free variables: returns @True@ iff the variable is -- interesting type InterestingVarFun = Var -> Bool -- Note [Deterministic FV] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- When computing free variables, the order in which you get them affects -- the results of floating and specialization. If you use UniqFM to collect -- them and then turn that into a list, you get them in nondeterministic -- order as described in Note [Deterministic UniqFM] in GHC.Types.Unique.DFM. -- A naive algorithm for free variables relies on merging sets of variables. 
-- Merging costs O(n+m) for UniqFM and for UniqDFM there's an additional log -- factor. It's cheaper to incrementally add to a list and use a set to check -- for duplicates. type FV = InterestingVarFun -- Used for filtering sets as we build them -> VarSet -- Locally bound variables -> VarAcc -- Accumulator -> VarAcc type VarAcc = ([Var], VarSet) -- List to preserve ordering and set to check for membership, -- so that the list doesn't have duplicates -- For explanation of why using `VarSet` is not deterministic see -- Note [Deterministic UniqFM] in GHC.Types.Unique.DFM. -- Note [FV naming conventions] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- To get the performance and determinism that FV provides, FV computations -- need to built up from smaller FV computations and then evaluated with -- one of `fvVarList`, `fvDVarSet` That means the functions -- returning FV need to be exported. -- -- The conventions are: -- -- a) non-deterministic functions: -- * a function that returns VarSet -- e.g. `tyVarsOfType` -- b) deterministic functions: -- * a worker that returns FV -- e.g. `tyFVsOfType` -- * a function that returns [Var] -- e.g. `tyVarsOfTypeList` -- * a function that returns DVarSet -- e.g. `tyVarsOfTypeDSet` -- -- Where tyVarsOfType, tyVarsOfTypeList, tyVarsOfTypeDSet are implemented -- in terms of the worker evaluated with fvVarSet, fvVarList, fvDVarSet -- respectively. -- | Run a free variable computation, returning a list of distinct free -- variables in deterministic order and a non-deterministic set containing -- those variables. fvVarAcc :: FV -> ([Var], VarSet) fvVarAcc fv = fv (const True) emptyVarSet ([], emptyVarSet) -- | Run a free variable computation, returning a list of distinct free -- variables in deterministic order. fvVarList :: FV -> [Var] fvVarList = fst . fvVarAcc -- | Run a free variable computation, returning a deterministic set of free -- variables. Note that this is just a wrapper around the version that -- returns a deterministic list. 
If you need a list you should use -- `fvVarList`. fvDVarSet :: FV -> DVarSet fvDVarSet = mkDVarSet . fvVarList -- | Run a free variable computation, returning a non-deterministic set of -- free variables. Don't use if the set will be later converted to a list -- and the order of that list will impact the generated code. fvVarSet :: FV -> VarSet fvVarSet = snd . fvVarAcc -- Note [FV eta expansion] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- Let's consider an eta-reduced implementation of freeVarsOf using FV: -- -- freeVarsOf (App a b) = freeVarsOf a `unionFV` freeVarsOf b -- -- If GHC doesn't eta-expand it, after inlining unionFV we end up with -- -- freeVarsOf = \x -> -- case x of -- App a b -> \fv_cand in_scope acc -> -- freeVarsOf a fv_cand in_scope $! freeVarsOf b fv_cand in_scope $! acc -- -- which has to create a thunk, resulting in more allocations. -- -- On the other hand if it is eta-expanded: -- -- freeVarsOf (App a b) fv_cand in_scope acc = -- (freeVarsOf a `unionFV` freeVarsOf b) fv_cand in_scope acc -- -- after inlining unionFV we have: -- -- freeVarsOf = \x fv_cand in_scope acc -> -- case x of -- App a b -> -- freeVarsOf a fv_cand in_scope $! freeVarsOf b fv_cand in_scope $! acc -- -- which saves allocations. -- -- GHC when presented with knowledge about all the call sites, correctly -- eta-expands in this case. Unfortunately due to the fact that freeVarsOf gets -- exported to be composed with other functions, GHC doesn't have that -- information and has to be more conservative here. -- -- Hence functions that get exported and return FV need to be manually -- eta-expanded. See also #11146. -- | Add a variable - when free, to the returned free variables. -- Ignores duplicates and respects the filtering function. unitFV :: Id -> FV unitFV var fv_cand in_scope acc@(have, haveSet) | var `elemVarSet` in_scope = acc | var `elemVarSet` haveSet = acc | fv_cand var = (var:have, extendVarSet haveSet var) | otherwise = acc {-# INLINE unitFV #-} -- | Return no free variables. 
-- | Map a free-variable computation over a list and union the results,
-- threading the accumulator strictly ('$!') through every element so
-- no thunk chain builds up.  Kept manually eta-expanded; see
-- Note [FV eta expansion].
mapUnionFV :: (a -> FV) -> [a] -> FV
mapUnionFV _ []       _fv_cand _in_scope acc = acc
mapUnionFV f (x : xs) fv_cand  in_scope  acc =
    mapUnionFV f xs fv_cand in_scope $! f x fv_cand in_scope $! acc
{-# INLINABLE mapUnionFV #-}
-- | Parse a 32-hex-digit string (e.g. the output of @md5sum@) into a
-- 'Fingerprint': the first 16 digits become the first 'Word64', the
-- next 16 the second.
--
-- On malformed input this now calls 'error' with a message naming the
-- offending string; previously the partial patterns
-- @[(w,"")] = readHex …@ died with an uninformative pattern-match
-- failure (silenced by @-Wno-incomplete-uni-patterns@).
readHexFingerprint :: String -> Fingerprint
readHexFingerprint s = Fingerprint (parseHalf s1) (parseHalf (take 16 s2))
  where
    (s1, s2) = splitAt 16 s
    -- Each half must be a complete run of hex digits.
    parseHalf half = case readHex half of
      [(w, "")] -> w
      _         -> error ("readHexFingerprint: invalid hex string: " ++ show s)
-- | Unsafely read the global \"ppr debug\" flag.
unsafeHasPprDebug :: Bool
unsafeHasPprDebug = unsafePerformIO (readIORef v_unsafeHasPprDebug)

-- | Unsafely read the global \"no debug output\" flag.
unsafeHasNoDebugOutput :: Bool
unsafeHasNoDebugOutput = unsafePerformIO (readIORef v_unsafeHasNoDebugOutput)

-- | Unsafely read the global \"no state hack\" flag.
unsafeHasNoStateHack :: Bool
unsafeHasNoStateHack = unsafePerformIO (readIORef v_unsafeHasNoStateHack)
-- | Atomically push a value onto the front of the list held in the
-- given 'IORef'.
consIORef :: IORef [a] -> a -> IO ()
consIORef ref x = atomicModifyIORef' ref (\old -> (x : old, ()))
-- | Escape a string for embedding in a JSON document: the standard
-- two-character escapes for common control characters, quote and
-- backslash, and @\\uXXXX@ escapes for every other control or
-- non-ASCII character.
escapeJsonString :: String -> String
escapeJsonString = concatMap escape
  where
    escape :: Char -> String
    escape '\b' = "\\b"
    escape '\f' = "\\f"
    escape '\n' = "\\n"
    escape '\r' = "\\r"
    escape '\t' = "\\t"
    escape '"'  = "\\\""
    escape '\\' = "\\\\"
    escape c
      | isControl c || fromEnum c >= 0x7f = unicodeEscape c
      | otherwise                         = [c]

    -- \uXXXX form, left-padded with zeroes to at least four hex digits
    unicodeEscape c = "\\u" ++ leftPad 4 (showHex (fromEnum c) "")
    leftPad n cs    = replicate (n - length cs) '0' ++ cs
escapeJsonString instance ToJson Int where json = JSInt ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Lexeme.hs0000644000000000000000000002072514472400113020515 0ustar0000000000000000-- (c) The GHC Team -- -- Functions to evaluate whether or not a string is a valid identifier. -- There is considerable overlap between the logic here and the logic -- in GHC.Parser.Lexer, but sadly there seems to be no way to merge them. module GHC.Utils.Lexeme ( -- * Lexical characteristics of Haskell names -- | Use these functions to figure what kind of name a 'FastString' -- represents; these functions do /not/ check that the identifier -- is valid. isLexCon, isLexVar, isLexId, isLexSym, isLexConId, isLexConSym, isLexVarId, isLexVarSym, startsVarSym, startsVarId, startsConSym, startsConId, -- * Validating identifiers -- | These functions (working over plain old 'String's) check -- to make sure that the identifier is valid. okVarOcc, okConOcc, okTcOcc, okVarIdOcc, okVarSymOcc, okConIdOcc, okConSymOcc -- Some of the exports above are not used within GHC, but may -- be of value to GHC API users. ) where import GHC.Prelude import GHC.Data.FastString import Data.Char import qualified Data.Set as Set import GHC.Lexeme {- ************************************************************************ * * Lexical categories * * ************************************************************************ These functions test strings to see if they fit the lexical categories defined in the Haskell report. Note [Classification of generated names] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some names generated for internal use can show up in debugging output, e.g. when using -ddump-simpl. These generated names start with a $ but should still be pretty-printed using prefix notation. We make sure this is the case in isLexVarSym by only classifying a name as a symbol if all its characters are symbols, not just its first one. 
-- | Is this an acceptable constructor name?  Covers prefix
-- constructors, symbolic (infix) constructors, and the special list
-- constructor @[]@.
okConOcc :: String -> Bool
okConOcc str = case str of
  (c : _)
    | startsConId c  -> okConIdOcc str
    | startsConSym c -> okConSymOcc str
    | str == "[]"    -> True
  _ -> False
-- | Is this an acceptable symbolic variable name, assuming it starts
-- with an acceptable character?  Rejects reserved operators and
-- comment-like runs of dashes.
okVarSymOcc :: String -> Bool
okVarSymOcc str =
  and [ all okSymChar str
      , not (Set.member str reservedOps)
      , not (isDashes str)
      ]
-- | Is this an acceptable symbolic constructor name, assuming it
-- starts with an acceptable character?  The bare cons operator @:@ is
-- always accepted.
okConSymOcc :: String -> Bool
okConSymOcc str
  | str == ":" = True
  | otherwise  = all okSymChar str && not (Set.member str reservedOps)
-- | Is this character acceptable in an identifier after the first
-- letter?  Mirrors alexGetByte in GHC.Parser.Lexer.
okIdChar :: Char -> Bool
okIdChar c =
  c == '\'' || c == '_' || generalCategory c `elem` identifierCategories
  where
    identifierCategories =
      [ UppercaseLetter
      , LowercaseLetter
      , TitlecaseLetter
      , ModifierLetter   -- See #10196
      , OtherLetter      -- See #1103
      , NonSpacingMark   -- See #7650
      , DecimalNumber
      , OtherNumber      -- See #4373
      ]
-- * Logger setup , initLogger , LogAction , DumpAction , TraceAction , DumpFormat (..) -- ** Hooks , popLogHook , pushLogHook , popDumpHook , pushDumpHook , popTraceHook , pushTraceHook , makeThreadSafe -- ** Flags , LogFlags (..) , defaultLogFlags , log_dopt , log_set_dopt , setLogFlags , updateLogFlags , logFlags , logHasDumpFlag , logVerbAtLeast -- * Logging , jsonLogAction , putLogMsg , defaultLogAction , defaultLogActionHPrintDoc , defaultLogActionHPutStrDoc , logMsg , logDumpMsg -- * Dumping , defaultDumpAction , putDumpFile , putDumpFileMaybe , putDumpFileMaybe' , withDumpFileHandle , touchDumpFile , logDumpFile -- * Tracing , defaultTraceAction , putTraceMsg , loggerTraceFlushUpdate , loggerTraceFlush , logTraceMsg ) where import GHC.Prelude import GHC.Driver.Flags import GHC.Types.Error import GHC.Types.SrcLoc import qualified GHC.Utils.Ppr as Pretty import GHC.Utils.Outputable import GHC.Utils.Json import GHC.Utils.Panic import GHC.Data.EnumSet (EnumSet) import qualified GHC.Data.EnumSet as EnumSet import Data.IORef import System.Directory import System.FilePath ( takeDirectory, () ) import qualified Data.Set as Set import Data.Set (Set) import Data.List (intercalate, stripPrefix) import qualified Data.List.NonEmpty as NE import Data.Time import System.IO import Control.Monad import Control.Concurrent.MVar import System.IO.Unsafe import Debug.Trace (trace) --------------------------------------------------------------- -- Log flags --------------------------------------------------------------- -- | Logger flags data LogFlags = LogFlags { log_default_user_context :: SDocContext , log_default_dump_context :: SDocContext , log_dump_flags :: !(EnumSet DumpFlag) -- ^ Dump flags , log_show_caret :: !Bool -- ^ Show caret in diagnostics , log_show_warn_groups :: !Bool -- ^ Show warning flag groups , log_enable_timestamps :: !Bool -- ^ Enable timestamps , log_dump_to_file :: !Bool -- ^ Enable dump to file , log_dump_dir :: !(Maybe FilePath) -- ^ Dump directory , 
-- | Enable a 'DumpFlag' in the given 'LogFlags'.
log_set_dopt :: DumpFlag -> LogFlags -> LogFlags
log_set_dopt f logflags = logflags { log_dump_flags = flags' }
  where
    flags' = EnumSet.insert f (log_dump_flags logflags)
-- | Create a fresh 'Logger': empty hook stacks, default flags, an
-- empty dump-file cache, and the default (stderr-flushing) trace
-- flush action.
initLogger :: IO Logger
initLogger = do
  dump_cache <- newIORef Set.empty
  return Logger
    { log_hook        = []
    , dump_hook       = []
    , trace_hook      = []
    , generated_dumps = dump_cache
    , trace_flush     = defaultTraceFlush
    , logFlags        = defaultLogFlags
    }
-- | Pop the most recently pushed log hook; panics if the hook stack
-- is empty.
popLogHook :: Logger -> Logger
popLogHook logger
  | _ : rest <- log_hook logger = logger { log_hook = rest }
  | otherwise                   = panic "popLogHook: empty hook stack"
-- | Emit a log message as a JSON object (span, rendered doc, message
-- class) on stdout.  'SevIgnore' diagnostics are dropped.
-- See Note [JSON Error Messages].
jsonLogAction :: LogAction
jsonLogAction _ (MCDiagnostic SevIgnore _) _ _ = return () -- suppressed
jsonLogAction logflags msg_class srcSpan msg =
  defaultLogActionHPutStrDoc logflags True stdout
    (withPprStyle (PprCode CStyle) (rendered $$ text ""))
  where
    body     = renderWithContext (log_default_user_context logflags) msg
    rendered = renderJSON $ JSObject
      [ ( "span"        , json srcSpan )
      , ( "doc"         , JSString body )
      , ( "messageClass", json msg_class )
      ]
-- | Like 'defaultLogActionHPutStrDoc', but appends a trailing newline
-- to the document before printing.
defaultLogActionHPrintDoc :: LogFlags -> Bool -> Handle -> SDoc -> IO ()
defaultLogActionHPrintDoc logflags asciiSpace h doc =
  defaultLogActionHPutStrDoc logflags asciiSpace h (doc $$ text "")
-- | Default action for the 'dumpAction' hook: render the document via
-- 'dumpSDocWithStyle', which writes to a dump file when
-- @-ddump-to-file@ is active and otherwise falls back on the supplied
-- 'LogAction'.
defaultDumpAction :: DumpCache -> LogAction -> DumpAction
defaultDumpAction dump_cache fallback logflags sty flag title _fmt doc =
  dumpSDocWithStyle dump_cache fallback sty logflags flag title doc
-- | Run an action with the handle of the dump file chosen for the
-- given 'DumpFlag', or with 'Nothing' when we are not dumping to a
-- file.  The first use of a file in this session opens it in
-- 'WriteMode'; later uses append, so several dumps can share one file.
-- Dump files are always written as UTF-8, independent of environment
-- variables (see GHC issue #10762).
withDumpFileHandle :: DumpCache -> LogFlags -> DumpFlag -> (Maybe Handle -> IO ()) -> IO ()
withDumpFileHandle dumps logflags flag action =
  case chooseDumpFile logflags flag of
    Nothing -> action Nothing
    Just fileName -> do
      seen <- readIORef dumps
      let already_dumped = Set.member fileName seen
          mode | already_dumped = AppendMode
               | otherwise      = WriteMode
      unless already_dumped $
        writeIORef dumps (Set.insert fileName seen)
      createDirectoryIfMissing True (takeDirectory fileName)
      withFile fileName mode $ \handle -> do
        -- Force UTF-8 regardless of locale settings; see #10762
        hSetEncoding handle utf8
        action (Just handle)
-- | Surround a dump with a @====@ header line carrying the given title
-- and blank-line separators above and below.
mkDumpDoc :: String -> SDoc -> SDoc
mkDumpDoc hdr doc =
  vcat [ blankLine
       , ruler <+> text hdr <+> ruler
       , doc
       , blankLine
       ]
  where
    ruler = text "===================="
logDumpFile' #-} -- NOINLINE: Now we are past the conditional, into the "cold" path, -- don't inline, to reduce code size at the call site -- See Note [INLINE conditional tracing utilities] logDumpFile' logger printer flag hdr fmt doc = logDumpFile logger (mkDumpStyle printer) flag hdr fmt doc -- | Ensure that a dump file is created even if it stays empty touchDumpFile :: Logger -> DumpFlag -> IO () touchDumpFile logger flag = withDumpFileHandle (generated_dumps logger) (logFlags logger) flag (const (return ())) class HasLogger m where getLogger :: m Logger class ContainsLogger t where extractLogger :: t -> Logger ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Misc.hs0000644000000000000000000014350314472400113020171 0ustar0000000000000000-- (c) The University of Glasgow 2006 {-# LANGUAGE CPP #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE MagicHash #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} -- | Highly random utility functions -- module GHC.Utils.Misc ( -- * Miscellaneous higher-order functions applyWhen, nTimes, const2, -- * General list processing zipEqual, zipWithEqual, zipWith3Equal, zipWith4Equal, zipLazy, stretchZipWith, zipWithAndUnzip, zipAndUnzip, zipWithLazy, zipWith3Lazy, filterByList, filterByLists, partitionByList, unzipWith, mapFst, mapSnd, chkAppend, mapAndUnzip, mapAndUnzip3, filterOut, partitionWith, mapAccumM, dropWhileEndLE, spanEnd, last2, lastMaybe, onJust, List.foldl1', foldl2, count, countWhile, all2, lengthExceeds, lengthIs, lengthIsNot, lengthAtLeast, lengthAtMost, lengthLessThan, listLengthCmp, atLength, equalLength, compareLength, leLength, ltLength, isSingleton, only, expectOnly, GHC.Utils.Misc.singleton, notNull, snocView, chunkList, changeLast, mapLastM, whenNonEmpty, mergeListsBy, isSortedBy, -- Foldable generalised functions, mapMaybe', -- * Tuples fstOf3, sndOf3, thdOf3, firstM, first3M, 
secondM, fst3, snd3, third3, uncurry3, liftFst, liftSnd, -- * List operations controlled by another list takeList, dropList, splitAtList, split, dropTail, capitalise, -- * Sorting sortWith, minWith, nubSort, ordNub, ordNubOn, -- * Comparisons isEqual, eqListBy, eqMaybeBy, thenCmp, cmpList, removeSpaces, (<&&>), (<||>), -- * Edit distance fuzzyMatch, fuzzyLookup, -- * Transitive closures transitiveClosure, -- * Strictness seqList, strictMap, strictZipWith, strictZipWith3, -- * Module names looksLikeModuleName, looksLikePackageName, -- * Integers exactLog2, -- * Floating point readRational, readSignificandExponentPair, readHexRational, readHexSignificandExponentPair, -- * IO-ish utilities doesDirNameExist, getModificationUTCTime, modificationTimeIfExists, fileHashIfExists, withAtomicRename, -- * Filenames and paths Suffix, splitLongestPrefix, escapeSpaces, Direction(..), reslash, makeRelativeTo, -- * Utils for defining Data instances abstractConstr, abstractDataType, mkNoRepType, -- * Utils for printing C code charToC, -- * Hashing hashString, -- * Call stacks HasCallStack, HasDebugCallStack, ) where import GHC.Prelude import GHC.Utils.Exception import GHC.Utils.Panic.Plain import GHC.Utils.Constants import GHC.Utils.Fingerprint import Data.Data import qualified Data.List as List import Data.List.NonEmpty ( NonEmpty(..) 
) import GHC.Exts import GHC.Stack (HasCallStack) import Control.Applicative ( liftA2 ) import Control.Monad ( liftM, guard ) import Control.Monad.IO.Class ( MonadIO, liftIO ) import System.IO.Error as IO ( isDoesNotExistError ) import System.Directory ( doesDirectoryExist, getModificationTime, renameFile ) import System.FilePath import Data.Char ( isUpper, isAlphaNum, isSpace, chr, ord, isDigit, toUpper , isHexDigit, digitToInt ) import Data.Int import Data.Ratio ( (%) ) import Data.Ord ( comparing ) import Data.Word import qualified Data.IntMap as IM import qualified Data.Set as Set import Data.Time infixr 9 `thenCmp` {- ************************************************************************ * * \subsection{Miscellaneous higher-order functions} * * ************************************************************************ -} -- | Apply a function iff some condition is met. applyWhen :: Bool -> (a -> a) -> a -> a applyWhen True f x = f x applyWhen _ _ x = x -- | Apply a function @n@ times to a given value. nTimes :: Int -> (a -> a) -> (a -> a) nTimes 0 _ = id nTimes 1 f = f nTimes n f = f . 
nTimes (n-1) f const2 :: a -> b -> c -> a const2 x _ _ = x fstOf3 :: (a,b,c) -> a sndOf3 :: (a,b,c) -> b thdOf3 :: (a,b,c) -> c fstOf3 (a,_,_) = a sndOf3 (_,b,_) = b thdOf3 (_,_,c) = c fst3 :: (a -> d) -> (a, b, c) -> (d, b, c) fst3 f (a, b, c) = (f a, b, c) snd3 :: (b -> d) -> (a, b, c) -> (a, d, c) snd3 f (a, b, c) = (a, f b, c) third3 :: (c -> d) -> (a, b, c) -> (a, b, d) third3 f (a, b, c) = (a, b, f c) uncurry3 :: (a -> b -> c -> d) -> (a, b, c) -> d uncurry3 f (a, b, c) = f a b c liftFst :: (a -> b) -> (a, c) -> (b, c) liftFst f (a,c) = (f a, c) liftSnd :: (a -> b) -> (c, a) -> (c, b) liftSnd f (c,a) = (c, f a) firstM :: Monad m => (a -> m c) -> (a, b) -> m (c, b) firstM f (x, y) = liftM (\x' -> (x', y)) (f x) first3M :: Monad m => (a -> m d) -> (a, b, c) -> m (d, b, c) first3M f (x, y, z) = liftM (\x' -> (x', y, z)) (f x) secondM :: Monad m => (b -> m c) -> (a, b) -> m (a, c) secondM f (x, y) = (x,) <$> f y {- ************************************************************************ * * \subsection[Utils-lists]{General list processing} * * ************************************************************************ -} filterOut :: (a->Bool) -> [a] -> [a] -- ^ Like filter, only it reverses the sense of the test filterOut p = filter (not . p) partitionWith :: (a -> Either b c) -> [a] -> ([b], [c]) -- ^ Uses a function to determine which of two output lists an input element should join partitionWith _ [] = ([],[]) partitionWith f (x:xs) = case f x of Left b -> (b:bs, cs) Right c -> (bs, c:cs) where (bs,cs) = partitionWith f xs chkAppend :: [a] -> [a] -> [a] -- Checks for the second argument being empty -- Used in situations where that situation is common chkAppend xs ys | null ys = xs | otherwise = xs ++ ys {- A paranoid @zip@ (and some @zipWith@ friends) that checks the lists are of equal length. Alastair Reid thinks this should only happen if DEBUGging on; hey, why not? 
-}

zipEqual      :: String -> [a] -> [b] -> [(a,b)]
zipWithEqual  :: String -> (a->b->c) -> [a]->[b]->[c]
zipWith3Equal :: String -> (a->b->c->d) -> [a]->[b]->[c]->[d]
zipWith4Equal :: String -> (a->b->c->d->e) -> [a]->[b]->[c]->[d]->[e]

#if !defined(DEBUG)
-- Production build: just delegate to the ordinary zips.
zipEqual      _ = zip
zipWithEqual  _ = zipWith
zipWith3Equal _ = zipWith3
zipWith4Equal _ = List.zipWith4
#else
-- Debug build: panic (with the supplied tag) when the lists differ in length.
zipEqual _   []     []     = []
zipEqual msg (a:as) (b:bs) = (a,b) : zipEqual msg as bs
zipEqual msg _      _      = panic ("zipEqual: unequal lists: "++msg)

zipWithEqual msg z (a:as) (b:bs) = z a b : zipWithEqual msg z as bs
zipWithEqual _   _ []     []     = []
zipWithEqual msg _ _      _      = panic ("zipWithEqual: unequal lists: "++msg)

zipWith3Equal msg z (a:as) (b:bs) (c:cs)
  = z a b c : zipWith3Equal msg z as bs cs
zipWith3Equal _   _ [] [] [] = []
zipWith3Equal msg _ _  _  _  = panic ("zipWith3Equal: unequal lists: "++msg)

zipWith4Equal msg z (a:as) (b:bs) (c:cs) (d:ds)
  = z a b c d : zipWith4Equal msg z as bs cs ds
zipWith4Equal _   _ [] [] [] [] = []
zipWith4Equal msg _ _  _  _  _  = panic ("zipWith4Equal: unequal lists: "++msg)
#endif

-- | 'zipLazy' is a kind of 'zip' that is lazy in the second list (observe the ~)
zipLazy :: [a] -> [b] -> [(a,b)]
zipLazy []     _       = []
zipLazy (x:xs) ~(y:ys) = (x,y) : zipLazy xs ys

-- | 'zipWithLazy' is like 'zipWith' but is lazy in the second list.
-- The length of the output is always the same as the length of the first
-- list.
zipWithLazy :: (a -> b -> c) -> [a] -> [b] -> [c]
zipWithLazy _ []     _       = []
zipWithLazy f (a:as) ~(b:bs) = f a b : zipWithLazy f as bs

-- | 'zipWith3Lazy' is like 'zipWith3' but is lazy in the second and third lists.
-- The length of the output is always the same as the length of the first
-- list.
zipWith3Lazy :: (a -> b -> c -> d) -> [a] -> [b] -> [c] -> [d]
zipWith3Lazy _ []     _       _       = []
zipWith3Lazy f (a:as) ~(b:bs) ~(c:cs) = f a b c : zipWith3Lazy f as bs cs

-- | 'filterByList' takes a list of Bools and a list of some elements and
-- filters out these elements for which the corresponding value in the list of
-- Bools is False. This function does not check whether the lists have equal
-- length.
filterByList :: [Bool] -> [a] -> [a]
filterByList (True:bs)  (x:xs) = x : filterByList bs xs
filterByList (False:bs) (_:xs) = filterByList bs xs
filterByList _          _      = []

-- | 'filterByLists' takes a list of Bools and two lists as input, and
-- outputs a new list consisting of elements from the last two input lists. For
-- each Bool in the list, if it is 'True', then it takes an element from the
-- former list. If it is 'False', it takes an element from the latter list.
-- The elements taken correspond to the index of the Bool in its list.
-- For example:
--
-- @
-- filterByLists [True, False, True, False] \"abcd\" \"wxyz\" = \"axcz\"
-- @
--
-- This function does not check whether the lists have equal length.
filterByLists :: [Bool] -> [a] -> [a] -> [a]
filterByLists (True:bs)  (x:xs) (_:ys) = x : filterByLists bs xs ys
filterByLists (False:bs) (_:xs) (y:ys) = y : filterByLists bs xs ys
filterByLists _          _      _      = []

-- | 'partitionByList' takes a list of Bools and a list of some elements and
-- partitions the list according to the list of Bools. Elements corresponding
-- to 'True' go to the left; elements corresponding to 'False' go to the right.
-- For example, @partitionByList [True, False, True] [1,2,3] == ([1,3], [2])@
-- This function does not check whether the lists have equal
-- length; when one list runs out, the function stops.
partitionByList :: [Bool] -> [a] -> ([a], [a])
partitionByList = go [] []
  where
    go trues falses (True  : bs) (x : xs) = go (x:trues) falses bs xs
    go trues falses (False : bs) (x : xs) = go trues (x:falses) bs xs
    go trues falses _ _ = (reverse trues, reverse falses)

stretchZipWith :: (a -> Bool) -> b -> (a->b->c) -> [a] -> [b] -> [c]
-- ^ @stretchZipWith p z f xs ys@ stretches @ys@ by inserting @z@ in
-- the places where @p@ returns @True@
stretchZipWith _ _ _ [] _ = []
stretchZipWith p z f (x:xs) ys
  | p x       = f x z : stretchZipWith p z f xs ys
  | otherwise = case ys of
                  []     -> []
                  (y:ys) -> f x y : stretchZipWith p z f xs ys

mapFst :: (a->c) -> [(a,b)] -> [(c,b)]
mapSnd :: (b->c) -> [(a,b)] -> [(a,c)]
mapFst f xys = [(f x, y) | (x,y) <- xys]
mapSnd f xys = [(x, f y) | (x,y) <- xys]

mapAndUnzip :: (a -> (b, c)) -> [a] -> ([b], [c])
mapAndUnzip _ [] = ([], [])
mapAndUnzip f (x:xs)
  = let (r1,  r2)  = f x
        (rs1, rs2) = mapAndUnzip f xs
    in (r1:rs1, r2:rs2)

mapAndUnzip3 :: (a -> (b, c, d)) -> [a] -> ([b], [c], [d])
mapAndUnzip3 _ [] = ([], [], [])
mapAndUnzip3 f (x:xs)
  = let (r1,  r2,  r3)  = f x
        (rs1, rs2, rs3) = mapAndUnzip3 f xs
    in (r1:rs1, r2:rs2, r3:rs3)

zipWithAndUnzip :: (a -> b -> (c,d)) -> [a] -> [b] -> ([c],[d])
zipWithAndUnzip f (a:as) (b:bs)
  = let (r1,  r2)  = f a b
        (rs1, rs2) = zipWithAndUnzip f as bs
    in (r1:rs1, r2:rs2)
zipWithAndUnzip _ _ _ = ([],[])

-- | This has the effect of making the two lists have equal length by dropping
-- the tail of the longer one.
zipAndUnzip :: [a] -> [b] -> ([a],[b])
zipAndUnzip (a:as) (b:bs)
  = let (rs1, rs2) = zipAndUnzip as bs
    in (a:rs1, b:rs2)
zipAndUnzip _ _ = ([],[])

-- | @atLength atLen atEnd ls n@ unravels list @ls@ to position @n@. Precisely:
--
-- @
--  atLength atLenPred atEndPred ls n
--   | n < 0         = atLenPred ls
--   | length ls < n = atEndPred (n - length ls)
--   | otherwise     = atLenPred (drop n ls)
-- @
atLength :: ([a] -> b)   -- Called when length ls >= n, passed (drop n ls)
                         --    NB: arg passed to this function may be []
         -> b            -- Called when length ls < n
         -> [a]
         -> Int
         -> b
atLength atLenPred atEnd ls0 n0
  | n0 < 0    = atLenPred ls0
  | otherwise = go n0 ls0
  where
    -- go's first arg n >= 0
    go 0 ls     = atLenPred ls
    go _ []     = atEnd       -- n > 0 here
    go n (_:xs) = go (n-1) xs

-- Some special cases of atLength:

-- | @(lengthExceeds xs n) = (length xs > n)@
lengthExceeds :: [a] -> Int -> Bool
lengthExceeds lst n
  | n < 0     = True
  | otherwise = atLength notNull False lst n

-- | @(lengthAtLeast xs n) = (length xs >= n)@
lengthAtLeast :: [a] -> Int -> Bool
lengthAtLeast = atLength (const True) False

-- | @(lengthIs xs n) = (length xs == n)@
lengthIs :: [a] -> Int -> Bool
lengthIs lst n
  | n < 0     = False
  | otherwise = atLength null False lst n

-- | @(lengthIsNot xs n) = (length xs /= n)@
lengthIsNot :: [a] -> Int -> Bool
lengthIsNot lst n
  | n < 0     = True
  | otherwise = atLength notNull True lst n

-- | @(lengthAtMost xs n) = (length xs <= n)@
lengthAtMost :: [a] -> Int -> Bool
lengthAtMost lst n
  | n < 0     = False
  | otherwise = atLength null True lst n

-- | @(lengthLessThan xs n) == (length xs < n)@
lengthLessThan :: [a] -> Int -> Bool
lengthLessThan = atLength (const False) True

listLengthCmp :: [a] -> Int -> Ordering
listLengthCmp = atLength atLen atEnd
  where
    atEnd = LT    -- Not yet seen 'n' elts, so list length is < n.

    atLen [] = EQ
    atLen _  = GT

equalLength :: [a] -> [b] -> Bool
-- ^ True if length xs == length ys
equalLength []     []     = True
equalLength (_:xs) (_:ys) = equalLength xs ys
equalLength _      _      = False

compareLength :: [a] -> [b] -> Ordering
compareLength []     []     = EQ
compareLength (_:xs) (_:ys) = compareLength xs ys
compareLength []     _      = LT
compareLength _      []     = GT

leLength :: [a] -> [b] -> Bool
-- ^ True if length xs <= length ys
leLength xs ys = case compareLength xs ys of
                   LT -> True
                   EQ -> True
                   GT -> False

ltLength :: [a] -> [b] -> Bool
-- ^ True if length xs < length ys
ltLength xs ys = case compareLength xs ys of
                   LT -> True
                   EQ -> False
                   GT -> False

----------------------------
singleton :: a -> [a]
singleton x = [x]

isSingleton :: [a] -> Bool
isSingleton [_] = True
isSingleton _   = False

notNull :: Foldable f => f a -> Bool
notNull = not . null

-- | Utility function to go from a singleton list to it's element.
--
-- Whether or not the argument is a singleton list is only checked
-- in debug builds.
only :: [a] -> a
#if defined(DEBUG)
only [a] = a
#else
only (a:_) = a
#endif
only _ = panic "Util: only"

-- | Extract the single element of a list and panic with the given message if
-- there are more elements or the list was empty.
-- Like 'expectJust', but for lists.
expectOnly :: HasCallStack => String -> [a] -> a
{-# INLINE expectOnly #-}
#if defined(DEBUG)
expectOnly _ [a] = a
#else
expectOnly _ (a:_) = a
#endif
expectOnly msg _ = panic ("expectOnly: " ++ msg)

-- | Split a list into chunks of /n/ elements
chunkList :: Int -> [a] -> [[a]]
chunkList _ [] = []
chunkList n xs = as : chunkList n bs
  where
    (as, bs) = splitAt n xs

-- | Replace the last element of a list with another element.
changeLast :: [a] -> a -> [a]
changeLast []     _  = panic "changeLast"
changeLast [_]    x  = [x]
changeLast (x:xs) x' = x : changeLast xs x'

-- | Apply an effectful function to the last list element.
-- Assumes a non-empty list (panics otherwise).
mapLastM :: Functor f => (a -> f a) -> [a] -> f [a]
mapLastM _ []     = panic "mapLastM: empty list"
mapLastM f [x]    = (\x' -> [x']) <$> f x
mapLastM f (x:xs) = (x:) <$> mapLastM f xs

-- | Monadic version of @mapAccumL@ for lists: thread an accumulator
-- through the list from left to right while mapping.
mapAccumM :: (Monad m) => (r -> a -> m (r, b)) -> r -> [a] -> m (r, [b])
mapAccumM f = go
  where
    go acc []     = pure (acc, [])
    go acc (x:xs) = do
      (acc',  y)  <- f acc x
      (acc'', ys) <- go acc' xs
      pure (acc'', y:ys)

-- | Run the action on the 'NonEmpty' view of the list, or do nothing
-- when the list is empty.
whenNonEmpty :: Applicative m => [a] -> (NonEmpty a -> m ()) -> m ()
whenNonEmpty []     _ = pure ()
whenNonEmpty (x:xs) f = f (x :| xs)

-- | Merge an unsorted list of sorted lists, for example:
--
--  > mergeListsBy compare [ [2,5,15], [1,10,100] ] = [1,2,5,10,15,100]
--
--  \( O(n \log{} k) \)
mergeListsBy :: forall a. (a -> a -> Ordering) -> [[a]] -> [a]
mergeListsBy cmp lists
  | debugIsOn, not (all sorted lists) =
    -- When debugging is on, we check that the input lists are sorted.
    panic "mergeListsBy: input lists must be sorted"
  where
    sorted = isSortedBy cmp
mergeListsBy cmp all_lists = merge_lists all_lists
  where
    -- Implements "Iterative 2-Way merge" described at
    -- https://en.wikipedia.org/wiki/K-way_merge_algorithm

    -- Merge two sorted lists into one in O(n).
    merge2 :: [a] -> [a] -> [a]
    merge2 [] ys = ys
    merge2 xs [] = xs
    merge2 (x:xs) (y:ys) =
      case cmp x y of
        GT -> y : merge2 (x:xs) ys
        _  -> x : merge2 xs (y:ys)

    -- Merge the first list with the second, the third with the fourth, and so
    -- on. The output has half as much lists as the input.
    merge_neighbours :: [[a]] -> [[a]]
    merge_neighbours []   = []
    merge_neighbours [xs] = [xs]
    merge_neighbours (xs : ys : lists) =
      merge2 xs ys : merge_neighbours lists

    -- Since 'merge_neighbours' halves the amount of lists in each iteration,
    -- we perform O(log k) iteration. Each iteration is O(n). The total running
    -- time is therefore O(n log k).
    merge_lists :: [[a]] -> [a]
    merge_lists lists =
      case merge_neighbours lists of
        []     -> []
        [xs]   -> xs
        lists' -> merge_lists lists'

isSortedBy :: (a -> a -> Ordering) -> [a] -> Bool
isSortedBy cmp = sorted
  where
    sorted []       = True
    sorted [_]      = True
    sorted (x:y:xs) = cmp x y /= GT && sorted (y:xs)

{-
************************************************************************
*                                                                      *
\subsubsection{Sort utils}
*                                                                      *
************************************************************************
-}

minWith :: Ord b => (a -> b) -> [a] -> a
minWith get_key xs = assert (not (null xs) )
                     head (sortWith get_key xs)

nubSort :: Ord a => [a] -> [a]
nubSort = Set.toAscList . Set.fromList

-- | Remove duplicates but keep elements in order.
--   O(n * log n)
ordNub :: Ord a => [a] -> [a]
ordNub xs = ordNubOn id xs

-- | Remove duplicates but keep elements in order.
--   O(n * log n)
ordNubOn :: Ord b => (a -> b) -> [a] -> [a]
ordNubOn f xs = go Set.empty xs
  where
    go _ [] = []
    go s (x:xs)
      | Set.member (f x) s = go s xs
      | otherwise          = x : go (Set.insert (f x) s) xs

{-
************************************************************************
*                                                                      *
\subsection[Utils-transitive-closure]{Transitive closure}
*                                                                      *
************************************************************************

This algorithm for transitive closure is straightforward, albeit quadratic.
-}

transitiveClosure :: (a -> [a])         -- Successor function
                  -> (a -> a -> Bool)   -- Equality predicate
                  -> [a]
                  -> [a]                -- The transitive closure
transitiveClosure succ eq xs
 = go [] xs
 where
   go done []                      = done
   go done (x:xs) | x `is_in` done = go done xs
                  | otherwise      = go (x:done) (succ x ++ xs)

   _ `is_in` []                 = False
   x `is_in` (y:ys) | eq x y    = True
                    | otherwise = x `is_in` ys

{-
************************************************************************
*                                                                      *
\subsection[Utils-accum]{Accumulating}
*                                                                      *
************************************************************************

A combination of foldl with zip.  It works with equal length lists.
-}

foldl2 :: (acc -> a -> b -> acc) -> acc -> [a] -> [b] -> acc
foldl2 _ z []     []     = z
foldl2 k z (a:as) (b:bs) = foldl2 k (k z a b) as bs
foldl2 _ _ _      _      = panic "Util: foldl2"

all2 :: (a -> b -> Bool) -> [a] -> [b] -> Bool
-- True if the lists are the same length, and
-- all corresponding elements satisfy the predicate
all2 _ []     []     = True
all2 p (x:xs) (y:ys) = p x y && all2 p xs ys
all2 _ _      _      = False

-- Count the number of times a predicate is true
count :: (a -> Bool) -> [a] -> Int
count p = go 0
  where
    go !n []     = n
    go !n (x:xs) | p x       = go (n+1) xs
                 | otherwise = go n xs

countWhile :: (a -> Bool) -> [a] -> Int
-- Length of an /initial prefix/ of the list satisfying p
countWhile p = go 0
  where
    go !n (x:xs) | p x = go (n+1) xs
    go !n _            = n

{-
@splitAt@, @take@, and @drop@ but with length of another
list giving the break-off point:
-}

takeList :: [b] -> [a] -> [a]
-- (takeList as bs) trims bs to the be same length
-- as as, unless as is longer in which case it's a no-op
takeList []     _  = []
takeList (_:xs) ls =
  case ls of
    []     -> []
    (y:ys) -> y : takeList xs ys

dropList :: [b] -> [a] -> [a]
dropList []     xs     = xs
dropList _      xs@[]  = xs
dropList (_:xs) (_:ys) = dropList xs ys

-- | Given two lists xs and ys, return `splitAt (length xs) ys`.
splitAtList :: [b] -> [a] -> ([a], [a])
splitAtList xs ys = go 0# xs ys
  where
    -- we are careful to avoid allocating when there are no leftover
    -- arguments: in this case we can return "ys" directly (cf #18535)
    --
    -- We make `xs` strict because in the general case `ys` isn't `[]` so we
    -- will have to evaluate `xs` anyway.
    go _ !_     []     = (ys, [])              -- length ys <= length xs
    go n []     bs     = (take (I# n) ys, bs)  -- = splitAt n ys
    go n (_:as) (_:bs) = go (n +# 1#) as bs

-- | drop from the end of a list
dropTail :: Int -> [a] -> [a]
-- Specification: dropTail n = reverse . drop n . reverse
-- Better implementation due to Joachim Breitner
-- http://www.joachim-breitner.de/blog/archives/600-On-taking-the-last-n-elements-of-a-list.html
dropTail n xs
  = go (drop n xs) xs
  where
    go (_:ys) (x:xs) = x : go ys xs
    go _      _      = []  -- Stop when ys runs out
                           -- It'll always run out before xs does

-- dropWhile from the end of a list. This is similar to Data.List.dropWhileEnd,
-- but is lazy in the elements and strict in the spine. For reasonably short lists,
-- such as path names and typical lines of text, dropWhileEndLE is generally
-- faster than dropWhileEnd. Its advantage is magnified when the predicate is
-- expensive--using dropWhileEndLE isSpace to strip the space off a line of text
-- is generally much faster than using dropWhileEnd isSpace for that purpose.
-- Specification: dropWhileEndLE p = reverse . dropWhile p . reverse
-- Pay attention to the short-circuit (&&)! The order of its arguments is the only
-- difference between dropWhileEnd and dropWhileEndLE.
dropWhileEndLE :: (a -> Bool) -> [a] -> [a]
dropWhileEndLE p = foldr (\x r -> if null r && p x then [] else x:r) []

-- | @spanEnd p l == reverse (span p (reverse l))@. The first list
-- returns actually comes after the second list (when you look at the
-- input list).
spanEnd :: (a -> Bool) -> [a] -> ([a], [a])
spanEnd p l = go l [] [] l
  where
    go yes _rev_yes rev_no [] = (yes, reverse rev_no)
    go yes rev_yes  rev_no (x:xs)
      | p x       = go yes (x : rev_yes) rev_no xs
      | otherwise = go xs [] (x : rev_yes ++ rev_no) xs

-- | Get the last two elements in a list. Partial!
{-# INLINE last2 #-}
last2 :: [a] -> (a,a)
last2 = List.foldl' (\(_,x2) x -> (x2,x)) (partialError,partialError)
  where
    partialError = panic "last2 - list length less than two"

lastMaybe :: [a] -> Maybe a
lastMaybe [] = Nothing
lastMaybe xs = Just $ last xs

-- | @onJust x m f@ applies f to the value inside the Just or returns the default.
onJust :: b -> Maybe a -> (a->b) -> b onJust dflt = flip (maybe dflt) -- | Split a list into its last element and the initial part of the list. -- @snocView xs = Just (init xs, last xs)@ for non-empty lists. -- @snocView xs = Nothing@ otherwise. -- Unless both parts of the result are guaranteed to be used -- prefer separate calls to @last@ + @init@. -- If you are guaranteed to use both, this will -- be more efficient. snocView :: [a] -> Maybe ([a],a) snocView [] = Nothing snocView xs | (xs,x) <- go xs = Just (xs,x) where go :: [a] -> ([a],a) go [x] = ([],x) go (x:xs) | !(xs',x') <- go xs = (x:xs', x') go [] = error "impossible" split :: Char -> String -> [String] split c s = case rest of [] -> [chunk] _:rest -> chunk : split c rest where (chunk, rest) = break (==c) s -- | Convert a word to title case by capitalising the first letter capitalise :: String -> String capitalise [] = [] capitalise (c:cs) = toUpper c : cs {- ************************************************************************ * * \subsection[Utils-comparison]{Comparisons} * * ************************************************************************ -} isEqual :: Ordering -> Bool -- Often used in (isEqual (a `compare` b)) isEqual GT = False isEqual EQ = True isEqual LT = False thenCmp :: Ordering -> Ordering -> Ordering {-# INLINE thenCmp #-} thenCmp EQ ordering = ordering thenCmp ordering _ = ordering eqListBy :: (a->a->Bool) -> [a] -> [a] -> Bool eqListBy _ [] [] = True eqListBy eq (x:xs) (y:ys) = eq x y && eqListBy eq xs ys eqListBy _ _ _ = False eqMaybeBy :: (a ->a->Bool) -> Maybe a -> Maybe a -> Bool eqMaybeBy _ Nothing Nothing = True eqMaybeBy eq (Just x) (Just y) = eq x y eqMaybeBy _ _ _ = False cmpList :: (a -> a -> Ordering) -> [a] -> [a] -> Ordering -- `cmpList' uses a user-specified comparer cmpList _ [] [] = EQ cmpList _ [] _ = LT cmpList _ _ [] = GT cmpList cmp (a:as) (b:bs) = case cmp a b of { EQ -> cmpList cmp as bs; xxx -> xxx } removeSpaces :: String -> String removeSpaces = 
dropWhileEndLE isSpace . dropWhile isSpace -- Boolean operators lifted to Applicative (<&&>) :: Applicative f => f Bool -> f Bool -> f Bool (<&&>) = liftA2 (&&) infixr 3 <&&> -- same as (&&) (<||>) :: Applicative f => f Bool -> f Bool -> f Bool (<||>) = liftA2 (||) infixr 2 <||> -- same as (||) {- ************************************************************************ * * \subsection{Edit distance} * * ************************************************************************ -} -- | Find the "restricted" Damerau-Levenshtein edit distance between two strings. -- See: . -- Based on the algorithm presented in "A Bit-Vector Algorithm for Computing -- Levenshtein and Damerau Edit Distances" in PSC'02 (Heikki Hyyro). -- See http://www.cs.uta.fi/~helmu/pubs/psc02.pdf and -- http://www.cs.uta.fi/~helmu/pubs/PSCerr.html for an explanation restrictedDamerauLevenshteinDistance :: String -> String -> Int restrictedDamerauLevenshteinDistance str1 str2 = restrictedDamerauLevenshteinDistanceWithLengths m n str1 str2 where m = length str1 n = length str2 restrictedDamerauLevenshteinDistanceWithLengths :: Int -> Int -> String -> String -> Int restrictedDamerauLevenshteinDistanceWithLengths m n str1 str2 | m <= n = if n <= 32 -- n must be larger so this check is sufficient then restrictedDamerauLevenshteinDistance' (undefined :: Word32) m n str1 str2 else restrictedDamerauLevenshteinDistance' (undefined :: Integer) m n str1 str2 | otherwise = if m <= 32 -- m must be larger so this check is sufficient then restrictedDamerauLevenshteinDistance' (undefined :: Word32) n m str2 str1 else restrictedDamerauLevenshteinDistance' (undefined :: Integer) n m str2 str1 restrictedDamerauLevenshteinDistance' :: (Bits bv, Num bv) => bv -> Int -> Int -> String -> String -> Int restrictedDamerauLevenshteinDistance' _bv_dummy m n str1 str2 | [] <- str1 = n | otherwise = extractAnswer $ List.foldl' (restrictedDamerauLevenshteinDistanceWorker (matchVectors str1) top_bit_mask vector_mask) (0, 0, m_ones, 
0, m) str2 where m_ones@vector_mask = (2 ^ m) - 1 top_bit_mask = (1 `shiftL` (m - 1)) `asTypeOf` _bv_dummy extractAnswer (_, _, _, _, distance) = distance restrictedDamerauLevenshteinDistanceWorker :: (Bits bv, Num bv) => IM.IntMap bv -> bv -> bv -> (bv, bv, bv, bv, Int) -> Char -> (bv, bv, bv, bv, Int) restrictedDamerauLevenshteinDistanceWorker str1_mvs top_bit_mask vector_mask (pm, d0, vp, vn, distance) char2 = seq str1_mvs $ seq top_bit_mask $ seq vector_mask $ seq pm' $ seq d0' $ seq vp' $ seq vn' $ seq distance'' $ seq char2 $ (pm', d0', vp', vn', distance'') where pm' = IM.findWithDefault 0 (ord char2) str1_mvs d0' = ((((sizedComplement vector_mask d0) .&. pm') `shiftL` 1) .&. pm) .|. ((((pm' .&. vp) + vp) .&. vector_mask) `xor` vp) .|. pm' .|. vn -- No need to mask the shiftL because of the restricted range of pm hp' = vn .|. sizedComplement vector_mask (d0' .|. vp) hn' = d0' .&. vp hp'_shift = ((hp' `shiftL` 1) .|. 1) .&. vector_mask hn'_shift = (hn' `shiftL` 1) .&. vector_mask vp' = hn'_shift .|. sizedComplement vector_mask (d0' .|. hp'_shift) vn' = d0' .&. hp'_shift distance' = if hp' .&. top_bit_mask /= 0 then distance + 1 else distance distance'' = if hn' .&. top_bit_mask /= 0 then distance' - 1 else distance' sizedComplement :: Bits bv => bv -> bv -> bv sizedComplement vector_mask vect = vector_mask `xor` vect matchVectors :: (Bits bv, Num bv) => String -> IM.IntMap bv matchVectors = snd . List.foldl' go (0 :: Int, IM.empty) where go (ix, im) char = let ix' = ix + 1 im' = IM.insertWith (.|.) 
(ord char) (2 ^ ix) im in seq ix' $ seq im' $ (ix', im') {-# SPECIALIZE INLINE restrictedDamerauLevenshteinDistance' :: Word32 -> Int -> Int -> String -> String -> Int #-} {-# SPECIALIZE INLINE restrictedDamerauLevenshteinDistance' :: Integer -> Int -> Int -> String -> String -> Int #-} {-# SPECIALIZE restrictedDamerauLevenshteinDistanceWorker :: IM.IntMap Word32 -> Word32 -> Word32 -> (Word32, Word32, Word32, Word32, Int) -> Char -> (Word32, Word32, Word32, Word32, Int) #-} {-# SPECIALIZE restrictedDamerauLevenshteinDistanceWorker :: IM.IntMap Integer -> Integer -> Integer -> (Integer, Integer, Integer, Integer, Int) -> Char -> (Integer, Integer, Integer, Integer, Int) #-} {-# SPECIALIZE INLINE sizedComplement :: Word32 -> Word32 -> Word32 #-} {-# SPECIALIZE INLINE sizedComplement :: Integer -> Integer -> Integer #-} {-# SPECIALIZE matchVectors :: String -> IM.IntMap Word32 #-} {-# SPECIALIZE matchVectors :: String -> IM.IntMap Integer #-} fuzzyMatch :: String -> [String] -> [String] fuzzyMatch key vals = fuzzyLookup key [(v,v) | v <- vals] -- | Search for possible matches to the users input in the given list, -- returning a small number of ranked results fuzzyLookup :: String -> [(String,a)] -> [a] fuzzyLookup user_entered possibilites = map fst $ take mAX_RESULTS $ List.sortBy (comparing snd) [ (poss_val, sort_key) | (poss_str, poss_val) <- possibilites , let distance = restrictedDamerauLevenshteinDistance poss_str user_entered , distance <= fuzzy_threshold , let sort_key = (distance, length poss_str, poss_str) ] where -- Work out an appropriate match threshold: -- We report a candidate if its edit distance is <= the threshold, -- The threshold is set to about a quarter of the # of characters the user entered -- Length Threshold -- 1 0 -- Don't suggest *any* candidates -- 2 1 -- for single-char identifiers -- 3 1 -- 4 1 -- 5 1 -- 6 2 -- -- Candidates with the same distance are sorted by their length. 
We also -- use the actual string as the third sorting criteria the sort key to get -- deterministic output, even if the input may have depended on the uniques -- in question fuzzy_threshold = truncate $ fromIntegral (length user_entered + 2) / (4 :: Rational) mAX_RESULTS = 3 {- ************************************************************************ * * \subsection[Utils-pairs]{Pairs} * * ************************************************************************ -} unzipWith :: (a -> b -> c) -> [(a, b)] -> [c] unzipWith f pairs = map ( \ (a, b) -> f a b ) pairs seqList :: [a] -> b -> b seqList [] b = b seqList (x:xs) b = x `seq` seqList xs b strictMap :: (a -> b) -> [a] -> [b] strictMap _ [] = [] strictMap f (x:xs) = let !x' = f x !xs' = strictMap f xs in x' : xs' strictZipWith :: (a -> b -> c) -> [a] -> [b] -> [c] strictZipWith _ [] _ = [] strictZipWith _ _ [] = [] strictZipWith f (x:xs) (y:ys) = let !x' = f x y !xs' = strictZipWith f xs ys in x' : xs' strictZipWith3 :: (a -> b -> c -> d) -> [a] -> [b] -> [c] -> [d] strictZipWith3 _ [] _ _ = [] strictZipWith3 _ _ [] _ = [] strictZipWith3 _ _ _ [] = [] strictZipWith3 f (x:xs) (y:ys) (z:zs) = let !x' = f x y z !xs' = strictZipWith3 f xs ys zs in x' : xs' -- Module names: looksLikeModuleName :: String -> Bool looksLikeModuleName [] = False looksLikeModuleName (c:cs) = isUpper c && go cs where go [] = True go ('.':cs) = looksLikeModuleName cs go (c:cs) = (isAlphaNum c || c == '_' || c == '\'') && go cs -- Similar to 'parse' for Distribution.Package.PackageName, -- but we don't want to depend on Cabal. looksLikePackageName :: String -> Bool looksLikePackageName = all (all isAlphaNum <&&> not . (all isDigit)) . split '-' ----------------------------------------------------------------------------- -- Integers -- | Determine the $\log_2$ of exact powers of 2 exactLog2 :: Integer -> Maybe Integer exactLog2 x | x <= 0 = Nothing | x > fromIntegral (maxBound :: Int32) = Nothing | x' .&. 
(-x') /= x' = Nothing | otherwise = Just (fromIntegral c) where x' = fromIntegral x :: Int32 c = countTrailingZeros x' {- -- ----------------------------------------------------------------------------- -- Floats -} readRational__ :: ReadS Rational -- NB: doesn't handle leading "-" readRational__ r = do ((i, e), t) <- readSignificandExponentPair__ r return ((i%1)*10^^e, t) readRational :: String -> Rational -- NB: *does* handle a leading "-" readRational top_s = case top_s of '-' : xs -> negate (read_me xs) xs -> read_me xs where read_me s = case (do { (x,"") <- readRational__ s ; return x }) of [x] -> x [] -> error ("readRational: no parse:" ++ top_s) _ -> error ("readRational: ambiguous parse:" ++ top_s) readSignificandExponentPair__ :: ReadS (Integer, Integer) -- NB: doesn't handle leading "-" readSignificandExponentPair__ r = do (n,d,s) <- readFix r (k,t) <- readExp s let pair = (n, toInteger (k - d)) return (pair, t) where readFix r = do (ds,s) <- lexDecDigits r (ds',t) <- lexDotDigits s return (read (ds++ds'), length ds', t) readExp (e:s) | e `elem` "eE" = readExp' s readExp s = return (0,s) readExp' ('+':s) = readDec s readExp' ('-':s) = do (k,t) <- readDec s return (-k,t) readExp' s = readDec s readDec s = do (ds,r) <- nonnull isDigit s return (foldl1 (\n d -> n * 10 + d) [ ord d - ord '0' | d <- ds ], r) lexDecDigits = nonnull isDigit lexDotDigits ('.':s) = return (span' isDigit s) lexDotDigits s = return ("",s) nonnull p s = do (cs@(_:_),t) <- return (span' p s) return (cs,t) span' _ xs@[] = (xs, xs) span' p xs@(x:xs') | x == '_' = span' p xs' -- skip "_" (#14473) | p x = let (ys,zs) = span' p xs' in (x:ys,zs) | otherwise = ([],xs) -- | Parse a string into a significand and exponent. -- A trivial example might be: -- ghci> readSignificandExponentPair "1E2" -- (1,2) -- In a more complex case we might return a exponent different than that -- which the user wrote. This is needed in order to use a Integer significand. 
-- ghci> readSignificandExponentPair "-1.11E5" -- (-111,3) readSignificandExponentPair :: String -> (Integer, Integer) -- NB: *does* handle a leading "-" readSignificandExponentPair top_s = case top_s of '-' : xs -> let (i, e) = read_me xs in (-i, e) xs -> read_me xs where read_me s = case (do { (x,"") <- readSignificandExponentPair__ s ; return x }) of [x] -> x [] -> error ("readSignificandExponentPair: no parse:" ++ top_s) _ -> error ("readSignificandExponentPair: ambiguous parse:" ++ top_s) readHexRational :: String -> Rational readHexRational str = case str of '-' : xs -> negate (readMe xs) xs -> readMe xs where readMe as = case readHexRational__ as of Just n -> n _ -> error ("readHexRational: no parse:" ++ str) readHexRational__ :: String -> Maybe Rational readHexRational__ ('0' : x : rest) | x == 'X' || x == 'x' = do let (front,rest2) = span' isHexDigit rest guard (not (null front)) let frontNum = steps 16 0 front case rest2 of '.' : rest3 -> do let (back,rest4) = span' isHexDigit rest3 guard (not (null back)) let backNum = steps 16 frontNum back exp1 = -4 * length back case rest4 of p : ps | isExp p -> fmap (mk backNum . (+ exp1)) (getExp ps) _ -> return (mk backNum exp1) p : ps | isExp p -> fmap (mk frontNum) (getExp ps) _ -> Nothing where isExp p = p == 'p' || p == 'P' getExp ('+' : ds) = dec ds getExp ('-' : ds) = fmap negate (dec ds) getExp ds = dec ds mk :: Integer -> Int -> Rational mk n e = fromInteger n * 2^^e dec cs = case span' isDigit cs of (ds,"") | not (null ds) -> Just (steps 10 0 ds) _ -> Nothing steps base n ds = List.foldl' (step base) n ds step base n d = base * n + fromIntegral (digitToInt d) span' _ xs@[] = (xs, xs) span' p xs@(x:xs') | x == '_' = span' p xs' -- skip "_" (#14473) | p x = let (ys,zs) = span' p xs' in (x:ys,zs) | otherwise = ([],xs) readHexRational__ _ = Nothing -- | Parse a string into a significand and exponent according to -- the "Hexadecimal Floats in Haskell" proposal. 
-- A trivial example might be: -- ghci> readHexSignificandExponentPair "0x1p+1" -- (1,1) -- Behaves similar to readSignificandExponentPair but the base is 16 -- and numbers are given in hexadecimal: -- ghci> readHexSignificandExponentPair "0xAp-4" -- (10,-4) -- ghci> readHexSignificandExponentPair "0x1.2p3" -- (18,-1) readHexSignificandExponentPair :: String -> (Integer, Integer) readHexSignificandExponentPair str = case str of '-' : xs -> let (i, e) = readMe xs in (-i, e) xs -> readMe xs where readMe as = case readHexSignificandExponentPair__ as of Just n -> n _ -> error ("readHexSignificandExponentPair: no parse:" ++ str) readHexSignificandExponentPair__ :: String -> Maybe (Integer, Integer) readHexSignificandExponentPair__ ('0' : x : rest) | x == 'X' || x == 'x' = do let (front,rest2) = span' isHexDigit rest guard (not (null front)) let frontNum = steps 16 0 front case rest2 of '.' : rest3 -> do let (back,rest4) = span' isHexDigit rest3 guard (not (null back)) let backNum = steps 16 frontNum back exp1 = -4 * length back case rest4 of p : ps | isExp p -> fmap (mk backNum . (+ exp1)) (getExp ps) _ -> return (mk backNum exp1) p : ps | isExp p -> fmap (mk frontNum) (getExp ps) _ -> Nothing where isExp p = p == 'p' || p == 'P' getExp ('+' : ds) = dec ds getExp ('-' : ds) = fmap negate (dec ds) getExp ds = dec ds mk :: Integer -> Int -> (Integer, Integer) mk n e = (n, fromIntegral e) dec cs = case span' isDigit cs of (ds,"") | not (null ds) -> Just (steps 10 0 ds) _ -> Nothing steps base n ds = foldl' (step base) n ds step base n d = base * n + fromIntegral (digitToInt d) span' _ xs@[] = (xs, xs) span' p xs@(x:xs') | x == '_' = span' p xs' -- skip "_" (#14473) | p x = let (ys,zs) = span' p xs' in (x:ys,zs) | otherwise = ([],xs) readHexSignificandExponentPair__ _ = Nothing ----------------------------------------------------------------------------- -- Verify that the 'dirname' portion of a FilePath exists. 
-- doesDirNameExist :: FilePath -> IO Bool doesDirNameExist fpath = doesDirectoryExist (takeDirectory fpath) ----------------------------------------------------------------------------- -- Backwards compatibility definition of getModificationTime getModificationUTCTime :: FilePath -> IO UTCTime getModificationUTCTime = getModificationTime -- -------------------------------------------------------------- -- check existence & modification time at the same time modificationTimeIfExists :: FilePath -> IO (Maybe UTCTime) modificationTimeIfExists f = (do t <- getModificationUTCTime f; return (Just t)) `catchIO` \e -> if isDoesNotExistError e then return Nothing else ioError e -- -------------------------------------------------------------- -- check existence & hash at the same time fileHashIfExists :: FilePath -> IO (Maybe Fingerprint) fileHashIfExists f = (do t <- getFileHash f; return (Just t)) `catchIO` \e -> if isDoesNotExistError e then return Nothing else ioError e -- -------------------------------------------------------------- -- atomic file writing by writing to a temporary file first (see #14533) -- -- This should be used in all cases where GHC writes files to disk -- and uses their modification time to skip work later, -- as otherwise a partially written file (e.g. due to crash or Ctrl+C) -- also results in a skip. withAtomicRename :: (MonadIO m) => FilePath -> (FilePath -> m a) -> m a withAtomicRename targetFile f = do -- The temp file must be on the same file system (mount) as the target file -- to result in an atomic move on most platforms. -- The standard way to ensure that is to place it into the same directory. -- This can still be fooled when somebody mounts a different file system -- at just the right time, but that is not a case we aim to cover here. 
let temp = targetFile <.> "tmp" res <- f temp liftIO $ renameFile temp targetFile return res -- -------------------------------------------------------------- -- split a string at the last character where 'pred' is True, -- returning a pair of strings. The first component holds the string -- up (but not including) the last character for which 'pred' returned -- True, the second whatever comes after (but also not including the -- last character). -- -- If 'pred' returns False for all characters in the string, the original -- string is returned in the first component (and the second one is just -- empty). splitLongestPrefix :: String -> (Char -> Bool) -> (String,String) splitLongestPrefix str pred | null r_pre = (str, []) | otherwise = (reverse (tail r_pre), reverse r_suf) -- 'tail' drops the char satisfying 'pred' where (r_suf, r_pre) = break pred (reverse str) escapeSpaces :: String -> String escapeSpaces = foldr (\c s -> if isSpace c then '\\':c:s else c:s) "" type Suffix = String -------------------------------------------------------------- -- * Search path -------------------------------------------------------------- data Direction = Forwards | Backwards reslash :: Direction -> FilePath -> FilePath reslash d = f where f ('/' : xs) = slash : f xs f ('\\' : xs) = slash : f xs f (x : xs) = x : f xs f "" = "" slash = case d of Forwards -> '/' Backwards -> '\\' makeRelativeTo :: FilePath -> FilePath -> FilePath this `makeRelativeTo` that = directory thisFilename where (thisDirectory, thisFilename) = splitFileName this thatDirectory = dropFileName that directory = joinPath $ f (splitPath thisDirectory) (splitPath thatDirectory) f (x : xs) (y : ys) | x == y = f xs ys f xs ys = replicate (length ys) ".." 
++ xs {- ************************************************************************ * * \subsection[Utils-Data]{Utils for defining Data instances} * * ************************************************************************ These functions helps us to define Data instances for abstract types. -} abstractConstr :: String -> Constr abstractConstr n = mkConstr (abstractDataType n) ("{abstract:"++n++"}") [] Prefix abstractDataType :: String -> DataType abstractDataType n = mkDataType n [abstractConstr n] {- ************************************************************************ * * \subsection[Utils-C]{Utils for printing C code} * * ************************************************************************ -} charToC :: Word8 -> String charToC w = case chr (fromIntegral w) of '\"' -> "\\\"" '\'' -> "\\\'" '\\' -> "\\\\" c | c >= ' ' && c <= '~' -> [c] | otherwise -> ['\\', chr (ord '0' + ord c `div` 64), chr (ord '0' + ord c `div` 8 `mod` 8), chr (ord '0' + ord c `mod` 8)] {- ************************************************************************ * * \subsection[Utils-Hashing]{Utils for hashing} * * ************************************************************************ -} -- | A sample hash function for Strings. We keep multiplying by the -- golden ratio and adding. The implementation is: -- -- > hashString = foldl' f golden -- > where f m c = fromIntegral (ord c) * magic + hashInt32 m -- > magic = 0xdeadbeef -- -- Where hashInt32 works just as hashInt shown above. -- -- Knuth argues that repeated multiplication by the golden ratio -- will minimize gaps in the hash space, and thus it's a good choice -- for combining together multiple keys to form one. -- -- Here we know that individual characters c are often small, and this -- produces frequent collisions if we use ord c alone. A -- particular problem are the shorter low ASCII and ISO-8859-1 -- character strings. We pre-multiply by a magic twiddle factor to -- obtain a good distribution. 
In fact, given the following test: -- -- > testp :: Int32 -> Int -- > testp k = (n - ) . length . group . sort . map hs . take n $ ls -- > where ls = [] : [c : l | l <- ls, c <- ['\0'..'\xff']] -- > hs = foldl' f golden -- > f m c = fromIntegral (ord c) * k + hashInt32 m -- > n = 100000 -- -- We discover that testp magic = 0. hashString :: String -> Int32 hashString = foldl' f golden where f m c = fromIntegral (ord c) * magic + hashInt32 m magic = fromIntegral (0xdeadbeef :: Word32) golden :: Int32 golden = 1013904242 -- = round ((sqrt 5 - 1) * 2^32) :: Int32 -- was -1640531527 = round ((sqrt 5 - 1) * 2^31) :: Int32 -- but that has bad mulHi properties (even adding 2^32 to get its inverse) -- Whereas the above works well and contains no hash duplications for -- [-32767..65536] -- | A sample (and useful) hash function for Int32, -- implemented by extracting the uppermost 32 bits of the 64-bit -- result of multiplying by a 33-bit constant. The constant is from -- Knuth, derived from the golden ratio: -- -- > golden = round ((sqrt 5 - 1) * 2^32) -- -- We get good key uniqueness on small inputs -- (a problem with previous versions): -- (length $ group $ sort $ map hashInt32 [-32767..65536]) == 65536 + 32768 -- hashInt32 :: Int32 -> Int32 hashInt32 x = mulHi x golden + x -- hi 32 bits of a x-bit * 32 bit -> 64-bit multiply mulHi :: Int32 -> Int32 -> Int32 mulHi a b = fromIntegral (r `shiftR` 32) where r :: Int64 r = fromIntegral a * fromIntegral b -- | A call stack constraint, but only when 'isDebugOn'. 
#if defined(DEBUG) type HasDebugCallStack = HasCallStack #else type HasDebugCallStack = (() :: Constraint) #endif mapMaybe' :: Foldable f => (a -> Maybe b) -> f a -> [b] mapMaybe' f = foldr g [] where g x rest | Just y <- f x = y : rest | otherwise = rest ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Monad.hs0000644000000000000000000003735114472400113020337 0ustar0000000000000000-- | Utilities related to Monad and Applicative classes -- Mostly for backwards compatibility. module GHC.Utils.Monad ( Applicative(..) , (<$>) , MonadFix(..) , MonadIO(..) , zipWith3M, zipWith3M_, zipWith4M, zipWithAndUnzipM , mapAndUnzipM, mapAndUnzip3M, mapAndUnzip4M, mapAndUnzip5M , mapAccumLM , liftFstM, liftSndM , mapSndM , concatMapM , mapMaybeM , fmapMaybeM, fmapEitherM , anyM, allM, orM , foldlM, foldlM_, foldrM , maybeMapM , whenM, unlessM , filterOutM ) where ------------------------------------------------------------------------------- -- Imports ------------------------------------------------------------------------------- import GHC.Prelude import Control.Applicative import Control.Monad import Control.Monad.Fix import Control.Monad.IO.Class import Data.Foldable (sequenceA_, foldlM, foldrM) import Data.List (unzip4, unzip5, zipWith4) ------------------------------------------------------------------------------- -- Common functions -- These are used throughout the compiler ------------------------------------------------------------------------------- {- Note [Inline @zipWithNM@ functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The inline principle for 'zipWith3M', 'zipWith4M' and 'zipWith3M_' is the same as for 'zipWithM' and 'zipWithM_' in "Control.Monad", see Note [Fusion for zipN/zipWithN] in GHC/List.hs for more details. The 'zipWithM'/'zipWithM_' functions are inlined so that the `zipWith` and `sequenceA` functions with which they are defined have an opportunity to fuse. 
Furthermore, 'zipWith3M'/'zipWith4M' and 'zipWith3M_' have been explicitly rewritten in a non-recursive way similarly to 'zipWithM'/'zipWithM_', and for more than just uniformity: after [D5241](https://phabricator.haskell.org/D5241) for issue #14037, all @zipN@/@zipWithN@ functions fuse, meaning 'zipWith3M'/'zipWIth4M' and 'zipWith3M_'@ now behave like 'zipWithM' and 'zipWithM_', respectively, with regards to fusion. As such, since there are not any differences between 2-ary 'zipWithM'/ 'zipWithM_' and their n-ary counterparts below aside from the number of arguments, the `INLINE` pragma should be replicated in the @zipWithNM@ functions below as well. -} zipWith3M :: Monad m => (a -> b -> c -> m d) -> [a] -> [b] -> [c] -> m [d] {-# INLINE zipWith3M #-} -- Inline so that fusion with 'zipWith3' and 'sequenceA' has a chance to fire. -- See Note [Inline @zipWithNM@ functions] above. zipWith3M f xs ys zs = sequenceA (zipWith3 f xs ys zs) zipWith3M_ :: Monad m => (a -> b -> c -> m d) -> [a] -> [b] -> [c] -> m () {-# INLINE zipWith3M_ #-} -- Inline so that fusion with 'zipWith4' and 'sequenceA' has a chance to fire. -- See Note [Inline @zipWithNM@ functions] above. zipWith3M_ f xs ys zs = sequenceA_ (zipWith3 f xs ys zs) zipWith4M :: Monad m => (a -> b -> c -> d -> m e) -> [a] -> [b] -> [c] -> [d] -> m [e] {-# INLINE zipWith4M #-} -- Inline so that fusion with 'zipWith5' and 'sequenceA' has a chance to fire. -- See Note [Inline @zipWithNM@ functions] above. 
zipWith4M f xs ys ws zs = sequenceA (zipWith4 f xs ys ws zs) zipWithAndUnzipM :: Monad m => (a -> b -> m (c, d)) -> [a] -> [b] -> m ([c], [d]) {-# INLINABLE zipWithAndUnzipM #-} -- this allows specialization to a given monad zipWithAndUnzipM f (x:xs) (y:ys) = do { (c, d) <- f x y ; (cs, ds) <- zipWithAndUnzipM f xs ys ; return (c:cs, d:ds) } zipWithAndUnzipM _ _ _ = return ([], []) {- Note [Inline @mapAndUnzipNM@ functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The inline principle is the same as 'mapAndUnzipM' in "Control.Monad". The 'mapAndUnzipM' function is inlined so that the `unzip` and `traverse` functions with which it is defined have an opportunity to fuse, see Note [Inline @unzipN@ functions] in Data/OldList.hs for more details. Furthermore, the @mapAndUnzipNM@ functions have been explicitly rewritten in a non-recursive way similarly to 'mapAndUnzipM', and for more than just uniformity: after [D5249](https://phabricator.haskell.org/D5249) for Trac ticket #14037, all @unzipN@ functions fuse, meaning 'mapAndUnzip3M', 'mapAndUnzip4M' and 'mapAndUnzip5M' now behave like 'mapAndUnzipM' with regards to fusion. As such, since there are not any differences between 2-ary 'mapAndUnzipM' and its n-ary counterparts below aside from the number of arguments, the `INLINE` pragma should be replicated in the @mapAndUnzipNM@ functions below as well. -} -- | mapAndUnzipM for triples mapAndUnzip3M :: Monad m => (a -> m (b,c,d)) -> [a] -> m ([b],[c],[d]) {-# INLINE mapAndUnzip3M #-} -- Inline so that fusion with 'unzip3' and 'traverse' has a chance to fire. -- See Note [Inline @mapAndUnzipNM@ functions] above. mapAndUnzip3M f xs = unzip3 <$> traverse f xs mapAndUnzip4M :: Monad m => (a -> m (b,c,d,e)) -> [a] -> m ([b],[c],[d],[e]) {-# INLINE mapAndUnzip4M #-} -- Inline so that fusion with 'unzip4' and 'traverse' has a chance to fire. -- See Note [Inline @mapAndUnzipNM@ functions] above. 
mapAndUnzip4M f xs = unzip4 <$> traverse f xs mapAndUnzip5M :: Monad m => (a -> m (b,c,d,e,f)) -> [a] -> m ([b],[c],[d],[e],[f]) {-# INLINE mapAndUnzip5M #-} -- Inline so that fusion with 'unzip5' and 'traverse' has a chance to fire. -- See Note [Inline @mapAndUnzipNM@ functions] above. mapAndUnzip5M f xs = unzip5 <$> traverse f xs -- TODO: mapAccumLM is used in many places. Surely most of -- these don't actually want to be lazy. We should add a strict -- variant and use it where appropriate. -- | Monadic version of mapAccumL mapAccumLM :: Monad m => (acc -> x -> m (acc, y)) -- ^ combining function -> acc -- ^ initial state -> [x] -- ^ inputs -> m (acc, [y]) -- ^ final state, outputs mapAccumLM f s xs = go s xs where go s (x:xs) = do (s1, x') <- f s x (s2, xs') <- go s1 xs return (s2, x' : xs') go s [] = return (s, []) -- | Monadic version of mapSnd mapSndM :: Monad m => (b -> m c) -> [(a,b)] -> m [(a,c)] mapSndM f xs = go xs where go [] = return [] go ((a,b):xs) = do { c <- f b; rs <- go xs; return ((a,c):rs) } liftFstM :: Monad m => (a -> b) -> m (a, r) -> m (b, r) liftFstM f thing = do { (a,r) <- thing; return (f a, r) } liftSndM :: Monad m => (a -> b) -> m (r, a) -> m (r, b) liftSndM f thing = do { (r,a) <- thing; return (r, f a) } -- | Monadic version of concatMap concatMapM :: Monad m => (a -> m [b]) -> [a] -> m [b] concatMapM f xs = liftM concat (mapM f xs) -- | Applicative version of mapMaybe mapMaybeM :: Applicative m => (a -> m (Maybe b)) -> [a] -> m [b] mapMaybeM f = foldr g (pure []) where g a = liftA2 (maybe id (:)) (f a) -- | Monadic version of fmap fmapMaybeM :: (Monad m) => (a -> m b) -> Maybe a -> m (Maybe b) fmapMaybeM _ Nothing = return Nothing fmapMaybeM f (Just x) = f x >>= (return . Just) -- | Monadic version of fmap fmapEitherM :: Monad m => (a -> m b) -> (c -> m d) -> Either a c -> m (Either b d) fmapEitherM fl _ (Left a) = fl a >>= (return . Left) fmapEitherM _ fr (Right b) = fr b >>= (return . 
Right) -- | Monadic version of 'any', aborts the computation at the first @True@ value anyM :: Monad m => (a -> m Bool) -> [a] -> m Bool anyM f xs = go xs where go [] = return False go (x:xs) = do b <- f x if b then return True else go xs -- | Monad version of 'all', aborts the computation at the first @False@ value allM :: Monad m => (a -> m Bool) -> [a] -> m Bool allM f bs = go bs where go [] = return True go (b:bs) = (f b) >>= (\bv -> if bv then go bs else return False) -- | Monadic version of or orM :: Monad m => m Bool -> m Bool -> m Bool orM m1 m2 = m1 >>= \x -> if x then return True else m2 -- | Monadic version of foldl that discards its result foldlM_ :: (Monad m, Foldable t) => (a -> b -> m a) -> a -> t b -> m () foldlM_ = foldM_ -- | Monadic version of fmap specialised for Maybe maybeMapM :: Monad m => (a -> m b) -> (Maybe a -> m (Maybe b)) maybeMapM _ Nothing = return Nothing maybeMapM m (Just x) = liftM Just $ m x -- | Monadic version of @when@, taking the condition in the monad whenM :: Monad m => m Bool -> m () -> m () whenM mb thing = do { b <- mb ; when b thing } -- | Monadic version of @unless@, taking the condition in the monad unlessM :: Monad m => m Bool -> m () -> m () unlessM condM acc = do { cond <- condM ; unless cond acc } -- | Like 'filterM', only it reverses the sense of the test. filterOutM :: (Applicative m) => (a -> m Bool) -> [a] -> m [a] filterOutM p = foldr (\ x -> liftA2 (\ flg -> if flg then id else (x:)) (p x)) (pure []) {- Note [The one-shot state monad trick] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Summary: many places in GHC use a state monad, and we really want those functions to be eta-expanded (#18202). 
The problem ~~~~~~~~~~~ Consider newtype M a = MkM (State -> (State, a)) instance Monad M where mf >>= k = MkM (\s -> case mf of MkM f -> case f s of (s',r) -> case k r of MkM g -> g s') fooM :: Int -> M Int fooM x = g y >>= \r -> h r where y = expensive x Now suppose you say (repeat 20 (fooM 4)), where repeat :: Int -> M Int -> M Int performs its argument n times. You would expect (expensive 4) to be evaluated only once, not 20 times. So foo should have arity 1 (not 2); it should look like this (modulo casts) fooM x = let y = expensive x in \s -> case g y of ... But creating and then repeating, a monadic computation is rare. If you /aren't/ re-using (M a) value, it's /much/ more efficient to make foo have arity 2, thus: fooM x s = case g (expensive x) of ... Why more efficient? Because now foo takes its argument both at once, rather than one at a time, creating a heap-allocated function closure. See https://www.joachim-breitner.de/blog/763-Faster_Winter_5__Eta-Expanding_ReaderT for a very good explanation of the issue which led to these optimisations into GHC. The trick ~~~~~~~~~ With state monads like M the general case is that we *aren't* reusing (M a) values so it is much more efficient to avoid allocating a function closure for them. So the state monad trick is a way to keep the monadic syntax but to make GHC eta-expand functions like `fooM`. To do that we use the "oneShot" magic function. Here is the trick: * Define a "smart constructor" mkM :: (State -> (State,a)) -> M a mkM f = MkM (oneShot m) * Never call MkM directly, as a constructor. Instead, always call mkM. And that's it! The magic 'oneShot' function does this transformation: oneShot (\s. e) ==> \s{os}. e which pins a one-shot flag {os} onto the binder 's'. That tells GHC that it can assume the lambda is called only once, and thus can freely float computations in and out of the lambda. To be concrete, let's see what happens to fooM: fooM = \x. g (expensive x) >>= \r -> h r = \x. 
let mf = g (expensive x) k = \r -> h r in MkM (oneShot (\s -> case mf of MkM' f -> case f s of (s',r) -> case k r of MkM' g -> g s')) -- The MkM' are just newtype casts nt_co = \x. let mf = g (expensive x) k = \r -> h r in (\s{os}. case (mf |> nt_co) s of (s',r) -> (k r) |> nt_co s') |> sym nt_co -- Crucial step: float let-bindings into that \s{os} = \x. (\s{os}. case (g (expensive x) |> nt_co) s of (s',r) -> h r |> nt_co s') |> sym nt_co and voila! fooM has arity 2. The trick is very similar to the built-in "state hack" (see Note [The state-transformer hack] in "GHC.Core.Opt.Arity") but is applicable on a monad-by-monad basis under programmer control. Using pattern synonyms ~~~~~~~~~~~~~~~~~~~~~~ Using a smart constructor is fine, but there is no way to check that we have found *all* uses, especially if the uses escape a single module. A neat (but more sophisticated) alternative is to use pattern synonyms: -- We rename the existing constructor. newtype M a = MkM' (State -> (State, a)) -- The pattern has the old constructor name. pattern MkM f <- MkM' f where MkM f = MkM' (oneShot f) Now we can simply grep to check that there are no uses of MkM' /anywhere/, to guarantee that we have not missed any. (Using the smart constructor alone we still need the data constructor in patterns.) That's the advantage of the pattern-synonym approach, but it is more elaborate. The pattern synonym approach is due to Sebastian Graaf (#18238) Do note that for monads for multiple arguments more than one oneShot function might be required. For example in FCode we use: newtype FCode a = FCode' { doFCode :: StgToCmmConfig -> CgState -> (a, CgState) } pattern FCode :: (StgToCmmConfig -> CgState -> (a, CgState)) -> FCode a pattern FCode m <- FCode' m where FCode m = FCode' $ oneShot (\cgInfoDown -> oneShot (\state ->m cgInfoDown state)) INLINE pragmas and (>>) ~~~~~~~~~~~~~~~~~~~~~~~ A nasty gotcha is described in #20008. 
In brief, be careful if you get (>>) via its default method: instance Applicative M where pure a = MkM (\s -> (s, a)) (<*>) = ap instance Monad UM where {-# INLINE (>>=) #-} m >>= k = MkM (\s -> blah) Here we define (>>), via its default method, in terms of (>>=). If you do this, be sure to put an INLINE pragma on (>>=), as above. That tells it to inline (>>=) in the RHS of (>>), even when it is applied to only two arguments, which in turn conveys the one-shot info from (>>=) to (>>). Lacking the INLINE, GHC may eta-expand (>>), and with a non-one-shot lambda. #20008 has more discussion. Derived instances ~~~~~~~~~~~~~~~~~ One caveat of both approaches is that derived instances don't use the smart constructor /or/ the pattern synonym. So they won't benefit from the automatic insertion of "oneShot". data M a = MkM' (State -> (State,a)) deriving (Functor) <-- Functor implementation will use MkM'! Conclusion: don't use 'derviving' in these cases. Multi-shot actions (cf #18238) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sometimes we really *do* want computations to be shared! Remember our example (repeat 20 (fooM 4)). See Note [multiShotIO] in GHC.Types.Unique.Supply We can force fooM to have arity 1 using multiShot: fooM :: Int -> M Int fooM x = multiShotM (g y >>= \r -> h r) where y = expensive x multiShotM :: M a -> M a {-# INLINE multiShotM #-} multiShotM (MkM m) = MkM (\s -> inline m s) -- Really uses the data constructor, -- not the smart constructor! Now we can see how fooM optimises (ignoring casts) multiShotM (g y >>= \r -> h r) ==> {inline (>>=)} multiShotM (\s{os}. case g y s of ...) ==> {inline multiShotM} let m = \s{os}. case g y s of ... in \s. inline m s ==> {inline m} \s. (\s{os}. case g y s of ...) s ==> \s. case g y s of ... and voila! the one-shot flag has gone. It's possible that y has been replaced by (expensive x), but full laziness should pull it back out. (This part seems less robust.) The magic `inline` function does two things * It prevents eta reduction. 
If we wrote just multiShotIO (IO m) = IO (\s -> m s) the lamda would eta-reduce to 'm' and all would be lost. * It helps ensure that 'm' really does inline. Note that 'inline' evaporates in phase 0. See Note [inlineId magic] in GHC.Core.Opt.ConstantFold.match_inline. The INLINE pragma on multiShotM is very important, else the 'inline' call will evaporate when compiling the module that defines 'multiShotM', before it is ever exported. -} ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Monad/State/Strict.hs0000644000000000000000000000777514472400113022676 0ustar0000000000000000{-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE PatternSynonyms #-} -- | A state monad which is strict in its state. module GHC.Utils.Monad.State.Strict ( -- * The State monad State(State) , state , evalState , execState , runState -- * Operations , get , gets , put , modify ) where import GHC.Prelude import GHC.Exts (oneShot) {- Note [Strict State monad] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A State monad can be strict in many ways. Which kind of strictness do we mean? First of, since we represent the result pair as an unboxed pair, this State monad is strict in the sense of "Control.Monad.Trans.State.Strict": The computations and the sequencing there-of (through 'Applicative and 'Monad' instances) are forced strictly. Beyond the manual unboxing of one level (which CPR could achieve similarly, yet perhaps a bit less reliably), our 'State' is even stricter than the transformers version: It's also strict in the state `s` (but still lazy in the value `a`). What this means is that whenever callers examine the state component (perhaps through 'runState'), they will find that the `s` has already been evaluated. This additional strictness maintained in a single place, by the ubiquitous 'State' pattern synonym, by forcing the state component *after* any state action has been run. The INVARIANT is: > Any `s` that makes it into the unboxed pair representation is evaluated. 
This invariant has another nice effect: Because the evaluatedness is quite apparent, Nested CPR will try to unbox the state component `s` nestedly if feasible. Detecting evaluatedness of nested components is a necessary condition for Nested CPR to trigger; see the user's guide entry on that: https://ghc.gitlab.haskell.org/ghc/doc/users_guide/using-optimisation.html#ghc-flag--fcpr-anal Note that this doesn't have any effects on whether Nested CPR will unbox the `a` component (which is still lazy by default). The user still has to use the `return $!` idiom from the user's guide to encourage Nested CPR to unbox the `a` result of a stateful computation. -} -- | A state monad which is strict in the state `s`, but lazy in the value `a`. -- -- See Note [Strict State monad] for the particular notion of strictness and -- implementation details. newtype State s a = State' { runState' :: s -> (# a, s #) } pattern State :: (s -> (# a, s #)) -> State s a -- This pattern synonym makes the monad eta-expand, -- which as a very beneficial effect on compiler performance -- See #18202. -- See Note [The one-shot state monad trick] in GHC.Utils.Monad -- It also implements the particular notion of strictness of this monad; -- see Note [Strict State monad]. pattern State m <- State' m where State m = State' (oneShot $ \s -> forceState (m s)) -- | Forces the state component of the unboxed representation pair of 'State'. -- See Note [Strict State monad]. This is The Place doing the forcing! 
forceState :: (# a, s #) -> (# a, s #) forceState (# a, !s #) = (# a, s #) instance Functor (State s) where fmap f m = State $ \s -> case runState' m s of (# x, s' #) -> (# f x, s' #) instance Applicative (State s) where pure x = State $ \s -> (# x, s #) m <*> n = State $ \s -> case runState' m s of { (# f, s' #) -> case runState' n s' of { (# x, s'' #) -> (# f x, s'' #) }} instance Monad (State s) where m >>= n = State $ \s -> case runState' m s of (# r, !s' #) -> runState' (n r) s' state :: (s -> (a, s)) -> State s a state f = State $ \s -> case f s of (r, s') -> (# r, s' #) get :: State s s get = State $ \s -> (# s, s #) gets :: (s -> a) -> State s a gets f = State $ \s -> (# f s, s #) put :: s -> State s () put s' = State $ \_ -> (# (), s' #) modify :: (s -> s) -> State s () modify f = State $ \s -> (# (), f s #) evalState :: State s a -> s -> a evalState s i = case runState' s i of (# a, _ #) -> a execState :: State s a -> s -> s execState s i = case runState' s i of (# _, s' #) -> s' runState :: State s a -> s -> (a, s) runState s i = case runState' s i of (# a, !s' #) -> (a, s') ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Outputable.hs0000644000000000000000000015275014472400113021426 0ustar0000000000000000{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {- (c) The University of Glasgow 2006-2012 (c) The GRASP Project, Glasgow University, 1992-1998 -} -- | This module defines classes and functions for pretty-printing. It also -- exports a number of helpful debugging and other utilities such as 'trace' and 'panic'. -- -- The interface to this module is very similar to the standard Hughes-PJ pretty printing -- module, except that it exports a number of additional functions that are rarely used, -- and works over the 'SDoc' type. 
module GHC.Utils.Outputable ( -- * Type classes Outputable(..), OutputableBndr(..), OutputableP(..), -- * Pretty printing combinators SDoc, runSDoc, PDoc(..), docToSDoc, interppSP, interpp'SP, interpp'SP', pprQuotedList, pprWithCommas, quotedListWithOr, quotedListWithNor, pprWithBars, empty, isEmpty, nest, char, text, ftext, ptext, ztext, int, intWithCommas, integer, word, float, double, rational, doublePrec, parens, cparen, brackets, braces, quotes, quote, doubleQuotes, angleBrackets, semi, comma, colon, dcolon, space, equals, dot, vbar, arrow, lollipop, larrow, darrow, arrowt, larrowt, arrowtt, larrowtt, lambda, lparen, rparen, lbrack, rbrack, lbrace, rbrace, underscore, mulArrow, blankLine, forAllLit, bullet, (<>), (<+>), hcat, hsep, ($$), ($+$), vcat, sep, cat, fsep, fcat, hang, hangNotEmpty, punctuate, ppWhen, ppUnless, ppWhenOption, ppUnlessOption, speakNth, speakN, speakNOf, plural, singular, isOrAre, doOrDoes, itsOrTheir, thisOrThese, hasOrHave, unicodeSyntax, coloured, keyword, -- * Converting 'SDoc' into strings and outputting it printSDoc, printSDocLn, bufLeftRenderSDoc, pprCode, showSDocOneLine, showSDocUnsafe, showPprUnsafe, renderWithContext, pprDebugAndThen, pprInfixVar, pprPrefixVar, pprHsChar, pprHsString, pprHsBytes, primFloatSuffix, primCharSuffix, primDoubleSuffix, primInt8Suffix, primWord8Suffix, primInt16Suffix, primWord16Suffix, primInt32Suffix, primWord32Suffix, primInt64Suffix, primWord64Suffix, primIntSuffix, primWordSuffix, pprPrimChar, pprPrimInt, pprPrimWord, pprPrimInt8, pprPrimWord8, pprPrimInt16, pprPrimWord16, pprPrimInt32, pprPrimWord32, pprPrimInt64, pprPrimWord64, pprFastFilePath, pprFilePathString, -- * Controlling the style in which output is printed BindingSite(..), PprStyle(..), LabelStyle(..), PrintUnqualified(..), QueryQualifyName, QueryQualifyModule, QueryQualifyPackage, reallyAlwaysQualify, reallyAlwaysQualifyNames, alwaysQualify, alwaysQualifyNames, alwaysQualifyModules, neverQualify, neverQualifyNames, 
neverQualifyModules, alwaysQualifyPackages, neverQualifyPackages, QualifyName(..), queryQual, sdocOption, updSDocContext, SDocContext (..), sdocWithContext, defaultSDocContext, getPprStyle, withPprStyle, setStyleColoured, pprDeeper, pprDeeperList, pprSetDepth, codeStyle, userStyle, dumpStyle, asmStyle, qualName, qualModule, qualPackage, mkErrStyle, defaultErrStyle, defaultDumpStyle, mkDumpStyle, defaultUserStyle, mkUserStyle, cmdlineParserStyle, Depth(..), withUserStyle, withErrStyle, ifPprDebug, whenPprDebug, getPprDebug, ) where import GHC.Prelude import {-# SOURCE #-} GHC.Unit.Types ( Unit, Module, moduleName ) import {-# SOURCE #-} GHC.Unit.Module.Name( ModuleName ) import {-# SOURCE #-} GHC.Types.Name.Occurrence( OccName ) import GHC.Utils.BufHandle (BufHandle) import GHC.Data.FastString import qualified GHC.Utils.Ppr as Pretty import qualified GHC.Utils.Ppr.Colour as Col import GHC.Utils.Ppr ( Doc, Mode(..) ) import GHC.Serialized import GHC.LanguageExtensions (Extension) import Data.ByteString (ByteString) import qualified Data.ByteString as BS import Data.Char import qualified Data.Map as M import Data.Int import qualified Data.IntMap as IM import Data.Set (Set) import qualified Data.Set as Set import qualified Data.IntSet as IntSet import Data.String import Data.Word import System.IO ( Handle ) import System.FilePath import Text.Printf import Numeric (showFFloat) import Data.Graph (SCC(..)) import Data.List (intersperse) import Data.List.NonEmpty (NonEmpty (..)) import qualified Data.List.NonEmpty as NEL import Data.Time import Data.Time.Format.ISO8601 import GHC.Fingerprint import GHC.Show ( showMultiLineString ) import GHC.Utils.Exception import GHC.Exts (oneShot) {- ************************************************************************ * * \subsection{The @PprStyle@ data type} * * ************************************************************************ -} data PprStyle = PprUser PrintUnqualified Depth Coloured -- Pretty-print in a way that will make 
sense to the -- ordinary user; must be very close to Haskell -- syntax, etc. -- Assumes printing tidied code: non-system names are -- printed without uniques. | PprDump PrintUnqualified -- For -ddump-foo; less verbose than in ppr-debug mode, but more than PprUser -- Does not assume tidied code: non-external names -- are printed with uniques. | PprCode !LabelStyle -- ^ Print code; either C or assembler -- | Style of label pretty-printing. -- -- When we produce C sources or headers, we have to take into account that C -- compilers transform C labels when they convert them into symbols. For -- example, they can add prefixes (e.g., "_" on Darwin) or suffixes (size for -- stdcalls on Windows). So we provide two ways to pretty-print CLabels: C style -- or Asm style. -- data LabelStyle = CStyle -- ^ C label style (used by C and LLVM backends) | AsmStyle -- ^ Asm label style (used by NCG backend) deriving (Eq,Ord,Show) data Depth = AllTheWay | PartWay Int -- ^ 0 => stop | DefaultDepth -- ^ Use 'sdocDefaultDepth' field as depth data Coloured = Uncoloured | Coloured -- ----------------------------------------------------------------------------- -- Printing original names -- | When printing code that contains original names, we need to map the -- original names back to something the user understands. This is the -- purpose of the triple of functions that gets passed around -- when rendering 'SDoc'. data PrintUnqualified = QueryQualify { queryQualifyName :: QueryQualifyName, queryQualifyModule :: QueryQualifyModule, queryQualifyPackage :: QueryQualifyPackage } -- | Given a `Name`'s `Module` and `OccName`, decide whether and how to qualify -- it. type QueryQualifyName = Module -> OccName -> QualifyName -- | For a given module, we need to know whether to print it with -- a package name to disambiguate it. type QueryQualifyModule = Module -> Bool -- | For a given package, we need to know whether to print it with -- the component id to disambiguate it. 
type QueryQualifyPackage = Unit -> Bool -- See Note [Printing original names] in GHC.Types.Name.Ppr data QualifyName -- Given P:M.T = NameUnqual -- It's in scope unqualified as "T" -- OR nothing called "T" is in scope | NameQual ModuleName -- It's in scope qualified as "X.T" | NameNotInScope1 -- It's not in scope at all, but M.T is not bound -- in the current scope, so we can refer to it as "M.T" | NameNotInScope2 -- It's not in scope at all, and M.T is already bound in -- the current scope, so we must refer to it as "P:M.T" instance Outputable QualifyName where ppr NameUnqual = text "NameUnqual" ppr (NameQual _mod) = text "NameQual" -- can't print the mod without module loops :( ppr NameNotInScope1 = text "NameNotInScope1" ppr NameNotInScope2 = text "NameNotInScope2" reallyAlwaysQualifyNames :: QueryQualifyName reallyAlwaysQualifyNames _ _ = NameNotInScope2 -- | NB: This won't ever show package IDs alwaysQualifyNames :: QueryQualifyName alwaysQualifyNames m _ = NameQual (moduleName m) neverQualifyNames :: QueryQualifyName neverQualifyNames _ _ = NameUnqual alwaysQualifyModules :: QueryQualifyModule alwaysQualifyModules _ = True neverQualifyModules :: QueryQualifyModule neverQualifyModules _ = False alwaysQualifyPackages :: QueryQualifyPackage alwaysQualifyPackages _ = True neverQualifyPackages :: QueryQualifyPackage neverQualifyPackages _ = False reallyAlwaysQualify, alwaysQualify, neverQualify :: PrintUnqualified reallyAlwaysQualify = QueryQualify reallyAlwaysQualifyNames alwaysQualifyModules alwaysQualifyPackages alwaysQualify = QueryQualify alwaysQualifyNames alwaysQualifyModules alwaysQualifyPackages neverQualify = QueryQualify neverQualifyNames neverQualifyModules neverQualifyPackages defaultUserStyle :: PprStyle defaultUserStyle = mkUserStyle neverQualify AllTheWay defaultDumpStyle :: PprStyle -- Print without qualifiers to reduce verbosity, unless -dppr-debug defaultDumpStyle = PprDump neverQualify mkDumpStyle :: PrintUnqualified -> PprStyle mkDumpStyle 
print_unqual = PprDump print_unqual -- | Default style for error messages, when we don't know PrintUnqualified -- It's a bit of a hack because it doesn't take into account what's in scope -- Only used for desugarer warnings, and typechecker errors in interface sigs defaultErrStyle :: PprStyle defaultErrStyle = mkErrStyle neverQualify -- | Style for printing error messages mkErrStyle :: PrintUnqualified -> PprStyle mkErrStyle unqual = mkUserStyle unqual DefaultDepth cmdlineParserStyle :: PprStyle cmdlineParserStyle = mkUserStyle alwaysQualify AllTheWay mkUserStyle :: PrintUnqualified -> Depth -> PprStyle mkUserStyle unqual depth = PprUser unqual depth Uncoloured withUserStyle :: PrintUnqualified -> Depth -> SDoc -> SDoc withUserStyle unqual depth doc = withPprStyle (PprUser unqual depth Uncoloured) doc withErrStyle :: PrintUnqualified -> SDoc -> SDoc withErrStyle unqual doc = withPprStyle (mkErrStyle unqual) doc setStyleColoured :: Bool -> PprStyle -> PprStyle setStyleColoured col style = case style of PprUser q d _ -> PprUser q d c _ -> style where c | col = Coloured | otherwise = Uncoloured instance Outputable PprStyle where ppr (PprUser {}) = text "user-style" ppr (PprCode {}) = text "code-style" ppr (PprDump {}) = text "dump-style" {- Orthogonal to the above printing styles are (possibly) some command-line flags that affect printing (often carried with the style). The most likely ones are variations on how much type info is shown. The following test decides whether or not we are actually generating code (either C or assembly), or generating interface files. ************************************************************************ * * \subsection{The @SDoc@ data type} * * ************************************************************************ -} -- | Represents a pretty-printable document. -- -- To display an 'SDoc', use 'printSDoc', 'printSDocLn', 'bufLeftRenderSDoc', -- or 'renderWithContext'. 
Avoid calling 'runSDoc' directly as it breaks the -- abstraction layer. newtype SDoc = SDoc' (SDocContext -> Doc) -- See Note [The one-shot state monad trick] in GHC.Utils.Monad {-# COMPLETE SDoc #-} pattern SDoc :: (SDocContext -> Doc) -> SDoc pattern SDoc m <- SDoc' m where SDoc m = SDoc' (oneShot m) runSDoc :: SDoc -> (SDocContext -> Doc) runSDoc (SDoc m) = m data SDocContext = SDC { sdocStyle :: !PprStyle , sdocColScheme :: !Col.Scheme , sdocLastColour :: !Col.PprColour -- ^ The most recently used colour. -- This allows nesting colours. , sdocShouldUseColor :: !Bool , sdocDefaultDepth :: !Int , sdocLineLength :: !Int , sdocCanUseUnicode :: !Bool -- ^ True if Unicode encoding is supported -- and not disable by GHC_NO_UNICODE environment variable , sdocHexWordLiterals :: !Bool , sdocPprDebug :: !Bool , sdocPrintUnicodeSyntax :: !Bool , sdocPrintCaseAsLet :: !Bool , sdocPrintTypecheckerElaboration :: !Bool , sdocPrintAxiomIncomps :: !Bool , sdocPrintExplicitKinds :: !Bool , sdocPrintExplicitCoercions :: !Bool , sdocPrintExplicitRuntimeReps :: !Bool , sdocPrintExplicitForalls :: !Bool , sdocPrintPotentialInstances :: !Bool , sdocPrintEqualityRelations :: !Bool , sdocSuppressTicks :: !Bool , sdocSuppressTypeSignatures :: !Bool , sdocSuppressTypeApplications :: !Bool , sdocSuppressIdInfo :: !Bool , sdocSuppressCoercions :: !Bool , sdocSuppressCoercionTypes :: !Bool , sdocSuppressUnfoldings :: !Bool , sdocSuppressVarKinds :: !Bool , sdocSuppressUniques :: !Bool , sdocSuppressModulePrefixes :: !Bool , sdocSuppressStgExts :: !Bool , sdocSuppressStgReps :: !Bool , sdocErrorSpans :: !Bool , sdocStarIsType :: !Bool , sdocLinearTypes :: !Bool , sdocImpredicativeTypes :: !Bool , sdocPrintTypeAbbreviations :: !Bool , sdocUnitIdForUser :: !(FastString -> SDoc) -- ^ Used to map UnitIds to more friendly "package-version:component" -- strings while pretty-printing. -- -- Use `GHC.Unit.State.pprWithUnitState` to set it. 
Users should never -- have to set it to pretty-print SDocs emitted by GHC, otherwise it's a -- bug. It's an internal field used to thread the UnitState so that the -- Outputable instance of UnitId can use it. -- -- See Note [Pretty-printing UnitId] in "GHC.Unit" for more details. -- -- Note that we use `FastString` instead of `UnitId` to avoid boring -- module inter-dependency issues. } instance IsString SDoc where fromString = text -- The lazy programmer's friend. instance Outputable SDoc where ppr = id -- | Default pretty-printing options defaultSDocContext :: SDocContext defaultSDocContext = SDC { sdocStyle = defaultDumpStyle , sdocColScheme = Col.defaultScheme , sdocLastColour = Col.colReset , sdocShouldUseColor = False , sdocDefaultDepth = 5 , sdocLineLength = 100 , sdocCanUseUnicode = False , sdocHexWordLiterals = False , sdocPprDebug = False , sdocPrintUnicodeSyntax = False , sdocPrintCaseAsLet = False , sdocPrintTypecheckerElaboration = False , sdocPrintAxiomIncomps = False , sdocPrintExplicitKinds = False , sdocPrintExplicitCoercions = False , sdocPrintExplicitRuntimeReps = False , sdocPrintExplicitForalls = False , sdocPrintPotentialInstances = False , sdocPrintEqualityRelations = False , sdocSuppressTicks = False , sdocSuppressTypeSignatures = False , sdocSuppressTypeApplications = False , sdocSuppressIdInfo = False , sdocSuppressCoercions = False , sdocSuppressCoercionTypes = False , sdocSuppressUnfoldings = False , sdocSuppressVarKinds = False , sdocSuppressUniques = False , sdocSuppressModulePrefixes = False , sdocSuppressStgExts = False , sdocSuppressStgReps = True , sdocErrorSpans = False , sdocStarIsType = False , sdocImpredicativeTypes = False , sdocLinearTypes = False , sdocPrintTypeAbbreviations = True , sdocUnitIdForUser = ftext } withPprStyle :: PprStyle -> SDoc -> SDoc {-# INLINE CONLIKE withPprStyle #-} withPprStyle sty d = SDoc $ \ctxt -> runSDoc d ctxt{sdocStyle=sty} pprDeeper :: SDoc -> SDoc pprDeeper d = SDoc $ \ctx -> case sdocStyle ctx 
of PprUser q depth c -> let deeper 0 = Pretty.text "..." deeper n = runSDoc d ctx{sdocStyle = PprUser q (PartWay (n-1)) c} in case depth of DefaultDepth -> deeper (sdocDefaultDepth ctx) PartWay n -> deeper n AllTheWay -> runSDoc d ctx _ -> runSDoc d ctx -- | Truncate a list that is longer than the current depth. pprDeeperList :: ([SDoc] -> SDoc) -> [SDoc] -> SDoc pprDeeperList f ds | null ds = f [] | otherwise = SDoc work where work ctx@SDC{sdocStyle=PprUser q depth c} | DefaultDepth <- depth = work (ctx { sdocStyle = PprUser q (PartWay (sdocDefaultDepth ctx)) c }) | PartWay 0 <- depth = Pretty.text "..." | PartWay n <- depth = let go _ [] = [] go i (d:ds) | i >= n = [text "...."] | otherwise = d : go (i+1) ds in runSDoc (f (go 0 ds)) ctx{sdocStyle = PprUser q (PartWay (n-1)) c} work other_ctx = runSDoc (f ds) other_ctx pprSetDepth :: Depth -> SDoc -> SDoc pprSetDepth depth doc = SDoc $ \ctx -> case ctx of SDC{sdocStyle=PprUser q _ c} -> runSDoc doc ctx{sdocStyle = PprUser q depth c} _ -> runSDoc doc ctx getPprStyle :: (PprStyle -> SDoc) -> SDoc {-# INLINE CONLIKE getPprStyle #-} getPprStyle df = SDoc $ \ctx -> runSDoc (df (sdocStyle ctx)) ctx sdocWithContext :: (SDocContext -> SDoc) -> SDoc {-# INLINE CONLIKE sdocWithContext #-} sdocWithContext f = SDoc $ \ctx -> runSDoc (f ctx) ctx sdocOption :: (SDocContext -> a) -> (a -> SDoc) -> SDoc {-# INLINE CONLIKE sdocOption #-} sdocOption f g = sdocWithContext (g . 
f) updSDocContext :: (SDocContext -> SDocContext) -> SDoc -> SDoc {-# INLINE CONLIKE updSDocContext #-} updSDocContext upd doc = SDoc $ \ctx -> runSDoc doc (upd ctx) qualName :: PprStyle -> QueryQualifyName qualName (PprUser q _ _) mod occ = queryQualifyName q mod occ qualName (PprDump q) mod occ = queryQualifyName q mod occ qualName _other mod _ = NameQual (moduleName mod) qualModule :: PprStyle -> QueryQualifyModule qualModule (PprUser q _ _) m = queryQualifyModule q m qualModule (PprDump q) m = queryQualifyModule q m qualModule _other _m = True qualPackage :: PprStyle -> QueryQualifyPackage qualPackage (PprUser q _ _) m = queryQualifyPackage q m qualPackage (PprDump q) m = queryQualifyPackage q m qualPackage _other _m = True queryQual :: PprStyle -> PrintUnqualified queryQual s = QueryQualify (qualName s) (qualModule s) (qualPackage s) codeStyle :: PprStyle -> Bool codeStyle (PprCode _) = True codeStyle _ = False asmStyle :: PprStyle -> Bool asmStyle (PprCode AsmStyle) = True asmStyle _other = False dumpStyle :: PprStyle -> Bool dumpStyle (PprDump {}) = True dumpStyle _other = False userStyle :: PprStyle -> Bool userStyle (PprUser {}) = True userStyle _other = False -- | Indicate if -dppr-debug mode is enabled getPprDebug :: (Bool -> SDoc) -> SDoc {-# INLINE CONLIKE getPprDebug #-} getPprDebug d = sdocWithContext $ \ctx -> d (sdocPprDebug ctx) -- | Says what to do with and without -dppr-debug ifPprDebug :: SDoc -> SDoc -> SDoc {-# INLINE CONLIKE ifPprDebug #-} ifPprDebug yes no = getPprDebug $ \dbg -> if dbg then yes else no -- | Says what to do with -dppr-debug; without, return empty whenPprDebug :: SDoc -> SDoc -- Empty for non-debug style {-# INLINE CONLIKE whenPprDebug #-} whenPprDebug d = ifPprDebug d empty -- | The analog of 'Pretty.printDoc_' for 'SDoc', which tries to make sure the -- terminal doesn't get screwed up by the ANSI color codes if an exception -- is thrown during pretty-printing. 
printSDoc :: SDocContext -> Mode -> Handle -> SDoc -> IO ()
printSDoc ctx mode handle doc =
  -- Render the document, then (via 'finally') unconditionally emit a
  -- colour-reset sequence, so that an exception thrown mid-print cannot
  -- leave the terminal stuck in the last ANSI colour used.
  Pretty.printDoc_ mode cols handle (runSDoc doc ctx)
    `finally`
      Pretty.printDoc_ mode cols handle
        (runSDoc (coloured Col.colReset empty) ctx)
  where
    cols = sdocLineLength ctx

-- | Like 'printSDoc' but appends an extra newline.
printSDocLn :: SDocContext -> Mode -> Handle -> SDoc -> IO ()
printSDocLn ctx mode handle doc =
  -- '$$ text ""' appends an empty final line, i.e. a trailing newline.
  printSDoc ctx mode handle (doc $$ text "")

-- | An efficient variant of 'printSDoc' specialized for 'LeftMode' that
-- outputs to a 'BufHandle'.
bufLeftRenderSDoc :: SDocContext -> BufHandle -> SDoc -> IO ()
bufLeftRenderSDoc ctx bufHandle doc =
  Pretty.bufLeftRender bufHandle (runSDoc doc ctx)

-- | Wrap a document so it is rendered in code style ('PprCode') with the
-- given label style (C or Asm).
pprCode :: LabelStyle -> SDoc -> SDoc
{-# INLINE CONLIKE pprCode #-}
pprCode cs d = withPprStyle (PprCode cs) d

-- | Render an 'SDoc' to a 'String' using the line length stored in the
-- given 'SDocContext' (page mode, no ribbons).
renderWithContext :: SDocContext -> SDoc -> String
renderWithContext ctx sdoc
  = let s = Pretty.style{ Pretty.mode       = PageMode False,
                          Pretty.lineLength = sdocLineLength ctx }
    in Pretty.renderStyle s $ runSDoc sdoc ctx

-- This shows an SDoc, but on one line only.  It's cheaper than a full
-- showSDoc, designed for when we're getting results like "Foo.bar"
-- and "foo{uniq strictness}" so we don't want fancy layout anyway.
showSDocOneLine :: SDocContext -> SDoc -> String
showSDocOneLine ctx d
  = let s = Pretty.style{ Pretty.mode       = OneLineMode,
                          Pretty.lineLength = sdocLineLength ctx }
    in Pretty.renderStyle s $ runSDoc d ctx

-- | Render an 'SDoc' with 'defaultSDocContext'.  "Unsafe" in the sense
-- that it ignores any session-specific rendering options.
showSDocUnsafe :: SDoc -> String
showSDocUnsafe sdoc = renderWithContext defaultSDocContext sdoc

-- | 'ppr' a value and render it with 'defaultSDocContext'.
showPprUnsafe :: Outputable a => a -> String
showPprUnsafe a = renderWithContext defaultSDocContext (ppr a)

-- | Render a heading and an indented message in dump style, then pass the
-- resulting string to the given continuation (e.g. a panic function).
pprDebugAndThen :: SDocContext -> (String -> a) -> SDoc -> SDoc -> a
pprDebugAndThen ctx cont heading pretty_msg
  = cont (renderWithContext ctx doc)
  where
    doc = withPprStyle defaultDumpStyle (sep [heading, nest 2 pretty_msg])

-- | Is the rendered document empty?  Rendered with 'sdocPprDebug' forced
-- on, so material only shown under -dppr-debug still counts as content.
isEmpty :: SDocContext -> SDoc -> Bool
isEmpty ctx sdoc = Pretty.isEmpty $ runSDoc sdoc (ctx {sdocPprDebug = True})

-- | Lift a context-independent 'Doc' into an 'SDoc'.
docToSDoc :: Doc -> SDoc
docToSDoc d = SDoc (\_ -> d)

empty    :: SDoc
char     :: Char        -> SDoc
text     :: String      -> SDoc
ftext    :: FastString  -> SDoc
ptext    :: PtrString   -> SDoc
ztext    :: FastZString -> SDoc
int      :: Int         -> SDoc
integer  :: Integer     -> SDoc
word     :: Integer     -> SDoc
float    :: Float       -> SDoc
double   :: Double      -> SDoc
rational :: Rational    -> SDoc

{-# INLINE CONLIKE empty #-}
empty = docToSDoc $ Pretty.empty
{-# INLINE CONLIKE char #-}
char c = docToSDoc $ Pretty.char c

{-# INLINE CONLIKE text #-}   -- Inline so that the RULE Pretty.text will fire
text s = docToSDoc $ Pretty.text s

{-# INLINE CONLIKE ftext #-}
ftext s = docToSDoc $ Pretty.ftext s
{-# INLINE CONLIKE ptext #-}
ptext s = docToSDoc $ Pretty.ptext s
{-# INLINE CONLIKE ztext #-}
ztext s = docToSDoc $ Pretty.ztext s
{-# INLINE CONLIKE int #-}
int n = docToSDoc $ Pretty.int n
{-# INLINE CONLIKE integer #-}
integer n = docToSDoc $ Pretty.integer n
{-# INLINE CONLIKE float #-}
float n = docToSDoc $ Pretty.float n
{-# INLINE CONLIKE double #-}
double n = docToSDoc $ Pretty.double n
{-# INLINE CONLIKE rational #-}
rational n = docToSDoc $ Pretty.rational n

-- See Note [Print Hexadecimal Literals] in GHC.Utils.Ppr
{-# INLINE CONLIKE word #-}
word n = sdocOption sdocHexWordLiterals $ \case
  True  -> docToSDoc $ Pretty.hex n
  False -> docToSDoc $ Pretty.integer n

-- | @doublePrec p n@ shows a floating point number @n@ with @p@
-- digits of precision after the decimal point.
doublePrec :: Int -> Double -> SDoc
doublePrec p n = text (showFFloat (Just p) n "")

parens, braces, brackets, quotes, quote,
        doubleQuotes, angleBrackets :: SDoc -> SDoc

{-# INLINE CONLIKE parens #-}
parens d        = SDoc $ Pretty.parens . runSDoc d
{-# INLINE CONLIKE braces #-}
braces d        = SDoc $ Pretty.braces . runSDoc d
{-# INLINE CONLIKE brackets #-}
brackets d      = SDoc $ Pretty.brackets . runSDoc d
{-# INLINE CONLIKE quote #-}
quote d         = SDoc $ Pretty.quote . runSDoc d
{-# INLINE CONLIKE doubleQuotes #-}
doubleQuotes d  = SDoc $ Pretty.doubleQuotes . runSDoc d
{-# INLINE CONLIKE angleBrackets #-}
angleBrackets d = char '<' <> d <> char '>'

-- | Conditionally parenthesise: add parens only when the 'Bool' is True.
cparen :: Bool -> SDoc -> SDoc
{-# INLINE CONLIKE cparen #-}
cparen b d = SDoc $ Pretty.maybeParens b . runSDoc d

-- 'quotes' encloses something in single quotes...
-- but it omits them if the thing begins or ends in a single quote
-- so that we don't get `foo''.  Instead we just have foo'.
quotes d = sdocOption sdocCanUseUnicode $ \case
   -- With Unicode available, use typographic quotes unconditionally.
   True  -> char '‘' <> d <> char '’'
   -- Otherwise, render the document first and inspect the resulting
   -- string to decide whether plain ASCII quotes would clash with a
   -- leading/trailing apostrophe (see comment above).
   False -> SDoc $ \sty ->
      let pp_d = runSDoc d sty
          str  = show pp_d
      in case str of
           []       -> Pretty.quotes pp_d
           '\'' : _ -> pp_d
           _ | '\'' <- last str -> pp_d
             | otherwise        -> Pretty.quotes pp_d

semi, comma, colon, equals, space, dcolon, underscore, dot, vbar :: SDoc
arrow, lollipop, larrow, darrow, arrowt, larrowt, arrowtt, larrowtt,
  lambda :: SDoc
lparen, rparen, lbrack, rbrack, lbrace, rbrace, blankLine :: SDoc

-- Symbols with a Unicode variant go through 'unicodeSyntax', which picks
-- the Unicode glyph only when both the encoding and the user option allow it.
blankLine  = docToSDoc Pretty.emptyText
dcolon     = unicodeSyntax (char '∷') (docToSDoc $ Pretty.text "::")
arrow      = unicodeSyntax (char '→') (docToSDoc $ Pretty.text "->")
lollipop   = unicodeSyntax (char '⊸') (docToSDoc $ Pretty.text "%1 ->")
larrow     = unicodeSyntax (char '←') (docToSDoc $ Pretty.text "<-")
darrow     = unicodeSyntax (char '⇒') (docToSDoc $ Pretty.text "=>")
arrowt     = unicodeSyntax (char '⤚') (docToSDoc $ Pretty.text ">-")
larrowt    = unicodeSyntax (char '⤙') (docToSDoc $ Pretty.text "-<")
arrowtt    = unicodeSyntax (char '⤜') (docToSDoc $ Pretty.text ">>-")
larrowtt   = unicodeSyntax (char '⤛') (docToSDoc $ Pretty.text "-<<")
lambda     = unicodeSyntax (char 'λ') (char '\\')
semi       = docToSDoc $ Pretty.semi
comma      = docToSDoc $ Pretty.comma
colon      = docToSDoc $ Pretty.colon
equals     = docToSDoc $ Pretty.equals
space      = docToSDoc $ Pretty.space
underscore = char '_'
dot        = char '.'
vbar   = char '|'
lparen = docToSDoc $ Pretty.lparen
rparen = docToSDoc $ Pretty.rparen
lbrack = docToSDoc $ Pretty.lbrack
rbrack = docToSDoc $ Pretty.rbrack
lbrace = docToSDoc $ Pretty.lbrace
rbrace = docToSDoc $ Pretty.rbrace

-- | Multiplicity arrow: @%d ->@ for the given multiplicity document @d@.
mulArrow :: SDoc -> SDoc
mulArrow d = text "%" <> d <+> arrow

forAllLit :: SDoc
forAllLit = unicodeSyntax (char '∀') (text "forall")

bullet :: SDoc
bullet = unicode (char '•') (char '*')

-- | Pick the first (Unicode) document only when Unicode output is possible
-- AND the user asked for Unicode syntax (-fprint-unicode-syntax);
-- otherwise fall back to the plain form.
unicodeSyntax :: SDoc -> SDoc -> SDoc
unicodeSyntax unicode plain =
   sdocOption sdocCanUseUnicode $ \can_use_unicode ->
   sdocOption sdocPrintUnicodeSyntax $ \print_unicode_syntax ->
   if can_use_unicode && print_unicode_syntax
   then unicode
   else plain

-- | Pick the first document whenever Unicode output is possible,
-- regardless of the Unicode-syntax option.
unicode :: SDoc -> SDoc -> SDoc
unicode unicode plain = sdocOption sdocCanUseUnicode $ \case
   True  -> unicode
   False -> plain

nest :: Int -> SDoc -> SDoc
-- ^ Indent 'SDoc' some specified amount
(<>) :: SDoc -> SDoc -> SDoc
-- ^ Join two 'SDoc' together horizontally without a gap
(<+>) :: SDoc -> SDoc -> SDoc
-- ^ Join two 'SDoc' together horizontally with a gap between them
($$) :: SDoc -> SDoc -> SDoc
-- ^ Join two 'SDoc' together vertically; if there is
-- no vertical overlap it "dovetails" the two onto one line
($+$) :: SDoc -> SDoc -> SDoc
-- ^ Join two 'SDoc' together vertically

{-# INLINE CONLIKE nest #-}
nest n d    = SDoc $ Pretty.nest n . runSDoc d
{-# INLINE CONLIKE (<>) #-}
(<>) d1 d2  = SDoc $ \ctx -> (Pretty.<>)  (runSDoc d1 ctx) (runSDoc d2 ctx)
{-# INLINE CONLIKE (<+>) #-}
(<+>) d1 d2 = SDoc $ \ctx -> (Pretty.<+>) (runSDoc d1 ctx) (runSDoc d2 ctx)
{-# INLINE CONLIKE ($$) #-}
($$) d1 d2  = SDoc $ \ctx -> (Pretty.$$)  (runSDoc d1 ctx) (runSDoc d2 ctx)
{-# INLINE CONLIKE ($+$) #-}
($+$) d1 d2 = SDoc $ \ctx -> (Pretty.$+$) (runSDoc d1 ctx) (runSDoc d2 ctx)

hcat :: [SDoc] -> SDoc
-- ^ Concatenate 'SDoc' horizontally
hsep :: [SDoc] -> SDoc
-- ^ Concatenate 'SDoc' horizontally with a space between each one
vcat :: [SDoc] -> SDoc
-- ^ Concatenate 'SDoc' vertically with dovetailing
sep :: [SDoc] -> SDoc
-- ^ Separate: is either like 'hsep' or like 'vcat', depending on what fits
cat :: [SDoc] -> SDoc
-- ^ Catenate: is either like 'hcat' or like 'vcat', depending on what fits
fsep :: [SDoc] -> SDoc
-- ^ A paragraph-fill combinator. It's much like sep, only it
-- keeps fitting things on one line until it can't fit any more.
fcat :: [SDoc] -> SDoc
-- ^ This behaves like 'fsep', but it uses '<>' for horizontal composition
-- rather than '<+>'

-- Inline all those wrappers to help ensure we create lists of Doc, not of SDoc
-- later applied to the same SDocContext. It helps the worker/wrapper
-- transformation extracting only the required fields from the SDocContext.
{-# INLINE CONLIKE hcat #-}
hcat ds = SDoc $ \ctx -> Pretty.hcat [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE hsep #-}
hsep ds = SDoc $ \ctx -> Pretty.hsep [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE vcat #-}
vcat ds = SDoc $ \ctx -> Pretty.vcat [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE sep #-}
sep ds  = SDoc $ \ctx -> Pretty.sep  [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE cat #-}
cat ds  = SDoc $ \ctx -> Pretty.cat  [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE fsep #-}
fsep ds = SDoc $ \ctx -> Pretty.fsep [runSDoc d ctx | d <- ds]
{-# INLINE CONLIKE fcat #-}
fcat ds = SDoc $ \ctx -> Pretty.fcat [runSDoc d ctx | d <- ds]

hang :: SDoc  -- ^ The header
     -> Int   -- ^ Amount to indent the hung body
     -> SDoc  -- ^ The hung body, indented and placed below the header
     -> SDoc
{-# INLINE CONLIKE hang #-}
hang d1 n d2 = SDoc $ \sty -> Pretty.hang (runSDoc d1 sty) n (runSDoc d2 sty)

-- | This behaves like 'hang', but does not indent the second document
-- when the header is empty.
hangNotEmpty :: SDoc -> Int -> SDoc -> SDoc
{-# INLINE CONLIKE hangNotEmpty #-}
hangNotEmpty d1 n d2 =
  SDoc $ \ctx -> Pretty.hangNotEmpty (runSDoc d1 ctx) n (runSDoc d2 ctx)

punctuate :: SDoc   -- ^ The punctuation
          -> [SDoc] -- ^ The list that will have punctuation added between
                    -- every adjacent pair of elements
          -> [SDoc] -- ^ Punctuated list
punctuate _ []     = []
punctuate p (d:ds) = go d ds
  where
    -- The punctuation is appended to every element except the last one.
    go d []     = [d]
    go d (e:es) = (d <> p) : go e es

-- | 'ppWhen b d' is @d@ when @b@ holds, otherwise 'empty'
-- ('ppUnless' is the negation).
ppWhen, ppUnless :: Bool -> SDoc -> SDoc
{-# INLINE CONLIKE ppWhen #-}
ppWhen True  doc = doc
ppWhen False _   = empty

{-# INLINE CONLIKE ppUnless #-}
ppUnless True  _   = empty
ppUnless False doc = doc

-- | Like 'ppWhen', but the condition is read from the 'SDocContext'.
{-# INLINE CONLIKE ppWhenOption #-}
ppWhenOption :: (SDocContext -> Bool) -> SDoc -> SDoc
ppWhenOption f doc = sdocOption f $ \case
   True  -> doc
   False -> empty

-- | Like 'ppUnless', but the condition is read from the 'SDocContext'.
{-# INLINE CONLIKE ppUnlessOption #-}
ppUnlessOption :: (SDocContext -> Bool) -> SDoc -> SDoc
ppUnlessOption f doc = sdocOption f $ \case
   True  -> empty
   False -> doc

-- | Apply the given colour\/style for the argument.
-- -- Only takes effect if colours are enabled. coloured :: Col.PprColour -> SDoc -> SDoc coloured col sdoc = sdocOption sdocShouldUseColor $ \case True -> SDoc $ \case ctx@SDC{ sdocLastColour = lastCol, sdocStyle = PprUser _ _ Coloured } -> let ctx' = ctx{ sdocLastColour = lastCol `mappend` col } in Pretty.zeroWidthText (Col.renderColour col) Pretty.<> runSDoc sdoc ctx' Pretty.<> Pretty.zeroWidthText (Col.renderColourAfresh lastCol) ctx -> runSDoc sdoc ctx False -> sdoc keyword :: SDoc -> SDoc keyword = coloured Col.colBold ----------------------------------------------------------------------- -- The @Outputable@ class ----------------------------------------------------------------------- -- | Class designating that some type has an 'SDoc' representation class Outputable a where ppr :: a -> SDoc instance Outputable Char where ppr c = text [c] instance Outputable Bool where ppr True = text "True" ppr False = text "False" instance Outputable Ordering where ppr LT = text "LT" ppr EQ = text "EQ" ppr GT = text "GT" instance Outputable Int8 where ppr n = integer $ fromIntegral n instance Outputable Int16 where ppr n = integer $ fromIntegral n instance Outputable Int32 where ppr n = integer $ fromIntegral n instance Outputable Int64 where ppr n = integer $ fromIntegral n instance Outputable Int where ppr n = int n instance Outputable Integer where ppr n = integer n instance Outputable Word8 where ppr n = integer $ fromIntegral n instance Outputable Word16 where ppr n = integer $ fromIntegral n instance Outputable Word32 where ppr n = integer $ fromIntegral n instance Outputable Word64 where ppr n = integer $ fromIntegral n instance Outputable Word where ppr n = integer $ fromIntegral n instance Outputable Float where ppr f = float f instance Outputable Double where ppr f = double f instance Outputable () where ppr _ = text "()" instance Outputable UTCTime where ppr = text . 
formatShow iso8601Format instance (Outputable a) => Outputable [a] where ppr xs = brackets (fsep (punctuate comma (map ppr xs))) instance (Outputable a) => Outputable (NonEmpty a) where ppr = ppr . NEL.toList instance (Outputable a) => Outputable (Set a) where ppr s = braces (fsep (punctuate comma (map ppr (Set.toList s)))) instance Outputable IntSet.IntSet where ppr s = braces (fsep (punctuate comma (map ppr (IntSet.toList s)))) instance (Outputable a, Outputable b) => Outputable (a, b) where ppr (x,y) = parens (sep [ppr x <> comma, ppr y]) instance Outputable a => Outputable (Maybe a) where ppr Nothing = text "Nothing" ppr (Just x) = text "Just" <+> ppr x instance (Outputable a, Outputable b) => Outputable (Either a b) where ppr (Left x) = text "Left" <+> ppr x ppr (Right y) = text "Right" <+> ppr y -- ToDo: may not be used instance (Outputable a, Outputable b, Outputable c) => Outputable (a, b, c) where ppr (x,y,z) = parens (sep [ppr x <> comma, ppr y <> comma, ppr z ]) instance (Outputable a, Outputable b, Outputable c, Outputable d) => Outputable (a, b, c, d) where ppr (a,b,c,d) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d]) instance (Outputable a, Outputable b, Outputable c, Outputable d, Outputable e) => Outputable (a, b, c, d, e) where ppr (a,b,c,d,e) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d <> comma, ppr e]) instance (Outputable a, Outputable b, Outputable c, Outputable d, Outputable e, Outputable f) => Outputable (a, b, c, d, e, f) where ppr (a,b,c,d,e,f) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d <> comma, ppr e <> comma, ppr f]) instance (Outputable a, Outputable b, Outputable c, Outputable d, Outputable e, Outputable f, Outputable g) => Outputable (a, b, c, d, e, f, g) where ppr (a,b,c,d,e,f,g) = parens (sep [ppr a <> comma, ppr b <> comma, ppr c <> comma, ppr d <> comma, ppr e <> comma, ppr f <> comma, ppr g]) instance Outputable FastString where ppr fs = ftext fs -- Prints 
an unadorned string, -- no double quotes or anything deriving newtype instance Outputable NonDetFastString deriving newtype instance Outputable LexicalFastString instance (Outputable key, Outputable elt) => Outputable (M.Map key elt) where ppr m = ppr (M.toList m) instance (Outputable elt) => Outputable (IM.IntMap elt) where ppr m = ppr (IM.toList m) instance Outputable Fingerprint where ppr (Fingerprint w1 w2) = text (printf "%016x%016x" w1 w2) instance Outputable a => Outputable (SCC a) where ppr (AcyclicSCC v) = text "NONREC" $$ (nest 3 (ppr v)) ppr (CyclicSCC vs) = text "REC" $$ (nest 3 (vcat (map ppr vs))) instance Outputable Serialized where ppr (Serialized the_type bytes) = int (length bytes) <+> text "of type" <+> text (show the_type) instance Outputable Extension where ppr = text . show ----------------------------------------------------------------------- -- The @OutputableP@ class ----------------------------------------------------------------------- -- Note [The OutputableP class] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- SDoc has become the common type to -- * display messages in the terminal -- * dump outputs (Cmm, Asm, C, etc.) -- * return messages to ghc-api clients -- -- SDoc is a kind of state Monad: SDoc ~ State SDocContext Doc -- I.e. to render a SDoc, a SDocContext must be provided. -- -- SDocContext contains legit rendering options (e.g., line length, color and -- unicode settings). Sadly SDocContext ended up also being used to thread -- values that were considered bothersome to thread otherwise: -- * current HomeModule: to decide if module names must be printed qualified -- * current UnitState: to print unit-ids as "packagename-version:component" -- * target platform: to render labels, instructions, etc. -- * selected backend: to display CLabel as C labels or Asm labels -- -- In fact the whole compiler session state that is DynFlags was passed in -- SDocContext and these values were retrieved from it. 
-- -- The Outputable class makes SDoc creation easy for many values by providing -- the ppr method: -- -- class Outputable a where -- ppr :: a -> SDoc -- -- Almost every type is Outputable in the compiler and it seems great because it -- is similar to the Show class. But it's a fallacious simplicity because `SDoc` -- needs a `SDocContext` to be transformed into a renderable `Doc`: who is going -- to provide the SDocContext with the correct values in it? -- -- E.g. if a SDoc is returned in an exception, how could we know the home -- module at the time it was thrown? -- -- A workaround is to pass dummy values (no home module, empty UnitState) at SDoc -- rendering time and to hope that the code that produced the SDoc has updated -- the SDocContext with meaningful values (e.g. using withPprStyle or -- pprWithUnitState). If the context isn't correctly updated, a dummy value is -- used and the printed result isn't what we expected. Note that the compiler -- doesn't help us finding spots where we need to update the SDocContext. -- -- In some cases we can't pass a dummy value because we can't create one. For -- example, how can we create a dummy Platform value? In the old days, GHC only -- supported a single Platform set when it was built, so we could use it without -- any risk of mistake. But now GHC starts supporting several Platform in the -- same session so it becomes an issue. We could be tempted to use the -- workaround described above by using "undefined" as a dummy Platform value. -- However in this case, if we forget to update it we will get a runtime -- error/crash. We could use "Maybe Platform" and die with a better error -- message at places where we really really need to know if we are on Windows or -- not, or if we use 32- or 64-bit. Still the compiler would not help us in -- finding spots where to update the context with a valid Platform. 
-- -- So finally here comes the OutputableP class: -- -- class OutputableP env a where -- pdoc :: env -> a -> SDoc -- -- OutputableP forces us to thread an environment necessary to print a value. -- For now we only use it to thread a Platform environment, so we have several -- "Outputable Platform XYZ" instances. In the future we could imagine using a -- Has class to retrieve a value from a generic environment to make the code -- more composable. E.g.: -- -- instance Has Platform env => OutputableP env XYZ where -- pdoc env a = ... (getter env :: Platform) -- -- A drawback of this approach over Outputable is that we have to thread an -- environment explicitly to use "pdoc" and it's more cumbersome. But it's the -- price to pay to have some help from the compiler to ensure that we... thread -- an environment down to the places where we need it, i.e. where SDoc are -- created (not rendered). On the other hand, it makes life easier for SDoc -- renderers as they only have to deal with pretty-printing related options in -- SDocContext. -- -- TODO: -- -- 1) we could use OutputableP to thread a UnitState and replace the Outputable -- instance of UnitId with: -- -- instance OutputableP UnitState UnitId where ... -- -- This would allow the removal of the `sdocUnitIdForUser` field. -- -- Be warned: I've tried to do it, but there are A LOT of other Outputable -- instances depending on UnitId's one. In particular: -- UnitId <- Unit <- Module <- Name <- Var <- Core.{Type,Expr} <- ... -- -- 2) Use it to pass the HomeModule (but I fear it will be as difficult as for -- UnitId). -- -- -- | Outputable class with an additional environment value -- -- See Note [The OutputableP class] class OutputableP env a where pdoc :: env -> a -> SDoc -- | Wrapper for types having a Outputable instance when an OutputableP instance -- is required. 
newtype PDoc a = PDoc a

-- | The wrapped value is printed with its plain 'Outputable' instance;
-- the environment is ignored entirely.
instance Outputable a => OutputableP env (PDoc a) where
   pdoc _ (PDoc a) = ppr a

instance OutputableP env a => OutputableP env [a] where
   pdoc env = ppr . fmap (pdoc env)

instance OutputableP env a => OutputableP env (Maybe a) where
   pdoc env = ppr . fmap (pdoc env)

instance (OutputableP env a, OutputableP env b) => OutputableP env (a, b) where
   pdoc env (a,b) = ppr (pdoc env a, pdoc env b)

instance (OutputableP env a, OutputableP env b, OutputableP env c) => OutputableP env (a, b, c) where
   pdoc env (a,b,c) = ppr (pdoc env a, pdoc env b, pdoc env c)

-- Render keys and values pairwise from the association list.
instance (OutputableP env key, OutputableP env elt) => OutputableP env (M.Map key elt) where
   pdoc env m = ppr [ (pdoc env k, pdoc env v) | (k,v) <- M.toList m ]

instance OutputableP env a => OutputableP env (SCC a) where
   pdoc env = ppr . fmap (pdoc env)

-- | An 'SDoc' is already a document: no environment is needed.
instance OutputableP env SDoc where
   pdoc _ x = x

instance (OutputableP env a) => OutputableP env (Set a) where
   pdoc env s = braces (fsep (punctuate comma (map (pdoc env) (Set.toList s))))

{-
************************************************************************
*                                                                      *
\subsection{The @OutputableBndr@ class}
*                                                                      *
************************************************************************
-}

-- | 'BindingSite' is used to tell the thing that prints binder what
-- language construct is binding the identifier.  This can be used
-- to decide how much info to print.
-- Also see Note [Binding-site specific printing] in "GHC.Core.Ppr"
data BindingSite
  = LambdaBind  -- ^ The x in   (\x. e)
  | CaseBind    -- ^ The x in   case scrut of x { (y,z) -> ... }
  | CasePatBind -- ^ The y,z in case scrut of x { (y,z) -> ... }
  | LetBind     -- ^ The x in   (let x = rhs in e)
  deriving Eq

-- | When we print a binder, we often want to print its type too.
-- The @OutputableBndr@ class encapsulates this idea.
class Outputable a => OutputableBndr a where
   pprBndr :: BindingSite -> a -> SDoc
   -- By default the binding site is ignored and the binder is printed plainly.
   pprBndr _ = ppr

   pprPrefixOcc, pprInfixOcc :: a -> SDoc
      -- Print an occurrence of the name, suitable either in the
      -- prefix position of an application, thus   (f a b) or  ((+) x)
      -- or infix position,                 thus   (a `f` b) or  (x + y)

   bndrIsJoin_maybe :: a -> Maybe Int
   bndrIsJoin_maybe _ = Nothing
      -- When pretty-printing we sometimes want to find
      -- whether the binder is a join point.  You might think
      -- we could have a function of type (a->Var), but Var
      -- isn't available yet, alas

{-
************************************************************************
*                                                                      *
\subsection{Random printing helpers}
*                                                                      *
************************************************************************
-}

-- We have 31-bit Chars and will simply use Show instances of Char and String.

-- | Special combinator for showing character literals.
pprHsChar :: Char -> SDoc
pprHsChar c =
  if c > '\x10ffff'
    then char '\\' <> text (show (fromIntegral (ord c) :: Word32))
    else text (show c)

-- | Special combinator for showing string literals.
pprHsString :: FastString -> SDoc
pprHsString = vcat . map text . showMultiLineString . unpackFS

-- | Special combinator for showing bytestring literals.
pprHsBytes :: ByteString -> SDoc
pprHsBytes bs = vcat (map text (showMultiLineString rendered)) <> char '#'
  where
    rendered = concatMap escapeByte (BS.unpack bs)

    -- Bytes outside the ASCII range are rendered as escaped decimal values.
    escapeByte :: Word8 -> String
    escapeByte w
      | isAscii c = [c]
      | otherwise = '\\' : show w
      where c = chr (fromIntegral w)

-- Postfix modifiers for unboxed literals.
-- See Note [Printing of literals in Core] in "GHC.Types.Literal".
primCharSuffix, primFloatSuffix, primDoubleSuffix, primIntSuffix, primWordSuffix,
  primInt8Suffix, primWord8Suffix, primInt16Suffix, primWord16Suffix,
  primInt32Suffix, primWord32Suffix, primInt64Suffix, primWord64Suffix :: SDoc
primCharSuffix   = char '#'
primFloatSuffix  = char '#'
primIntSuffix    = char '#'
primDoubleSuffix = text "##"
primWordSuffix   = text "##"
primInt8Suffix   = text "#8"
primWord8Suffix  = text "##8"
primInt16Suffix  = text "#16"
primWord16Suffix = text "##16"
primInt32Suffix  = text "#32"
primWord32Suffix = text "##32"
primInt64Suffix  = text "#64"
primWord64Suffix = text "##64"

-- | Special combinator for showing unboxed literals.
pprPrimChar :: Char -> SDoc
pprPrimInt, pprPrimWord,
  pprPrimInt8, pprPrimWord8, pprPrimInt16, pprPrimWord16,
  pprPrimInt32, pprPrimWord32, pprPrimInt64, pprPrimWord64 :: Integer -> SDoc
pprPrimChar   c = pprHsChar c <> primCharSuffix
pprPrimInt    i = integer i <> primIntSuffix
pprPrimInt8   i = integer i <> primInt8Suffix
pprPrimInt16  i = integer i <> primInt16Suffix
pprPrimInt32  i = integer i <> primInt32Suffix
pprPrimInt64  i = integer i <> primInt64Suffix
pprPrimWord   w = word w <> primWordSuffix
pprPrimWord8  w = word w <> primWord8Suffix
pprPrimWord16 w = word w <> primWord16Suffix
pprPrimWord32 w = word w <> primWord32Suffix
pprPrimWord64 w = word w <> primWord64Suffix

---------------------
-- | Wrap a name in parentheses when it is an operator.
pprPrefixVar :: Bool -> SDoc -> SDoc
pprPrefixVar is_operator pp_v =
  if is_operator then parens pp_v else pp_v

-- | Wrap a name in backquotes when it is not an operator.
pprInfixVar :: Bool -> SDoc -> SDoc
pprInfixVar is_operator pp_v =
  if is_operator then pp_v else char '`' <> pp_v <> char '`'

---------------------
pprFastFilePath :: FastString -> SDoc
pprFastFilePath path = text (normalise (unpackFS path))

-- | Normalise, escape and render a string representing a path
--
-- e.g. "c:\\whatever"
pprFilePathString :: FilePath -> SDoc
pprFilePathString path = doubleQuotes (text (escapeBackslashes (normalise path)))
  where
    -- Double every backslash so the rendered path survives re-reading.
    escapeBackslashes :: String -> String
    escapeBackslashes = concatMap (\c -> if c == '\\' then "\\\\" else [c])

{-
************************************************************************
*                                                                      *
\subsection{Other helper functions}
*                                                                      *
************************************************************************
-}

pprWithCommas :: (a -> SDoc) -- ^ The pretty printing function to use
              -> [a]         -- ^ The things to be pretty printed
              -> SDoc        -- ^ 'SDoc' where the things have been pretty printed,
                             --   comma-separated and finally packed into a paragraph.
pprWithCommas pp = fsep . punctuate comma . map pp

pprWithBars :: (a -> SDoc) -- ^ The pretty printing function to use
            -> [a]         -- ^ The things to be pretty printed
            -> SDoc        -- ^ 'SDoc' where the things have been pretty printed,
                           --   bar-separated and finally packed into a paragraph.
pprWithBars pp = fsep . intersperse vbar . map pp

-- | Returns the separated concatenation of the pretty printed things.
interppSP :: Outputable a => [a] -> SDoc
interppSP = sep . map ppr

-- | Returns the comma-separated concatenation of the pretty printed things.
interpp'SP :: Outputable a => [a] -> SDoc
interpp'SP = interpp'SP' ppr

interpp'SP' :: (a -> SDoc) -> [a] -> SDoc
interpp'SP' f = sep . punctuate comma . map f

-- | Returns the comma-separated concatenation of the quoted pretty printed things.
--
-- > [x,y,z] ==> `x', `y', `z'
pprQuotedList :: Outputable a => [a] -> SDoc
pprQuotedList = quotedList .
map ppr

quotedList :: [SDoc] -> SDoc
quotedList ds = fsep $ punctuate comma $ map quotes ds

quotedListWithOr :: [SDoc] -> SDoc
-- [x,y,z] ==> `x', `y' or `z'
quotedListWithOr ds = case ds of
  (_:_:_) -> quotedList (init ds) <+> text "or" <+> quotes (last ds)
  _       -> quotedList ds

quotedListWithNor :: [SDoc] -> SDoc
-- [x,y,z] ==> `x', `y' nor `z'
quotedListWithNor ds = case ds of
  (_:_:_) -> quotedList (init ds) <+> text "nor" <+> quotes (last ds)
  _       -> quotedList ds

{-
************************************************************************
*                                                                      *
\subsection{Printing numbers verbally}
*                                                                      *
************************************************************************
-}

intWithCommas :: Integral a => a -> SDoc
-- Prints a big integer with commas, eg 345,821
intWithCommas n
  | n < 0          = char '-' <> intWithCommas (-n)
  | thousands == 0 = int (fromIntegral units)
  | otherwise      = intWithCommas thousands <> comma <> padding <> int (fromIntegral units)
  where
    (thousands, units) = n `quotRem` 1000
    -- Zero-pad the low three digits when a higher group precedes them.
    padding
      | units >= 100 = empty
      | units >= 10  = char '0'
      | otherwise    = text "00"

-- | Converts an integer to a verbal index:
--
-- > speakNth 1 = text "first"
-- > speakNth 5 = text "fifth"
-- > speakNth 21 = text "21st"
speakNth :: Int -> SDoc
speakNth 1 = text "first"
speakNth 2 = text "second"
speakNth 3 = text "third"
speakNth 4 = text "fourth"
speakNth 5 = text "fifth"
speakNth 6 = text "sixth"
speakNth n = int n <> text suffix
  where
    suffix
      | n <= 20   = "th" -- 11,12,13 are non-std
      | otherwise = case n `rem` 10 of
          1 -> "st"
          2 -> "nd"
          3 -> "rd"
          _ -> "th"

-- | Converts an integer to a verbal multiplicity:
--
-- > speakN 0 = text "none"
-- > speakN 5 = text "five"
-- > speakN 10 = text "10"
speakN :: Int -> SDoc
speakN n = case n of
  0 -> text "none"  -- E.g. "they have none"
  1 -> text "one"   -- E.g. "they have one"
  2 -> text "two"
  3 -> text "three"
  4 -> text "four"
  5 -> text "five"
  6 -> text "six"
  _ -> int n

-- | Converts an integer and object description to a statement about the
-- multiplicity of those objects:
--
-- > speakNOf 0 (text "melon") = text "no melons"
-- > speakNOf 1 (text "melon") = text "one melon"
-- > speakNOf 3 (text "melon") = text "three melons"
speakNOf :: Int -> SDoc -> SDoc
speakNOf 0 d = text "no" <+> d <> char 's'
speakNOf 1 d = text "one" <+> d            -- E.g. "one argument"
speakNOf n d = speakN n <+> d <> char 's'  -- E.g. "three arguments"

-- | Determines the pluralisation suffix appropriate for the length of a list:
--
-- > plural [] = char 's'
-- > plural ["Hello"] = empty
-- > plural ["Hello", "World"] = char 's'
plural :: [a] -> SDoc
plural xs = case xs of
  [_] -> empty -- a bit frightening, but there you are
  _   -> char 's'

-- | Determines the singular verb suffix appropriate for the length of a list:
--
-- > singular [] = empty
-- > singular ["Hello"] = char 's'
-- > singular ["Hello", "World"] = empty
singular :: [a] -> SDoc
singular xs = case xs of
  [_] -> char 's'
  _   -> empty

-- | Determines the form of to be appropriate for the length of a list:
--
-- > isOrAre [] = text "are"
-- > isOrAre ["Hello"] = text "is"
-- > isOrAre ["Hello", "World"] = text "are"
isOrAre :: [a] -> SDoc
isOrAre xs = case xs of
  [_] -> text "is"
  _   -> text "are"

-- | Determines the form of to do appropriate for the length of a list:
--
-- > doOrDoes [] = text "do"
-- > doOrDoes ["Hello"] = text "does"
-- > doOrDoes ["Hello", "World"] = text "do"
doOrDoes :: [a] -> SDoc
doOrDoes xs = case xs of
  [_] -> text "does"
  _   -> text "do"

-- | Determines the form of possessive appropriate for the length of a list:
--
-- > itsOrTheir [x]   = text "its"
-- > itsOrTheir [x,y] = text "their"
-- > itsOrTheir []    = text "their"  -- probably avoid this
itsOrTheir :: [a] -> SDoc
itsOrTheir xs = case xs of
  [_] -> text "its"
  _   -> text "their"

-- | Determines the form of subject appropriate for the
length of a list: -- -- > thisOrThese [x] = text "This" -- > thisOrThese [x,y] = text "These" -- > thisOrThese [] = text "These" -- probably avoid this thisOrThese :: [a] -> SDoc thisOrThese [_] = text "This" thisOrThese _ = text "These" -- | @"has"@ or @"have"@ depending on the length of a list. hasOrHave :: [a] -> SDoc hasOrHave [_] = text "has" hasOrHave _ = text "have" ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Panic.hs0000644000000000000000000002507414472400113020332 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP Project, Glasgow University, 1992-2000 -} {-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables, LambdaCase #-} -- | Defines basic functions for printing error messages. -- -- It's hard to put these functions anywhere else without causing -- some unnecessary loops in the module dependency graph. module GHC.Utils.Panic ( GhcException(..) , showGhcException , showGhcExceptionUnsafe , throwGhcException , throwGhcExceptionIO , handleGhcException , pgmError , panic , pprPanic , assertPanic , assertPprPanic , assertPpr , assertPprM , massertPpr , sorry , panicDoc , sorryDoc , pgmErrorDoc , cmdLineError , cmdLineErrorIO , callStackDoc , prettyCallStackDoc , Exception.Exception(..) , showException , safeShowException , try , tryMost , throwTo , withSignalHandlers ) where import GHC.Prelude import GHC.Stack import GHC.Utils.Outputable import GHC.Utils.Panic.Plain import GHC.Utils.Constants import GHC.Utils.Exception as Exception import Control.Monad.IO.Class import qualified Control.Monad.Catch as MC import Control.Concurrent import Data.Typeable ( cast ) import System.IO.Unsafe #if !defined(mingw32_HOST_OS) import System.Posix.Signals as S #endif #if defined(mingw32_HOST_OS) import GHC.ConsoleHandler as S #endif import System.Mem.Weak ( deRefWeak ) -- | GHC's own exception type -- error messages all take the form: -- -- @ -- \: \ -- @ -- -- If the location is on the command line, or in GHC itself, then -- \="ghc". 
All of the error types below correspond to -- a \ of "ghc", except for ProgramError (where the string is -- assumed to contain a location already, so we don't print one). data GhcException -- | Some other fatal signal (SIGHUP,SIGTERM) = Signal Int -- | Prints the short usage msg after the error | UsageError String -- | A problem with the command line arguments, but don't print usage. | CmdLineError String -- | The 'impossible' happened. | Panic String | PprPanic String SDoc -- | The user tickled something that's known not to work yet, -- but we're not counting it as a bug. | Sorry String | PprSorry String SDoc -- | An installation problem. | InstallationError String -- | An error in the user's code, probably. | ProgramError String | PprProgramError String SDoc instance Exception GhcException where fromException (SomeException e) | Just ge <- cast e = Just ge | Just pge <- cast e = Just $ case pge of PlainSignal n -> Signal n PlainUsageError str -> UsageError str PlainCmdLineError str -> CmdLineError str PlainPanic str -> Panic str PlainSorry str -> Sorry str PlainInstallationError str -> InstallationError str PlainProgramError str -> ProgramError str | otherwise = Nothing instance Show GhcException where showsPrec _ e = showGhcExceptionUnsafe e -- | Show an exception as a string. showException :: Exception e => e -> String showException = show -- | Show an exception which can possibly throw other exceptions. -- Used when displaying exception thrown within TH code. safeShowException :: Exception e => e -> IO String safeShowException e = do -- ensure the whole error message is evaluated inside try r <- try (return $! forceList (showException e)) case r of Right msg -> return msg Left e' -> safeShowException (e' :: SomeException) where forceList [] = [] forceList xs@(x : xt) = x `seq` forceList xt `seq` xs -- | Append a description of the given exception to this string. 
--
-- Note that this uses 'defaultSDocContext', which doesn't use the options
-- set by the user via DynFlags.
showGhcExceptionUnsafe :: GhcException -> ShowS
showGhcExceptionUnsafe = showGhcException defaultSDocContext

-- | Append a description of the given exception to this string.
showGhcException :: SDocContext -> GhcException -> ShowS
showGhcException ctx = showPlainGhcException . \case
  Signal n -> PlainSignal n
  UsageError str -> PlainUsageError str
  CmdLineError str -> PlainCmdLineError str
  Panic str -> PlainPanic str
  Sorry str -> PlainSorry str
  InstallationError str -> PlainInstallationError str
  ProgramError str -> PlainProgramError str

  PprPanic str sdoc -> PlainPanic $
      concat [str, "\n\n", renderWithContext ctx sdoc]
  -- Fix: PprSorry was previously mapped to PlainProgramError, which dropped
  -- the "sorry! (unimplemented feature or known bug)" framing that
  -- 'showPlainGhcException' gives the plain Sorry case.  Mirror the Sorry
  -- mapping above so both Sorry variants render the same way.
  PprSorry str sdoc -> PlainSorry $
      concat [str, "\n\n", renderWithContext ctx sdoc]
  PprProgramError str sdoc -> PlainProgramError $
      concat [str, "\n\n", renderWithContext ctx sdoc]

throwGhcException :: GhcException -> a
throwGhcException = Exception.throw

throwGhcExceptionIO :: GhcException -> IO a
throwGhcExceptionIO = Exception.throwIO

handleGhcException :: ExceptionMonad m => (GhcException -> m a) -> m a -> m a
handleGhcException = MC.handle

-- | Throw an exception saying "bug in GHC" with a callstack
pprPanic :: HasCallStack => String -> SDoc -> a
pprPanic s doc = panicDoc s (doc $$ callStackDoc)

-- | Throw an exception saying "bug in GHC"
panicDoc :: String -> SDoc -> a
panicDoc x doc = throwGhcException (PprPanic x doc)

-- | Throw an exception saying "this isn't finished yet"
sorryDoc :: String -> SDoc -> a
sorryDoc x doc = throwGhcException (PprSorry x doc)

-- | Throw an exception saying "bug in pgm being compiled" (used for unusual program errors)
pgmErrorDoc :: String -> SDoc -> a
pgmErrorDoc x doc = throwGhcException (PprProgramError x doc)

-- | Like try, but pass through UserInterrupt and Panic exceptions.
-- Used when we want soft failures when reading interface files, for example.
-- TODO: I'm not entirely sure if this is catching what we really want to catch tryMost :: IO a -> IO (Either SomeException a) tryMost action = do r <- try action case r of Left se -> case fromException se of -- Some GhcException's we rethrow, Just (Signal _) -> throwIO se Just (Panic _) -> throwIO se -- others we return Just _ -> return (Left se) Nothing -> case fromException se of -- All IOExceptions are returned Just (_ :: IOException) -> return (Left se) -- Anything else is rethrown Nothing -> throwIO se Right v -> return (Right v) -- | We use reference counting for signal handlers {-# NOINLINE signalHandlersRefCount #-} #if !defined(mingw32_HOST_OS) signalHandlersRefCount :: MVar (Word, Maybe (S.Handler,S.Handler ,S.Handler,S.Handler)) #else signalHandlersRefCount :: MVar (Word, Maybe S.Handler) #endif signalHandlersRefCount = unsafePerformIO $ newMVar (0,Nothing) -- | Temporarily install standard signal handlers for catching ^C, which just -- throw an exception in the current thread. withSignalHandlers :: ExceptionMonad m => m a -> m a withSignalHandlers act = do main_thread <- liftIO myThreadId wtid <- liftIO (mkWeakThreadId main_thread) let interrupt = do r <- deRefWeak wtid case r of Nothing -> return () Just t -> throwTo t UserInterrupt #if !defined(mingw32_HOST_OS) let installHandlers = do let installHandler' a b = installHandler a b Nothing hdlQUIT <- installHandler' sigQUIT (Catch interrupt) hdlINT <- installHandler' sigINT (Catch interrupt) -- see #3656; in the future we should install these automatically for -- all Haskell programs in the same way that we install a ^C handler. 
let fatal_signal n = throwTo main_thread (Signal (fromIntegral n)) hdlHUP <- installHandler' sigHUP (Catch (fatal_signal sigHUP)) hdlTERM <- installHandler' sigTERM (Catch (fatal_signal sigTERM)) return (hdlQUIT,hdlINT,hdlHUP,hdlTERM) let uninstallHandlers (hdlQUIT,hdlINT,hdlHUP,hdlTERM) = do _ <- installHandler sigQUIT hdlQUIT Nothing _ <- installHandler sigINT hdlINT Nothing _ <- installHandler sigHUP hdlHUP Nothing _ <- installHandler sigTERM hdlTERM Nothing return () #else -- GHC 6.3+ has support for console events on Windows -- NOTE: running GHCi under a bash shell for some reason requires -- you to press Ctrl-Break rather than Ctrl-C to provoke -- an interrupt. Ctrl-C is getting blocked somewhere, I don't know -- why --SDM 17/12/2004 let sig_handler ControlC = interrupt sig_handler Break = interrupt sig_handler _ = return () let installHandlers = installHandler (Catch sig_handler) let uninstallHandlers = installHandler -- directly install the old handler #endif -- install signal handlers if necessary let mayInstallHandlers = liftIO $ modifyMVar_ signalHandlersRefCount $ \case (0,Nothing) -> do hdls <- installHandlers return (1,Just hdls) (c,oldHandlers) -> return (c+1,oldHandlers) -- uninstall handlers if necessary let mayUninstallHandlers = liftIO $ modifyMVar_ signalHandlersRefCount $ \case (1,Just hdls) -> do _ <- uninstallHandlers hdls return (0,Nothing) (c,oldHandlers) -> return (c-1,oldHandlers) mayInstallHandlers act `MC.finally` mayUninstallHandlers callStackDoc :: HasCallStack => SDoc callStackDoc = prettyCallStackDoc callStack prettyCallStackDoc :: CallStack -> SDoc prettyCallStackDoc cs = hang (text "Call stack:") 4 (vcat $ map text $ lines (prettyCallStack cs)) -- | Panic with an assertion failure, recording the given file and -- line number. Should typically be accessed with the ASSERT family of macros assertPprPanic :: HasCallStack => SDoc -> a assertPprPanic msg = withFrozenCallStack (pprPanic "ASSERT failed!" 
msg) assertPpr :: HasCallStack => Bool -> SDoc -> a -> a {-# INLINE assertPpr #-} assertPpr cond msg a = if debugIsOn && not cond then withFrozenCallStack (assertPprPanic msg) else a massertPpr :: (HasCallStack, Applicative m) => Bool -> SDoc -> m () {-# INLINE massertPpr #-} massertPpr cond msg = withFrozenCallStack (assertPpr cond msg (pure ())) assertPprM :: (HasCallStack, Monad m) => m Bool -> SDoc -> m () {-# INLINE assertPprM #-} assertPprM mcond msg = withFrozenCallStack (mcond >>= \cond -> massertPpr cond msg) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Panic/Plain.hs0000644000000000000000000001223114472400113021364 0ustar0000000000000000{-# LANGUAGE ScopedTypeVariables, LambdaCase #-} -- | Defines a simple exception type and utilities to throw it. The -- 'PlainGhcException' type is a subset of the 'GHC.Utils.Panic.GhcException' -- type. It omits the exception constructors that involve -- pretty-printing via 'GHC.Utils.Outputable.SDoc'. -- -- There are two reasons for this: -- -- 1. To avoid import cycles / use of boot files. "GHC.Utils.Outputable" has -- many transitive dependencies. To throw exceptions from these -- modules, the functions here can be used without introducing import -- cycles. -- -- 2. To reduce the number of modules that need to be compiled to -- object code when loading GHC into GHCi. See #13101 module GHC.Utils.Panic.Plain ( PlainGhcException(..) , showPlainGhcException , panic, sorry, pgmError , cmdLineError, cmdLineErrorIO , assertPanic , assert, assertM, massert ) where import GHC.Settings.Config import GHC.Utils.Constants import GHC.Utils.Exception as Exception import GHC.Stack import GHC.Prelude import System.IO.Unsafe -- | This type is very similar to 'GHC.Utils.Panic.GhcException', but it omits -- the constructors that involve pretty-printing via -- 'GHC.Utils.Outputable.SDoc'. Due to the implementation of 'fromException' -- for 'GHC.Utils.Panic.GhcException', this type can be caught as a -- 'GHC.Utils.Panic.GhcException'. 
-- -- Note that this should only be used for throwing exceptions, not for -- catching, as 'GHC.Utils.Panic.GhcException' will not be converted to this -- type when catching. data PlainGhcException -- | Some other fatal signal (SIGHUP,SIGTERM) = PlainSignal Int -- | Prints the short usage msg after the error | PlainUsageError String -- | A problem with the command line arguments, but don't print usage. | PlainCmdLineError String -- | The 'impossible' happened. | PlainPanic String -- | The user tickled something that's known not to work yet, -- but we're not counting it as a bug. | PlainSorry String -- | An installation problem. | PlainInstallationError String -- | An error in the user's code, probably. | PlainProgramError String instance Exception PlainGhcException instance Show PlainGhcException where showsPrec _ e = showPlainGhcException e -- | Short usage information to display when we are given the wrong cmd line arguments. short_usage :: String short_usage = "Usage: For basic information, try the `--help' option." -- | Append a description of the given exception to this string. showPlainGhcException :: PlainGhcException -> ShowS showPlainGhcException = \case PlainSignal n -> showString "signal: " . shows n PlainUsageError str -> showString str . showChar '\n' . showString short_usage PlainCmdLineError str -> showString str PlainPanic s -> panicMsg (showString s) PlainSorry s -> sorryMsg (showString s) PlainInstallationError str -> showString str PlainProgramError str -> showString str where sorryMsg :: ShowS -> ShowS sorryMsg s = showString "sorry! (unimplemented feature or known bug)\n" . showString (" GHC version " ++ cProjectVersion ++ ":\n\t") . s . showString "\n" panicMsg :: ShowS -> ShowS panicMsg s = showString "panic! (the 'impossible' happened)\n" . showString (" GHC version " ++ cProjectVersion ++ ":\n\t") . s . showString "\n\n" . 
showString "Please report this as a GHC bug: https://www.haskell.org/ghc/reportabug\n" throwPlainGhcException :: PlainGhcException -> a throwPlainGhcException = Exception.throw -- | Panics and asserts. panic, sorry, pgmError :: String -> a panic x = unsafeDupablePerformIO $ do stack <- ccsToStrings =<< getCurrentCCS x if null stack then throwPlainGhcException (PlainPanic x) else throwPlainGhcException (PlainPanic (x ++ '\n' : renderStack stack)) sorry x = throwPlainGhcException (PlainSorry x) pgmError x = throwPlainGhcException (PlainProgramError x) cmdLineError :: String -> a cmdLineError = unsafeDupablePerformIO . cmdLineErrorIO cmdLineErrorIO :: String -> IO a cmdLineErrorIO x = do stack <- ccsToStrings =<< getCurrentCCS x if null stack then throwPlainGhcException (PlainCmdLineError x) else throwPlainGhcException (PlainCmdLineError (x ++ '\n' : renderStack stack)) -- | Throw a failed assertion exception for a given filename and line number. assertPanic :: String -> Int -> a assertPanic file line = Exception.throw (Exception.AssertionFailed ("ASSERT failed! 
file " ++ file ++ ", line " ++ show line)) assertPanic' :: HasCallStack => a assertPanic' = let doc = unlines $ fmap (" "++) $ lines (prettyCallStack callStack) in Exception.throw (Exception.AssertionFailed ("ASSERT failed!\n" ++ withFrozenCallStack doc)) assert :: HasCallStack => Bool -> a -> a {-# INLINE assert #-} assert cond a = if debugIsOn && not cond then withFrozenCallStack assertPanic' else a massert :: (HasCallStack, Applicative m) => Bool -> m () {-# INLINE massert #-} massert cond = withFrozenCallStack (assert cond (pure ())) assertM :: (HasCallStack, Monad m) => m Bool -> m () {-# INLINE assertM #-} assertM mcond = withFrozenCallStack (mcond >>= massert) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Ppr.hs0000644000000000000000000011721114472400113020034 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE MagicHash #-} ----------------------------------------------------------------------------- -- | -- Module : GHC.Utils.Ppr -- Copyright : (c) The University of Glasgow 2001 -- License : BSD-style (see the file LICENSE) -- -- Maintainer : David Terei -- Stability : stable -- Portability : portable -- -- John Hughes's and Simon Peyton Jones's Pretty Printer Combinators -- -- Based on /The Design of a Pretty-printing Library/ -- in Advanced Functional Programming, -- Johan Jeuring and Erik Meijer (eds), LNCS 925 -- -- ----------------------------------------------------------------------------- {- Note [Differences between libraries/pretty and compiler/GHC/Utils/Ppr.hs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For historical reasons, there are two different copies of `Pretty` in the GHC source tree: * `libraries/pretty` is a submodule containing https://github.com/haskell/pretty. This is the `pretty` library as released on hackage. It is used by several other libraries in the GHC source tree (e.g. template-haskell and Cabal). * `compiler/GHC/Utils/Ppr.hs` (this module). It is used by GHC only. 
There is an ongoing effort in https://github.com/haskell/pretty/issues/1 and https://gitlab.haskell.org/ghc/ghc/issues/10735 to try to get rid of GHC's copy of Pretty. Currently, GHC's copy of Pretty resembles pretty-1.1.2.0, with the following major differences: * GHC's copy uses `Faststring` for performance reasons. * GHC's copy has received a backported bugfix for #12227, which was released as pretty-1.1.3.4 ("Remove harmful $! forcing in beside", https://github.com/haskell/pretty/pull/35). Other differences are minor. Both copies define some extra functions and instances not defined in the other copy. To see all differences, do this in a ghc git tree: $ cd libraries/pretty $ git checkout v1.1.2.0 $ cd - $ vimdiff compiler/GHC/Utils/Ppr.hs \ libraries/pretty/src/Text/PrettyPrint/HughesPJ.hs For parity with `pretty-1.1.2.1`, the following two `pretty` commits would have to be backported: * "Resolve foldr-strictness stack overflow bug" (307b8173f41cd776eae8f547267df6d72bff2d68) * "Special-case reduce for horiz/vert" (c57c7a9dfc49617ba8d6e4fcdb019a3f29f1044c) This has not been done sofar, because these commits seem to cause more allocation in the compiler (see thomie's comments in https://github.com/haskell/pretty/pull/9). 
-} module GHC.Utils.Ppr ( -- * The document type Doc, TextDetails(..), -- * Constructing documents -- ** Converting values into documents char, text, ftext, ptext, ztext, sizedText, zeroWidthText, emptyText, int, integer, float, double, rational, hex, -- ** Simple derived documents semi, comma, colon, space, equals, lparen, rparen, lbrack, rbrack, lbrace, rbrace, -- ** Wrapping documents in delimiters parens, brackets, braces, quotes, quote, doubleQuotes, maybeParens, -- ** Combining documents empty, (<>), (<+>), hcat, hsep, ($$), ($+$), vcat, sep, cat, fsep, fcat, nest, hang, hangNotEmpty, punctuate, -- * Predicates on documents isEmpty, -- * Rendering documents -- ** Rendering with a particular style Style(..), style, renderStyle, Mode(..), -- ** General rendering fullRender, txtPrinter, -- ** GHC-specific rendering printDoc, printDoc_, bufLeftRender -- performance hack ) where import GHC.Prelude hiding (error) import GHC.Utils.BufHandle import GHC.Data.FastString import GHC.Utils.Panic.Plain import System.IO import Numeric (showHex) --for a RULES import GHC.Base ( unpackCString#, unpackNBytes#, Int(..) ) import GHC.Ptr ( Ptr(..) ) -- --------------------------------------------------------------------------- -- The Doc calculus {- Laws for $$ ~~~~~~~~~~~ (x $$ y) $$ z = x $$ (y $$ z) empty $$ x = x x $$ empty = x ...ditto $+$... Laws for <> ~~~~~~~~~~~ (x <> y) <> z = x <> (y <> z) empty <> x = empty x <> empty = x ...ditto <+>... Laws for text ~~~~~~~~~~~~~ text s <> text t = text (s++t) text "" <> x = x, if x non-empty ** because of law n6, t2 only holds if x doesn't ** start with `nest'. Laws for nest ~~~~~~~~~~~~~ nest 0 x = x nest k (nest k' x) = nest (k+k') x nest k (x <> y) = nest k x <> nest k y nest k (x $$ y) = nest k x $$ nest k y nest k empty = empty x <> nest k y = x <> y, if x non-empty ** Note the side condition on ! It is this that ** makes it OK for empty to be a left unit for <>. 
Miscellaneous ~~~~~~~~~~~~~ (text s <> x) $$ y = text s <> ((text "" <> x) $$ nest (-length s) y) (x $$ y) <> z = x $$ (y <> z) if y non-empty Laws for list versions ~~~~~~~~~~~~~~~~~~~~~~ sep (ps++[empty]++qs) = sep (ps ++ qs) ...ditto hsep, hcat, vcat, fill... nest k (sep ps) = sep (map (nest k) ps) ...ditto hsep, hcat, vcat, fill... Laws for oneLiner ~~~~~~~~~~~~~~~~~ oneLiner (nest k p) = nest k (oneLiner p) oneLiner (x <> y) = oneLiner x <> oneLiner y You might think that the following version of would be neater: <3 NO> (text s <> x) $$ y = text s <> ((empty <> x)) $$ nest (-length s) y) But it doesn't work, for if x=empty, we would have text s $$ y = text s <> (empty $$ nest (-length s) y) = text s <> nest (-length s) y -} -- --------------------------------------------------------------------------- -- Operator fixity infixl 6 <> infixl 6 <+> infixl 5 $$, $+$ -- --------------------------------------------------------------------------- -- The Doc data type -- | The abstract type of documents. -- A Doc represents a *set* of layouts. A Doc with -- no occurrences of Union or NoDoc represents just one layout. data Doc = Empty -- empty | NilAbove Doc -- text "" $$ x | TextBeside !TextDetails {-# UNPACK #-} !Int Doc -- text s <> x | Nest {-# UNPACK #-} !Int Doc -- nest k x | Union Doc Doc -- ul `union` ur | NoDoc -- The empty set of documents | Beside Doc Bool Doc -- True <=> space between | Above Doc Bool Doc -- True <=> never overlap {- Here are the invariants: 1) The argument of NilAbove is never Empty. Therefore a NilAbove occupies at least two lines. 2) The argument of @TextBeside@ is never @Nest@. 3) The layouts of the two arguments of @Union@ both flatten to the same string. 4) The arguments of @Union@ are either @TextBeside@, or @NilAbove@. 5) A @NoDoc@ may only appear on the first line of the left argument of an union. Therefore, the right argument of an union can never be equivalent to the empty set (@NoDoc@). 
6) An empty document is always represented by @Empty@. It can't be hidden inside a @Nest@, or a @Union@ of two @Empty@s. 7) The first line of every layout in the left argument of @Union@ is longer than the first line of any layout in the right argument. (1) ensures that the left argument has a first line. In view of (3), this invariant means that the right argument must have at least two lines. Notice the difference between * NoDoc (no documents) * Empty (one empty document; no height and no width) * text "" (a document containing the empty string; one line high, but has no width) -} -- | RDoc is a "reduced GDoc", guaranteed not to have a top-level Above or Beside. type RDoc = Doc -- | The TextDetails data type -- -- A TextDetails represents a fragment of text that will be -- output at some point. data TextDetails = Chr {-# UNPACK #-} !Char -- ^ A single Char fragment | Str String -- ^ A whole String fragment | PStr FastString -- a hashed string | ZStr FastZString -- a z-encoded string | LStr {-# UNPACK #-} !PtrString -- a '\0'-terminated array of bytes | RStr {-# UNPACK #-} !Int {-# UNPACK #-} !Char -- a repeated character (e.g., ' ') instance Show Doc where showsPrec _ doc cont = fullRender (mode style) (lineLength style) (ribbonsPerLine style) txtPrinter cont doc -- --------------------------------------------------------------------------- -- Values and Predicates on GDocs and TextDetails -- | A document of height and width 1, containing a literal character. char :: Char -> Doc char c = textBeside_ (Chr c) 1 Empty -- | A document of height 1 containing a literal string. -- 'text' satisfies the following laws: -- -- * @'text' s '<>' 'text' t = 'text' (s'++'t)@ -- -- * @'text' \"\" '<>' x = x@, if @x@ non-empty -- -- The side condition on the last law is necessary because @'text' \"\"@ -- has height 1, while 'empty' has no height. 
text :: String -> Doc text s = textBeside_ (Str s) (length s) Empty {-# NOINLINE [0] text #-} -- Give the RULE a chance to fire -- It must wait till after phase 1 when -- the unpackCString first is manifested -- RULE that turns (text "abc") into (ptext (A# "abc"#)) to avoid the -- intermediate packing/unpacking of the string. {-# RULES "text/str" forall a. text (unpackCString# a) = ptext (mkPtrString# a) #-} {-# RULES "text/unpackNBytes#" forall p n. text (unpackNBytes# p n) = ptext (PtrString (Ptr p) (I# n)) #-} -- Empty strings are desugared into [] (not "unpackCString#..."), hence they are -- not matched by the text/str rule above. {-# RULES "text/[]" text [] = emptyText #-} ftext :: FastString -> Doc ftext s = textBeside_ (PStr s) (lengthFS s) Empty ptext :: PtrString -> Doc ptext s = textBeside_ (LStr s) (lengthPS s) Empty ztext :: FastZString -> Doc ztext s = textBeside_ (ZStr s) (lengthFZS s) Empty -- | Some text with any width. (@text s = sizedText (length s) s@) sizedText :: Int -> String -> Doc sizedText l s = textBeside_ (Str s) l Empty -- | Some text, but without any width. Use for non-printing text -- such as a HTML or Latex tags zeroWidthText :: String -> Doc zeroWidthText = sizedText 0 -- | Empty text (one line high but no width). (@emptyText = text ""@) emptyText :: Doc emptyText = sizedText 0 [] -- defined as a CAF. Sharing occurs especially via the text/[] rule above. -- Every use of `text ""` in user code should be replaced with this. -- | The empty document, with no height and no width. -- 'empty' is the identity for '<>', '<+>', '$$' and '$+$', and anywhere -- in the argument list for 'sep', 'hcat', 'hsep', 'vcat', 'fcat' etc. empty :: Doc empty = Empty -- | Returns 'True' if the document is empty isEmpty :: Doc -> Bool isEmpty Empty = True isEmpty _ = False {- Q: What is the reason for negative indentation (i.e. argument to indent is < 0) ? A: This indicates an error in the library client's code. 
If we compose a <> b, and the first line of b is more indented than some other lines of b, the law (<> eats nests) may cause the pretty printer to produce an invalid layout: doc |0123345 ------------------ d1 |a...| d2 |...b| |c...| d1<>d2 |ab..| c|....| Consider a <> b, let `s' be the length of the last line of `a', `k' the indentation of the first line of b, and `k0' the indentation of the left-most line b_i of b. The produced layout will have negative indentation if `k - k0 > s', as the first line of b will be put on the (s+1)th column, effectively translating b horizontally by (k-s). Now if the i^th line of b has an indentation k0 < (k-s), it is translated out-of-page, causing `negative indentation'. -} semi :: Doc -- ^ A ';' character comma :: Doc -- ^ A ',' character colon :: Doc -- ^ A ':' character space :: Doc -- ^ A space character equals :: Doc -- ^ A '=' character lparen :: Doc -- ^ A '(' character rparen :: Doc -- ^ A ')' character lbrack :: Doc -- ^ A '[' character rbrack :: Doc -- ^ A ']' character lbrace :: Doc -- ^ A '{' character rbrace :: Doc -- ^ A '}' character semi = char ';' comma = char ',' colon = char ':' space = char ' ' equals = char '=' lparen = char '(' rparen = char ')' lbrack = char '[' rbrack = char ']' lbrace = char '{' rbrace = char '}' spaceText, nlText :: TextDetails spaceText = Chr ' ' nlText = Chr '\n' int :: Int -> Doc -- ^ @int n = text (show n)@ integer :: Integer -> Doc -- ^ @integer n = text (show n)@ float :: Float -> Doc -- ^ @float n = text (show n)@ double :: Double -> Doc -- ^ @double n = text (show n)@ rational :: Rational -> Doc -- ^ @rational n = text (show n)@ hex :: Integer -> Doc -- ^ See Note [Print Hexadecimal Literals] int n = text (show n) integer n = text (show n) float n = text (show n) double n = text (show n) rational n = text (show n) hex n = text ('0' : 'x' : padded) where str = showHex n "" strLen = max 1 (length str) len = 2 ^ (ceiling (logBase 2 (fromIntegral strLen :: Double)) :: Int) padded = 
replicate (len - strLen) '0' ++ str parens :: Doc -> Doc -- ^ Wrap document in @(...)@ brackets :: Doc -> Doc -- ^ Wrap document in @[...]@ braces :: Doc -> Doc -- ^ Wrap document in @{...}@ quotes :: Doc -> Doc -- ^ Wrap document in @\'...\'@ quote :: Doc -> Doc doubleQuotes :: Doc -> Doc -- ^ Wrap document in @\"...\"@ quotes p = char '`' <> p <> char '\'' quote p = char '\'' <> p doubleQuotes p = char '"' <> p <> char '"' parens p = char '(' <> p <> char ')' brackets p = char '[' <> p <> char ']' braces p = char '{' <> p <> char '}' {- Note [Print Hexadecimal Literals] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Relevant discussions: * Phabricator: https://phabricator.haskell.org/D4465 * GHC Trac: https://gitlab.haskell.org/ghc/ghc/issues/14872 There is a flag `-dhex-word-literals` that causes literals of type `Word#` or `Word64#` to be displayed in hexadecimal instead of decimal when dumping GHC core. It also affects the presentation of these in GHC's error messages. Additionally, the hexadecimal encoding of these numbers is zero-padded so that its length is a power of two. As an example of what this does, consider the following haskell file `Literals.hs`: module Literals where alpha :: Int alpha = 100 + 200 beta :: Word -> Word beta x = x + div maxBound 255 + div 0xFFFFFFFF 255 + 0x0202 We get the following dumped core when we compile on a 64-bit machine with ghc -O2 -fforce-recomp -ddump-simpl -dsuppress-all -dhex-word-literals literals.hs: ==================== Tidy Core ==================== ... omitted for brevity ... -- RHS size: {terms: 2, types: 0, coercions: 0, joins: 0/0} alpha alpha = I# 300# -- RHS size: {terms: 12, types: 3, coercions: 0, joins: 0/0} beta beta = \ x_aYE -> case x_aYE of { W# x#_a1v0 -> W# (plusWord# (plusWord# (plusWord# x#_a1v0 0x0101010101010101##) 0x01010101##) 0x0202##) } Notice that the word literals are in hexadecimals and that they have been padded with zeroes so that their lengths are 16, 8, and 4, respectively. 
-} -- | Apply 'parens' to 'Doc' if boolean is true. maybeParens :: Bool -> Doc -> Doc maybeParens False = id maybeParens True = parens -- --------------------------------------------------------------------------- -- Structural operations on GDocs -- | Perform some simplification of a built up @GDoc@. reduceDoc :: Doc -> RDoc reduceDoc (Beside p g q) = p `seq` g `seq` (beside p g $! reduceDoc q) reduceDoc (Above p g q) = p `seq` g `seq` (above p g $! reduceDoc q) reduceDoc p = p -- | List version of '<>'. hcat :: [Doc] -> Doc hcat = reduceAB . foldr (beside_' False) empty -- | List version of '<+>'. hsep :: [Doc] -> Doc hsep = reduceAB . foldr (beside_' True) empty -- | List version of '$$'. vcat :: [Doc] -> Doc vcat = reduceAB . foldr (above_' False) empty -- | Nest (or indent) a document by a given number of positions -- (which may also be negative). 'nest' satisfies the laws: -- -- * @'nest' 0 x = x@ -- -- * @'nest' k ('nest' k' x) = 'nest' (k+k') x@ -- -- * @'nest' k (x '<>' y) = 'nest' k z '<>' 'nest' k y@ -- -- * @'nest' k (x '$$' y) = 'nest' k x '$$' 'nest' k y@ -- -- * @'nest' k 'empty' = 'empty'@ -- -- * @x '<>' 'nest' k y = x '<>' y@, if @x@ non-empty -- -- The side condition on the last law is needed because -- 'empty' is a left identity for '<>'. nest :: Int -> Doc -> Doc nest k p = mkNest k (reduceDoc p) -- | @hang d1 n d2 = sep [d1, nest n d2]@ hang :: Doc -> Int -> Doc -> Doc hang d1 n d2 = sep [d1, nest n d2] -- | Apply 'hang' to the arguments if the first 'Doc' is not empty. hangNotEmpty :: Doc -> Int -> Doc -> Doc hangNotEmpty d1 n d2 = if isEmpty d1 then d2 else hang d1 n d2 -- | @punctuate p [d1, ... dn] = [d1 \<> p, d2 \<> p, ... 
dn-1 \<> p, dn]@ punctuate :: Doc -> [Doc] -> [Doc] punctuate _ [] = [] punctuate p (x:xs) = go x xs where go y [] = [y] go y (z:zs) = (y <> p) : go z zs -- mkNest checks for Nest's invariant that it doesn't have an Empty inside it mkNest :: Int -> Doc -> Doc mkNest k _ | k `seq` False = undefined mkNest k (Nest k1 p) = mkNest (k + k1) p mkNest _ NoDoc = NoDoc mkNest _ Empty = Empty mkNest 0 p = p mkNest k p = nest_ k p -- mkUnion checks for an empty document mkUnion :: Doc -> Doc -> Doc mkUnion Empty _ = Empty mkUnion p q = p `union_` q beside_' :: Bool -> Doc -> Doc -> Doc beside_' _ p Empty = p beside_' g p q = Beside p g q above_' :: Bool -> Doc -> Doc -> Doc above_' _ p Empty = p above_' g p q = Above p g q reduceAB :: Doc -> Doc reduceAB (Above Empty _ q) = q reduceAB (Beside Empty _ q) = q reduceAB doc = doc nilAbove_ :: RDoc -> RDoc nilAbove_ = NilAbove -- Arg of a TextBeside is always an RDoc textBeside_ :: TextDetails -> Int -> RDoc -> RDoc textBeside_ = TextBeside nest_ :: Int -> RDoc -> RDoc nest_ = Nest union_ :: RDoc -> RDoc -> RDoc union_ = Union -- --------------------------------------------------------------------------- -- Vertical composition @$$@ -- | Above, except that if the last line of the first argument stops -- at least one position before the first line of the second begins, -- these two lines are overlapped. For example: -- -- > text "hi" $$ nest 5 (text "there") -- -- lays out as -- -- > hi there -- -- rather than -- -- > hi -- > there -- -- '$$' is associative, with identity 'empty', and also satisfies -- -- * @(x '$$' y) '<>' z = x '$$' (y '<>' z)@, if @y@ non-empty. -- ($$) :: Doc -> Doc -> Doc p $$ q = above_ p False q -- | Above, with no overlapping. -- '$+$' is associative, with identity 'empty'. 
($+$) :: Doc -> Doc -> Doc p $+$ q = above_ p True q above_ :: Doc -> Bool -> Doc -> Doc above_ p _ Empty = p above_ Empty _ q = q above_ p g q = Above p g q above :: Doc -> Bool -> RDoc -> RDoc above (Above p g1 q1) g2 q2 = above p g1 (above q1 g2 q2) above p@(Beside{}) g q = aboveNest (reduceDoc p) g 0 (reduceDoc q) above p g q = aboveNest p g 0 (reduceDoc q) -- Specification: aboveNest p g k q = p $g$ (nest k q) aboveNest :: RDoc -> Bool -> Int -> RDoc -> RDoc aboveNest _ _ k _ | k `seq` False = undefined aboveNest NoDoc _ _ _ = NoDoc aboveNest (p1 `Union` p2) g k q = aboveNest p1 g k q `union_` aboveNest p2 g k q aboveNest Empty _ k q = mkNest k q aboveNest (Nest k1 p) g k q = nest_ k1 (aboveNest p g (k - k1) q) -- p can't be Empty, so no need for mkNest aboveNest (NilAbove p) g k q = nilAbove_ (aboveNest p g k q) aboveNest (TextBeside s sl p) g k q = textBeside_ s sl rest where !k1 = k - sl rest = case p of Empty -> nilAboveNest g k1 q _ -> aboveNest p g k1 q aboveNest (Above {}) _ _ _ = error "aboveNest Above" aboveNest (Beside {}) _ _ _ = error "aboveNest Beside" -- Specification: text s <> nilaboveNest g k q -- = text s <> (text "" $g$ nest k q) nilAboveNest :: Bool -> Int -> RDoc -> RDoc nilAboveNest _ k _ | k `seq` False = undefined nilAboveNest _ _ Empty = Empty -- Here's why the "text s <>" is in the spec! nilAboveNest g k (Nest k1 q) = nilAboveNest g (k + k1) q nilAboveNest g k q | not g && k > 0 -- No newline if no overlap = textBeside_ (RStr k ' ') k q | otherwise -- Put them really above = nilAbove_ (mkNest k q) -- --------------------------------------------------------------------------- -- Horizontal composition @<>@ -- We intentionally avoid Data.Monoid.(<>) here due to interactions of -- Data.Monoid.(<>) and (<+>). See -- http://www.haskell.org/pipermail/libraries/2011-November/017066.html -- | Beside. -- '<>' is associative, with identity 'empty'. 
(<>) :: Doc -> Doc -> Doc p <> q = beside_ p False q -- | Beside, separated by space, unless one of the arguments is 'empty'. -- '<+>' is associative, with identity 'empty'. (<+>) :: Doc -> Doc -> Doc p <+> q = beside_ p True q beside_ :: Doc -> Bool -> Doc -> Doc beside_ p _ Empty = p beside_ Empty _ q = q beside_ p g q = Beside p g q -- Specification: beside g p q = p q beside :: Doc -> Bool -> RDoc -> RDoc beside NoDoc _ _ = NoDoc beside (p1 `Union` p2) g q = beside p1 g q `union_` beside p2 g q beside Empty _ q = q beside (Nest k p) g q = nest_ k $! beside p g q beside p@(Beside p1 g1 q1) g2 q2 | g1 == g2 = beside p1 g1 $! beside q1 g2 q2 | otherwise = beside (reduceDoc p) g2 q2 beside p@(Above{}) g q = let !d = reduceDoc p in beside d g q beside (NilAbove p) g q = nilAbove_ $! beside p g q beside (TextBeside s sl p) g q = textBeside_ s sl rest where rest = case p of Empty -> nilBeside g q _ -> beside p g q -- Specification: text "" <> nilBeside g p -- = text "" p nilBeside :: Bool -> RDoc -> RDoc nilBeside _ Empty = Empty -- Hence the text "" in the spec nilBeside g (Nest _ p) = nilBeside g p nilBeside g p | g = textBeside_ spaceText 1 p | otherwise = p -- --------------------------------------------------------------------------- -- Separate, @sep@ -- Specification: sep ps = oneLiner (hsep ps) -- `union` -- vcat ps -- | Either 'hsep' or 'vcat'. sep :: [Doc] -> Doc sep = sepX True -- Separate with spaces -- | Either 'hcat' or 'vcat'. 
cat :: [Doc] -> Doc cat = sepX False -- Don't sepX :: Bool -> [Doc] -> Doc sepX _ [] = empty sepX x (p:ps) = sep1 x (reduceDoc p) 0 ps -- Specification: sep1 g k ys = sep (x : map (nest k) ys) -- = oneLiner (x nest k (hsep ys)) -- `union` x $$ nest k (vcat ys) sep1 :: Bool -> RDoc -> Int -> [Doc] -> RDoc sep1 _ _ k _ | k `seq` False = undefined sep1 _ NoDoc _ _ = NoDoc sep1 g (p `Union` q) k ys = sep1 g p k ys `union_` aboveNest q False k (reduceDoc (vcat ys)) sep1 g Empty k ys = mkNest k (sepX g ys) sep1 g (Nest n p) k ys = nest_ n (sep1 g p (k - n) ys) sep1 _ (NilAbove p) k ys = nilAbove_ (aboveNest p False k (reduceDoc (vcat ys))) sep1 g (TextBeside s sl p) k ys = textBeside_ s sl (sepNB g p (k - sl) ys) sep1 _ (Above {}) _ _ = error "sep1 Above" sep1 _ (Beside {}) _ _ = error "sep1 Beside" -- Specification: sepNB p k ys = sep1 (text "" <> p) k ys -- Called when we have already found some text in the first item -- We have to eat up nests sepNB :: Bool -> Doc -> Int -> [Doc] -> Doc sepNB g (Nest _ p) k ys = sepNB g p k ys -- Never triggered, because of invariant (2) sepNB g Empty k ys = oneLiner (nilBeside g (reduceDoc rest)) `mkUnion` -- XXX: TODO: PRETTY: Used to use True here (but GHC used False...) nilAboveNest False k (reduceDoc (vcat ys)) where rest | g = hsep ys | otherwise = hcat ys sepNB g p k ys = sep1 g p k ys -- --------------------------------------------------------------------------- -- @fill@ -- | \"Paragraph fill\" version of 'cat'. fcat :: [Doc] -> Doc fcat = fill False -- | \"Paragraph fill\" version of 'sep'. fsep :: [Doc] -> Doc fsep = fill True -- Specification: -- -- fill g docs = fillIndent 0 docs -- -- fillIndent k [] = [] -- fillIndent k [p] = p -- fillIndent k (p1:p2:ps) = -- oneLiner p1 fillIndent (k + length p1 + g ? 
1 : 0) -- (remove_nests (oneLiner p2) : ps) -- `Union` -- (p1 $*$ nest (-k) (fillIndent 0 ps)) -- -- $*$ is defined for layouts (not Docs) as -- layout1 $*$ layout2 | hasMoreThanOneLine layout1 = layout1 $$ layout2 -- | otherwise = layout1 $+$ layout2 fill :: Bool -> [Doc] -> RDoc fill _ [] = empty fill g (p:ps) = fill1 g (reduceDoc p) 0 ps fill1 :: Bool -> RDoc -> Int -> [Doc] -> Doc fill1 _ _ k _ | k `seq` False = undefined fill1 _ NoDoc _ _ = NoDoc fill1 g (p `Union` q) k ys = fill1 g p k ys `union_` aboveNest q False k (fill g ys) fill1 g Empty k ys = mkNest k (fill g ys) fill1 g (Nest n p) k ys = nest_ n (fill1 g p (k - n) ys) fill1 g (NilAbove p) k ys = nilAbove_ (aboveNest p False k (fill g ys)) fill1 g (TextBeside s sl p) k ys = textBeside_ s sl (fillNB g p (k - sl) ys) fill1 _ (Above {}) _ _ = error "fill1 Above" fill1 _ (Beside {}) _ _ = error "fill1 Beside" fillNB :: Bool -> Doc -> Int -> [Doc] -> Doc fillNB _ _ k _ | k `seq` False = undefined fillNB g (Nest _ p) k ys = fillNB g p k ys -- Never triggered, because of invariant (2) fillNB _ Empty _ [] = Empty fillNB g Empty k (Empty:ys) = fillNB g Empty k ys fillNB g Empty k (y:ys) = fillNBE g k y ys fillNB g p k ys = fill1 g p k ys fillNBE :: Bool -> Int -> Doc -> [Doc] -> Doc fillNBE g k y ys = nilBeside g (fill1 g ((elideNest . oneLiner . reduceDoc) y) k' ys) -- XXX: TODO: PRETTY: Used to use True here (but GHC used False...) `mkUnion` nilAboveNest False k (fill g (y:ys)) where k' = if g then k - 1 else k elideNest :: Doc -> Doc elideNest (Nest _ d) = d elideNest d = d -- --------------------------------------------------------------------------- -- Selecting the best layout best :: Int -- Line length -> Int -- Ribbon length -> RDoc -> RDoc -- No unions in here! 
best w0 r = get w0 where get :: Int -- (Remaining) width of line -> Doc -> Doc get w _ | w == 0 && False = undefined get _ Empty = Empty get _ NoDoc = NoDoc get w (NilAbove p) = nilAbove_ (get w p) get w (TextBeside s sl p) = textBeside_ s sl (get1 w sl p) get w (Nest k p) = nest_ k (get (w - k) p) get w (p `Union` q) = nicest w r (get w p) (get w q) get _ (Above {}) = error "best get Above" get _ (Beside {}) = error "best get Beside" get1 :: Int -- (Remaining) width of line -> Int -- Amount of first line already eaten up -> Doc -- This is an argument to TextBeside => eat Nests -> Doc -- No unions in here! get1 w _ _ | w == 0 && False = undefined get1 _ _ Empty = Empty get1 _ _ NoDoc = NoDoc get1 w sl (NilAbove p) = nilAbove_ (get (w - sl) p) get1 w sl (TextBeside t tl p) = textBeside_ t tl (get1 w (sl + tl) p) get1 w sl (Nest _ p) = get1 w sl p get1 w sl (p `Union` q) = nicest1 w r sl (get1 w sl p) (get1 w sl q) get1 _ _ (Above {}) = error "best get1 Above" get1 _ _ (Beside {}) = error "best get1 Beside" nicest :: Int -> Int -> Doc -> Doc -> Doc nicest !w !r = nicest1 w r 0 nicest1 :: Int -> Int -> Int -> Doc -> Doc -> Doc nicest1 !w !r !sl p q | fits ((w `min` r) - sl) p = p | otherwise = q fits :: Int -- Space available -> Doc -> Bool -- True if *first line* of Doc fits in space available fits n _ | n < 0 = False fits _ NoDoc = False fits _ Empty = True fits _ (NilAbove _) = True fits n (TextBeside _ sl p) = fits (n - sl) p fits _ (Above {}) = error "fits Above" fits _ (Beside {}) = error "fits Beside" fits _ (Union {}) = error "fits Union" fits _ (Nest {}) = error "fits Nest" -- | @first@ returns its first argument if it is non-empty, otherwise its second. 
first :: Doc -> Doc -> Doc first p q | nonEmptySet p = p -- unused, because (get OneLineMode) is unused | otherwise = q nonEmptySet :: Doc -> Bool nonEmptySet NoDoc = False nonEmptySet (_ `Union` _) = True nonEmptySet Empty = True nonEmptySet (NilAbove _) = True nonEmptySet (TextBeside _ _ p) = nonEmptySet p nonEmptySet (Nest _ p) = nonEmptySet p nonEmptySet (Above {}) = error "nonEmptySet Above" nonEmptySet (Beside {}) = error "nonEmptySet Beside" -- @oneLiner@ returns the one-line members of the given set of @GDoc@s. oneLiner :: Doc -> Doc oneLiner NoDoc = NoDoc oneLiner Empty = Empty oneLiner (NilAbove _) = NoDoc oneLiner (TextBeside s sl p) = textBeside_ s sl (oneLiner p) oneLiner (Nest k p) = nest_ k (oneLiner p) oneLiner (p `Union` _) = oneLiner p oneLiner (Above {}) = error "oneLiner Above" oneLiner (Beside {}) = error "oneLiner Beside" -- --------------------------------------------------------------------------- -- Rendering -- | A rendering style. data Style = Style { mode :: Mode -- ^ The rendering mode , lineLength :: Int -- ^ Length of line, in chars , ribbonsPerLine :: Float -- ^ Ratio of line length to ribbon length } -- | The default style (@mode=PageMode False, lineLength=100, ribbonsPerLine=1.5@). style :: Style style = Style { lineLength = 100, ribbonsPerLine = 1.5, mode = PageMode False } -- | Rendering mode. data Mode = PageMode { asciiSpace :: Bool } -- ^ Normal | ZigZagMode -- ^ With zig-zag cuts | LeftMode -- ^ No indentation, infinitely long lines | OneLineMode -- ^ All on one line -- | Can we output an ascii space character for spaces? -- Mostly true, but not for e.g. UTF16 -- See Note [putSpaces optimizations] for why we bother -- to track this. hasAsciiSpace :: Mode -> Bool hasAsciiSpace mode = case mode of PageMode asciiSpace -> asciiSpace _ -> False -- | Render the @Doc@ to a String using the given @Style@. 
renderStyle :: Style -> Doc -> String renderStyle s = fullRender (mode s) (lineLength s) (ribbonsPerLine s) txtPrinter "" -- | Default TextDetails printer txtPrinter :: TextDetails -> String -> String txtPrinter (Chr c) s = c:s txtPrinter (Str s1) s2 = s1 ++ s2 txtPrinter (PStr s1) s2 = unpackFS s1 ++ s2 txtPrinter (ZStr s1) s2 = zString s1 ++ s2 txtPrinter (LStr s1) s2 = unpackPtrString s1 ++ s2 txtPrinter (RStr n c) s2 = replicate n c ++ s2 -- | The general rendering interface. fullRender :: Mode -- ^ Rendering mode -> Int -- ^ Line length -> Float -- ^ Ribbons per line -> (TextDetails -> a -> a) -- ^ What to do with text -> a -- ^ What to do at the end -> Doc -- ^ The document -> a -- ^ Result fullRender OneLineMode _ _ txt end doc = easyDisplay spaceText (\_ y -> y) txt end (reduceDoc doc) fullRender LeftMode _ _ txt end doc = easyDisplay nlText first txt end (reduceDoc doc) fullRender m lineLen ribbons txt rest doc = display m lineLen ribbonLen txt rest doc' where doc' = best bestLineLen ribbonLen (reduceDoc doc) bestLineLen, ribbonLen :: Int ribbonLen = round (fromIntegral lineLen / ribbons) bestLineLen = case m of ZigZagMode -> maxBound _ -> lineLen easyDisplay :: TextDetails -> (Doc -> Doc -> Doc) -> (TextDetails -> a -> a) -> a -> Doc -> a easyDisplay nlSpaceText choose txt end = lay where lay NoDoc = error "easyDisplay: NoDoc" lay (Union p q) = lay (choose p q) lay (Nest _ p) = lay p lay Empty = end lay (NilAbove p) = nlSpaceText `txt` lay p lay (TextBeside s _ p) = s `txt` lay p lay (Above {}) = error "easyDisplay Above" lay (Beside {}) = error "easyDisplay Beside" display :: Mode -> Int -> Int -> (TextDetails -> a -> a) -> a -> Doc -> a display m !page_width !ribbon_width txt end doc = case page_width - ribbon_width of { gap_width -> case gap_width `quot` 2 of { shift -> let lay k _ | k `seq` False = undefined lay k (Nest k1 p) = lay (k + k1) p lay _ Empty = end lay k (NilAbove p) = nlText `txt` lay k p lay k (TextBeside s sl p) = case m of ZigZagMode | 
k >= gap_width -> nlText `txt` ( Str (replicate shift '/') `txt` ( nlText `txt` lay1 (k - shift) s sl p )) | k < 0 -> nlText `txt` ( Str (replicate shift '\\') `txt` ( nlText `txt` lay1 (k + shift) s sl p )) _ -> lay1 k s sl p lay _ (Above {}) = error "display lay Above" lay _ (Beside {}) = error "display lay Beside" lay _ NoDoc = error "display lay NoDoc" lay _ (Union {}) = error "display lay Union" lay1 !k s !sl p = let !r = k + sl in indent k (s `txt` lay2 r p) lay2 k _ | k `seq` False = undefined lay2 k (NilAbove p) = nlText `txt` lay k p lay2 k (TextBeside s sl p) = s `txt` lay2 (k + sl) p lay2 k (Nest _ p) = lay2 k p lay2 _ Empty = end lay2 _ (Above {}) = error "display lay2 Above" lay2 _ (Beside {}) = error "display lay2 Beside" lay2 _ NoDoc = error "display lay2 NoDoc" lay2 _ (Union {}) = error "display lay2 Union" indent !n r = RStr n ' ' `txt` r in lay 0 doc }} printDoc :: Mode -> Int -> Handle -> Doc -> IO () -- printDoc adds a newline to the end printDoc mode cols hdl doc = printDoc_ mode cols hdl (doc $$ text "") {- Note [putSpaces optimizations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using dump flags a lot of what we are dumping ends up being whitespace. This is especially true for Core/Stg dumps. Enough so that it's worth optimizing. Especially in the common case of writing to an UTF8 or similarly encoded file where space is equal to ascii space we use hPutBuf to write a preallocated buffer to the file. This avoids a fair bit of allocation. For other cases we fall back to the old and slow path for simplicity. 
-} printDoc_ :: Mode -> Int -> Handle -> Doc -> IO () -- printDoc_ does not add a newline at the end, so that -- successive calls can output stuff on the same line -- Rather like putStr vs putStrLn printDoc_ LeftMode _ hdl doc = do { printLeftRender hdl doc; hFlush hdl } printDoc_ mode pprCols hdl doc = do { fullRender mode pprCols 1.5 put done doc ; hFlush hdl } where put (Chr c) next = hPutChar hdl c >> next put (Str s) next = hPutStr hdl s >> next put (PStr s) next = hPutStr hdl (unpackFS s) >> next -- NB. not hPutFS, we want this to go through -- the I/O library's encoding layer. (#3398) put (ZStr s) next = hPutFZS hdl s >> next put (LStr s) next = hPutPtrString hdl s >> next put (RStr n c) next | c == ' ' = putSpaces n >> next | otherwise = hPutStr hdl (replicate n c) >> next putSpaces n -- If we use ascii spaces we are allowed to use hPutBuf -- See Note [putSpaces optimizations] | hasAsciiSpace mode , n <= 100 = hPutBuf hdl (Ptr spaces') n | hasAsciiSpace mode , n > 100 = hPutBuf hdl (Ptr spaces') 100 >> putSpaces (n-100) | otherwise = hPutStr hdl (replicate n ' ') done = return () -- hPutChar hdl '\n' -- 100 spaces, so we avoid the allocation of replicate n ' ' spaces' = " "# -- some versions of hPutBuf will barf if the length is zero hPutPtrString :: Handle -> PtrString -> IO () hPutPtrString _handle (PtrString _ 0) = return () hPutPtrString handle (PtrString a l) = hPutBuf handle a l -- Printing output in LeftMode is performance critical: it's used when -- dumping C and assembly output, so we allow ourselves a few dirty -- hacks: -- -- (1) we specialise fullRender for LeftMode with IO output. -- -- (2) we add a layer of buffering on top of Handles. Handles -- don't perform well with lots of hPutChars, which is mostly -- what we're doing here, because Handles have to be thread-safe -- and async exception-safe. We only have a single thread and don't -- care about exceptions, so we add a layer of fast buffering -- over the Handle interface. 
printLeftRender :: Handle -> Doc -> IO () printLeftRender hdl doc = do b <- newBufHandle hdl bufLeftRender b doc bFlush b bufLeftRender :: BufHandle -> Doc -> IO () bufLeftRender b doc = layLeft b (reduceDoc doc) layLeft :: BufHandle -> Doc -> IO () layLeft b _ | b `seq` False = undefined -- make it strict in b layLeft _ NoDoc = error "layLeft: NoDoc" layLeft b (Union p q) = layLeft b $! first p q layLeft b (Nest _ p) = layLeft b $! p layLeft b Empty = bPutChar b '\n' layLeft b (NilAbove p) = p `seq` (bPutChar b '\n' >> layLeft b p) layLeft b (TextBeside s _ p) = s `seq` (put b s >> layLeft b p) where put b _ | b `seq` False = undefined put b (Chr c) = bPutChar b c put b (Str s) = bPutStr b s put b (PStr s) = bPutFS b s put b (ZStr s) = bPutFZS b s put b (LStr s) = bPutPtrString b s put b (RStr n c) = bPutReplicate b n c layLeft _ _ = panic "layLeft: Unhandled case" -- Define error=panic, for easier comparison with libraries/pretty. error :: String -> a error = panic ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Ppr/Colour.hs0000644000000000000000000000534314472400113021301 0ustar0000000000000000module GHC.Utils.Ppr.Colour where import GHC.Prelude import Data.Maybe (fromMaybe) import GHC.Data.Bool import Data.Semigroup as Semi -- | A colour\/style for use with 'coloured'. newtype PprColour = PprColour { renderColour :: String } instance Semi.Semigroup PprColour where PprColour s1 <> PprColour s2 = PprColour (s1 <> s2) -- | Allow colours to be combined (e.g. bold + red); -- In case of conflict, right side takes precedence. 
instance Monoid PprColour where
  mempty = PprColour mempty
  mappend = (<>)

-- | Render a colour with a reset prefixed, so it does not inherit any
-- styling still in effect on the terminal.
renderColourAfresh :: PprColour -> String
renderColourAfresh c = renderColour (colReset `mappend` c)

-- | Build a colour from a raw SGR parameter string;
-- "\27[" ++ s ++ "m" is the ANSI escape sequence ESC[<s>m.
colCustom :: String -> PprColour
colCustom "" = mempty
colCustom s  = PprColour ("\27[" ++ s ++ "m")

colReset :: PprColour
colReset = colCustom "0"

colBold :: PprColour
colBold = colCustom ";1"

colBlackFg :: PprColour
colBlackFg = colCustom "30"

colRedFg :: PprColour
colRedFg = colCustom "31"

colGreenFg :: PprColour
colGreenFg = colCustom "32"

colYellowFg :: PprColour
colYellowFg = colCustom "33"

colBlueFg :: PprColour
colBlueFg = colCustom "34"

colMagentaFg :: PprColour
colMagentaFg = colCustom "35"

colCyanFg :: PprColour
colCyanFg = colCustom "36"

colWhiteFg :: PprColour
colWhiteFg = colCustom "37"

-- | One colour per diagnostic component of GHC's output.
data Scheme =
  Scheme
  { sHeader  :: PprColour
  , sMessage :: PprColour
  , sWarning :: PprColour
  , sError   :: PprColour
  , sFatal   :: PprColour
  , sMargin  :: PprColour
  }

-- | The scheme used when the user supplies no overrides.
defaultScheme :: Scheme
defaultScheme =
  Scheme
  { sHeader  = mempty
  , sMessage = colBold
  , sWarning = colBold `mappend` colMagentaFg
  , sError   = colBold `mappend` colRedFg
  , sFatal   = colBold `mappend` colRedFg
  , sMargin  = colBold `mappend` colBlueFg
  }

-- | Parse the colour scheme from a string (presumably from the @GHC_COLORS@
-- environment variable).
-- | Parse a colour-scheme specification, typically the value of the
-- @GHC_COLORS@ environment variable.  The literal strings @"always"@,
-- @"auto"@ and @"never"@ only set the colour-enablement flag; anything
-- else is treated as a @:@-separated list of @key=value@ overrides
-- applied on top of the scheme passed in.
parseScheme :: String -> (OverridingBool, Scheme) -> (OverridingBool, Scheme)
parseScheme "always" (_, cs) = (Always, cs)
parseScheme "auto"   (_, cs) = (Auto,   cs)
parseScheme "never"  (_, cs) = (Never,  cs)
parseScheme input    (b, cs) =
  ( b
  , Scheme { sHeader  = pick "header"  sHeader
           , sMessage = pick "message" sMessage
           , sWarning = pick "warning" sWarning
           , sError   = pick "error"   sError
           , sFatal   = pick "fatal"   sFatal
           , sMargin  = pick "margin"  sMargin
           }
  )
  where
    -- Use the user-supplied colour for this key when present,
    -- otherwise keep the field from the incoming scheme.
    pick key field = fromMaybe (field cs) (lookup key table)

    -- Break a string into the chunks separated by the given character.
    splitOn :: Char -> String -> [String]
    splitOn c s = case break (== c) s of
                    (chunk, [])       -> [chunk]
                    (chunk, _ : rest) -> chunk : splitOn c rest

    -- One (key, colour) pair per well-formed "key=value" chunk; chunks
    -- without an '=' contribute nothing.
    table = [ (key, colCustom val)
            | chunk <- splitOn ':' input
            , let (key, rest) = break (== '=') chunk
            , '=' : val <- [rest]
            ]
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/TmpFs.hs0000644000000000000000000003460414472400113020330 0ustar0000000000000000
{-# LANGUAGE CPP #-}

-- | Temporary file-system management
module GHC.Utils.TmpFs
  ( TmpFs
  , initTmpFs
  , forkTmpFsFrom
  , mergeTmpFsInto
  , FilesToClean(..)
  , emptyFilesToClean
  , TempFileLifetime(..)
  , TempDir (..)
  , cleanTempDirs
  , cleanTempFiles
  , cleanCurrentModuleTempFiles
  , addFilesToClean
  , changeTempFilesLifetime
  , newTempName
  , newTempLibName
  , newTempDir
  , withSystemTempDirectory
  , withTempDirectory
  ) where

import GHC.Prelude

import GHC.Utils.Error
import GHC.Utils.Outputable
import GHC.Utils.Logger
import GHC.Utils.Misc
import GHC.Utils.Exception as Exception
import GHC.Driver.Phases

import Data.List (partition)
import qualified Data.Set as Set
import Data.Set (Set)
import qualified Data.Map as Map
import Data.Map (Map)
import Data.IORef
import System.Directory
import System.FilePath
import System.IO.Error

#if !defined(mingw32_HOST_OS)
import qualified System.Posix.Internals
#endif

-- | Temporary file-system
data TmpFs = TmpFs
  { tmp_dirs_to_clean :: IORef (Map FilePath FilePath)
      -- ^ Maps system temporary directory (passed via settings or DynFlags)
      -- to an actual temporary directory for this process.
      --
      -- It's a Map probably to support changing the system temporary
      -- directory over time.
      --
      -- Shared with forked TmpFs.
  , tmp_next_suffix :: IORef Int
      -- ^ The next available suffix to uniquely name a temp file, updated
      -- atomically.
      --
      -- Shared with forked TmpFs.
  , tmp_files_to_clean :: IORef FilesToClean
      -- ^ Files to clean (per session or per module)
      --
      -- Not shared with forked TmpFs.
  }

-- | A collection of files that must be deleted before ghc exits.
data FilesToClean = FilesToClean
  { ftcGhcSession :: !(Set FilePath)
      -- ^ Files that will be deleted at the end of runGhc(T)
  , ftcCurrentModule :: !(Set FilePath)
      -- ^ Files that will be deleted the next time
      -- 'cleanCurrentModuleTempFiles' is called, or otherwise at the end of
      -- the session.
  }

-- | Used when a temp file is created. This determines which component Set of
-- FilesToClean will get the temp file
data TempFileLifetime
  = TFL_CurrentModule
  -- ^ A file with lifetime TFL_CurrentModule will be cleaned up at the
  -- end of upweep_mod
  | TFL_GhcSession
  -- ^ A file with lifetime TFL_GhcSession will be cleaned up at the end of
  -- runGhc(T)
  deriving (Show)

-- | A directory in which temporary files/dirs are created.
newtype TempDir = TempDir FilePath

-- | An empty FilesToClean
emptyFilesToClean :: FilesToClean
emptyFilesToClean = FilesToClean Set.empty Set.empty

-- | Merge two FilesToClean
mergeFilesToClean :: FilesToClean -> FilesToClean -> FilesToClean
mergeFilesToClean x y = FilesToClean
  { ftcGhcSession    = Set.union (ftcGhcSession x) (ftcGhcSession y)
  , ftcCurrentModule = Set.union (ftcCurrentModule x) (ftcCurrentModule y)
  }

-- | Initialise an empty TmpFs
initTmpFs :: IO TmpFs
initTmpFs = do
  files <- newIORef emptyFilesToClean
  dirs  <- newIORef Map.empty
  next  <- newIORef 0
  return $ TmpFs
    { tmp_files_to_clean = files
    , tmp_dirs_to_clean  = dirs
    , tmp_next_suffix    = next
    }

-- | Initialise an empty TmpFs sharing unique numbers and per-process
-- temporary directories with the given TmpFs
-- (only @tmp_files_to_clean@ is fresh; the other two IORefs are shared).
forkTmpFsFrom :: TmpFs -> IO TmpFs
forkTmpFsFrom old = do
  files <- newIORef emptyFilesToClean
  return $ TmpFs
    { tmp_files_to_clean = files
    , tmp_dirs_to_clean  = tmp_dirs_to_clean old
    , tmp_next_suffix    = tmp_next_suffix old
    }

-- | Merge the first TmpFs into the second.
--
-- The first TmpFs is returned emptied.
mergeTmpFsInto :: TmpFs -> TmpFs -> IO ()
mergeTmpFsInto src dst = do
  -- atomically take everything from src, then fold it into dst
  src_files <- atomicModifyIORef' (tmp_files_to_clean src) (\s -> (emptyFilesToClean, s))
  atomicModifyIORef' (tmp_files_to_clean dst) (\s -> (mergeFilesToClean src_files s, ()))

-- | Delete all tracked per-process temporary directories and forget them.
-- Runs under 'mask_' so the bookkeeping and deletion are not separated by
-- an async exception.
cleanTempDirs :: Logger -> TmpFs -> IO ()
cleanTempDirs logger tmpfs
  = mask_
  $ do let ref = tmp_dirs_to_clean tmpfs
       ds <- atomicModifyIORef' ref $ \ds -> (Map.empty, ds)
       removeTmpDirs logger (Map.elems ds)

-- | Delete all files in @tmp_files_to_clean@.
cleanTempFiles :: Logger -> TmpFs -> IO ()
cleanTempFiles logger tmpfs
  = mask_   -- take-and-empty plus delete must not be split by async exceptions
  $ do let ref = tmp_files_to_clean tmpfs
       to_delete <- atomicModifyIORef' ref $
         \FilesToClean
           { ftcCurrentModule = cm_files
           , ftcGhcSession = gs_files
           } -> ( emptyFilesToClean
                , Set.toList cm_files ++ Set.toList gs_files)
       removeTmpFiles logger to_delete

-- | Delete all files in @tmp_files_to_clean@. That have lifetime
-- TFL_CurrentModule.
-- If a file must be cleaned eventually, but must survive a
-- cleanCurrentModuleTempFiles, ensure it has lifetime TFL_GhcSession.
cleanCurrentModuleTempFiles :: Logger -> TmpFs -> IO ()
cleanCurrentModuleTempFiles logger tmpfs
  = mask_
  $ do let ref = tmp_files_to_clean tmpfs
       to_delete <- atomicModifyIORef' ref $
         \ftc@FilesToClean{ftcCurrentModule = cm_files} ->
           (ftc {ftcCurrentModule = Set.empty}, Set.toList cm_files)
       removeTmpFiles logger to_delete

-- | Ensure that new_files are cleaned on the next call of
-- 'cleanTempFiles' or 'cleanCurrentModuleTempFiles', depending on lifetime.
-- If any of new_files are already tracked, they will have their lifetime
-- updated.
addFilesToClean :: TmpFs -> TempFileLifetime -> [FilePath] -> IO ()
addFilesToClean tmpfs lifetime new_files =
  modifyIORef' (tmp_files_to_clean tmpfs) $
    \FilesToClean
      { ftcCurrentModule = cm_files
      , ftcGhcSession = gs_files
      } -> case lifetime of
        -- a file lives in exactly one of the two sets, so moving it into
        -- one set removes it from the other
        TFL_CurrentModule -> FilesToClean
          { ftcCurrentModule = cm_files `Set.union` new_files_set
          , ftcGhcSession    = gs_files `Set.difference` new_files_set
          }
        TFL_GhcSession -> FilesToClean
          { ftcCurrentModule = cm_files `Set.difference` new_files_set
          , ftcGhcSession    = gs_files `Set.union` new_files_set
          }
  where
    new_files_set = Set.fromList new_files

-- | Update the lifetime of files already being tracked. If any files are
-- not being tracked they will be discarded.
changeTempFilesLifetime :: TmpFs -> TempFileLifetime -> [FilePath] -> IO ()
changeTempFilesLifetime tmpfs lifetime files = do
  FilesToClean
    { ftcCurrentModule = cm_files
    , ftcGhcSession = gs_files
    } <- readIORef (tmp_files_to_clean tmpfs)
  -- only files currently tracked under the *other* lifetime can move
  let old_set = case lifetime of
        TFL_CurrentModule -> gs_files
        TFL_GhcSession    -> cm_files
      existing_files = [f | f <- files, f `Set.member` old_set]
  addFilesToClean tmpfs lifetime existing_files

-- Return a unique numeric temp file suffix
newTempSuffix :: TmpFs -> IO Int
newTempSuffix tmpfs =
  atomicModifyIORef' (tmp_next_suffix tmpfs) $ \n -> (n+1,n)

-- Find a temporary name that doesn't already exist.
newTempName :: Logger -> TmpFs -> TempDir -> TempFileLifetime -> Suffix
            -> IO FilePath
newTempName logger tmpfs tmp_dir lifetime extn
  = do d <- getTempDir logger tmpfs tmp_dir
       findTempName (d </> "ghc_") -- See Note [Deterministic base name]
  where
    findTempName :: FilePath -> IO FilePath
    findTempName prefix
      = do n <- newTempSuffix tmpfs
           let filename = prefix ++ show n <.> extn
           b <- doesFileExist filename
           if b then findTempName prefix
                else do -- clean it up later
                        addFilesToClean tmpfs lifetime [filename]
                        return filename

-- | Create (and register for cleanup) a fresh directory under our
-- per-process temp dir.
newTempDir :: Logger -> TmpFs -> TempDir -> IO FilePath
newTempDir logger tmpfs tmp_dir
  = do d <- getTempDir logger tmpfs tmp_dir
       findTempDir (d </> "ghc_")
  where
    findTempDir :: FilePath -> IO FilePath
    findTempDir prefix
      = do n <- newTempSuffix tmpfs
           let filename = prefix ++ show n
           b <- doesDirectoryExist filename
           if b then findTempDir prefix
                else do createDirectory filename
                        -- see mkTempDir below; this is wrong: -> consIORef (tmp_dirs_to_clean tmpfs) filename
                        return filename

-- | Like 'newTempName' but produces a library name/path triple
-- @(full path, directory, library name)@ suitable for @-l@ linking.
newTempLibName :: Logger -> TmpFs -> TempDir -> TempFileLifetime -> Suffix
               -> IO (FilePath, FilePath, String)
newTempLibName logger tmpfs tmp_dir lifetime extn
  = do d <- getTempDir logger tmpfs tmp_dir
       findTempName d ("ghc_")
  where
    findTempName :: FilePath -> String -> IO (FilePath, FilePath, String)
    findTempName dir prefix
      = do n <- newTempSuffix tmpfs -- See Note [Deterministic base name]
           let libname  = prefix ++ show n
               filename = dir </> "lib" ++ libname <.> extn
           b <- doesFileExist filename
           if b then findTempName dir prefix
                else do -- clean it up later
                        addFilesToClean tmpfs lifetime [filename]
                        return (filename, dir, libname)

-- Return our temporary directory within tmp_dir, creating one if we
-- don't have one yet.
getTempDir :: Logger -> TmpFs -> TempDir -> IO FilePath
getTempDir logger tmpfs (TempDir tmp_dir) = do
    mapping <- readIORef dir_ref
    case Map.lookup tmp_dir mapping of
        Nothing -> do
            pid <- getProcessID
            let prefix = tmp_dir </> "ghc" ++ show pid ++ "_"
            mask_ $ mkTempDir prefix
        Just dir -> return dir
  where
    dir_ref = tmp_dirs_to_clean tmpfs

    mkTempDir :: FilePath -> IO FilePath
    mkTempDir prefix = do
        n <- newTempSuffix tmpfs
        let our_dir = prefix ++ show n

        -- 1. Speculatively create our new directory.
        createDirectory our_dir

        -- 2. Update the tmp_dirs_to_clean mapping unless an entry already exists
        -- (i.e. unless another thread beat us to it).
        their_dir <- atomicModifyIORef' dir_ref $ \mapping ->
            case Map.lookup tmp_dir mapping of
                Just dir -> (mapping, Just dir)
                Nothing  -> (Map.insert tmp_dir our_dir mapping, Nothing)

        -- 3. If there was an existing entry, return it and delete the
        -- directory we created.  Otherwise return the directory we created.
        case their_dir of
            Nothing  -> do
                debugTraceMsg logger 2 $
                    text "Created temporary directory:" <+> text our_dir
                return our_dir
            Just dir -> do
                removeDirectory our_dir
                return dir
      `Exception.catchIO` \e -> if isAlreadyExistsError e
                      then mkTempDir prefix else ioError e

{- Note [Deterministic base name]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The filename of temporary files, especially the basename of C files, can end
up in the output in some form, e.g. as part of linker debug information. In the
interest of bit-wise exactly reproducible compilation (#4012), the basename of
the temporary file no longer contains random information (it used to contain
the process id).
This is ok, as the temporary directory used contains the pid (see getTempDir).
-}

-- | Remove each directory, logging what is being deleted.
removeTmpDirs :: Logger -> [FilePath] -> IO ()
removeTmpDirs logger ds
  = traceCmd logger "Deleting temp dirs"
             ("Deleting: " ++ unwords ds)
             (mapM_ (removeWith logger removeDirectory) ds)

-- | Remove each file, refusing to touch anything that looks like a
-- Haskell source file (see 'warnNon').
removeTmpFiles :: Logger -> [FilePath] -> IO ()
removeTmpFiles logger fs
  = warnNon $
    traceCmd logger "Deleting temp files"
             ("Deleting: " ++ unwords deletees)
             (mapM_ (removeWith logger removeFile) deletees)
  where
    -- Flat out refuse to delete files that are likely to be source input
    -- files (is there a worse bug than having a compiler delete your source
    -- files?)
    --
    -- Deleting source files is a sign of a bug elsewhere, so prominently flag
    -- the condition.
    warnNon act
      | null non_deletees = act
      | otherwise = do
          putMsg logger (text "WARNING - NOT deleting source files:"
                         <+> hsep (map text non_deletees))
          act

    (non_deletees, deletees) = partition isHaskellUserSrcFilename fs

-- | Run the given remover on a path, downgrading any IO failure to a
-- debug-level log message (deletion is best-effort).
removeWith :: Logger -> (FilePath -> IO ()) -> FilePath -> IO ()
removeWith logger remover f = remover f
  `Exception.catchIO`
  (\e ->
   let msg = if isDoesNotExistError e
             then text "Warning: deleting non-existent" <+> text f
             else text "Warning: exception raised when deleting"
                                            <+> text f <> colon
               $$ text (show e)
   in debugTraceMsg logger 2 msg
  )

#if defined(mingw32_HOST_OS)
-- relies on Int == Int32 on Windows
foreign import ccall unsafe "_getpid" getProcessID :: IO Int
#else
getProcessID :: IO Int
getProcessID = System.Posix.Internals.c_getpid >>= return . fromIntegral
#endif

-- The following three functions are from the `temporary` package.

-- | Create and use a temporary directory in the system standard temporary
-- directory.
--
-- Behaves exactly the same as 'withTempDirectory', except that the parent
-- temporary directory will be that returned by 'getTemporaryDirectory'.
withSystemTempDirectory :: String   -- ^ Directory name template. See 'openTempFile'.
                        -> (FilePath -> IO a) -- ^ Callback that can use the directory
                        -> IO a
withSystemTempDirectory template action =
  getTemporaryDirectory >>= \tmpDir -> withTempDirectory tmpDir template action

-- | Create and use a temporary directory.
--
-- Creates a new temporary directory inside the given directory, making use
-- of the template. The temp directory is deleted after use. For example:
--
-- > withTempDirectory "src" "sdist." $ \tmpDir -> do ...
--
-- The @tmpDir@ will be a new subdirectory of the given directory, e.g.
-- @src/sdist.342@.
withTempDirectory :: FilePath -- ^ Temp directory to create the directory in
                  -> String   -- ^ Directory name template. See 'openTempFile'.
                  -> (FilePath -> IO a) -- ^ Callback that can use the directory
                  -> IO a
withTempDirectory targetDir template =
  Exception.bracket
    (createTempDirectory targetDir template)
    (ignoringIOErrors . removeDirectoryRecursive)

-- | Swallow any IO exception (used for best-effort cleanup above).
ignoringIOErrors :: IO () -> IO ()
ignoringIOErrors ioe = ioe `Exception.catchIO` const (return ())

-- | Create a fresh directory @dir/template<n>@, retrying with an
-- incremented counter while the name already exists.
createTempDirectory :: FilePath -> String -> IO FilePath
createTempDirectory dir template = do
  pid <- getProcessID
  findTempName pid
  where
    findTempName x = do
      let path = dir </> template ++ show x
      createDirectory path
      return path
        `Exception.catchIO` \e -> if isAlreadyExistsError e
                        then findTempName (x+1) else ioError e
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Trace.hs0000644000000000000000000000575314472400113020340 0ustar0000000000000000
-- | Tracing utilities
module GHC.Utils.Trace
  ( pprTrace
  , pprTraceM
  , pprTraceDebug
  , pprTraceIt
  , pprTraceWith
  , pprSTrace
  , pprTraceException
  , warnPprTrace
  , pprTraceUserWarning
  , trace
  ) where

import GHC.Prelude
import GHC.Utils.Outputable
import GHC.Utils.Exception
import GHC.Utils.Panic
import GHC.Utils.GlobalVars
import GHC.Utils.Constants
import GHC.Stack

import Debug.Trace (trace)
import Control.Monad.IO.Class

-- | If debug output is on, show some 'SDoc' on the screen
pprTrace :: String -> SDoc -> a -> a
pprTrace str doc x
  | unsafeHasNoDebugOutput = x
  | otherwise = pprDebugAndThen defaultSDocContext trace (text str) doc x

-- | Applicative variant of 'pprTrace'.
pprTraceM :: Applicative f => String -> SDoc -> f ()
pprTraceM str doc = pprTrace str doc (pure ())

-- | Like 'pprTrace' but only fires in debug builds with -dppr-debug.
pprTraceDebug :: String -> SDoc -> a -> a
pprTraceDebug str doc x
  | debugIsOn && unsafeHasPprDebug = pprTrace str doc x
  | otherwise                      = x

-- | @pprTraceWith desc f x@ is equivalent to @pprTrace desc (f x) x@.
-- This allows you to print details from the returned value as well as from
-- ambient variables.
pprTraceWith :: String -> (a -> SDoc) -> a -> a
pprTraceWith desc f x = pprTrace desc (f x) x

-- | @pprTraceIt desc x@ is equivalent to @pprTrace desc (ppr x) x@
pprTraceIt :: Outputable a => String -> a -> a
pprTraceIt desc x = pprTraceWith desc ppr x

-- | @pprTraceException desc x action@ runs action, printing a message
-- if it throws an exception.
pprTraceException :: ExceptionMonad m => String -> SDoc -> m a -> m a
pprTraceException heading doc =
    handleGhcException $ \exc -> liftIO $ do
        putStrLn $ renderWithContext defaultSDocContext
                 $ withPprStyle defaultDumpStyle
                 $ sep [text heading, nest 2 doc]
        throwGhcExceptionIO exc

-- | If debug output is on, show some 'SDoc' on the screen along
-- with a call stack when available.
pprSTrace :: HasCallStack => SDoc -> a -> a
pprSTrace doc = pprTrace "" (doc $$ traceCallStackDoc)

-- | Just warn about an assertion failure, recording the given file and line number.
warnPprTrace :: HasCallStack => Bool -> String -> SDoc -> a -> a
warnPprTrace _ _s _ x | not debugIsOn = x
warnPprTrace _ _s _msg x | unsafeHasNoDebugOutput = x
warnPprTrace False _s _msg x = x
warnPprTrace True s msg x
  = pprDebugAndThen defaultSDocContext trace (text "WARNING:")
                    (text s $$ msg $$ withFrozenCallStack traceCallStackDoc) x

-- | For when we want to show the user a non-fatal WARNING so that they can
-- report a GHC bug, but don't want to panic.
pprTraceUserWarning :: HasCallStack => SDoc -> a -> a
pprTraceUserWarning msg x
  | unsafeHasNoDebugOutput = x
  | otherwise = pprDebugAndThen defaultSDocContext trace (text "WARNING:")
                    (msg $$ withFrozenCallStack traceCallStackDoc) x

-- | Render the current GHC call stack as an indented document.
traceCallStackDoc :: HasCallStack => SDoc
traceCallStackDoc =
    hang (text "Call stack:") 4
         (vcat $ map text $ lines (prettyCallStack callStack))
ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/ghc-boot/build/GHC/Version.hs0000644000000000000000000000076114472400073025261 0ustar0000000000000000
module GHC.Version where

import Prelude -- See Note [Why do we import Prelude here?]

cProjectGitCommitId   :: String
cProjectGitCommitId   = "00920f176b0235d5bb52a8e054d89a664f8938fe"

cProjectVersion       :: String
cProjectVersion       = "9.4.7"

cProjectVersionInt    :: String
cProjectVersionInt    = "904"

cProjectPatchLevel    :: String
cProjectPatchLevel    = "7"

cProjectPatchLevel1   :: String
cProjectPatchLevel1   = "7"

cProjectPatchLevel2   :: String
cProjectPatchLevel2   = "0"
ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/BinaryArray.hs0000644000000000000000000000602514470055371021655 0ustar0000000000000000
{-# LANGUAGE BangPatterns, MagicHash, UnboxedTuples, FlexibleContexts #-}

-- | Efficient serialisation for GHCi Instruction arrays
--
-- Author: Ben Gamari
--
module GHCi.BinaryArray(putArray, getArray) where

import Prelude
import Foreign.Ptr
import Data.Binary
import Data.Binary.Put (putBuilder)
import qualified Data.Binary.Get.Internal as Binary
import qualified Data.ByteString.Builder as BB
import qualified Data.ByteString.Builder.Internal as BB
import qualified Data.Array.Base as A
import qualified Data.Array.IO.Internals as A
import qualified Data.Array.Unboxed as A
import GHC.Exts
import GHC.IO

-- | An efficient serialiser of 'A.UArray'.
-- Serialises the bounds with 'put', then streams the raw byte contents of
-- the backing 'ByteArray#' straight into the output buffer.
putArray :: Binary i => A.UArray i a -> Put
putArray (A.UArray l u _ arr#) = do
    put l
    put u
    putBuilder $ byteArrayBuilder arr#

-- | A 'BB.Builder' that copies a 'ByteArray#' directly into the builder's
-- output buffers, splitting across buffers as needed.
byteArrayBuilder :: ByteArray# -> BB.Builder
byteArrayBuilder arr# = BB.builder $ go 0 (I# (sizeofByteArray# arr#))
  where
    go :: Int -> Int -> BB.BuildStep a -> BB.BuildStep a
    go !inStart !inEnd k (BB.BufferRange outStart outEnd)
      -- There is enough room in this output buffer to write all remaining array
      -- contents
      | inRemaining <= outRemaining = do
          copyByteArrayToAddr arr# inStart outStart inRemaining
          k (BB.BufferRange (outStart `plusPtr` inRemaining) outEnd)
      -- There is only enough space for a fraction of the remaining contents
      | otherwise = do
          copyByteArrayToAddr arr# inStart outStart outRemaining
          let !inStart' = inStart + outRemaining
          return $! BB.bufferFull 1 outEnd (go inStart' inEnd k)
      where
        inRemaining  = inEnd - inStart
        outRemaining = outEnd `minusPtr` outStart

-- | Thin IO wrapper over the 'copyByteArrayToAddr#' primop.
copyByteArrayToAddr :: ByteArray# -> Int -> Ptr a -> Int -> IO ()
copyByteArrayToAddr src# (I# src_off#) (Ptr dst#) (I# len#) =
    IO $ \s -> case copyByteArrayToAddr# src# src_off# dst# len# s of
                 s' -> (# s', () #)

-- | An efficient deserialiser of 'A.UArray'.
getArray :: (Binary i, A.Ix i, A.MArray A.IOUArray a IO) => Get (A.UArray i a)
getArray = do
    l <- get
    u <- get
    arr@(A.IOUArray (A.STUArray _ _ _ arr#)) <-
        return $ unsafeDupablePerformIO $ A.newArray_ (l,u)
    -- Read the payload in bounded chunks straight into the byte array.
    let go 0 _ = return ()
        go !remaining !off = do
            Binary.readNWith n $ \ptr ->
              copyAddrToByteArray ptr arr# off n
            go (remaining - n) (off + n)
          where n = min chunkSize remaining
    go (I# (sizeofMutableByteArray# arr#)) 0
    return $! unsafeDupablePerformIO $ unsafeFreezeIOUArray arr
  where
    chunkSize = 10*1024

-- | Thin IO wrapper over the 'copyAddrToByteArray#' primop.
copyAddrToByteArray :: Ptr a -> MutableByteArray# RealWorld
                    -> Int -> Int -> IO ()
copyAddrToByteArray (Ptr src#) dst# (I# dst_off#) (I# len#) =
    IO $ \s -> case copyAddrToByteArray# src# dst# dst_off# len# s of
                 s' -> (# s', () #)

-- this is inexplicably not exported in currently released array versions
unsafeFreezeIOUArray :: A.IOUArray ix e -> IO (A.UArray ix e)
unsafeFreezeIOUArray (A.IOUArray marr) = stToIO (A.unsafeFreezeSTUArray marr)
ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/BreakArray.hs0000644000000000000000000000712414470055371021456 0ustar0000000000000000
{-# OPTIONS_GHC -fno-warn-name-shadowing #-}
{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-}
-------------------------------------------------------------------------------
--
-- (c) The University of Glasgow 2007
--
-- | Break Arrays
--
-- An array of words, indexed by a breakpoint number (breakpointId in Tickish)
-- containing the ignore count for every breakpopint.
-- There is one of these arrays per module.
--
-- For each word with value n:
--   n > 1  : the corresponding breakpoint is enabled. Next time the bp is hit,
--            GHCi will decrement the ignore count and continue processing.
--   n == 0 : The breakpoint is enabled, GHCi will stop next time it hits
--            this breakpoint.
--   n == -1: This breakpoint is disabled.
--   n < -1 : Not used.
--
-------------------------------------------------------------------------------

module GHCi.BreakArray
    ( BreakArray (BA) -- constructor is exported only for GHC.StgToByteCode
    , newBreakArray
    , getBreak
    , setupBreakpoint
    , breakOn
    , breakOff
    , showBreakArray
    ) where

import Prelude -- See note [Why do we import Prelude here?]
import Control.Monad
import GHC.Exts
import GHC.IO ( IO(..)
              )
import System.IO.Unsafe ( unsafeDupablePerformIO )

#include "MachDeps.h"

-- One word of ignore-count state per breakpoint (see module header).
data BreakArray = BA (MutableByteArray# RealWorld)

breakOff, breakOn :: Int
breakOn  = 0
breakOff = -1

-- | Dump every slot of the array to stdout (debugging aid).
showBreakArray :: BreakArray -> IO ()
showBreakArray array = do
    forM_ [0 .. (size array - 1)] $ \i -> do
        val <- readBreakArray array i
        putStr $ ' ' : show val
    putStr "\n"

-- | Store an ignore count for one breakpoint; returns False when the
-- index is out of range.
setupBreakpoint :: BreakArray -> Int -> Int -> IO Bool
setupBreakpoint breakArray ind val
    | safeIndex breakArray ind = do
        writeBreakArray breakArray ind val
        return True
    | otherwise = return False

-- | Read one breakpoint's word; Nothing when the index is out of range.
getBreak :: BreakArray -> Int -> IO (Maybe Int)
getBreak array index
    | safeIndex array index = do
        val <- readBreakArray array index
        return $ Just val
    | otherwise = return Nothing

safeIndex :: BreakArray -> Int -> Bool
safeIndex array index = index < size array && index >= 0

-- | Number of breakpoint slots (bytes divided by the host word size).
size :: BreakArray -> Int
size (BA array) = size `div` SIZEOF_HSWORD
  where
    -- We want to keep this operation pure. The mutable byte array
    -- is never resized so this is safe.
    size = unsafeDupablePerformIO $ sizeofMutableByteArray array

    sizeofMutableByteArray :: MutableByteArray# RealWorld -> IO Int
    sizeofMutableByteArray arr =
        IO $ \s -> case getSizeofMutableByteArray# arr s of
                       (# s', n# #) -> (# s', I# n# #)

allocBA :: Int# -> IO BreakArray
allocBA sz# = IO $ \s1 ->
    case newByteArray# sz# s1 of { (# s2, array #) -> (# s2, BA array #) }

-- create a new break array and initialise all elements to breakOff.
newBreakArray :: Int -> IO BreakArray
newBreakArray (I# sz#) = do
    BA array <- allocBA (sz# *# SIZEOF_HSWORD#)
    case breakOff of
        I# off -> do
           -- initialise every word of the array to breakOff
           let loop n | isTrue# (n >=# sz#) = return ()
                      | otherwise = do writeBA# array n off; loop (n +# 1#)
           loop 0#
    return $ BA array

writeBA# :: MutableByteArray# RealWorld -> Int# -> Int# -> IO ()
writeBA# array ind val = IO $ \s ->
    case writeIntArray# array ind val s of { s -> (# s, () #) }

writeBreakArray :: BreakArray -> Int -> Int -> IO ()
writeBreakArray (BA array) (I# i) (I# val) = writeBA# array i val

readBA# :: MutableByteArray# RealWorld -> Int# -> IO Int
readBA# array i = IO $ \s ->
    case readIntArray# array i s of { (# s, c #) -> (# s, I# c #) }

readBreakArray :: BreakArray -> Int -> IO Int
readBreakArray (BA array) (I# ind# ) = readBA# array ind#
ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/FFI.hsc0000644000000000000000000001223414472375231020202 0ustar0000000000000000
-----------------------------------------------------------------------------
--
-- libffi bindings
--
-- (c) The University of Glasgow 2008
--
-----------------------------------------------------------------------------

-- See Note [FFI_GO_CLOSURES workaround] in ghc_ffi.h
-- We can't include ghc_ffi.h here as we must build with stage0
#if defined(darwin_HOST_OS)
#if !defined(FFI_GO_CLOSURES)
#define FFI_GO_CLOSURES 0
#endif
#endif

#include <ffi.h>

{-# LANGUAGE CPP, DeriveGeneric, DeriveAnyClass #-}
module GHCi.FFI
  ( FFIType(..)
  , FFIConv(..)
  , C_ffi_cif
  , prepForeignCall
  , freeForeignCallInfo
  ) where

import Prelude -- See note [Why do we import Prelude here?]
import Control.Exception
import Data.Binary
import GHC.Generics
import Foreign
import Foreign.C

-- Primitive argument/result types understood by libffi.
data FFIType
  = FFIVoid
  | FFIPointer
  | FFIFloat
  | FFIDouble
  | FFISInt8
  | FFISInt16
  | FFISInt32
  | FFISInt64
  | FFIUInt8
  | FFIUInt16
  | FFIUInt32
  | FFIUInt64
  deriving (Show, Generic, Binary)

-- Calling conventions we can hand to libffi.
data FFIConv
  = FFICCall
  | FFIStdCall
  deriving (Show, Generic, Binary)

-- | Allocate and prepare a libffi call interface (cif) for the given
-- convention and type signature.  Throws 'ErrorCall' if ffi_prep_cif
-- reports failure.
prepForeignCall
    :: FFIConv
    -> [FFIType]          -- arg types
    -> FFIType            -- result type
    -> IO (Ptr C_ffi_cif) -- token for making calls (must be freed by caller)
prepForeignCall cconv arg_types result_type = do
  let n_args = length arg_types
  arg_arr <- mallocArray n_args
  pokeArray arg_arr (map ffiType arg_types)
  cif <- mallocBytes (#const sizeof(ffi_cif))
  let abi = convToABI cconv
  r <- ffi_prep_cif cif abi (fromIntegral n_args) (ffiType result_type) arg_arr
  if r /= fFI_OK then
    throwIO $ ErrorCall $ concat
      [ "prepForeignCallFailed: ", strError r,
        "(cconv: ", show cconv,
        " arg tys: ", show arg_types,
        " res ty: ", show result_type, ")" ]
  else
    return (castPtr cif)

-- | Free a cif made by 'prepForeignCall' (both the argument-type array
-- and the cif itself).
freeForeignCallInfo :: Ptr C_ffi_cif -> IO ()
freeForeignCallInfo p = do
  free ((#ptr ffi_cif, arg_types) p)
  free p

-- | Human-readable rendering of a libffi status code.
strError :: C_ffi_status -> String
strError r
  | r == fFI_BAD_ABI     = "invalid ABI (FFI_BAD_ABI)"
  | r == fFI_BAD_TYPEDEF = "invalid type description (FFI_BAD_TYPEDEF)"
  | otherwise            = "unknown error: " ++ show r

convToABI :: FFIConv -> C_ffi_abi
convToABI FFICCall  = fFI_DEFAULT_ABI
#if defined(mingw32_HOST_OS) && defined(i386_HOST_ARCH)
convToABI FFIStdCall = fFI_STDCALL
#endif
-- unknown conventions are mapped to the default, (#3336)
convToABI _ = fFI_DEFAULT_ABI

-- | Map our type tags onto libffi's statically-allocated type descriptors.
ffiType :: FFIType -> Ptr C_ffi_type
ffiType FFIVoid    = ffi_type_void
ffiType FFIPointer = ffi_type_pointer
ffiType FFIFloat   = ffi_type_float
ffiType FFIDouble  = ffi_type_double
ffiType FFISInt8   = ffi_type_sint8
ffiType FFISInt16  = ffi_type_sint16
ffiType FFISInt32  = ffi_type_sint32
ffiType FFISInt64  = ffi_type_sint64
ffiType FFIUInt8   = ffi_type_uint8
ffiType FFIUInt16  = ffi_type_uint16
ffiType FFIUInt32  = ffi_type_uint32
ffiType FFIUInt64  = ffi_type_uint64

data C_ffi_type
data C_ffi_cif

type C_ffi_status = (#type ffi_status)
type C_ffi_abi    = (#type ffi_abi)

foreign import ccall "&ffi_type_void"   ffi_type_void    :: Ptr C_ffi_type
foreign import ccall "&ffi_type_uint8"  ffi_type_uint8   :: Ptr C_ffi_type
foreign import ccall "&ffi_type_sint8"  ffi_type_sint8   :: Ptr C_ffi_type
foreign import ccall "&ffi_type_uint16" ffi_type_uint16  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_sint16" ffi_type_sint16  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_uint32" ffi_type_uint32  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_sint32" ffi_type_sint32  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_uint64" ffi_type_uint64  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_sint64" ffi_type_sint64  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_float"  ffi_type_float   :: Ptr C_ffi_type
foreign import ccall "&ffi_type_double" ffi_type_double  :: Ptr C_ffi_type
foreign import ccall "&ffi_type_pointer"ffi_type_pointer :: Ptr C_ffi_type

fFI_OK, fFI_BAD_ABI, fFI_BAD_TYPEDEF :: C_ffi_status
fFI_OK          = (#const FFI_OK)
fFI_BAD_ABI     = (#const FFI_BAD_ABI)
fFI_BAD_TYPEDEF = (#const FFI_BAD_TYPEDEF)

fFI_DEFAULT_ABI :: C_ffi_abi
fFI_DEFAULT_ABI = (#const FFI_DEFAULT_ABI)
#if defined(mingw32_HOST_OS) && defined(i386_HOST_ARCH)
fFI_STDCALL :: C_ffi_abi
fFI_STDCALL = (#const FFI_STDCALL)
#endif

-- ffi_status ffi_prep_cif(ffi_cif *cif,
--                         ffi_abi abi,
--                         unsigned int nargs,
--                         ffi_type *rtype,
--                         ffi_type **atypes);

foreign import ccall "ffi_prep_cif"
  ffi_prep_cif :: Ptr C_ffi_cif         -- cif
               -> C_ffi_abi             -- abi
               -> CUInt                 -- nargs
               -> Ptr C_ffi_type        -- result type
               -> Ptr (Ptr C_ffi_type)  -- arg types
               -> IO C_ffi_status

-- Currently unused:

-- void ffi_call(ffi_cif *cif,
--               void (*fn)(),
--               void *rvalue,
--               void **avalue);

-- foreign import ccall "ffi_call"
--   ffi_call :: Ptr C_ffi_cif             -- cif
--            -> FunPtr (IO ())            -- function to call
--            -> Ptr ()                    -- put result here
--            -> Ptr (Ptr ())              -- arg values
--            -> IO
() ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/Message.hs0000644000000000000000000005466314472400113021017 0ustar0000000000000000{-# LANGUAGE GADTs, DeriveGeneric, StandaloneDeriving, ScopedTypeVariables, GeneralizedNewtypeDeriving, ExistentialQuantification, RecordWildCards, CPP #-} {-# OPTIONS_GHC -fno-warn-name-shadowing -fno-warn-orphans #-} -- | -- Remote GHCi message types and serialization. -- -- For details on Remote GHCi, see Note [Remote GHCi] in -- compiler/GHC/Runtime/Interpreter.hs. -- module GHCi.Message ( Message(..), Msg(..) , THMessage(..), THMsg(..) , QResult(..) , EvalStatus_(..), EvalStatus, EvalResult(..), EvalOpts(..), EvalExpr(..) , SerializableException(..) , toSerializableException, fromSerializableException , THResult(..), THResultType(..) , ResumeContext(..) , QState(..) , getMessage, putMessage, getTHMessage, putTHMessage , Pipe(..), remoteCall, remoteTHCall, readPipe, writePipe ) where import Prelude -- See note [Why do we import Prelude here?] import GHCi.RemoteTypes import GHCi.FFI import GHCi.TH.Binary () -- For Binary instances import GHCi.BreakArray import GHC.LanguageExtensions import qualified GHC.Exts.Heap as Heap import GHC.ForeignSrcLang import GHC.Fingerprint import Control.Concurrent import Control.Exception import Data.Binary import Data.Binary.Get import Data.Binary.Put import Data.ByteString (ByteString) import qualified Data.ByteString as B import qualified Data.ByteString.Lazy as LB import Data.Dynamic import Data.Typeable (TypeRep) import Data.IORef import Data.Map (Map) import Foreign import GHC.Generics import GHC.Stack.CCS import qualified Language.Haskell.TH as TH import qualified Language.Haskell.TH.Syntax as TH import System.Exit import System.IO import System.IO.Error -- ----------------------------------------------------------------------------- -- The RPC protocol between GHC and the interactive server -- | A @Message a@ is a message that returns a value of type @a@. 
-- These are requests sent from GHC to the server. data Message a where -- | Exit the iserv process Shutdown :: Message () RtsRevertCAFs :: Message () -- RTS Linker ------------------------------------------- -- These all invoke the corresponding functions in the RTS Linker API. InitLinker :: Message () LookupSymbol :: String -> Message (Maybe (RemotePtr ())) LookupClosure :: String -> Message (Maybe HValueRef) LoadDLL :: String -> Message (Maybe String) LoadArchive :: String -> Message () -- error? LoadObj :: String -> Message () -- error? UnloadObj :: String -> Message () -- error? AddLibrarySearchPath :: String -> Message (RemotePtr ()) RemoveLibrarySearchPath :: RemotePtr () -> Message Bool ResolveObjs :: Message Bool FindSystemLibrary :: String -> Message (Maybe String) -- Interpreter ------------------------------------------- -- | Create a set of BCO objects, and return HValueRefs to them -- Note: Each ByteString contains a Binary-encoded [ResolvedBCO], not -- a ResolvedBCO. The list is to allow us to serialise the ResolvedBCOs -- in parallel. See @createBCOs@ in compiler/GHC/Runtime/Interpreter.hs. 
CreateBCOs :: [LB.ByteString] -> Message [HValueRef] -- | Release 'HValueRef's FreeHValueRefs :: [HValueRef] -> Message () -- | Add entries to the Static Pointer Table AddSptEntry :: Fingerprint -> HValueRef -> Message () -- | Malloc some data and return a 'RemotePtr' to it MallocData :: ByteString -> Message (RemotePtr ()) MallocStrings :: [ByteString] -> Message [RemotePtr ()] -- | Calls 'GHCi.FFI.prepareForeignCall' PrepFFI :: FFIConv -> [FFIType] -> FFIType -> Message (RemotePtr C_ffi_cif) -- | Free data previously created by 'PrepFFI' FreeFFI :: RemotePtr C_ffi_cif -> Message () -- | Create an info table for a constructor MkConInfoTable :: Bool -- TABLES_NEXT_TO_CODE -> Int -- ptr words -> Int -- non-ptr words -> Int -- constr tag -> Int -- pointer tag -> ByteString -- constructor desccription -> Message (RemotePtr Heap.StgInfoTable) -- | Evaluate a statement EvalStmt :: EvalOpts -> EvalExpr HValueRef {- IO [a] -} -> Message (EvalStatus [HValueRef]) {- [a] -} -- | Resume evaluation of a statement after a breakpoint ResumeStmt :: EvalOpts -> RemoteRef (ResumeContext [HValueRef]) -> Message (EvalStatus [HValueRef]) -- | Abandon evaluation of a statement after a breakpoint AbandonStmt :: RemoteRef (ResumeContext [HValueRef]) -> Message () -- | Evaluate something of type @IO String@ EvalString :: HValueRef {- IO String -} -> Message (EvalResult String) -- | Evaluate something of type @String -> IO String@ EvalStringToString :: HValueRef {- String -> IO String -} -> String -> Message (EvalResult String) -- | Evaluate something of type @IO ()@ EvalIO :: HValueRef {- IO a -} -> Message (EvalResult ()) -- | Create a set of CostCentres with the same module name MkCostCentres :: String -- module, RemotePtr so it can be shared -> [(String,String)] -- (name, SrcSpan) -> Message [RemotePtr CostCentre] -- | Show a 'CostCentreStack' as a @[String]@ CostCentreStackInfo :: RemotePtr CostCentreStack -> Message [String] -- | Create a new array of breakpoint flags NewBreakArray 
:: Int -- size -> Message (RemoteRef BreakArray) -- | Set how many times a breakpoint should be ignored -- also used for enable/disable SetupBreakpoint :: RemoteRef BreakArray -> Int -- breakpoint index -> Int -- ignore count to be stored in the BreakArray -- -1 disable; 0 enable; >= 1 enable, ignore count. -> Message () -- | Query the status of a breakpoint (True <=> enabled) BreakpointStatus :: RemoteRef BreakArray -> Int -- index -> Message Bool -- True <=> enabled -- | Get a reference to a free variable at a breakpoint GetBreakpointVar :: HValueRef -- the AP_STACK from EvalBreak -> Int -> Message (Maybe HValueRef) -- Template Haskell ------------------------------------------- -- For more details on how TH works with Remote GHCi, see -- Note [Remote Template Haskell] in libraries/ghci/GHCi/TH.hs. -- | Start a new TH module, return a state token that should be StartTH :: Message (RemoteRef (IORef QState)) -- | Evaluate a TH computation. -- -- Returns a ByteString, because we have to force the result -- before returning it to ensure there are no errors lurking -- in it. The TH types don't have NFData instances, and even if -- they did, we have to serialize the value anyway, so we might -- as well serialize it to force it. RunTH :: RemoteRef (IORef QState) -> HValueRef {- e.g. TH.Q TH.Exp -} -> THResultType -> Maybe TH.Loc -> Message (QResult ByteString) -- | Run the given mod finalizers. RunModFinalizers :: RemoteRef (IORef QState) -> [RemoteRef (TH.Q ())] -> Message (QResult ()) -- | Remote interface to GHC.Exts.Heap.getClosureData. This is used by -- the GHCi debugger to inspect values in the heap for :print and -- type reconstruction. GetClosure :: HValueRef -> Message (Heap.GenClosure HValueRef) -- | Evaluate something. This is used to support :force in GHCi. 
Seq :: HValueRef -> Message (EvalStatus ()) -- | Resume forcing a free variable in a breakpoint (#2950) ResumeSeq :: RemoteRef (ResumeContext ()) -> Message (EvalStatus ()) deriving instance Show (Message a) -- | Template Haskell return values data QResult a = QDone a -- ^ RunTH finished successfully; return value follows | QException String -- ^ RunTH threw an exception | QFail String -- ^ RunTH called 'fail' deriving (Generic, Show) instance Binary a => Binary (QResult a) -- | Messages sent back to GHC from GHCi.TH, to implement the methods -- of 'Quasi'. For an overview of how TH works with Remote GHCi, see -- Note [Remote Template Haskell] in GHCi.TH. data THMessage a where NewName :: String -> THMessage (THResult TH.Name) Report :: Bool -> String -> THMessage (THResult ()) LookupName :: Bool -> String -> THMessage (THResult (Maybe TH.Name)) Reify :: TH.Name -> THMessage (THResult TH.Info) ReifyFixity :: TH.Name -> THMessage (THResult (Maybe TH.Fixity)) ReifyType :: TH.Name -> THMessage (THResult TH.Type) ReifyInstances :: TH.Name -> [TH.Type] -> THMessage (THResult [TH.Dec]) ReifyRoles :: TH.Name -> THMessage (THResult [TH.Role]) ReifyAnnotations :: TH.AnnLookup -> TypeRep -> THMessage (THResult [ByteString]) ReifyModule :: TH.Module -> THMessage (THResult TH.ModuleInfo) ReifyConStrictness :: TH.Name -> THMessage (THResult [TH.DecidedStrictness]) GetPackageRoot :: THMessage (THResult FilePath) AddDependentFile :: FilePath -> THMessage (THResult ()) AddTempFile :: String -> THMessage (THResult FilePath) AddModFinalizer :: RemoteRef (TH.Q ()) -> THMessage (THResult ()) AddCorePlugin :: String -> THMessage (THResult ()) AddTopDecls :: [TH.Dec] -> THMessage (THResult ()) AddForeignFilePath :: ForeignSrcLang -> FilePath -> THMessage (THResult ()) IsExtEnabled :: Extension -> THMessage (THResult Bool) ExtsEnabled :: THMessage (THResult [Extension]) PutDoc :: TH.DocLoc -> String -> THMessage (THResult ()) GetDoc :: TH.DocLoc -> THMessage (THResult (Maybe String)) 
StartRecover :: THMessage () EndRecover :: Bool -> THMessage () FailIfErrs :: THMessage (THResult ()) -- | Indicates that this RunTH is finished, and the next message -- will be the result of RunTH (a QResult). RunTHDone :: THMessage () deriving instance Show (THMessage a) data THMsg = forall a . (Binary a, Show a) => THMsg (THMessage a) getTHMessage :: Get THMsg getTHMessage = do b <- getWord8 case b of 0 -> THMsg <$> NewName <$> get 1 -> THMsg <$> (Report <$> get <*> get) 2 -> THMsg <$> (LookupName <$> get <*> get) 3 -> THMsg <$> Reify <$> get 4 -> THMsg <$> ReifyFixity <$> get 5 -> THMsg <$> (ReifyInstances <$> get <*> get) 6 -> THMsg <$> ReifyRoles <$> get 7 -> THMsg <$> (ReifyAnnotations <$> get <*> get) 8 -> THMsg <$> ReifyModule <$> get 9 -> THMsg <$> ReifyConStrictness <$> get 10 -> THMsg <$> AddDependentFile <$> get 11 -> THMsg <$> AddTempFile <$> get 12 -> THMsg <$> AddTopDecls <$> get 13 -> THMsg <$> (IsExtEnabled <$> get) 14 -> THMsg <$> return ExtsEnabled 15 -> THMsg <$> return StartRecover 16 -> THMsg <$> EndRecover <$> get 17 -> THMsg <$> return FailIfErrs 18 -> return (THMsg RunTHDone) 19 -> THMsg <$> AddModFinalizer <$> get 20 -> THMsg <$> (AddForeignFilePath <$> get <*> get) 21 -> THMsg <$> AddCorePlugin <$> get 22 -> THMsg <$> ReifyType <$> get 23 -> THMsg <$> (PutDoc <$> get <*> get) 24 -> THMsg <$> GetDoc <$> get 25 -> THMsg <$> return GetPackageRoot n -> error ("getTHMessage: unknown message " ++ show n) putTHMessage :: THMessage a -> Put putTHMessage m = case m of NewName a -> putWord8 0 >> put a Report a b -> putWord8 1 >> put a >> put b LookupName a b -> putWord8 2 >> put a >> put b Reify a -> putWord8 3 >> put a ReifyFixity a -> putWord8 4 >> put a ReifyInstances a b -> putWord8 5 >> put a >> put b ReifyRoles a -> putWord8 6 >> put a ReifyAnnotations a b -> putWord8 7 >> put a >> put b ReifyModule a -> putWord8 8 >> put a ReifyConStrictness a -> putWord8 9 >> put a AddDependentFile a -> putWord8 10 >> put a AddTempFile a -> putWord8 11 >> 
put a AddTopDecls a -> putWord8 12 >> put a IsExtEnabled a -> putWord8 13 >> put a ExtsEnabled -> putWord8 14 StartRecover -> putWord8 15 EndRecover a -> putWord8 16 >> put a FailIfErrs -> putWord8 17 RunTHDone -> putWord8 18 AddModFinalizer a -> putWord8 19 >> put a AddForeignFilePath lang a -> putWord8 20 >> put lang >> put a AddCorePlugin a -> putWord8 21 >> put a ReifyType a -> putWord8 22 >> put a PutDoc l s -> putWord8 23 >> put l >> put s GetDoc l -> putWord8 24 >> put l GetPackageRoot -> putWord8 25 data EvalOpts = EvalOpts { useSandboxThread :: Bool , singleStep :: Bool , breakOnException :: Bool , breakOnError :: Bool } deriving (Generic, Show) instance Binary EvalOpts data ResumeContext a = ResumeContext { resumeBreakMVar :: MVar () , resumeStatusMVar :: MVar (EvalStatus a) , resumeThreadId :: ThreadId } -- | We can pass simple expressions to EvalStmt, consisting of values -- and application. This allows us to wrap the statement to be -- executed in another function, which is used by GHCi to implement -- :set args and :set prog. It might be worthwhile to extend this -- little language in the future. data EvalExpr a = EvalThis a | EvalApp (EvalExpr a) (EvalExpr a) deriving (Generic, Show) instance Binary a => Binary (EvalExpr a) type EvalStatus a = EvalStatus_ a a data EvalStatus_ a b = EvalComplete Word64 (EvalResult a) | EvalBreak Bool HValueRef{- AP_STACK -} Int {- break index -} Int {- uniq of ModuleName -} (RemoteRef (ResumeContext b)) (RemotePtr CostCentreStack) -- Cost centre stack deriving (Generic, Show) instance Binary a => Binary (EvalStatus_ a b) data EvalResult a = EvalException SerializableException | EvalSuccess a deriving (Generic, Show) instance Binary a => Binary (EvalResult a) -- SomeException can't be serialized because it contains dynamic -- types. However, we do very limited things with the exceptions that -- are thrown by interpreted computations: -- -- * We print them, e.g. 
"*** Exception: " -- * UserInterrupt has a special meaning -- * In ghc -e, exitWith should exit with the appropriate exit code -- -- So all we need to do is distinguish UserInterrupt and ExitCode, and -- all other exceptions can be represented by their 'show' string. -- data SerializableException = EUserInterrupt | EExitCode ExitCode | EOtherException String deriving (Generic, Show) toSerializableException :: SomeException -> SerializableException toSerializableException ex | Just UserInterrupt <- fromException ex = EUserInterrupt | Just (ec::ExitCode) <- fromException ex = (EExitCode ec) | otherwise = EOtherException (show (ex :: SomeException)) fromSerializableException :: SerializableException -> SomeException fromSerializableException EUserInterrupt = toException UserInterrupt fromSerializableException (EExitCode c) = toException c fromSerializableException (EOtherException str) = toException (ErrorCall str) instance Binary ExitCode instance Binary SerializableException data THResult a = THException String | THComplete a deriving (Generic, Show) instance Binary a => Binary (THResult a) data THResultType = THExp | THPat | THType | THDec | THAnnWrapper deriving (Enum, Show, Generic) instance Binary THResultType -- | The server-side Template Haskell state. This is created by the -- StartTH message. A new one is created per module that GHC -- typechecks. data QState = QState { qsMap :: Map TypeRep Dynamic -- ^ persistent data between splices in a module , qsLocation :: Maybe TH.Loc -- ^ location for current splice, if any , qsPipe :: Pipe -- ^ pipe to communicate with GHC } instance Show QState where show _ = "" -- Orphan instances of Binary for Ptr / FunPtr by conversion to Word64. -- This is to support Binary StgInfoTable which includes these. instance Binary (Ptr a) where put p = put (fromIntegral (ptrToWordPtr p) :: Word64) get = (wordPtrToPtr . fromIntegral) <$> (get :: Get Word64) instance Binary (FunPtr a) where put = put . 
castFunPtrToPtr get = castPtrToFunPtr <$> get -- Binary instances to support the GetClosure message #ifndef MIN_VERSION_ghc_heap #define MIN_VERSION_ghc_heap(major1,major2,minor) (\ (major1) < 9 || \ (major1) == 9 && (major2) < 4 || \ (major1) == 9 && (major2) == 4 && (minor) <= 7) #endif /* MIN_VERSION_ghc_heap */ #if MIN_VERSION_ghc_heap(8,11,0) instance Binary Heap.StgTSOProfInfo instance Binary Heap.CostCentreStack instance Binary Heap.CostCentre instance Binary Heap.IndexTable instance Binary Heap.WhatNext instance Binary Heap.WhyBlocked instance Binary Heap.TsoFlags #endif instance Binary Heap.StgInfoTable instance Binary Heap.ClosureType instance Binary Heap.PrimType instance Binary a => Binary (Heap.GenClosure a) data Msg = forall a . (Binary a, Show a) => Msg (Message a) getMessage :: Get Msg getMessage = do b <- getWord8 case b of 0 -> Msg <$> return Shutdown 1 -> Msg <$> return InitLinker 2 -> Msg <$> LookupSymbol <$> get 3 -> Msg <$> LookupClosure <$> get 4 -> Msg <$> LoadDLL <$> get 5 -> Msg <$> LoadArchive <$> get 6 -> Msg <$> LoadObj <$> get 7 -> Msg <$> UnloadObj <$> get 8 -> Msg <$> AddLibrarySearchPath <$> get 9 -> Msg <$> RemoveLibrarySearchPath <$> get 10 -> Msg <$> return ResolveObjs 11 -> Msg <$> FindSystemLibrary <$> get 12 -> Msg <$> CreateBCOs <$> get 13 -> Msg <$> FreeHValueRefs <$> get 14 -> Msg <$> MallocData <$> get 15 -> Msg <$> MallocStrings <$> get 16 -> Msg <$> (PrepFFI <$> get <*> get <*> get) 17 -> Msg <$> FreeFFI <$> get 18 -> Msg <$> (MkConInfoTable <$> get <*> get <*> get <*> get <*> get <*> get) 19 -> Msg <$> (EvalStmt <$> get <*> get) 20 -> Msg <$> (ResumeStmt <$> get <*> get) 21 -> Msg <$> (AbandonStmt <$> get) 22 -> Msg <$> (EvalString <$> get) 23 -> Msg <$> (EvalStringToString <$> get <*> get) 24 -> Msg <$> (EvalIO <$> get) 25 -> Msg <$> (MkCostCentres <$> get <*> get) 26 -> Msg <$> (CostCentreStackInfo <$> get) 27 -> Msg <$> (NewBreakArray <$> get) 28 -> Msg <$> (SetupBreakpoint <$> get <*> get <*> get) 29 -> Msg <$> 
(BreakpointStatus <$> get <*> get) 30 -> Msg <$> (GetBreakpointVar <$> get <*> get) 31 -> Msg <$> return StartTH 32 -> Msg <$> (RunModFinalizers <$> get <*> get) 33 -> Msg <$> (AddSptEntry <$> get <*> get) 34 -> Msg <$> (RunTH <$> get <*> get <*> get <*> get) 35 -> Msg <$> (GetClosure <$> get) 36 -> Msg <$> (Seq <$> get) 37 -> Msg <$> return RtsRevertCAFs 38 -> Msg <$> (ResumeSeq <$> get) _ -> error $ "Unknown Message code " ++ (show b) putMessage :: Message a -> Put putMessage m = case m of Shutdown -> putWord8 0 InitLinker -> putWord8 1 LookupSymbol str -> putWord8 2 >> put str LookupClosure str -> putWord8 3 >> put str LoadDLL str -> putWord8 4 >> put str LoadArchive str -> putWord8 5 >> put str LoadObj str -> putWord8 6 >> put str UnloadObj str -> putWord8 7 >> put str AddLibrarySearchPath str -> putWord8 8 >> put str RemoveLibrarySearchPath ptr -> putWord8 9 >> put ptr ResolveObjs -> putWord8 10 FindSystemLibrary str -> putWord8 11 >> put str CreateBCOs bco -> putWord8 12 >> put bco FreeHValueRefs val -> putWord8 13 >> put val MallocData bs -> putWord8 14 >> put bs MallocStrings bss -> putWord8 15 >> put bss PrepFFI conv args res -> putWord8 16 >> put conv >> put args >> put res FreeFFI p -> putWord8 17 >> put p MkConInfoTable tc p n t pt d -> putWord8 18 >> put tc >> put p >> put n >> put t >> put pt >> put d EvalStmt opts val -> putWord8 19 >> put opts >> put val ResumeStmt opts val -> putWord8 20 >> put opts >> put val AbandonStmt val -> putWord8 21 >> put val EvalString val -> putWord8 22 >> put val EvalStringToString str val -> putWord8 23 >> put str >> put val EvalIO val -> putWord8 24 >> put val MkCostCentres mod ccs -> putWord8 25 >> put mod >> put ccs CostCentreStackInfo ptr -> putWord8 26 >> put ptr NewBreakArray sz -> putWord8 27 >> put sz SetupBreakpoint arr ix cnt -> putWord8 28 >> put arr >> put ix >> put cnt BreakpointStatus arr ix -> putWord8 29 >> put arr >> put ix GetBreakpointVar a b -> putWord8 30 >> put a >> put b StartTH -> putWord8 31 
RunModFinalizers a b -> putWord8 32 >> put a >> put b AddSptEntry a b -> putWord8 33 >> put a >> put b RunTH st q loc ty -> putWord8 34 >> put st >> put q >> put loc >> put ty GetClosure a -> putWord8 35 >> put a Seq a -> putWord8 36 >> put a RtsRevertCAFs -> putWord8 37 ResumeSeq a -> putWord8 38 >> put a -- ----------------------------------------------------------------------------- -- Reading/writing messages data Pipe = Pipe { pipeRead :: Handle , pipeWrite :: Handle , pipeLeftovers :: IORef (Maybe ByteString) } remoteCall :: Binary a => Pipe -> Message a -> IO a remoteCall pipe msg = do writePipe pipe (putMessage msg) readPipe pipe get remoteTHCall :: Binary a => Pipe -> THMessage a -> IO a remoteTHCall pipe msg = do writePipe pipe (putTHMessage msg) readPipe pipe get writePipe :: Pipe -> Put -> IO () writePipe Pipe{..} put | LB.null bs = return () | otherwise = do LB.hPut pipeWrite bs hFlush pipeWrite where bs = runPut put readPipe :: Pipe -> Get a -> IO a readPipe Pipe{..} get = do leftovers <- readIORef pipeLeftovers m <- getBin pipeRead get leftovers case m of Nothing -> throw $ mkIOError eofErrorType "GHCi.Message.remoteCall" (Just pipeRead) Nothing Just (result, new_leftovers) -> do writeIORef pipeLeftovers new_leftovers return result getBin :: Handle -> Get a -> Maybe ByteString -> IO (Maybe (a, Maybe ByteString)) getBin h get leftover = go leftover (runGetIncremental get) where go Nothing (Done leftover _ msg) = return (Just (msg, if B.null leftover then Nothing else Just leftover)) go _ Done{} = throwIO (ErrorCall "getBin: Done with leftovers") go (Just leftover) (Partial fun) = do go Nothing (fun (Just leftover)) go Nothing (Partial fun) = do -- putStrLn "before hGetSome" b <- B.hGetSome h (32*1024) -- printf "hGetSome: %d\n" (B.length b) if B.null b then return Nothing else go Nothing (fun (Just b)) go _lft (Fail _rest _off str) = throwIO (ErrorCall ("getBin: " ++ str)) 
ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/RemoteTypes.hs0000644000000000000000000000775614472375231021730 0ustar0000000000000000{-# LANGUAGE CPP, StandaloneDeriving, GeneralizedNewtypeDeriving #-} -- | -- Types for referring to remote objects in Remote GHCi. For more -- details, see Note [External GHCi pointers] in compiler/GHC/Runtime/Interpreter.hs -- -- For details on Remote GHCi, see Note [Remote GHCi] in -- compiler/GHC/Runtime/Interpreter.hs. -- module GHCi.RemoteTypes ( RemotePtr(..), toRemotePtr, fromRemotePtr, castRemotePtr , HValue(..) , RemoteRef, mkRemoteRef, localRef, freeRemoteRef , HValueRef, toHValueRef , ForeignRef, mkForeignRef, withForeignRef , ForeignHValue , unsafeForeignRefToRemoteRef, finalizeForeignRef ) where import Prelude -- See note [Why do we import Prelude here?] import Control.DeepSeq import Data.Word import Foreign hiding (newForeignPtr) import Foreign.Concurrent import Data.Binary import Unsafe.Coerce import GHC.Exts import GHC.ForeignPtr -- ----------------------------------------------------------------------------- -- RemotePtr -- Static pointers only; don't use this for heap-resident pointers. -- Instead use HValueRef. We will fix the remote pointer to be 64 bits. This -- should cover 64 and 32bit systems, and permits the exchange of remote ptrs -- between machines of different word size. For example, when connecting to -- an iserv instance on a different architecture with different word size via -- -fexternal-interpreter. 
newtype RemotePtr a = RemotePtr Word64 toRemotePtr :: Ptr a -> RemotePtr a toRemotePtr p = RemotePtr (fromIntegral (ptrToWordPtr p)) fromRemotePtr :: RemotePtr a -> Ptr a fromRemotePtr (RemotePtr p) = wordPtrToPtr (fromIntegral p) castRemotePtr :: RemotePtr a -> RemotePtr b castRemotePtr (RemotePtr a) = RemotePtr a deriving instance Show (RemotePtr a) deriving instance Binary (RemotePtr a) deriving instance NFData (RemotePtr a) -- ----------------------------------------------------------------------------- -- HValueRef newtype HValue = HValue Any instance Show HValue where show _ = "" -- | A reference to a remote value. These are allocated and freed explicitly. newtype RemoteRef a = RemoteRef (RemotePtr ()) deriving (Show, Binary) -- We can discard type information if we want toHValueRef :: RemoteRef a -> RemoteRef HValue toHValueRef = unsafeCoerce -- For convenience type HValueRef = RemoteRef HValue -- | Make a reference to a local value that we can send remotely. -- This reference will keep the value that it refers to alive until -- 'freeRemoteRef' is called. mkRemoteRef :: a -> IO (RemoteRef a) mkRemoteRef a = do sp <- newStablePtr a return $! RemoteRef (toRemotePtr (castStablePtrToPtr sp)) -- | Convert an HValueRef to an HValue. Should only be used if the HValue -- originated in this process. localRef :: RemoteRef a -> IO a localRef (RemoteRef w) = deRefStablePtr (castPtrToStablePtr (fromRemotePtr w)) -- | Release an HValueRef that originated in this process freeRemoteRef :: RemoteRef a -> IO () freeRemoteRef (RemoteRef w) = freeStablePtr (castPtrToStablePtr (fromRemotePtr w)) -- | An HValueRef with a finalizer newtype ForeignRef a = ForeignRef (ForeignPtr ()) instance NFData (ForeignRef a) where rnf x = x `seq` () type ForeignHValue = ForeignRef HValue -- | Create a 'ForeignRef' from a 'RemoteRef'. The finalizer -- should arrange to call 'freeHValueRef' on the 'HValueRef'. 
(since -- this function needs to be called in the process that created the -- 'HValueRef', it cannot be called directly from the finalizer). mkForeignRef :: RemoteRef a -> IO () -> IO (ForeignRef a) mkForeignRef (RemoteRef hvref) finalizer = ForeignRef <$> newForeignPtr (fromRemotePtr hvref) finalizer -- | Use a 'ForeignHValue' withForeignRef :: ForeignRef a -> (RemoteRef a -> IO b) -> IO b withForeignRef (ForeignRef fp) f = withForeignPtr fp (f . RemoteRef . toRemotePtr) unsafeForeignRefToRemoteRef :: ForeignRef a -> RemoteRef a unsafeForeignRefToRemoteRef (ForeignRef fp) = RemoteRef (toRemotePtr (unsafeForeignPtrToPtr fp)) finalizeForeignRef :: ForeignRef a -> IO () finalizeForeignRef (ForeignRef fp) = finalizeForeignPtr fp ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/ResolvedBCO.hs0000644000000000000000000000452314470055371021542 0ustar0000000000000000{-# LANGUAGE RecordWildCards, DeriveGeneric, GeneralizedNewtypeDeriving, BangPatterns, CPP #-} module GHCi.ResolvedBCO ( ResolvedBCO(..) , ResolvedBCOPtr(..) , isLittleEndian ) where import Prelude -- See note [Why do we import Prelude here?] import GHC.Data.SizedSeq import GHCi.RemoteTypes import GHCi.BreakArray import Data.Array.Unboxed import Data.Binary import GHC.Generics import GHCi.BinaryArray #include "MachDeps.h" isLittleEndian :: Bool #if defined(WORDS_BIGENDIAN) isLittleEndian = False #else isLittleEndian = True #endif -- ----------------------------------------------------------------------------- -- ResolvedBCO -- | A 'ResolvedBCO' is one in which all the 'Name' references have been -- resolved to actual addresses or 'RemoteHValues'. 
-- -- Note, all arrays are zero-indexed (we assume this when -- serializing/deserializing) data ResolvedBCO = ResolvedBCO { resolvedBCOIsLE :: Bool, resolvedBCOArity :: {-# UNPACK #-} !Int, resolvedBCOInstrs :: UArray Int Word16, -- insns resolvedBCOBitmap :: UArray Int Word64, -- bitmap resolvedBCOLits :: UArray Int Word64, -- non-ptrs resolvedBCOPtrs :: (SizedSeq ResolvedBCOPtr) -- ptrs } deriving (Generic, Show) -- | The Binary instance for ResolvedBCOs. -- -- Note, that we do encode the endianness, however there is no support for mixed -- endianness setups. This is primarily to ensure that ghc and iserv share the -- same endianness. instance Binary ResolvedBCO where put ResolvedBCO{..} = do put resolvedBCOIsLE put resolvedBCOArity putArray resolvedBCOInstrs putArray resolvedBCOBitmap putArray resolvedBCOLits put resolvedBCOPtrs get = ResolvedBCO <$> get <*> get <*> getArray <*> getArray <*> getArray <*> get data ResolvedBCOPtr = ResolvedBCORef {-# UNPACK #-} !Int -- ^ reference to the Nth BCO in the current set | ResolvedBCOPtr {-# UNPACK #-} !(RemoteRef HValue) -- ^ reference to a previously created BCO | ResolvedBCOStaticPtr {-# UNPACK #-} !(RemotePtr ()) -- ^ reference to a static ptr | ResolvedBCOPtrBCO ResolvedBCO -- ^ a nested BCO | ResolvedBCOPtrBreakArray {-# UNPACK #-} !(RemoteRef BreakArray) -- ^ Resolves to the MutableArray# inside the BreakArray deriving (Generic, Show) instance Binary ResolvedBCOPtr ghc-lib-parser-9.4.7.20230826/libraries/ghci/GHCi/TH/Binary.hs0000644000000000000000000000474214472375231021177 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} -- This module is full of orphans, unfortunately module GHCi.TH.Binary () where import Prelude -- See note [Why do we import Prelude here?] 
import Data.Binary import qualified Data.ByteString as B import qualified Data.ByteString.Internal as B import GHC.Serialized import qualified Language.Haskell.TH as TH import qualified Language.Haskell.TH.Syntax as TH -- Put these in a separate module because they take ages to compile instance Binary TH.Loc instance Binary TH.Name instance Binary TH.ModName instance Binary TH.NameFlavour instance Binary TH.PkgName instance Binary TH.NameSpace instance Binary TH.Module instance Binary TH.Info instance Binary TH.Type instance Binary TH.TyLit instance Binary TH.Specificity instance Binary flag => Binary (TH.TyVarBndr flag) instance Binary TH.Role instance Binary TH.Lit instance Binary TH.Range instance Binary TH.Stmt instance Binary TH.Pat instance Binary TH.Exp instance Binary TH.Dec instance Binary TH.Overlap instance Binary TH.DerivClause instance Binary TH.DerivStrategy instance Binary TH.Guard instance Binary TH.Body instance Binary TH.Match instance Binary TH.Fixity instance Binary TH.TySynEqn instance Binary TH.FunDep instance Binary TH.AnnTarget instance Binary TH.RuleBndr instance Binary TH.Phases instance Binary TH.RuleMatch instance Binary TH.Inline instance Binary TH.Pragma instance Binary TH.Safety instance Binary TH.Callconv instance Binary TH.Foreign instance Binary TH.Bang instance Binary TH.SourceUnpackedness instance Binary TH.SourceStrictness instance Binary TH.DecidedStrictness instance Binary TH.FixityDirection instance Binary TH.OccName instance Binary TH.Con instance Binary TH.AnnLookup instance Binary TH.ModuleInfo instance Binary TH.Clause instance Binary TH.InjectivityAnn instance Binary TH.FamilyResultSig instance Binary TH.TypeFamilyHead instance Binary TH.PatSynDir instance Binary TH.PatSynArgs instance Binary TH.DocLoc -- We need Binary TypeRep for serializing annotations instance Binary Serialized where put (Serialized tyrep wds) = put tyrep >> put (B.pack wds) get = Serialized <$> get <*> (B.unpack <$> get) instance Binary TH.Bytes 
where put (TH.Bytes ptr off sz) = put bs where bs = B.PS ptr (fromIntegral off) (fromIntegral sz) get = do B.PS ptr off sz <- get return (TH.Bytes ptr (fromIntegral off) (fromIntegral sz)) ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax.hs0000644000000000000000000000430514472400113022165 0ustar0000000000000000{- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section{Haskell abstract syntax definition} This module glues together the pieces of the Haskell abstract syntax, which is declared in the various \tr{Hs*} modules. This module, therefore, is almost nothing but re-exporting. -} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} -- For deriving instance Data {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax ( module Language.Haskell.Syntax.Binds, module Language.Haskell.Syntax.Decls, module Language.Haskell.Syntax.Expr, module Language.Haskell.Syntax.Lit, module Language.Haskell.Syntax.Pat, module Language.Haskell.Syntax.Type, module Language.Haskell.Syntax.Extension, ) where import Language.Haskell.Syntax.Decls import Language.Haskell.Syntax.Binds import Language.Haskell.Syntax.Expr import Language.Haskell.Syntax.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Pat import Language.Haskell.Syntax.Type {- Note [Language.Haskell.Syntax.* Hierarchy] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why are these modules not 'GHC.Hs.*', or some other 'GHC.*'? The answer is that they are to be separated from GHC and put into another package, in accordance with the final goals of Trees That Grow. (See Note [Trees That Grow] in 'Language.Haskell.Syntax.Extension'.) 
In short, the 'Language.Haskell.Syntax.*' tree should be entirely GHC-independent. GHC-specific stuff related to source-language syntax should be in 'GHC.Hs.*'. We cannot move them to the separate package yet, but by giving them names like so, we hope to remind others that the goal is to factor them out, and therefore dependencies on the rest of GHC should never be added, only removed. For more details, see https://gitlab.haskell.org/ghc/ghc/-/wikis/implementing-trees-that-grow -} -- TODO Add TTG parameter to 'HsModule' and move here. ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Binds.hs0000644000000000000000000005522414472400113023232 0ustar0000000000000000{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ViewPatterns #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[HsBinds]{Abstract syntax: top-level bindings and signatures} Datatype for: @BindGroup@, @Bind@, @Sig@, @Bind@. 
-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Binds where import GHC.Prelude import {-# SOURCE #-} Language.Haskell.Syntax.Expr ( LHsExpr , MatchGroup , GRHSs ) import {-# SOURCE #-} Language.Haskell.Syntax.Pat ( LPat ) import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Type import GHC.Types.Name.Reader(RdrName) import GHC.Types.Basic import GHC.Types.SourceText import GHC.Types.Tickish import GHC.Types.Var import GHC.Types.Fixity import GHC.Data.Bag import GHC.Data.BooleanFormula (LBooleanFormula) import GHC.Utils.Outputable import GHC.Utils.Panic (pprPanic) import Data.Void {- ************************************************************************ * * \subsection{Bindings: @BindGroup@} * * ************************************************************************ Global bindings (where clauses) -} -- During renaming, we need bindings where the left-hand sides -- have been renamed but the right-hand sides have not. -- Other than during renaming, these will be the same. 
-- | Haskell Local Bindings type HsLocalBinds id = HsLocalBindsLR id id -- | Located Haskell local bindings type LHsLocalBinds id = XRec id (HsLocalBinds id) -- | Haskell Local Bindings with separate Left and Right identifier types -- -- Bindings in a 'let' expression -- or a 'where' clause data HsLocalBindsLR idL idR = HsValBinds (XHsValBinds idL idR) (HsValBindsLR idL idR) -- ^ Haskell Value Bindings -- There should be no pattern synonyms in the HsValBindsLR -- These are *local* (not top level) bindings -- The parser accepts them, however, leaving the -- renamer to report them | HsIPBinds (XHsIPBinds idL idR) (HsIPBinds idR) -- ^ Haskell Implicit Parameter Bindings | EmptyLocalBinds (XEmptyLocalBinds idL idR) -- ^ Empty Local Bindings | XHsLocalBindsLR !(XXHsLocalBindsLR idL idR) type LHsLocalBindsLR idL idR = XRec idL (HsLocalBindsLR idL idR) -- | Haskell Value Bindings type HsValBinds id = HsValBindsLR id id -- | Haskell Value bindings with separate Left and Right identifier types -- (not implicit parameters) -- Used for both top level and nested bindings -- May contain pattern synonym bindings data HsValBindsLR idL idR = -- | Value Bindings In -- -- Before renaming RHS; idR is always RdrName -- Not dependency analysed -- Recursive by default ValBinds (XValBinds idL idR) (LHsBindsLR idL idR) [LSig idR] -- | Value Bindings Out -- -- After renaming RHS; idR can be Name or Id Dependency analysed, -- later bindings in the list may depend on earlier ones. 
| XValBindsLR !(XXValBindsLR idL idR) -- --------------------------------------------------------------------- -- | Located Haskell Binding type LHsBind id = LHsBindLR id id -- | Located Haskell Bindings type LHsBinds id = LHsBindsLR id id -- | Haskell Binding type HsBind id = HsBindLR id id -- | Located Haskell Bindings with separate Left and Right identifier types type LHsBindsLR idL idR = Bag (LHsBindLR idL idR) -- | Located Haskell Binding with separate Left and Right identifier types type LHsBindLR idL idR = XRec idL (HsBindLR idL idR) {- Note [FunBind vs PatBind] ~~~~~~~~~~~~~~~~~~~~~~~~~ The distinction between FunBind and PatBind is a bit subtle. FunBind covers patterns which resemble function bindings and simple variable bindings. f x = e f !x = e f = e !x = e -- FunRhs has SrcStrict x `f` y = e -- FunRhs has Infix The actual patterns and RHSs of a FunBind are encoding in fun_matches. The m_ctxt field of each Match in fun_matches will be FunRhs and carries two bits of information about the match, * The mc_fixity field on each Match describes the fixity of the function binder in that match. E.g. this is legal: f True False = e1 True `f` True = e2 * The mc_strictness field is used /only/ for nullary FunBinds: ones with one Match, which has no pats. For these, it describes whether the match is decorated with a bang (e.g. `!x = e`). By contrast, PatBind represents data constructor patterns, as well as a few other interesting cases. Namely, Just x = e (x) = e x :: Ty = e -} -- | Haskell Binding with separate Left and Right id's data HsBindLR idL idR = -- | Function-like Binding -- -- FunBind is used for both functions @f x = e@ -- and variables @f = \x -> e@ -- and strict variables @!x = x + 1@ -- -- Reason 1: Special case for type inference: see 'GHC.Tc.Gen.Bind.tcMonoBinds'. -- -- Reason 2: Instance decls can only have FunBinds, which is convenient. -- If you change this, you'll need to change e.g. 
rnMethodBinds -- -- But note that the form @f :: a->a = ...@ -- parses as a pattern binding, just like -- @(f :: a -> a) = ... @ -- -- Strict bindings have their strictness recorded in the 'SrcStrictness' of their -- 'MatchContext'. See Note [FunBind vs PatBind] for -- details about the relationship between FunBind and PatBind. -- -- 'GHC.Parser.Annotation.AnnKeywordId's -- -- - 'GHC.Parser.Annotation.AnnFunId', attached to each element of fun_matches -- -- - 'GHC.Parser.Annotation.AnnEqual','GHC.Parser.Annotation.AnnWhere', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation FunBind { fun_ext :: XFunBind idL idR, fun_id :: LIdP idL, -- Note [fun_id in Match] in GHC.Hs.Expr fun_matches :: MatchGroup idR (LHsExpr idR), -- ^ The payload fun_tick :: [CoreTickish] -- ^ Ticks to put on the rhs, if any } -- | Pattern Binding -- -- The pattern is never a simple variable; -- That case is done by FunBind. -- See Note [FunBind vs PatBind] for details about the -- relationship between FunBind and PatBind. -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnBang', -- 'GHC.Parser.Annotation.AnnEqual','GHC.Parser.Annotation.AnnWhere', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | PatBind { pat_ext :: XPatBind idL idR, pat_lhs :: LPat idL, pat_rhs :: GRHSs idR (LHsExpr idR), pat_ticks :: ([CoreTickish], [[CoreTickish]]) -- ^ Ticks to put on the rhs, if any, and ticks to put on -- the bound variables. } -- | Variable Binding -- -- Dictionary binding and suchlike. 
-- All VarBinds are introduced by the type checker | VarBind { var_ext :: XVarBind idL idR, var_id :: IdP idL, var_rhs :: LHsExpr idR -- ^ Located only for consistency } -- | Patterns Synonym Binding | PatSynBind (XPatSynBind idL idR) (PatSynBind idL idR) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnPattern', -- 'GHC.Parser.Annotation.AnnLarrow','GHC.Parser.Annotation.AnnEqual', -- 'GHC.Parser.Annotation.AnnWhere' -- 'GHC.Parser.Annotation.AnnOpen' @'{'@,'GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XHsBindsLR !(XXHsBindsLR idL idR) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnPattern', -- 'GHC.Parser.Annotation.AnnEqual','GHC.Parser.Annotation.AnnLarrow', -- 'GHC.Parser.Annotation.AnnWhere','GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@, -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Pattern Synonym binding data PatSynBind idL idR = PSB { psb_ext :: XPSB idL idR, psb_id :: LIdP idL, -- ^ Name of the pattern synonym psb_args :: HsPatSynDetails idR, -- ^ Formal parameter names psb_def :: LPat idR, -- ^ Right-hand side psb_dir :: HsPatSynDir idR -- ^ Directionality } | XPatSynBind !(XXPatSynBind idL idR) {- ************************************************************************ * * Implicit parameter bindings * * ************************************************************************ -} -- | Haskell Implicit Parameter Bindings data HsIPBinds id = IPBinds (XIPBinds id) [LIPBind id] -- TcEvBinds -- Only in typechecker output; binds -- -- uses of the implicit parameters | XHsIPBinds !(XXHsIPBinds id) -- | Located Implicit Parameter Binding type LIPBind id = XRec id (IPBind id) -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnSemi' when in a -- list -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- 
| Implicit parameter bindings. -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnEqual' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data IPBind id = IPBind (XCIPBind id) (XRec id HsIPName) (LHsExpr id) | XIPBind !(XXIPBind id) {- ************************************************************************ * * \subsection{@Sig@: type signatures and value-modifying user pragmas} * * ************************************************************************ It is convenient to lump ``value-modifying'' user-pragmas (e.g., ``specialise this function to these four types...'') in with type signatures. Then all the machinery to move them into place, etc., serves for both. -} -- | Located Signature type LSig pass = XRec pass (Sig pass) -- | Signatures and pragmas data Sig pass = -- | An ordinary type signature -- -- > f :: Num a => a -> a -- -- After renaming, this list of Names contains the named -- wildcards brought into scope by this signature. For a signature -- @_ -> _a -> Bool@, the renamer will leave the unnamed wildcard @_@ -- untouched, and the named wildcard @_a@ is then replaced with -- fresh meta vars in the type. Their names are stored in the type -- signature that brought them into scope, in this third field to be -- more specific. -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDcolon', -- 'GHC.Parser.Annotation.AnnComma' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation TypeSig (XTypeSig pass) [LIdP pass] -- LHS of the signature; e.g. 
f,g,h :: blah (LHsSigWcType pass) -- RHS of the signature; can have wildcards -- | A pattern synonym type signature -- -- > pattern Single :: () => (Show a) => a -> [a] -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnPattern', -- 'GHC.Parser.Annotation.AnnDcolon','GHC.Parser.Annotation.AnnForall' -- 'GHC.Parser.Annotation.AnnDot','GHC.Parser.Annotation.AnnDarrow' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | PatSynSig (XPatSynSig pass) [LIdP pass] (LHsSigType pass) -- P :: forall a b. Req => Prov => ty -- | A signature for a class method -- False: ordinary class-method signature -- True: generic-default class method signature -- e.g. class C a where -- op :: a -> a -- Ordinary -- default op :: Eq a => a -> a -- Generic default -- No wildcards allowed here -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDefault', -- 'GHC.Parser.Annotation.AnnDcolon' | ClassOpSig (XClassOpSig pass) Bool [LIdP pass] (LHsSigType pass) -- | A type signature in generated code, notably the code -- generated for record selectors. We simply record -- the desired Id itself, replete with its name, type -- and IdDetails. 
Otherwise it's just like a type -- signature: there should be an accompanying binding | IdSig (XIdSig pass) Id -- | An ordinary fixity declaration -- -- > infixl 8 *** -- -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnInfix', -- 'GHC.Parser.Annotation.AnnVal' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | FixSig (XFixSig pass) (FixitySig pass) -- | An inline pragma -- -- > {#- INLINE f #-} -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpen' @'{-\# INLINE'@ and @'['@, -- 'GHC.Parser.Annotation.AnnClose','GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnVal','GHC.Parser.Annotation.AnnTilde', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | InlineSig (XInlineSig pass) (LIdP pass) -- Function name InlinePragma -- Never defaultInlinePragma -- | A specialisation pragma -- -- > {-# SPECIALISE f :: Int -> Int #-} -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnOpen' @'{-\# SPECIALISE'@ and @'['@, -- 'GHC.Parser.Annotation.AnnTilde', -- 'GHC.Parser.Annotation.AnnVal', -- 'GHC.Parser.Annotation.AnnClose' @']'@ and @'\#-}'@, -- 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | SpecSig (XSpecSig pass) (LIdP pass) -- Specialise a function or datatype ... [LHsSigType pass] -- ... to these types InlinePragma -- The pragma on SPECIALISE_INLINE form. 
-- If it's just defaultInlinePragma, then we said -- SPECIALISE, not SPECIALISE_INLINE -- | A specialisation pragma for instance declarations only -- -- > {-# SPECIALISE instance Eq [Int] #-} -- -- (Class tys); should be a specialisation of the -- current instance declaration -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnInstance','GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | SpecInstSig (XSpecInstSig pass) SourceText (LHsSigType pass) -- Note [Pragma source text] in GHC.Types.SourceText -- | A minimal complete definition pragma -- -- > {-# MINIMAL a | (b, c | (d | e)) #-} -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnVbar','GHC.Parser.Annotation.AnnComma', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | MinimalSig (XMinimalSig pass) SourceText (LBooleanFormula (LIdP pass)) -- Note [Pragma source text] in GHC.Types.SourceText -- | A "set cost centre" pragma for declarations -- -- > {-# SCC funName #-} -- -- or -- -- > {-# SCC funName "cost_centre_name" #-} | SCCFunSig (XSCCFunSig pass) SourceText -- Note [Pragma source text] in GHC.Types.SourceText (LIdP pass) -- Function name (Maybe (XRec pass StringLiteral)) -- | A complete match pragma -- -- > {-# COMPLETE C, D [:: T] #-} -- -- Used to inform the pattern match checker about additional -- complete matchings which, for example, arise from pattern -- synonym definitions. | CompleteMatchSig (XCompleteMatchSig pass) SourceText (XRec pass [LIdP pass]) (Maybe (LIdP pass)) | XSig !(XXSig pass) -- | Located Fixity Signature type LFixitySig pass = XRec pass (FixitySig pass) -- | Fixity Signature data FixitySig pass = FixitySig (XFixitySig pass) [LIdP pass] Fixity | XFixitySig !(XXFixitySig pass) isFixityLSig :: forall p. 
UnXRec p => LSig p -> Bool isFixityLSig (unXRec @p -> FixSig {}) = True isFixityLSig _ = False isTypeLSig :: forall p. UnXRec p => LSig p -> Bool -- Type signatures isTypeLSig (unXRec @p -> TypeSig {}) = True isTypeLSig (unXRec @p -> ClassOpSig {}) = True isTypeLSig (unXRec @p -> IdSig {}) = True isTypeLSig _ = False isSpecLSig :: forall p. UnXRec p => LSig p -> Bool isSpecLSig (unXRec @p -> SpecSig {}) = True isSpecLSig _ = False isSpecInstLSig :: forall p. UnXRec p => LSig p -> Bool isSpecInstLSig (unXRec @p -> SpecInstSig {}) = True isSpecInstLSig _ = False isPragLSig :: forall p. UnXRec p => LSig p -> Bool -- Identifies pragmas isPragLSig (unXRec @p -> SpecSig {}) = True isPragLSig (unXRec @p -> InlineSig {}) = True isPragLSig (unXRec @p -> SCCFunSig {}) = True isPragLSig (unXRec @p -> CompleteMatchSig {}) = True isPragLSig _ = False isInlineLSig :: forall p. UnXRec p => LSig p -> Bool -- Identifies inline pragmas isInlineLSig (unXRec @p -> InlineSig {}) = True isInlineLSig _ = False isMinimalLSig :: forall p. UnXRec p => LSig p -> Bool isMinimalLSig (unXRec @p -> MinimalSig {}) = True isMinimalLSig _ = False isSCCFunSig :: forall p. UnXRec p => LSig p -> Bool isSCCFunSig (unXRec @p -> SCCFunSig {}) = True isSCCFunSig _ = False isCompleteMatchSig :: forall p. UnXRec p => LSig p -> Bool isCompleteMatchSig (unXRec @p -> CompleteMatchSig {} ) = True isCompleteMatchSig _ = False hsSigDoc :: Sig name -> SDoc hsSigDoc (TypeSig {}) = text "type signature" hsSigDoc (PatSynSig {}) = text "pattern synonym signature" hsSigDoc (ClassOpSig _ is_deflt _ _) | is_deflt = text "default type signature" | otherwise = text "class method signature" hsSigDoc (IdSig {}) = text "id signature" hsSigDoc (SpecSig _ _ _ inl) = (inlinePragmaName . inl_inline $ inl) <+> text "pragma" hsSigDoc (InlineSig _ _ prag) = (inlinePragmaName . 
inl_inline $ prag) <+> text "pragma" -- Using the 'inlinePragmaName' function ensures that the pragma name for any -- one of the INLINE/INLINABLE/NOINLINE pragmas are printed after being extracted -- from the InlineSpec field of the pragma. hsSigDoc (SpecInstSig _ src _) = text (extractSpecPragName src) <+> text "instance pragma" hsSigDoc (FixSig {}) = text "fixity declaration" hsSigDoc (MinimalSig {}) = text "MINIMAL pragma" hsSigDoc (SCCFunSig {}) = text "SCC pragma" hsSigDoc (CompleteMatchSig {}) = text "COMPLETE pragma" hsSigDoc (XSig {}) = text "XSIG TTG extension" -- | Extracts the name for a SPECIALIZE instance pragma. In 'hsSigDoc', the src -- field of 'SpecInstSig' signature contains the SourceText for a SPECIALIZE -- instance pragma of the form: "SourceText {-# SPECIALIZE" -- -- Extraction ensures that all variants of the pragma name (with a 'Z' or an -- 'S') are output exactly as used in the pragma. extractSpecPragName :: SourceText -> String extractSpecPragName srcTxt = case (words $ show srcTxt) of (_:_:pragName:_) -> filter (/= '\"') pragName _ -> pprPanic "hsSigDoc: Misformed SPECIALISE instance pragma:" (ppr srcTxt) {- ************************************************************************ * * \subsection[PatSynBind]{A pattern synonym definition} * * ************************************************************************ -} -- | Haskell Pattern Synonym Details type HsPatSynDetails pass = HsConDetails Void (LIdP pass) [RecordPatSynField pass] -- See Note [Record PatSyn Fields] -- | Record Pattern Synonym Field data RecordPatSynField pass = RecordPatSynField { recordPatSynField :: FieldOcc pass -- ^ Field label visible in rest of the file , recordPatSynPatVar :: LIdP pass -- ^ Filled in by renamer, the name used internally by the pattern } {- Note [Record PatSyn Fields] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the following two pattern synonyms. 
pattern P x y = ([x,True], [y,'v']) pattern Q{ x, y } =([x,True], [y,'v']) In P, we just have two local binders, x and y. In Q, we have local binders but also top-level record selectors x :: ([Bool], [Char]) -> Bool y :: ([Bool], [Char]) -> Char Both are recorded in the `RecordPatSynField`s for `x` and `y`: * recordPatSynField: the top-level record selector * recordPatSynPatVar: the local `x`, bound only in the RHS of the pattern synonym. It would make sense to support record-like syntax pattern Q{ x=x1, y=y1 } = ([x1,True], [y1,'v']) when we have a different name for the local and top-level binder, making the distinction between the two names clear. -} instance Outputable (XRec a RdrName) => Outputable (RecordPatSynField a) where ppr (RecordPatSynField { recordPatSynField = v }) = ppr v -- | Haskell Pattern Synonym Direction data HsPatSynDir id = Unidirectional | ImplicitBidirectional | ExplicitBidirectional (MatchGroup id (LHsExpr id)) ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Decls.hs0000644000000000000000000022261314472400113023223 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ViewPatterns #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} {-# OPTIONS_GHC -Wno-incomplete-record-updates #-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* -- | Abstract syntax of global declarations. -- -- Definitions for: @SynDecl@ and @ConDecl@, @ClassDecl@, -- @InstDecl@, @DefaultDecl@ and @ForeignDecl@. 
module Language.Haskell.Syntax.Decls ( -- * Toplevel declarations HsDecl(..), LHsDecl, HsDataDefn(..), HsDeriving, LHsFunDep, FunDep(..), HsDerivingClause(..), LHsDerivingClause, DerivClauseTys(..), LDerivClauseTys, NewOrData(..), newOrDataToFlavour, StandaloneKindSig(..), LStandaloneKindSig, -- ** Class or type declarations TyClDecl(..), LTyClDecl, DataDeclRn(..), TyClGroup(..), tyClGroupTyClDecls, tyClGroupInstDecls, tyClGroupRoleDecls, tyClGroupKindSigs, isClassDecl, isDataDecl, isSynDecl, isFamilyDecl, isTypeFamilyDecl, isDataFamilyDecl, isOpenTypeFamilyInfo, isClosedTypeFamilyInfo, countTyClDecls, tyClDeclTyVars, FamilyDecl(..), LFamilyDecl, -- ** Instance declarations InstDecl(..), LInstDecl, FamilyInfo(..), pprFlavour, TyFamInstDecl(..), LTyFamInstDecl, TyFamDefltDecl, LTyFamDefltDecl, DataFamInstDecl(..), LDataFamInstDecl, FamEqn(..), TyFamInstEqn, LTyFamInstEqn, HsTyPats, LClsInstDecl, ClsInstDecl(..), -- ** Standalone deriving declarations DerivDecl(..), LDerivDecl, -- ** Deriving strategies DerivStrategy(..), LDerivStrategy, derivStrategyName, -- ** @RULE@ declarations LRuleDecls,RuleDecls(..),RuleDecl(..),LRuleDecl,HsRuleRn(..), RuleBndr(..),LRuleBndr, collectRuleBndrSigTys, pprFullRuleName, -- ** @default@ declarations DefaultDecl(..), LDefaultDecl, -- ** Template haskell declaration splice SpliceExplicitFlag(..), SpliceDecl(..), LSpliceDecl, -- ** Foreign function interface declarations ForeignDecl(..), LForeignDecl, ForeignImport(..), ForeignExport(..), CImportSpec(..), -- ** Data-constructor declarations ConDecl(..), LConDecl, HsConDeclH98Details, HsConDeclGADTDetails(..), -- ** Document comments DocDecl(..), LDocDecl, docDeclDoc, -- ** Deprecations WarnDecl(..), LWarnDecl, WarnDecls(..), LWarnDecls, -- ** Annotations AnnDecl(..), LAnnDecl, AnnProvenance(..), annProvenanceName_maybe, -- ** Role annotations RoleAnnotDecl(..), LRoleAnnotDecl, -- ** Injective type families FamilyResultSig(..), LFamilyResultSig, InjectivityAnn(..), LInjectivityAnn, -- * 
Grouping HsGroup(..), hsGroupInstDecls, ) where -- friends: import GHC.Prelude import {-# SOURCE #-} Language.Haskell.Syntax.Expr ( HsExpr, HsSplice ) -- Because Expr imports Decls via HsBracket import Language.Haskell.Syntax.Binds import Language.Haskell.Syntax.Type import GHC.Hs.Doc import GHC.Core.TyCon import GHC.Types.Basic import GHC.Types.ForeignCall import Language.Haskell.Syntax.Extension import GHC.Types.Name.Set import GHC.Types.Fixity -- others: import GHC.Utils.Outputable import GHC.Utils.Misc import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Core.Type import GHC.Unit.Module.Warnings import GHC.Data.Maybe import Data.Data hiding (TyCon,Fixity, Infix) import Data.Void {- ************************************************************************ * * \subsection[HsDecl]{Declarations} * * ************************************************************************ -} type LHsDecl p = XRec p (HsDecl p) -- ^ When in a list this may have -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnSemi' -- -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | A Haskell Declaration data HsDecl p = TyClD (XTyClD p) (TyClDecl p) -- ^ Type or Class Declaration | InstD (XInstD p) (InstDecl p) -- ^ Instance declaration | DerivD (XDerivD p) (DerivDecl p) -- ^ Deriving declaration | ValD (XValD p) (HsBind p) -- ^ Value declaration | SigD (XSigD p) (Sig p) -- ^ Signature declaration | KindSigD (XKindSigD p) (StandaloneKindSig p) -- ^ Standalone kind signature | DefD (XDefD p) (DefaultDecl p) -- ^ 'default' declaration | ForD (XForD p) (ForeignDecl p) -- ^ Foreign declaration | WarningD (XWarningD p) (WarnDecls p) -- ^ Warning declaration | AnnD (XAnnD p) (AnnDecl p) -- ^ Annotation declaration | RuleD (XRuleD p) (RuleDecls p) -- ^ Rule declaration | SpliceD (XSpliceD p) (SpliceDecl p) -- ^ Splice declaration -- (Includes quasi-quotes) | DocD (XDocD p) (DocDecl p) -- ^ Documentation comment -- declaration | 
RoleAnnotD (XRoleAnnotD p) (RoleAnnotDecl p) -- ^Role annotation declaration | XHsDecl !(XXHsDecl p) {- Note [Top-level fixity signatures in an HsGroup] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An `HsGroup p` stores every top-level fixity declarations in one of two places: 1. hs_fixds :: [LFixitySig p] This stores fixity signatures for top-level declarations (e.g., functions, data constructors, classes, type families, etc.) as well as fixity signatures for class methods written outside of the class, as in this example: infixl 4 `m1` class C1 a where m1 :: a -> a -> a 2. hs_tyclds :: [TyClGroup p] Each type class can be found in a TyClDecl inside a TyClGroup, and that TyClDecl stores the fixity signatures for its methods written inside of the class, as in this example: class C2 a where infixl 4 `m2` m2 :: a -> a -> a The story for fixity signatures for class methods is made slightly complicated by the fact that they can appear both inside and outside of the class itself, and both forms of fixity signatures are considered top-level. This matters in `GHC.Rename.Module.rnSrcDecls`, which must create a fixity environment out of all top-level fixity signatures before doing anything else. Therefore, `rnSrcDecls` must be aware of both (1) and (2) above. The `hsGroupTopLevelFixitySigs` function is responsible for collecting this information from an `HsGroup`. One might wonder why we even bother separating top-level fixity signatures into two places at all. That is, why not just take the fixity signatures from `hs_tyclds` and put them into `hs_fixds` so that they are all in one location? This ends up causing problems for `GHC.HsToCore.Quote.repTopDs`, which translates each fixity signature in `hs_fixds` and `hs_tyclds` into a Template Haskell `Dec`. If there are any duplicate signatures between the two fields, this will result in an error (#17608). -} -- | Haskell Group -- -- A 'HsDecl' is categorised into a 'HsGroup' before being -- fed to the renamer. 
data HsGroup p = HsGroup { hs_ext :: XCHsGroup p, hs_valds :: HsValBinds p, hs_splcds :: [LSpliceDecl p], hs_tyclds :: [TyClGroup p], -- A list of mutually-recursive groups; -- This includes `InstDecl`s as well; -- Parser generates a singleton list; -- renamer does dependency analysis hs_derivds :: [LDerivDecl p], hs_fixds :: [LFixitySig p], -- A list of fixity signatures defined for top-level -- declarations and class methods (defined outside of the class -- itself). -- See Note [Top-level fixity signatures in an HsGroup] hs_defds :: [LDefaultDecl p], hs_fords :: [LForeignDecl p], hs_warnds :: [LWarnDecls p], hs_annds :: [LAnnDecl p], hs_ruleds :: [LRuleDecls p], hs_docs :: [LDocDecl p] } | XHsGroup !(XXHsGroup p) hsGroupInstDecls :: HsGroup id -> [LInstDecl id] hsGroupInstDecls = (=<<) group_instds . hs_tyclds -- | Located Splice Declaration type LSpliceDecl pass = XRec pass (SpliceDecl pass) -- | Splice Declaration data SpliceDecl p = SpliceDecl -- Top level splice (XSpliceDecl p) (XRec p (HsSplice p)) SpliceExplicitFlag | XSpliceDecl !(XXSpliceDecl p) {- ************************************************************************ * * Type and class declarations * * ************************************************************************ Note [The Naming story] ~~~~~~~~~~~~~~~~~~~~~~~ Here is the story about the implicit names that go with type, class, and instance decls. It's a bit tricky, so pay attention! "Implicit" (or "system") binders ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each data type decl defines a worker name for each constructor to-T and from-T convertors Each class decl defines a tycon for the class a data constructor for that tycon the worker for that constructor a selector for each superclass All have occurrence names that are derived uniquely from their parent declaration. None of these get separate definitions in an interface file; they are fully defined by the data or class decl. But they may *occur* in interface files, of course. 
Any such occurrence must haul in the relevant type or class decl. Plan of attack: - Ensure they "point to" the parent data/class decl when loading that decl from an interface file (See RnHiFiles.getSysBinders) - When typechecking the decl, we build the implicit TyCons and Ids. When doing so we look them up in the name cache (GHC.Rename.Env.lookupSysName), to ensure correct module and provenance is set These are the two places that we have to conjure up the magic derived names. (The actual magic is in GHC.Types.Name.Occurrence.mkWorkerOcc, etc.) Default methods ~~~~~~~~~~~~~~~ - Occurrence name is derived uniquely from the method name E.g. $dmmax - If there is a default method name at all, it's recorded in the ClassOpSig (in GHC.Hs.Binds), in the DefMethInfo field. (DefMethInfo is defined in GHC.Core.Class) Source-code class decls and interface-code class decls are treated subtly differently, which has given me a great deal of confusion over the years. Here's the deal. (We distinguish the two cases because source-code decls have (Just binds) in the tcdMeths field, whereas interface decls have Nothing. In *source-code* class declarations: - When parsing, every ClassOpSig gets a DefMeth with a suitable RdrName This is done by GHC.Parser.PostProcess.mkClassOpSigDM - The renamer renames it to a Name - During typechecking, we generate a binding for each $dm for which there's a programmer-supplied default method: class Foo a where op1 :: op2 :: op1 = ... We generate a binding for $dmop1 but not for $dmop2. The Class for Foo has a Nothing for op2 and a Just ($dm_op1, VanillaDM) for op1. The Name for $dmop2 is simply discarded. 
In *interface-file* class declarations: - When parsing, we see if there's an explicit programmer-supplied default method because there's an '=' sign to indicate it: class Foo a where op1 = :: -- NB the '=' op2 :: We use this info to generate a DefMeth with a suitable RdrName for op1, and a NoDefMeth for op2 - The interface file has a separate definition for $dmop1, with unfolding etc. - The renamer renames it to a Name. - The renamer treats $dmop1 as a free variable of the declaration, so that the binding for $dmop1 will be sucked in. (See RnHsSyn.tyClDeclFVs) This doesn't happen for source code class decls, because they *bind* the default method. Dictionary functions ~~~~~~~~~~~~~~~~~~~~ Each instance declaration gives rise to one dictionary function binding. The type checker makes up new source-code instance declarations (e.g. from 'deriving' or generic default methods --- see GHC.Tc.TyCl.Instance.tcInstDecls1). So we can't generate the names for dictionary functions in advance (we don't know how many we need). On the other hand for interface-file instance declarations, the decl specifies the name of the dictionary function, and it has a binding elsewhere in the interface file: instance {Eq Int} = dEqInt dEqInt :: {Eq Int} So again we treat source code and interface file code slightly differently. Source code: - Source code instance decls have a Nothing in the (Maybe name) field (see data InstDecl below) - The typechecker makes up a Local name for the dict fun for any source-code instance decl, whether it comes from a source-code instance decl, or whether the instance decl is derived from some other construct (e.g. 'deriving'). - The occurrence name it chooses is derived from the instance decl (just for documentation really) --- e.g. dNumInt. Two dict funs may share a common occurrence name, but will have different uniques. E.g. instance Foo [Int] where ... instance Foo [Bool] where ... 
These might both be dFooList - The CoreTidy phase externalises the name, and ensures the occurrence name is unique (this isn't special to dict funs). So we'd get dFooList and dFooList1. - We can take this relaxed approach (changing the occurrence name later) because dict fun Ids are not captured in a TyCon or Class (unlike default methods, say). Instead, they are kept separately in the InstEnv. This makes it easy to adjust them after compiling a module. (Once we've finished compiling that module, they don't change any more.) Interface file code: - The instance decl gives the dict fun name, so the InstDecl has a (Just name) in the (Maybe name) field. - RnHsSyn.instDeclFVs treats the dict fun name as free in the decl, so that we suck in the dfun binding -} -- | Located Declaration of a Type or Class type LTyClDecl pass = XRec pass (TyClDecl pass) -- | A type or class declaration. data TyClDecl pass = -- | @type/data family T :: *->*@ -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnType', -- 'GHC.Parser.Annotation.AnnData', -- 'GHC.Parser.Annotation.AnnFamily','GHC.Parser.Annotation.AnnDcolon', -- 'GHC.Parser.Annotation.AnnWhere','GHC.Parser.Annotation.AnnOpenP', -- 'GHC.Parser.Annotation.AnnDcolon','GHC.Parser.Annotation.AnnCloseP', -- 'GHC.Parser.Annotation.AnnEqual','GHC.Parser.Annotation.AnnRarrow', -- 'GHC.Parser.Annotation.AnnVbar' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation FamDecl { tcdFExt :: XFamDecl pass, tcdFam :: FamilyDecl pass } | -- | @type@ declaration -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnType', -- 'GHC.Parser.Annotation.AnnEqual', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation SynDecl { tcdSExt :: XSynDecl pass -- ^ Post renameer, FVs , tcdLName :: LIdP pass -- ^ Type constructor , tcdTyVars :: LHsQTyVars pass -- ^ Type variables; for an -- associated type these -- include outer binders , tcdFixity :: LexicalFixity 
-- ^ Fixity used in the declaration , tcdRhs :: LHsType pass } -- ^ RHS of type declaration | -- | @data@ declaration -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnData', -- 'GHC.Parser.Annotation.AnnFamily', -- 'GHC.Parser.Annotation.AnnNewType', -- 'GHC.Parser.Annotation.AnnNewType','GHC.Parser.Annotation.AnnDcolon' -- 'GHC.Parser.Annotation.AnnWhere', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation DataDecl { tcdDExt :: XDataDecl pass -- ^ Post renamer, CUSK flag, FVs , tcdLName :: LIdP pass -- ^ Type constructor , tcdTyVars :: LHsQTyVars pass -- ^ Type variables -- See Note [TyVar binders for associated decls] , tcdFixity :: LexicalFixity -- ^ Fixity used in the declaration , tcdDataDefn :: HsDataDefn pass } -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnClass', -- 'GHC.Parser.Annotation.AnnWhere','GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose' -- - The tcdFDs will have 'GHC.Parser.Annotation.AnnVbar', -- 'GHC.Parser.Annotation.AnnComma' -- 'GHC.Parser.Annotation.AnnRarrow' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | ClassDecl { tcdCExt :: XClassDecl pass, -- ^ Post renamer, FVs tcdCtxt :: Maybe (LHsContext pass), -- ^ Context... 
tcdLName :: LIdP pass, -- ^ Name of the class tcdTyVars :: LHsQTyVars pass, -- ^ Class type variables tcdFixity :: LexicalFixity, -- ^ Fixity used in the declaration tcdFDs :: [LHsFunDep pass], -- ^ Functional deps tcdSigs :: [LSig pass], -- ^ Methods' signatures tcdMeths :: LHsBinds pass, -- ^ Default methods tcdATs :: [LFamilyDecl pass], -- ^ Associated types; tcdATDefs :: [LTyFamDefltDecl pass], -- ^ Associated type defaults tcdDocs :: [LDocDecl pass] -- ^ Haddock docs } | XTyClDecl !(XXTyClDecl pass) data FunDep pass = FunDep (XCFunDep pass) [LIdP pass] [LIdP pass] | XFunDep !(XXFunDep pass) type LHsFunDep pass = XRec pass (FunDep pass) data DataDeclRn = DataDeclRn { tcdDataCusk :: Bool -- ^ does this have a CUSK? -- See Note [CUSKs: complete user-supplied kind signatures] , tcdFVs :: NameSet } deriving Data {- Note [TyVar binders for associated decls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For an /associated/ data, newtype, or type-family decl, the LHsQTyVars /includes/ outer binders. For example class T a where data D a c type F a b :: * type F a b = a -> a Here the data decl for 'D', and type-family decl for 'F', both include 'a' in their LHsQTyVars (tcdTyVars and fdTyVars resp). Ditto any implicit binders in the hsq_implicit field of the LHSQTyVars. The idea is that the associated type is really a top-level decl in its own right. However we are careful to use the same name 'a', so that we can match things up. c.f. Note [Associated type tyvar names] in GHC.Core.Class Note [Family instance declaration binders] -} {- Note [Class LayoutInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~ The LayoutInfo is used to associate Haddock comments with parts of the declaration. Compare the following examples: class C a where f :: a -> Int -- ^ comment on f class C a where f :: a -> Int -- ^ comment on C Notice how "comment on f" and "comment on C" differ only by indentation level. Thus we have to record the indentation level of the class declarations. 
See also Note [Adding Haddock comments to the syntax tree] in GHC.Parser.PostProcess.Haddock -} -- Simple classifiers for TyClDecl -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- | @True@ <=> argument is a @data@\/@newtype@ -- declaration. isDataDecl :: TyClDecl pass -> Bool isDataDecl (DataDecl {}) = True isDataDecl _other = False -- | type or type instance declaration isSynDecl :: TyClDecl pass -> Bool isSynDecl (SynDecl {}) = True isSynDecl _other = False -- | type class isClassDecl :: TyClDecl pass -> Bool isClassDecl (ClassDecl {}) = True isClassDecl _ = False -- | type/data family declaration isFamilyDecl :: TyClDecl pass -> Bool isFamilyDecl (FamDecl {}) = True isFamilyDecl _other = False -- | type family declaration isTypeFamilyDecl :: TyClDecl pass -> Bool isTypeFamilyDecl (FamDecl _ (FamilyDecl { fdInfo = info })) = case info of OpenTypeFamily -> True ClosedTypeFamily {} -> True _ -> False isTypeFamilyDecl _ = False -- | open type family info isOpenTypeFamilyInfo :: FamilyInfo pass -> Bool isOpenTypeFamilyInfo OpenTypeFamily = True isOpenTypeFamilyInfo _ = False -- | closed type family info isClosedTypeFamilyInfo :: FamilyInfo pass -> Bool isClosedTypeFamilyInfo (ClosedTypeFamily {}) = True isClosedTypeFamilyInfo _ = False -- | data family declaration isDataFamilyDecl :: TyClDecl pass -> Bool isDataFamilyDecl (FamDecl _ (FamilyDecl { fdInfo = DataFamily })) = True isDataFamilyDecl _other = False -- Dealing with names tyClDeclTyVars :: TyClDecl pass -> LHsQTyVars pass tyClDeclTyVars (FamDecl { tcdFam = FamilyDecl { fdTyVars = tvs } }) = tvs tyClDeclTyVars d = tcdTyVars d countTyClDecls :: [TyClDecl pass] -> (Int, Int, Int, Int, Int) -- class, synonym decls, data, newtype, family decls countTyClDecls decls = (count isClassDecl decls, count isSynDecl decls, -- excluding... count isDataTy decls, -- ...family... 
count isNewTy decls, -- ...instances count isFamilyDecl decls) where isDataTy DataDecl{ tcdDataDefn = HsDataDefn { dd_ND = DataType } } = True isDataTy _ = False isNewTy DataDecl{ tcdDataDefn = HsDataDefn { dd_ND = NewType } } = True isNewTy _ = False {- Note [CUSKs: complete user-supplied kind signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We kind-check declarations differently if they have a complete, user-supplied kind signature (CUSK). This is because we can safely generalise a CUSKed declaration before checking all of the others, supporting polymorphic recursion. See https://gitlab.haskell.org/ghc/ghc/wikis/ghc-kinds/kind-inference#proposed-new-strategy and #9200 for lots of discussion of how we got here. The detection of CUSKs is enabled by the -XCUSKs extension, switched on by default. Under -XNoCUSKs, all declarations are treated as if they have no CUSK. See https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0036-kind-signatures.rst PRINCIPLE: a type declaration has a CUSK iff we could produce a separate kind signature for it, just like a type signature for a function, looking only at the header of the declaration. Examples: * data T1 (a :: *->*) (b :: *) = .... -- Has CUSK; equivalent to T1 :: (*->*) -> * -> * * data T2 a b = ... -- No CUSK; we do not want to guess T2 :: * -> * -> * -- because the full decl might be data T a b = MkT (a b) * data T3 (a :: k -> *) (b :: *) = ... -- CUSK; equivalent to T3 :: (k -> *) -> * -> * -- We lexically generalise over k to get -- T3 :: forall k. (k -> *) -> * -> * -- The generalisation here is purely lexical, just like -- f3 :: a -> a -- means -- f3 :: forall a. a -> a * data T4 (a :: j k) = ... -- CUSK; equivalent to T4 :: j k -> * -- which we lexically generalise to T4 :: forall j k. j k -> * -- and then, if PolyKinds is on, we further generalise to -- T4 :: forall kk (j :: kk -> *) (k :: kk). 
j k -> * -- Again this is exactly like what happens at the term level -- when you write -- f4 :: forall a b. a b -> Int NOTE THAT * A CUSK does /not/ mean that everything about the kind signature is fully specified by the user. Look at T4 and f4: we had to do kind inference to figure out the kind-quantification. But in both cases (T4 and f4) that inference is done looking /only/ at the header of T4 (or signature for f4), not at the definition thereof. * The CUSK completely fixes the kind of the type constructor, forever. * The precise rules, for each declaration form, for whether a declaration has a CUSK are given in the user manual section "Complete user-supplied kind signatures and polymorphic recursion". But they simply implement PRINCIPLE above. * Open type families are interesting: type family T5 a b :: * There simply /is/ no accompanying declaration, so that info is all we'll ever get. So it has a CUSK by definition, and we default any un-fixed kind variables to *. * Associated types are a bit trickier: class C6 a where type family T6 a b :: * op :: a Int -> Int Here C6 does not have a CUSK (in fact we ultimately discover that a :: * -> *). And hence neither does T6, the associated family, because we can't fix its kind until we have settled C6. Another way to say it: unlike a top-level, we /may/ discover more about a's kind from C6's definition. * A data definition with a top-level :: must explicitly bind all kind variables to the right of the ::. See test dependent/should_compile/KindLevels, which requires this case. (Naturally, any kind variable mentioned before the :: should not be bound after it.) This last point is much more debatable than the others; see #15142 comment:22 Because this is fiddly to check, there is a field in the DataDeclRn structure (included in a DataDecl after the renamer) that stores whether or not the declaration has a CUSK. 
-} {- ********************************************************************* * * TyClGroup Strongly connected components of type, class, instance, and role declarations * * ********************************************************************* -} {- Note [TyClGroups and dependency analysis] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A TyClGroup represents a strongly connected component of type/class/instance decls, together with the role annotations for the type/class declarations. The hs_tyclds :: [TyClGroup] field of a HsGroup is a dependency-order sequence of strongly-connected components. Invariants * The type and class declarations, group_tyclds, may depend on each other, or earlier TyClGroups, but not on later ones * The role annotations, group_roles, are role-annotations for some or all of the types and classes in group_tyclds (only). * The instance declarations, group_instds, may (and usually will) depend on group_tyclds, or on earlier TyClGroups, but not on later ones. See Note [Dependency analysis of type, class, and instance decls] in GHC.Rename.Module for more info. 
-} -- | Type or Class Group data TyClGroup pass -- See Note [TyClGroups and dependency analysis] = TyClGroup { group_ext :: XCTyClGroup pass , group_tyclds :: [LTyClDecl pass] , group_roles :: [LRoleAnnotDecl pass] , group_kisigs :: [LStandaloneKindSig pass] , group_instds :: [LInstDecl pass] } | XTyClGroup !(XXTyClGroup pass) tyClGroupTyClDecls :: [TyClGroup pass] -> [LTyClDecl pass] tyClGroupTyClDecls = concatMap group_tyclds tyClGroupInstDecls :: [TyClGroup pass] -> [LInstDecl pass] tyClGroupInstDecls = concatMap group_instds tyClGroupRoleDecls :: [TyClGroup pass] -> [LRoleAnnotDecl pass] tyClGroupRoleDecls = concatMap group_roles tyClGroupKindSigs :: [TyClGroup pass] -> [LStandaloneKindSig pass] tyClGroupKindSigs = concatMap group_kisigs {- ********************************************************************* * * Data and type family declarations * * ********************************************************************* -} {- Note [FamilyResultSig] ~~~~~~~~~~~~~~~~~~~~~~~~~ This data type represents the return signature of a type family. Possible values are: * NoSig - the user supplied no return signature: type family Id a where ... * KindSig - the user supplied the return kind: type family Id a :: * where ... * TyVarSig - user named the result with a type variable and possibly provided a kind signature for that variable: type family Id a = r where ... type family Id a = (r :: *) where ... Naming result of a type family is required if we want to provide injectivity annotation for a type family: type family Id a = r | r -> a where ... See also: Note [Injectivity annotation] Note [Injectivity annotation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A user can declare a type family to be injective: type family Id a = r | r -> a where ... * The part after the "|" is called "injectivity annotation". 
* "r -> a" part is called "injectivity condition"; at the moment terms "injectivity annotation" and "injectivity condition" are synonymous because we only allow a single injectivity condition. * "r" is the "LHS of injectivity condition". LHS can only contain the variable naming the result of a type family. * "a" is the "RHS of injectivity condition". RHS contains space-separated type and kind variables representing the arguments of a type family. Variables can be omitted if a type family is not injective in these arguments. Example: type family Foo a b c = d | d -> a c where ... Note that: (a) naming of type family result is required to provide injectivity annotation (b) for associated types if the result was named then injectivity annotation is mandatory. Otherwise result type variable is indistinguishable from associated type default. It is possible that in the future this syntax will be extended to support more complicated injectivity annotations. For example we could declare that if we know the result of Plus and one of its arguments we can determine the other argument: type family Plus a b = (r :: Nat) | r a -> b, r b -> a where ... Here injectivity annotation would consist of two comma-separated injectivity conditions. 
See also Note [Injective type families] in GHC.Core.TyCon -} -- | Located type Family Result Signature type LFamilyResultSig pass = XRec pass (FamilyResultSig pass) -- | type Family Result Signature data FamilyResultSig pass = -- see Note [FamilyResultSig] NoSig (XNoSig pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | KindSig (XCKindSig pass) (LHsKind pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpenP','GHC.Parser.Annotation.AnnDcolon', -- 'GHC.Parser.Annotation.AnnCloseP' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | TyVarSig (XTyVarSig pass) (LHsTyVarBndr () pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpenP','GHC.Parser.Annotation.AnnDcolon', -- 'GHC.Parser.Annotation.AnnCloseP', 'GHC.Parser.Annotation.AnnEqual' | XFamilyResultSig !(XXFamilyResultSig pass) -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Located type Family Declaration type LFamilyDecl pass = XRec pass (FamilyDecl pass) -- | type Family Declaration data FamilyDecl pass = FamilyDecl { fdExt :: XCFamilyDecl pass , fdInfo :: FamilyInfo pass -- type/data, closed/open , fdTopLevel :: TopLevelFlag -- used for printing only , fdLName :: LIdP pass -- type constructor , fdTyVars :: LHsQTyVars pass -- type variables -- See Note [TyVar binders for associated decls] , fdFixity :: LexicalFixity -- Fixity used in the declaration , fdResultSig :: LFamilyResultSig pass -- result signature , fdInjectivityAnn :: Maybe (LInjectivityAnn pass) -- optional injectivity ann } | XFamilyDecl !(XXFamilyDecl pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnType', -- 'GHC.Parser.Annotation.AnnData', 'GHC.Parser.Annotation.AnnFamily', -- 'GHC.Parser.Annotation.AnnWhere', 'GHC.Parser.Annotation.AnnOpenP', -- 'GHC.Parser.Annotation.AnnDcolon', 
'GHC.Parser.Annotation.AnnCloseP', -- 'GHC.Parser.Annotation.AnnEqual', 'GHC.Parser.Annotation.AnnRarrow', -- 'GHC.Parser.Annotation.AnnVbar' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Located Injectivity Annotation type LInjectivityAnn pass = XRec pass (InjectivityAnn pass) -- | If the user supplied an injectivity annotation it is represented using -- InjectivityAnn. At the moment this is a single injectivity condition - see -- Note [Injectivity annotation]. `Located name` stores the LHS of injectivity -- condition. `[Located name]` stores the RHS of injectivity condition. Example: -- -- type family Foo a b c = r | r -> a c where ... -- -- This will be represented as "InjectivityAnn `r` [`a`, `c`]" data InjectivityAnn pass = InjectivityAnn (XCInjectivityAnn pass) (LIdP pass) [LIdP pass] -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnRarrow', 'GHC.Parser.Annotation.AnnVbar' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XInjectivityAnn !(XXInjectivityAnn pass) data FamilyInfo pass = DataFamily | OpenTypeFamily -- | 'Nothing' if we're in an hs-boot file and the user -- said "type family Foo x where .." 
| ClosedTypeFamily (Maybe [LTyFamInstEqn pass]) ------------- Pretty printing FamilyDecls ----------- pprFlavour :: FamilyInfo pass -> SDoc pprFlavour DataFamily = text "data" pprFlavour OpenTypeFamily = text "type" pprFlavour (ClosedTypeFamily {}) = text "type" instance Outputable (FamilyInfo pass) where ppr info = pprFlavour info <+> text "family" {- ********************************************************************* * * Data types and data constructors * * ********************************************************************* -} -- | Haskell Data type Definition data HsDataDefn pass -- The payload of a data type defn -- Used *both* for vanilla data declarations, -- *and* for data family instances = -- | Declares a data type or newtype, giving its constructors -- @ -- data/newtype T a = -- data/newtype instance T [a] = -- @ HsDataDefn { dd_ext :: XCHsDataDefn pass, dd_ND :: NewOrData, dd_ctxt :: Maybe (LHsContext pass), -- ^ Context dd_cType :: Maybe (XRec pass CType), dd_kindSig:: Maybe (LHsKind pass), -- ^ Optional kind signature. -- -- @(Just k)@ for a GADT-style @data@, -- or @data instance@ decl, with explicit kind sig -- -- Always @Nothing@ for H98-syntax decls dd_cons :: [LConDecl pass], -- ^ Data constructors -- -- For @data T a = T1 | T2 a@ -- the 'LConDecl's all have 'ConDeclH98'. -- For @data T a where { T1 :: T a }@ -- the 'LConDecls' all have 'ConDeclGADT'. dd_derivs :: HsDeriving pass -- ^ Optional 'deriving' clause -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation } | XHsDataDefn !(XXHsDataDefn pass) -- | Haskell Deriving clause type HsDeriving pass = [LHsDerivingClause pass] -- ^ The optional @deriving@ clauses of a data declaration. "Clauses" is -- plural because one can specify multiple deriving clauses using the -- @-XDerivingStrategies@ language extension. -- -- The list of 'LHsDerivingClause's corresponds to exactly what the user -- requested to derive, in order. 
If no deriving clauses were specified, -- the list is empty. type LHsDerivingClause pass = XRec pass (HsDerivingClause pass) -- | A single @deriving@ clause of a data declaration. -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnDeriving', 'GHC.Parser.Annotation.AnnStock', -- 'GHC.Parser.Annotation.AnnAnyClass', 'GHC.Parser.Annotation.AnnNewtype', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose' data HsDerivingClause pass -- See Note [Deriving strategies] in GHC.Tc.Deriv = HsDerivingClause { deriv_clause_ext :: XCHsDerivingClause pass , deriv_clause_strategy :: Maybe (LDerivStrategy pass) -- ^ The user-specified strategy (if any) to use when deriving -- 'deriv_clause_tys'. , deriv_clause_tys :: LDerivClauseTys pass -- ^ The types to derive. } | XHsDerivingClause !(XXHsDerivingClause pass) type LDerivClauseTys pass = XRec pass (DerivClauseTys pass) -- | The types mentioned in a single @deriving@ clause. This can come in two -- forms, 'DctSingle' or 'DctMulti', depending on whether the types are -- surrounded by enclosing parentheses or not. These parentheses are -- semantically different than 'HsParTy'. For example, @deriving ()@ means -- \"derive zero classes\" rather than \"derive an instance of the 0-tuple\". -- -- 'DerivClauseTys' use 'LHsSigType' because @deriving@ clauses can mention -- type variables that aren't bound by the datatype, e.g. -- -- > data T b = ... deriving (C [a]) -- -- should produce a derived instance for @C [a] (T b)@. data DerivClauseTys pass = -- | A @deriving@ clause with a single type. Moreover, that type can only -- be a type constructor without any arguments. -- -- Example: @deriving Eq@ DctSingle (XDctSingle pass) (LHsSigType pass) -- | A @deriving@ clause with a comma-separated list of types, surrounded -- by enclosing parentheses. 
-- -- Example: @deriving (Eq, C a)@ | DctMulti (XDctMulti pass) [LHsSigType pass] | XDerivClauseTys !(XXDerivClauseTys pass) -- | Located Standalone Kind Signature type LStandaloneKindSig pass = XRec pass (StandaloneKindSig pass) data StandaloneKindSig pass = StandaloneKindSig (XStandaloneKindSig pass) (LIdP pass) -- Why a single binder? See #16754 (LHsSigType pass) -- Why not LHsSigWcType? See Note [Wildcards in standalone kind signatures] | XStandaloneKindSig !(XXStandaloneKindSig pass) {- Note [Wildcards in standalone kind signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Standalone kind signatures enable polymorphic recursion, and it is unclear how to reconcile this with partial type signatures, so we disallow wildcards in them. We reject wildcards in 'rnStandaloneKindSignature' by returning False for 'StandaloneKindSigCtx' in 'wildCardsAllowed'. The alternative design is to have special treatment for partial standalone kind signatures, much like we have special treatment for partial type signatures in terms. However, partial standalone kind signatures are not a proper replacement for CUSKs, so this would be a separate feature. -} data NewOrData = NewType -- ^ @newtype Blah ...@ | DataType -- ^ @data Blah ...@ deriving( Eq, Data ) -- Needed because Demand derives Eq -- | Convert a 'NewOrData' to a 'TyConFlavour' newOrDataToFlavour :: NewOrData -> TyConFlavour newOrDataToFlavour NewType = NewtypeFlavour newOrDataToFlavour DataType = DataTypeFlavour -- | Located data Constructor Declaration type LConDecl pass = XRec pass (ConDecl pass) -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnSemi' when -- in a GADT constructor list -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | -- -- @ -- data T b = forall a. Eq a => MkT a b -- MkT :: forall b a. 
Eq a => MkT a b -- -- data T b where -- MkT1 :: Int -> T Int -- -- data T = Int `MkT` Int -- | MkT2 -- -- data T a where -- Int `MkT` Int :: T Int -- @ -- -- - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnDotdot','GHC.Parser.Annotation.AnnCLose', -- 'GHC.Parser.Annotation.AnnEqual','GHC.Parser.Annotation.AnnVbar', -- 'GHC.Parser.Annotation.AnnDarrow','GHC.Parser.Annotation.AnnDarrow', -- 'GHC.Parser.Annotation.AnnForall','GHC.Parser.Annotation.AnnDot' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | data Constructor Declaration data ConDecl pass = ConDeclGADT { con_g_ext :: XConDeclGADT pass , con_names :: [LIdP pass] -- The following fields describe the type after the '::' -- See Note [GADT abstract syntax] , con_bndrs :: XRec pass (HsOuterSigTyVarBndrs pass) -- ^ The outermost type variable binders, be they explicit or -- implicit. The 'XRec' is used to anchor exact print -- annotations, AnnForall and AnnDot. , con_mb_cxt :: Maybe (LHsContext pass) -- ^ User-written context (if any) , con_g_args :: HsConDeclGADTDetails pass -- ^ Arguments; never infix , con_res_ty :: LHsType pass -- ^ Result type , con_doc :: Maybe (LHsDoc pass) -- ^ A possible Haddock -- comment. } | ConDeclH98 { con_ext :: XConDeclH98 pass , con_name :: LIdP pass , con_forall :: Bool -- ^ True <=> explicit user-written forall -- e.g. data T a = forall b. MkT b (b->a) -- con_ex_tvs = {b} -- False => con_ex_tvs is empty , con_ex_tvs :: [LHsTyVarBndr Specificity pass] -- ^ Existentials only , con_mb_cxt :: Maybe (LHsContext pass) -- ^ User-written context (if any) , con_args :: HsConDeclH98Details pass -- ^ Arguments; can be infix , con_doc :: Maybe (LHsDoc pass) -- ^ A possible Haddock comment. 
} | XConDecl !(XXConDecl pass) {- Note [GADT abstract syntax] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The types of both forms of GADT constructors are very structured, as they must consist of the quantified type variables (if provided), followed by the context (if provided), followed by the argument types (if provided), followed by the result type. (See "Wrinkle: No nested foralls or contexts" below for more discussion on the restrictions imposed here.) As a result, instead of storing the type of a GADT constructor as a single LHsType, we split it up into its constituent components for easier access. There are two broad ways to classify GADT constructors: * Record-syntax constructors. For example: data T a where K :: forall a. Ord a => { x :: [a], ... } -> T a * Prefix constructors, which do not use record syntax. For example: data T a where K :: forall a. Ord a => [a] -> ... -> T a This distinction is recorded in the `con_args :: HsConDetails pass`, which tracks if we're dealing with a RecCon or PrefixCon. It is easy to distinguish the two in the AST since record GADT constructors use HsRecTy. This distinction is made in GHC.Parser.PostProcess.mkGadtDecl. It is worth elaborating a bit more on the process of splitting the argument types of a GADT constructor, since there are some non-obvious details involved. While splitting the argument types of a record GADT constructor is easy (they are stored in an HsRecTy), splitting the arguments of a prefix GADT constructor is trickier. The basic idea is that we must split along the outermost function arrows ((->) and (%1 ->)) in the type, which GHC.Hs.Type.splitHsFunType accomplishes. But what about type operators? Consider: C :: a :*: b -> a :*: b -> a :+: b This could parse in many different ways depending on the precedences of each type operator. 
In particular, if (:*:) were to have lower precedence than (->), then it could very well parse like this: a :*: ((b -> a) :*: ((b -> a) :+: b))) This would give the false impression that the whole type is part of one large return type, with no arguments. Note that we do not fully resolve the exact precedences of each user-defined type operator until the renamer, so this a more difficult task for the parser. Fortunately, there is no risk of the above happening. GHC's parser gives special treatment to function arrows, and as a result, they are always parsed with a lower precedence than any other type operator. As a result, the type above is actually parsed like this: (a :*: b) -> ((a :*: b) -> (a :+: b)) While we won't know the exact precedences of (:*:) and (:+:) until the renamer, all we are concerned about in the parser is identifying the overall shape of the argument and result types, which we can accomplish by piggybacking on the special treatment given to function arrows. In a future where function arrows aren't given special status in the parser, we will likely have to modify GHC.Parser.PostProcess.mkHsOpTyPV to preserve this trick. ----- -- Wrinkle: No nested foralls or contexts ----- GADT constructors provide some freedom to change the order of foralls in their types (see Note [DataCon user type variable binders] in GHC.Core.DataCon), but this freedom is still limited. GADTs still require that all quantification occurs "prenex". That is, any explicitly quantified type variables must occur at the front of the GADT type, followed by any contexts, followed by the body of the GADT type, in precisely that order. For instance: data T where MkT1 :: forall a b. (Eq a, Eq b) => a -> b -> T -- OK MkT2 :: forall a. Eq a => forall b. a -> b -> T -- Rejected, `forall b` is nested MkT3 :: forall a b. Eq a => Eq b => a -> b -> T -- Rejected, `Eq b` is nested MkT4 :: Int -> forall a. a -> T -- Rejected, `forall a` is nested MkT5 :: forall a. 
Int -> Eq a => a -> T -- Rejected, `Eq a` is nested MkT6 :: (forall a. a -> T) -- Rejected, `forall a` is nested due to the surrounding parentheses MkT7 :: (Eq a => a -> t) -- Rejected, `Eq a` is nested due to the surrounding parentheses For the full details, see the "Formal syntax for GADTs" section of the GHC User's Guide. GHC enforces that GADT constructors do not have nested `forall`s or contexts in two parts: 1. GHC, in the process of splitting apart a GADT's type, extracts out the leading `forall` and context (if they are provided). To accomplish this splitting, the renamer uses the GHC.Hs.Type.splitLHsGADTPrefixTy function, which is careful not to remove parentheses surrounding the leading `forall` or context (as these parentheses can be syntactically significant). If the third result returned by splitLHsGADTPrefixTy contains any `forall`s or contexts, then they must be nested, so they will be rejected. Note that this step applies to both prefix and record GADTs alike, as they both have syntax which permits `forall`s and contexts. The difference is where this step happens: * For prefix GADTs, this happens in the renamer (in rnConDecl), as we cannot split until after the type operator fixities have been resolved. * For record GADTs, this happens in the parser (in mkGadtDecl). 2. If the GADT type is prefix, the renamer (in the ConDeclGADTPrefixPs case of rnConDecl) will then check for nested `forall`s/contexts in the body of a prefix GADT type, after it has determined what all of the argument types are. This step is necessary to catch examples like MkT4 above, where the nested quantification occurs after a visible argument type. -} -- | The arguments in a Haskell98-style data constructor. type HsConDeclH98Details pass = HsConDetails Void (HsScaled pass (LBangType pass)) (XRec pass [LConDeclField pass]) -- The Void argument to HsConDetails here is a reflection of the fact that -- type applications are not allowed in data constructor declarations. 
-- | The arguments in a GADT constructor. Unlike Haskell98-style constructors, -- GADT constructors cannot be declared with infix syntax. As a result, we do -- not use 'HsConDetails' here, as 'InfixCon' would be an unrepresentable -- state. (There is a notion of infix GADT constructors for the purposes of -- derived Show instances—see Note [Infix GADT constructors] in -- GHC.Tc.TyCl—but that is an orthogonal concern.) data HsConDeclGADTDetails pass = PrefixConGADT [HsScaled pass (LBangType pass)] | RecConGADT (XRec pass [LConDeclField pass]) (LHsUniToken "->" "→" pass) instance Outputable NewOrData where ppr NewType = text "newtype" ppr DataType = text "data" {- ************************************************************************ * * Instance declarations * * ************************************************************************ Note [Type family instance declarations in HsSyn] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The data type FamEqn represents one equation of a type family instance. Aside from the pass, it is also parameterised over another field, feqn_rhs. feqn_rhs is either an HsDataDefn (for data family instances) or an LHsType (for type family instances). Type family instances also include associated type family default equations. That is because a default for a type family looks like this: class C a where type family F a b :: Type type F c d = (c,d) -- Default instance The default declaration is really just a `type instance` declaration, but one with particularly simple patterns: they must all be distinct type variables. That's because we will instantiate it (in an instance declaration for `C`) if we don't give an explicit instance for `F`. Note that the names of the variables don't need to match those of the class: it really is like a free-standing `type instance` declaration. 
-} ----------------- Type synonym family instances ------------- -- | Located Type Family Instance Equation type LTyFamInstEqn pass = XRec pass (TyFamInstEqn pass) -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnSemi' -- when in a list -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Haskell Type Patterns type HsTyPats pass = [LHsTypeArg pass] {- Note [Family instance declaration binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The feqn_pats field of FamEqn (family instance equation) stores the LHS type (and kind) patterns. Any type (and kind) variables contained in these type patterns are bound in the feqn_bndrs field. Note that in particular: * The feqn_bndrs *include* any anonymous wildcards. For example type instance F a _ = a The feqn_bndrs will be HsOuterImplicit {a, _}. Remember that each separate wildcard '_' gets its own unique. In this context wildcards behave just like an ordinary type variable, only anonymous. * The feqn_bndrs *include* type variables that are already in scope Eg class C s t where type F t p :: * instance C w (a,b) where type F (a,b) x = x->a The feqn_bndrs of the F decl is HsOuterImplicit {a,b,x}, even though the F decl is nested inside the 'instance' decl. However after the renamer, the uniques will match up: instance C w7 (a8,b9) where type F (a8,b9) x10 = x10->a8 so that we can compare the type pattern in the 'instance' decl and in the associated 'type' decl c.f. Note [TyVar binders for associated decls] -} -- | Type Family Instance Equation type TyFamInstEqn pass = FamEqn pass (LHsType pass) -- Here, the @pats@ are type patterns (with kind and type bndrs). -- See Note [Family instance declaration binders] -- | Type family default declarations. -- A convenient synonym for 'TyFamInstDecl'. -- See @Note [Type family instance declarations in HsSyn]@. type TyFamDefltDecl = TyFamInstDecl -- | Located type family default declarations. 
type LTyFamDefltDecl pass = XRec pass (TyFamDefltDecl pass)

-- | Located Type Family Instance Declaration
type LTyFamInstDecl pass = XRec pass (TyFamInstDecl pass)

-- | Type Family Instance Declaration
data TyFamInstDecl pass
  = TyFamInstDecl
      { tfid_xtn :: XCTyFamInstDecl pass
        -- ^ Trees That Grow extension field for this constructor
      , tfid_eqn :: TyFamInstEqn pass
        -- ^ The single equation of this type family instance
      }
    -- ^
    --  - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnType',
    --           'GHC.Parser.Annotation.AnnInstance',

    -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation
  | XTyFamInstDecl !(XXTyFamInstDecl pass)

----------------- Data family instances -------------

-- | Located Data Family Instance Declaration
type LDataFamInstDecl pass = XRec pass (DataFamInstDecl pass)

-- | Data Family Instance Declaration
newtype DataFamInstDecl pass
  = DataFamInstDecl
      { dfid_eqn :: FamEqn pass (HsDataDefn pass)
        -- ^ The equation; for a data family the RHS is an 'HsDataDefn'
        --   (see Note [Type family instance declarations in HsSyn])
      }
    -- ^
    --  - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnData',
    --    'GHC.Parser.Annotation.AnnNewType','GHC.Parser.Annotation.AnnInstance',
    --    'GHC.Parser.Annotation.AnnDcolon'
    --    'GHC.Parser.Annotation.AnnWhere','GHC.Parser.Annotation.AnnOpen',
    --    'GHC.Parser.Annotation.AnnClose'

    -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation

----------------- Family instances (common types) -------------

-- | Family Equation
--
-- One equation in a type family instance declaration, data family instance
-- declaration, or type family default.
-- See Note [Type family instance declarations in HsSyn] -- See Note [Family instance declaration binders] data FamEqn pass rhs = FamEqn { feqn_ext :: XCFamEqn pass rhs , feqn_tycon :: LIdP pass , feqn_bndrs :: HsOuterFamEqnTyVarBndrs pass -- ^ Optional quantified type vars , feqn_pats :: HsTyPats pass , feqn_fixity :: LexicalFixity -- ^ Fixity used in the declaration , feqn_rhs :: rhs } -- ^ -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnEqual' | XFamEqn !(XXFamEqn pass rhs) -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation ----------------- Class instances ------------- -- | Located Class Instance Declaration type LClsInstDecl pass = XRec pass (ClsInstDecl pass) -- | Class Instance Declaration -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnInstance', -- 'GHC.Parser.Annotation.AnnWhere', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data ClsInstDecl pass = ClsInstDecl { cid_ext :: XCClsInstDecl pass , cid_poly_ty :: LHsSigType pass -- Context => Class Instance-type -- Using a polytype means that the renamer conveniently -- figures out the quantified type variables for us. 
, cid_binds :: LHsBinds pass -- Class methods , cid_sigs :: [LSig pass] -- User-supplied pragmatic info , cid_tyfam_insts :: [LTyFamInstDecl pass] -- Type family instances , cid_datafam_insts :: [LDataFamInstDecl pass] -- Data family instances , cid_overlap_mode :: Maybe (XRec pass OverlapMode) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation } | XClsInstDecl !(XXClsInstDecl pass) ----------------- Instances of all kinds ------------- -- | Located Instance Declaration type LInstDecl pass = XRec pass (InstDecl pass) -- | Instance Declaration data InstDecl pass -- Both class and family instances = ClsInstD { cid_d_ext :: XClsInstD pass , cid_inst :: ClsInstDecl pass } | DataFamInstD -- data family instance { dfid_ext :: XDataFamInstD pass , dfid_inst :: DataFamInstDecl pass } | TyFamInstD -- type family instance { tfid_ext :: XTyFamInstD pass , tfid_inst :: TyFamInstDecl pass } | XInstDecl !(XXInstDecl pass) {- ************************************************************************ * * \subsection[DerivDecl]{A stand-alone instance deriving declaration} * * ************************************************************************ -} -- | Located stand-alone 'deriving instance' declaration type LDerivDecl pass = XRec pass (DerivDecl pass) -- | Stand-alone 'deriving instance' declaration data DerivDecl pass = DerivDecl { deriv_ext :: XCDerivDecl pass , deriv_type :: LHsSigWcType pass -- ^ The instance type to derive. -- -- It uses an 'LHsSigWcType' because the context is allowed to be a -- single wildcard: -- -- > deriving instance _ => Eq (Foo a) -- -- Which signifies that the context should be inferred. -- See Note [Inferring the instance context] in GHC.Tc.Deriv.Infer. 
, deriv_strategy :: Maybe (LDerivStrategy pass) , deriv_overlap_mode :: Maybe (XRec pass OverlapMode) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDeriving', -- 'GHC.Parser.Annotation.AnnInstance', 'GHC.Parser.Annotation.AnnStock', -- 'GHC.Parser.Annotation.AnnAnyClass', 'GHC.Parser.Annotation.AnnNewtype', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation } | XDerivDecl !(XXDerivDecl pass) {- ************************************************************************ * * Deriving strategies * * ************************************************************************ -} -- | A 'Located' 'DerivStrategy'. type LDerivStrategy pass = XRec pass (DerivStrategy pass) -- | Which technique the user explicitly requested when deriving an instance. data DerivStrategy pass -- See Note [Deriving strategies] in GHC.Tc.Deriv = StockStrategy (XStockStrategy pass) -- ^ GHC's \"standard\" strategy, which is to implement a -- custom instance for the data type. This only works -- for certain types that GHC knows about (e.g., 'Eq', -- 'Show', 'Functor' when @-XDeriveFunctor@ is enabled, -- etc.) | AnyclassStrategy (XAnyClassStrategy pass) -- ^ @-XDeriveAnyClass@ | NewtypeStrategy (XNewtypeStrategy pass) -- ^ @-XGeneralizedNewtypeDeriving@ | ViaStrategy (XViaStrategy pass) -- ^ @-XDerivingVia@ -- | A short description of a @DerivStrategy'@. derivStrategyName :: DerivStrategy a -> SDoc derivStrategyName = text . 
go where go StockStrategy {} = "stock" go AnyclassStrategy {} = "anyclass" go NewtypeStrategy {} = "newtype" go ViaStrategy {} = "via" {- ************************************************************************ * * \subsection[DefaultDecl]{A @default@ declaration} * * ************************************************************************ There can only be one default declaration per module, but it is hard for the parser to check that; we pass them all through in the abstract syntax, and that restriction must be checked in the front end. -} -- | Located Default Declaration type LDefaultDecl pass = XRec pass (DefaultDecl pass) -- | Default Declaration data DefaultDecl pass = DefaultDecl (XCDefaultDecl pass) [LHsType pass] -- ^ - 'GHC.Parser.Annotation.AnnKeywordId's : 'GHC.Parser.Annotation.AnnDefault', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XDefaultDecl !(XXDefaultDecl pass) {- ************************************************************************ * * \subsection{Foreign function interface declaration} * * ************************************************************************ -} -- foreign declarations are distinguished as to whether they define or use a -- Haskell name -- -- * the Boolean value indicates whether the pre-standard deprecated syntax -- has been used -- | Located Foreign Declaration type LForeignDecl pass = XRec pass (ForeignDecl pass) -- | Foreign Declaration data ForeignDecl pass = ForeignImport { fd_i_ext :: XForeignImport pass -- Post typechecker, rep_ty ~ sig_ty , fd_name :: LIdP pass -- defines this name , fd_sig_ty :: LHsSigType pass -- sig_ty , fd_fi :: ForeignImport } | ForeignExport { fd_e_ext :: XForeignExport pass -- Post typechecker, rep_ty ~ sig_ty , fd_name :: LIdP pass -- uses this name , fd_sig_ty :: LHsSigType pass -- sig_ty , fd_fe :: ForeignExport } -- ^ -- - 'GHC.Parser.Annotation.AnnKeywordId' : 
'GHC.Parser.Annotation.AnnForeign', -- 'GHC.Parser.Annotation.AnnImport','GHC.Parser.Annotation.AnnExport', -- 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XForeignDecl !(XXForeignDecl pass) {- In both ForeignImport and ForeignExport: sig_ty is the type given in the Haskell code rep_ty is the representation for this type, i.e. with newtypes coerced away and type functions evaluated. Thus if the declaration is valid, then rep_ty will only use types such as Int and IO that we know how to make foreign calls with. -} -- Specification Of an imported external entity in dependence on the calling -- convention -- data ForeignImport = -- import of a C entity -- -- * the two strings specifying a header file or library -- may be empty, which indicates the absence of a -- header or object specification (both are not used -- in the case of `CWrapper' and when `CFunction' -- has a dynamic target) -- -- * the calling convention is irrelevant for code -- generation in the case of `CLabel', but is needed -- for pretty printing -- -- * `Safety' is irrelevant for `CLabel' and `CWrapper' -- CImport (Located CCallConv) -- ccall or stdcall (Located Safety) -- interruptible, safe or unsafe (Maybe Header) -- name of C header CImportSpec -- details of the C entity (Located SourceText) -- original source text for -- the C entity deriving Data -- details of an external C entity -- data CImportSpec = CLabel CLabelString -- import address of a C label | CFunction CCallTarget -- static or dynamic function | CWrapper -- wrapper to expose closures -- (former f.e.d.) 
deriving Data -- specification of an externally exported entity in dependence on the calling -- convention -- data ForeignExport = CExport (Located CExportSpec) -- contains the calling -- convention (Located SourceText) -- original source text for -- the C entity deriving Data -- pretty printing of foreign declarations -- instance Outputable ForeignImport where ppr (CImport cconv safety mHeader spec (L _ srcText)) = ppr cconv <+> ppr safety <+> pprWithSourceText srcText (pprCEntity spec "") where pp_hdr = case mHeader of Nothing -> empty Just (Header _ header) -> ftext header pprCEntity (CLabel lbl) _ = doubleQuotes $ text "static" <+> pp_hdr <+> char '&' <> ppr lbl pprCEntity (CFunction (StaticTarget st _lbl _ isFun)) src = if dqNeeded then doubleQuotes ce else empty where dqNeeded = (take 6 src == "static") || isJust mHeader || not isFun || st /= NoSourceText ce = -- We may need to drop leading spaces first (if take 6 src == "static" then text "static" else empty) <+> pp_hdr <+> (if isFun then empty else text "value") <+> (pprWithSourceText st empty) pprCEntity (CFunction DynamicTarget) _ = doubleQuotes $ text "dynamic" pprCEntity CWrapper _ = doubleQuotes $ text "wrapper" instance Outputable ForeignExport where ppr (CExport (L _ (CExportStatic _ lbl cconv)) _) = ppr cconv <+> char '"' <> ppr lbl <> char '"' {- ************************************************************************ * * \subsection{Rewrite rules} * * ************************************************************************ -} -- | Located Rule Declarations type LRuleDecls pass = XRec pass (RuleDecls pass) -- Note [Pragma source text] in GHC.Types.SourceText -- | Rule Declarations data RuleDecls pass = HsRules { rds_ext :: XCRuleDecls pass , rds_src :: SourceText , rds_rules :: [LRuleDecl pass] } | XRuleDecls !(XXRuleDecls pass) -- | Located Rule Declaration type LRuleDecl pass = XRec pass (RuleDecl pass) -- | Rule Declaration data RuleDecl pass = HsRule -- Source rule { rd_ext :: XHsRule pass -- ^ 
After renamer, free-vars from the LHS and RHS , rd_name :: XRec pass (SourceText,RuleName) -- ^ Note [Pragma source text] in "GHC.Types.Basic" , rd_act :: Activation , rd_tyvs :: Maybe [LHsTyVarBndr () (NoGhcTc pass)] -- ^ Forall'd type vars , rd_tmvs :: [LRuleBndr pass] -- ^ Forall'd term vars, before typechecking; after typechecking -- this includes all forall'd vars , rd_lhs :: XRec pass (HsExpr pass) , rd_rhs :: XRec pass (HsExpr pass) } -- ^ -- - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnTilde', -- 'GHC.Parser.Annotation.AnnVal', -- 'GHC.Parser.Annotation.AnnClose', -- 'GHC.Parser.Annotation.AnnForall','GHC.Parser.Annotation.AnnDot', -- 'GHC.Parser.Annotation.AnnEqual', | XRuleDecl !(XXRuleDecl pass) data HsRuleRn = HsRuleRn NameSet NameSet -- Free-vars from the LHS and RHS deriving Data -- | Located Rule Binder type LRuleBndr pass = XRec pass (RuleBndr pass) -- | Rule Binder data RuleBndr pass = RuleBndr (XCRuleBndr pass) (LIdP pass) | RuleBndrSig (XRuleBndrSig pass) (LIdP pass) (HsPatSigType pass) | XRuleBndr !(XXRuleBndr pass) -- ^ -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnDcolon','GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation collectRuleBndrSigTys :: [RuleBndr pass] -> [HsPatSigType pass] collectRuleBndrSigTys bndrs = [ty | RuleBndrSig _ _ ty <- bndrs] pprFullRuleName :: GenLocated a (SourceText, RuleName) -> SDoc pprFullRuleName (L _ (st, n)) = pprWithSourceText st (doubleQuotes $ ftext n) {- ************************************************************************ * * \subsection[DocDecl]{Document comments} * * ************************************************************************ -} -- | Located Documentation comment Declaration type LDocDecl pass = XRec pass (DocDecl pass) -- | Documentation comment Declaration data DocDecl pass = DocCommentNext (LHsDoc pass) | 
DocCommentPrev (LHsDoc pass) | DocCommentNamed String (LHsDoc pass) | DocGroup Int (LHsDoc pass) deriving instance (Data pass, Data (IdP pass)) => Data (DocDecl pass) -- Okay, I need to reconstruct the document comments, but for now: instance Outputable (DocDecl name) where ppr _ = text "" docDeclDoc :: DocDecl pass -> LHsDoc pass docDeclDoc (DocCommentNext d) = d docDeclDoc (DocCommentPrev d) = d docDeclDoc (DocCommentNamed _ d) = d docDeclDoc (DocGroup _ d) = d {- ************************************************************************ * * \subsection[DeprecDecl]{Deprecations} * * ************************************************************************ We use exported entities for things to deprecate. -} -- | Located Warning Declarations type LWarnDecls pass = XRec pass (WarnDecls pass) -- Note [Pragma source text] in GHC.Types.SourceText -- | Warning pragma Declarations data WarnDecls pass = Warnings { wd_ext :: XWarnings pass , wd_src :: SourceText , wd_warnings :: [LWarnDecl pass] } | XWarnDecls !(XXWarnDecls pass) -- | Located Warning pragma Declaration type LWarnDecl pass = XRec pass (WarnDecl pass) -- | Warning pragma Declaration data WarnDecl pass = Warning (XWarning pass) [LIdP pass] (WarningTxt pass) | XWarnDecl !(XXWarnDecl pass) {- ************************************************************************ * * \subsection[AnnDecl]{Annotations} * * ************************************************************************ -} -- | Located Annotation Declaration type LAnnDecl pass = XRec pass (AnnDecl pass) -- | Annotation Declaration data AnnDecl pass = HsAnnotation (XHsAnnotation pass) SourceText -- Note [Pragma source text] in GHC.Types.SourceText (AnnProvenance pass) (XRec pass (HsExpr pass)) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnType' -- 'GHC.Parser.Annotation.AnnModule' -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in 
GHC.Parser.Annotation | XAnnDecl !(XXAnnDecl pass) -- | Annotation Provenance data AnnProvenance pass = ValueAnnProvenance (LIdP pass) | TypeAnnProvenance (LIdP pass) | ModuleAnnProvenance -- deriving instance Functor AnnProvenance -- deriving instance Foldable AnnProvenance -- deriving instance Traversable AnnProvenance -- deriving instance (Data pass) => Data (AnnProvenance pass) annProvenanceName_maybe :: forall p. UnXRec p => AnnProvenance p -> Maybe (IdP p) annProvenanceName_maybe (ValueAnnProvenance (unXRec @p -> name)) = Just name annProvenanceName_maybe (TypeAnnProvenance (unXRec @p -> name)) = Just name annProvenanceName_maybe ModuleAnnProvenance = Nothing {- ************************************************************************ * * \subsection[RoleAnnot]{Role annotations} * * ************************************************************************ -} -- | Located Role Annotation Declaration type LRoleAnnotDecl pass = XRec pass (RoleAnnotDecl pass) -- See #8185 for more info about why role annotations are -- top-level declarations -- | Role Annotation Declaration data RoleAnnotDecl pass = RoleAnnotDecl (XCRoleAnnotDecl pass) (LIdP pass) -- type constructor [XRec pass (Maybe Role)] -- optional annotations -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnType', -- 'GHC.Parser.Annotation.AnnRole' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XRoleAnnotDecl !(XXRoleAnnotDecl pass) ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Expr.hs0000644000000000000000000022772614472400113023121 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees 
That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* -- | Abstract Haskell syntax for expressions. module Language.Haskell.Syntax.Expr where import GHC.Prelude import Language.Haskell.Syntax.Decls import Language.Haskell.Syntax.Pat import Language.Haskell.Syntax.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Type import Language.Haskell.Syntax.Binds -- others: import GHC.Core.DataCon (FieldLabelString) import GHC.Types.Name import GHC.Types.Basic import GHC.Types.Fixity import GHC.Types.Name.Reader import GHC.Types.SourceText import GHC.Types.SrcLoc import GHC.Unit.Module (ModuleName) import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import GHC.Core.Type -- libraries: import Data.Data hiding (Fixity(..)) import qualified Data.Data as Data (Fixity(..)) import Data.List.NonEmpty ( NonEmpty ) import GHCi.RemoteTypes ( ForeignRef ) import qualified Language.Haskell.TH as TH (Q) {- Note [RecordDotSyntax field updates] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The extensions @OverloadedRecordDot@ @OverloadedRecordUpdate@ together enable record updates like @a{foo.bar.baz = 1}@. Introducing this syntax slightly complicates parsing. This note explains how it's done. In the event a record is being constructed or updated, it's this production that's in play: @ aexp1 -> aexp1 '{' fbinds '}' { ... mkHsRecordPV ... $1 (snd $3) } @ @fbinds@ is a list of field bindings. @mkHsRecordPV@ is a function of the @DisambECP b@ typeclass, see Note [Ambiguous syntactic categories]. The "normal" rules for an @fbind@ are: @ fbind : qvar '=' texp | qvar @ These rules compute values of @LHsRecField GhcPs (Located b)@. 
They apply in the context of record construction, record updates, record
patterns and record expressions. That is, @b@ ranges over @HsExpr GhcPs@,
@HsPat GhcPs@ and @HsCmd GhcPs@.

When @OverloadedRecordDot@ and @OverloadedRecordUpdate@ are both enabled, two
additional @fbind@ rules are admitted:

@
     | field TIGHT_INFIX_PROJ fieldToUpdate '=' texp
     | field TIGHT_INFIX_PROJ fieldToUpdate
@

These rules only make sense when parsing record update expressions (that is,
patterns and commands cannot be parsed by these rules and neither record
constructions).

The results of these new rules cannot be represented by @LHsRecField GhcPs
(LHsExpr GhcPs)@ values as the type is defined today. We minimize modifying
existing code by having these new rules calculate @LHsRecProj GhcPs (LHsExpr
GhcPs)@ ("record projection") values instead:

@
    newtype FieldLabelStrings = FieldLabelStrings [XRec p (DotFieldOcc p)]
    type RecProj arg = HsFieldBind FieldLabelStrings arg
    type LHsRecProj p arg = XRec p (RecProj arg)
@

The @fbind@ rule is then given the type
@fbind :: { forall b. DisambECP b => PV (Fbind b) }@ accommodating both
alternatives:

@
    type Fbind b = Either (LHsRecField GhcPs (LocatedA b)) ( LHsRecProj GhcPs (LocatedA b))
@

In @data HsExpr p@, the @RecordUpd@ constructor indicates regular updates vs.
projection updates by means of the @rupd_flds@ member type, an @Either@
instance:

@
    | RecordUpd
        { rupd_ext  :: XRecordUpd p
        , rupd_expr :: LHsExpr p
        , rupd_flds :: Either [LHsRecUpdField p] [LHsRecUpdProj p]
        }
@

Here,

@
    type RecUpdProj p = RecProj p (LHsExpr p)
    type LHsRecUpdProj p = XRec p (RecUpdProj p)
@

and @Left@ values indicating regular record update, @Right@ values updates
desugared to @setField@s. If @OverloadedRecordUpdate@ is enabled, any updates
parsed as @LHsRecField GhcPs@ values are converted to @LHsRecUpdProj GhcPs@
values (see function @mkRdrRecordUpd@ in 'GHC.Parser.PostProcess').
-}

-- | RecordDotSyntax field updates
type LFieldLabelStrings p = XRec p (FieldLabelStrings p)

-- | A chain of field labels, as written in @a.b.c@.
-- See Note [RecordDotSyntax field updates] above.
newtype FieldLabelStrings p
  = FieldLabelStrings [XRec p (DotFieldOcc p)]

-- Renders the labels separated by dots, e.g. @foo.bar.baz@.
instance (UnXRec p, Outputable (XRec p FieldLabelString)) => Outputable (FieldLabelStrings p) where
  ppr (FieldLabelStrings flds) =
    hcat (punctuate dot (map (ppr . unXRec @p) flds))

instance (UnXRec p, Outputable (XRec p FieldLabelString)) => OutputableBndr (FieldLabelStrings p) where
  pprInfixOcc = pprFieldLabelStrings
  pprPrefixOcc = pprFieldLabelStrings

instance (UnXRec p, Outputable (XRec p FieldLabelString)) => OutputableBndr (Located (FieldLabelStrings p)) where
  pprInfixOcc = pprInfixOcc . unLoc
  -- NOTE(review): pprPrefixOcc delegates to pprInfixOcc rather than
  -- pprPrefixOcc; for FieldLabelStrings both methods are
  -- pprFieldLabelStrings (see the instance above), so the output is the
  -- same either way, but this looks like a copy-paste slip upstream.
  pprPrefixOcc = pprInfixOcc . unLoc

-- | Render a label chain with the labels separated by dots: @a.b.c@.
pprFieldLabelStrings :: forall p. (UnXRec p, Outputable (XRec p FieldLabelString)) => FieldLabelStrings p -> SDoc
pprFieldLabelStrings (FieldLabelStrings flds) =
    hcat (punctuate dot (map (ppr . unXRec @p) flds))

-- | Like 'pprFieldLabelStrings', but prints each individual label via
-- 'pprPrefixFieldLabelString'.
pprPrefixFieldLabelStrings :: forall p. (UnXRec p, Outputable (XRec p FieldLabelString)) => FieldLabelStrings p -> SDoc
pprPrefixFieldLabelStrings (FieldLabelStrings flds) =
    hcat (punctuate dot (map (pprPrefixFieldLabelString . unXRec @p) flds))

-- | Print the label of one 'DotFieldOcc' via 'pprPrefixFastString';
-- the extension constructor is rendered as its constructor name.
pprPrefixFieldLabelString :: forall p. UnXRec p => DotFieldOcc p -> SDoc
pprPrefixFieldLabelString (DotFieldOcc _ s) = (pprPrefixFastString . unXRec @p) s
pprPrefixFieldLabelString XDotFieldOcc{} = text "XDotFieldOcc"

-- | Print a 'FastString' as an unqualified variable occurrence in
-- prefix position.
pprPrefixFastString :: FastString -> SDoc
pprPrefixFastString fs = pprPrefixOcc (mkVarUnqual fs)

instance UnXRec p => Outputable (DotFieldOcc p) where
  ppr (DotFieldOcc _ s) = (pprPrefixFastString . unXRec @p) s
  ppr XDotFieldOcc{} = text "XDotFieldOcc"

-- Field projection updates (e.g. @foo.bar.baz = 1@). See Note
-- [RecordDotSyntax field updates].
type RecProj p arg = HsFieldBind (LFieldLabelStrings p) arg

-- The phantom type parameter @p@ is for symmetry with @LHsRecField p
-- arg@ in the definition of @data Fbind@ (see GHC.Parser.Process).
type LHsRecProj p arg = XRec p (RecProj p arg)

-- These two synonyms are used in the definition of syntax @RecordUpd@
-- below.
type RecUpdProj p = RecProj p (LHsExpr p)
type LHsRecUpdProj p = XRec p (RecUpdProj p)

{-
************************************************************************
*                                                                      *
\subsection{Expressions proper}
*                                                                      *
************************************************************************
-}

-- * Expressions proper

-- | Located Haskell Expression
type LHsExpr p = XRec p (HsExpr p)
  -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnComma' when
  --   in a list

  -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation

-------------------------
{- Note [NoSyntaxExpr]
~~~~~~~~~~~~~~~~~~~~~~
Syntax expressions can be missing (NoSyntaxExprRn or NoSyntaxExprTc)
for several reasons:

1. As described in Note [Rebindable if]

2. In order to suppress "not in scope: xyz" messages when a bit of
   rebindable syntax does not apply. For example, when using an irrefutable
   pattern in a BindStmt, we don't need a `fail` operator.

3. Rebindable syntax might just not make sense. For example, a BodyStmt
   contains the syntax for `guard`, but that's used only in monad
   comprehensions. If we had more of a whiz-bang type system, we might be
   able to rule this case out statically.
-}

-- | Syntax Expression
--
-- SyntaxExpr represents the function used in interpreting rebindable
-- syntax. In the parser, we have no information to supply; in the renamer,
-- we have the name of the function (but see
-- Note [Monad fail : Rebindable syntax, overloaded strings] for a wrinkle)
-- and in the type-checker we have a more elaborate structure 'SyntaxExprTc'.
--
-- In some contexts, rebindable syntax is not implemented, and so we have
-- constructors to represent that possibility in both the renamer and
-- typechecker instantiations.
--
-- E.g. @(>>=)@ is filled in before the renamer by the appropriate 'Name' for
-- @(>>=)@, and then instantiated by the type checker with its type args
-- etc
type family SyntaxExpr p

-- | Command Syntax Table (for Arrow syntax)
type CmdSyntaxTable p = [(Name, HsExpr p)]
-- See Note [CmdSyntaxTable]

{- Note [CmdSyntaxTable]
~~~~~~~~~~~~~~~~~~~~~
Used only for arrow-syntax stuff (HsCmdTop), the CmdSyntaxTable keeps
track of the methods needed for a Cmd.

* Before the renamer, this list is an empty list

* After the renamer, it takes the form @[(std_name, HsVar actual_name)]@
  For example, for the 'arr' method
   * normal case:            (GHC.Control.Arrow.arr, HsVar GHC.Control.Arrow.arr)
   * with rebindable syntax: (GHC.Control.Arrow.arr, arr_22)
     where @arr_22@ is whatever 'arr' is in scope

* After the type checker, it takes the form [(std_name, <expression>)]
  where <expression> is the evidence for the method.  This evidence is
  instantiated with the class, but is still polymorphic in everything
  else.  For example, in the case of 'arr', the evidence has type
     forall b c. (b->c) -> a b c
  where 'a' is the ambient type of the arrow.  This polymorphism is
  important because the desugarer uses the same evidence at multiple
  different types.

This is Less Cool than what we normally do for rebindable syntax, which is to
make fully-instantiated piece of evidence at every use site.  The Cmd way
is Less Cool because
  * The renamer has to predict which methods are needed.
    See the tedious GHC.Rename.Expr.methodNamesCmd.

  * The desugarer has to know the polymorphic type of the instantiated
    method. This is checked by Inst.tcSyntaxName, but is less flexible
    than the rest of rebindable syntax, where the type is less
    pre-ordained.  (And this flexibility is useful; for example we can
    typecheck do-notation with (>>=) :: m1 a -> (a -> m2 b) -> m2 b.)
-} {- Note [Record selectors in the AST] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is how record selectors are expressed in GHC's AST: Example data type data T = MkT { size :: Int } Record selectors: | GhcPs | GhcRn | GhcTc | ----------------------------------------------------------------------------------| size (assuming one | HsVar | HsRecSel | HsRecSel | 'size' in scope) | | | | ----------------------|--------------|----------------------|---------------------| .size (assuming | HsProjection | getField @"size" | getField @"size" | OverloadedRecordDot) | | | | ----------------------|--------------|----------------------|---------------------| e.size (assuming | HsGetField | getField @"size" e | getField @"size" e | OverloadedRecordDot) | | | | NB 1: DuplicateRecordFields makes no difference to the first row of this table, except that if 'size' is a field of more than one data type, then a naked use of the record selector 'size' may well be ambiguous. You have to use a qualified name. And there is no way to do this if both data types are declared in the same module. NB 2: The notation getField @"size" e is short for HsApp (HsAppType (HsVar "getField") (HsWC (HsTyLit (HsStrTy "size")) [])) e. We track the original parsed syntax via HsExpanded. -} {- Note [Non-overloaded record field selectors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider data T = MkT { x,y :: Int } f r x = x + y r This parses with HsVar for x, y, r on the RHS of f. Later, the renamer recognises that y in the RHS of f is really a record selector, and changes it to a HsRecSel. In contrast x is locally bound, shadowing the record selector, and stays as an HsVar. The renamer adds the Name of the record selector into the XCFieldOcc extension field, The typechecker keeps HsRecSel as HsRecSel, and transforms the record-selector Name to an Id. -} -- | A Haskell expression. 
data HsExpr p = HsVar (XVar p) (LIdP p) -- ^ Variable -- See Note [Located RdrNames] | HsUnboundVar (XUnboundVar p) OccName -- ^ Unbound variable; also used for "holes" -- (_ or _x). -- Turned from HsVar to HsUnboundVar by the -- renamer, when it finds an out-of-scope -- variable or hole. -- The (XUnboundVar p) field becomes an HoleExprRef -- after typechecking; this is where the -- erroring expression will be written after -- solving. See Note [Holes] in GHC.Tc.Types.Constraint. | HsRecSel (XRecSel p) (FieldOcc p) -- ^ Variable pointing to record selector -- See Note [Non-overloaded record field selectors] and -- Note [Record selectors in the AST] | HsOverLabel (XOverLabel p) FastString -- ^ Overloaded label (Note [Overloaded labels] in GHC.OverloadedLabels) | HsIPVar (XIPVar p) HsIPName -- ^ Implicit parameter (not in use after typechecking) | HsOverLit (XOverLitE p) (HsOverLit p) -- ^ Overloaded literals | HsLit (XLitE p) (HsLit p) -- ^ Simple (non-overloaded) literals | HsLam (XLam p) (MatchGroup p (LHsExpr p)) -- ^ Lambda abstraction. 
Currently always a single match -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLam', -- 'GHC.Parser.Annotation.AnnRarrow', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Lambda-case -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLam', -- 'GHC.Parser.Annotation.AnnCase','GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose' -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLam', -- 'GHC.Parser.Annotation.AnnCases','GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsLamCase (XLamCase p) LamCaseVariant (MatchGroup p (LHsExpr p)) | HsApp (XApp p) (LHsExpr p) (LHsExpr p) -- ^ Application | HsAppType (XAppTypeE p) -- After typechecking: the type argument (LHsExpr p) (LHsWcType (NoGhcTc p)) -- ^ Visible type application -- -- Explicit type argument; e.g f @Int x y -- NB: Has wildcards, but no implicit quantification -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnAt', -- | Operator applications: -- NB Bracketed ops such as (+) come out as Vars. -- NB Sadly, we need an expr for the operator in an OpApp/Section since -- the renamer may turn a HsVar into HsRecSel or HsUnboundVar | OpApp (XOpApp p) (LHsExpr p) -- left operand (LHsExpr p) -- operator (LHsExpr p) -- right operand -- | Negation operator. 
Contains the negated expression and the name -- of 'negate' -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnMinus' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | NegApp (XNegApp p) (LHsExpr p) (SyntaxExpr p) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'('@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsPar (XPar p) !(LHsToken "(" p) (LHsExpr p) -- ^ Parenthesised expr; see Note [Parens in HsSyn] !(LHsToken ")" p) | SectionL (XSectionL p) (LHsExpr p) -- operand; see Note [Sections in HsSyn] (LHsExpr p) -- operator | SectionR (XSectionR p) (LHsExpr p) -- operator; see Note [Sections in HsSyn] (LHsExpr p) -- operand -- | Used for explicit tuples and sections thereof -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- Note [ExplicitTuple] | ExplicitTuple (XExplicitTuple p) [HsTupArg p] Boxity -- | Used for unboxed sum types -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'(#'@, -- 'GHC.Parser.Annotation.AnnVbar', 'GHC.Parser.Annotation.AnnClose' @'#)'@, -- -- There will be multiple 'GHC.Parser.Annotation.AnnVbar', (1 - alternative) before -- the expression, (arity - alternative) after it | ExplicitSum (XExplicitSum p) ConTag -- Alternative (one-based) Arity -- Sum arity (LHsExpr p) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnCase', -- 'GHC.Parser.Annotation.AnnOf','GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCase (XCase p) (LHsExpr p) (MatchGroup p (LHsExpr p)) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnIf', -- 
'GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnThen','GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnElse', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsIf (XIf p) -- GhcPs: this is a Bool; False <=> do not use -- rebindable syntax (LHsExpr p) -- predicate (LHsExpr p) -- then part (LHsExpr p) -- else part -- | Multi-way if -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnIf' -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsMultiIf (XMultiIf p) [LGRHS p (LHsExpr p)] -- | let(rec) -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLet', -- 'GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@,'GHC.Parser.Annotation.AnnIn' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsLet (XLet p) !(LHsToken "let" p) (HsLocalBinds p) !(LHsToken "in" p) (LHsExpr p) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDo', -- 'GHC.Parser.Annotation.AnnOpen', 'GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnVbar', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsDo (XDo p) -- Type of the whole expression HsDoFlavour (XRec p [ExprLStmt p]) -- "do":one or more stmts -- | Syntactic list: [a,b,c,...] 
-- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'['@, -- 'GHC.Parser.Annotation.AnnClose' @']'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- See Note [Empty lists] | ExplicitList (XExplicitList p) -- Gives type of components of list [LHsExpr p] -- | Record construction -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnDotdot','GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | RecordCon { rcon_ext :: XRecordCon p , rcon_con :: XRec p (ConLikeP p) -- The constructor , rcon_flds :: HsRecordBinds p } -- The fields -- | Record update -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnDotdot','GHC.Parser.Annotation.AnnClose' @'}'@ -- 'GHC.Parser.Annotation.AnnComma, 'GHC.Parser.Annotation.AnnDot', -- 'GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | RecordUpd { rupd_ext :: XRecordUpd p , rupd_expr :: LHsExpr p , rupd_flds :: Either [LHsRecUpdField p] [LHsRecUpdProj p] } -- For a type family, the arg types are of the *instance* tycon, -- not the family tycon -- | Record field selection e.g @z.x@. -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDot' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- This case only arises when the OverloadedRecordDot langauge -- extension is enabled. See Note [Record selectors in the AST]. | HsGetField { gf_ext :: XGetField p , gf_expr :: LHsExpr p , gf_field :: XRec p (DotFieldOcc p) } -- | Record field selector. e.g. @(.x)@ or @(.x.y)@ -- -- This case only arises when the OverloadedRecordDot langauge -- extensions is enabled. See Note [Record selectors in the AST]. 
-- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpenP' -- 'GHC.Parser.Annotation.AnnDot', 'GHC.Parser.Annotation.AnnCloseP' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsProjection { proj_ext :: XProjection p , proj_flds :: NonEmpty (XRec p (DotFieldOcc p)) } -- | Expression with an explicit type signature. @e :: type@ -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | ExprWithTySig (XExprWithTySig p) (LHsExpr p) (LHsSigWcType (NoGhcTc p)) -- | Arithmetic sequence -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'['@, -- 'GHC.Parser.Annotation.AnnComma','GHC.Parser.Annotation.AnnDotdot', -- 'GHC.Parser.Annotation.AnnClose' @']'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | ArithSeq (XArithSeq p) (Maybe (SyntaxExpr p)) -- For OverloadedLists, the fromList witness (ArithSeqInfo p) -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation ----------------------------------------------------------- -- MetaHaskell Extensions -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnOpenE','GHC.Parser.Annotation.AnnOpenEQ', -- 'GHC.Parser.Annotation.AnnClose','GHC.Parser.Annotation.AnnCloseQ' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsTypedBracket (XTypedBracket p) (LHsExpr p) | HsUntypedBracket (XUntypedBracket p) (HsQuote p) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsSpliceE (XSpliceE p) (HsSplice p) ----------------------------------------------------------- -- Arrow notation extension -- | @proc@ notation for Arrows -- -- - 
'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnProc', -- 'GHC.Parser.Annotation.AnnRarrow' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsProc (XProc p) (LPat p) -- arrow abstraction, proc (LHsCmdTop p) -- body of the abstraction -- always has an empty stack --------------------------------------- -- static pointers extension -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnStatic', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsStatic (XStatic p) -- Free variables of the body, and type after typechecking (LHsExpr p) -- Body --------------------------------------- -- Expressions annotated with pragmas, written as {-# ... #-} | HsPragE (XPragE p) (HsPragE p) (LHsExpr p) | XExpr !(XXExpr p) -- Note [Trees That Grow] in Language.Haskell.Syntax.Extension for the -- general idea, and Note [Rebindable syntax and HsExpansion] in GHC.Hs.Expr -- for an example of how we use it. -- --------------------------------------------------------------------- data DotFieldOcc p = DotFieldOcc { dfoExt :: XCDotFieldOcc p , dfoLabel :: XRec p FieldLabelString } | XDotFieldOcc !(XXDotFieldOcc p) -- --------------------------------------------------------------------- -- | A pragma, written as {-# ... #-}, that may appear within an expression. 
data HsPragE p = HsPragSCC (XSCC p) SourceText -- Note [Pragma source text] in GHC.Types.SourceText StringLiteral -- "set cost centre" SCC pragma -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnOpen' @'{-\# GENERATED'@, -- 'GHC.Parser.Annotation.AnnVal','GHC.Parser.Annotation.AnnVal', -- 'GHC.Parser.Annotation.AnnColon','GHC.Parser.Annotation.AnnVal', -- 'GHC.Parser.Annotation.AnnMinus', -- 'GHC.Parser.Annotation.AnnVal','GHC.Parser.Annotation.AnnColon', -- 'GHC.Parser.Annotation.AnnVal', -- 'GHC.Parser.Annotation.AnnClose' @'\#-}'@ | XHsPragE !(XXPragE p) -- | Located Haskell Tuple Argument -- -- 'HsTupArg' is used for tuple sections -- @(,a,)@ is represented by -- @ExplicitTuple [Missing ty1, Present a, Missing ty3]@ -- Which in turn stands for @(\x:ty1 \y:ty2. (x,a,y))@ type LHsTupArg id = XRec id (HsTupArg id) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnComma' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Haskell Tuple Argument data HsTupArg id = Present (XPresent id) (LHsExpr id) -- ^ The argument | Missing (XMissing id) -- ^ The argument is missing, but this is its type | XTupArg !(XXTupArg id) -- ^ Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension -- | Which kind of lambda case are we dealing with? data LamCaseVariant = LamCase -- ^ `\case` | LamCases -- ^ `\cases` deriving (Data, Eq) lamCaseKeyword :: LamCaseVariant -> SDoc lamCaseKeyword LamCase = text "\\case" lamCaseKeyword LamCases = text "\\cases" {- Note [Parens in HsSyn] ~~~~~~~~~~~~~~~~~~~~~~ HsPar (and ParPat in patterns, HsParTy in types) is used as follows * HsPar is required; the pretty printer does not add parens. * HsPars are respected when rearranging operator fixities. 
So a * (b + c) means what it says (where the parens are an HsPar) * For ParPat and HsParTy the pretty printer does add parens but this should be a no-op for ParsedSource, based on the pretty printer round trip feature introduced in https://phabricator.haskell.org/rGHC499e43824bda967546ebf95ee33ec1f84a114a7c * ParPat and HsParTy are pretty printed as '( .. )' regardless of whether or not they are strictly necessary. This should be addressed when #13238 is completed, to be treated the same as HsPar. Note [Sections in HsSyn] ~~~~~~~~~~~~~~~~~~~~~~~~ Sections should always appear wrapped in an HsPar, thus HsPar (SectionR ...) The parser parses sections in a wider variety of situations (See Note [Parsing sections]), but the renamer checks for those parens. This invariant makes pretty-printing easier; we don't need a special case for adding the parens round sections. Note [Rebindable if] ~~~~~~~~~~~~~~~~~~~~ The rebindable syntax for 'if' is a bit special, because when rebindable syntax is *off* we do not want to treat (if c then t else e) as if it was an application (ifThenElse c t e). Why not? Because we allow an 'if' to return *unboxed* results, thus if blah then 3# else 4# whereas that would not be possible using a all to a polymorphic function (because you can't call a polymorphic function at an unboxed type). So we use NoSyntaxExpr to mean "use the old built-in typing rule". A further complication is that, in the `deriving` code, we never want to use rebindable syntax. So, even in GhcPs, we want to denote whether to use rebindable syntax or not. This is done via the type instance for XIf GhcPs. Note [Record Update HsWrapper] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is a wrapper in RecordUpd which is used for the *required* constraints for pattern synonyms. This wrapper is created in the typechecking and is then directly used in the desugaring without modification. 
For example, if we have the record pattern synonym P, pattern P :: (Show a) => a -> Maybe a pattern P{x} = Just x foo = (Just True) { x = False } then `foo` desugars to something like foo = case Just True of P x -> P False hence we need to provide the correct dictionaries to P's matcher on the RHS so that we can build the expression. Note [Located RdrNames] ~~~~~~~~~~~~~~~~~~~~~~~ A number of syntax elements have seemingly redundant locations attached to them. This is deliberate, to allow transformations making use of the exact print annotations to easily correlate a Located Name in the RenamedSource with a Located RdrName in the ParsedSource. There are unfortunately enough differences between the ParsedSource and the RenamedSource that the exact print annotations cannot be used directly with RenamedSource, so this allows a simple mapping to be used based on the location. Note [ExplicitTuple] ~~~~~~~~~~~~~~~~~~~~ An ExplicitTuple is never just a data constructor like (,,,). That is, the `[LHsTupArg p]` argument of `ExplicitTuple` has at least one `Present` member (and is thus never empty). A tuple data constructor like () or (,,,) is parsed as an `HsVar`, not an `ExplicitTuple`, and stays that way. This is important for two reasons: 1. We don't need -XTupleSections for (,,,) 2. The type variables in (,,,) can be instantiated with visible type application. That is, (,,) :: forall a b c. a -> b -> c -> (a,b,c) (True,,) :: forall {b} {c}. b -> c -> (Bool,b,c) Note that the tuple section has *inferred* arguments, while the data constructor has *specified* ones. (See Note [Required, Specified, and Inferred for types] in GHC.Tc.TyCl for background.) Sadly, the grammar for this is actually ambiguous, and it's only thanks to the preference of a shift in a shift/reduce conflict that the parser works as this Note details. Search for a reference to this Note in GHC.Parser for further explanation. 
Note [Empty lists] ~~~~~~~~~~~~~~~~~~ An empty list could be considered either a data constructor (stored with HsVar) or an ExplicitList. This Note describes how empty lists flow through the various phases and why. Parsing ------- An empty list is parsed by the sysdcon nonterminal. It thus comes to life via HsVar nilDataCon (defined in GHC.Builtin.Types). A freshly-parsed (HsExpr GhcPs) empty list is never a ExplicitList. Renaming -------- If -XOverloadedLists is enabled, we must type-check the empty list as if it were a call to fromListN. (This is true regardless of the setting of -XRebindableSyntax.) This is very easy if the empty list is an ExplicitList, but an annoying special case if it's an HsVar. So the renamer changes a HsVar nilDataCon to an ExplicitList [], but only if -XOverloadedLists is on. (Why not always? Read on, dear friend.) This happens in the HsVar case of rnExpr. Type-checking ------------- We want to accept an expression like [] @Int. To do this, we must infer that [] :: forall a. [a]. This is easy if [] is a HsVar with the right DataCon inside. However, the type-checking for explicit lists works differently: [x,y,z] is never polymorphic. Instead, we unify the types of x, y, and z together, and use the unified type as the argument to the cons and nil constructors. Thus, treating [] as an empty ExplicitList in the type-checker would prevent [] @Int from working. However, if -XOverloadedLists is on, then [] @Int really shouldn't be allowed: it's just like fromListN 0 [] @Int. Since fromListN :: forall list. IsList list => Int -> [Item list] -> list that expression really should be rejected. Thus, the renamer's behaviour is exactly what we want: treat [] as a datacon when -XNoOverloadedLists, and as an empty ExplicitList when -XOverloadedLists. See also #13680, which requested [] @Int to work. 
-} ----------------------- pprExternalSrcLoc :: (StringLiteral,(Int,Int),(Int,Int)) -> SDoc pprExternalSrcLoc (StringLiteral _ src _,(n1,n2),(n3,n4)) = ppr (src,(n1,n2),(n3,n4)) {- HsSyn records exactly where the user put parens, with HsPar. So generally speaking we print without adding any parens. However, some code is internally generated, and in some places parens are absolutely required; so for these places we use pprParendLExpr (but don't print double parens of course). For operator applications we don't add parens, because the operator fixities should do the job, except in debug mode (-dppr-debug) so we can see the structure of the parse tree. -} {- ************************************************************************ * * \subsection{Commands (in arrow abstractions)} * * ************************************************************************ We re-use HsExpr to represent these. -} -- | Located Haskell Command (for arrow syntax) type LHsCmd id = XRec id (HsCmd id) -- | Haskell Command (e.g. a "statement" in an Arrow proc block) data HsCmd id -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.Annlarrowtail', -- 'GHC.Parser.Annotation.Annrarrowtail','GHC.Parser.Annotation.AnnLarrowtail', -- 'GHC.Parser.Annotation.AnnRarrowtail' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation = HsCmdArrApp -- Arrow tail, or arrow application (f -< arg) (XCmdArrApp id) -- type of the arrow expressions f, -- of the form a t t', where arg :: t (LHsExpr id) -- arrow expression, f (LHsExpr id) -- input expression, arg HsArrAppType -- higher-order (-<<) or first-order (-<) Bool -- True => right-to-left (f -< arg) -- False => left-to-right (arg >- f) -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpenB' @'(|'@, -- 'GHC.Parser.Annotation.AnnCloseB' @'|)'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCmdArrForm -- Command formation, (| e cmd1 .. 
cmdn |) (XCmdArrForm id) (LHsExpr id) -- The operator. -- After type-checking, a type abstraction to be -- applied to the type of the local environment tuple LexicalFixity -- Whether the operator appeared prefix or infix when -- parsed. (Maybe Fixity) -- fixity (filled in by the renamer), for forms that -- were converted from OpApp's by the renamer [LHsCmdTop id] -- argument commands | HsCmdApp (XCmdApp id) (LHsCmd id) (LHsExpr id) | HsCmdLam (XCmdLam id) (MatchGroup id (LHsCmd id)) -- kappa -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLam', -- 'GHC.Parser.Annotation.AnnRarrow', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCmdPar (XCmdPar id) !(LHsToken "(" id) (LHsCmd id) -- parenthesised command !(LHsToken ")" id) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'('@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCmdCase (XCmdCase id) (LHsExpr id) (MatchGroup id (LHsCmd id)) -- bodies are HsCmd's -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnCase', -- 'GHC.Parser.Annotation.AnnOf','GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Lambda-case -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLam', -- 'GHC.Parser.Annotation.AnnCase','GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@ -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLam', -- 'GHC.Parser.Annotation.AnnCases','GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCmdLamCase (XCmdLamCase id) LamCaseVariant (MatchGroup id (LHsCmd id)) -- bodies are HsCmd's | HsCmdIf (XCmdIf id) (SyntaxExpr id) -- 
cond function (LHsExpr id) -- predicate (LHsCmd id) -- then part (LHsCmd id) -- else part -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnIf', -- 'GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnThen','GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnElse', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCmdLet (XCmdLet id) !(LHsToken "let" id) (HsLocalBinds id) -- let(rec) !(LHsToken "in" id) (LHsCmd id) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLet', -- 'GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@,'GHC.Parser.Annotation.AnnIn' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsCmdDo (XCmdDo id) -- Type of the whole expression (XRec id [CmdLStmt id]) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDo', -- 'GHC.Parser.Annotation.AnnOpen', 'GHC.Parser.Annotation.AnnSemi', -- 'GHC.Parser.Annotation.AnnVbar', -- 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XCmd !(XXCmd id) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension -- | Haskell arrow application type. data HsArrAppType -- | First order arrow application '-<' = HsHigherOrderApp -- | Higher order arrow application '-<<' | HsFirstOrderApp deriving Data pprHsArrType :: HsArrAppType -> SDoc pprHsArrType HsHigherOrderApp = text "higher order arrow application" pprHsArrType HsFirstOrderApp = text "first order arrow application" {- | Top-level command, introducing a new arrow. This may occur inside a proc (where the stack is empty) or as an argument of a command-forming operator. 
-} -- | Located Haskell Top-level Command type LHsCmdTop p = XRec p (HsCmdTop p) -- | Haskell Top-level Command data HsCmdTop p = HsCmdTop (XCmdTop p) (LHsCmd p) | XCmdTop !(XXCmdTop p) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension ----------------------- {- ************************************************************************ * * \subsection{Record binds} * * ************************************************************************ -} -- | Haskell Record Bindings type HsRecordBinds p = HsRecFields p (LHsExpr p) {- ************************************************************************ * * \subsection{@Match@, @GRHSs@, and @GRHS@ datatypes} * * ************************************************************************ @Match@es are sets of pattern bindings and right hand sides for functions, patterns or case branches. For example, if a function @g@ is defined as: \begin{verbatim} g (x,y) = y g ((x:ys),y) = y+1, \end{verbatim} then \tr{g} has two @Match@es: @(x,y) = y@ and @((x:ys),y) = y+1@. It is always the case that each element of an @[Match]@ list has the same number of @pats@s inside it. This corresponds to saying that a function defined by pattern matching must have the same number of patterns in each equation. -} data MatchGroup p body = MG { mg_ext :: XMG p body -- Post-typechecker, types of args and result , mg_alts :: XRec p [LMatch p body] -- The alternatives , mg_origin :: Origin } -- The type is the type of the entire group -- t1 -> ... 
-> tn -> tr -- where there are n patterns | XMatchGroup !(XXMatchGroup p body) data MatchGroupTc = MatchGroupTc { mg_arg_tys :: [Scaled Type] -- Types of the arguments, t1..tn , mg_res_ty :: Type -- Type of the result, tr } deriving Data -- | Located Match type LMatch id body = XRec id (Match id body) -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnSemi' when in a -- list -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data Match p body = Match { m_ext :: XCMatch p body, m_ctxt :: HsMatchContext p, -- See Note [m_ctxt in Match] m_pats :: [LPat p], -- The patterns m_grhss :: (GRHSs p body) } | XMatch !(XXMatch p body) {- Note [m_ctxt in Match] ~~~~~~~~~~~~~~~~~~~~~~ A Match can occur in a number of contexts, such as a FunBind, HsCase, HsLam and so on. In order to simplify tooling processing and pretty print output, the provenance is captured in an HsMatchContext. This is particularly important for the exact print annotations for a multi-equation FunBind. The parser initially creates a FunBind with a single Match in it for every function definition it sees. These are then grouped together by getMonoBind into a single FunBind, where all the Matches are combined. In the process, all the original FunBind fun_id's bar one are discarded, including the locations. This causes a problem for source to source conversions via exact print annotations, so the original fun_ids and infix flags are preserved in the Match, when it originates from a FunBind. 
Example infix function definition requiring individual exact print annotations (&&& ) [] [] = [] xs &&& [] = xs ( &&& ) [] ys = ys -} isInfixMatch :: Match id body -> Bool isInfixMatch match = case m_ctxt match of FunRhs {mc_fixity = Infix} -> True _ -> False -- | Guarded Right-Hand Sides -- -- GRHSs are used both for pattern bindings and for Matches -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnVbar', -- 'GHC.Parser.Annotation.AnnEqual','GHC.Parser.Annotation.AnnWhere', -- 'GHC.Parser.Annotation.AnnOpen','GHC.Parser.Annotation.AnnClose' -- 'GHC.Parser.Annotation.AnnRarrow','GHC.Parser.Annotation.AnnSemi' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data GRHSs p body = GRHSs { grhssExt :: XCGRHSs p body, grhssGRHSs :: [LGRHS p body], -- ^ Guarded RHSs grhssLocalBinds :: HsLocalBinds p -- ^ The where clause } | XGRHSs !(XXGRHSs p body) -- | Located Guarded Right-Hand Side type LGRHS id body = XRec id (GRHS id body) -- | Guarded Right Hand Side. data GRHS p body = GRHS (XCGRHS p body) [GuardLStmt p] -- Guards body -- Right hand side | XGRHS !(XXGRHS p body) -- We know the list must have at least one @Match@ in it. 
{-
************************************************************************
*                                                                      *
\subsection{Do stmts and list comprehensions}
*                                                                      *
************************************************************************
-}

-- | Located @do@ block Statement
type LStmt id body = XRec id (StmtLR id id body)

-- | Located Statement with separate Left and Right id's
type LStmtLR idL idR body = XRec idL (StmtLR idL idR body)

-- | @do@ block Statement
type Stmt id body = StmtLR id id body

-- | Command Located Statement
type CmdLStmt   id = LStmt id (LHsCmd  id)

-- | Command Statement
type CmdStmt    id = Stmt  id (LHsCmd  id)

-- | Expression Located Statement
type ExprLStmt  id = LStmt id (LHsExpr id)

-- | Expression Statement
type ExprStmt   id = Stmt  id (LHsExpr id)

-- | Guard Located Statement
type GuardLStmt id = LStmt id (LHsExpr id)

-- | Guard Statement
type GuardStmt  id = Stmt  id (LHsExpr id)

-- | Ghci Located Statement
type GhciLStmt  id = LStmt id (LHsExpr id)

-- | Ghci Statement
type GhciStmt   id = Stmt  id (LHsExpr id)

-- The SyntaxExprs in here are used *only* for do-notation and monad
-- comprehensions, which have rebindable syntax.  Otherwise they are unused.
-- | Exact print annotations when in qualifier lists or guards
--  - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnVbar',
--    'GHC.Parser.Annotation.AnnComma','GHC.Parser.Annotation.AnnThen',
--    'GHC.Parser.Annotation.AnnBy','GHC.Parser.Annotation.AnnBy',
--    'GHC.Parser.Annotation.AnnGroup','GHC.Parser.Annotation.AnnUsing'

-- For details on above see Note [exact print annotations] in GHC.Parser.Annotation
data StmtLR idL idR body -- body should always be (LHs**** idR)
  = LastStmt  -- Always the last Stmt in ListComp, MonadComp,
              -- and (after the renamer, see GHC.Rename.Expr.checkLastStmt) DoExpr, MDoExpr
              -- Not used for GhciStmtCtxt, PatGuard, which scope over other stuff
          (XLastStmt idL idR body)
          body
          (Maybe Bool)  -- Whether return was stripped
            -- Just True  <=> return with a dollar was stripped by ApplicativeDo
            -- Just False <=> return without a dollar was stripped by ApplicativeDo
            -- Nothing    <=> Nothing was stripped
          (SyntaxExpr idR)   -- The return operator
            -- The return operator is used only for MonadComp
            -- For ListComp we use the baked-in 'return'
            -- For DoExpr, MDoExpr, we don't apply a 'return' at all
            -- See Note [Monad Comprehensions]

  -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLarrow'

  -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation
  | BindStmt (XBindStmt idL idR body)
             -- ^ Post renaming has optional fail and bind / (>>=) operator.
             -- Post typechecking, also has multiplicity of the argument
             -- and the result type of the function passed to bind;
             -- that is, (P, S) in (>>=) :: Q -> (R % P -> S) -> T
             -- See Note [The type of bind in Stmts]
             (LPat idL)
             body

  -- | 'ApplicativeStmt' represents an applicative expression built with
  -- '<$>' and '<*>'. It is generated by the renamer, and is desugared into the
  -- appropriate applicative expression by the desugarer, but it is intended
  -- to be invisible in error messages.
  --
  -- For full details, see Note [ApplicativeDo] in "GHC.Rename.Expr"
  --
  | ApplicativeStmt
             (XApplicativeStmt idL idR body) -- Post typecheck, Type of the body
             [ ( SyntaxExpr idR
               , ApplicativeArg idL) ]
                      -- [(<$>, e1), (<*>, e2), ..., (<*>, en)]
             (Maybe (SyntaxExpr idR))  -- 'join', if necessary

  | BodyStmt (XBodyStmt idL idR body) -- Post typecheck, element type
                                      -- of the RHS (used for arrows)
             body              -- See Note [BodyStmt]
             (SyntaxExpr idR)  -- The (>>) operator
             (SyntaxExpr idR)  -- The `guard` operator; used only in MonadComp
                               -- See notes [Monad Comprehensions]

  -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnLet'
  --   'GHC.Parser.Annotation.AnnOpen' @'{'@,'GHC.Parser.Annotation.AnnClose' @'}'@,

  -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation
  | LetStmt  (XLetStmt idL idR body) (HsLocalBindsLR idL idR)

  -- ParStmts only occur in a list/monad comprehension
  | ParStmt  (XParStmt idL idR body)    -- Post typecheck,
                                        -- S in (>>=) :: Q -> (R -> S) -> T
             [ParStmtBlock idL idR]
             (HsExpr idR)               -- Polymorphic `mzip` for monad comprehensions
             (SyntaxExpr idR)           -- The `>>=` operator
                                        -- See notes [Monad Comprehensions]
            -- After renaming, the ids are the binders
            -- bound by the stmts and used after them

  | TransStmt {
      trS_ext   :: XTransStmt idL idR body, -- Post typecheck,
                                            -- R in (>>=) :: Q -> (R -> S) -> T
      trS_form  :: TransForm,
      trS_stmts :: [ExprLStmt idL],   -- Stmts to the *left* of the 'group'
                                      -- which generates the tuples to be grouped
      trS_bndrs :: [(IdP idR, IdP idR)], -- See Note [TransStmt binder map]
      trS_using :: LHsExpr idR,
      trS_by :: Maybe (LHsExpr idR),  -- "by e" (optional)
        -- Invariant: if trS_form = GroupBy, then grp_by = Just e

      trS_ret :: SyntaxExpr idR,      -- The monomorphic 'return' function for
                                      -- the inner monad comprehensions
      trS_bind :: SyntaxExpr idR,     -- The '(>>=)' operator
      trS_fmap :: HsExpr idR          -- The polymorphic 'fmap' function for desugaring
                                      -- Only for 'group' forms
                                      -- Just a simple HsExpr, because it's
                                      -- too polymorphic for tcSyntaxOp
    }                                 -- See Note [Monad Comprehensions]

  -- Recursive statement (see Note [How RecStmt works] below)
  -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnRec'

  -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation
  | RecStmt
     { recS_ext :: XRecStmt idL idR body
     , recS_stmts :: XRec idR [LStmtLR idL idR body]
     -- Assume XRec is the same for idL and idR, pick one arbitrarily

        -- The next two fields are only valid after renaming
     , recS_later_ids :: [IdP idR]
                         -- The ids are a subset of the variables bound by the
                         -- stmts that are used in stmts that follow the RecStmt

     , recS_rec_ids :: [IdP idR]
                         -- Ditto, but these variables are the "recursive" ones,
                         -- that are used before they are bound in the stmts of
                         -- the RecStmt.
        -- An Id can be in both groups
        -- Both sets of Ids are (now) treated monomorphically
        -- See Note [How RecStmt works] for why they are separate

        -- Rebindable syntax
     , recS_bind_fn :: SyntaxExpr idR -- The bind function
     , recS_ret_fn  :: SyntaxExpr idR -- The return function
     , recS_mfix_fn :: SyntaxExpr idR -- The mfix function
      }
  | XStmtLR !(XXStmtLR idL idR body)

data TransForm   -- The 'f' below is the 'using' function, 'e' is the by function
  = ThenForm     -- then f               or    then f by e             (depending on trS_by)
  | GroupForm    -- then group using f   or    then group by e using f (depending on trS_by)
  deriving Data

-- | Parenthesised Statement Block
data ParStmtBlock idL idR
  = ParStmtBlock
        (XParStmtBlock idL idR)
        [ExprLStmt idL]
        [IdP idR]          -- The variables to be returned
        (SyntaxExpr idR)   -- The return operator
  | XParStmtBlock !(XXParStmtBlock idL idR)

-- | The fail operator
--
-- This is used for `.. <-` "bind statements" in do notation, including
-- non-monadic "binds" in applicative.
--
-- The fail operator is 'Just expr' if it potentially fails monadically. If the
-- pattern match cannot fail, or shouldn't fail monadically (regular incomplete
-- pattern exception), it is 'Nothing'.
-- -- See Note [Monad fail : Rebindable syntax, overloaded strings] for the type of -- expression in the 'Just' case, and why it is so. -- -- See Note [Failing pattern matches in Stmts] for which contexts for -- '@BindStmt@'s should use the monadic fail and which shouldn't. type FailOperator id = Maybe (SyntaxExpr id) -- | Applicative Argument data ApplicativeArg idL = ApplicativeArgOne -- A single statement (BindStmt or BodyStmt) { xarg_app_arg_one :: XApplicativeArgOne idL -- ^ The fail operator, after renaming -- -- The fail operator is needed if this is a BindStmt -- where the pattern can fail. E.g.: -- (Just a) <- stmt -- The fail operator will be invoked if the pattern -- match fails. -- It is also used for guards in MonadComprehensions. -- The fail operator is Nothing -- if the pattern match can't fail , app_arg_pattern :: LPat idL -- WildPat if it was a BodyStmt (see below) , arg_expr :: LHsExpr idL , is_body_stmt :: Bool -- ^ True <=> was a BodyStmt, -- False <=> was a BindStmt. -- See Note [Applicative BodyStmt] } | ApplicativeArgMany -- do { stmts; return vars } { xarg_app_arg_many :: XApplicativeArgMany idL , app_stmts :: [ExprLStmt idL] -- stmts , final_expr :: HsExpr idL -- return (v1,..,vn), or just (v1,..,vn) , bv_pattern :: LPat idL -- (v1,...,vn) , stmt_context :: HsDoFlavour -- ^ context of the do expression, used in pprArg } | XApplicativeArg !(XXApplicativeArg idL) {- Note [The type of bind in Stmts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some Stmts, notably BindStmt, keep the (>>=) bind operator. We do NOT assume that it has type (>>=) :: m a -> (a -> m b) -> m b In some cases (see #303, #1537) it might have a more exotic type, such as (>>=) :: m i j a -> (a -> m j k b) -> m i k b So we must be careful not to make assumptions about the type. In particular, the monad may not be uniform throughout. 
Note [TransStmt binder map] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The [(idR,idR)] in a TransStmt behaves as follows: * Before renaming: [] * After renaming: [ (x27,x27), ..., (z35,z35) ] These are the variables bound by the stmts to the left of the 'group' and used either in the 'by' clause, or in the stmts following the 'group' Each item is a pair of identical variables. * After typechecking: [ (x27:Int, x27:[Int]), ..., (z35:Bool, z35:[Bool]) ] Each pair has the same unique, but different *types*. Note [BodyStmt] ~~~~~~~~~~~~~~~ BodyStmts are a bit tricky, because what they mean depends on the context. Consider the following contexts: A do expression of type (m res_ty) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E any_ty: do { ....; E; ... } E :: m any_ty Translation: E >> ... A list comprehensions of type [elt_ty] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E Bool: [ .. | .... E ] [ .. | ..., E, ... ] [ .. | .... | ..., E | ... ] E :: Bool Translation: if E then fail else ... A guard list, guarding a RHS of type rhs_ty ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E BooParStmtBlockl: f x | ..., E, ... = ...rhs... E :: Bool Translation: if E then fail else ... A monad comprehension of type (m res_ty) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * BodyStmt E Bool: [ .. | .... E ] E :: Bool Translation: guard E >> ... Array comprehensions are handled like list comprehensions. Note [How RecStmt works] ~~~~~~~~~~~~~~~~~~~~~~~~ Example: HsDo [ BindStmt x ex , RecStmt { recS_rec_ids = [a, c] , recS_stmts = [ BindStmt b (return (a,c)) , LetStmt a = ...b... , BindStmt c ec ] , recS_later_ids = [a, b] , return (a b) ] Here, the RecStmt binds a,b,c; but - Only a,b are used in the stmts *following* the RecStmt, - Only a,c are used in the stmts *inside* the RecStmt *before* their bindings Why do we need *both* rec_ids and later_ids? For monads they could be combined into a single set of variables, but not for arrows. 
That follows from the types of the respective feedback operators: mfix :: MonadFix m => (a -> m a) -> m a loop :: ArrowLoop a => a (b,d) (c,d) -> a b c * For mfix, the 'a' covers the union of the later_ids and the rec_ids * For 'loop', 'c' is the later_ids and 'd' is the rec_ids Note [Typing a RecStmt] ~~~~~~~~~~~~~~~~~~~~~~~ A (RecStmt stmts) types as if you had written (v1,..,vn, _, ..., _) <- mfix (\~(_, ..., _, r1, ..., rm) -> do { stmts ; return (v1,..vn, r1, ..., rm) }) where v1..vn are the later_ids r1..rm are the rec_ids Note [Monad Comprehensions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Monad comprehensions require separate functions like 'return' and '>>=' for desugaring. These functions are stored in the statements used in monad comprehensions. For example, the 'return' of the 'LastStmt' expression is used to lift the body of the monad comprehension: [ body | stmts ] => stmts >>= \bndrs -> return body In transform and grouping statements ('then ..' and 'then group ..') the 'return' function is required for nested monad comprehensions, for example: [ body | stmts, then f, rest ] => f [ env | stmts ] >>= \bndrs -> [ body | rest ] BodyStmts require the 'Control.Monad.guard' function for boolean expressions: [ body | exp, stmts ] => guard exp >> [ body | stmts ] Parallel statements require the 'Control.Monad.Zip.mzip' function: [ body | stmts1 | stmts2 | .. ] => mzip stmts1 (mzip stmts2 (..)) >>= \(bndrs1, (bndrs2, ..)) -> return body In any other context than 'MonadComp', the fields for most of these 'SyntaxExpr's stay bottom. Note [Applicative BodyStmt] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ (#12143) For the purposes of ApplicativeDo, we treat any BodyStmt as if it was a BindStmt with a wildcard pattern. 
For example, do x <- A B return x is transformed as if it were do x <- A _ <- B return x so it transforms to (\(x,_) -> x) <$> A <*> B But we have to remember when we treat a BodyStmt like a BindStmt, because in error messages we want to emit the original syntax the user wrote, not our internal representation. So ApplicativeArgOne has a Bool flag that is True when the original statement was a BodyStmt, so that we can pretty-print it correctly. -} {- ************************************************************************ * * Template Haskell quotation brackets * * ************************************************************************ -} -- | Haskell Splice data HsSplice id = HsTypedSplice -- $$z or $$(f 4) (XTypedSplice id) SpliceDecoration -- Whether $$( ) variant found, for pretty printing (IdP id) -- A unique name to identify this splice point (LHsExpr id) -- See Note [Pending Splices] | HsUntypedSplice -- $z or $(f 4) (XUntypedSplice id) SpliceDecoration -- Whether $( ) variant found, for pretty printing (IdP id) -- A unique name to identify this splice point (LHsExpr id) -- See Note [Pending Splices] | HsQuasiQuote -- See Note [Quasi-quote overview] in GHC.Tc.Gen.Splice (XQuasiQuote id) (IdP id) -- Splice point (IdP id) -- Quoter SrcSpan -- The span of the enclosed string FastString -- The enclosed string -- AZ:TODO: use XSplice instead of HsSpliced | HsSpliced -- See Note [Delaying modFinalizers in untyped splices] in -- GHC.Rename.Splice. -- This is the result of splicing a splice. It is produced by -- the renamer and consumed by the typechecker. It lives only -- between the two. (XSpliced id) ThModFinalizers -- TH finalizers produced by the splice. (HsSplicedThing id) -- The result of splicing | XSplice !(XXSplice id) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension -- | A splice can appear with various decorations wrapped around it. 
This data -- type captures explicitly how it was originally written, for use in the pretty -- printer. data SpliceDecoration = DollarSplice -- ^ $splice or $$splice | BareSplice -- ^ bare splice deriving (Data, Eq, Show) instance Outputable SpliceDecoration where ppr x = text $ show x isTypedSplice :: HsSplice id -> Bool isTypedSplice (HsTypedSplice {}) = True isTypedSplice _ = False -- Quasi-quotes are untyped splices -- | Finalizers produced by a splice with -- 'Language.Haskell.TH.Syntax.addModFinalizer' -- -- See Note [Delaying modFinalizers in untyped splices] in GHC.Rename.Splice. For how -- this is used. -- newtype ThModFinalizers = ThModFinalizers [ForeignRef (TH.Q ())] -- A Data instance which ignores the argument of 'ThModFinalizers'. instance Data ThModFinalizers where gunfold _ z _ = z $ ThModFinalizers [] toConstr a = mkConstr (dataTypeOf a) "ThModFinalizers" [] Data.Prefix dataTypeOf a = mkDataType "HsExpr.ThModFinalizers" [toConstr a] -- | Haskell Spliced Thing -- -- Values that can result from running a splice. 
data HsSplicedThing id = HsSplicedExpr (HsExpr id) -- ^ Haskell Spliced Expression | HsSplicedTy (HsType id) -- ^ Haskell Spliced Type | HsSplicedPat (Pat id) -- ^ Haskell Spliced Pattern data UntypedSpliceFlavour = UntypedExpSplice | UntypedPatSplice | UntypedTypeSplice | UntypedDeclSplice deriving Data -- | Haskell (Untyped) Quote = Expr + Pat + Type + Var data HsQuote p = ExpBr (XExpBr p) (LHsExpr p) -- [| expr |] | PatBr (XPatBr p) (LPat p) -- [p| pat |] | DecBrL (XDecBrL p) [LHsDecl p] -- [d| decls |]; result of parser | DecBrG (XDecBrG p) (HsGroup p) -- [d| decls |]; result of renamer | TypBr (XTypBr p) (LHsType p) -- [t| type |] | VarBr (XVarBr p) Bool (LIdP p) -- True: 'x, False: ''T | XQuote !(XXQuote p) -- Extension point; see Note [Trees That Grow] -- in Language.Haskell.Syntax.Extension {- ************************************************************************ * * \subsection{Enumerations and list comprehensions} * * ************************************************************************ -} -- | Arithmetic Sequence Information data ArithSeqInfo id = From (LHsExpr id) | FromThen (LHsExpr id) (LHsExpr id) | FromTo (LHsExpr id) (LHsExpr id) | FromThenTo (LHsExpr id) (LHsExpr id) (LHsExpr id) -- AZ: Should ArithSeqInfo have a TTG extension? {- ************************************************************************ * * \subsection{HsMatchCtxt} * * ************************************************************************ -} -- | Haskell Match Context -- -- Context of a pattern match. This is more subtle than it would seem. See -- Note [FunBind vs PatBind]. data HsMatchContext p = FunRhs -- ^ A pattern matching on an argument of a -- function binding { mc_fun :: LIdP (NoGhcTc p) -- ^ function binder of @f@ -- See Note [mc_fun field of FunRhs] -- See #20415 for a long discussion about -- this field and why it uses NoGhcTc. , mc_fixity :: LexicalFixity -- ^ fixing of @f@ , mc_strictness :: SrcStrictness -- ^ was @f@ banged? 
-- See Note [FunBind vs PatBind] } | LambdaExpr -- ^Patterns of a lambda | CaseAlt -- ^Patterns and guards in a case alternative | LamCaseAlt LamCaseVariant -- ^Patterns and guards in @\case@ and @\cases@ | IfAlt -- ^Guards of a multi-way if alternative | ArrowMatchCtxt -- ^A pattern match inside arrow notation HsArrowMatchContext | PatBindRhs -- ^A pattern binding eg [y] <- e = e | PatBindGuards -- ^Guards of pattern bindings, e.g., -- (Just b) | Just _ <- x = e -- | otherwise = e' | RecUpd -- ^Record update [used only in GHC.HsToCore.Expr to -- tell matchWrapper what sort of -- runtime error message to generate] | StmtCtxt (HsStmtContext p) -- ^Pattern of a do-stmt, list comprehension, -- pattern guard, etc | ThPatSplice -- ^A Template Haskell pattern splice | ThPatQuote -- ^A Template Haskell pattern quotation [p| (a,b) |] | PatSyn -- ^A pattern synonym declaration {- Note [mc_fun field of FunRhs] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The mc_fun field of FunRhs has type `LIdP (NoGhcTc p)`, which means it will be a `RdrName` in pass `GhcPs`, a `Name` in `GhcRn`, and (importantly) still a `Name` in `GhcTc` -- not an `Id`. See Note [NoGhcTc] in GHC.Hs.Extension. Why a `Name` in the typechecker phase? Because: * A `Name` is all we need, as it turns out. * Using an `Id` involves knot-tying in the monad, which led to #22695. See #20415 for a long discussion. -} isPatSynCtxt :: HsMatchContext p -> Bool isPatSynCtxt ctxt = case ctxt of PatSyn -> True _ -> False -- | Haskell Statement Context. data HsStmtContext p = HsDoStmt HsDoFlavour -- ^Context for HsDo (do-notation and comprehensions) | PatGuard (HsMatchContext p) -- ^Pattern guard for specified thing | ParStmtCtxt (HsStmtContext p) -- ^A branch of a parallel stmt | TransStmtCtxt (HsStmtContext p) -- ^A branch of a transform stmt | ArrowExpr -- ^do-notation in an arrow-command context -- | Haskell arrow match context. 
data HsArrowMatchContext = ProcExpr -- ^ A proc expression | ArrowCaseAlt -- ^ A case alternative inside arrow notation | ArrowLamCaseAlt LamCaseVariant -- ^ A \case or \cases alternative inside arrow notation | KappaExpr -- ^ An arrow kappa abstraction data HsDoFlavour = DoExpr (Maybe ModuleName) -- ^[ModuleName.]do { ... } | MDoExpr (Maybe ModuleName) -- ^[ModuleName.]mdo { ... } ie recursive do-expression | GhciStmtCtxt -- ^A command-line Stmt in GHCi pat <- rhs | ListComp | MonadComp qualifiedDoModuleName_maybe :: HsStmtContext p -> Maybe ModuleName qualifiedDoModuleName_maybe ctxt = case ctxt of HsDoStmt (DoExpr m) -> m HsDoStmt (MDoExpr m) -> m _ -> Nothing isComprehensionContext :: HsStmtContext id -> Bool -- Uses comprehension syntax [ e | quals ] isComprehensionContext (ParStmtCtxt c) = isComprehensionContext c isComprehensionContext (TransStmtCtxt c) = isComprehensionContext c isComprehensionContext ArrowExpr = False isComprehensionContext (PatGuard _) = False isComprehensionContext (HsDoStmt flavour) = isDoComprehensionContext flavour isDoComprehensionContext :: HsDoFlavour -> Bool isDoComprehensionContext GhciStmtCtxt = False isDoComprehensionContext (DoExpr _) = False isDoComprehensionContext (MDoExpr _) = False isDoComprehensionContext ListComp = True isDoComprehensionContext MonadComp = True -- | Is this a monadic context? 
isMonadStmtContext :: HsStmtContext id -> Bool isMonadStmtContext (ParStmtCtxt ctxt) = isMonadStmtContext ctxt isMonadStmtContext (TransStmtCtxt ctxt) = isMonadStmtContext ctxt isMonadStmtContext (HsDoStmt flavour) = isMonadDoStmtContext flavour isMonadStmtContext (PatGuard _) = False isMonadStmtContext ArrowExpr = False isMonadDoStmtContext :: HsDoFlavour -> Bool isMonadDoStmtContext ListComp = False isMonadDoStmtContext MonadComp = True isMonadDoStmtContext DoExpr{} = True isMonadDoStmtContext MDoExpr{} = True isMonadDoStmtContext GhciStmtCtxt = True isMonadCompContext :: HsStmtContext id -> Bool isMonadCompContext (HsDoStmt flavour) = isMonadDoCompContext flavour isMonadCompContext (ParStmtCtxt _) = False isMonadCompContext (TransStmtCtxt _) = False isMonadCompContext (PatGuard _) = False isMonadCompContext ArrowExpr = False isMonadDoCompContext :: HsDoFlavour -> Bool isMonadDoCompContext MonadComp = True isMonadDoCompContext ListComp = False isMonadDoCompContext GhciStmtCtxt = False isMonadDoCompContext (DoExpr _) = False isMonadDoCompContext (MDoExpr _) = False matchSeparator :: HsMatchContext p -> SDoc matchSeparator FunRhs{} = text "=" matchSeparator CaseAlt = text "->" matchSeparator LamCaseAlt{} = text "->" matchSeparator IfAlt = text "->" matchSeparator LambdaExpr = text "->" matchSeparator ArrowMatchCtxt{} = text "->" matchSeparator PatBindRhs = text "=" matchSeparator PatBindGuards = text "=" matchSeparator StmtCtxt{} = text "<-" matchSeparator RecUpd = text "=" -- This can be printed by the pattern -- match checker trace matchSeparator ThPatSplice = panic "unused" matchSeparator ThPatQuote = panic "unused" matchSeparator PatSyn = panic "unused" pprMatchContext :: (Outputable (IdP (NoGhcTc p)), UnXRec (NoGhcTc p)) => HsMatchContext p -> SDoc pprMatchContext ctxt | want_an ctxt = text "an" <+> pprMatchContextNoun ctxt | otherwise = text "a" <+> pprMatchContextNoun ctxt where want_an (FunRhs {}) = True -- Use "an" in front want_an (ArrowMatchCtxt 
ProcExpr) = True want_an (ArrowMatchCtxt KappaExpr) = True want_an _ = False pprMatchContextNoun :: forall p. (Outputable (IdP (NoGhcTc p)), UnXRec (NoGhcTc p)) => HsMatchContext p -> SDoc pprMatchContextNoun (FunRhs {mc_fun=fun}) = text "equation for" <+> quotes (ppr (unXRec @(NoGhcTc p) fun)) pprMatchContextNoun CaseAlt = text "case alternative" pprMatchContextNoun (LamCaseAlt lc_variant) = lamCaseKeyword lc_variant <+> text "alternative" pprMatchContextNoun IfAlt = text "multi-way if alternative" pprMatchContextNoun RecUpd = text "record-update construct" pprMatchContextNoun ThPatSplice = text "Template Haskell pattern splice" pprMatchContextNoun ThPatQuote = text "Template Haskell pattern quotation" pprMatchContextNoun PatBindRhs = text "pattern binding" pprMatchContextNoun PatBindGuards = text "pattern binding guards" pprMatchContextNoun LambdaExpr = text "lambda abstraction" pprMatchContextNoun (ArrowMatchCtxt c) = pprArrowMatchContextNoun c pprMatchContextNoun (StmtCtxt ctxt) = text "pattern binding in" $$ pprAStmtContext ctxt pprMatchContextNoun PatSyn = text "pattern synonym declaration" pprMatchContextNouns :: forall p. 
(Outputable (IdP (NoGhcTc p)), UnXRec (NoGhcTc p)) => HsMatchContext p -> SDoc pprMatchContextNouns (FunRhs {mc_fun=fun}) = text "equations for" <+> quotes (ppr (unXRec @(NoGhcTc p) fun)) pprMatchContextNouns PatBindGuards = text "pattern binding guards" pprMatchContextNouns (ArrowMatchCtxt c) = pprArrowMatchContextNouns c pprMatchContextNouns (StmtCtxt ctxt) = text "pattern bindings in" $$ pprAStmtContext ctxt pprMatchContextNouns ctxt = pprMatchContextNoun ctxt <> char 's' pprArrowMatchContextNoun :: HsArrowMatchContext -> SDoc pprArrowMatchContextNoun ProcExpr = text "arrow proc pattern" pprArrowMatchContextNoun ArrowCaseAlt = text "case alternative within arrow notation" pprArrowMatchContextNoun (ArrowLamCaseAlt lc_variant) = lamCaseKeyword lc_variant <+> text "alternative within arrow notation" pprArrowMatchContextNoun KappaExpr = text "arrow kappa abstraction" pprArrowMatchContextNouns :: HsArrowMatchContext -> SDoc pprArrowMatchContextNouns ArrowCaseAlt = text "case alternatives within arrow notation" pprArrowMatchContextNouns (ArrowLamCaseAlt lc_variant) = lamCaseKeyword lc_variant <+> text "alternatives within arrow notation" pprArrowMatchContextNouns ctxt = pprArrowMatchContextNoun ctxt <> char 's' ----------------- pprAStmtContext, pprStmtContext :: (Outputable (IdP (NoGhcTc p)), UnXRec (NoGhcTc p)) => HsStmtContext p -> SDoc pprAStmtContext (HsDoStmt flavour) = pprAHsDoFlavour flavour pprAStmtContext ctxt = text "a" <+> pprStmtContext ctxt ----------------- pprStmtContext (HsDoStmt flavour) = pprHsDoFlavour flavour pprStmtContext (PatGuard ctxt) = text "pattern guard for" $$ pprMatchContext ctxt pprStmtContext ArrowExpr = text "'do' block in an arrow command" -- Drop the inner contexts when reporting errors, else we get -- Unexpected transform statement -- in a transformed branch of -- transformed branch of -- transformed branch of monad comprehension pprStmtContext (ParStmtCtxt c) = ifPprDebug (sep [text "parallel branch of", pprAStmtContext c]) 
(pprStmtContext c) pprStmtContext (TransStmtCtxt c) = ifPprDebug (sep [text "transformed branch of", pprAStmtContext c]) (pprStmtContext c) pprAHsDoFlavour, pprHsDoFlavour :: HsDoFlavour -> SDoc pprAHsDoFlavour flavour = article <+> pprHsDoFlavour flavour where pp_an = text "an" pp_a = text "a" article = case flavour of MDoExpr Nothing -> pp_an GhciStmtCtxt -> pp_an _ -> pp_a pprHsDoFlavour (DoExpr m) = prependQualified m (text "'do' block") pprHsDoFlavour (MDoExpr m) = prependQualified m (text "'mdo' block") pprHsDoFlavour ListComp = text "list comprehension" pprHsDoFlavour MonadComp = text "monad comprehension" pprHsDoFlavour GhciStmtCtxt = text "interactive GHCi command" prependQualified :: Maybe ModuleName -> SDoc -> SDoc prependQualified Nothing t = t prependQualified (Just _) t = text "qualified" <+> t ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Extension.hs0000644000000000000000000005356714472400113024157 0ustar0000000000000000{-# LANGUAGE AllowAmbiguousTypes #-} -- for unXRec, etc. 
{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE EmptyCase #-} {-# LANGUAGE EmptyDataDeriving #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilyDependencies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Extension where -- This module captures the type families to precisely identify the extension -- points for GHC.Hs syntax import GHC.Prelude import GHC.TypeLits (Symbol, KnownSymbol) import Data.Data hiding ( Fixity ) import Data.Kind (Type) import GHC.Utils.Outputable {- Note [Trees That Grow] ~~~~~~~~~~~~~~~~~~~~~~ See https://gitlab.haskell.org/ghc/ghc/wikis/implementing-trees-that-grow The hsSyn AST is reused across multiple compiler passes. We also have the Template Haskell AST, and the haskell-src-exts one (outside of GHC) Supporting multiple passes means the AST has various warts on it to cope with the specifics for the phases, such as the 'ValBindsOut', 'ConPatOut', 'SigPatOut' etc. The growable AST will allow each of these variants to be captured explicitly, such that they only exist in the given compiler pass AST, as selected by the type parameter to the AST. In addition it will allow tool writers to define their own extensions to capture additional information for the tool, in a natural way. A further goal is to provide a means to harmonise the Template Haskell and haskell-src-exts ASTs as well. Wrinkle: In order to print out the AST, we need to know it is Outputable. We also sometimes need to branch on the particular pass that we're in (e.g. to print out type information once we know it). 
In order to allow both of these actions, we define OutputableBndrId, which gathers the necessary OutputableBndr and IsPass constraints. The use of this constraint in instances generally requires UndecidableInstances. See also Note [IsPass] and Note [NoGhcTc] in GHC.Hs.Extension. -} -- | A placeholder type for TTG extension points that are not currently -- unused to represent any particular value. -- -- This should not be confused with 'DataConCantHappen', which are found in unused -- extension /constructors/ and therefore should never be inhabited. In -- contrast, 'NoExtField' is used in extension /points/ (e.g., as the field of -- some constructor), so it must have an inhabitant to construct AST passes -- that manipulate fields with that extension point as their type. data NoExtField = NoExtField deriving (Data,Eq,Ord) instance Outputable NoExtField where ppr _ = text "NoExtField" -- | Used when constructing a term with an unused extension point. noExtField :: NoExtField noExtField = NoExtField {- Note [Constructor cannot occur] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some data constructors can't occur in certain phases; e.g. the output of the type checker never has OverLabel. We signal this by * setting the extension field to DataConCantHappen * using dataConCantHappen in the cases that can't happen For example: type instance XOverLabel GhcTc = DataConCantHappen dsExpr :: HsExpr GhcTc -> blah dsExpr (HsOverLabel x _) = dataConCantHappen x The function dataConCantHappen is defined thus: dataConCantHappen :: DataConCantHappen -> a dataConCantHappen x = case x of {} (i.e. identically to Data.Void.absurd, but more helpfully named). Remember DataConCantHappen is a type whose only element is bottom. This should not be confused with 'NoExtField', which are found in unused extension /points/ (not /constructors/) and therefore can be inhabited. It would be better to omit the pattern match altogether, but we can only do that if the extension field was strict (#18764). 
See also [DataConCantHappen and strict fields]. -} data DataConCantHappen deriving (Data,Eq,Ord) instance Outputable DataConCantHappen where ppr = dataConCantHappen -- | Eliminate a 'DataConCantHappen'. See Note [Constructor cannot occur]. dataConCantHappen :: DataConCantHappen -> a dataConCantHappen x = case x of {} -- | GHC's L prefixed variants wrap their vanilla variant in this type family, -- to add 'SrcLoc' info via 'Located'. Other passes than 'GhcPass' not -- interested in location information can define this as -- @type instance XRec NoLocated a = a@. -- See Note [XRec and SrcSpans in the AST] type family XRec p a = r | r -> a type family Anno a = b -- See Note [XRec and Anno in the AST] in GHC.Parser.Annotation {- Note [XRec and SrcSpans in the AST] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ XRec is meant to replace most of the uses of `Located` in the AST. It is another extension point meant to make it easier for non-GHC applications to reuse the AST for their own purposes, and not have to deal the hassle of (perhaps) useless SrcSpans everywhere. instead of `Located (HsExpr p)` or similar types, we will now have `XRec p (HsExpr p)` XRec allows annotating certain points in the AST with extra information. This maybe be source spans (for GHC), nothing (for TH), types (for HIE files), exact print annotations (for exactprint) or anything else. This should hopefully bring us one step closer to sharing the AST between GHC and TH. We use the `UnXRec`, `MapXRec` and `WrapXRec` type classes to aid us in writing pass-polymorphic code that deals with `XRec`s -} -- | We can strip off the XRec to access the underlying data. -- See Note [XRec and SrcSpans in the AST] class UnXRec p where unXRec :: XRec p a -> a -- | We can map over the underlying type contained in an @XRec@ while preserving -- the annotation as is. 
class MapXRec p where mapXRec :: (Anno a ~ Anno b) => (a -> b) -> XRec p a -> XRec p b -- See Note [XRec and SrcSpans in the AST] -- See Note [XRec and Anno in the AST] in GHC.Parser.Annotation -- AZ: Is there a way to not have Anno in this file, but still have MapXRec? -- Perhaps define XRec with an additional b parameter, only used in Hs as (Anno b)? -- | The trivial wrapper that carries no additional information -- See Note [XRec and SrcSpans in the AST] class WrapXRec p a where wrapXRec :: a -> XRec p a -- | Maps the "normal" id type for a given pass type family IdP p type LIdP p = XRec p (IdP p) -- ===================================================================== -- Type families for the HsBinds extension points -- HsLocalBindsLR type families type family XHsValBinds x x' type family XHsIPBinds x x' type family XEmptyLocalBinds x x' type family XXHsLocalBindsLR x x' -- HsValBindsLR type families type family XValBinds x x' type family XXValBindsLR x x' -- HsBindLR type families type family XFunBind x x' type family XPatBind x x' type family XVarBind x x' type family XPatSynBind x x' type family XXHsBindsLR x x' -- PatSynBind type families type family XPSB x x' type family XXPatSynBind x x' -- HsIPBinds type families type family XIPBinds x type family XXHsIPBinds x -- IPBind type families type family XCIPBind x type family XXIPBind x -- Sig type families type family XTypeSig x type family XPatSynSig x type family XClassOpSig x type family XIdSig x type family XFixSig x type family XInlineSig x type family XSpecSig x type family XSpecInstSig x type family XMinimalSig x type family XSCCFunSig x type family XCompleteMatchSig x type family XXSig x -- FixitySig type families type family XFixitySig x type family XXFixitySig x -- StandaloneKindSig type families type family XStandaloneKindSig x type family XXStandaloneKindSig x -- ===================================================================== -- Type families for the HsDecls extension points -- HsDecl type 
families type family XTyClD x type family XInstD x type family XDerivD x type family XValD x type family XSigD x type family XKindSigD x type family XDefD x type family XForD x type family XWarningD x type family XAnnD x type family XRuleD x type family XSpliceD x type family XDocD x type family XRoleAnnotD x type family XXHsDecl x -- ------------------------------------- -- HsGroup type families type family XCHsGroup x type family XXHsGroup x -- ------------------------------------- -- SpliceDecl type families type family XSpliceDecl x type family XXSpliceDecl x -- ------------------------------------- -- TyClDecl type families type family XFamDecl x type family XSynDecl x type family XDataDecl x type family XClassDecl x type family XXTyClDecl x -- ------------------------------------- -- FunDep type families type family XCFunDep x type family XXFunDep x -- ------------------------------------- -- TyClGroup type families type family XCTyClGroup x type family XXTyClGroup x -- ------------------------------------- -- FamilyResultSig type families type family XNoSig x type family XCKindSig x -- Clashes with XKindSig above type family XTyVarSig x type family XXFamilyResultSig x -- ------------------------------------- -- FamilyDecl type families type family XCFamilyDecl x type family XXFamilyDecl x -- ------------------------------------- -- HsDataDefn type families type family XCHsDataDefn x type family XXHsDataDefn x -- ------------------------------------- -- HsDerivingClause type families type family XCHsDerivingClause x type family XXHsDerivingClause x -- ------------------------------------- -- DerivClauseTys type families type family XDctSingle x type family XDctMulti x type family XXDerivClauseTys x -- ------------------------------------- -- ConDecl type families type family XConDeclGADT x type family XConDeclH98 x type family XXConDecl x -- ------------------------------------- -- FamEqn type families type family XCFamEqn x r type family XXFamEqn x r -- 
------------------------------------- -- TyFamInstDecl type families type family XCTyFamInstDecl x type family XXTyFamInstDecl x -- ------------------------------------- -- ClsInstDecl type families type family XCClsInstDecl x type family XXClsInstDecl x -- ------------------------------------- -- InstDecl type families type family XClsInstD x type family XDataFamInstD x type family XTyFamInstD x type family XXInstDecl x -- ------------------------------------- -- DerivDecl type families type family XCDerivDecl x type family XXDerivDecl x -- ------------------------------------- -- DerivStrategy type family type family XStockStrategy x type family XAnyClassStrategy x type family XNewtypeStrategy x type family XViaStrategy x -- ------------------------------------- -- DefaultDecl type families type family XCDefaultDecl x type family XXDefaultDecl x -- ------------------------------------- -- ForeignDecl type families type family XForeignImport x type family XForeignExport x type family XXForeignDecl x -- ------------------------------------- -- RuleDecls type families type family XCRuleDecls x type family XXRuleDecls x -- ------------------------------------- -- RuleDecl type families type family XHsRule x type family XXRuleDecl x -- ------------------------------------- -- RuleBndr type families type family XCRuleBndr x type family XRuleBndrSig x type family XXRuleBndr x -- ------------------------------------- -- WarnDecls type families type family XWarnings x type family XXWarnDecls x -- ------------------------------------- -- WarnDecl type families type family XWarning x type family XXWarnDecl x -- ------------------------------------- -- AnnDecl type families type family XHsAnnotation x type family XXAnnDecl x -- ------------------------------------- -- RoleAnnotDecl type families type family XCRoleAnnotDecl x type family XXRoleAnnotDecl x -- ------------------------------------- -- InjectivityAnn type families type family XCInjectivityAnn x type family 
XXInjectivityAnn x -- ===================================================================== -- Type families for the HsExpr extension points type family XVar x type family XUnboundVar x type family XRecSel x type family XOverLabel x type family XIPVar x type family XOverLitE x type family XLitE x type family XLam x type family XLamCase x type family XApp x type family XAppTypeE x type family XOpApp x type family XNegApp x type family XPar x type family XSectionL x type family XSectionR x type family XExplicitTuple x type family XExplicitSum x type family XCase x type family XIf x type family XMultiIf x type family XLet x type family XDo x type family XExplicitList x type family XRecordCon x type family XRecordUpd x type family XGetField x type family XProjection x type family XExprWithTySig x type family XArithSeq x type family XTypedBracket x type family XUntypedBracket x type family XSpliceE x type family XProc x type family XStatic x type family XTick x type family XBinTick x type family XPragE x type family XXExpr x -- ------------------------------------- -- DotFieldOcc type families type family XCDotFieldOcc x type family XXDotFieldOcc x -- ------------------------------------- -- HsPragE type families type family XSCC x type family XXPragE x -- ------------------------------------- -- AmbiguousFieldOcc type families type family XUnambiguous x type family XAmbiguous x type family XXAmbiguousFieldOcc x -- ------------------------------------- -- HsTupArg type families type family XPresent x type family XMissing x type family XXTupArg x -- ------------------------------------- -- HsSplice type families type family XTypedSplice x type family XUntypedSplice x type family XQuasiQuote x type family XSpliced x type family XXSplice x -- ------------------------------------- -- HsQuoteBracket type families type family XExpBr x type family XPatBr x type family XDecBrL x type family XDecBrG x type family XTypBr x type family XVarBr x type family XXQuote x -- 
------------------------------------- -- HsCmdTop type families type family XCmdTop x type family XXCmdTop x -- ------------------------------------- -- MatchGroup type families type family XMG x b type family XXMatchGroup x b -- ------------------------------------- -- Match type families type family XCMatch x b type family XXMatch x b -- ------------------------------------- -- GRHSs type families type family XCGRHSs x b type family XXGRHSs x b -- ------------------------------------- -- GRHS type families type family XCGRHS x b type family XXGRHS x b -- ------------------------------------- -- StmtLR type families type family XLastStmt x x' b type family XBindStmt x x' b type family XApplicativeStmt x x' b type family XBodyStmt x x' b type family XLetStmt x x' b type family XParStmt x x' b type family XTransStmt x x' b type family XRecStmt x x' b type family XXStmtLR x x' b -- ------------------------------------- -- HsCmd type families type family XCmdArrApp x type family XCmdArrForm x type family XCmdApp x type family XCmdLam x type family XCmdPar x type family XCmdCase x type family XCmdLamCase x type family XCmdIf x type family XCmdLet x type family XCmdDo x type family XCmdWrap x type family XXCmd x -- ------------------------------------- -- ParStmtBlock type families type family XParStmtBlock x x' type family XXParStmtBlock x x' -- ------------------------------------- -- ApplicativeArg type families type family XApplicativeArgOne x type family XApplicativeArgMany x type family XXApplicativeArg x -- ===================================================================== -- Type families for the HsImpExp extension points -- TODO -- ===================================================================== -- Type families for the HsLit extension points -- We define a type family for each extension point. This is based on prepending -- 'X' to the constructor name, for ease of reference. 
type family XHsChar x type family XHsCharPrim x type family XHsString x type family XHsStringPrim x type family XHsInt x type family XHsIntPrim x type family XHsWordPrim x type family XHsInt64Prim x type family XHsWord64Prim x type family XHsInteger x type family XHsRat x type family XHsFloatPrim x type family XHsDoublePrim x type family XXLit x -- ------------------------------------- -- HsOverLit type families type family XOverLit x type family XXOverLit x -- ===================================================================== -- Type families for the HsPat extension points type family XWildPat x type family XVarPat x type family XLazyPat x type family XAsPat x type family XParPat x type family XBangPat x type family XListPat x type family XTuplePat x type family XSumPat x type family XConPat x type family XViewPat x type family XSplicePat x type family XLitPat x type family XNPat x type family XNPlusKPat x type family XSigPat x type family XCoPat x type family XXPat x type family XHsFieldBind x -- ===================================================================== -- Type families for the HsTypes type families -- ------------------------------------- -- LHsQTyVars type families type family XHsQTvs x type family XXLHsQTyVars x -- ------------------------------------- -- HsOuterTyVarBndrs type families type family XHsOuterImplicit x type family XHsOuterExplicit x flag type family XXHsOuterTyVarBndrs x -- ------------------------------------- -- HsSigType type families type family XHsSig x type family XXHsSigType x -- ------------------------------------- -- HsWildCardBndrs type families type family XHsWC x b type family XXHsWildCardBndrs x b -- ------------------------------------- -- HsPatSigType type families type family XHsPS x type family XXHsPatSigType x -- ------------------------------------- -- HsType type families type family XForAllTy x type family XQualTy x type family XTyVar x type family XAppTy x type family XAppKindTy x type family XFunTy x type 
family XListTy x type family XTupleTy x type family XSumTy x type family XOpTy x type family XParTy x type family XIParamTy x type family XStarTy x type family XKindSig x type family XSpliceTy x type family XDocTy x type family XBangTy x type family XRecTy x type family XExplicitListTy x type family XExplicitTupleTy x type family XTyLit x type family XWildCardTy x type family XXType x -- --------------------------------------------------------------------- -- HsForAllTelescope type families type family XHsForAllVis x type family XHsForAllInvis x type family XXHsForAllTelescope x -- --------------------------------------------------------------------- -- HsTyVarBndr type families type family XUserTyVar x type family XKindedTyVar x type family XXTyVarBndr x -- --------------------------------------------------------------------- -- ConDeclField type families type family XConDeclField x type family XXConDeclField x -- --------------------------------------------------------------------- -- FieldOcc type families type family XCFieldOcc x type family XXFieldOcc x -- ===================================================================== -- Type families for the HsImpExp type families -- ------------------------------------- -- ImportDecl type families type family XCImportDecl x type family XXImportDecl x -- ------------------------------------- -- IE type families type family XIEVar x type family XIEThingAbs x type family XIEThingAll x type family XIEThingWith x type family XIEModuleContents x type family XIEGroup x type family XIEDoc x type family XIEDocNamed x type family XXIE x -- ------------------------------------- -- ===================================================================== -- Misc -- | See Note [NoGhcTc] in GHC.Hs.Extension. It has to be in this -- module because it is used like an extension point (in the data definitions -- of types that should be parameter-agnostic. 
type family NoGhcTc (p :: Type) -- ===================================================================== -- End of Type family definitions -- ===================================================================== -- ===================================================================== -- Token information type LHsToken tok p = XRec p (HsToken tok) data HsToken (tok :: Symbol) = HsTok deriving instance KnownSymbol tok => Data (HsToken tok) type LHsUniToken tok utok p = XRec p (HsUniToken tok utok) -- With UnicodeSyntax, there might be multiple ways to write the same token. -- For example an arrow could be either "->" or "→". This choice must be -- recorded in order to exactprint such tokens, -- so instead of HsToken "->" we introduce HsUniToken "->" "→". -- -- See also IsUnicodeSyntax in GHC.Parser.Annotation; we do not use here to -- avoid a dependency. data HsUniToken (tok :: Symbol) (utok :: Symbol) = HsNormalTok | HsUnicodeTok deriving instance (KnownSymbol tok, KnownSymbol utok) => Data (HsUniToken tok utok) ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Lit.hs0000644000000000000000000001577414472400113022731 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* -- | Source-language literals module Language.Haskell.Syntax.Lit where import GHC.Prelude import GHC.Types.Basic (PprPrec(..), topPrec ) import GHC.Types.SourceText import GHC.Core.Type import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.FastString import Language.Haskell.Syntax.Extension import Data.ByteString (ByteString) import Data.Data hiding ( 
Fixity ) {- ************************************************************************ * * \subsection[HsLit]{Literals} * * ************************************************************************ -} -- Note [Literal source text] in GHC.Types.Basic for SourceText fields in -- the following -- Note [Trees That Grow] in Language.Haskell.Syntax.Extension for the Xxxxx -- fields in the following -- | Haskell Literal data HsLit x = HsChar (XHsChar x) {- SourceText -} Char -- ^ Character | HsCharPrim (XHsCharPrim x) {- SourceText -} Char -- ^ Unboxed character | HsString (XHsString x) {- SourceText -} FastString -- ^ String | HsStringPrim (XHsStringPrim x) {- SourceText -} !ByteString -- ^ Packed bytes | HsInt (XHsInt x) IntegralLit -- ^ Genuinely an Int; arises from -- "GHC.Tc.Deriv.Generate", and from TRANSLATION | HsIntPrim (XHsIntPrim x) {- SourceText -} Integer -- ^ literal @Int#@ | HsWordPrim (XHsWordPrim x) {- SourceText -} Integer -- ^ literal @Word#@ | HsInt64Prim (XHsInt64Prim x) {- SourceText -} Integer -- ^ literal @Int64#@ | HsWord64Prim (XHsWord64Prim x) {- SourceText -} Integer -- ^ literal @Word64#@ | HsInteger (XHsInteger x) {- SourceText -} Integer Type -- ^ Genuinely an integer; arises only -- from TRANSLATION (overloaded -- literals are done with HsOverLit) | HsRat (XHsRat x) FractionalLit Type -- ^ Genuinely a rational; arises only from -- TRANSLATION (overloaded literals are -- done with HsOverLit) | HsFloatPrim (XHsFloatPrim x) FractionalLit -- ^ Unboxed Float | HsDoublePrim (XHsDoublePrim x) FractionalLit -- ^ Unboxed Double | XLit !(XXLit x) instance Eq (HsLit x) where (HsChar _ x1) == (HsChar _ x2) = x1==x2 (HsCharPrim _ x1) == (HsCharPrim _ x2) = x1==x2 (HsString _ x1) == (HsString _ x2) = x1==x2 (HsStringPrim _ x1) == (HsStringPrim _ x2) = x1==x2 (HsInt _ x1) == (HsInt _ x2) = x1==x2 (HsIntPrim _ x1) == (HsIntPrim _ x2) = x1==x2 (HsWordPrim _ x1) == (HsWordPrim _ x2) = x1==x2 (HsInt64Prim _ x1) == (HsInt64Prim _ x2) = x1==x2 (HsWord64Prim _ x1) 
== (HsWord64Prim _ x2) = x1==x2 (HsInteger _ x1 _) == (HsInteger _ x2 _) = x1==x2 (HsRat _ x1 _) == (HsRat _ x2 _) = x1==x2 (HsFloatPrim _ x1) == (HsFloatPrim _ x2) = x1==x2 (HsDoublePrim _ x1) == (HsDoublePrim _ x2) = x1==x2 _ == _ = False -- | Haskell Overloaded Literal data HsOverLit p = OverLit { ol_ext :: (XOverLit p), ol_val :: OverLitVal} | XOverLit !(XXOverLit p) -- Note [Literal source text] in GHC.Types.Basic for SourceText fields in -- the following -- | Overloaded Literal Value data OverLitVal = HsIntegral !IntegralLit -- ^ Integer-looking literals; | HsFractional !FractionalLit -- ^ Frac-looking literals | HsIsString !SourceText !FastString -- ^ String-looking literals deriving Data negateOverLitVal :: OverLitVal -> OverLitVal negateOverLitVal (HsIntegral i) = HsIntegral (negateIntegralLit i) negateOverLitVal (HsFractional f) = HsFractional (negateFractionalLit f) negateOverLitVal _ = panic "negateOverLitVal: argument is not a number" -- Comparison operations are needed when grouping literals -- for compiling pattern-matching (module GHC.HsToCore.Match.Literal) instance (Eq (XXOverLit p)) => Eq (HsOverLit p) where (OverLit _ val1) == (OverLit _ val2) = val1 == val2 (XOverLit val1) == (XOverLit val2) = val1 == val2 _ == _ = panic "Eq HsOverLit" instance Eq OverLitVal where (HsIntegral i1) == (HsIntegral i2) = i1 == i2 (HsFractional f1) == (HsFractional f2) = f1 == f2 (HsIsString _ s1) == (HsIsString _ s2) = s1 == s2 _ == _ = False instance (Ord (XXOverLit p)) => Ord (HsOverLit p) where compare (OverLit _ val1) (OverLit _ val2) = val1 `compare` val2 compare (XOverLit val1) (XOverLit val2) = val1 `compare` val2 compare _ _ = panic "Ord HsOverLit" instance Ord OverLitVal where compare (HsIntegral i1) (HsIntegral i2) = i1 `compare` i2 compare (HsIntegral _) (HsFractional _) = LT compare (HsIntegral _) (HsIsString _ _) = LT compare (HsFractional f1) (HsFractional f2) = f1 `compare` f2 compare (HsFractional _) (HsIntegral _) = GT compare (HsFractional _) 
(HsIsString _ _) = LT compare (HsIsString _ s1) (HsIsString _ s2) = s1 `lexicalCompareFS` s2 compare (HsIsString _ _) (HsIntegral _) = GT compare (HsIsString _ _) (HsFractional _) = GT instance Outputable OverLitVal where ppr (HsIntegral i) = pprWithSourceText (il_text i) (integer (il_value i)) ppr (HsFractional f) = ppr f ppr (HsIsString st s) = pprWithSourceText st (pprHsString s) -- | @'hsLitNeedsParens' p l@ returns 'True' if a literal @l@ needs -- to be parenthesized under precedence @p@. hsLitNeedsParens :: PprPrec -> HsLit x -> Bool hsLitNeedsParens p = go where go (HsChar {}) = False go (HsCharPrim {}) = False go (HsString {}) = False go (HsStringPrim {}) = False go (HsInt _ x) = p > topPrec && il_neg x go (HsIntPrim _ x) = p > topPrec && x < 0 go (HsWordPrim {}) = False go (HsInt64Prim _ x) = p > topPrec && x < 0 go (HsWord64Prim {}) = False go (HsInteger _ x _) = p > topPrec && x < 0 go (HsRat _ x _) = p > topPrec && fl_neg x go (HsFloatPrim _ x) = p > topPrec && fl_neg x go (HsDoublePrim _ x) = p > topPrec && fl_neg x go (XLit _) = False -- | @'hsOverLitNeedsParens' p ol@ returns 'True' if an overloaded literal -- @ol@ needs to be parenthesized under precedence @p@. 
hsOverLitNeedsParens :: PprPrec -> HsOverLit x -> Bool hsOverLitNeedsParens p (OverLit { ol_val = olv }) = go olv where go :: OverLitVal -> Bool go (HsIntegral x) = p > topPrec && il_neg x go (HsFractional x) = p > topPrec && fl_neg x go (HsIsString {}) = False hsOverLitNeedsParens _ (XOverLit { }) = False ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Pat.hs0000644000000000000000000003510614472400113022714 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# LANGUAGE ViewPatterns #-} {-# LANGUAGE DataKinds #-} {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 \section[PatSyntax]{Abstract Haskell syntax---patterns} -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Pat ( Pat(..), LPat, ConLikeP, HsConPatDetails, hsConPatArgs, HsRecFields(..), HsFieldBind(..), LHsFieldBind, HsRecField, LHsRecField, HsRecUpdField, LHsRecUpdField, hsRecFields, hsRecFieldSel, hsRecFieldsArgs, ) where import GHC.Prelude import {-# SOURCE #-} Language.Haskell.Syntax.Expr (SyntaxExpr, LHsExpr, HsSplice) -- friends: import Language.Haskell.Syntax.Lit import Language.Haskell.Syntax.Extension import Language.Haskell.Syntax.Type import GHC.Types.Basic -- others: import GHC.Core.Ppr ( {- instance OutputableBndr TyVar -} ) import GHC.Utils.Outputable import GHC.Types.SrcLoc -- libraries: type LPat p = XRec p (Pat p) -- | Pattern -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnBang' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data Pat p = ------------ Simple patterns --------------- WildPat 
(XWildPat p) -- ^ Wildcard Pattern -- The sole reason for a type on a WildPat is to -- support hsPatType :: Pat Id -> Type -- AZ:TODO above comment needs to be updated | VarPat (XVarPat p) (LIdP p) -- ^ Variable Pattern -- See Note [Located RdrNames] in GHC.Hs.Expr | LazyPat (XLazyPat p) (LPat p) -- ^ Lazy Pattern -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnTilde' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | AsPat (XAsPat p) (LIdP p) (LPat p) -- ^ As pattern -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnAt' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | ParPat (XParPat p) !(LHsToken "(" p) (LPat p) -- ^ Parenthesised pattern !(LHsToken ")" p) -- See Note [Parens in HsSyn] in GHC.Hs.Expr -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'('@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | BangPat (XBangPat p) (LPat p) -- ^ Bang pattern -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnBang' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation ------------ Lists, tuples, arrays --------------- | ListPat (XListPat p) [LPat p] -- ^ Syntactic List -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'['@, -- 'GHC.Parser.Annotation.AnnClose' @']'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | TuplePat (XTuplePat p) -- after typechecking, holds the types of the tuple components [LPat p] -- Tuple sub-patterns Boxity -- UnitPat is TuplePat [] -- You might think that the post typechecking Type was redundant, -- because we can get the pattern type by getting the types of the -- sub-patterns. 
-- But it's essential -- data T a where -- T1 :: Int -> T Int -- f :: (T a, a) -> Int -- f (T1 x, z) = z -- When desugaring, we must generate -- f = /\a. \v::a. case v of (t::T a, w::a) -> -- case t of (T1 (x::Int)) -> -- Note the (w::a), NOT (w::Int), because we have not yet -- refined 'a' to Int. So we must know that the second component -- of the tuple is of type 'a' not Int. See selectMatchVar -- (June 14: I'm not sure this comment is right; the sub-patterns -- will be wrapped in CoPats, no?) -- ^ Tuple sub-patterns -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpen' @'('@ or @'(#'@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ or @'#)'@ | SumPat (XSumPat p) -- after typechecker, types of the alternative (LPat p) -- Sum sub-pattern ConTag -- Alternative (one-based) Arity -- Arity (INVARIANT: ≥ 2) -- ^ Anonymous sum pattern -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpen' @'(#'@, -- 'GHC.Parser.Annotation.AnnClose' @'#)'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation ------------ Constructor patterns --------------- | ConPat { pat_con_ext :: XConPat p, pat_con :: XRec p (ConLikeP p), pat_args :: HsConPatDetails p } -- ^ Constructor Pattern ------------ View patterns --------------- -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnRarrow' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | ViewPat (XViewPat p) (LHsExpr p) (LPat p) -- ^ View Pattern ------------ Pattern splices --------------- -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'$('@ -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | SplicePat (XSplicePat p) (HsSplice p) -- ^ Splice Pattern (Includes quasi-quotes) ------------ Literal and n+k patterns --------------- | LitPat (XLitPat p) (HsLit p) -- ^ Literal Pattern -- Used for 
*non-overloaded* literal patterns: -- Int#, Char#, Int, Char, String, etc. | NPat -- Natural Pattern -- Used for all overloaded literals, -- including overloaded strings with -XOverloadedStrings (XNPat p) -- Overall type of pattern. Might be -- different than the literal's type -- if (==) or negate changes the type (XRec p (HsOverLit p)) -- ALWAYS positive (Maybe (SyntaxExpr p)) -- Just (Name of 'negate') for -- negative patterns, Nothing -- otherwise (SyntaxExpr p) -- Equality checker, of type t->t->Bool -- ^ Natural Pattern -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnVal' @'+'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | NPlusKPat (XNPlusKPat p) -- Type of overall pattern (LIdP p) -- n+k pattern (XRec p (HsOverLit p)) -- It'll always be an HsIntegral (HsOverLit p) -- See Note [NPlusK patterns] in GHC.Tc.Gen.Pat -- NB: This could be (PostTc ...), but that induced a -- a new hs-boot file. Not worth it. (SyntaxExpr p) -- (>=) function, of type t1->t2->Bool (SyntaxExpr p) -- Name of '-' (see GHC.Rename.Env.lookupSyntax) -- ^ n+k pattern ------------ Pattern type signatures --------------- -- | - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | SigPat (XSigPat p) -- After typechecker: Type (LPat p) -- Pattern with a type signature (HsPatSigType (NoGhcTc p)) -- Signature can bind both -- kind and type vars -- ^ Pattern with a type signature -- Extension point; see Note [Trees That Grow] in Language.Haskell.Syntax.Extension | XPat !(XXPat p) type family ConLikeP x -- --------------------------------------------------------------------- -- | Haskell Constructor Pattern Details type HsConPatDetails p = HsConDetails (HsPatSigType (NoGhcTc p)) (LPat p) (HsRecFields p (LPat p)) hsConPatArgs :: forall p . 
(UnXRec p) => HsConPatDetails p -> [LPat p] hsConPatArgs (PrefixCon _ ps) = ps hsConPatArgs (RecCon fs) = map (hfbRHS . unXRec @p) (rec_flds fs) hsConPatArgs (InfixCon p1 p2) = [p1,p2] -- | Haskell Record Fields -- -- HsRecFields is used only for patterns and expressions (not data type -- declarations) data HsRecFields p arg -- A bunch of record fields -- { x = 3, y = True } -- Used for both expressions and patterns = HsRecFields { rec_flds :: [LHsRecField p arg], rec_dotdot :: Maybe (Located Int) } -- Note [DotDot fields] -- AZ:The XRec for LHsRecField makes the derivings fail. -- deriving (Functor, Foldable, Traversable) -- Note [DotDot fields] -- ~~~~~~~~~~~~~~~~~~~~ -- The rec_dotdot field means this: -- Nothing => the normal case -- Just n => the group uses ".." notation, -- -- In the latter case: -- -- *before* renamer: rec_flds are exactly the n user-written fields -- -- *after* renamer: rec_flds includes *all* fields, with -- the first 'n' being the user-written ones -- and the remainder being 'filled in' implicitly -- | Located Haskell Record Field type LHsFieldBind p id arg = XRec p (HsFieldBind id arg) -- | Located Haskell Record Field type LHsRecField p arg = XRec p (HsRecField p arg) -- | Located Haskell Record Update Field type LHsRecUpdField p = XRec p (HsRecUpdField p) -- | Haskell Record Field type HsRecField p arg = HsFieldBind (LFieldOcc p) arg -- | Haskell Record Update Field type HsRecUpdField p = HsFieldBind (LAmbiguousFieldOcc p) (LHsExpr p) -- | Haskell Field Binding -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnEqual', -- -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation data HsFieldBind lhs rhs = HsFieldBind { hfbAnn :: XHsFieldBind lhs, hfbLHS :: lhs, hfbRHS :: rhs, -- ^ Filled in by renamer when punning hfbPun :: Bool -- ^ Note [Punning] } deriving (Functor, Foldable, Traversable) -- Note [Punning] -- ~~~~~~~~~~~~~~ -- If you write T { x, y = v+1 }, the HsRecFields will be -- 
HsRecField x x True ... -- HsRecField y (v+1) False ... -- That is, for "punned" field x is expanded (in the renamer) -- to x=x; but with a punning flag so we can detect it later -- (e.g. when pretty printing) -- -- If the original field was qualified, we un-qualify it, thus -- T { A.x } means T { A.x = x } -- Note [HsRecField and HsRecUpdField] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- A HsRecField (used for record construction and pattern matching) -- contains an unambiguous occurrence of a field (i.e. a FieldOcc). -- We can't just store the Name, because thanks to -- DuplicateRecordFields this may not correspond to the label the user -- wrote. -- -- A HsRecUpdField (used for record update) contains a potentially -- ambiguous occurrence of a field (an AmbiguousFieldOcc). The -- renamer will fill in the selector function if it can, but if the -- selector is ambiguous the renamer will defer to the typechecker. -- After the typechecker, a unique selector will have been determined. -- -- The renamer produces an Unambiguous result if it can, rather than -- just doing the lookup in the typechecker, so that completely -- unambiguous updates can be represented by 'GHC.HsToCore.Quote.repUpdFields'. -- -- For example, suppose we have: -- -- data S = MkS { x :: Int } -- data T = MkT { x :: Int } -- -- f z = (z { x = 3 }) :: S -- -- The parsed HsRecUpdField corresponding to the record update will have: -- -- hfbLHS = Unambiguous "x" noExtField :: AmbiguousFieldOcc RdrName -- -- After the renamer, this will become: -- -- hfbLHS = Ambiguous "x" noExtField :: AmbiguousFieldOcc Name -- -- (note that the Unambiguous constructor is not type-correct here). -- The typechecker will determine the particular selector: -- -- hfbLHS = Unambiguous "x" $sel:x:MkS :: AmbiguousFieldOcc Id -- -- See also Note [Disambiguating record fields] in GHC.Tc.Gen.Head. hsRecFields :: forall p arg.UnXRec p => HsRecFields p arg -> [XCFieldOcc p] hsRecFields rbinds = map (hsRecFieldSel . 
unXRec @p) (rec_flds rbinds) hsRecFieldsArgs :: forall p arg. UnXRec p => HsRecFields p arg -> [arg] hsRecFieldsArgs rbinds = map (hfbRHS . unXRec @p) (rec_flds rbinds) hsRecFieldSel :: forall p arg. UnXRec p => HsRecField p arg -> XCFieldOcc p hsRecFieldSel = foExt . unXRec @p . hfbLHS {- ************************************************************************ * * * Printing patterns * * ************************************************************************ -} instance (Outputable arg, Outputable (XRec p (HsRecField p arg))) => Outputable (HsRecFields p arg) where ppr (HsRecFields { rec_flds = flds, rec_dotdot = Nothing }) = braces (fsep (punctuate comma (map ppr flds))) ppr (HsRecFields { rec_flds = flds, rec_dotdot = Just (unLoc -> n) }) = braces (fsep (punctuate comma (map ppr (take n flds) ++ [dotdot]))) where dotdot = text ".." <+> whenPprDebug (ppr (drop n flds)) instance (Outputable p, OutputableBndr p, Outputable arg) => Outputable (HsFieldBind p arg) where ppr (HsFieldBind { hfbLHS = f, hfbRHS = arg, hfbPun = pun }) = pprPrefixOcc f <+> (ppUnless pun $ equals <+> ppr arg) ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Type.hs0000644000000000000000000015435314472400113023117 0ustar0000000000000000 {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {- (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 GHC.Hs.Type: Abstract syntax: user-defined types -} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Type ( Mult, HsScaled(..), hsMult, hsScaledThing, HsArrow(..), 
HsLinearArrowTokens(..), HsType(..), HsCoreTy, LHsType, HsKind, LHsKind, HsForAllTelescope(..), HsTyVarBndr(..), LHsTyVarBndr, LHsQTyVars(..), HsOuterTyVarBndrs(..), HsOuterFamEqnTyVarBndrs, HsOuterSigTyVarBndrs, HsWildCardBndrs(..), HsPatSigType(..), HsPSRn(..), HsSigType(..), LHsSigType, LHsSigWcType, LHsWcType, HsTupleSort(..), HsContext, LHsContext, HsTyLit(..), HsIPName(..), hsIPNameFS, HsArg(..), numVisibleArgs, pprHsArgsApp, LHsTypeArg, LBangType, BangType, HsSrcBang(..), HsImplBang(..), SrcStrictness(..), SrcUnpackedness(..), ConDeclField(..), LConDeclField, HsConDetails(..), noTypeArgs, FieldOcc(..), LFieldOcc, AmbiguousFieldOcc(..), LAmbiguousFieldOcc, mapHsOuterImplicit, hsQTvExplicit, isHsKindedTyVar, hsPatSigType, ) where import GHC.Prelude import {-# SOURCE #-} Language.Haskell.Syntax.Expr ( HsSplice ) import Language.Haskell.Syntax.Extension import GHC.Types.SourceText import GHC.Types.Name( Name ) import GHC.Types.Name.Reader ( RdrName ) import GHC.Core.DataCon( HsSrcBang(..), HsImplBang(..), SrcStrictness(..), SrcUnpackedness(..) ) import GHC.Core.Type import GHC.Hs.Doc import GHC.Types.Basic import GHC.Types.Fixity import GHC.Types.SrcLoc import GHC.Utils.Outputable import GHC.Data.FastString import GHC.Utils.Misc ( count ) import GHC.Parser.Annotation import Data.Data hiding ( Fixity, Prefix, Infix ) import Data.Void {- ************************************************************************ * * \subsection{Bang annotations} * * ************************************************************************ -} -- | Located Bang Type type LBangType pass = XRec pass (BangType pass) -- | Bang Type -- -- In the parser, strictness and packedness annotations bind more tightly -- than docstrings. This means that when consuming a 'BangType' (and looking -- for 'HsBangTy') we must be ready to peer behind a potential layer of -- 'HsDocTy'. See #15206 for motivation and 'getBangType' for an example. 
type BangType pass = HsType pass -- Bangs are in the HsType data type {- ************************************************************************ * * \subsection{Data types} * * ************************************************************************ This is the syntax for types as seen in type signatures. Note [HsBSig binder lists] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a binder (or pattern) decorated with a type or kind, \ (x :: a -> a). blah forall (a :: k -> *) (b :: k). blah Then we use a LHsBndrSig on the binder, so that the renamer can decorate it with the variables bound by the pattern ('a' in the first example, 'k' in the second), assuming that neither of them is in scope already See also Note [Kind and type-variable binders] in GHC.Rename.HsType Note [HsType binders] ~~~~~~~~~~~~~~~~~~~~~ The system for recording type and kind-variable binders in HsTypes is a bit complicated. Here's how it works. * In a HsType, HsForAllTy represents an /explicit, user-written/ 'forall' that is nested within another HsType e.g. forall a b. {...} or forall a b -> {...} Note that top-level 'forall's are represented with a different AST form. See the description of HsOuterTyVarBndrs below. HsQualTy represents an /explicit, user-written/ context e.g. (Eq a, Show a) => ... The context can be empty if that's what the user wrote These constructors represent what the user wrote, no more and no less. * The ForAllTelescope field of HsForAllTy represents whether a forall is invisible (e.g., forall a b. {...}, with a dot) or visible (e.g., forall a b -> {...}, with an arrow). * HsTyVarBndr describes a quantified type variable written by the user. For example f :: forall a (b :: *). blah here 'a' and '(b::*)' are each a HsTyVarBndr. A HsForAllTy has a list of LHsTyVarBndrs. * HsOuterTyVarBndrs is used to represent the outermost quantified type variables in a type that obeys the forall-or-nothing rule. 
An HsOuterTyVarBndrs can be one of the following: HsOuterImplicit (implicit quantification, added by renamer) f :: a -> a -- Desugars to f :: forall {a}. a -> a HsOuterExplicit (explicit user quantifiation): f :: forall a. a -> a See Note [forall-or-nothing rule]. * An HsSigType is an LHsType with an accompanying HsOuterTyVarBndrs that represents the presence (or absence) of its outermost 'forall'. See Note [Representing type signatures]. * HsWildCardBndrs is a wrapper that binds the wildcard variables of the wrapped thing. It is filled in by the renamer f :: _a -> _ The enclosing HsWildCardBndrs binds the wildcards _a and _. * HsSigPatType describes types that appear in pattern signatures and the signatures of term-level binders in RULES. Like HsWildCardBndrs/HsOuterTyVarBndrs, they track the names of wildcard variables and implicitly bound type variables. Unlike HsOuterTyVarBndrs, however, HsSigPatTypes do not obey the forall-or-nothing rule. See Note [Pattern signature binders and scoping]. * The explicit presence of these wrappers specifies, in the HsSyn, exactly where implicit quantification is allowed, and where wildcards are allowed. * LHsQTyVars is used in data/class declarations, where the user gives explicit *type* variable bindings, but we need to implicitly bind *kind* variables. For example class C (a :: k -> *) where ... The 'k' is implicitly bound in the hsq_tvs field of LHsQTyVars Note [The wildcard story for types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Types can have wildcards in them, to support partial type signatures, like f :: Int -> (_ , _a) -> _a A wildcard in a type can be * An anonymous wildcard, written '_' In HsType this is represented by HsWildCardTy. The renamer leaves it untouched, and it is later given a fresh meta tyvar in the typechecker. * A named wildcard, written '_a', '_foo', etc In HsType this is represented by (HsTyVar "_a") i.e. 
a perfectly ordinary type variable that happens to start with an underscore Note carefully: * When NamedWildCards is off, type variables that start with an underscore really /are/ ordinary type variables. And indeed, even when NamedWildCards is on you can bind _a explicitly as an ordinary type variable: data T _a _b = MkT _b _a Or even: f :: forall _a. _a -> _b Here _a is an ordinary forall'd binder, but (With NamedWildCards) _b is a named wildcard. (See the comments in #10982) * Named wildcards are bound by the HsWildCardBndrs (for types that obey the forall-or-nothing rule) and HsPatSigType (for type signatures in patterns and term-level binders in RULES), which wrap types that are allowed to have wildcards. Unnamed wildcards, however are left unchanged until typechecking, where we give them fresh wild tyvars and determine whether or not to emit hole constraints on each wildcard (we don't if it's a visible type/kind argument or a type family pattern). See related notes Note [Wildcards in visible kind application] and Note [Wildcards in visible type application] in GHC.Tc.Gen.HsType. * After type checking is done, we report what types the wildcards got unified with. Note [Ordering of implicit variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since the advent of -XTypeApplications, GHC makes promises about the ordering of implicit variable quantification. Specifically, we offer that implicitly quantified variables (such as those in const :: a -> b -> a, without a `forall`) will occur in left-to-right order of first occurrence. Here are a few examples: const :: a -> b -> a -- forall a b. ... f :: Eq a => b -> a -> a -- forall a b. ... contexts are included type a <-< b = b -> a g :: a <-< b -- forall a b. ... type synonyms matter class Functor f where fmap :: (a -> b) -> f a -> f b -- forall f a b. ... 
-- The f is quantified by the class, so only a and b are considered in fmap This simple story is complicated by the possibility of dependency: all variables must come after any variables mentioned in their kinds. typeRep :: Typeable a => TypeRep (a :: k) -- forall k a. ... The k comes first because a depends on k, even though the k appears later than the a in the code. Thus, GHC does a *stable topological sort* on the variables. By "stable", we mean that any two variables who do not depend on each other preserve their existing left-to-right ordering. Implicitly bound variables are collected by the extract- family of functions (extractHsTysRdrTyVars, extractHsTyVarBndrsKVs, etc.) in GHC.Rename.HsType. These functions thus promise to keep left-to-right ordering. Look for pointers to this note to see the places where the action happens. Note that we also maintain this ordering in kind signatures. Even though there's no visible kind application (yet), having implicit variables be quantified in left-to-right order in kind signatures is nice since: * It's consistent with the treatment for type signatures. * It can affect how types are displayed with -fprint-explicit-kinds (see #15568 for an example), which is a situation where knowing the order in which implicit variables are quantified can be useful. * In the event that visible kind application is implemented, the order in which we would expect implicit variables to be ordered in kinds will have already been established. 
-} -- | Located Haskell Context type LHsContext pass = XRec pass (HsContext pass) -- ^ 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnUnit' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Haskell Context type HsContext pass = [LHsType pass] -- | Located Haskell Type type LHsType pass = XRec pass (HsType pass) -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnComma' when -- in a list -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Haskell Kind type HsKind pass = HsType pass -- | Located Haskell Kind type LHsKind pass = XRec pass (HsKind pass) -- ^ 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -------------------------------------------------- -- LHsQTyVars -- The explicitly-quantified binders in a data/type declaration -- | The type variable binders in an 'HsForAllTy'. -- See also @Note [Variable Specificity and Forall Visibility]@ in -- "GHC.Tc.Gen.HsType". data HsForAllTelescope pass = HsForAllVis -- ^ A visible @forall@ (e.g., @forall a -> {...}@). -- These do not have any notion of specificity, so we use -- '()' as a placeholder value. { hsf_xvis :: XHsForAllVis pass , hsf_vis_bndrs :: [LHsTyVarBndr () pass] } | HsForAllInvis -- ^ An invisible @forall@ (e.g., @forall a {b} c. {...}@), -- where each binder has a 'Specificity'. 
{ hsf_xinvis :: XHsForAllInvis pass , hsf_invis_bndrs :: [LHsTyVarBndr Specificity pass] } | XHsForAllTelescope !(XXHsForAllTelescope pass) -- | Located Haskell Type Variable Binder type LHsTyVarBndr flag pass = XRec pass (HsTyVarBndr flag pass) -- See Note [HsType binders] -- | Located Haskell Quantified Type Variables data LHsQTyVars pass -- See Note [HsType binders] = HsQTvs { hsq_ext :: XHsQTvs pass , hsq_explicit :: [LHsTyVarBndr () pass] -- Explicit variables, written by the user } | XLHsQTyVars !(XXLHsQTyVars pass) hsQTvExplicit :: LHsQTyVars pass -> [LHsTyVarBndr () pass] hsQTvExplicit = hsq_explicit ------------------------------------------------ -- HsOuterTyVarBndrs -- Used to quantify the outermost type variable binders of a type that obeys -- the forall-or-nothing rule. These are used to represent the outermost -- quantification in: -- * Type signatures (LHsSigType/LHsSigWcType) -- * Patterns in a type/data family instance (HsTyPats) -- -- We support two forms: -- HsOuterImplicit (implicit quantification, added by renamer) -- f :: a -> a -- Desugars to f :: forall {a}. a -> a -- type instance F (a,b) = a->b -- HsOuterExplicit (explicit user quantifiation): -- f :: forall a. a -> a -- type instance forall a b. F (a,b) = a->b -- -- In constrast, when the user writes /visible/ quanitification -- T :: forall k -> k -> Type -- we use use HsOuterImplicit, wrapped around a HsForAllTy -- for the visible quantification -- -- See Note [forall-or-nothing rule] -- | The outermost type variables in a type that obeys the @forall@-or-nothing -- rule. See @Note [forall-or-nothing rule]@. data HsOuterTyVarBndrs flag pass = HsOuterImplicit -- ^ Implicit forall, e.g., -- @f :: a -> b -> b@ { hso_ximplicit :: XHsOuterImplicit pass } | HsOuterExplicit -- ^ Explicit forall, e.g., -- @f :: forall a b. 
a -> b -> b@ { hso_xexplicit :: XHsOuterExplicit pass flag , hso_bndrs :: [LHsTyVarBndr flag (NoGhcTc pass)] } | XHsOuterTyVarBndrs !(XXHsOuterTyVarBndrs pass) -- | Used for signatures, e.g., -- -- @ -- f :: forall a {b}. blah -- @ -- -- We use 'Specificity' for the 'HsOuterTyVarBndrs' @flag@ to allow -- distinguishing between specified and inferred type variables. type HsOuterSigTyVarBndrs = HsOuterTyVarBndrs Specificity -- | Used for type-family instance equations, e.g., -- -- @ -- type instance forall a. F [a] = Tree a -- @ -- -- The notion of specificity is irrelevant in type family equations, so we use -- @()@ for the 'HsOuterTyVarBndrs' @flag@. type HsOuterFamEqnTyVarBndrs = HsOuterTyVarBndrs () -- | Haskell Wildcard Binders data HsWildCardBndrs pass thing -- See Note [HsType binders] -- See Note [The wildcard story for types] = HsWC { hswc_ext :: XHsWC pass thing -- after the renamer -- Wild cards, only named -- See Note [Wildcards in visible kind application] , hswc_body :: thing -- Main payload (type or list of types) -- If there is an extra-constraints wildcard, -- it's still there in the hsc_body. } | XHsWildCardBndrs !(XXHsWildCardBndrs pass thing) -- | Types that can appear in pattern signatures, as well as the signatures for -- term-level binders in RULES. -- See @Note [Pattern signature binders and scoping]@. -- -- This is very similar to 'HsSigWcType', but with -- slightly different semantics: see @Note [HsType binders]@. -- See also @Note [The wildcard story for types]@. data HsPatSigType pass = HsPS { hsps_ext :: XHsPS pass -- ^ After renamer: 'HsPSRn' , hsps_body :: LHsType pass -- ^ Main payload (the type itself) } | XHsPatSigType !(XXHsPatSigType pass) -- | The extension field for 'HsPatSigType', which is only used in the -- renamer onwards. See @Note [Pattern signature binders and scoping]@. 
data HsPSRn = HsPSRn { hsps_nwcs :: [Name] -- ^ Wildcard names , hsps_imp_tvs :: [Name] -- ^ Implicitly bound variable names } deriving Data -- | Located Haskell Signature Type type LHsSigType pass = XRec pass (HsSigType pass) -- Implicit only -- | Located Haskell Wildcard Type type LHsWcType pass = HsWildCardBndrs pass (LHsType pass) -- Wildcard only -- | Located Haskell Signature Wildcard Type type LHsSigWcType pass = HsWildCardBndrs pass (LHsSigType pass) -- Both -- | A type signature that obeys the @forall@-or-nothing rule. In other -- words, an 'LHsType' that uses an 'HsOuterSigTyVarBndrs' to represent its -- outermost type variable quantification. -- See @Note [Representing type signatures]@. data HsSigType pass = HsSig { sig_ext :: XHsSig pass , sig_bndrs :: HsOuterSigTyVarBndrs pass , sig_body :: LHsType pass } | XHsSigType !(XXHsSigType pass) hsPatSigType :: HsPatSigType pass -> LHsType pass hsPatSigType = hsps_body {- Note [forall-or-nothing rule] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Free variables in signatures are usually bound in an implicit 'forall' at the beginning of user-written signatures. However, if the signature has an explicit, invisible forall at the beginning, this is disabled. This is referred to as the forall-or-nothing rule. The idea is nested foralls express something which is only expressible explicitly, while a top level forall could (usually) be replaced with an implicit binding. Top-level foralls alone ("forall.") are therefore an indication that the user is trying to be fastidious, so we don't implicitly bind any variables. Note that this rule only applies to outermost /in/visible 'forall's, and not outermost visible 'forall's. See #18660 for more on this point. Here are some concrete examples to demonstrate the forall-or-nothing rule in action: type F1 :: a -> b -> b -- Legal; a,b are implicitly quantified. -- Equivalently: forall a b. a -> b -> b type F2 :: forall a b. a -> b -> b -- Legal; explicitly quantified type F3 :: forall a. 
a -> b -> b -- Illegal; the forall-or-nothing rule says that -- if you quantify a, you must also quantify b type F4 :: forall a -> b -> b -- Legal; the top quantifier (forall a) is a /visible/ -- quantifer, so the "nothing" part of the forall-or-nothing -- rule applies, and b is therefore implicitly quantified. -- Equivalently: forall b. forall a -> b -> b type F5 :: forall b. forall a -> b -> c -- Illegal; the forall-or-nothing rule says that -- if you quantify b, you must also quantify c type F6 :: forall a -> forall b. b -> c -- Legal: just like F4. For a complete list of all places where the forall-or-nothing rule applies, see "The `forall`-or-nothing rule" section of the GHC User's Guide. Any type that obeys the forall-or-nothing rule is represented in the AST with an HsOuterTyVarBndrs: * If the type has an outermost, invisible 'forall', it uses HsOuterExplicit, which contains a list of the explicitly quantified type variable binders in `hso_bndrs`. After typechecking, HsOuterExplicit also stores a list of the explicitly quantified `InvisTVBinder`s in `hso_xexplicit :: XHsOuterExplicit GhcTc`. * Otherwise, it uses HsOuterImplicit. HsOuterImplicit is used for different things depending on the phase: * After parsing, it does not store anything in particular. * After renaming, it stores the implicitly bound type variable `Name`s in `hso_ximplicit :: XHsOuterImplicit GhcRn`. * After typechecking, it stores the implicitly bound `TyVar`s in `hso_ximplicit :: XHsOuterImplicit GhcTc`. NB: this implicit quantification is purely lexical: we bind any type or kind variables that are not in scope. The type checker may subsequently quantify over further kind variables. See Note [Binding scoped type variables] in GHC.Tc.Gen.Sig. HsOuterTyVarBndrs GhcTc is used in the typechecker as an intermediate data type for storing the outermost TyVars/InvisTVBinders in a type. See GHC.Tc.Gen.HsType.bindOuterTKBndrsX for an example of this. 
Note [Representing type signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HsSigType is used to represent an explicit user type signature. These are used in a variety of places. Some examples include: * Type signatures (e.g., f :: a -> a) * Standalone kind signatures (e.g., type G :: a -> a) * GADT constructor types (e.g., data T where MkT :: a -> T) A HsSigType is the combination of an HsOuterSigTyVarBndrs and an LHsType: * The HsOuterSigTyVarBndrs binds the /explicitly/ quantified type variables when the type signature has an outermost, user-written 'forall' (i.e, the HsOuterExplicit constructor is used). If there is no outermost 'forall', then it binds the /implicitly/ quantified type variables instead (i.e., the HsOuterImplicit constructor is used). * The LHsType represents the rest of the type. E.g. For a signature like f :: forall k (a::k). blah we get HsSig { sig_bndrs = HsOuterExplicit { hso_bndrs = [k, (a :: k)] } , sig_body = blah } Note [Pattern signature binders and scoping] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider the pattern signatures like those on `t` and `g` in: f = let h = \(t :: (b, b) -> \(g :: forall a. a -> b) -> ...(t :: (Int,Int))... in woggle * The `b` in t's pattern signature is implicitly bound and scopes over the signature and the body of the lambda. It stands for a type (any type); indeed we subsequently discover that b=Int. (See Note [TyVarTv] in GHC.Tc.Utils.TcMType for more on this point.) * The `b` in g's pattern signature is an /occurrence/ of the `b` bound by t's pattern signature. * The `a` in `forall a` scopes only over the type `a -> b`, not over the body of the lambda. * There is no forall-or-nothing rule for pattern signatures, which is why the type `forall a. a -> b` is permitted in `g`'s pattern signature, even though `b` is not explicitly bound. See Note [forall-or-nothing rule]. 
Similar scoping rules apply to term variable binders in RULES, like in the following example: {-# RULES "h" forall (t :: (b, b)) (g :: forall a. a -> b). h t g = ... #-} Just like in pattern signatures, the `b` in t's signature is implicitly bound and scopes over the remainder of the RULE. As a result, the `b` in g's signature is an occurrence. Moreover, the `a` in `forall a` scopes only over the type `a -> b`, and the forall-or-nothing rule does not apply. While quite similar, RULE term binder signatures behave slightly differently from pattern signatures in two ways: 1. Unlike in pattern signatures, where type variables can stand for any type, type variables in RULE term binder signatures are skolems. See Note [Typechecking pattern signature binders] in GHC.Tc.Gen.HsType for more on this point. In this sense, type variables in pattern signatures are quite similar to named wildcards, as both can refer to arbitrary types. The main difference lies in error reporting: if a named wildcard `_a` in a pattern signature stands for Int, then by default GHC will emit a warning stating as much. Changing `_a` to `a`, on the other hand, will cause it not to be reported. 2. In the `h` RULE above, only term variables are explicitly bound, so any free type variables in the term variables' signatures are implicitly bound. This is just like how the free type variables in pattern signatures are implicitly bound. If a RULE explicitly binds both term and type variables, however, then free type variables in term signatures are /not/ implicitly bound. For example, this RULE would be ill scoped: {-# RULES "h2" forall b. forall (t :: (b, c)) (g :: forall a. a -> b). h2 t g = ... #-} This is because `b` and `c` occur free in the signature for `t`, but only `b` was explicitly bound, leaving `c` out of scope. If the RULE had started with `forall b c.`, then it would have been accepted. The types in pattern signatures and RULE term binder signatures are represented in the AST by HsSigPatType. 
From the renamer onward, the hsps_ext field (of type HsPSRn) tracks the names of named wildcards and implicitly bound type variables so that they can be brought into scope during renaming and typechecking. Note [Lexically scoped type variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ScopedTypeVariables extension does two things: * It allows the use of type signatures in patterns (e.g., `f (x :: a -> a) = ...`). See Note [Pattern signature binders and scoping] for more on this point. * It brings lexically scoped type variables into scope for certain type signatures with outermost invisible 'forall's. This Note concerns the latter bullet point. Per the "Lexically scoped type variables" section of the GHC User's Guide, the following forms of type signatures can have lexically scoped type variables: * In declarations with type signatures, e.g., f :: forall a. a -> a f x = e @a Here, the 'forall a' brings 'a' into scope over the body of 'f'. Note that ScopedTypeVariables does /not/ interact with standalone kind signatures, only type signatures. * In explicit type annotations in expressions, e.g., id @a :: forall a. a -> a * In instance declarations, e.g., instance forall a. C [a] where m = e @a Note that unlike the examples above, the use of an outermost 'forall' isn't required to bring 'a' into scope. That is, the following would also work: instance forall a. C [a] where m = e @a Note that all of the types above obey the forall-or-nothing rule. As a result, the places in the AST that can have lexically scoped type variables are a subset of the places that use HsOuterTyVarBndrs (See Note [forall-or-nothing rule].) Some other observations about lexically scoped type variables: * Only type variables bound by an /invisible/ forall can be lexically scoped. See Note [hsScopedTvs and visible foralls]. * The lexically scoped type variables may be a strict subset of the type variables brought into scope by a type signature. 
See Note [Binding scoped type variables] in GHC.Tc.Gen.Sig. -} mapHsOuterImplicit :: (XHsOuterImplicit pass -> XHsOuterImplicit pass) -> HsOuterTyVarBndrs flag pass -> HsOuterTyVarBndrs flag pass mapHsOuterImplicit f (HsOuterImplicit{hso_ximplicit = imp}) = HsOuterImplicit{hso_ximplicit = f imp} mapHsOuterImplicit _ hso@(HsOuterExplicit{}) = hso mapHsOuterImplicit _ hso@(XHsOuterTyVarBndrs{}) = hso -------------------------------------------------- -- | These names are used early on to store the names of implicit -- parameters. They completely disappear after type-checking. newtype HsIPName = HsIPName FastString deriving( Eq, Data ) hsIPNameFS :: HsIPName -> FastString hsIPNameFS (HsIPName n) = n instance Outputable HsIPName where ppr (HsIPName n) = char '?' <> ftext n -- Ordinary implicit parameters instance OutputableBndr HsIPName where pprBndr _ n = ppr n -- Simple for now pprInfixOcc n = ppr n pprPrefixOcc n = ppr n -------------------------------------------------- -- | Haskell Type Variable Binder -- The flag annotates the binder. It is 'Specificity' in places where -- explicit specificity is allowed (e.g. x :: forall {a} b. ...) or -- '()' in other places. data HsTyVarBndr flag pass = UserTyVar -- no explicit kinding (XUserTyVar pass) flag (LIdP pass) -- See Note [Located RdrNames] in GHC.Hs.Expr | KindedTyVar (XKindedTyVar pass) flag (LIdP pass) (LHsKind pass) -- The user-supplied kind signature -- ^ -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen', -- 'GHC.Parser.Annotation.AnnDcolon', 'GHC.Parser.Annotation.AnnClose' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XTyVarBndr !(XXTyVarBndr pass) -- | Does this 'HsTyVarBndr' come with an explicit kind annotation? 
isHsKindedTyVar :: HsTyVarBndr flag pass -> Bool isHsKindedTyVar (UserTyVar {}) = False isHsKindedTyVar (KindedTyVar {}) = True isHsKindedTyVar (XTyVarBndr {}) = False -- | Haskell Type data HsType pass = HsForAllTy -- See Note [HsType binders] { hst_xforall :: XForAllTy pass , hst_tele :: HsForAllTelescope pass -- Explicit, user-supplied 'forall a {b} c' , hst_body :: LHsType pass -- body type } -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnForall', -- 'GHC.Parser.Annotation.AnnDot','GHC.Parser.Annotation.AnnDarrow' -- For details on above see Note [exact print annotations] in "GHC.Parser.Annotation" | HsQualTy -- See Note [HsType binders] { hst_xqual :: XQualTy pass , hst_ctxt :: LHsContext pass -- Context C => blah , hst_body :: LHsType pass } | HsTyVar (XTyVar pass) PromotionFlag -- Whether explicitly promoted, -- for the pretty printer (LIdP pass) -- Type variable, type constructor, or data constructor -- see Note [Promotions (HsTyVar)] -- See Note [Located RdrNames] in GHC.Hs.Expr -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsAppTy (XAppTy pass) (LHsType pass) (LHsType pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsAppKindTy (XAppKindTy pass) -- type level type app (LHsType pass) (LHsKind pass) | HsFunTy (XFunTy pass) (HsArrow pass) (LHsType pass) -- function type (LHsType pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnRarrow', -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsListTy (XListTy pass) (LHsType pass) -- Element type -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'['@, -- 'GHC.Parser.Annotation.AnnClose' @']'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsTupleTy (XTupleTy pass) HsTupleSort 
[LHsType pass] -- Element types (length gives arity) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'(' or '(#'@, -- 'GHC.Parser.Annotation.AnnClose' @')' or '#)'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsSumTy (XSumTy pass) [LHsType pass] -- Element types (length gives arity) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'(#'@, -- 'GHC.Parser.Annotation.AnnClose' '#)'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsOpTy (XOpTy pass) PromotionFlag -- Whether explicitly promoted, -- for the pretty printer (LHsType pass) (LIdP pass) (LHsType pass) -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsParTy (XParTy pass) (LHsType pass) -- See Note [Parens in HsSyn] in GHC.Hs.Expr -- Parenthesis preserved for the precedence re-arrangement in -- GHC.Rename.HsType -- It's important that a * (b + c) doesn't get rearranged to (a*b) + c! -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'('@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsIParamTy (XIParamTy pass) (XRec pass HsIPName) -- (?x :: ty) (LHsType pass) -- Implicit parameters as they occur in -- contexts -- ^ -- > (?x :: ty) -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsStarTy (XStarTy pass) Bool -- Is this the Unicode variant? 
-- Note [HsStarTy] -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None | HsKindSig (XKindSig pass) (LHsType pass) -- (ty :: kind) (LHsKind pass) -- A type with a kind signature -- ^ -- > (ty :: kind) -- -- - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'('@, -- 'GHC.Parser.Annotation.AnnDcolon','GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsSpliceTy (XSpliceTy pass) (HsSplice pass) -- Includes quasi-quotes -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'$('@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsDocTy (XDocTy pass) (LHsType pass) (LHsDoc pass) -- A documented type -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsBangTy (XBangTy pass) HsSrcBang (LHsType pass) -- Bang-style type annotations -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : -- 'GHC.Parser.Annotation.AnnOpen' @'{-\# UNPACK' or '{-\# NOUNPACK'@, -- 'GHC.Parser.Annotation.AnnClose' @'#-}'@ -- 'GHC.Parser.Annotation.AnnBang' @\'!\'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsRecTy (XRecTy pass) [LConDeclField pass] -- Only in data type declarations -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @'{'@, -- 'GHC.Parser.Annotation.AnnClose' @'}'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsExplicitListTy -- A promoted explicit list (XExplicitListTy pass) PromotionFlag -- whether explicitly promoted, for pretty printer [LHsType pass] -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @"'["@, -- 'GHC.Parser.Annotation.AnnClose' @']'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsExplicitTupleTy -- A promoted 
explicit tuple (XExplicitTupleTy pass) [LHsType pass] -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnOpen' @"'("@, -- 'GHC.Parser.Annotation.AnnClose' @')'@ -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsTyLit (XTyLit pass) HsTyLit -- A promoted numeric literal. -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | HsWildCardTy (XWildCardTy pass) -- A type wildcard -- See Note [The wildcard story for types] -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : None -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- Extension point; see Note [Trees That Grow] in Language.Haskell.Syntax.Extension | XHsType !(XXType pass) -- An escape hatch for tunnelling a Core 'Type' through 'HsType'. -- For more details on how this works, see: -- -- * @Note [Renaming HsCoreTys]@ in "GHC.Rename.HsType" -- -- * @Note [Typechecking HsCoreTys]@ in "GHC.Tc.Gen.HsType" type HsCoreTy = Type -- Note [Literal source text] in GHC.Types.Basic for SourceText fields in -- the following -- | Haskell Type Literal data HsTyLit = HsNumTy SourceText Integer | HsStrTy SourceText FastString | HsCharTy SourceText Char deriving Data -- | Denotes the type of arrows in the surface language data HsArrow pass = HsUnrestrictedArrow !(LHsUniToken "->" "→" pass) -- ^ a -> b or a → b | HsLinearArrow !(HsLinearArrowTokens pass) -- ^ a %1 -> b or a %1 → b, or a ⊸ b | HsExplicitMult !(LHsToken "%" pass) !(LHsType pass) !(LHsUniToken "->" "→" pass) -- ^ a %m -> b or a %m → b (very much including `a %Many -> b`! -- This is how the programmer wrote it). It is stored as an -- `HsType` so as to preserve the syntax as written in the -- program. data HsLinearArrowTokens pass = HsPct1 !(LHsToken "%1" pass) !(LHsUniToken "->" "→" pass) | HsLolly !(LHsToken "⊸" pass) -- | This is used in the syntax. In constructor declaration. 
It must keep the -- arrow representation. data HsScaled pass a = HsScaled (HsArrow pass) a hsMult :: HsScaled pass a -> HsArrow pass hsMult (HsScaled m _) = m hsScaledThing :: HsScaled pass a -> a hsScaledThing (HsScaled _ t) = t {- Note [Unit tuples] ~~~~~~~~~~~~~~~~~~ Consider the type type instance F Int = () We want to parse that "()" as HsTupleTy HsBoxedOrConstraintTuple [], NOT as HsTyVar unitTyCon Why? Because F might have kind (* -> Constraint), so we when parsing we don't know if that tuple is going to be a constraint tuple or an ordinary unit tuple. The HsTupleSort flag is specifically designed to deal with that, but it has to work for unit tuples too. Note [Promotions (HsTyVar)] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ HsTyVar: A name in a type or kind. Here are the allowed namespaces for the name. In a type: Var: not allowed Data: promoted data constructor Tv: type variable TcCls before renamer: type constructor, class constructor, or promoted data constructor TcCls after renamer: type constructor or class constructor In a kind: Var, Data: not allowed Tv: kind variable TcCls: kind constructor or promoted type constructor The 'Promoted' field in an HsTyVar captures whether the type was promoted in the source code by prefixing an apostrophe. Note [HsStarTy] ~~~~~~~~~~~~~~~ When the StarIsType extension is enabled, we want to treat '*' and its Unicode variant identically to 'Data.Kind.Type'. Unfortunately, doing so in the parser would mean that when we pretty-print it back, we don't know whether the user wrote '*' or 'Type', and lose the parse/ppr roundtrip property. As a workaround, we parse '*' as HsStarTy (if it stands for 'Data.Kind.Type') and then desugar it to 'Data.Kind.Type' in the typechecker (see tc_hs_type). When '*' is a regular type operator (StarIsType is disabled), HsStarTy is not involved. Note [Promoted lists and tuples] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notice the difference between HsListTy HsExplicitListTy HsTupleTy HsExplicitListTupleTy E.g. 
f :: [Int] HsListTy g3 :: T '[] All these use g2 :: T '[True] HsExplicitListTy g1 :: T '[True,False] g1a :: T [True,False] (can omit ' where unambiguous) kind of T :: [Bool] -> * This kind uses HsListTy! E.g. h :: (Int,Bool) HsTupleTy; f is a pair k :: S '(True,False) HsExplicitTypleTy; S is indexed by a type-level pair of booleans kind of S :: (Bool,Bool) -> * This kind uses HsExplicitTupleTy Note [Distinguishing tuple kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Apart from promotion, tuples can have one of three different kinds: x :: (Int, Bool) -- Regular boxed tuples f :: Int# -> (# Int#, Int# #) -- Unboxed tuples g :: (Eq a, Ord a) => a -- Constraint tuples For convenience, internally we use a single constructor for all of these, namely HsTupleTy, but keep track of the tuple kind (in the first argument to HsTupleTy, a HsTupleSort). We can tell if a tuple is unboxed while parsing, because of the #. However, with -XConstraintKinds we can only distinguish between constraint and boxed tuples during type checking, in general. Hence the two constructors of HsTupleSort: HsUnboxedTuple -> Produced by the parser HsBoxedOrConstraintTuple -> Could be a boxed or a constraint tuple. Produced by the parser only, disappears after type checking After typechecking, we use TupleSort (which clearly distinguishes between constraint tuples and boxed tuples) rather than HsTupleSort. 
-} -- | Haskell Tuple Sort data HsTupleSort = HsUnboxedTuple | HsBoxedOrConstraintTuple deriving Data -- | Located Constructor Declaration Field type LConDeclField pass = XRec pass (ConDeclField pass) -- ^ May have 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnComma' when -- in a list -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation -- | Constructor Declaration Field data ConDeclField pass -- Record fields have Haddock docs on them = ConDeclField { cd_fld_ext :: XConDeclField pass, cd_fld_names :: [LFieldOcc pass], -- ^ See Note [ConDeclField passs] cd_fld_type :: LBangType pass, cd_fld_doc :: Maybe (LHsDoc pass)} -- ^ - 'GHC.Parser.Annotation.AnnKeywordId' : 'GHC.Parser.Annotation.AnnDcolon' -- For details on above see Note [exact print annotations] in GHC.Parser.Annotation | XConDeclField !(XXConDeclField pass) -- | Describes the arguments to a data constructor. This is a common -- representation for several constructor-related concepts, including: -- -- * The arguments in a Haskell98-style constructor declaration -- (see 'HsConDeclH98Details' in "GHC.Hs.Decls"). -- -- * The arguments in constructor patterns in @case@/function definitions -- (see 'HsConPatDetails' in "GHC.Hs.Pat"). -- -- * The left-hand side arguments in a pattern synonym binding -- (see 'HsPatSynDetails' in "GHC.Hs.Binds"). -- -- One notable exception is the arguments in a GADT constructor, which uses -- a separate data type entirely (see 'HsConDeclGADTDetails' in -- "GHC.Hs.Decls"). This is because GADT constructors cannot be declared with -- infix syntax, unlike the concepts above (#18844). data HsConDetails tyarg arg rec = PrefixCon [tyarg] [arg] -- C @t1 @t2 p1 p2 p3 | RecCon rec -- C { x = p1, y = p2 } | InfixCon arg arg -- p1 `C` p2 deriving Data -- | An empty list that can be used to indicate that there are no -- type arguments allowed in cases where HsConDetails is applied to Void. 
noTypeArgs :: [Void] noTypeArgs = [] instance (Outputable tyarg, Outputable arg, Outputable rec) => Outputable (HsConDetails tyarg arg rec) where ppr (PrefixCon tyargs args) = text "PrefixCon:" <+> hsep (map (\t -> text "@" <> ppr t) tyargs) <+> ppr args ppr (RecCon rec) = text "RecCon:" <+> ppr rec ppr (InfixCon l r) = text "InfixCon:" <+> ppr [l, r] {- Note [ConDeclField passs] ~~~~~~~~~~~~~~~~~~~~~~~~~ A ConDeclField contains a list of field occurrences: these always include the field label as the user wrote it. After the renamer, it will additionally contain the identity of the selector function in the second component. Due to DuplicateRecordFields, the OccName of the selector function may have been mangled, which is why we keep the original field label separately. For example, when DuplicateRecordFields is enabled data T = MkT { x :: Int } gives ConDeclField { cd_fld_names = [L _ (FieldOcc "x" $sel:x:MkT)], ... }. -} ----------------------- -- A valid type must have a for-all at the top of the type, or of the fn arg -- types --------------------- {- Note [Scoping of named wildcards] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f :: _a -> _a f x = let g :: _a -> _a g = ... in ... Currently, for better or worse, the "_a" variables are all the same. So although there is no explicit forall, the "_a" scopes over the definition. I don't know if this is a good idea, but there it is. -} {- Note [hsScopedTvs and visible foralls] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -XScopedTypeVariables can be defined in terms of a desugaring to -XTypeAbstractions (GHC Proposal #50): fn :: forall a b c. tau(a,b,c) fn :: forall a b c. tau(a,b,c) fn = defn(a,b,c) ==> fn @x @y @z = defn(x,y,z) That is, for every type variable of the leading 'forall' in the type signature, we add an invisible binder at term level. 
This model does not extend to visible forall, as discussed here: * https://gitlab.haskell.org/ghc/ghc/issues/16734#note_203412 * https://github.com/ghc-proposals/ghc-proposals/pull/238 The conclusion of these discussions can be summarized as follows: > Assuming support for visible 'forall' in terms, consider this example: > > vfn :: forall x y -> tau(x,y) > vfn = \a b -> ... > > The user has written their own binders 'a' and 'b' to stand for 'x' and > 'y', and we definitely should not desugar this into: > > vfn :: forall x y -> tau(x,y) > vfn x y = \a b -> ... -- bad! This design choice is reflected in the design of HsOuterSigTyVarBndrs, which are used in every place that ScopedTypeVariables takes effect: data HsOuterTyVarBndrs flag pass = HsOuterImplicit { ... } | HsOuterExplicit { ..., hso_bndrs :: [LHsTyVarBndr flag pass] } | ... type HsOuterSigTyVarBndrs = HsOuterTyVarBndrs Specificity The HsOuterExplicit constructor is only used in type signatures with outermost, /invisible/ 'forall's. Any other type—including those with outermost, /visible/ 'forall's—will use HsOuterImplicit. Therefore, when we determine which type variables to bring into scope over the body of a function (in hsScopedTvs), we /only/ bring the type variables bound by the hso_bndrs in an HsOuterExplicit into scope. If we have an HsOuterImplicit instead, then we do not bring any type variables into scope over the body of a function at all. At the moment, GHC does not support visible 'forall' in terms. Nevertheless, it is still possible to write erroneous programs that use visible 'forall's in terms, such as this example: x :: forall a -> a -> a x = x Previous versions of GHC would bring `a` into scope over the body of `x` in the hopes that the typechecker would error out later (see `GHC.Tc.Validity.vdqAllowed`). However, this can wreak havoc in the renamer before GHC gets to that point (see #17687 for an example of this). 
Bottom line: nip problems in the bud by refraining from bringing any type variables in an HsOuterImplicit into scope over the body of a function, even if they correspond to a visible 'forall'. -} {- ************************************************************************ * * Decomposing HsTypes * * ************************************************************************ -} -- Arguments in an expression/type after splitting data HsArg tm ty = HsValArg tm -- Argument is an ordinary expression (f arg) | HsTypeArg SrcSpan ty -- Argument is a visible type application (f @ty) -- SrcSpan is location of the `@` | HsArgPar SrcSpan -- See Note [HsArgPar] numVisibleArgs :: [HsArg tm ty] -> Arity numVisibleArgs = count is_vis where is_vis (HsValArg _) = True is_vis _ = False -- type level equivalent type LHsTypeArg p = HsArg (LHsType p) (LHsKind p) -- | @'pprHsArgsApp' id fixity args@ pretty-prints an application of @id@ -- to @args@, using the @fixity@ to tell whether @id@ should be printed prefix -- or infix. Examples: -- -- @ -- pprHsArgsApp T Prefix [HsTypeArg Bool, HsValArg Int] = T \@Bool Int -- pprHsArgsApp T Prefix [HsTypeArg Bool, HsArgPar, HsValArg Int] = (T \@Bool) Int -- pprHsArgsApp (++) Infix [HsValArg Char, HsValArg Double] = Char ++ Double -- pprHsArgsApp (++) Infix [HsValArg Char, HsValArg Double, HsVarArg Ordering] = (Char ++ Double) Ordering -- @ pprHsArgsApp :: (OutputableBndr id, Outputable tm, Outputable ty) => id -> LexicalFixity -> [HsArg tm ty] -> SDoc pprHsArgsApp thing fixity (argl:argr:args) | Infix <- fixity = let pp_op_app = hsep [ ppr_single_hs_arg argl , pprInfixOcc thing , ppr_single_hs_arg argr ] in case args of [] -> pp_op_app _ -> ppr_hs_args_prefix_app (parens pp_op_app) args pprHsArgsApp thing _fixity args = ppr_hs_args_prefix_app (pprPrefixOcc thing) args -- | Pretty-print a prefix identifier to a list of 'HsArg's. 
ppr_hs_args_prefix_app :: (Outputable tm, Outputable ty) => SDoc -> [HsArg tm ty] -> SDoc ppr_hs_args_prefix_app acc [] = acc ppr_hs_args_prefix_app acc (arg:args) = case arg of HsValArg{} -> ppr_hs_args_prefix_app (acc <+> ppr_single_hs_arg arg) args HsTypeArg{} -> ppr_hs_args_prefix_app (acc <+> ppr_single_hs_arg arg) args HsArgPar{} -> ppr_hs_args_prefix_app (parens acc) args -- | Pretty-print an 'HsArg' in isolation. ppr_single_hs_arg :: (Outputable tm, Outputable ty) => HsArg tm ty -> SDoc ppr_single_hs_arg (HsValArg tm) = ppr tm ppr_single_hs_arg (HsTypeArg _ ty) = char '@' <> ppr ty -- GHC shouldn't be constructing ASTs such that this case is ever reached. -- Still, it's possible some wily user might construct their own AST that -- allows this to be reachable, so don't fail here. ppr_single_hs_arg (HsArgPar{}) = empty -- | This instance is meant for debug-printing purposes. If you wish to -- pretty-print an application of 'HsArg's, use 'pprHsArgsApp' instead. instance (Outputable tm, Outputable ty) => Outputable (HsArg tm ty) where ppr (HsValArg tm) = text "HsValArg" <+> ppr tm ppr (HsTypeArg sp ty) = text "HsTypeArg" <+> ppr sp <+> ppr ty ppr (HsArgPar sp) = text "HsArgPar" <+> ppr sp {- Note [HsArgPar] ~~~~~~~~~~~~~~~ A HsArgPar indicates that everything to the left of this in the argument list is enclosed in parentheses together with the function itself. It is necessary so that we can recreate the parenthesis structure in the original source after typechecking the arguments. 
The SrcSpan is the span of the original HsPar ((f arg1) arg2 arg3) results in an input argument list of [HsValArg arg1, HsArgPar span1, HsValArg arg2, HsValArg arg3, HsArgPar span2] -} -------------------------------- {- ************************************************************************ * * FieldOcc * * ************************************************************************ -} -- | Located Field Occurrence type LFieldOcc pass = XRec pass (FieldOcc pass) -- | Field Occurrence -- -- Represents an *occurrence* of a field. This may or may not be a -- binding occurrence (e.g. this type is used in 'ConDeclField' and -- 'RecordPatSynField' which bind their fields, but also in -- 'HsRecField' for record construction and patterns, which do not). -- -- We store both the 'RdrName' the user originally wrote, and after -- the renamer we use the extension field to store the selector -- function. data FieldOcc pass = FieldOcc { foExt :: XCFieldOcc pass , foLabel :: XRec pass RdrName -- See Note [Located RdrNames] in Language.Haskell.Syntax.Expr } | XFieldOcc !(XXFieldOcc pass) deriving instance ( Eq (XRec pass RdrName) , Eq (XCFieldOcc pass) , Eq (XXFieldOcc pass) ) => Eq (FieldOcc pass) instance Outputable (XRec pass RdrName) => Outputable (FieldOcc pass) where ppr = ppr . foLabel instance (UnXRec pass, OutputableBndr (XRec pass RdrName)) => OutputableBndr (FieldOcc pass) where pprInfixOcc = pprInfixOcc . unXRec @pass . foLabel pprPrefixOcc = pprPrefixOcc . unXRec @pass . foLabel instance (UnXRec pass, OutputableBndr (XRec pass RdrName)) => OutputableBndr (GenLocated SrcSpan (FieldOcc pass)) where pprInfixOcc = pprInfixOcc . unLoc pprPrefixOcc = pprPrefixOcc . unLoc -- | Located Ambiguous Field Occurence type LAmbiguousFieldOcc pass = XRec pass (AmbiguousFieldOcc pass) -- | Ambiguous Field Occurrence -- -- Represents an *occurrence* of a field that is potentially -- ambiguous after the renamer, with the ambiguity resolved by the -- typechecker. 
We always store the 'RdrName' that the user -- originally wrote, and store the selector function after the renamer -- (for unambiguous occurrences) or the typechecker (for ambiguous -- occurrences). -- -- See Note [HsRecField and HsRecUpdField] in "GHC.Hs.Pat". -- See Note [Located RdrNames] in "GHC.Hs.Expr". data AmbiguousFieldOcc pass = Unambiguous (XUnambiguous pass) (LocatedN RdrName) | Ambiguous (XAmbiguous pass) (LocatedN RdrName) | XAmbiguousFieldOcc !(XXAmbiguousFieldOcc pass) {- ************************************************************************ * * \subsection{Pretty printing} * * ************************************************************************ -} instance Outputable HsTyLit where ppr = ppr_tylit -------------------------- ppr_tylit :: HsTyLit -> SDoc ppr_tylit (HsNumTy source i) = pprWithSourceText source (integer i) ppr_tylit (HsStrTy source s) = pprWithSourceText source (text (show s)) ppr_tylit (HsCharTy source c) = pprWithSourceText source (text (show c)) ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH.hs0000644000000000000000000000723614472375231024632 0ustar0000000000000000{- | The public face of Template Haskell For other documentation, refer to: -} {-# LANGUAGE Safe #-} module Language.Haskell.TH( -- * The monad and its operations Q, runQ, Quote(..), -- ** Administration: errors, locations and IO reportError, -- :: String -> Q () reportWarning, -- :: String -> Q () report, -- :: Bool -> String -> Q () recover, -- :: Q a -> Q a -> Q a location, -- :: Q Loc Loc(..), runIO, -- :: IO a -> Q a -- ** Querying the compiler -- *** Reify reify, -- :: Name -> Q Info reifyModule, newDeclarationGroup, Info(..), ModuleInfo(..), InstanceDec, ParentName, SumAlt, SumArity, Arity, Unlifted, -- *** Language extension lookup Extension(..), extsEnabled, isExtEnabled, -- *** Name lookup lookupTypeName, -- :: String -> Q (Maybe Name) lookupValueName, -- :: String -> Q (Maybe Name) -- *** Fixity lookup reifyFixity, -- *** Type 
lookup reifyType, -- *** Instance lookup reifyInstances, isInstance, -- *** Roles lookup reifyRoles, -- *** Annotation lookup reifyAnnotations, AnnLookup(..), -- *** Constructor strictness lookup reifyConStrictness, -- * Typed expressions TExp, unType, Code(..), unTypeCode, unsafeCodeCoerce, hoistCode, bindCode, bindCode_, joinCode, liftCode, -- * Names Name, NameSpace, -- Abstract -- ** Constructing names mkName, -- :: String -> Name -- ** Deconstructing names nameBase, -- :: Name -> String nameModule, -- :: Name -> Maybe String namePackage, -- :: Name -> Maybe String nameSpace, -- :: Name -> Maybe NameSpace -- ** Built-in names tupleTypeName, tupleDataName, -- Int -> Name unboxedTupleTypeName, unboxedTupleDataName, -- :: Int -> Name unboxedSumTypeName, -- :: SumArity -> Name unboxedSumDataName, -- :: SumAlt -> SumArity -> Name -- * The algebraic data types -- | The lowercase versions (/syntax operators/) of these constructors are -- preferred to these constructors, since they compose better with -- quotations (@[| |]@) and splices (@$( ... 
)@) -- ** Declarations Dec(..), Con(..), Clause(..), SourceUnpackedness(..), SourceStrictness(..), DecidedStrictness(..), Bang(..), Strict, Foreign(..), Callconv(..), Safety(..), Pragma(..), Inline(..), RuleMatch(..), Phases(..), RuleBndr(..), AnnTarget(..), FunDep(..), TySynEqn(..), TypeFamilyHead(..), Fixity(..), FixityDirection(..), defaultFixity, maxPrecedence, PatSynDir(..), PatSynArgs(..), -- ** Expressions Exp(..), Match(..), Body(..), Guard(..), Stmt(..), Range(..), Lit(..), -- ** Patterns Pat(..), FieldExp, FieldPat, -- ** Types Type(..), TyVarBndr(..), TyLit(..), Kind, Cxt, Pred, Syntax.Role(..), Syntax.Specificity(..), FamilyResultSig(..), Syntax.InjectivityAnn(..), PatSynType, BangType, VarBangType, -- ** Documentation putDoc, getDoc, DocLoc(..), -- * Library functions module Language.Haskell.TH.Lib, -- * Pretty-printer Ppr(..), pprint, pprExp, pprLit, pprPat, pprParendType ) where import Language.Haskell.TH.Syntax as Syntax import Language.Haskell.TH.Lib import Language.Haskell.TH.Ppr ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/LanguageExtensions.hs0000644000000000000000000000142214470055371030422 0ustar0000000000000000{-# LANGUAGE Safe #-} ----------------------------------------------------------------------------- -- | -- Module : Language.Haskell.TH.LanguageExtensions -- Copyright : (c) The University of Glasgow 2015 -- License : BSD-style (see the file libraries/base/LICENSE) -- -- Maintainer : libraries@haskell.org -- Stability : experimental -- Portability : portable -- -- Language extensions known to GHC -- ----------------------------------------------------------------------------- module Language.Haskell.TH.LanguageExtensions ( Extension(..) 
) where -- This module exists primarily to avoid inserting a massive list of language -- extensions into the already quite large Haddocks for Language.Haskell.TH import GHC.LanguageExtensions.Type (Extension(..)) ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/Lib.hs0000644000000000000000000002571614472375231025343 0ustar0000000000000000{-# LANGUAGE Safe #-} -- | -- Language.Haskell.TH.Lib contains lots of useful helper functions for -- generating and manipulating Template Haskell terms -- Note: this module mostly re-exports functions from -- Language.Haskell.TH.Lib.Internal, but if a change occurs to Template -- Haskell which requires breaking the API offered in this module, we opt to -- copy the old definition here, and make the changes in -- Language.Haskell.TH.Lib.Internal. This way, we can retain backwards -- compatibility while still allowing GHC to make changes as it needs. module Language.Haskell.TH.Lib ( -- All of the exports from this module should -- be "public" functions. The main module TH -- re-exports them all. 
-- * Library functions -- ** Abbreviations InfoQ, ExpQ, TExpQ, CodeQ, DecQ, DecsQ, ConQ, TypeQ, KindQ, TyLitQ, CxtQ, PredQ, DerivClauseQ, MatchQ, ClauseQ, BodyQ, GuardQ, StmtQ, RangeQ, SourceStrictnessQ, SourceUnpackednessQ, BangQ, BangTypeQ, VarBangTypeQ, StrictTypeQ, VarStrictTypeQ, FieldExpQ, PatQ, FieldPatQ, RuleBndrQ, TySynEqnQ, PatSynDirQ, PatSynArgsQ, FamilyResultSigQ, DerivStrategyQ, TyVarBndrUnit, TyVarBndrSpec, -- ** Constructors lifted to 'Q' -- *** Literals intPrimL, wordPrimL, floatPrimL, doublePrimL, integerL, rationalL, charL, stringL, stringPrimL, charPrimL, bytesPrimL, mkBytes, -- *** Patterns litP, varP, tupP, unboxedTupP, unboxedSumP, conP, uInfixP, parensP, infixP, tildeP, bangP, asP, wildP, recP, listP, sigP, viewP, fieldPat, -- *** Pattern Guards normalB, guardedB, normalG, normalGE, patG, patGE, match, clause, -- *** Expressions dyn, varE, unboundVarE, labelE, implicitParamVarE, conE, litE, staticE, appE, appTypeE, uInfixE, parensE, infixE, infixApp, sectionL, sectionR, lamE, lam1E, lamCaseE, lamCasesE, tupE, unboxedTupE, unboxedSumE, condE, multiIfE, letE, caseE, appsE, listE, sigE, recConE, recUpdE, stringE, fieldExp, getFieldE, projectionE, -- **** Ranges fromE, fromThenE, fromToE, fromThenToE, -- ***** Ranges with more indirection arithSeqE, fromR, fromThenR, fromToR, fromThenToR, -- **** Statements doE, mdoE, compE, bindS, letS, noBindS, parS, recS, -- *** Types forallT, forallVisT, varT, conT, appT, appKindT, arrowT, mulArrowT, infixT, uInfixT, promotedInfixT, promotedUInfixT, parensT, equalityT, listT, tupleT, unboxedTupleT, unboxedSumT, sigT, litT, wildCardT, promotedT, promotedTupleT, promotedNilT, promotedConsT, implicitParamT, -- **** Type literals numTyLit, strTyLit, charTyLit, -- **** Strictness noSourceUnpackedness, sourceNoUnpack, sourceUnpack, noSourceStrictness, sourceLazy, sourceStrict, isStrict, notStrict, unpacked, bang, bangType, varBangType, strictType, varStrictType, -- **** Class Contexts cxt, classP, equalP, -- **** 
Constructors normalC, recC, infixC, forallC, gadtC, recGadtC, -- *** Kinds varK, conK, tupleK, arrowK, listK, appK, starK, constraintK, -- *** Type variable binders plainTV, kindedTV, plainInvisTV, kindedInvisTV, specifiedSpec, inferredSpec, -- *** Roles nominalR, representationalR, phantomR, inferR, -- *** Top Level Declarations -- **** Data valD, funD, tySynD, dataD, newtypeD, derivClause, DerivClause(..), stockStrategy, anyclassStrategy, newtypeStrategy, viaStrategy, DerivStrategy(..), -- **** Class classD, instanceD, instanceWithOverlapD, Overlap(..), sigD, kiSigD, standaloneDerivD, standaloneDerivWithStrategyD, defaultSigD, -- **** Role annotations roleAnnotD, -- **** Type Family / Data Family dataFamilyD, openTypeFamilyD, closedTypeFamilyD, dataInstD, newtypeInstD, tySynInstD, tySynEqn, injectivityAnn, noSig, kindSig, tyVarSig, -- **** Fixity infixLD, infixRD, infixND, -- **** Default declaration defaultD, -- **** Foreign Function Interface (FFI) cCall, stdCall, cApi, prim, javaScript, unsafe, safe, interruptible, forImpD, -- **** Functional dependencies funDep, -- **** Pragmas ruleVar, typedRuleVar, valueAnnotation, typeAnnotation, moduleAnnotation, pragInlD, pragSpecD, pragSpecInlD, pragSpecInstD, pragRuleD, pragAnnD, pragLineD, pragCompleteD, -- **** Pattern Synonyms patSynD, patSynSigD, unidir, implBidir, explBidir, prefixPatSyn, infixPatSyn, recordPatSyn, -- **** Implicit Parameters implicitParamBindD, -- ** Reify thisModule, -- ** Documentation withDecDoc, withDecsDoc, funD_doc, dataD_doc, newtypeD_doc, dataInstD_doc, newtypeInstD_doc, patSynD_doc ) where import Language.Haskell.TH.Lib.Internal hiding ( tySynD , dataD , newtypeD , classD , pragRuleD , dataInstD , newtypeInstD , dataFamilyD , openTypeFamilyD , closedTypeFamilyD , tySynEqn , forallC , forallT , sigT , plainTV , kindedTV , starK , constraintK , noSig , kindSig , tyVarSig , derivClause , standaloneDerivWithStrategyD , doE , mdoE , tupE , unboxedTupE , conP , Role , InjectivityAnn ) import 
qualified Language.Haskell.TH.Lib.Internal as Internal import Language.Haskell.TH.Syntax import Control.Applicative ( liftA2 ) import Foreign.ForeignPtr import Data.Word import Prelude -- All definitions below represent the "old" API, since their definitions are -- different in Language.Haskell.TH.Lib.Internal. Please think carefully before -- deciding to change the APIs of the functions below, as they represent the -- public API (as opposed to the Internal module, which has no API promises.) ------------------------------------------------------------------------------- -- * Dec tySynD :: Quote m => Name -> [TyVarBndr ()] -> m Type -> m Dec tySynD tc tvs rhs = do { rhs1 <- rhs; return (TySynD tc tvs rhs1) } dataD :: Quote m => m Cxt -> Name -> [TyVarBndr ()] -> Maybe Kind -> [m Con] -> [m DerivClause] -> m Dec dataD ctxt tc tvs ksig cons derivs = do ctxt1 <- ctxt cons1 <- sequenceA cons derivs1 <- sequenceA derivs return (DataD ctxt1 tc tvs ksig cons1 derivs1) newtypeD :: Quote m => m Cxt -> Name -> [TyVarBndr ()] -> Maybe Kind -> m Con -> [m DerivClause] -> m Dec newtypeD ctxt tc tvs ksig con derivs = do ctxt1 <- ctxt con1 <- con derivs1 <- sequenceA derivs return (NewtypeD ctxt1 tc tvs ksig con1 derivs1) classD :: Quote m => m Cxt -> Name -> [TyVarBndr ()] -> [FunDep] -> [m Dec] -> m Dec classD ctxt cls tvs fds decs = do decs1 <- sequenceA decs ctxt1 <- ctxt return $ ClassD ctxt1 cls tvs fds decs1 pragRuleD :: Quote m => String -> [m RuleBndr] -> m Exp -> m Exp -> Phases -> m Dec pragRuleD n bndrs lhs rhs phases = do bndrs1 <- sequenceA bndrs lhs1 <- lhs rhs1 <- rhs return $ PragmaD $ RuleP n Nothing bndrs1 lhs1 rhs1 phases dataInstD :: Quote m => m Cxt -> Name -> [m Type] -> Maybe Kind -> [m Con] -> [m DerivClause] -> m Dec dataInstD ctxt tc tys ksig cons derivs = do ctxt1 <- ctxt ty1 <- foldl appT (conT tc) tys cons1 <- sequenceA cons derivs1 <- sequenceA derivs return (DataInstD ctxt1 Nothing ty1 ksig cons1 derivs1) newtypeInstD :: Quote m => m Cxt -> Name -> 
[m Type] -> Maybe Kind -> m Con -> [m DerivClause] -> m Dec newtypeInstD ctxt tc tys ksig con derivs = do ctxt1 <- ctxt ty1 <- foldl appT (conT tc) tys con1 <- con derivs1 <- sequenceA derivs return (NewtypeInstD ctxt1 Nothing ty1 ksig con1 derivs1) dataFamilyD :: Quote m => Name -> [TyVarBndr ()] -> Maybe Kind -> m Dec dataFamilyD tc tvs kind = pure $ DataFamilyD tc tvs kind openTypeFamilyD :: Quote m => Name -> [TyVarBndr ()] -> FamilyResultSig -> Maybe InjectivityAnn -> m Dec openTypeFamilyD tc tvs res inj = pure $ OpenTypeFamilyD (TypeFamilyHead tc tvs res inj) closedTypeFamilyD :: Quote m => Name -> [TyVarBndr ()] -> FamilyResultSig -> Maybe InjectivityAnn -> [m TySynEqn] -> m Dec closedTypeFamilyD tc tvs result injectivity eqns = do eqns1 <- sequenceA eqns return (ClosedTypeFamilyD (TypeFamilyHead tc tvs result injectivity) eqns1) tySynEqn :: Quote m => (Maybe [TyVarBndr ()]) -> m Type -> m Type -> m TySynEqn tySynEqn tvs lhs rhs = do lhs1 <- lhs rhs1 <- rhs return (TySynEqn tvs lhs1 rhs1) forallC :: Quote m => [TyVarBndr Specificity] -> m Cxt -> m Con -> m Con forallC ns ctxt con = liftA2 (ForallC ns) ctxt con ------------------------------------------------------------------------------- -- * Type forallT :: Quote m => [TyVarBndr Specificity] -> m Cxt -> m Type -> m Type forallT tvars ctxt ty = do ctxt1 <- ctxt ty1 <- ty return $ ForallT tvars ctxt1 ty1 sigT :: Quote m => m Type -> Kind -> m Type sigT t k = do t' <- t return $ SigT t' k ------------------------------------------------------------------------------- -- * Kind plainTV :: Name -> TyVarBndr () plainTV n = PlainTV n () kindedTV :: Name -> Kind -> TyVarBndr () kindedTV n k = KindedTV n () k starK :: Kind starK = StarT constraintK :: Kind constraintK = ConstraintT ------------------------------------------------------------------------------- -- * Type family result noSig :: FamilyResultSig noSig = NoSig kindSig :: Kind -> FamilyResultSig kindSig = KindSig tyVarSig :: TyVarBndr () -> 
FamilyResultSig tyVarSig = TyVarSig ------------------------------------------------------------------------------- -- * Top Level Declarations derivClause :: Quote m => Maybe DerivStrategy -> [m Pred] -> m DerivClause derivClause mds p = do p' <- cxt p return $ DerivClause mds p' standaloneDerivWithStrategyD :: Quote m => Maybe DerivStrategy -> m Cxt -> m Type -> m Dec standaloneDerivWithStrategyD mds ctxt ty = do ctxt' <- ctxt ty' <- ty return $ StandaloneDerivD mds ctxt' ty' ------------------------------------------------------------------------------- -- * Bytes literals -- | Create a Bytes datatype representing raw bytes to be embedded into the -- program/library binary. -- -- @since 2.16.0.0 mkBytes :: ForeignPtr Word8 -- ^ Pointer to the data -> Word -- ^ Offset from the pointer -> Word -- ^ Number of bytes -> Bytes mkBytes = Bytes ------------------------------------------------------------------------------- -- * Tuple expressions tupE :: Quote m => [m Exp] -> m Exp tupE es = do { es1 <- sequenceA es; return (TupE $ map Just es1)} unboxedTupE :: Quote m => [m Exp] -> m Exp unboxedTupE es = do { es1 <- sequenceA es; return (UnboxedTupE $ map Just es1)} ------------------------------------------------------------------------------- -- * Do expressions doE :: Quote m => [m Stmt] -> m Exp doE = Internal.doE Nothing mdoE :: Quote m => [m Stmt] -> m Exp mdoE = Internal.mdoE Nothing ------------------------------------------------------------------------------- -- * Patterns conP :: Quote m => Name -> [m Pat] -> m Pat conP n xs = Internal.conP n [] xs ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/Lib/Internal.hs0000644000000000000000000011603014472375231027105 0ustar0000000000000000{-# LANGUAGE PolyKinds #-} {-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE Trustworthy #-} -- | -- Language.Haskell.TH.Lib.Internal exposes some additional functionality that -- is used internally in GHC's integration with Template Haskell. 
This is not a -- part of the public API, and as such, there are no API guarantees for this -- module from version to version. -- Why do we have both Language.Haskell.TH.Lib.Internal and -- Language.Haskell.TH.Lib? Ultimately, it's because the functions in the -- former (which are tailored for GHC's use) need different type signatures -- than the ones in the latter. Syncing up the Internal type signatures would -- involve a massive amount of breaking changes, so for the time being, we -- relegate as many changes as we can to just the Internal module, where it -- is safe to break things. module Language.Haskell.TH.Lib.Internal where import Language.Haskell.TH.Syntax hiding (Role, InjectivityAnn) import qualified Language.Haskell.TH.Syntax as TH import Control.Applicative(liftA, liftA2) import qualified Data.Kind as Kind (Type) import Data.Word( Word8 ) import Data.List.NonEmpty ( NonEmpty(..) ) import GHC.Exts (TYPE) import Prelude ---------------------------------------------------------- -- * Type synonyms ---------------------------------------------------------- -- | Representation-polymorphic since /template-haskell-2.17.0.0/. 
type TExpQ :: TYPE r -> Kind.Type type TExpQ a = Q (TExp a) type CodeQ :: TYPE r -> Kind.Type type CodeQ = Code Q type InfoQ = Q Info type PatQ = Q Pat type FieldPatQ = Q FieldPat type ExpQ = Q Exp type DecQ = Q Dec type DecsQ = Q [Dec] type Decs = [Dec] -- Defined as it is more convenient to wire-in type ConQ = Q Con type TypeQ = Q Type type KindQ = Q Kind type TyLitQ = Q TyLit type CxtQ = Q Cxt type PredQ = Q Pred type DerivClauseQ = Q DerivClause type MatchQ = Q Match type ClauseQ = Q Clause type BodyQ = Q Body type GuardQ = Q Guard type StmtQ = Q Stmt type RangeQ = Q Range type SourceStrictnessQ = Q SourceStrictness type SourceUnpackednessQ = Q SourceUnpackedness type BangQ = Q Bang type BangTypeQ = Q BangType type VarBangTypeQ = Q VarBangType type StrictTypeQ = Q StrictType type VarStrictTypeQ = Q VarStrictType type FieldExpQ = Q FieldExp type RuleBndrQ = Q RuleBndr type TySynEqnQ = Q TySynEqn type PatSynDirQ = Q PatSynDir type PatSynArgsQ = Q PatSynArgs type FamilyResultSigQ = Q FamilyResultSig type DerivStrategyQ = Q DerivStrategy -- must be defined here for DsMeta to find it type Role = TH.Role type InjectivityAnn = TH.InjectivityAnn type TyVarBndrUnit = TyVarBndr () type TyVarBndrSpec = TyVarBndr Specificity ---------------------------------------------------------- -- * Lowercase pattern syntax functions ---------------------------------------------------------- intPrimL :: Integer -> Lit intPrimL = IntPrimL wordPrimL :: Integer -> Lit wordPrimL = WordPrimL floatPrimL :: Rational -> Lit floatPrimL = FloatPrimL doublePrimL :: Rational -> Lit doublePrimL = DoublePrimL integerL :: Integer -> Lit integerL = IntegerL charL :: Char -> Lit charL = CharL charPrimL :: Char -> Lit charPrimL = CharPrimL stringL :: String -> Lit stringL = StringL stringPrimL :: [Word8] -> Lit stringPrimL = StringPrimL bytesPrimL :: Bytes -> Lit bytesPrimL = BytesPrimL rationalL :: Rational -> Lit rationalL = RationalL litP :: Quote m => Lit -> m Pat litP l = pure (LitP l) varP :: 
Quote m => Name -> m Pat varP v = pure (VarP v) tupP :: Quote m => [m Pat] -> m Pat tupP ps = do { ps1 <- sequenceA ps; pure (TupP ps1)} unboxedTupP :: Quote m => [m Pat] -> m Pat unboxedTupP ps = do { ps1 <- sequenceA ps; pure (UnboxedTupP ps1)} unboxedSumP :: Quote m => m Pat -> SumAlt -> SumArity -> m Pat unboxedSumP p alt arity = do { p1 <- p; pure (UnboxedSumP p1 alt arity) } conP :: Quote m => Name -> [m Type] -> [m Pat] -> m Pat conP n ts ps = do ps' <- sequenceA ps ts' <- sequenceA ts pure (ConP n ts' ps') infixP :: Quote m => m Pat -> Name -> m Pat -> m Pat infixP p1 n p2 = do p1' <- p1 p2' <- p2 pure (InfixP p1' n p2') uInfixP :: Quote m => m Pat -> Name -> m Pat -> m Pat uInfixP p1 n p2 = do p1' <- p1 p2' <- p2 pure (UInfixP p1' n p2') parensP :: Quote m => m Pat -> m Pat parensP p = do p' <- p pure (ParensP p') tildeP :: Quote m => m Pat -> m Pat tildeP p = do p' <- p pure (TildeP p') bangP :: Quote m => m Pat -> m Pat bangP p = do p' <- p pure (BangP p') asP :: Quote m => Name -> m Pat -> m Pat asP n p = do p' <- p pure (AsP n p') wildP :: Quote m => m Pat wildP = pure WildP recP :: Quote m => Name -> [m FieldPat] -> m Pat recP n fps = do fps' <- sequenceA fps pure (RecP n fps') listP :: Quote m => [m Pat] -> m Pat listP ps = do ps' <- sequenceA ps pure (ListP ps') sigP :: Quote m => m Pat -> m Type -> m Pat sigP p t = do p' <- p t' <- t pure (SigP p' t') viewP :: Quote m => m Exp -> m Pat -> m Pat viewP e p = do e' <- e p' <- p pure (ViewP e' p') fieldPat :: Quote m => Name -> m Pat -> m FieldPat fieldPat n p = do p' <- p pure (n, p') ------------------------------------------------------------------------------- -- * Stmt bindS :: Quote m => m Pat -> m Exp -> m Stmt bindS p e = liftA2 BindS p e letS :: Quote m => [m Dec] -> m Stmt letS ds = do { ds1 <- sequenceA ds; pure (LetS ds1) } noBindS :: Quote m => m Exp -> m Stmt noBindS e = do { e1 <- e; pure (NoBindS e1) } parS :: Quote m => [[m Stmt]] -> m Stmt parS sss = do { sss1 <- traverse sequenceA 
sss; pure (ParS sss1) } recS :: Quote m => [m Stmt] -> m Stmt recS ss = do { ss1 <- sequenceA ss; pure (RecS ss1) } ------------------------------------------------------------------------------- -- * Range fromR :: Quote m => m Exp -> m Range fromR x = do { a <- x; pure (FromR a) } fromThenR :: Quote m => m Exp -> m Exp -> m Range fromThenR x y = do { a <- x; b <- y; pure (FromThenR a b) } fromToR :: Quote m => m Exp -> m Exp -> m Range fromToR x y = do { a <- x; b <- y; pure (FromToR a b) } fromThenToR :: Quote m => m Exp -> m Exp -> m Exp -> m Range fromThenToR x y z = do { a <- x; b <- y; c <- z; pure (FromThenToR a b c) } ------------------------------------------------------------------------------- -- * Body normalB :: Quote m => m Exp -> m Body normalB e = do { e1 <- e; pure (NormalB e1) } guardedB :: Quote m => [m (Guard,Exp)] -> m Body guardedB ges = do { ges' <- sequenceA ges; pure (GuardedB ges') } ------------------------------------------------------------------------------- -- * Guard normalG :: Quote m => m Exp -> m Guard normalG e = do { e1 <- e; pure (NormalG e1) } normalGE :: Quote m => m Exp -> m Exp -> m (Guard, Exp) normalGE g e = do { g1 <- g; e1 <- e; pure (NormalG g1, e1) } patG :: Quote m => [m Stmt] -> m Guard patG ss = do { ss' <- sequenceA ss; pure (PatG ss') } patGE :: Quote m => [m Stmt] -> m Exp -> m (Guard, Exp) patGE ss e = do { ss' <- sequenceA ss; e' <- e; pure (PatG ss', e') } ------------------------------------------------------------------------------- -- * Match and Clause -- | Use with 'caseE' match :: Quote m => m Pat -> m Body -> [m Dec] -> m Match match p rhs ds = do { p' <- p; r' <- rhs; ds' <- sequenceA ds; pure (Match p' r' ds') } -- | Use with 'funD' clause :: Quote m => [m Pat] -> m Body -> [m Dec] -> m Clause clause ps r ds = do { ps' <- sequenceA ps; r' <- r; ds' <- sequenceA ds; pure (Clause ps' r' ds') } --------------------------------------------------------------------------- -- * Exp -- | Dynamically binding 
a variable (unhygenic) dyn :: Quote m => String -> m Exp dyn s = pure (VarE (mkName s)) varE :: Quote m => Name -> m Exp varE s = pure (VarE s) conE :: Quote m => Name -> m Exp conE s = pure (ConE s) litE :: Quote m => Lit -> m Exp litE c = pure (LitE c) appE :: Quote m => m Exp -> m Exp -> m Exp appE x y = do { a <- x; b <- y; pure (AppE a b)} appTypeE :: Quote m => m Exp -> m Type -> m Exp appTypeE x t = do { a <- x; s <- t; pure (AppTypeE a s) } parensE :: Quote m => m Exp -> m Exp parensE x = do { x' <- x; pure (ParensE x') } uInfixE :: Quote m => m Exp -> m Exp -> m Exp -> m Exp uInfixE x s y = do { x' <- x; s' <- s; y' <- y; pure (UInfixE x' s' y') } infixE :: Quote m => Maybe (m Exp) -> m Exp -> Maybe (m Exp) -> m Exp infixE (Just x) s (Just y) = do { a <- x; s' <- s; b <- y; pure (InfixE (Just a) s' (Just b))} infixE Nothing s (Just y) = do { s' <- s; b <- y; pure (InfixE Nothing s' (Just b))} infixE (Just x) s Nothing = do { a <- x; s' <- s; pure (InfixE (Just a) s' Nothing)} infixE Nothing s Nothing = do { s' <- s; pure (InfixE Nothing s' Nothing) } infixApp :: Quote m => m Exp -> m Exp -> m Exp -> m Exp infixApp x y z = infixE (Just x) y (Just z) sectionL :: Quote m => m Exp -> m Exp -> m Exp sectionL x y = infixE (Just x) y Nothing sectionR :: Quote m => m Exp -> m Exp -> m Exp sectionR x y = infixE Nothing x (Just y) lamE :: Quote m => [m Pat] -> m Exp -> m Exp lamE ps e = do ps' <- sequenceA ps e' <- e pure (LamE ps' e') -- | Single-arg lambda lam1E :: Quote m => m Pat -> m Exp -> m Exp lam1E p e = lamE [p] e -- | Lambda-case (@\case@) lamCaseE :: Quote m => [m Match] -> m Exp lamCaseE ms = LamCaseE <$> sequenceA ms -- | Lambda-cases (@\cases@) lamCasesE :: Quote m => [m Clause] -> m Exp lamCasesE ms = LamCasesE <$> sequenceA ms tupE :: Quote m => [Maybe (m Exp)] -> m Exp tupE es = do { es1 <- traverse sequenceA es; pure (TupE es1)} unboxedTupE :: Quote m => [Maybe (m Exp)] -> m Exp unboxedTupE es = do { es1 <- traverse sequenceA es; pure (UnboxedTupE 
es1)} unboxedSumE :: Quote m => m Exp -> SumAlt -> SumArity -> m Exp unboxedSumE e alt arity = do { e1 <- e; pure (UnboxedSumE e1 alt arity) } condE :: Quote m => m Exp -> m Exp -> m Exp -> m Exp condE x y z = do { a <- x; b <- y; c <- z; pure (CondE a b c)} multiIfE :: Quote m => [m (Guard, Exp)] -> m Exp multiIfE alts = MultiIfE <$> sequenceA alts letE :: Quote m => [m Dec] -> m Exp -> m Exp letE ds e = do { ds2 <- sequenceA ds; e2 <- e; pure (LetE ds2 e2) } caseE :: Quote m => m Exp -> [m Match] -> m Exp caseE e ms = do { e1 <- e; ms1 <- sequenceA ms; pure (CaseE e1 ms1) } doE :: Quote m => Maybe ModName -> [m Stmt] -> m Exp doE m ss = do { ss1 <- sequenceA ss; pure (DoE m ss1) } mdoE :: Quote m => Maybe ModName -> [m Stmt] -> m Exp mdoE m ss = do { ss1 <- sequenceA ss; pure (MDoE m ss1) } compE :: Quote m => [m Stmt] -> m Exp compE ss = do { ss1 <- sequenceA ss; pure (CompE ss1) } arithSeqE :: Quote m => m Range -> m Exp arithSeqE r = do { r' <- r; pure (ArithSeqE r') } listE :: Quote m => [m Exp] -> m Exp listE es = do { es1 <- sequenceA es; pure (ListE es1) } sigE :: Quote m => m Exp -> m Type -> m Exp sigE e t = do { e1 <- e; t1 <- t; pure (SigE e1 t1) } recConE :: Quote m => Name -> [m (Name,Exp)] -> m Exp recConE c fs = do { flds <- sequenceA fs; pure (RecConE c flds) } recUpdE :: Quote m => m Exp -> [m (Name,Exp)] -> m Exp recUpdE e fs = do { e1 <- e; flds <- sequenceA fs; pure (RecUpdE e1 flds) } stringE :: Quote m => String -> m Exp stringE = litE . 
stringL fieldExp :: Quote m => Name -> m Exp -> m (Name, Exp) fieldExp s e = do { e' <- e; pure (s,e') } -- | @staticE x = [| static x |]@ staticE :: Quote m => m Exp -> m Exp staticE = fmap StaticE unboundVarE :: Quote m => Name -> m Exp unboundVarE s = pure (UnboundVarE s) labelE :: Quote m => String -> m Exp labelE s = pure (LabelE s) implicitParamVarE :: Quote m => String -> m Exp implicitParamVarE n = pure (ImplicitParamVarE n) getFieldE :: Quote m => m Exp -> String -> m Exp getFieldE e f = do e' <- e pure (GetFieldE e' f) projectionE :: Quote m => NonEmpty String -> m Exp projectionE xs = pure (ProjectionE xs) -- ** 'arithSeqE' Shortcuts fromE :: Quote m => m Exp -> m Exp fromE x = do { a <- x; pure (ArithSeqE (FromR a)) } fromThenE :: Quote m => m Exp -> m Exp -> m Exp fromThenE x y = do { a <- x; b <- y; pure (ArithSeqE (FromThenR a b)) } fromToE :: Quote m => m Exp -> m Exp -> m Exp fromToE x y = do { a <- x; b <- y; pure (ArithSeqE (FromToR a b)) } fromThenToE :: Quote m => m Exp -> m Exp -> m Exp -> m Exp fromThenToE x y z = do { a <- x; b <- y; c <- z; pure (ArithSeqE (FromThenToR a b c)) } ------------------------------------------------------------------------------- -- * Dec valD :: Quote m => m Pat -> m Body -> [m Dec] -> m Dec valD p b ds = do { p' <- p ; ds' <- sequenceA ds ; b' <- b ; pure (ValD p' b' ds') } funD :: Quote m => Name -> [m Clause] -> m Dec funD nm cs = do { cs1 <- sequenceA cs ; pure (FunD nm cs1) } tySynD :: Quote m => Name -> [m (TyVarBndr ())] -> m Type -> m Dec tySynD tc tvs rhs = do { tvs1 <- sequenceA tvs ; rhs1 <- rhs ; pure (TySynD tc tvs1 rhs1) } dataD :: Quote m => m Cxt -> Name -> [m (TyVarBndr ())] -> Maybe (m Kind) -> [m Con] -> [m DerivClause] -> m Dec dataD ctxt tc tvs ksig cons derivs = do ctxt1 <- ctxt tvs1 <- sequenceA tvs ksig1 <- sequenceA ksig cons1 <- sequenceA cons derivs1 <- sequenceA derivs pure (DataD ctxt1 tc tvs1 ksig1 cons1 derivs1) newtypeD :: Quote m => m Cxt -> Name -> [m (TyVarBndr ())] -> Maybe (m 
Kind) -> m Con -> [m DerivClause] -> m Dec newtypeD ctxt tc tvs ksig con derivs = do ctxt1 <- ctxt tvs1 <- sequenceA tvs ksig1 <- sequenceA ksig con1 <- con derivs1 <- sequenceA derivs pure (NewtypeD ctxt1 tc tvs1 ksig1 con1 derivs1) classD :: Quote m => m Cxt -> Name -> [m (TyVarBndr ())] -> [FunDep] -> [m Dec] -> m Dec classD ctxt cls tvs fds decs = do tvs1 <- sequenceA tvs decs1 <- sequenceA decs ctxt1 <- ctxt pure $ ClassD ctxt1 cls tvs1 fds decs1 instanceD :: Quote m => m Cxt -> m Type -> [m Dec] -> m Dec instanceD = instanceWithOverlapD Nothing instanceWithOverlapD :: Quote m => Maybe Overlap -> m Cxt -> m Type -> [m Dec] -> m Dec instanceWithOverlapD o ctxt ty decs = do ctxt1 <- ctxt decs1 <- sequenceA decs ty1 <- ty pure $ InstanceD o ctxt1 ty1 decs1 sigD :: Quote m => Name -> m Type -> m Dec sigD fun ty = liftA (SigD fun) $ ty kiSigD :: Quote m => Name -> m Kind -> m Dec kiSigD fun ki = liftA (KiSigD fun) $ ki forImpD :: Quote m => Callconv -> Safety -> String -> Name -> m Type -> m Dec forImpD cc s str n ty = do ty' <- ty pure $ ForeignD (ImportF cc s str n ty') infixLD :: Quote m => Int -> Name -> m Dec infixLD prec nm = pure (InfixD (Fixity prec InfixL) nm) infixRD :: Quote m => Int -> Name -> m Dec infixRD prec nm = pure (InfixD (Fixity prec InfixR) nm) infixND :: Quote m => Int -> Name -> m Dec infixND prec nm = pure (InfixD (Fixity prec InfixN) nm) defaultD :: Quote m => [m Type] -> m Dec defaultD tys = DefaultD <$> sequenceA tys pragInlD :: Quote m => Name -> Inline -> RuleMatch -> Phases -> m Dec pragInlD name inline rm phases = pure $ PragmaD $ InlineP name inline rm phases pragOpaqueD :: Quote m => Name -> m Dec pragOpaqueD name = pure $ PragmaD $ OpaqueP name pragSpecD :: Quote m => Name -> m Type -> Phases -> m Dec pragSpecD n ty phases = do ty1 <- ty pure $ PragmaD $ SpecialiseP n ty1 Nothing phases pragSpecInlD :: Quote m => Name -> m Type -> Inline -> Phases -> m Dec pragSpecInlD n ty inline phases = do ty1 <- ty pure $ PragmaD $ SpecialiseP 
n ty1 (Just inline) phases pragSpecInstD :: Quote m => m Type -> m Dec pragSpecInstD ty = do ty1 <- ty pure $ PragmaD $ SpecialiseInstP ty1 pragRuleD :: Quote m => String -> Maybe [m (TyVarBndr ())] -> [m RuleBndr] -> m Exp -> m Exp -> Phases -> m Dec pragRuleD n ty_bndrs tm_bndrs lhs rhs phases = do ty_bndrs1 <- traverse sequenceA ty_bndrs tm_bndrs1 <- sequenceA tm_bndrs lhs1 <- lhs rhs1 <- rhs pure $ PragmaD $ RuleP n ty_bndrs1 tm_bndrs1 lhs1 rhs1 phases pragAnnD :: Quote m => AnnTarget -> m Exp -> m Dec pragAnnD target expr = do exp1 <- expr pure $ PragmaD $ AnnP target exp1 pragLineD :: Quote m => Int -> String -> m Dec pragLineD line file = pure $ PragmaD $ LineP line file pragCompleteD :: Quote m => [Name] -> Maybe Name -> m Dec pragCompleteD cls mty = pure $ PragmaD $ CompleteP cls mty dataInstD :: Quote m => m Cxt -> (Maybe [m (TyVarBndr ())]) -> m Type -> Maybe (m Kind) -> [m Con] -> [m DerivClause] -> m Dec dataInstD ctxt mb_bndrs ty ksig cons derivs = do ctxt1 <- ctxt mb_bndrs1 <- traverse sequenceA mb_bndrs ty1 <- ty ksig1 <- sequenceA ksig cons1 <- sequenceA cons derivs1 <- sequenceA derivs pure (DataInstD ctxt1 mb_bndrs1 ty1 ksig1 cons1 derivs1) newtypeInstD :: Quote m => m Cxt -> (Maybe [m (TyVarBndr ())]) -> m Type -> Maybe (m Kind) -> m Con -> [m DerivClause] -> m Dec newtypeInstD ctxt mb_bndrs ty ksig con derivs = do ctxt1 <- ctxt mb_bndrs1 <- traverse sequenceA mb_bndrs ty1 <- ty ksig1 <- sequenceA ksig con1 <- con derivs1 <- sequenceA derivs pure (NewtypeInstD ctxt1 mb_bndrs1 ty1 ksig1 con1 derivs1) tySynInstD :: Quote m => m TySynEqn -> m Dec tySynInstD eqn = do eqn1 <- eqn pure (TySynInstD eqn1) dataFamilyD :: Quote m => Name -> [m (TyVarBndr ())] -> Maybe (m Kind) -> m Dec dataFamilyD tc tvs kind = do tvs' <- sequenceA tvs kind' <- sequenceA kind pure $ DataFamilyD tc tvs' kind' openTypeFamilyD :: Quote m => Name -> [m (TyVarBndr ())] -> m FamilyResultSig -> Maybe InjectivityAnn -> m Dec openTypeFamilyD tc tvs res inj = do tvs' <- sequenceA 
tvs res' <- res pure $ OpenTypeFamilyD (TypeFamilyHead tc tvs' res' inj) closedTypeFamilyD :: Quote m => Name -> [m (TyVarBndr ())] -> m FamilyResultSig -> Maybe InjectivityAnn -> [m TySynEqn] -> m Dec closedTypeFamilyD tc tvs result injectivity eqns = do tvs1 <- sequenceA tvs result1 <- result eqns1 <- sequenceA eqns pure (ClosedTypeFamilyD (TypeFamilyHead tc tvs1 result1 injectivity) eqns1) roleAnnotD :: Quote m => Name -> [Role] -> m Dec roleAnnotD name roles = pure $ RoleAnnotD name roles standaloneDerivD :: Quote m => m Cxt -> m Type -> m Dec standaloneDerivD = standaloneDerivWithStrategyD Nothing standaloneDerivWithStrategyD :: Quote m => Maybe (m DerivStrategy) -> m Cxt -> m Type -> m Dec standaloneDerivWithStrategyD mdsq ctxtq tyq = do mds <- sequenceA mdsq ctxt <- ctxtq ty <- tyq pure $ StandaloneDerivD mds ctxt ty defaultSigD :: Quote m => Name -> m Type -> m Dec defaultSigD n tyq = do ty <- tyq pure $ DefaultSigD n ty -- | Pattern synonym declaration patSynD :: Quote m => Name -> m PatSynArgs -> m PatSynDir -> m Pat -> m Dec patSynD name args dir pat = do args' <- args dir' <- dir pat' <- pat pure (PatSynD name args' dir' pat') -- | Pattern synonym type signature patSynSigD :: Quote m => Name -> m Type -> m Dec patSynSigD nm ty = do ty' <- ty pure $ PatSynSigD nm ty' -- | Implicit parameter binding declaration. Can only be used in let -- and where clauses which consist entirely of implicit bindings. 
implicitParamBindD :: Quote m => String -> m Exp -> m Dec implicitParamBindD n e = do e' <- e pure $ ImplicitParamBindD n e' tySynEqn :: Quote m => (Maybe [m (TyVarBndr ())]) -> m Type -> m Type -> m TySynEqn tySynEqn mb_bndrs lhs rhs = do mb_bndrs1 <- traverse sequenceA mb_bndrs lhs1 <- lhs rhs1 <- rhs pure (TySynEqn mb_bndrs1 lhs1 rhs1) cxt :: Quote m => [m Pred] -> m Cxt cxt = sequenceA derivClause :: Quote m => Maybe (m DerivStrategy) -> [m Pred] -> m DerivClause derivClause mds p = do mds' <- sequenceA mds p' <- cxt p pure $ DerivClause mds' p' stockStrategy :: Quote m => m DerivStrategy stockStrategy = pure StockStrategy anyclassStrategy :: Quote m => m DerivStrategy anyclassStrategy = pure AnyclassStrategy newtypeStrategy :: Quote m => m DerivStrategy newtypeStrategy = pure NewtypeStrategy viaStrategy :: Quote m => m Type -> m DerivStrategy viaStrategy = fmap ViaStrategy normalC :: Quote m => Name -> [m BangType] -> m Con normalC con strtys = liftA (NormalC con) $ sequenceA strtys recC :: Quote m => Name -> [m VarBangType] -> m Con recC con varstrtys = liftA (RecC con) $ sequenceA varstrtys infixC :: Quote m => m (Bang, Type) -> Name -> m (Bang, Type) -> m Con infixC st1 con st2 = do st1' <- st1 st2' <- st2 pure $ InfixC st1' con st2' forallC :: Quote m => [m (TyVarBndr Specificity)] -> m Cxt -> m Con -> m Con forallC ns ctxt con = do ns' <- sequenceA ns ctxt' <- ctxt con' <- con pure $ ForallC ns' ctxt' con' gadtC :: Quote m => [Name] -> [m StrictType] -> m Type -> m Con gadtC cons strtys ty = liftA2 (GadtC cons) (sequenceA strtys) ty recGadtC :: Quote m => [Name] -> [m VarStrictType] -> m Type -> m Con recGadtC cons varstrtys ty = liftA2 (RecGadtC cons) (sequenceA varstrtys) ty ------------------------------------------------------------------------------- -- * Type forallT :: Quote m => [m (TyVarBndr Specificity)] -> m Cxt -> m Type -> m Type forallT tvars ctxt ty = do tvars1 <- sequenceA tvars ctxt1 <- ctxt ty1 <- ty pure $ ForallT tvars1 ctxt1 ty1 
forallVisT :: Quote m => [m (TyVarBndr ())] -> m Type -> m Type forallVisT tvars ty = ForallVisT <$> sequenceA tvars <*> ty varT :: Quote m => Name -> m Type varT = pure . VarT conT :: Quote m => Name -> m Type conT = pure . ConT infixT :: Quote m => m Type -> Name -> m Type -> m Type infixT t1 n t2 = do t1' <- t1 t2' <- t2 pure (InfixT t1' n t2') uInfixT :: Quote m => m Type -> Name -> m Type -> m Type uInfixT t1 n t2 = do t1' <- t1 t2' <- t2 pure (UInfixT t1' n t2') promotedInfixT :: Quote m => m Type -> Name -> m Type -> m Type promotedInfixT t1 n t2 = do t1' <- t1 t2' <- t2 pure (PromotedInfixT t1' n t2') promotedUInfixT :: Quote m => m Type -> Name -> m Type -> m Type promotedUInfixT t1 n t2 = do t1' <- t1 t2' <- t2 pure (PromotedUInfixT t1' n t2') parensT :: Quote m => m Type -> m Type parensT t = do t' <- t pure (ParensT t') appT :: Quote m => m Type -> m Type -> m Type appT t1 t2 = do t1' <- t1 t2' <- t2 pure $ AppT t1' t2' appKindT :: Quote m => m Type -> m Kind -> m Type appKindT ty ki = do ty' <- ty ki' <- ki pure $ AppKindT ty' ki' arrowT :: Quote m => m Type arrowT = pure ArrowT mulArrowT :: Quote m => m Type mulArrowT = pure MulArrowT listT :: Quote m => m Type listT = pure ListT litT :: Quote m => m TyLit -> m Type litT l = fmap LitT l tupleT :: Quote m => Int -> m Type tupleT i = pure (TupleT i) unboxedTupleT :: Quote m => Int -> m Type unboxedTupleT i = pure (UnboxedTupleT i) unboxedSumT :: Quote m => SumArity -> m Type unboxedSumT arity = pure (UnboxedSumT arity) sigT :: Quote m => m Type -> m Kind -> m Type sigT t k = do t' <- t k' <- k pure $ SigT t' k' equalityT :: Quote m => m Type equalityT = pure EqualityT wildCardT :: Quote m => m Type wildCardT = pure WildCardT implicitParamT :: Quote m => String -> m Type -> m Type implicitParamT n t = do t' <- t pure $ ImplicitParamT n t' {-# DEPRECATED classP "As of template-haskell-2.10, constraint predicates (Pred) are just types (Type), in keeping with ConstraintKinds. Please use 'conT' and 'appT'." 
#-} classP :: Quote m => Name -> [m Type] -> m Pred classP cla tys = do tysl <- sequenceA tys pure (foldl AppT (ConT cla) tysl) {-# DEPRECATED equalP "As of template-haskell-2.10, constraint predicates (Pred) are just types (Type), in keeping with ConstraintKinds. Please see 'equalityT'." #-} equalP :: Quote m => m Type -> m Type -> m Pred equalP tleft tright = do tleft1 <- tleft tright1 <- tright eqT <- equalityT pure (foldl AppT eqT [tleft1, tright1]) promotedT :: Quote m => Name -> m Type promotedT = pure . PromotedT promotedTupleT :: Quote m => Int -> m Type promotedTupleT i = pure (PromotedTupleT i) promotedNilT :: Quote m => m Type promotedNilT = pure PromotedNilT promotedConsT :: Quote m => m Type promotedConsT = pure PromotedConsT noSourceUnpackedness, sourceNoUnpack, sourceUnpack :: Quote m => m SourceUnpackedness noSourceUnpackedness = pure NoSourceUnpackedness sourceNoUnpack = pure SourceNoUnpack sourceUnpack = pure SourceUnpack noSourceStrictness, sourceLazy, sourceStrict :: Quote m => m SourceStrictness noSourceStrictness = pure NoSourceStrictness sourceLazy = pure SourceLazy sourceStrict = pure SourceStrict {-# DEPRECATED isStrict ["Use 'bang'. See https://gitlab.haskell.org/ghc/ghc/wikis/migration/8.0. ", "Example usage: 'bang noSourceUnpackedness sourceStrict'"] #-} {-# DEPRECATED notStrict ["Use 'bang'. See https://gitlab.haskell.org/ghc/ghc/wikis/migration/8.0. ", "Example usage: 'bang noSourceUnpackedness noSourceStrictness'"] #-} {-# DEPRECATED unpacked ["Use 'bang'. See https://gitlab.haskell.org/ghc/ghc/wikis/migration/8.0. 
", "Example usage: 'bang sourceUnpack sourceStrict'"] #-} isStrict, notStrict, unpacked :: Quote m => m Strict isStrict = bang noSourceUnpackedness sourceStrict notStrict = bang noSourceUnpackedness noSourceStrictness unpacked = bang sourceUnpack sourceStrict bang :: Quote m => m SourceUnpackedness -> m SourceStrictness -> m Bang bang u s = do u' <- u s' <- s pure (Bang u' s') bangType :: Quote m => m Bang -> m Type -> m BangType bangType = liftA2 (,) varBangType :: Quote m => Name -> m BangType -> m VarBangType varBangType v bt = (\(b, t) -> (v, b, t)) <$> bt {-# DEPRECATED strictType "As of @template-haskell-2.11.0.0@, 'StrictType' has been replaced by 'BangType'. Please use 'bangType' instead." #-} strictType :: Quote m => m Strict -> m Type -> m StrictType strictType = bangType {-# DEPRECATED varStrictType "As of @template-haskell-2.11.0.0@, 'VarStrictType' has been replaced by 'VarBangType'. Please use 'varBangType' instead." #-} varStrictType :: Quote m => Name -> m StrictType -> m VarStrictType varStrictType = varBangType -- * Type Literals -- MonadFail here complicates things (a lot) because it would mean we would -- have to emit a MonadFail constraint during typechecking if there was any -- chance the desugaring would use numTyLit, which in general is hard to -- predict. 
numTyLit :: Quote m => Integer -> m TyLit numTyLit n = if n >= 0 then pure (NumTyLit n) else error ("Negative type-level number: " ++ show n) strTyLit :: Quote m => String -> m TyLit strTyLit s = pure (StrTyLit s) charTyLit :: Quote m => Char -> m TyLit charTyLit c = pure (CharTyLit c) ------------------------------------------------------------------------------- -- * Kind plainTV :: Quote m => Name -> m (TyVarBndr ()) plainTV n = pure $ PlainTV n () plainInvisTV :: Quote m => Name -> Specificity -> m (TyVarBndr Specificity) plainInvisTV n s = pure $ PlainTV n s kindedTV :: Quote m => Name -> m Kind -> m (TyVarBndr ()) kindedTV n = fmap (KindedTV n ()) kindedInvisTV :: Quote m => Name -> Specificity -> m Kind -> m (TyVarBndr Specificity) kindedInvisTV n s = fmap (KindedTV n s) specifiedSpec :: Specificity specifiedSpec = SpecifiedSpec inferredSpec :: Specificity inferredSpec = InferredSpec varK :: Name -> Kind varK = VarT conK :: Name -> Kind conK = ConT tupleK :: Int -> Kind tupleK = TupleT arrowK :: Kind arrowK = ArrowT listK :: Kind listK = ListT appK :: Kind -> Kind -> Kind appK = AppT starK :: Quote m => m Kind starK = pure StarT constraintK :: Quote m => m Kind constraintK = pure ConstraintT ------------------------------------------------------------------------------- -- * Type family result noSig :: Quote m => m FamilyResultSig noSig = pure NoSig kindSig :: Quote m => m Kind -> m FamilyResultSig kindSig = fmap KindSig tyVarSig :: Quote m => m (TyVarBndr ()) -> m FamilyResultSig tyVarSig = fmap TyVarSig ------------------------------------------------------------------------------- -- * Injectivity annotation injectivityAnn :: Name -> [Name] -> InjectivityAnn injectivityAnn = TH.InjectivityAnn ------------------------------------------------------------------------------- -- * Role nominalR, representationalR, phantomR, inferR :: Role nominalR = NominalR representationalR = RepresentationalR phantomR = PhantomR inferR = InferR 
------------------------------------------------------------------------------- -- * Callconv cCall, stdCall, cApi, prim, javaScript :: Callconv cCall = CCall stdCall = StdCall cApi = CApi prim = Prim javaScript = JavaScript ------------------------------------------------------------------------------- -- * Safety unsafe, safe, interruptible :: Safety unsafe = Unsafe safe = Safe interruptible = Interruptible ------------------------------------------------------------------------------- -- * FunDep funDep :: [Name] -> [Name] -> FunDep funDep = FunDep ------------------------------------------------------------------------------- -- * RuleBndr ruleVar :: Quote m => Name -> m RuleBndr ruleVar = pure . RuleVar typedRuleVar :: Quote m => Name -> m Type -> m RuleBndr typedRuleVar n ty = TypedRuleVar n <$> ty ------------------------------------------------------------------------------- -- * AnnTarget valueAnnotation :: Name -> AnnTarget valueAnnotation = ValueAnnotation typeAnnotation :: Name -> AnnTarget typeAnnotation = TypeAnnotation moduleAnnotation :: AnnTarget moduleAnnotation = ModuleAnnotation ------------------------------------------------------------------------------- -- * Pattern Synonyms (sub constructs) unidir, implBidir :: Quote m => m PatSynDir unidir = pure Unidir implBidir = pure ImplBidir explBidir :: Quote m => [m Clause] -> m PatSynDir explBidir cls = do cls' <- sequenceA cls pure (ExplBidir cls') prefixPatSyn :: Quote m => [Name] -> m PatSynArgs prefixPatSyn args = pure $ PrefixPatSyn args recordPatSyn :: Quote m => [Name] -> m PatSynArgs recordPatSyn sels = pure $ RecordPatSyn sels infixPatSyn :: Quote m => Name -> Name -> m PatSynArgs infixPatSyn arg1 arg2 = pure $ InfixPatSyn arg1 arg2 -------------------------------------------------------------- -- * Useful helper function appsE :: Quote m => [m Exp] -> m Exp appsE [] = error "appsE []" appsE [x] = x appsE (x:y:zs) = appsE ( (appE x y) : zs ) -- | pure the Module at the place of splicing. 
Can be used as an -- input for 'reifyModule'. thisModule :: Q Module thisModule = do loc <- location pure $ Module (mkPkgName $ loc_package loc) (mkModName $ loc_module loc) -------------------------------------------------------------- -- * Documentation combinators -- | Attaches Haddock documentation to the declaration provided. Unlike -- 'putDoc', the names do not need to be in scope when calling this function so -- it can be used for quoted declarations and anything else currently being -- spliced. -- Not all declarations can have documentation attached to them. For those that -- can't, 'withDecDoc' will return it unchanged without any side effects. withDecDoc :: String -> Q Dec -> Q Dec withDecDoc doc dec = do dec' <- dec case doc_loc dec' of Just loc -> qAddModFinalizer $ qPutDoc loc doc Nothing -> pure () pure dec' where doc_loc (FunD n _) = Just $ DeclDoc n doc_loc (ValD (VarP n) _ _) = Just $ DeclDoc n doc_loc (DataD _ n _ _ _ _) = Just $ DeclDoc n doc_loc (NewtypeD _ n _ _ _ _) = Just $ DeclDoc n doc_loc (TySynD n _ _) = Just $ DeclDoc n doc_loc (ClassD _ n _ _ _) = Just $ DeclDoc n doc_loc (SigD n _) = Just $ DeclDoc n doc_loc (ForeignD (ImportF _ _ _ n _)) = Just $ DeclDoc n doc_loc (ForeignD (ExportF _ _ n _)) = Just $ DeclDoc n doc_loc (InfixD _ n) = Just $ DeclDoc n doc_loc (DataFamilyD n _ _) = Just $ DeclDoc n doc_loc (OpenTypeFamilyD (TypeFamilyHead n _ _ _)) = Just $ DeclDoc n doc_loc (ClosedTypeFamilyD (TypeFamilyHead n _ _ _) _) = Just $ DeclDoc n doc_loc (PatSynD n _ _ _) = Just $ DeclDoc n doc_loc (PatSynSigD n _) = Just $ DeclDoc n -- For instances we just pass along the full type doc_loc (InstanceD _ _ t _) = Just $ InstDoc t doc_loc (DataInstD _ _ t _ _ _) = Just $ InstDoc t doc_loc (NewtypeInstD _ _ t _ _ _) = Just $ InstDoc t doc_loc (TySynInstD (TySynEqn _ t _)) = Just $ InstDoc t -- Declarations that can't have documentation attached to -- ValDs that aren't a simple variable pattern doc_loc (ValD _ _ _) = Nothing doc_loc (KiSigD _ _) = 
Nothing doc_loc (PragmaD _) = Nothing doc_loc (RoleAnnotD _ _) = Nothing doc_loc (StandaloneDerivD _ _ _) = Nothing doc_loc (DefaultSigD _ _) = Nothing doc_loc (ImplicitParamBindD _ _) = Nothing doc_loc (DefaultD _) = Nothing -- | Variant of 'withDecDoc' that applies the same documentation to -- multiple declarations. Useful for documenting quoted declarations. withDecsDoc :: String -> Q [Dec] -> Q [Dec] withDecsDoc doc decs = decs >>= mapM (withDecDoc doc . pure) -- | Variant of 'funD' that attaches Haddock documentation. funD_doc :: Name -> [Q Clause] -> Maybe String -- ^ Documentation to attach to function -> [Maybe String] -- ^ Documentation to attach to arguments -> Q Dec funD_doc nm cs mfun_doc arg_docs = do qAddModFinalizer $ sequence_ [putDoc (ArgDoc nm i) s | (i, Just s) <- zip [0..] arg_docs] let dec = funD nm cs case mfun_doc of Just fun_doc -> withDecDoc fun_doc dec Nothing -> funD nm cs -- | Variant of 'dataD' that attaches Haddock documentation. dataD_doc :: Q Cxt -> Name -> [Q (TyVarBndr ())] -> Maybe (Q Kind) -> [(Q Con, Maybe String, [Maybe String])] -- ^ List of constructors, documentation for the constructor, and -- documentation for the arguments -> [Q DerivClause] -> Maybe String -- ^ Documentation to attach to the data declaration -> Q Dec dataD_doc ctxt tc tvs ksig cons_with_docs derivs mdoc = do qAddModFinalizer $ mapM_ docCons cons_with_docs let dec = dataD ctxt tc tvs ksig (map (\(con, _, _) -> con) cons_with_docs) derivs maybe dec (flip withDecDoc dec) mdoc -- | Variant of 'newtypeD' that attaches Haddock documentation. 
newtypeD_doc :: Q Cxt -> Name -> [Q (TyVarBndr ())] -> Maybe (Q Kind) -> (Q Con, Maybe String, [Maybe String]) -- ^ The constructor, documentation for the constructor, and -- documentation for the arguments -> [Q DerivClause] -> Maybe String -- ^ Documentation to attach to the newtype declaration -> Q Dec newtypeD_doc ctxt tc tvs ksig con_with_docs@(con, _, _) derivs mdoc = do qAddModFinalizer $ docCons con_with_docs let dec = newtypeD ctxt tc tvs ksig con derivs maybe dec (flip withDecDoc dec) mdoc -- | Variant of 'dataInstD' that attaches Haddock documentation. dataInstD_doc :: Q Cxt -> (Maybe [Q (TyVarBndr ())]) -> Q Type -> Maybe (Q Kind) -> [(Q Con, Maybe String, [Maybe String])] -- ^ List of constructors, documentation for the constructor, and -- documentation for the arguments -> [Q DerivClause] -> Maybe String -- ^ Documentation to attach to the instance declaration -> Q Dec dataInstD_doc ctxt mb_bndrs ty ksig cons_with_docs derivs mdoc = do qAddModFinalizer $ mapM_ docCons cons_with_docs let dec = dataInstD ctxt mb_bndrs ty ksig (map (\(con, _, _) -> con) cons_with_docs) derivs maybe dec (flip withDecDoc dec) mdoc -- | Variant of 'newtypeInstD' that attaches Haddock documentation. newtypeInstD_doc :: Q Cxt -> (Maybe [Q (TyVarBndr ())]) -> Q Type -> Maybe (Q Kind) -> (Q Con, Maybe String, [Maybe String]) -- ^ The constructor, documentation for the constructor, and -- documentation for the arguments -> [Q DerivClause] -> Maybe String -- ^ Documentation to attach to the instance declaration -> Q Dec newtypeInstD_doc ctxt mb_bndrs ty ksig con_with_docs@(con, _, _) derivs mdoc = do qAddModFinalizer $ docCons con_with_docs let dec = newtypeInstD ctxt mb_bndrs ty ksig con derivs maybe dec (flip withDecDoc dec) mdoc -- | Variant of 'patSynD' that attaches Haddock documentation. 
patSynD_doc :: Name -> Q PatSynArgs -> Q PatSynDir -> Q Pat -> Maybe String -- ^ Documentation to attach to the pattern synonym -> [Maybe String] -- ^ Documentation to attach to the pattern arguments -> Q Dec patSynD_doc name args dir pat mdoc arg_docs = do qAddModFinalizer $ sequence_ [putDoc (ArgDoc name i) s | (i, Just s) <- zip [0..] arg_docs] let dec = patSynD name args dir pat maybe dec (flip withDecDoc dec) mdoc -- | Document a data/newtype constructor with its arguments. docCons :: (Q Con, Maybe String, [Maybe String]) -> Q () docCons (c, md, arg_docs) = do c' <- c -- Attach docs to the constructors sequence_ [ putDoc (DeclDoc nm) d | Just d <- [md], nm <- get_cons_names c' ] -- Attach docs to the arguments case c' of -- Record selector documentation isn't stored in the argument map, -- but in the declaration map instead RecC _ var_bang_types -> sequence_ [ putDoc (DeclDoc nm) arg_doc | (Just arg_doc, (nm, _, _)) <- zip arg_docs var_bang_types ] _ -> sequence_ [ putDoc (ArgDoc nm i) arg_doc | nm <- get_cons_names c' , (i, Just arg_doc) <- zip [0..] arg_docs ] where get_cons_names :: Con -> [Name] get_cons_names (NormalC n _) = [n] get_cons_names (RecC n _) = [n] get_cons_names (InfixC _ n _) = [n] get_cons_names (ForallC _ _ cons) = get_cons_names cons -- GadtC can have multiple names, e.g -- > data Bar a where -- > MkBar1, MkBar2 :: a -> Bar a -- Will have one GadtC with [MkBar1, MkBar2] as names get_cons_names (GadtC ns _ _) = ns get_cons_names (RecGadtC ns _ _) = ns ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/Lib/Map.hs0000644000000000000000000000765514470055371026060 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE Safe #-} -- This is a non-exposed internal module -- -- The code in this module has been ripped from containers-0.5.5.1:Data.Map.Base [1] almost -- verbatimely to avoid a dependency of 'template-haskell' on the containers package. 
-- -- [1] see https://hackage.haskell.org/package/containers-0.5.5.1 -- -- The original code is BSD-licensed and copyrighted by Daan Leijen, Andriy Palamarchuk, et al. module Language.Haskell.TH.Lib.Map ( Map , empty , insert , Language.Haskell.TH.Lib.Map.lookup ) where import Prelude data Map k a = Bin {-# UNPACK #-} !Size !k a !(Map k a) !(Map k a) | Tip type Size = Int empty :: Map k a empty = Tip {-# INLINE empty #-} singleton :: k -> a -> Map k a singleton k x = Bin 1 k x Tip Tip {-# INLINE singleton #-} size :: Map k a -> Int size Tip = 0 size (Bin sz _ _ _ _) = sz {-# INLINE size #-} lookup :: Ord k => k -> Map k a -> Maybe a lookup = go where go _ Tip = Nothing go !k (Bin _ kx x l r) = case compare k kx of LT -> go k l GT -> go k r EQ -> Just x {-# INLINABLE lookup #-} insert :: Ord k => k -> a -> Map k a -> Map k a insert = go where go :: Ord k => k -> a -> Map k a -> Map k a go !kx x Tip = singleton kx x go !kx x (Bin sz ky y l r) = case compare kx ky of LT -> balanceL ky y (go kx x l) r GT -> balanceR ky y l (go kx x r) EQ -> Bin sz kx x l r {-# INLINABLE insert #-} balanceL :: k -> a -> Map k a -> Map k a -> Map k a balanceL k x l r = case r of Tip -> case l of Tip -> Bin 1 k x Tip Tip (Bin _ _ _ Tip Tip) -> Bin 2 k x l Tip (Bin _ lk lx Tip (Bin _ lrk lrx _ _)) -> Bin 3 lrk lrx (Bin 1 lk lx Tip Tip) (Bin 1 k x Tip Tip) (Bin _ lk lx ll@(Bin _ _ _ _ _) Tip) -> Bin 3 lk lx ll (Bin 1 k x Tip Tip) (Bin ls lk lx ll@(Bin lls _ _ _ _) lr@(Bin lrs lrk lrx lrl lrr)) | lrs < ratio*lls -> Bin (1+ls) lk lx ll (Bin (1+lrs) k x lr Tip) | otherwise -> Bin (1+ls) lrk lrx (Bin (1+lls+size lrl) lk lx ll lrl) (Bin (1+size lrr) k x lrr Tip) (Bin rs _ _ _ _) -> case l of Tip -> Bin (1+rs) k x Tip r (Bin ls lk lx ll lr) | ls > delta*rs -> case (ll, lr) of (Bin lls _ _ _ _, Bin lrs lrk lrx lrl lrr) | lrs < ratio*lls -> Bin (1+ls+rs) lk lx ll (Bin (1+rs+lrs) k x lr r) | otherwise -> Bin (1+ls+rs) lrk lrx (Bin (1+lls+size lrl) lk lx ll lrl) (Bin (1+rs+size lrr) k x lrr r) (_, _) 
-> error "Failure in Data.Map.balanceL" | otherwise -> Bin (1+ls+rs) k x l r {-# NOINLINE balanceL #-} balanceR :: k -> a -> Map k a -> Map k a -> Map k a balanceR k x l r = case l of Tip -> case r of Tip -> Bin 1 k x Tip Tip (Bin _ _ _ Tip Tip) -> Bin 2 k x Tip r (Bin _ rk rx Tip rr@(Bin _ _ _ _ _)) -> Bin 3 rk rx (Bin 1 k x Tip Tip) rr (Bin _ rk rx (Bin _ rlk rlx _ _) Tip) -> Bin 3 rlk rlx (Bin 1 k x Tip Tip) (Bin 1 rk rx Tip Tip) (Bin rs rk rx rl@(Bin rls rlk rlx rll rlr) rr@(Bin rrs _ _ _ _)) | rls < ratio*rrs -> Bin (1+rs) rk rx (Bin (1+rls) k x Tip rl) rr | otherwise -> Bin (1+rs) rlk rlx (Bin (1+size rll) k x Tip rll) (Bin (1+rrs+size rlr) rk rx rlr rr) (Bin ls _ _ _ _) -> case r of Tip -> Bin (1+ls) k x l Tip (Bin rs rk rx rl rr) | rs > delta*ls -> case (rl, rr) of (Bin rls rlk rlx rll rlr, Bin rrs _ _ _ _) | rls < ratio*rrs -> Bin (1+ls+rs) rk rx (Bin (1+ls+rls) k x l rl) rr | otherwise -> Bin (1+ls+rs) rlk rlx (Bin (1+ls+size rll) k x l rll) (Bin (1+rrs+size rlr) rk rx rlr rr) (_, _) -> error "Failure in Data.Map.balanceR" | otherwise -> Bin (1+ls+rs) k x l r {-# NOINLINE balanceR #-} delta,ratio :: Int delta = 3 ratio = 2 ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/Ppr.hs0000644000000000000000000011623014472375231025366 0ustar0000000000000000{-# LANGUAGE Safe #-} {-# LANGUAGE LambdaCase #-} -- | contains a prettyprinter for the -- Template Haskell datatypes module Language.Haskell.TH.Ppr where -- All of the exports from this module should -- be "public" functions. The main module TH -- re-exports them all. 
import Text.PrettyPrint (render) import Language.Haskell.TH.PprLib import Language.Haskell.TH.Syntax import Data.Word ( Word8 ) import Data.Char ( toLower, chr) import GHC.Show ( showMultiLineString ) import GHC.Lexeme( startsVarSym ) import Data.Ratio ( numerator, denominator ) import Data.Foldable ( toList ) import Prelude hiding ((<>)) nestDepth :: Int nestDepth = 4 type Precedence = Int appPrec, opPrec, unopPrec, sigPrec, noPrec :: Precedence appPrec = 4 -- Argument of a function application opPrec = 3 -- Argument of an infix operator unopPrec = 2 -- Argument of an unresolved infix operator sigPrec = 1 -- Argument of an explicit type signature noPrec = 0 -- Others parensIf :: Bool -> Doc -> Doc parensIf True d = parens d parensIf False d = d ------------------------------ pprint :: Ppr a => a -> String pprint x = render $ to_HPJ_Doc $ ppr x class Ppr a where ppr :: a -> Doc ppr_list :: [a] -> Doc ppr_list = vcat . map ppr instance Ppr a => Ppr [a] where ppr x = ppr_list x ------------------------------ instance Ppr Name where ppr v = pprName v ------------------------------ instance Ppr Info where ppr (TyConI d) = ppr d ppr (ClassI d is) = ppr d $$ vcat (map ppr is) ppr (FamilyI d is) = ppr d $$ vcat (map ppr is) ppr (PrimTyConI name arity is_unlifted) = text "Primitive" <+> (if is_unlifted then text "unlifted" else empty) <+> text "type constructor" <+> quotes (ppr name) <+> parens (text "arity" <+> int arity) ppr (ClassOpI v ty cls) = text "Class op from" <+> ppr cls <> colon <+> ppr_sig v ty ppr (DataConI v ty tc) = text "Constructor from" <+> ppr tc <> colon <+> ppr_sig v ty ppr (PatSynI nm ty) = pprPatSynSig nm ty ppr (TyVarI v ty) = text "Type variable" <+> ppr v <+> equals <+> ppr ty ppr (VarI v ty mb_d) = vcat [ppr_sig v ty, case mb_d of { Nothing -> empty; Just d -> ppr d }] ppr_sig :: Name -> Type -> Doc ppr_sig v ty = pprName' Applied v <+> dcolon <+> ppr ty pprFixity :: Name -> Fixity -> Doc pprFixity _ f | f == defaultFixity = empty pprFixity v 
(Fixity i d) = ppr_fix d <+> int i <+> pprName' Infix v where ppr_fix InfixR = text "infixr" ppr_fix InfixL = text "infixl" ppr_fix InfixN = text "infix" -- | Pretty prints a pattern synonym type signature pprPatSynSig :: Name -> PatSynType -> Doc pprPatSynSig nm ty = text "pattern" <+> pprPrefixOcc nm <+> dcolon <+> pprPatSynType ty -- | Pretty prints a pattern synonym's type; follows the usual -- conventions to print a pattern synonym type compactly, yet -- unambiguously. See the note on 'PatSynType' and the section on -- pattern synonyms in the GHC user's guide for more information. pprPatSynType :: PatSynType -> Doc pprPatSynType ty@(ForallT uniTys reqs ty'@(ForallT exTys provs ty'')) | null exTys, null provs = ppr (ForallT uniTys reqs ty'') | null uniTys, null reqs = noreqs <+> ppr ty' | null reqs = pprForallBndrs uniTys <+> noreqs <+> ppr ty' | otherwise = ppr ty where noreqs = text "() =>" pprForallBndrs tvs = text "forall" <+> hsep (map ppr tvs) <+> text "." pprPatSynType ty = ppr ty ------------------------------ instance Ppr Module where ppr (Module pkg m) = text (pkgString pkg) <+> text (modString m) instance Ppr ModuleInfo where ppr (ModuleInfo imps) = text "Module" <+> vcat (map ppr imps) ------------------------------ instance Ppr Exp where ppr = pprExp noPrec pprPrefixOcc :: Name -> Doc -- Print operators with parens around them pprPrefixOcc n = parensIf (isSymOcc n) (ppr n) isSymOcc :: Name -> Bool isSymOcc n = case nameBase n of [] -> True -- Empty name; weird (c:_) -> startsVarSym c -- c.f. OccName.startsVarSym in GHC itself pprInfixExp :: Exp -> Doc pprInfixExp (VarE v) = pprName' Infix v pprInfixExp (ConE v) = pprName' Infix v pprInfixExp (UnboundVarE v) = pprName' Infix v -- This case will only ever be reached in exceptional circumstances. -- For example, when printing an error message in case of a malformed expression. 
pprInfixExp e = text "`" <> ppr e <> text "`" pprExp :: Precedence -> Exp -> Doc pprExp _ (VarE v) = pprName' Applied v pprExp _ (ConE c) = pprName' Applied c pprExp i (LitE l) = pprLit i l pprExp i (AppE e1 e2) = parensIf (i >= appPrec) $ pprExp opPrec e1 <+> pprExp appPrec e2 pprExp i (AppTypeE e t) = parensIf (i >= appPrec) $ pprExp opPrec e <+> char '@' <> pprParendType t pprExp _ (ParensE e) = parens (pprExp noPrec e) pprExp i (UInfixE e1 op e2) = parensIf (i > unopPrec) $ pprExp unopPrec e1 <+> pprInfixExp op <+> pprExp unopPrec e2 pprExp i (InfixE (Just e1) op (Just e2)) = parensIf (i >= opPrec) $ pprExp opPrec e1 <+> pprInfixExp op <+> pprExp opPrec e2 pprExp _ (InfixE me1 op me2) = parens $ pprMaybeExp noPrec me1 <+> pprInfixExp op <+> pprMaybeExp noPrec me2 pprExp i (LamE [] e) = pprExp i e -- #13856 pprExp i (LamE ps e) = parensIf (i > noPrec) $ char '\\' <> hsep (map (pprPat appPrec) ps) <+> text "->" <+> ppr e pprExp i (LamCaseE ms) = parensIf (i > noPrec) $ text "\\case" $$ braces (semiSep ms) pprExp i (LamCasesE ms) = parensIf (i > noPrec) $ text "\\cases" $$ braces (semi_sep ms) where semi_sep = sep . punctuate semi . map (pprClause False) pprExp i (TupE es) | [Just e] <- es = pprExp i (ConE (tupleDataName 1) `AppE` e) | otherwise = parens (commaSepWith (pprMaybeExp noPrec) es) pprExp _ (UnboxedTupE es) = hashParens (commaSepWith (pprMaybeExp noPrec) es) pprExp _ (UnboxedSumE e alt arity) = unboxedSumBars (ppr e) alt arity -- Nesting in Cond is to avoid potential problems in do statements pprExp i (CondE guard true false) = parensIf (i > noPrec) $ sep [text "if" <+> ppr guard, nest 1 $ text "then" <+> ppr true, nest 1 $ text "else" <+> ppr false] pprExp i (MultiIfE alts) = parensIf (i > noPrec) $ vcat $ case alts of [] -> [text "if {}"] (alt : alts') -> text "if" <+> pprGuarded arrow alt : map (nest 3 . 
pprGuarded arrow) alts' pprExp i (LetE ds_ e) = parensIf (i > noPrec) $ text "let" <+> pprDecs ds_ $$ text " in" <+> ppr e where pprDecs [] = empty pprDecs [d] = ppr d pprDecs ds = braces (semiSep ds) pprExp i (CaseE e ms) = parensIf (i > noPrec) $ text "case" <+> ppr e <+> text "of" $$ braces (semiSep ms) pprExp i (DoE m ss_) = parensIf (i > noPrec) $ pprQualifier m <> text "do" <+> pprStms ss_ where pprQualifier Nothing = empty pprQualifier (Just modName) = text (modString modName) <> char '.' pprStms [] = empty pprStms [s] = ppr s pprStms ss = braces (semiSep ss) pprExp i (MDoE m ss_) = parensIf (i > noPrec) $ pprQualifier m <> text "mdo" <+> pprStms ss_ where pprQualifier Nothing = empty pprQualifier (Just modName) = text (modString modName) <> char '.' pprStms [] = empty pprStms [s] = ppr s pprStms ss = braces (semiSep ss) pprExp _ (CompE []) = text "<>" -- This will probably break with fixity declarations - would need a ';' pprExp _ (CompE ss) = if null ss' -- If there are no statements in a list comprehension besides the last -- one, we simply treat it like a normal list. then text "[" <> ppr s <> text "]" else text "[" <> ppr s <+> bar <+> commaSep ss' <> text "]" where s = last ss ss' = init ss pprExp _ (ArithSeqE d) = ppr d pprExp _ (ListE es) = brackets (commaSep es) pprExp i (SigE e t) = parensIf (i > noPrec) $ pprExp sigPrec e <+> dcolon <+> ppr t pprExp _ (RecConE nm fs) = pprName' Applied nm <> braces (pprFields fs) pprExp _ (RecUpdE e fs) = pprExp appPrec e <> braces (pprFields fs) pprExp i (StaticE e) = parensIf (i >= appPrec) $ text "static"<+> pprExp appPrec e pprExp _ (UnboundVarE v) = pprName' Applied v pprExp _ (LabelE s) = text "#" <> text s pprExp _ (ImplicitParamVarE n) = text ('?' : n) pprExp _ (GetFieldE e f) = pprExp appPrec e <> text ('.': f) pprExp _ (ProjectionE xs) = parens $ hcat $ map ((char '.'<>) . text) $ toList xs pprFields :: [(Name,Exp)] -> Doc pprFields = sep . punctuate comma . 
map (\(s,e) -> pprName' Applied s <+> equals <+> ppr e) pprMaybeExp :: Precedence -> Maybe Exp -> Doc pprMaybeExp _ Nothing = empty pprMaybeExp i (Just e) = pprExp i e ------------------------------ instance Ppr Stmt where ppr (BindS p e) = ppr p <+> text "<-" <+> ppr e ppr (LetS ds) = text "let" <+> (braces (semiSep ds)) ppr (NoBindS e) = ppr e ppr (ParS sss) = sep $ punctuate bar $ map commaSep sss ppr (RecS ss) = text "rec" <+> (braces (semiSep ss)) ------------------------------ instance Ppr Match where ppr (Match p rhs ds) = pprMatchPat p <+> pprBody False rhs $$ where_clause ds pprMatchPat :: Pat -> Doc -- Everything except pattern signatures bind more tightly than (->) pprMatchPat p@(SigP {}) = parens (ppr p) pprMatchPat p = ppr p ------------------------------ pprGuarded :: Doc -> (Guard, Exp) -> Doc pprGuarded eqDoc (guard, expr) = case guard of NormalG guardExpr -> bar <+> ppr guardExpr <+> eqDoc <+> ppr expr PatG stmts -> bar <+> vcat (punctuate comma $ map ppr stmts) $$ nest nestDepth (eqDoc <+> ppr expr) ------------------------------ pprBody :: Bool -> Body -> Doc pprBody eq body = case body of GuardedB xs -> nest nestDepth $ vcat $ map (pprGuarded eqDoc) xs NormalB e -> eqDoc <+> ppr e where eqDoc | eq = equals | otherwise = arrow ------------------------------ pprClause :: Bool -> Clause -> Doc pprClause eqDoc (Clause ps rhs ds) = hsep (map (pprPat appPrec) ps) <+> pprBody eqDoc rhs $$ where_clause ds ------------------------------ instance Ppr Lit where ppr = pprLit noPrec pprLit :: Precedence -> Lit -> Doc pprLit i (IntPrimL x) = parensIf (i > noPrec && x < 0) (integer x <> char '#') pprLit _ (WordPrimL x) = integer x <> text "##" pprLit i (FloatPrimL x) = parensIf (i > noPrec && x < 0) (float (fromRational x) <> char '#') pprLit i (DoublePrimL x) = parensIf (i > noPrec && x < 0) (double (fromRational x) <> text "##") pprLit i (IntegerL x) = parensIf (i > noPrec && x < 0) (integer x) pprLit _ (CharL c) = text (show c) pprLit _ (CharPrimL c) = text 
(show c) <> char '#' pprLit _ (StringL s) = pprString s pprLit _ (StringPrimL s) = pprString (bytesToString s) <> char '#' pprLit _ (BytesPrimL {}) = pprString "" pprLit i (RationalL rat) | withoutFactor 2 (withoutFactor 5 $ denominator rat) /= 1 -- if the denominator has prime factors other than 2 and 5, show as fraction = parensIf (i > noPrec) $ integer (numerator rat) <+> char '/' <+> integer (denominator rat) | rat /= 0 && (zeroes < -1 || zeroes > 7), let (n, d) = properFraction (rat' / magnitude) (rat', zeroes') | abs rat < 1 = (10 * rat, zeroes - 1) | otherwise = (rat, zeroes) -- if < 0.01 or >= 100_000_000, use scientific notation = parensIf (i > noPrec && rat < 0) (integer n <> (if d == 0 then empty else char '.' <> decimals (abs d)) <> char 'e' <> integer zeroes') | let (n, d) = properFraction rat = parensIf (i > noPrec && rat < 0) (integer n <> char '.' <> if d == 0 then char '0' else decimals (abs d)) where zeroes :: Integer zeroes = truncate (logBase 10 (abs (fromRational rat) :: Double) * (1 - epsilon)) epsilon = 0.0000001 magnitude :: Rational magnitude = 10 ^^ zeroes withoutFactor :: Integer -> Integer -> Integer withoutFactor _ 0 = 0 withoutFactor p n | (n', 0) <- divMod n p = withoutFactor p n' | otherwise = n -- | Expects the argument 0 <= x < 1 decimals :: Rational -> Doc decimals x | x == 0 = empty | otherwise = integer n <> decimals d where (n, d) = properFraction (x * 10) bytesToString :: [Word8] -> String bytesToString = map (chr . fromIntegral) pprString :: String -> Doc -- Print newlines as newlines with Haskell string escape notation, -- not as '\n'. For other non-printables use regular escape notation. 
pprString s = vcat (map text (showMultiLineString s)) ------------------------------ instance Ppr Pat where ppr = pprPat noPrec pprPat :: Precedence -> Pat -> Doc pprPat i (LitP l) = pprLit i l pprPat _ (VarP v) = pprName' Applied v pprPat i (TupP ps) | [_] <- ps = pprPat i (ConP (tupleDataName 1) [] ps) | otherwise = parens (commaSep ps) pprPat _ (UnboxedTupP ps) = hashParens (commaSep ps) pprPat _ (UnboxedSumP p alt arity) = unboxedSumBars (ppr p) alt arity pprPat i (ConP s ts ps) = parensIf (i >= appPrec) $ pprName' Applied s <+> sep (map (\t -> char '@' <> pprParendType t) ts) <+> sep (map (pprPat appPrec) ps) pprPat _ (ParensP p) = parens $ pprPat noPrec p pprPat i (UInfixP p1 n p2) = parensIf (i > unopPrec) (pprPat unopPrec p1 <+> pprName' Infix n <+> pprPat unopPrec p2) pprPat i (InfixP p1 n p2) = parensIf (i >= opPrec) (pprPat opPrec p1 <+> pprName' Infix n <+> pprPat opPrec p2) pprPat i (TildeP p) = parensIf (i > noPrec) $ char '~' <> pprPat appPrec p pprPat i (BangP p) = parensIf (i > noPrec) $ char '!' <> pprPat appPrec p pprPat i (AsP v p) = parensIf (i > noPrec) $ ppr v <> text "@" <> pprPat appPrec p pprPat _ WildP = text "_" pprPat _ (RecP nm fs) = parens $ pprName' Applied nm <+> braces (sep $ punctuate comma $ map (\(s,p) -> pprName' Applied s <+> equals <+> ppr p) fs) pprPat _ (ListP ps) = brackets (commaSep ps) pprPat i (SigP p t) = parensIf (i > noPrec) $ ppr p <+> dcolon <+> ppr t pprPat _ (ViewP e p) = parens $ pprExp noPrec e <+> text "->" <+> pprPat noPrec p ------------------------------ instance Ppr Dec where ppr = ppr_dec True ppr_dec :: Bool -- declaration on the toplevel? 
-> Dec -> Doc ppr_dec _ (FunD f cs) = vcat $ map (\c -> pprPrefixOcc f <+> ppr c) cs ppr_dec _ (ValD p r ds) = ppr p <+> pprBody True r $$ where_clause ds ppr_dec _ (TySynD t xs rhs) = ppr_tySyn empty (Just t) (hsep (map ppr xs)) rhs ppr_dec _ (DataD ctxt t xs ksig cs decs) = ppr_data empty ctxt (Just t) (hsep (map ppr xs)) ksig cs decs ppr_dec _ (NewtypeD ctxt t xs ksig c decs) = ppr_newtype empty ctxt (Just t) (sep (map ppr xs)) ksig c decs ppr_dec _ (ClassD ctxt c xs fds ds) = text "class" <+> pprCxt ctxt <+> ppr c <+> hsep (map ppr xs) <+> ppr fds $$ where_clause ds ppr_dec _ (InstanceD o ctxt i ds) = text "instance" <+> maybe empty ppr_overlap o <+> pprCxt ctxt <+> ppr i $$ where_clause ds ppr_dec _ (SigD f t) = pprPrefixOcc f <+> dcolon <+> ppr t ppr_dec _ (KiSigD f k) = text "type" <+> pprPrefixOcc f <+> dcolon <+> ppr k ppr_dec _ (ForeignD f) = ppr f ppr_dec _ (InfixD fx n) = pprFixity n fx ppr_dec _ (DefaultD tys) = text "default" <+> parens (sep $ punctuate comma $ map ppr tys) ppr_dec _ (PragmaD p) = ppr p ppr_dec isTop (DataFamilyD tc tvs kind) = text "data" <+> maybeFamily <+> ppr tc <+> hsep (map ppr tvs) <+> maybeKind where maybeFamily | isTop = text "family" | otherwise = empty maybeKind | (Just k') <- kind = dcolon <+> ppr k' | otherwise = empty ppr_dec isTop (DataInstD ctxt bndrs ty ksig cs decs) = ppr_data (maybeInst <+> ppr_bndrs bndrs) ctxt Nothing (ppr ty) ksig cs decs where maybeInst | isTop = text "instance" | otherwise = empty ppr_dec isTop (NewtypeInstD ctxt bndrs ty ksig c decs) = ppr_newtype (maybeInst <+> ppr_bndrs bndrs) ctxt Nothing (ppr ty) ksig c decs where maybeInst | isTop = text "instance" | otherwise = empty ppr_dec isTop (TySynInstD (TySynEqn mb_bndrs ty rhs)) = ppr_tySyn (maybeInst <+> ppr_bndrs mb_bndrs) Nothing (ppr ty) rhs where maybeInst | isTop = text "instance" | otherwise = empty ppr_dec isTop (OpenTypeFamilyD tfhead) = text "type" <+> maybeFamily <+> ppr_tf_head tfhead where maybeFamily | isTop = text "family" | 
otherwise = empty ppr_dec _ (ClosedTypeFamilyD tfhead eqns) = hang (text "type family" <+> ppr_tf_head tfhead <+> text "where") nestDepth (vcat (map ppr_eqn eqns)) where ppr_eqn (TySynEqn mb_bndrs lhs rhs) = ppr_bndrs mb_bndrs <+> ppr lhs <+> text "=" <+> ppr rhs ppr_dec _ (RoleAnnotD name roles) = hsep [ text "type role", ppr name ] <+> hsep (map ppr roles) ppr_dec _ (StandaloneDerivD ds cxt ty) = hsep [ text "deriving" , maybe empty ppr_deriv_strategy ds , text "instance" , pprCxt cxt , ppr ty ] ppr_dec _ (DefaultSigD n ty) = hsep [ text "default", pprPrefixOcc n, dcolon, ppr ty ] ppr_dec _ (PatSynD name args dir pat) = text "pattern" <+> pprNameArgs <+> ppr dir <+> pprPatRHS where pprNameArgs | InfixPatSyn a1 a2 <- args = ppr a1 <+> pprName' Infix name <+> ppr a2 | otherwise = pprName' Applied name <+> ppr args pprPatRHS | ExplBidir cls <- dir = hang (ppr pat <+> text "where") nestDepth (pprName' Applied name <+> ppr cls) | otherwise = ppr pat ppr_dec _ (PatSynSigD name ty) = pprPatSynSig name ty ppr_dec _ (ImplicitParamBindD n e) = hsep [text ('?' 
: n), text "=", ppr e] ppr_deriv_strategy :: DerivStrategy -> Doc ppr_deriv_strategy ds = case ds of StockStrategy -> text "stock" AnyclassStrategy -> text "anyclass" NewtypeStrategy -> text "newtype" ViaStrategy ty -> text "via" <+> pprParendType ty ppr_overlap :: Overlap -> Doc ppr_overlap o = text $ case o of Overlaps -> "{-# OVERLAPS #-}" Overlappable -> "{-# OVERLAPPABLE #-}" Overlapping -> "{-# OVERLAPPING #-}" Incoherent -> "{-# INCOHERENT #-}" ppr_data :: Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> [Con] -> [DerivClause] -> Doc ppr_data = ppr_typedef "data" ppr_newtype :: Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> Con -> [DerivClause] -> Doc ppr_newtype maybeInst ctxt t argsDoc ksig c decs = ppr_typedef "newtype" maybeInst ctxt t argsDoc ksig [c] decs ppr_typedef :: String -> Doc -> Cxt -> Maybe Name -> Doc -> Maybe Kind -> [Con] -> [DerivClause] -> Doc ppr_typedef data_or_newtype maybeInst ctxt t argsDoc ksig cs decs = sep [text data_or_newtype <+> maybeInst <+> pprCxt ctxt <+> case t of Just n -> pprName' Applied n <+> argsDoc Nothing -> argsDoc <+> ksigDoc <+> maybeWhere, nest nestDepth (vcat (pref $ map ppr cs)), if null decs then empty else nest nestDepth $ vcat $ map ppr_deriv_clause decs] where pref :: [Doc] -> [Doc] pref xs | isGadtDecl = xs pref [] = [] -- No constructors; can't happen in H98 pref (d:ds) = (char '=' <+> d):map (bar <+>) ds maybeWhere :: Doc maybeWhere | isGadtDecl = text "where" | otherwise = empty isGadtDecl :: Bool isGadtDecl = not (null cs) && all isGadtCon cs where isGadtCon (GadtC _ _ _ ) = True isGadtCon (RecGadtC _ _ _) = True isGadtCon (ForallC _ _ x ) = isGadtCon x isGadtCon _ = False ksigDoc = case ksig of Nothing -> empty Just k -> dcolon <+> ppr k ppr_deriv_clause :: DerivClause -> Doc ppr_deriv_clause (DerivClause ds ctxt) = text "deriving" <+> pp_strat_before <+> ppr_cxt_preds ctxt <+> pp_strat_after where -- @via@ is unique in that in comes /after/ the class being derived, -- so we must special-case it. 
(pp_strat_before, pp_strat_after) = case ds of Just (via@ViaStrategy{}) -> (empty, ppr_deriv_strategy via) _ -> (maybe empty ppr_deriv_strategy ds, empty) ppr_tySyn :: Doc -> Maybe Name -> Doc -> Type -> Doc ppr_tySyn maybeInst t argsDoc rhs = text "type" <+> maybeInst <+> case t of Just n -> pprName' Applied n <+> argsDoc Nothing -> argsDoc <+> text "=" <+> ppr rhs ppr_tf_head :: TypeFamilyHead -> Doc ppr_tf_head (TypeFamilyHead tc tvs res inj) = pprName' Applied tc <+> hsep (map ppr tvs) <+> ppr res <+> maybeInj where maybeInj | (Just inj') <- inj = ppr inj' | otherwise = empty ppr_bndrs :: PprFlag flag => Maybe [TyVarBndr flag] -> Doc ppr_bndrs (Just bndrs) = text "forall" <+> sep (map ppr bndrs) <> text "." ppr_bndrs Nothing = empty ------------------------------ instance Ppr FunDep where ppr (FunDep xs ys) = hsep (map ppr xs) <+> text "->" <+> hsep (map ppr ys) ppr_list [] = empty ppr_list xs = bar <+> commaSep xs ------------------------------ instance Ppr FamilyResultSig where ppr NoSig = empty ppr (KindSig k) = dcolon <+> ppr k ppr (TyVarSig bndr) = text "=" <+> ppr bndr ------------------------------ instance Ppr InjectivityAnn where ppr (InjectivityAnn lhs rhs) = bar <+> ppr lhs <+> text "->" <+> hsep (map ppr rhs) ------------------------------ instance Ppr Foreign where ppr (ImportF callconv safety impent as typ) = text "foreign import" <+> showtextl callconv <+> showtextl safety <+> text (show impent) <+> pprName' Applied as <+> dcolon <+> ppr typ ppr (ExportF callconv expent as typ) = text "foreign export" <+> showtextl callconv <+> text (show expent) <+> pprName' Applied as <+> dcolon <+> ppr typ ------------------------------ instance Ppr Pragma where ppr (InlineP n inline rm phases) = text "{-#" <+> ppr inline <+> ppr rm <+> ppr phases <+> pprName' Applied n <+> text "#-}" ppr (OpaqueP n) = text "{-# OPAQUE" <+> pprName' Applied n <+> text "#-}" ppr (SpecialiseP n ty inline phases) = text "{-# SPECIALISE" <+> maybe empty ppr inline <+> ppr phases 
<+> sep [ pprName' Applied n <+> dcolon , nest 2 $ ppr ty ] <+> text "#-}" ppr (SpecialiseInstP inst) = text "{-# SPECIALISE instance" <+> ppr inst <+> text "#-}" ppr (RuleP n ty_bndrs tm_bndrs lhs rhs phases) = sep [ text "{-# RULES" <+> pprString n <+> ppr phases , nest 4 $ ppr_ty_forall ty_bndrs <+> ppr_tm_forall ty_bndrs <+> ppr lhs , nest 4 $ char '=' <+> ppr rhs <+> text "#-}" ] where ppr_ty_forall Nothing = empty ppr_ty_forall (Just bndrs) = text "forall" <+> fsep (map ppr bndrs) <+> char '.' ppr_tm_forall Nothing | null tm_bndrs = empty ppr_tm_forall _ = text "forall" <+> fsep (map ppr tm_bndrs) <+> char '.' ppr (AnnP tgt expr) = text "{-# ANN" <+> target1 tgt <+> ppr expr <+> text "#-}" where target1 ModuleAnnotation = text "module" target1 (TypeAnnotation t) = text "type" <+> pprName' Applied t target1 (ValueAnnotation v) = pprName' Applied v ppr (LineP line file) = text "{-# LINE" <+> int line <+> text (show file) <+> text "#-}" ppr (CompleteP cls mty) = text "{-# COMPLETE" <+> (fsep $ punctuate comma $ map (pprName' Applied) cls) <+> maybe empty (\ty -> dcolon <+> pprName' Applied ty) mty <+> text "#-}" ------------------------------ instance Ppr Inline where ppr NoInline = text "NOINLINE" ppr Inline = text "INLINE" ppr Inlinable = text "INLINABLE" ------------------------------ instance Ppr RuleMatch where ppr ConLike = text "CONLIKE" ppr FunLike = empty ------------------------------ instance Ppr Phases where ppr AllPhases = empty ppr (FromPhase i) = brackets $ int i ppr (BeforePhase i) = brackets $ char '~' <> int i ------------------------------ instance Ppr RuleBndr where ppr (RuleVar n) = ppr n ppr (TypedRuleVar n ty) = parens $ ppr n <+> dcolon <+> ppr ty ------------------------------ instance Ppr Clause where ppr = pprClause True ------------------------------ instance Ppr Con where ppr (NormalC c sts) = pprName' Applied c <+> sep (map pprBangType sts) ppr (RecC c vsts) = pprName' Applied c <+> braces (sep (punctuate comma $ map pprVarBangType 
vsts)) ppr (InfixC st1 c st2) = pprBangType st1 <+> pprName' Infix c <+> pprBangType st2 ppr (ForallC ns ctxt (GadtC c sts ty)) = commaSepApplied c <+> dcolon <+> pprForall ns ctxt <+> pprGadtRHS sts ty ppr (ForallC ns ctxt (RecGadtC c vsts ty)) = commaSepApplied c <+> dcolon <+> pprForall ns ctxt <+> pprRecFields vsts ty ppr (ForallC ns ctxt con) = pprForall ns ctxt <+> ppr con ppr (GadtC c sts ty) = commaSepApplied c <+> dcolon <+> pprGadtRHS sts ty ppr (RecGadtC c vsts ty) = commaSepApplied c <+> dcolon <+> pprRecFields vsts ty instance Ppr PatSynDir where ppr Unidir = text "<-" ppr ImplBidir = text "=" ppr (ExplBidir _) = text "<-" -- the ExplBidir's clauses are pretty printed together with the -- entire pattern synonym; so only print the direction here. instance Ppr PatSynArgs where ppr (PrefixPatSyn args) = sep $ map ppr args ppr (InfixPatSyn a1 a2) = ppr a1 <+> ppr a2 ppr (RecordPatSyn sels) = braces $ sep (punctuate comma (map (pprName' Applied) sels)) commaSepApplied :: [Name] -> Doc commaSepApplied = commaSepWith (pprName' Applied) pprForall :: [TyVarBndr Specificity] -> Cxt -> Doc pprForall = pprForall' ForallInvis pprForallVis :: [TyVarBndr ()] -> Cxt -> Doc pprForallVis = pprForall' ForallVis pprForall' :: PprFlag flag => ForallVisFlag -> [TyVarBndr flag] -> Cxt -> Doc pprForall' fvf tvs cxt -- even in the case without any tvs, there could be a non-empty -- context cxt (e.g., in the case of pattern synonyms, where there -- are multiple forall binders and contexts). | [] <- tvs = pprCxt cxt | otherwise = text "forall" <+> hsep (map ppr tvs) <+> separator <+> pprCxt cxt where separator = case fvf of ForallVis -> text "->" ForallInvis -> char '.' 
-- Pretty-print the fields of a record constructor, @{ f1 :: t1, ... } -> res@.
pprRecFields :: [(Name, Strict, Type)] -> Type -> Doc
pprRecFields vsts ty
    = braces (sep (punctuate comma $ map pprVarBangType vsts))
      <+> arrow <+> ppr ty

-- Pretty-print the right-hand side of a GADT constructor signature:
-- the argument types joined by arrows, ending in the result type.
pprGadtRHS :: [(Strict, Type)] -> Type -> Doc
pprGadtRHS [] ty
    = ppr ty
pprGadtRHS sts ty
    = sep (punctuate (space <> arrow) (map pprBangType sts))
      <+> arrow <+> ppr ty

------------------------------
pprVarBangType :: VarBangType -> Doc
-- Slight infelicity: with print non-atomic type with parens
pprVarBangType (v, bang, t) = pprName' Applied v <+> dcolon <+> pprBangType (bang, t)

------------------------------
pprBangType :: BangType -> Doc
-- Make sure we print
--
-- Con {-# UNPACK #-} a
--
-- rather than
--
-- Con {-# UNPACK #-}a
--
-- when there's no strictness annotation. If there is a strictness annotation,
-- it's okay to not put a space between it and the type.
pprBangType (bt@(Bang _ NoSourceStrictness), t) = ppr bt <+> pprParendType t
pprBangType (bt, t) = ppr bt <> pprParendType t

------------------------------
instance Ppr Bang where
    ppr (Bang su ss) = ppr su <+> ppr ss

------------------------------
instance Ppr SourceUnpackedness where
    ppr NoSourceUnpackedness = empty
    ppr SourceNoUnpack       = text "{-# NOUNPACK #-}"
    ppr SourceUnpack         = text "{-# UNPACK #-}"

------------------------------
instance Ppr SourceStrictness where
    ppr NoSourceStrictness = empty
    ppr SourceLazy         = char '~'
    ppr SourceStrict       = char '!'

------------------------------
instance Ppr DecidedStrictness where
    ppr DecidedLazy   = empty
    ppr DecidedStrict = char '!'
    ppr DecidedUnpack = text "{-# UNPACK #-} !"

------------------------------
{-# DEPRECATED pprVarStrictType
               "As of @template-haskell-2.11.0.0@, 'VarStrictType' has been replaced by 'VarBangType'. Please use 'pprVarBangType' instead." #-}
pprVarStrictType :: (Name, Strict, Type) -> Doc
pprVarStrictType = pprVarBangType

------------------------------
{-# DEPRECATED pprStrictType
               "As of @template-haskell-2.11.0.0@, 'StrictType' has been replaced by 'BangType'. Please use 'pprBangType' instead." #-}
pprStrictType :: (Strict, Type) -> Doc
pprStrictType = pprBangType

------------------------------
-- | Pretty-print a type at atomic precedence: anything that is not
-- syntactically atomic gets wrapped in parentheses.
pprParendType :: Type -> Doc
pprParendType (VarT v)            = pprName' Applied v
-- `Applied` is used here instead of `ppr` because of infix names (#13887)
pprParendType (ConT c)            = pprName' Applied c
pprParendType (TupleT 0)          = text "()"
pprParendType (TupleT 1)          = pprParendType (ConT (tupleTypeName 1))
pprParendType (TupleT n)          = parens (hcat (replicate (n-1) comma))
pprParendType (UnboxedTupleT n)   = hashParens $ hcat $ replicate (n-1) comma
pprParendType (UnboxedSumT arity) = hashParens $ hcat $ replicate (arity-1) bar
pprParendType ArrowT              = parens (text "->")
pprParendType MulArrowT           = text "FUN"
pprParendType ListT               = text "[]"
pprParendType (LitT l)            = pprTyLit l
pprParendType (PromotedT c)       = text "'" <> pprName' Applied c
pprParendType (PromotedTupleT 0)  = text "'()"
pprParendType (PromotedTupleT 1)  = pprParendType (PromotedT (tupleDataName 1))
pprParendType (PromotedTupleT n)  = quoteParens (hcat (replicate (n-1) comma))
pprParendType PromotedNilT        = text "'[]"
pprParendType PromotedConsT       = text "'(:)"
pprParendType StarT               = char '*'
pprParendType ConstraintT         = text "Constraint"
pprParendType (SigT ty k)         = parens (ppr ty <+> text "::" <+> ppr k)
pprParendType WildCardT           = char '_'
pprParendType t@(InfixT {})          = parens (pprInfixT t)
pprParendType t@(UInfixT {})         = parens (pprInfixT t)
pprParendType t@(PromotedInfixT {})  = parens (pprInfixT t)
pprParendType t@(PromotedUInfixT {}) = parens (pprInfixT t)
pprParendType (ParensT t)         = ppr t
-- Saturated tuple applications print in @(a, b)@ form; an unsaturated
-- TupleT falls through to the equations below via the failing guard.
pprParendType tuple | (TupleT n, args) <- split tuple
                    , length args == n
                    = parens (commaSep args)
pprParendType (ImplicitParamT n t) = text ('?':n) <+> text "::" <+> ppr t
pprParendType EqualityT           = text "(~)"
pprParendType t@(ForallT {})      = parens (ppr t)
pprParendType t@(ForallVisT {})   = parens (ppr t)
pprParendType t@(AppT {})         = parens (ppr t)
pprParendType t@(AppKindT {})     = parens (ppr t)

-- | Pretty-print an infix type application, @x `N` y@.  The promoted
-- variants prefix the operator name with a tick.  Unresolved (U-) infix
-- operands recurse with 'pprInfixT' (no parens, mirroring the unresolved
-- fixity), resolved ones with plain 'ppr'.
pprInfixT :: Type -> Doc
pprInfixT = \case
  (InfixT x n y)          -> with x n y ""  ppr
  (UInfixT x n y)         -> with x n y ""  pprInfixT
  (PromotedInfixT x n y)  -> with x n y "'" ppr
  (PromotedUInfixT x n y) -> with x n y "'" pprInfixT
  t                       -> ppr t
  where
    with x n y prefix ppr' = ppr' x <+> text prefix <> pprName' Infix n <+> ppr' y

instance Ppr Type where
    ppr (ForallT tvars ctxt ty) = sep [pprForall tvars ctxt, ppr ty]
    ppr (ForallVisT tvars ty)   = sep [pprForallVis tvars [], ppr ty]
    ppr ty = pprTyApp (split ty)
       -- Works, in a degenerate way, for SigT, and puts parens round (ty :: kind)
       -- See Note [Pretty-printing kind signatures]

instance Ppr TypeArg where
    ppr (TANormal ty) = parensIf (isStarT ty) (ppr ty)
    ppr (TyArg ki)    = char '@' <> parensIf (isStarT ki) (ppr ki)

pprParendTypeArg :: TypeArg -> Doc
pprParendTypeArg (TANormal ty) = parensIf (isStarT ty) (pprParendType ty)
pprParendTypeArg (TyArg ki)    = char '@' <> parensIf (isStarT ki) (pprParendType ki)

isStarT :: Type -> Bool
isStarT StarT = True
isStarT _     = False

{- Note [Pretty-printing kind signatures]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GHC's parser only recognises a kind signature in a type when there are
parens around it.  E.g. the parens are required here:
   f :: (Int :: *)
   type instance F Int = (Bool :: *)
So we always print a SigT with parens (see #10050). -}

-- | Pretty-print a head type applied to its arguments, special-casing
-- arrows (including linear/multiplicity arrows), equality, lists and
-- saturated (promoted) tuples.
pprTyApp :: (Type, [TypeArg]) -> Doc
pprTyApp (MulArrowT, [TANormal (PromotedT c), TANormal arg1, TANormal arg2])
  | c == oneName  = sep [pprFunArgType arg1 <+> text "%1 ->", ppr arg2]
  | c == manyName = sep [pprFunArgType arg1 <+> text "->", ppr arg2]
pprTyApp (MulArrowT, [TANormal argm, TANormal arg1, TANormal arg2]) =
  sep [pprFunArgType arg1 <+> text "%" <> ppr argm <+> text "->", ppr arg2]
pprTyApp (ArrowT, [TANormal arg1, TANormal arg2]) =
  sep [pprFunArgType arg1 <+> text "->", ppr arg2]
pprTyApp (EqualityT, [TANormal arg1, TANormal arg2]) =
  sep [pprFunArgType arg1 <+> text "~", ppr arg2]
pprTyApp (ListT, [TANormal arg]) = brackets (ppr arg)
pprTyApp (TupleT 1, args)         = pprTyApp (ConT (tupleTypeName 1), args)
pprTyApp (PromotedTupleT 1, args) = pprTyApp (PromotedT (tupleDataName 1), args)
pprTyApp (TupleT n, args)
  | length args == n, Just args' <- traverse fromTANormal args
  = parens (commaSep args')
pprTyApp (PromotedTupleT n, args)
  | length args == n, Just args' <- traverse fromTANormal args
  = quoteParens (commaSep args')
pprTyApp (fun, args) = pprParendType fun <+> sep (map pprParendTypeArg args)

fromTANormal :: TypeArg -> Maybe Type
fromTANormal (TANormal arg) = Just arg
fromTANormal (TyArg _)      = Nothing

pprFunArgType :: Type -> Doc    -- Should really use a precedence argument
-- Everything except forall and (->) binds more tightly than (->)
pprFunArgType ty@(ForallT {})                               = parens (ppr ty)
pprFunArgType ty@(ForallVisT {})                            = parens (ppr ty)
pprFunArgType ty@(((MulArrowT `AppT` _) `AppT` _) `AppT` _) = parens (ppr ty)
pprFunArgType ty@((ArrowT `AppT` _) `AppT` _)               = parens (ppr ty)
pprFunArgType ty@(SigT _ _)                                 = parens (ppr ty)
pprFunArgType ty                                            = ppr ty

data ForallVisFlag = ForallVis   -- forall a -> {...}
                   | ForallInvis -- forall a.  {...}
  deriving Show

data TypeArg = TANormal Type
             | TyArg Kind

split :: Type -> (Type, [TypeArg])    -- Split into function and args
split t = go t []
    where go (AppT t1 t2)     args = go t1 (TANormal t2:args)
          go (AppKindT ty ki) args = go ty (TyArg ki:args)
          go ty               args = (ty, args)

pprTyLit :: TyLit -> Doc
pprTyLit (NumTyLit n)  = integer n
pprTyLit (StrTyLit s)  = text (show s)
pprTyLit (CharTyLit c) = text (show c)

instance Ppr TyLit where
  ppr = pprTyLit

------------------------------
class PprFlag flag where
    pprTyVarBndr :: (TyVarBndr flag) -> Doc

instance PprFlag () where
    pprTyVarBndr (PlainTV nm ())    = ppr nm
    pprTyVarBndr (KindedTV nm () k) = parens (ppr nm <+> dcolon <+> ppr k)

instance PprFlag Specificity where
    pprTyVarBndr (PlainTV nm SpecifiedSpec)    = ppr nm
    pprTyVarBndr (PlainTV nm InferredSpec)     = braces (ppr nm)
    pprTyVarBndr (KindedTV nm SpecifiedSpec k) = parens (ppr nm <+> dcolon <+> ppr k)
    pprTyVarBndr (KindedTV nm InferredSpec k)  = braces (ppr nm <+> dcolon <+> ppr k)

instance PprFlag flag => Ppr (TyVarBndr flag) where
    ppr bndr = pprTyVarBndr bndr

instance Ppr Role where
    ppr NominalR          = text "nominal"
    ppr RepresentationalR = text "representational"
    ppr PhantomR          = text "phantom"
    ppr InferR            = text "_"

------------------------------
pprCxt :: Cxt -> Doc
pprCxt [] = empty
pprCxt ts = ppr_cxt_preds ts <+> text "=>"

ppr_cxt_preds :: Cxt -> Doc
ppr_cxt_preds []                   = empty
ppr_cxt_preds [t@ImplicitParamT{}] = parens (ppr t)
ppr_cxt_preds [t@ForallT{}]        = parens (ppr t)
ppr_cxt_preds [t]                  = ppr t
ppr_cxt_preds ts                   = parens (commaSep ts)

------------------------------
instance Ppr Range where
    ppr = brackets . pprRange
      where pprRange :: Range -> Doc
            pprRange (FromR e)         = ppr e <+> text ".."
            pprRange (FromThenR e1 e2) = ppr e1 <> text ","
                                     <+> ppr e2 <+> text ".."
            pprRange (FromToR e1 e2)   = ppr e1 <+> text ".." <+> ppr e2
            pprRange (FromThenToR e1 e2 e3) = ppr e1 <> text ","
                                          <+> ppr e2 <+> text ".."
                                          <+> ppr e3

------------------------------
where_clause :: [Dec] -> Doc
where_clause [] = empty
where_clause ds = nest nestDepth $ text "where" <+> braces (semiSepWith (ppr_dec False) ds)

showtextl :: Show a => a -> Doc
showtextl = text . map toLower . show

hashParens :: Doc -> Doc
hashParens d = text "(# " <> d <> text " #)"

quoteParens :: Doc -> Doc
quoteParens d = text "'(" <> d <> text ")"

-----------------------------
instance Ppr Loc where
  ppr (Loc { loc_module = md
           , loc_package = pkg
           , loc_start = (start_ln, start_col)
           , loc_end = (end_ln, end_col) })
    = hcat [ text pkg, colon, text md, colon
           , parens $ int start_ln <> comma <> int start_col
           , text "-"
           , parens $ int end_ln <> comma <> int end_col ]

-- Takes a separator and a pretty-printing function and prints a list of things
-- separated by the separator followed by space.
sepWith :: Doc -> (a -> Doc) -> [a] -> Doc
sepWith sepDoc pprFun = sep . punctuate sepDoc . map pprFun

-- Takes a list of printable things and prints them separated by commas followed
-- by space.
commaSep :: Ppr a => [a] -> Doc
commaSep = commaSepWith ppr

-- Takes a list of things and prints them with the given pretty-printing
-- function, separated by commas followed by space.
commaSepWith :: (a -> Doc) -> [a] -> Doc
commaSepWith pprFun = sepWith comma pprFun

-- Takes a list of printable things and prints them separated by semicolons
-- followed by space.
semiSep :: Ppr a => [a] -> Doc
semiSep = sep . punctuate semi . map ppr

-- Takes a list of things and prints them with the given pretty-printing
-- function, separated by semicolons followed by space.
semiSepWith :: (a -> Doc) -> [a] -> Doc
semiSepWith pprFun = sepWith semi pprFun

-- Prints out the series of vertical bars that wraps an expression or pattern
-- used in an unboxed sum.
unboxedSumBars :: Doc -> SumAlt -> SumArity -> Doc
unboxedSumBars d alt arity = hashParens $
    bars (alt-1) <> d <> bars (arity - alt)
  where
    bars i = hsep (replicate i bar)

-- Text containing the vertical bar character.
bar :: Doc
bar = char '|'
ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/PprLib.hs0000644000000000000000000001504514470055371026015 0ustar0000000000000000
{-# LANGUAGE FlexibleInstances, Safe #-}

-- | Monadic front-end to Text.PrettyPrint

module Language.Haskell.TH.PprLib (

        -- * The document type
        Doc,            -- Abstract, instance of Show
        PprM,

        -- * Primitive Documents
        empty,
        semi, comma, colon, dcolon, space, equals, arrow,
        lparen, rparen, lbrack, rbrack, lbrace, rbrace,

        -- * Converting values into documents
        text, char, ptext, int, integer, float, double, rational,

        -- * Wrapping documents in delimiters
        parens, brackets, braces, quotes, doubleQuotes,

        -- * Combining documents
        (<>), (<+>), hcat, hsep, ($$), ($+$), vcat, sep, cat, fsep, fcat,
        nest, hang, punctuate,

        -- * Predicates on documents
        isEmpty,

        to_HPJ_Doc, pprName, pprName'
  ) where


import Language.Haskell.TH.Syntax
    (Uniq, Name(..), showName', NameFlavour(..), NameIs(..))
import qualified Text.PrettyPrint as HPJ
import Control.Monad (liftM, liftM2, ap)
import Language.Haskell.TH.Lib.Map ( Map )
import qualified Language.Haskell.TH.Lib.Map as Map ( lookup, insert, empty )
import Prelude hiding ((<>))

infixl 6 <>
infixl 6 <+>
infixl 5 $$, $+$

-- ---------------------------------------------------------------------------
-- The interface

-- The primitive Doc values

instance Show Doc where
   show d = HPJ.render (to_HPJ_Doc d)

isEmpty :: Doc    -> PprM Bool;  -- ^ Returns 'True' if the document is empty

empty   :: Doc;                 -- ^ An empty document
semi    :: Doc;                 -- ^ A ';' character
comma   :: Doc;                 -- ^ A ',' character
colon   :: Doc;                 -- ^ A ':' character
dcolon  :: Doc;                 -- ^ A "::" string
space   :: Doc;                 -- ^ A space character
equals  :: Doc;                 -- ^ A '=' character
arrow   :: Doc;                 -- ^ A "->" string
lparen  :: Doc;                 -- ^ A '(' character
rparen  :: Doc;                 -- ^ A ')' character
lbrack  :: Doc;                 -- ^ A '[' character
rbrack  :: Doc;                 -- ^ A ']' character
lbrace  :: Doc;                 -- ^ A '{' character
rbrace  :: Doc;                 -- ^ A '}' character

text     :: String   -> Doc
ptext    :: String   -> Doc
char     :: Char     -> Doc
int      :: Int      -> Doc
integer  :: Integer  -> Doc
float    :: Float    -> Doc
double   :: Double   -> Doc
rational :: Rational -> Doc

parens       :: Doc -> Doc;     -- ^ Wrap document in @(...)@
brackets     :: Doc -> Doc;     -- ^ Wrap document in @[...]@
braces       :: Doc -> Doc;     -- ^ Wrap document in @{...}@
quotes       :: Doc -> Doc;     -- ^ Wrap document in @\'...\'@
doubleQuotes :: Doc -> Doc;     -- ^ Wrap document in @\"...\"@

-- Combining @Doc@ values

(<>)   :: Doc -> Doc -> Doc;     -- ^Beside
hcat   :: [Doc] -> Doc;          -- ^List version of '<>'
(<+>)  :: Doc -> Doc -> Doc;     -- ^Beside, separated by space
hsep   :: [Doc] -> Doc;          -- ^List version of '<+>'

($$)   :: Doc -> Doc -> Doc;     -- ^Above; if there is no
                                 -- overlap it \"dovetails\" the two
($+$)  :: Doc -> Doc -> Doc;     -- ^Above, without dovetailing.
vcat   :: [Doc] -> Doc;          -- ^List version of '$$'

cat    :: [Doc] -> Doc;          -- ^ Either hcat or vcat
sep    :: [Doc] -> Doc;          -- ^ Either hsep or vcat
fcat   :: [Doc] -> Doc;          -- ^ \"Paragraph fill\" version of cat
fsep   :: [Doc] -> Doc;          -- ^ \"Paragraph fill\" version of sep

nest   :: Int -> Doc -> Doc;     -- ^ Nested

-- GHC-specific ones.

hang :: Doc -> Int -> Doc -> Doc;       -- ^ @hang d1 n d2 = sep [d1, nest n d2]@
punctuate :: Doc -> [Doc] -> [Doc]
   -- ^ @punctuate p [d1, ... dn] = [d1 \<> p, d2 \<> p, ... dn-1 \<> p, dn]@

-- ---------------------------------------------------------------------------
-- The "implementation"

type State = (Map Name Name, Uniq)
data PprM a = PprM { runPprM :: State -> (a, State) }

pprName :: Name -> Doc
pprName = pprName' Alone

pprName' :: NameIs -> Name -> Doc
pprName' ni n@(Name o (NameU _))
 = PprM $ \s@(fm, i)
        -> let (n', s') = case Map.lookup n fm of
                         Just d -> (d, s)
                         Nothing -> let n'' = Name o (NameU i)
                                    in (n'', (Map.insert n n'' fm, i + 1))
           in (HPJ.text $ showName' ni n', s')
pprName' ni n = text $ showName' ni n

{-
instance Show Name where
  show (Name occ (NameU u))    = occString occ ++ "_" ++ show (I# u)
  show (Name occ NameS)        = occString occ
  show (Name occ (NameG ns m)) = modString m ++ "." ++ occString occ

data Name = Name OccName NameFlavour

data NameFlavour
  | NameU Int#      -- A unique local name
-}

to_HPJ_Doc :: Doc -> HPJ.Doc
to_HPJ_Doc d = fst $ runPprM d (Map.empty, 0)

instance Functor PprM where
      fmap = liftM

instance Applicative PprM where
      pure x = PprM $ \s -> (x, s)
      (<*>) = ap

instance Monad PprM where
    m >>= k  = PprM $ \s -> let (x, s') = runPprM m s
                            in runPprM (k x) s'

type Doc = PprM HPJ.Doc

-- The primitive Doc values

isEmpty = liftM HPJ.isEmpty

empty = return HPJ.empty
semi = return HPJ.semi
comma = return HPJ.comma
colon = return HPJ.colon
dcolon = return $ HPJ.text "::"
space = return HPJ.space
equals = return HPJ.equals
arrow = return $ HPJ.text "->"
lparen = return HPJ.lparen
rparen = return HPJ.rparen
lbrack = return HPJ.lbrack
rbrack = return HPJ.rbrack
lbrace = return HPJ.lbrace
rbrace = return HPJ.rbrace

text = return . HPJ.text
ptext = return . HPJ.ptext
char = return . HPJ.char
int = return . HPJ.int
integer = return . HPJ.integer
float = return . HPJ.float
double = return . HPJ.double
rational = return . HPJ.rational

parens = liftM HPJ.parens
brackets = liftM HPJ.brackets
braces = liftM HPJ.braces
quotes = liftM HPJ.quotes
doubleQuotes = liftM HPJ.doubleQuotes

-- Combining @Doc@ values

(<>) = liftM2 (HPJ.<>)
hcat = liftM HPJ.hcat . sequence
(<+>) = liftM2 (HPJ.<+>)
hsep = liftM HPJ.hsep . sequence

($$) = liftM2 (HPJ.$$)
($+$) = liftM2 (HPJ.$+$)
vcat = liftM HPJ.vcat . sequence

cat  = liftM HPJ.cat . sequence
sep  = liftM HPJ.sep . sequence
fcat = liftM HPJ.fcat . sequence
fsep = liftM HPJ.fsep . sequence

nest n = liftM (HPJ.nest n)

hang d1 n d2 = do d1' <- d1
                  d2' <- d2
                  return (HPJ.hang d1' n d2')

-- punctuate uses the same definition as Text.PrettyPrint
punctuate _ []     = []
punctuate p (d:ds) = go d ds
                   where
                     go d' [] = [d']
                     go d' (e:es) = (d' <> p) : go e es
ghc-lib-parser-9.4.7.20230826/libraries/template-haskell/Language/Haskell/TH/Syntax.hs0000644000000000000000000031506014472375231026115 0ustar0000000000000000
{-# LANGUAGE CPP, DeriveDataTypeable,
             DeriveGeneric, FlexibleInstances, DefaultSignatures,
             RankNTypes, RoleAnnotations, ScopedTypeVariables,
             MagicHash, KindSignatures, PolyKinds, TypeApplications, DataKinds,
             GADTs, UnboxedTuples, UnboxedSums, TypeInType, TypeOperators,
             Trustworthy, DeriveFunctor, BangPatterns, RecordWildCards,
             ImplicitParams #-}
{-# OPTIONS_GHC -fno-warn-inline-rule-shadowing #-}

-----------------------------------------------------------------------------
-- |
-- Module      :  Language.Haskell.Syntax
-- Copyright   :  (c) The University of Glasgow 2003
-- License     :  BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer  :  libraries@haskell.org
-- Stability   :  experimental
-- Portability :  portable
--
-- Abstract syntax definitions for Template Haskell.
--
-----------------------------------------------------------------------------

module Language.Haskell.TH.Syntax
    ( module Language.Haskell.TH.Syntax
      -- * Language extensions
    , module Language.Haskell.TH.LanguageExtensions
    , ForeignSrcLang(..)
) where import Data.Data hiding (Fixity(..)) import Data.IORef import System.IO.Unsafe ( unsafePerformIO ) import System.FilePath import GHC.IO.Unsafe ( unsafeDupableInterleaveIO ) import Control.Monad (liftM) import Control.Monad.IO.Class (MonadIO (..)) import Control.Monad.Fix (MonadFix (..)) import Control.Applicative (liftA2) import Control.Exception (BlockedIndefinitelyOnMVar (..), catch, throwIO) import Control.Exception.Base (FixIOException (..)) import Control.Concurrent.MVar (newEmptyMVar, readMVar, putMVar) import System.IO ( hPutStrLn, stderr ) import Data.Char ( isAlpha, isAlphaNum, isUpper, ord ) import Data.Int import Data.List.NonEmpty ( NonEmpty(..) ) import Data.Void ( Void, absurd ) import Data.Word import Data.Ratio import GHC.CString ( unpackCString# ) import GHC.Generics ( Generic ) import GHC.Types ( Int(..), Word(..), Char(..), Double(..), Float(..), TYPE, RuntimeRep(..) ) import GHC.Prim ( Int#, Word#, Char#, Double#, Float#, Addr# ) import GHC.Ptr ( Ptr, plusPtr ) import GHC.Lexeme ( startsVarSym, startsVarId ) import GHC.ForeignSrcLang.Type import Language.Haskell.TH.LanguageExtensions import Numeric.Natural import Prelude import Foreign.ForeignPtr import Foreign.C.String import Foreign.C.Types import GHC.Stack #if __GLASGOW_HASKELL__ >= 901 import GHC.Types ( Levity(..) 
) #endif #if __GLASGOW_HASKELL__ >= 903 import Data.Array.Byte (ByteArray(..)) import GHC.Exts ( ByteArray#, unsafeFreezeByteArray#, copyAddrToByteArray#, newByteArray# , isByteArrayPinned#, isTrue#, sizeofByteArray#, unsafeCoerce#, byteArrayContents# , copyByteArray#, newPinnedByteArray#) import GHC.ForeignPtr (ForeignPtr(..), ForeignPtrContents(..)) import GHC.ST (ST(..), runST) #endif ----------------------------------------------------- -- -- The Quasi class -- ----------------------------------------------------- class (MonadIO m, MonadFail m) => Quasi m where qNewName :: String -> m Name -- ^ Fresh names -- Error reporting and recovery qReport :: Bool -> String -> m () -- ^ Report an error (True) or warning (False) -- ...but carry on; use 'fail' to stop qRecover :: m a -- ^ the error handler -> m a -- ^ action which may fail -> m a -- ^ Recover from the monadic 'fail' -- Inspect the type-checker's environment qLookupName :: Bool -> String -> m (Maybe Name) -- True <=> type namespace, False <=> value namespace qReify :: Name -> m Info qReifyFixity :: Name -> m (Maybe Fixity) qReifyType :: Name -> m Type qReifyInstances :: Name -> [Type] -> m [Dec] -- Is (n tys) an instance? 
-- Returns list of matching instance Decs -- (with empty sub-Decs) -- Works for classes and type functions qReifyRoles :: Name -> m [Role] qReifyAnnotations :: Data a => AnnLookup -> m [a] qReifyModule :: Module -> m ModuleInfo qReifyConStrictness :: Name -> m [DecidedStrictness] qLocation :: m Loc qRunIO :: IO a -> m a qRunIO = liftIO -- ^ Input/output (dangerous) qGetPackageRoot :: m FilePath qAddDependentFile :: FilePath -> m () qAddTempFile :: String -> m FilePath qAddTopDecls :: [Dec] -> m () qAddForeignFilePath :: ForeignSrcLang -> String -> m () qAddModFinalizer :: Q () -> m () qAddCorePlugin :: String -> m () qGetQ :: Typeable a => m (Maybe a) qPutQ :: Typeable a => a -> m () qIsExtEnabled :: Extension -> m Bool qExtsEnabled :: m [Extension] qPutDoc :: DocLoc -> String -> m () qGetDoc :: DocLoc -> m (Maybe String) ----------------------------------------------------- -- The IO instance of Quasi -- -- This instance is used only when running a Q -- computation in the IO monad, usually just to -- print the result. There is no interesting -- type environment, so reification isn't going to -- work. -- ----------------------------------------------------- instance Quasi IO where qNewName = newNameIO qReport True msg = hPutStrLn stderr ("Template Haskell error: " ++ msg) qReport False msg = hPutStrLn stderr ("Template Haskell error: " ++ msg) qLookupName _ _ = badIO "lookupName" qReify _ = badIO "reify" qReifyFixity _ = badIO "reifyFixity" qReifyType _ = badIO "reifyFixity" qReifyInstances _ _ = badIO "reifyInstances" qReifyRoles _ = badIO "reifyRoles" qReifyAnnotations _ = badIO "reifyAnnotations" qReifyModule _ = badIO "reifyModule" qReifyConStrictness _ = badIO "reifyConStrictness" qLocation = badIO "currentLocation" qRecover _ _ = badIO "recover" -- Maybe we could fix this? 
qGetPackageRoot = badIO "getProjectRoot" qAddDependentFile _ = badIO "addDependentFile" qAddTempFile _ = badIO "addTempFile" qAddTopDecls _ = badIO "addTopDecls" qAddForeignFilePath _ _ = badIO "addForeignFilePath" qAddModFinalizer _ = badIO "addModFinalizer" qAddCorePlugin _ = badIO "addCorePlugin" qGetQ = badIO "getQ" qPutQ _ = badIO "putQ" qIsExtEnabled _ = badIO "isExtEnabled" qExtsEnabled = badIO "extsEnabled" qPutDoc _ _ = badIO "putDoc" qGetDoc _ = badIO "getDoc" instance Quote IO where newName = newNameIO newNameIO :: String -> IO Name newNameIO s = do { n <- atomicModifyIORef' counter (\x -> (x + 1, x)) ; pure (mkNameU s n) } badIO :: String -> IO a badIO op = do { qReport True ("Can't do `" ++ op ++ "' in the IO monad") ; fail "Template Haskell failure" } -- Global variable to generate unique symbols counter :: IORef Uniq {-# NOINLINE counter #-} counter = unsafePerformIO (newIORef 0) ----------------------------------------------------- -- -- The Q monad -- ----------------------------------------------------- newtype Q a = Q { unQ :: forall m. Quasi m => m a } -- \"Runs\" the 'Q' monad. Normal users of Template Haskell -- should not need this function, as the splice brackets @$( ... )@ -- are the usual way of running a 'Q' computation. -- -- This function is primarily used in GHC internals, and for debugging -- splices by running them in 'IO'. -- -- Note that many functions in 'Q', such as 'reify' and other compiler -- queries, are not supported when running 'Q' in 'IO'; these operations -- simply fail at runtime. Indeed, the only operations guaranteed to succeed -- are 'newName', 'runIO', 'reportError' and 'reportWarning'. 
runQ :: Quasi m => Q a -> m a
runQ (Q m) = m

instance Monad Q where
  Q m >>= k  = Q (m >>= \x -> unQ (k x))
  (>>) = (*>)

instance MonadFail Q where
  fail s     = report True s >> Q (fail "Q monad failure")

instance Functor Q where
  fmap f (Q x) = Q (fmap f x)

instance Applicative Q where
  pure x  = Q (pure x)
  Q f <*> Q x = Q (f <*> x)
  Q m *> Q n = Q (m *> n)

-- | @since 2.17.0.0
instance Semigroup a => Semigroup (Q a) where
  (<>) = liftA2 (<>)

-- | @since 2.17.0.0
instance Monoid a => Monoid (Q a) where
  mempty = pure mempty

-- | If the function passed to 'mfix' inspects its argument,
-- the resulting action will throw a 'FixIOException'.
--
-- @since 2.17.0.0
instance MonadFix Q where
  -- We use the same blackholing approach as in fixIO.
  -- See Note [Blackholing in fixIO] in System.IO in base.
  mfix k = do
    m <- runIO newEmptyMVar
    ans <- runIO (unsafeDupableInterleaveIO
             (readMVar m `catch` \BlockedIndefinitelyOnMVar ->
                           throwIO FixIOException))
    result <- k ans
    runIO (putMVar m result)
    return result


-----------------------------------------------------
--
--              The Quote class
--
-----------------------------------------------------

-- | The 'Quote' class implements the minimal interface which is necessary for
-- desugaring quotations.
--
-- * The @Monad m@ superclass is needed to stitch together the different
--   AST fragments.
-- * 'newName' is used when desugaring binding structures such as lambdas
--   to generate fresh names.
--
-- Therefore the type of an untyped quotation in GHC is `Quote m => m Exp`
--
-- For many years the type of a quotation was fixed to be `Q Exp` but by
-- more precisely specifying the minimal interface it enables the `Exp` to
-- be extracted purely from the quotation without interacting with `Q`.
class Monad m => Quote m where
  {- |
  Generate a fresh name, which cannot be captured.

  For example, this:

  @f = $(do
    nm1 <- newName \"x\"
    let nm2 = 'mkName' \"x\"
    return ('LamE' ['VarP' nm1] (LamE [VarP nm2] ('VarE' nm1)))
   )@

  will produce the splice

  >f = \x0 -> \x -> x0

  In particular, the occurrence @VarE nm1@ refers to the binding @VarP nm1@,
  and is not captured by the binding @VarP nm2@.

  Although names generated by @newName@ cannot /be captured/, they can
  /capture/ other names. For example, this:

  >g = $(do
  >  nm1 <- newName "x"
  >  let nm2 = mkName "x"
  >  return (LamE [VarP nm2] (LamE [VarP nm1] (VarE nm2)))
  > )

  will produce the splice

  >g = \x -> \x0 -> x0

  since the occurrence @VarE nm2@ is captured by the innermost binding
  of @x@, namely @VarP nm1@.
  -}
  newName :: String -> m Name

instance Quote Q where
  newName s = Q (qNewName s)

-----------------------------------------------------
--
--              The TExp type
--
-----------------------------------------------------

type role TExp nominal   -- See Note [Role of TExp]
newtype TExp (a :: TYPE (r :: RuntimeRep)) = TExp
  { unType :: Exp -- ^ Underlying untyped Template Haskell expression
  }
-- ^ Represents an expression which has type @a@. Built on top of 'Exp', typed
-- expressions allow for type-safe splicing via:
--
--   - typed quotes, written as @[|| ... ||]@ where @...@ is an expression; if
--     that expression has type @a@, then the quotation has type
--     @'Q' ('TExp' a)@
--
--   - typed splices inside of typed quotes, written as @$$(...)@ where @...@
--     is an arbitrary expression of type @'Q' ('TExp' a)@
--
-- Traditional expression quotes and splices let us construct ill-typed
-- expressions:
--
-- >>> fmap ppr $ runQ [| True == $( [| "foo" |] ) |]
-- GHC.Types.True GHC.Classes.== "foo"
-- >>> GHC.Types.True GHC.Classes.== "foo"
-- error:
--     • Couldn't match expected type ‘Bool’ with actual type ‘[Char]’
--     • In the second argument of ‘(==)’, namely ‘"foo"’
--       In the expression: True == "foo"
--       In an equation for ‘it’: it = True == "foo"
--
-- With typed expressions, the type error occurs when /constructing/ the
-- Template Haskell expression:
--
-- >>> fmap ppr $ runQ [|| True == $$( [|| "foo" ||] ) ||]
-- error:
--     • Couldn't match type ‘[Char]’ with ‘Bool’
--       Expected type: Q (TExp Bool)
--         Actual type: Q (TExp [Char])
--     • In the Template Haskell quotation [|| "foo" ||]
--       In the expression: [|| "foo" ||]
--       In the Template Haskell splice $$([|| "foo" ||])
--
-- Representation-polymorphic since /template-haskell-2.16.0.0/.

-- | Discard the type annotation and produce a plain Template Haskell
-- expression
--
-- Representation-polymorphic since /template-haskell-2.16.0.0/.
unTypeQ :: forall (r :: RuntimeRep) (a :: TYPE r) m . Quote m => m (TExp a) -> m Exp
unTypeQ m
  = do { TExp e <- m
       ; return e }

-- | Annotate the Template Haskell expression with a type
--
-- This is unsafe because GHC cannot check for you that the expression
-- really does have the type you claim it has.
--
-- Representation-polymorphic since /template-haskell-2.16.0.0/.
unsafeTExpCoerce :: forall (r :: RuntimeRep) (a :: TYPE r) m .
                      Quote m => m Exp -> m (TExp a)
unsafeTExpCoerce m
  = do { e <- m
       ; return (TExp e) }

{- Note [Role of TExp]
~~~~~~~~~~~~~~~~~~~~~~
TExp's argument must have a nominal role, not phantom as would
be inferred (#8459).  Consider

  e :: TExp Age
  e = MkAge 3

  foo = $(coerce e) + 4::Int

The splice will evaluate to (MkAge 3) and you can't add that to
4::Int. So you can't coerce a (TExp Age) to a (TExp Int). -}

-- Code constructor

type role Code representational nominal   -- See Note [Role of TExp]
newtype Code m (a :: TYPE (r :: RuntimeRep)) = Code
  { examineCode :: m (TExp a) -- ^ Underlying monadic value
  }

-- | Unsafely convert an untyped code representation into a typed code
-- representation.
unsafeCodeCoerce :: forall (r :: RuntimeRep) (a :: TYPE r) m .
                      Quote m => m Exp -> Code m a
unsafeCodeCoerce m = Code (unsafeTExpCoerce m)

-- | Lift a monadic action producing code into the typed 'Code'
-- representation
liftCode :: forall (r :: RuntimeRep) (a :: TYPE r) m . m (TExp a) -> Code m a
liftCode = Code

-- | Extract the untyped representation from the typed representation
unTypeCode :: forall (r :: RuntimeRep) (a :: TYPE r) m . Quote m
           => Code m a -> m Exp
unTypeCode = unTypeQ . examineCode

-- | Modify the ambient monad used during code generation. For example, you
-- can use `hoistCode` to handle a state effect:
-- @
--  handleState :: Code (StateT Int Q) a -> Code Q a
--  handleState = hoistCode (flip runState 0)
-- @
hoistCode :: forall m n (r :: RuntimeRep) (a :: TYPE r) . Monad m
          => (forall x . m x -> n x) -> Code m a -> Code n a
hoistCode f (Code a) = Code (f a)


-- | Variant of (>>=) which allows effectful computations to be injected
-- into code generation.
bindCode :: forall m a (r :: RuntimeRep) (b :: TYPE r) . Monad m
         => m a -> (a -> Code m b) -> Code m b
bindCode q k = liftCode (q >>= examineCode . k)

-- | Variant of (>>) which allows effectful computations to be injected
-- into code generation.
bindCode_ :: forall m a (r :: RuntimeRep) (b :: TYPE r) . Monad m
          => m a -> Code m b -> Code m b
bindCode_ q c = liftCode ( q >> examineCode c)

-- | A useful combinator for embedding monadic actions into 'Code'
-- @
-- myCode :: ... => Code m a
-- myCode = joinCode $ do
--   x <- someSideEffect
--   return (makeCodeWith x)
-- @
joinCode :: forall m (r :: RuntimeRep) (a :: TYPE r) . Monad m
         => m (Code m a) -> Code m a
joinCode = flip bindCode id

----------------------------------------------------
-- Packaged versions for the programmer, hiding the Quasi-ness

-- | Report an error (True) or warning (False),
-- but carry on; use 'fail' to stop.
report  :: Bool -> String -> Q ()
report b s = Q (qReport b s)
{-# DEPRECATED report "Use reportError or reportWarning instead" #-} -- deprecated in 7.6

-- | Report an error to the user, but allow the current splice's computation to carry on. To abort the computation, use 'fail'.
reportError :: String -> Q ()
reportError = report True

-- | Report a warning to the user, and carry on.
reportWarning :: String -> Q ()
reportWarning = report False

-- | Recover from errors raised by 'reportError' or 'fail'.
recover :: Q a -- ^ handler to invoke on failure
        -> Q a -- ^ computation to run
        -> Q a
recover (Q r) (Q m) = Q (qRecover r m)

-- We don't export lookupName; the Bool isn't a great API
-- Instead we export lookupTypeName, lookupValueName
lookupName :: Bool -> String -> Q (Maybe Name)
lookupName ns s = Q (qLookupName ns s)

-- | Look up the given name in the (type namespace of the) current splice's scope. See "Language.Haskell.TH.Syntax#namelookup" for more details.
lookupTypeName :: String -> Q (Maybe Name)
lookupTypeName  s = Q (qLookupName True s)

-- | Look up the given name in the (value namespace of the) current splice's scope. See "Language.Haskell.TH.Syntax#namelookup" for more details.
lookupValueName :: String -> Q (Maybe Name)
lookupValueName s = Q (qLookupName False s)

{- Note [Name lookup]
~~~~~~~~~~~~~~~~~~
-}
{- $namelookup #namelookup#
The functions 'lookupTypeName' and 'lookupValueName' provide
a way to query the current splice's context for what names
are in scope.
The function 'lookupTypeName' queries the type namespace, whereas 'lookupValueName' queries the value namespace, but the functions are otherwise identical. A call @lookupValueName s@ will check if there is a value with name @s@ in scope at the current splice's location. If there is, the @Name@ of this value is returned; if not, then @Nothing@ is returned. The returned name cannot be \"captured\". For example: > f = "global" > g = $( do > Just nm <- lookupValueName "f" > [| let f = "local" in $( varE nm ) |] In this case, @g = \"global\"@; the call to @lookupValueName@ returned the global @f@, and this name was /not/ captured by the local definition of @f@. The lookup is performed in the context of the /top-level/ splice being run. For example: > f = "global" > g = $( [| let f = "local" in > $(do > Just nm <- lookupValueName "f" > varE nm > ) |] ) Again in this example, @g = \"global\"@, because the call to @lookupValueName@ queries the context of the outer-most @$(...)@. Operators should be queried without any surrounding parentheses, like so: > lookupValueName "+" Qualified names are also supported, like so: > lookupValueName "Prelude.+" > lookupValueName "Prelude.map" -} {- | 'reify' looks up information about the 'Name'. It will fail with a compile error if the 'Name' is not visible. A 'Name' is visible if it is imported or defined in a prior top-level declaration group. See the documentation for 'newDeclarationGroup' for more details. It is sometimes useful to construct the argument name using 'lookupTypeName' or 'lookupValueName' to ensure that we are reifying from the right namespace. For instance, in this context: > data D = D which @D@ does @reify (mkName \"D\")@ return information about? (Answer: @D@-the-type, but don't rely on it.) To ensure we get information about @D@-the-value, use 'lookupValueName': > do > Just nm <- lookupValueName "D" > reify nm and to get information about @D@-the-type, use 'lookupTypeName'. 
-}
reify :: Name -> Q Info
reify v = Q (qReify v)

{- | @reifyFixity nm@ attempts to find a fixity declaration for @nm@. For
example, if the function @foo@ has the fixity declaration @infixr 7 foo@, then
@reifyFixity 'foo@ would return @'Just' ('Fixity' 7 'InfixR')@. If the function
@bar@ does not have a fixity declaration, then @reifyFixity 'bar@ returns
'Nothing', so you may assume @bar@ has 'defaultFixity'.
-}
reifyFixity :: Name -> Q (Maybe Fixity)
reifyFixity nm = Q (qReifyFixity nm)

{- | @reifyType nm@ attempts to find the type or kind of @nm@. For example,
@reifyType 'not@ returns @Bool -> Bool@, and @reifyType ''Bool@ returns
@Type@. This works even if there's no explicit signature and the type or kind
is inferred.
-}
reifyType :: Name -> Q Type
reifyType nm = Q (qReifyType nm)

{- | Template Haskell is capable of reifying information about types and
terms defined in previous declaration groups. Top-level declaration splices
break up declaration groups.

For an example, consider this code block. We define a datatype @X@ and
then try to call 'reify' on the datatype.

@
module Check where

data X = X
    deriving Eq

$(do
    info <- reify ''X
    runIO $ print info
 )
@

This code fails to compile, noting that @X@ is not available for reification
at the site of 'reify'. We can fix this by creating a new declaration group
using an empty top-level splice:

@
data X = X
    deriving Eq

$(pure [])

$(do
    info <- reify ''X
    runIO $ print info
 )
@

We provide 'newDeclarationGroup' as a means of documenting this behavior
and providing a name for the pattern.

Since top level splices infer the presence of the @$( ... )@ brackets, we can
also write:

@
data X = X
    deriving Eq

newDeclarationGroup

$(do
    info <- reify ''X
    runIO $ print info
 )
@

-}
newDeclarationGroup :: Q [Dec]
newDeclarationGroup = pure []

{- | @reifyInstances nm tys@ returns a list of visible instances of @nm tys@.
That is, if @nm@ is the name of a type class, then all instances of this class
at the types @tys@ are returned. Alternatively, if @nm@ is the name of a data
family or type family, all instances of this family at the types @tys@ are
returned.

Note that this is a \"shallow\" test; the declarations returned merely have
instance heads which unify with @nm tys@, they need not actually be
satisfiable.

  - @reifyInstances ''Eq [ 'TupleT' 2 \`'AppT'\` 'ConT' ''A \`'AppT'\` 'ConT' ''B ]@
    contains the @instance (Eq a, Eq b) => Eq (a, b)@ regardless of whether
    @A@ and @B@ themselves implement 'Eq'

  - @reifyInstances ''Show [ 'VarT' ('mkName' "a") ]@ produces every available
    instance of 'Show'

There is one edge case: @reifyInstances ''Typeable tys@ currently always
produces an empty list (no matter what @tys@ are given).

An instance is visible if it is imported or defined in a prior top-level
declaration group. See the documentation for 'newDeclarationGroup' for more
details.
-}
reifyInstances :: Name -> [Type] -> Q [InstanceDec]
reifyInstances cls tys = Q (qReifyInstances cls tys)

{- | @reifyRoles nm@ returns the list of roles associated with the parameters
(both visible and invisible) of the tycon @nm@. Fails if @nm@ cannot be found
or is not a tycon. The returned list should never contain 'InferR'.

An invisible parameter to a tycon is often a kind parameter. For example, if
we have

@
type Proxy :: forall k. k -> Type
data Proxy a = MkProxy
@

and @reifyRoles Proxy@, we will get @['NominalR', 'PhantomR']@. The 'NominalR'
is the role of the invisible @k@ parameter. Kind parameters are always
nominal.
-}
reifyRoles :: Name -> Q [Role]
reifyRoles nm = Q (qReifyRoles nm)

-- | @reifyAnnotations target@ returns the list of annotations
-- associated with @target@.  Only the annotations that are
-- appropriately typed are returned.  So if you have @Int@ and @String@
-- annotations for the same target, you have to call this function twice.
reifyAnnotations :: Data a => AnnLookup -> Q [a]
reifyAnnotations an = Q (qReifyAnnotations an)

-- | @reifyModule mod@ looks up information about module @mod@.  To
-- look up the current module, call this function with the return
-- value of 'Language.Haskell.TH.Lib.thisModule'.
reifyModule :: Module -> Q ModuleInfo
reifyModule m = Q (qReifyModule m)

-- | @reifyConStrictness nm@ looks up the strictness information for the fields
-- of the constructor with the name @nm@. Note that the strictness information
-- that 'reifyConStrictness' returns may not correspond to what is written in
-- the source code. For example, in the following data declaration:
--
-- @
-- data Pair a = Pair a a
-- @
--
-- 'reifyConStrictness' would return @['DecidedLazy', DecidedLazy]@ under most
-- circumstances, but it would return @['DecidedStrict', DecidedStrict]@ if the
-- @-XStrictData@ language extension was enabled.
reifyConStrictness :: Name -> Q [DecidedStrictness]
reifyConStrictness n = Q (qReifyConStrictness n)

-- | Is the list of instances returned by 'reifyInstances' nonempty?
--
-- If you're confused by an instance not being visible despite being
-- defined in the same module and above the splice in question, see the
-- docs for 'newDeclarationGroup' for a possible explanation.
isInstance :: Name -> [Type] -> Q Bool
isInstance nm tys = do { decs <- reifyInstances nm tys
                       ; return (not (null decs)) }

-- | The location at which this computation is spliced.
location :: Q Loc
location = Q qLocation

-- | The 'runIO' function lets you run an I\/O computation in the 'Q' monad.
-- Take care: you are guaranteed the ordering of calls to 'runIO' within
-- a single 'Q' computation, but not about the order in which splices are run.
--
-- Note: for various murky reasons, stdout and stderr handles are not
-- necessarily flushed when the compiler finishes running, so you should
-- flush them yourself.
runIO :: IO a -> Q a
runIO m = Q (qRunIO m)

-- | Get the package root for the current package which is being compiled.
-- This can be set explicitly with the -package-root flag but is normally
-- just the current working directory.
--
-- The motivation for this flag is to provide a principled means to remove the
-- assumption from splices that they will be executed in the directory where the
-- cabal file resides. Projects such as haskell-language-server can't and don't
-- change directory when compiling files but instead set the -package-root flag
-- appropriately.
getPackageRoot :: Q FilePath
getPackageRoot = Q qGetPackageRoot

-- | The input is a filepath, which if relative is offset by the package root.
makeRelativeToProject :: FilePath -> Q FilePath
makeRelativeToProject fp | isRelative fp = do
  root <- getPackageRoot
  -- Join with the platform path separator; a bare application of root to fp
  -- would not even type check, the </> from System.FilePath is required here.
  return (root </> fp)
makeRelativeToProject fp = return fp

-- | Record external files that runIO is using (dependent upon).
-- The compiler can then recognize that it should re-compile the Haskell file
-- when an external file changes.
--
-- Expects an absolute file path.
--
-- Notes:
--
--   * ghc -M does not know about these dependencies - it does not execute TH.
--
--   * The dependency is based on file content, not a modification time
addDependentFile :: FilePath -> Q ()
addDependentFile fp = Q (qAddDependentFile fp)

-- | Obtain a temporary file path with the given suffix. The compiler will
-- delete this file after compilation.
addTempFile :: String -> Q FilePath
addTempFile suffix = Q (qAddTempFile suffix)

-- | Add additional top-level declarations. The added declarations will be type
-- checked along with the current declaration group.
addTopDecls :: [Dec] -> Q ()
addTopDecls ds = Q (qAddTopDecls ds)

-- | A deprecated synonym for 'addForeignSource'.
addForeignFile :: ForeignSrcLang -> String -> Q ()
addForeignFile = addForeignSource
{-# DEPRECATED addForeignFile
               "Use 'Language.Haskell.TH.Syntax.addForeignSource' instead"
  #-} -- deprecated in 8.6

-- | Emit a foreign file which will be compiled and linked to the object for
-- the current module. Currently only languages that can be compiled with
-- the C compiler are supported, and the flags passed as part of -optc will
-- be also applied to the C compiler invocation that will compile them.
--
-- Note that for non-C languages (for example C++) @extern "C"@ directives
-- must be used to get symbols that we can access from Haskell.
--
-- To get better errors, it is recommended to use #line pragmas when
-- emitting C files, e.g.
--
-- > {-# LANGUAGE CPP #-}
-- > ...
-- > addForeignSource LangC $ unlines
-- >   [ "#line " ++ show (__LINE__ + 1) ++ " " ++ show __FILE__
-- >   , ...
-- >   ]
addForeignSource :: ForeignSrcLang -> String -> Q ()
addForeignSource lang src = do
  -- Pick a file suffix the C-compiler driver recognises for this language.
  let suffix = case lang of
                 LangC      -> "c"
                 LangCxx    -> "cpp"
                 LangObjc   -> "m"
                 LangObjcxx -> "mm"
                 LangAsm    -> "s"
                 RawObject  -> "a"
  path <- addTempFile suffix
  runIO $ writeFile path src
  addForeignFilePath lang path

-- | Same as 'addForeignSource', but expects to receive a path pointing to the
-- foreign file instead of a 'String' of its contents. Consider using this in
-- conjunction with 'addTempFile'.
--
-- This is a good alternative to 'addForeignSource' when you are trying to
-- directly link in an object file.
addForeignFilePath :: ForeignSrcLang -> FilePath -> Q ()
addForeignFilePath lang fp = Q (qAddForeignFilePath lang fp)

-- | Add a finalizer that will run in the Q monad after the current module has
-- been type checked. This only makes sense when run within a top-level splice.
--
-- The finalizer is given the local type environment at the splice point. Thus
-- 'reify' is able to find the local definitions when executed inside the
-- finalizer.
addModFinalizer :: Q () -> Q ()
addModFinalizer act = Q (qAddModFinalizer (unQ act))

-- | Adds a core plugin to the compilation pipeline.
--
-- @addCorePlugin m@ has almost the same effect as passing @-fplugin=m@ to ghc
-- in the command line. The major difference is that the plugin module @m@
-- must not belong to the current package. When TH executes, it is too late
-- to tell the compiler that we needed to compile first a plugin module in the
-- current package.
addCorePlugin :: String -> Q ()
addCorePlugin plugin = Q (qAddCorePlugin plugin)

-- | Get state from the 'Q' monad. Note that the state is local to the
-- Haskell module in which the Template Haskell expression is executed.
getQ :: Typeable a => Q (Maybe a)
getQ = Q qGetQ

-- | Replace the state in the 'Q' monad. Note that the state is local to the
-- Haskell module in which the Template Haskell expression is executed.
putQ :: Typeable a => a -> Q ()
putQ x = Q (qPutQ x)

-- | Determine whether the given language extension is enabled in the 'Q' monad.
isExtEnabled :: Extension -> Q Bool
isExtEnabled ext = Q (qIsExtEnabled ext)

-- | List all enabled language extensions.
extsEnabled :: Q [Extension]
extsEnabled = Q qExtsEnabled

-- | Add Haddock documentation to the specified location. This will overwrite
-- any documentation at the location if it already exists. This will reify the
-- specified name, so it must be in scope when you call it. If you want to add
-- documentation to something that you are currently splicing, you can use
-- 'addModFinalizer' e.g.
--
-- > do
-- >   let nm = mkName "x"
-- >   addModFinalizer $ putDoc (DeclDoc nm) "Hello"
-- >   [d| $(varP nm) = 42 |]
--
-- The helper functions 'withDecDoc' and 'withDecsDoc' will do this for you, as
-- will the 'funD_doc' and other @_doc@ combinators.
-- You most likely want to have the @-haddock@ flag turned on when using this.
-- Adding documentation to anything outside of the current module will cause an
-- error.
putDoc :: DocLoc -> String -> Q ()
putDoc t s = Q (qPutDoc t s)

-- | Retrieves the Haddock documentation at the specified location, if one
-- exists.
-- It can be used to read documentation on things defined outside of the current
-- module, provided that those modules were compiled with the @-haddock@ flag.
getDoc :: DocLoc -> Q (Maybe String)
getDoc n = Q (qGetDoc n)

instance MonadIO Q where
  liftIO = runIO

-- | 'Q' trivially runs itself: each 'Quasi' method is implemented directly
-- by the corresponding 'Q' operation defined above.
instance Quasi Q where
  qNewName            = newName
  qReport             = report
  qRecover            = recover
  qReify              = reify
  qReifyFixity        = reifyFixity
  qReifyType          = reifyType
  qReifyInstances     = reifyInstances
  qReifyRoles         = reifyRoles
  qReifyAnnotations   = reifyAnnotations
  qReifyModule        = reifyModule
  qReifyConStrictness = reifyConStrictness
  qLookupName         = lookupName
  qLocation           = location
  qGetPackageRoot     = getPackageRoot
  qAddDependentFile   = addDependentFile
  qAddTempFile        = addTempFile
  qAddTopDecls        = addTopDecls
  qAddForeignFilePath = addForeignFilePath
  qAddModFinalizer    = addModFinalizer
  qAddCorePlugin      = addCorePlugin
  qGetQ               = getQ
  qPutQ               = putQ
  qIsExtEnabled       = isExtEnabled
  qExtsEnabled        = extsEnabled
  qPutDoc             = putDoc
  qGetDoc             = getDoc

----------------------------------------------------
-- The following operations are used solely in GHC.HsToCore.Quote when
-- desugaring brackets. They are not necessary for the user, who can use
-- ordinary return and (>>=) etc

sequenceQ :: forall m . Monad m => forall a . [m a] -> m [a]
sequenceQ = sequence

-----------------------------------------------------
--
--              The Lift class
--
-----------------------------------------------------

-- | A 'Lift' instance can have any of its values turned into a Template
-- Haskell expression. This is needed when a value used within a Template
-- Haskell quotation is bound outside the Oxford brackets (@[| ... |]@ or
-- @[|| ... ||]@) but not at the top level. As an example:
--
-- > add1 :: Int -> Q (TExp Int)
-- > add1 x = [|| x + 1 ||]
--
-- Template Haskell has no way of knowing what value @x@ will take on at
-- splice-time, so it requires the type of @x@ to be an instance of 'Lift'.
--
-- A 'Lift' instance must satisfy @$(lift x) ≡ x@ and @$$(liftTyped x) ≡ x@
-- for all @x@, where @$(...)@ and @$$(...)@ are Template Haskell splices.
-- It is additionally expected that @'lift' x ≡ 'unTypeQ' ('liftTyped' x)@.
--
-- 'Lift' instances can be derived automatically by use of the @-XDeriveLift@
-- GHC language extension:
--
-- > {-# LANGUAGE DeriveLift #-}
-- > module Foo where
-- >
-- > import Language.Haskell.TH.Syntax
-- >
-- > data Bar a = Bar1 a (Bar a) | Bar2 String
-- >   deriving Lift
--
-- Representation-polymorphic since /template-haskell-2.16.0.0/.
class Lift (t :: TYPE r) where
  -- | Turn a value into a Template Haskell expression, suitable for use in
  -- a splice.
  lift :: Quote m => t -> m Exp
#if __GLASGOW_HASKELL__ >= 901
  default lift :: (r ~ ('BoxedRep 'Lifted), Quote m) => t -> m Exp
#else
  default lift :: (r ~ 'LiftedRep, Quote m) => t -> m Exp
#endif
  lift = unTypeCode . liftTyped

  -- | Turn a value into a Template Haskell typed expression, suitable for use
  -- in a typed splice.
  --
  -- @since 2.16.0.0
  liftTyped :: Quote m => t -> Code m t

-- If you add any instances here, consider updating test th/TH_Lift

instance Lift Integer where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL x))

instance Lift Int where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

-- | @since 2.16.0.0
instance Lift Int# where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntPrimL (fromIntegral (I# x))))

instance Lift Int8 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Int16 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Int32 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Int64 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

-- | @since 2.16.0.0
instance Lift Word# where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (WordPrimL (fromIntegral (W# x))))

instance Lift Word where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Word8 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Word16 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Word32 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Word64 where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Lift Natural where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (IntegerL (fromIntegral x)))

instance Integral a => Lift (Ratio a) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (RationalL (toRational x)))

instance Lift Float where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (RationalL (toRational x)))

-- | @since 2.16.0.0
instance Lift Float# where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (FloatPrimL (toRational (F# x))))

instance Lift Double where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (RationalL (toRational x)))

-- | @since 2.16.0.0
instance Lift Double# where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (DoublePrimL (toRational (D# x))))

instance Lift Char where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (CharL x))

-- | @since 2.16.0.0
instance Lift Char# where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x = return (LitE (CharPrimL (C# x)))

instance Lift Bool where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift True  = return (ConE trueName)
  lift False = return (ConE falseName)

-- | Produces an 'Addr#' literal from the NUL-terminated C-string starting at
-- the given memory address.
--
-- @since 2.16.0.0
instance Lift Addr# where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = return (LitE (StringPrimL (map (fromIntegral . ord) (unpackCString# x))))

#if __GLASGOW_HASKELL__ >= 903
-- |
-- @since 2.19.0.0
instance Lift ByteArray where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (ByteArray b) = return
    (AppE (AppE (VarE addrToByteArrayName) (LitE (IntegerL (fromIntegral len))))
      (LitE (BytesPrimL (Bytes ptr 0 (fromIntegral len)))))
    where
      len# = sizeofByteArray# b
      len = I# len#
      -- pb is a pinned copy of b (or b itself when it is already pinned),
      -- so that its contents can be referred to by address.
      pb :: ByteArray#
      !(ByteArray pb)
        | isTrue# (isByteArrayPinned# b) = ByteArray b
        | otherwise = runST $ ST $
          \s -> case newPinnedByteArray# len# s of
            (# s', mb #) -> case copyByteArray# b 0# mb 0# len# s' of
              s'' -> case unsafeFreezeByteArray# mb s'' of
                (# s''', ret #) -> (# s''', ByteArray ret #)
      ptr :: ForeignPtr Word8
      ptr = ForeignPtr (byteArrayContents# pb) (PlainPtr (unsafeCoerce# pb))

-- We can't use a TH quote in this module because we're in the template-haskell
-- package, so we concoct this quite defensive solution to make the correct name
-- which will work if the package name or module name changes in future.
addrToByteArrayName :: Name
addrToByteArrayName = helper
  where
    helper :: HasCallStack => Name
    helper =
      case head (getCallStack ?callStack) of
        (_, SrcLoc{..}) -> mkNameG_v srcLocPackage srcLocModule "addrToByteArray"

addrToByteArray :: Int -> Addr# -> ByteArray
addrToByteArray (I# len) addr = runST $ ST $
  \s -> case newByteArray# len s of
    (# s', mb #) -> case copyAddrToByteArray# addr mb 0# len s' of
      s'' -> case unsafeFreezeByteArray# mb s'' of
        (# s''', ret #) -> (# s''', ByteArray ret #)
#endif

instance Lift a => Lift (Maybe a) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift Nothing  = return (ConE nothingName)
  lift (Just x) = liftM (ConE justName `AppE`) (lift x)

instance (Lift a, Lift b) => Lift (Either a b) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (Left x)  = liftM (ConE leftName  `AppE`) (lift x)
  lift (Right y) = liftM (ConE rightName `AppE`) (lift y)

instance Lift a => Lift [a] where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift xs = do { xs' <- mapM lift xs; return (ListE xs') }
liftString :: Quote m => String -> m Exp
-- Used in GHC.Tc.Gen.Expr to short-circuit the lifting for strings
liftString s = return (LitE (StringL s))

-- | @since 2.15.0.0
instance Lift a => Lift (NonEmpty a) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (x :| xs) = do
    x' <- lift x
    xs' <- lift xs
    return (InfixE (Just x') (ConE nonemptyName) (Just xs'))

-- | @since 2.15.0.0
instance Lift Void where
  liftTyped = liftCode . absurd
  lift = pure . absurd

instance Lift () where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift () = return (ConE (tupleDataName 0))

instance (Lift a, Lift b) => Lift (a, b) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (a, b)
    = liftM TupE $ sequence $ map (fmap Just) [lift a, lift b]

instance (Lift a, Lift b, Lift c) => Lift (a, b, c) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (a, b, c)
    = liftM TupE $ sequence $ map (fmap Just) [lift a, lift b, lift c]

instance (Lift a, Lift b, Lift c, Lift d) => Lift (a, b, c, d) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (a, b, c, d)
    = liftM TupE $ sequence $ map (fmap Just) [lift a, lift b, lift c, lift d]

instance (Lift a, Lift b, Lift c, Lift d, Lift e)
      => Lift (a, b, c, d, e) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (a, b, c, d, e)
    = liftM TupE $ sequence $ map (fmap Just) [ lift a, lift b
                                              , lift c, lift d, lift e ]

instance (Lift a, Lift b, Lift c, Lift d, Lift e, Lift f)
      => Lift (a, b, c, d, e, f) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (a, b, c, d, e, f)
    = liftM TupE $ sequence $ map (fmap Just) [ lift a, lift b, lift c
                                              , lift d, lift e, lift f ]

instance (Lift a, Lift b, Lift c, Lift d, Lift e, Lift f, Lift g)
      => Lift (a, b, c, d, e, f, g) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (a, b, c, d, e, f, g)
    = liftM TupE $ sequence $ map (fmap Just) [ lift a, lift b, lift c
                                              , lift d, lift e, lift f, lift g ]

-- | @since 2.16.0.0
instance Lift (# #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# #) = return (ConE (unboxedTupleTypeName 0))

-- | @since 2.16.0.0
instance (Lift a) => Lift (# a #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [lift a]

-- | @since 2.16.0.0
instance (Lift a, Lift b) => Lift (# a, b #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a, b #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [lift a, lift b]

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c)
      => Lift (# a, b, c #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a, b, c #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [lift a, lift b, lift c]

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d)
      => Lift (# a, b, c, d #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a, b, c, d #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [ lift a, lift b
                                                     , lift c, lift d ]

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d, Lift e)
      => Lift (# a, b, c, d, e #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a, b, c, d, e #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [ lift a, lift b
                                                     , lift c, lift d, lift e ]

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d, Lift e, Lift f)
      => Lift (# a, b, c, d, e, f #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a, b, c, d, e, f #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [ lift a, lift b, lift c
                                                     , lift d, lift e, lift f ]

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d, Lift e, Lift f, Lift g)
      => Lift (# a, b, c, d, e, f, g #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift (# a, b, c, d, e, f, g #)
    = liftM UnboxedTupE $ sequence $ map (fmap Just) [ lift a, lift b, lift c
                                                     , lift d, lift e, lift f
                                                     , lift g ]

-- | @since 2.16.0.0
instance (Lift a, Lift b) => Lift (# a | b #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = case x of
        (# y | #) -> UnboxedSumE <$> lift y <*> pure 1 <*> pure 2
        (# | y #) -> UnboxedSumE <$> lift y <*> pure 2 <*> pure 2

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c)
      => Lift (# a | b | c #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = case x of
        (# y | | #) -> UnboxedSumE <$> lift y <*> pure 1 <*> pure 3
        (# | y | #) -> UnboxedSumE <$> lift y <*> pure 2 <*> pure 3
        (# | | y #) -> UnboxedSumE <$> lift y <*> pure 3 <*> pure 3

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d)
      => Lift (# a | b | c | d #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = case x of
        (# y | | | #) -> UnboxedSumE <$> lift y <*> pure 1 <*> pure 4
        (# | y | | #) -> UnboxedSumE <$> lift y <*> pure 2 <*> pure 4
        (# | | y | #) -> UnboxedSumE <$> lift y <*> pure 3 <*> pure 4
        (# | | | y #) -> UnboxedSumE <$> lift y <*> pure 4 <*> pure 4

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d, Lift e)
      => Lift (# a | b | c | d | e #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = case x of
        (# y | | | | #) -> UnboxedSumE <$> lift y <*> pure 1 <*> pure 5
        (# | y | | | #) -> UnboxedSumE <$> lift y <*> pure 2 <*> pure 5
        (# | | y | | #) -> UnboxedSumE <$> lift y <*> pure 3 <*> pure 5
        (# | | | y | #) -> UnboxedSumE <$> lift y <*> pure 4 <*> pure 5
        (# | | | | y #) -> UnboxedSumE <$> lift y <*> pure 5 <*> pure 5

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d, Lift e, Lift f)
      => Lift (# a | b | c | d | e | f #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = case x of
        (# y | | | | | #) -> UnboxedSumE <$> lift y <*> pure 1 <*> pure 6
        (# | y | | | | #) -> UnboxedSumE <$> lift y <*> pure 2 <*> pure 6
        (# | | y | | | #) -> UnboxedSumE <$> lift y <*> pure 3 <*> pure 6
        (# | | | y | | #) -> UnboxedSumE <$> lift y <*> pure 4 <*> pure 6
        (# | | | | y | #) -> UnboxedSumE <$> lift y <*> pure 5 <*> pure 6
        (# | | | | | y #) -> UnboxedSumE <$> lift y <*> pure 6 <*> pure 6

-- | @since 2.16.0.0
instance (Lift a, Lift b, Lift c, Lift d, Lift e, Lift f, Lift g)
      => Lift (# a | b | c | d | e | f | g #) where
  liftTyped x = unsafeCodeCoerce (lift x)
  lift x
    = case x of
        (# y | | | | | | #) -> UnboxedSumE <$> lift y <*> pure 1 <*> pure 7
        (# | y | | | | | #) -> UnboxedSumE <$> lift y <*> pure 2 <*> pure 7
        (# | | y | | | | #) -> UnboxedSumE <$> lift y <*> pure 3 <*> pure 7
        (# | | | y | | | #) -> UnboxedSumE <$> lift y <*> pure 4 <*> pure 7
        (# | | | | y | | #) -> UnboxedSumE <$> lift y <*> pure 5 <*> pure 7
        (# | | | | | y | #) -> UnboxedSumE <$> lift y <*> pure 6 <*> pure 7
        (# | | | | | | y #) -> UnboxedSumE <$> lift y <*> pure 7 <*> pure 7

-- TH has a special form for literal strings,
-- which we should take advantage of.
-- NB: the lhs of the rule has no args, so that
--     the rule will apply to a 'lift' all on its own
--     which happens to be the way the type checker
--     creates it.
{-# RULES "TH:liftString" lift = \s -> return (LitE (StringL s)) #-}

trueName, falseName :: Name
trueName  = mkNameG DataName "ghc-prim" "GHC.Types" "True"
falseName = mkNameG DataName "ghc-prim" "GHC.Types" "False"

nothingName, justName :: Name
nothingName = mkNameG DataName "base" "GHC.Maybe" "Nothing"
justName    = mkNameG DataName "base" "GHC.Maybe" "Just"

leftName, rightName :: Name
leftName  = mkNameG DataName "base" "Data.Either" "Left"
rightName = mkNameG DataName "base" "Data.Either" "Right"

nonemptyName :: Name
nonemptyName = mkNameG DataName "base" "GHC.Base" ":|"

oneName, manyName :: Name
oneName  = mkNameG DataName "ghc-prim" "GHC.Types" "One"
manyName = mkNameG DataName "ghc-prim" "GHC.Types" "Many"

-----------------------------------------------------
--
--              Generic Lift implementations
--
-----------------------------------------------------

-- | 'dataToQa' is an internal utility function for constructing generic
-- conversion functions from types with 'Data' instances to various
-- quasi-quoting representations.  See the source of 'dataToExpQ' and
-- 'dataToPatQ' for two example usages: @mkCon@, @mkLit@
-- and @appQ@ are overloadable to account for different syntax for
-- expressions and patterns; @antiQ@ allows you to override type-specific
-- cases, a common usage is just @const Nothing@, which results in
-- no overloading.
dataToQa  ::  forall m a k q. (Quote m, Data a)
          =>  (Name -> k)
          ->  (Lit -> m q)
          ->  (k -> [m q] -> m q)
          ->  (forall b . Data b => b -> Maybe (m q))
          ->  a
          ->  m q
dataToQa mkCon mkLit appCon antiQ t =
    case antiQ t of
      Nothing ->
          case constrRep constr of
            AlgConstr _ ->
                appCon (mkCon funOrConName) conArgs
              where
                funOrConName :: Name
                funOrConName =
                    case showConstr constr of
                      "(:)"       -> Name (mkOccName ":")
                                          (NameG DataName
                                                 (mkPkgName "ghc-prim")
                                                 (mkModName "GHC.Types"))
                      con@"[]"    -> Name (mkOccName con)
                                          (NameG DataName
                                                 (mkPkgName "ghc-prim")
                                                 (mkModName "GHC.Types"))
                      con@('(':_) -> Name (mkOccName con)
                                          (NameG DataName
                                                 (mkPkgName "ghc-prim")
                                                 (mkModName "GHC.Tuple"))

                      -- Tricky case: see Note [Data for non-algebraic types]
                      fun@(x:_)   | startsVarSym x || startsVarId x
                                  -> mkNameG_v tyconPkg tyconMod fun
                      con         -> mkNameG_d tyconPkg tyconMod con

                  where
                    tycon :: TyCon
                    tycon = (typeRepTyCon . typeOf) t

                    tyconPkg, tyconMod :: String
                    tyconPkg = tyConPackage tycon
                    tyconMod = tyConModule  tycon

                conArgs :: [m q]
                conArgs = gmapQ (dataToQa mkCon mkLit appCon antiQ) t
            IntConstr n ->
                mkLit $ IntegerL n
            FloatConstr n ->
                mkLit $ RationalL n
            CharConstr c ->
                mkLit $ CharL c
          where
            constr :: Constr
            constr = toConstr t

      Just y -> y

{- Note [Data for non-algebraic types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Class Data was originally intended for algebraic data types.  But
it is possible to use it for abstract types too.  For example, in
package `text` we find

  instance Data Text where
    ...
    toConstr _ = packConstr

  packConstr :: Constr
  packConstr = mkConstr textDataType "pack" [] Prefix

Here `packConstr` isn't a real data constructor, it's an ordinary
function.  Two complications

* In such a case, we must take care to build the Name using
  mkNameG_v (for values), not mkNameG_d (for data constructors).
  See #10796.

* The pseudo-constructor is named only by its string, here "pack".
  But 'dataToQa' needs the TyCon of its defining module, and has
  to assume it's defined in the same module as the TyCon itself.
  But nothing enforces that; #12596 shows what goes wrong if
  "pack" is defined in a different module than the data type "Text".
-}

-- | 'dataToExpQ' converts a value to a 'Exp' representation of the
-- same value, in the SYB style. It is generalized to take a function
-- override type-specific cases; see 'liftData' for a more commonly
-- used variant.
dataToExpQ  ::  (Quote m, Data a)
            =>  (forall b . Data b => b -> Maybe (m Exp))
            ->  a
            ->  m Exp
dataToExpQ = dataToQa varOrConE litE (foldl appE)
    where
          -- Make sure that VarE is used if the Constr value relies on a
          -- function underneath the surface (instead of a constructor).
          -- See #10796.
          varOrConE s =
            case nameSpace s of
                 Just VarName  -> return (VarE s)
                 Just DataName -> return (ConE s)
                 _ -> error $ "Can't construct an expression from name "
                           ++ showName s
          appE x y = do { a <- x; b <- y; return (AppE a b)}
          litE c = return (LitE c)

-- | 'liftData' is a variant of 'lift' in the 'Lift' type class which
-- works for any type with a 'Data' instance.
liftData :: (Quote m, Data a) => a -> m Exp
liftData = dataToExpQ (const Nothing)

-- | 'dataToPatQ' converts a value to a 'Pat' representation of the same
-- value, in the SYB style. It takes a function to handle type-specific cases,
-- alternatively, pass @const Nothing@ to get default behavior.
dataToPatQ  ::  (Quote m, Data a)
            =>  (forall b . Data b => b -> Maybe (m Pat))
            ->  a
            ->  m Pat
dataToPatQ = dataToQa id litP conP
    where litP l = return (LitP l)
          conP n ps =
            case nameSpace n of
                Just DataName -> do
                    ps' <- sequence ps
                    return (ConP n [] ps')
                _ -> error $ "Can't construct a pattern from name "
                          ++ showName n

-----------------------------------------------------
--              Names and uniques
-----------------------------------------------------

newtype ModName = ModName String        -- Module name
 deriving (Show,Eq,Ord,Data,Generic)

newtype PkgName = PkgName String        -- package name
 deriving (Show,Eq,Ord,Data,Generic)

-- | Obtained from 'reifyModule' and 'Language.Haskell.TH.Lib.thisModule'.
data Module = Module PkgName ModName -- package qualified module name
 deriving (Show,Eq,Ord,Data,Generic)

newtype OccName = OccName String
 deriving (Show,Eq,Ord,Data,Generic)

mkModName :: String -> ModName
mkModName s = ModName s

modString :: ModName -> String
modString (ModName m) = m

mkPkgName :: String -> PkgName
mkPkgName s = PkgName s

pkgString :: PkgName -> String
pkgString (PkgName m) = m

-----------------------------------------------------
--              OccName
-----------------------------------------------------

mkOccName :: String -> OccName
mkOccName s = OccName s

occString :: OccName -> String
occString (OccName occ) = occ

-----------------------------------------------------
--               Names
-----------------------------------------------------
--
-- For "global" names ('NameG') we need a totally unique name,
-- so we must include the name-space of the thing
--
-- For unique-numbered things ('NameU'), we've got a unique reference
-- anyway, so no need for name space
--
-- For dynamically bound thing ('NameS') we probably want them to
-- in a context-dependent way, so again we don't want the name
-- space.  For example:
--
-- > let v = mkName "T" in [| data $v = $v |]
--
-- Here we use the same Name for both type constructor and data constructor
--
--
-- NameL and NameG are bound *outside* the TH syntax tree
-- either globally (NameG) or locally (NameL). Ex:
--
-- > f x = $(h [| (map, x) |])
--
-- The 'map' will be a NameG, and 'x' will be a NameL
--
-- These Names should never appear in a binding position in a TH syntax tree

{- $namecapture #namecapture#
Much of 'Name' API is concerned with the problem of /name capture/, which
can be seen in the following example.

> f expr = [| let x = 0 in $expr |]
> ...
> g x = $( f [| x |] )
> h y = $( f [| y |] )

A naive desugaring of this would yield:

> g x = let x = 0 in x
> h y = let x = 0 in y

All of a sudden, @g@ and @h@ have different meanings! In this case,
we say that the @x@ in the RHS of @g@ has been /captured/
by the binding of @x@ in @f@.

What we actually want is for the @x@ in @f@ to be distinct from the
@x@ in @g@, so we get the following desugaring:

> g x = let x' = 0 in x
> h y = let x' = 0 in y

which avoids name capture as desired.

In the general case, we say that a @Name@ can be captured if
the thing it refers to can be changed by adding new declarations.
-}

{- | An abstract type representing names in the syntax tree.

'Name's can be constructed in several ways, which come with different
name-capture guarantees (see "Language.Haskell.TH.Syntax#namecapture" for
an explanation of name capture):

  * the built-in syntax @'f@ and @''T@ can be used to construct names,
    The expression @'f@ gives a @Name@ which refers to the value @f@
    currently in scope, and @''T@ gives a @Name@ which refers to the type @T@
    currently in scope. These names can never be captured.

  * 'lookupValueName' and 'lookupTypeName' are similar to @'f@ and
     @''T@ respectively, but the @Name@s are looked up at the point where the
     current splice is being run. These names can never be captured.

  * 'newName' monadically generates a new name, which can never
     be captured.

  * 'mkName' generates a capturable name.

Names constructed using @newName@ and @mkName@ may be used in bindings
(such as @let x = ...@ or @\x -> ...@), but names constructed using
@lookupValueName@, @lookupTypeName@, @'f@, @''T@ may not.
-} data Name = Name OccName NameFlavour deriving (Data, Eq, Generic) instance Ord Name where -- check if unique is different before looking at strings (Name o1 f1) `compare` (Name o2 f2) = (f1 `compare` f2) `thenCmp` (o1 `compare` o2) data NameFlavour = NameS -- ^ An unqualified name; dynamically bound | NameQ ModName -- ^ A qualified name; dynamically bound | NameU !Uniq -- ^ A unique local name | NameL !Uniq -- ^ Local name bound outside of the TH AST | NameG NameSpace PkgName ModName -- ^ Global name bound outside of the TH AST: -- An original name (occurrences only, not binders) -- Need the namespace too to be sure which -- thing we are naming deriving ( Data, Eq, Ord, Show, Generic ) data NameSpace = VarName -- ^ Variables | DataName -- ^ Data constructors | TcClsName -- ^ Type constructors and classes; Haskell has them -- in the same name space for now. deriving( Eq, Ord, Show, Data, Generic ) -- | @Uniq@ is used by GHC to distinguish names from each other. type Uniq = Integer -- | The name without its module prefix. -- -- ==== __Examples__ -- -- >>> nameBase ''Data.Either.Either -- "Either" -- >>> nameBase (mkName "foo") -- "foo" -- >>> nameBase (mkName "Module.foo") -- "foo" nameBase :: Name -> String nameBase (Name occ _) = occString occ -- | Module prefix of a name, if it exists. -- -- ==== __Examples__ -- -- >>> nameModule ''Data.Either.Either -- Just "Data.Either" -- >>> nameModule (mkName "foo") -- Nothing -- >>> nameModule (mkName "Module.foo") -- Just "Module" nameModule :: Name -> Maybe String nameModule (Name _ (NameQ m)) = Just (modString m) nameModule (Name _ (NameG _ _ m)) = Just (modString m) nameModule _ = Nothing -- | A name's package, if it exists. 
-- -- ==== __Examples__ -- -- >>> namePackage ''Data.Either.Either -- Just "base" -- >>> namePackage (mkName "foo") -- Nothing -- >>> namePackage (mkName "Module.foo") -- Nothing namePackage :: Name -> Maybe String namePackage (Name _ (NameG _ p _)) = Just (pkgString p) namePackage _ = Nothing -- | Returns whether a name represents an occurrence of a top-level variable -- ('VarName'), data constructor ('DataName'), type constructor, or type class -- ('TcClsName'). If we can't be sure, it returns 'Nothing'. -- -- ==== __Examples__ -- -- >>> nameSpace 'Prelude.id -- Just VarName -- >>> nameSpace (mkName "id") -- Nothing -- only works for top-level variable names -- >>> nameSpace 'Data.Maybe.Just -- Just DataName -- >>> nameSpace ''Data.Maybe.Maybe -- Just TcClsName -- >>> nameSpace ''Data.Ord.Ord -- Just TcClsName nameSpace :: Name -> Maybe NameSpace nameSpace (Name _ (NameG ns _ _)) = Just ns nameSpace _ = Nothing {- | Generate a capturable name. Occurrences of such names will be resolved according to the Haskell scoping rules at the occurrence site. For example: > f = [| pi + $(varE (mkName "pi")) |] > ... > g = let pi = 3 in $f In this case, @g@ is desugared to > g = Prelude.pi + 3 Note that @mkName@ may be used with qualified names: > mkName "Prelude.pi" See also 'Language.Haskell.TH.Lib.dyn' for a useful combinator. The above example could be rewritten using 'Language.Haskell.TH.Lib.dyn' as > f = [| pi + $(dyn "pi") |] -} mkName :: String -> Name -- The string can have a '.', thus "Foo.baz", -- giving a dynamically-bound qualified name, -- in which case we want to generate a NameQ -- -- Parse the string to see if it has a "." 
-- in it, so we know whether to generate a qualified or unqualified name
-- It's a bit tricky because we need to parse
--
-- > Foo.Baz.x   as   Qual Foo.Baz x
--
-- So we parse it from back to front
mkName str = split [] (reverse str)
  where
    split occ []        = Name (mkOccName occ) NameS
    split occ ('.':rev) | not (null occ)
                        , is_rev_mod_name rev
                        = Name (mkOccName occ) (NameQ (mkModName (reverse rev)))
        -- The 'not (null occ)' guard ensures that
        --     mkName "&." = Name "&." NameS
        -- The 'is_rev_mod' guards ensure that
        --     mkName ".&" = Name ".&" NameS
        --     mkName "^.." = Name "^.." NameS      -- #8633
        --     mkName "Data.Bits..&" = Name ".&" (NameQ "Data.Bits")
        -- This rather bizarre case actually happened; (.&.) is in Data.Bits
    split occ (c:rev)   = split (c:occ) rev

    -- Recognises a reversed module name xA.yB.C,
    -- with at least one component,
    -- and each component looks like a module name
    -- (i.e. non-empty, starts with capital, all alpha)
    is_rev_mod_name rev_mod_str
      | (compt, rest) <- break (== '.') rev_mod_str
      , not (null compt), isUpper (last compt), all is_mod_char compt
      = case rest of
          []             -> True
          (_dot : rest') -> is_rev_mod_name rest'
      | otherwise
      = False

    is_mod_char c = isAlphaNum c || c == '_' || c == '\''

-- | Only used internally
mkNameU :: String -> Uniq -> Name
mkNameU s u = Name (mkOccName s) (NameU u)

-- | Only used internally
mkNameL :: String -> Uniq -> Name
mkNameL s u = Name (mkOccName s) (NameL u)

-- | Used for 'x etc, but not available to the programmer
mkNameG :: NameSpace -> String -> String -> String -> Name
mkNameG ns pkg modu occ
  = Name (mkOccName occ) (NameG ns (mkPkgName pkg) (mkModName modu))

mkNameS :: String -> Name
mkNameS n = Name (mkOccName n) NameS

mkNameG_v, mkNameG_tc, mkNameG_d :: String -> String -> String -> Name
mkNameG_v  = mkNameG VarName
mkNameG_tc = mkNameG TcClsName
mkNameG_d  = mkNameG DataName

-- | The syntactic position a 'Name' is shown in: standalone, applied
-- prefix, or infix between arguments.
data NameIs = Alone | Applied | Infix

showName :: Name -> String
showName = showName' Alone

showName' :: NameIs -> Name -> String
showName' ni nm = case ni of
    Alone         -> nms
    Applied
      | pnam      -> nms
      | otherwise -> "(" ++ nms ++ ")"
    Infix
      | pnam      -> "`" ++ nms ++ "`"
      | otherwise -> nms
  where
    -- For now, we make the NameQ and NameG print the same, even though
    -- NameQ is a qualified name (so what it means depends on what the
    -- current scope is), and NameG is an original name (so its meaning
    -- should be independent of what's in scope.
    -- We may well want to distinguish them in the end.
    -- Ditto NameU and NameL
    nms = case nm of
            Name occ NameS         -> occString occ
            Name occ (NameQ m)     -> modString m ++ "." ++ occString occ
            Name occ (NameG _ _ m) -> modString m ++ "." ++ occString occ
            Name occ (NameU u)     -> occString occ ++ "_" ++ show u
            Name occ (NameL u)     -> occString occ ++ "_" ++ show u

    pnam = classify nms

    -- True if we are function style, e.g. f, [], (,)
    -- False if we are operator style, e.g. +, :+
    classify "" = False -- shouldn't happen; . operator is handled below
    classify (x:xs) | isAlpha x || (x `elem` "_[]()") =
                        case dropWhile (/='.') xs of
                          (_:xs') -> classify xs'
                          []      -> True
                    | otherwise = False

instance Show Name where
  show = showName

-- Tuple data and type constructors
-- | Tuple data constructor
tupleDataName :: Int -> Name
-- | Tuple type constructor
tupleTypeName :: Int -> Name

tupleDataName n = mk_tup_name n DataName  True
tupleTypeName n = mk_tup_name n TcClsName True

-- Unboxed tuple data and type constructors
-- | Unboxed tuple data constructor
unboxedTupleDataName :: Int -> Name
-- | Unboxed tuple type constructor
unboxedTupleTypeName :: Int -> Name

unboxedTupleDataName n = mk_tup_name n DataName  False
unboxedTupleTypeName n = mk_tup_name n TcClsName False

-- | Shared worker for the (un)boxed tuple names above; the 'Bool'
-- selects boxed ("(,)") versus unboxed ("(#,#)") spelling.
mk_tup_name :: Int -> NameSpace -> Bool -> Name
mk_tup_name n space boxed
  = Name (mkOccName tup_occ) (NameG space (mkPkgName "ghc-prim") tup_mod)
  where
    withParens thing
      | boxed     = "("  ++ thing ++ ")"
      | otherwise = "(#" ++ thing ++ "#)"
    -- 1-tuples are named Solo/Solo#, not "()" with zero commas
    tup_occ | n == 1    = if boxed then "Solo" else "Solo#"
            | otherwise = withParens (replicate n_commas ',')
    n_commas = n - 1
    tup_mod  = mkModName "GHC.Tuple"

-- Unboxed sum data and type constructors
-- | Unboxed sum data constructor
unboxedSumDataName :: SumAlt -> SumArity -> Name
-- | Unboxed sum type constructor
unboxedSumTypeName :: SumArity -> Name

unboxedSumDataName alt arity
  | alt > arity
  = error $ prefix ++ "Index out of bounds." ++ debug_info

  | alt <= 0
  = error $ prefix ++ "Alt must be > 0." ++ debug_info

  | arity < 2
  = error $ prefix ++ "Arity must be >= 2." ++ debug_info

  | otherwise
  = Name (mkOccName sum_occ)
         (NameG DataName (mkPkgName "ghc-prim") (mkModName "GHC.Prim"))

  where
    prefix     = "unboxedSumDataName: "
    debug_info = " (alt: " ++ show alt ++ ", arity: " ++ show arity ++ ")"

    -- Synced with the definition of mkSumDataConOcc in GHC.Builtin.Types
    sum_occ = '(' : '#' : bars nbars_before ++ '_' : bars nbars_after ++ "#)"
    bars i = replicate i '|'
    nbars_before = alt - 1
    nbars_after  = arity - alt

unboxedSumTypeName arity
  | arity < 2
  = error $ "unboxedSumTypeName: Arity must be >= 2."
         ++ " (arity: " ++ show arity ++ ")"

  | otherwise
  = Name (mkOccName sum_occ)
         (NameG TcClsName (mkPkgName "ghc-prim") (mkModName "GHC.Prim"))

  where
    -- Synced with the definition of mkSumTyConOcc in GHC.Builtin.Types
    sum_occ = '(' : '#' : replicate (arity - 1) '|' ++ "#)"

-----------------------------------------------------
--              Locations
-----------------------------------------------------

-- | A source location: file, package, module and a start/end position.
data Loc
  = Loc { loc_filename :: String
        , loc_package  :: String
        , loc_module   :: String
        , loc_start    :: CharPos
        , loc_end      :: CharPos }
   deriving( Show, Eq, Ord, Data, Generic )

type CharPos = (Int, Int)       -- ^ Line and character position

-----------------------------------------------------
--
--      The Info returned by reification
--
-----------------------------------------------------

-- | Obtained from 'reify' in the 'Q' Monad.
data Info
  =
  -- | A class, with a list of its visible instances
  ClassI
      Dec
      [InstanceDec]

  -- | A class method
  | ClassOpI
       Name
       Type
       ParentName

  -- | A \"plain\" type constructor. \"Fancier\" type constructors are returned
  -- using 'PrimTyConI' or 'FamilyI' as appropriate. At present, this reified
  -- declaration will never have derived instances attached to it (if you wish
  -- to check for an instance, see 'reifyInstances').
  | TyConI
        Dec

  -- | A type or data family, with a list of its visible instances. A closed
  -- type family is returned with 0 instances.
  | FamilyI
        Dec
        [InstanceDec]

  -- | A \"primitive\" type constructor, which can't be expressed with a 'Dec'.
  -- Examples: @(->)@, @Int#@.
  | PrimTyConI
       Name
       Arity
       Unlifted

  -- | A data constructor
  | DataConI
       Name
       Type
       ParentName

  -- | A pattern synonym
  | PatSynI
       Name
       PatSynType

  {- | A \"value\" variable (as opposed to a type variable, see 'TyVarI').

  The @Maybe Dec@ field contains @Just@ the declaration which
  defined the variable - including the RHS of the declaration -
  or else @Nothing@, in the case where the RHS is unavailable to
  the compiler. At present, this value is /always/ @Nothing@:
  returning the RHS has not yet been implemented because of
  lack of interest.
  -}
  | VarI
       Name
       Type
       (Maybe Dec)

  {- | A type variable.

  The @Type@ field contains the type which underlies the variable.
  At present, this is always @'VarT' theName@, but future changes
  may permit refinement of this.
  -}
  | TyVarI      -- Scoped type variable
        Name
        Type    -- What it is bound to

  deriving( Show, Eq, Ord, Data, Generic )

-- | Obtained from 'reifyModule' in the 'Q' Monad.
data ModuleInfo =
  -- | Contains the import list of the module.
  ModuleInfo [Module]
  deriving( Show, Eq, Ord, Data, Generic )

{- |
In 'ClassOpI' and 'DataConI', name of the parent class or type
-}
type ParentName = Name

-- | In 'UnboxedSumE' and 'UnboxedSumP', the number associated with a
-- particular data constructor. 'SumAlt's are one-indexed and should never
-- exceed the value of its corresponding 'SumArity'.
-- For example:
--
-- * @(\#_|\#)@ has 'SumAlt' 1 (out of a total 'SumArity' of 2)
--
-- * @(\#|_\#)@ has 'SumAlt' 2 (out of a total 'SumArity' of 2)
type SumAlt = Int

-- | In 'UnboxedSumE', 'UnboxedSumT', and 'UnboxedSumP', the total number of
-- 'SumAlt's. For example, @(\#|\#)@ has a 'SumArity' of 2.
type SumArity = Int

-- | In 'PrimTyConI', arity of the type constructor
type Arity = Int

-- | In 'PrimTyConI', is the type constructor unlifted?
type Unlifted = Bool

-- | 'InstanceDec' describes a single instance of a class or type function.
-- It is just a 'Dec', but guaranteed to be one of the following:
--
-- * 'InstanceD' (with empty @['Dec']@)
--
-- * 'DataInstD' or 'NewtypeInstD' (with empty derived @['Name']@)
--
-- * 'TySynInstD'
type InstanceDec = Dec

data Fixity          = Fixity Int FixityDirection
    deriving( Eq, Ord, Show, Data, Generic )
data FixityDirection = InfixL | InfixR | InfixN
    deriving( Eq, Ord, Show, Data, Generic )

-- | Highest allowed operator precedence for 'Fixity' constructor (answer: 9)
maxPrecedence :: Int
maxPrecedence = (9::Int)

-- | Default fixity: @infixl 9@
defaultFixity :: Fixity
defaultFixity = Fixity maxPrecedence InfixL

{- Note [Unresolved infix]
~~~~~~~~~~~~~~~~~~~~~~~
-}
{- $infix #infix#
When implementing antiquotation for quasiquoters, one often wants
to parse strings into expressions:

> parse :: String -> Maybe Exp

But how should we parse @a + b * c@? If we don't know the fixities of
@+@ and @*@, we don't know whether to parse it as @a + (b * c)@ or
@(a + b) * c@.

In cases like this, use 'UInfixE', 'UInfixP', 'UInfixT', or
'PromotedUInfixT', which stand for \"unresolved infix
expression/pattern/type/promoted constructor\", respectively. When
the compiler is given a splice containing a tree of @UInfixE@
applications such as

> UInfixE
>   (UInfixE e1 op1 e2)
>   op2
>   (UInfixE e3 op3 e4)

it will look up and the fixities of the relevant operators and
reassociate the tree as necessary.

  * trees will not be reassociated across 'ParensE', 'ParensP', or 'ParensT',
    which are of use for parsing expressions like

    > (a + b * c) + d * e

  * 'InfixE', 'InfixP', 'InfixT', and 'PromotedInfixT' expressions are never
    reassociated.

  * The 'UInfixE' constructor doesn't support sections. Sections
    such as @(a *)@ have no ambiguity, so 'InfixE' suffices. For longer
    sections such as @(a + b * c -)@, use an 'InfixE' constructor for the
    outer-most section, and use 'UInfixE' constructors for all
    other operators:

    > InfixE
    >   Just (UInfixE ...a + b * c...)
    >   op
    >   Nothing

    Sections such as @(a + b +)@ and @((a + b) +)@ should be rendered
    into 'Exp's differently:

    > (+ a + b)   ---> InfixE Nothing + (Just $ UInfixE a + b)
    >                    -- will result in a fixity error if (+) is left-infix
    > (+ (a + b)) ---> InfixE Nothing + (Just $ ParensE $ UInfixE a + b)
    >                    -- no fixity errors

  * Quoted expressions such as

    > [| a * b + c |] :: Q Exp
    > [p| a : b : c |] :: Q Pat
    > [t| T + T |] :: Q Type

    will never contain 'UInfixE', 'UInfixP', 'UInfixT',
    'PromotedUInfixT', 'InfixT', 'PromotedInfixT, 'ParensE', 'ParensP',
    or 'ParensT' constructors.
-}

-----------------------------------------------------
--
--      The main syntax data types
--
-----------------------------------------------------

data Lit = CharL Char
         | StringL String
         | IntegerL Integer     -- ^ Used for overloaded and non-overloaded
                                -- literals. We don't have a good way to
                                -- represent non-overloaded literals at
                                -- the moment. Maybe that doesn't matter?
         | RationalL Rational   -- Ditto
         | IntPrimL Integer
         | WordPrimL Integer
         | FloatPrimL Rational
         | DoublePrimL Rational
         | StringPrimL [Word8]  -- ^ A primitive C-style string, type 'Addr#'
         | BytesPrimL Bytes     -- ^ Some raw bytes, type 'Addr#':
         | CharPrimL Char
    deriving( Show, Eq, Ord, Data, Generic )

    -- We could add Int, Float, Double etc, as we do in HsLit,
    -- but that could complicate the
    -- supposedly-simple TH.Syntax literal type

-- | Raw bytes embedded into the binary.
--
-- Avoid using Bytes constructor directly as it is likely to change in the
-- future. Use helpers such as `mkBytes` in Language.Haskell.TH.Lib instead.
data Bytes = Bytes
   { bytesPtr    :: ForeignPtr Word8 -- ^ Pointer to the data
   , bytesOffset :: Word             -- ^ Offset from the pointer
   , bytesSize   :: Word             -- ^ Number of bytes
   -- Maybe someday:
   -- , bytesAlignement  :: Word -- ^ Alignement constraint
   -- , bytesReadOnly    :: Bool -- ^ Shall we embed into a read-only
   --                            --   section or not
   -- , bytesInitialized :: Bool -- ^ False: only use `bytesSize` to allocate
   --                            --   an uninitialized region
   }
   deriving (Data,Generic)

-- We can't derive Show instance for Bytes because we don't want to show the
-- pointer value but the actual bytes (similarly to what ByteString does). See
-- #16457.
instance Show Bytes where
   show b = unsafePerformIO $ withForeignPtr (bytesPtr b) $ \ptr ->
               peekCStringLen ( ptr `plusPtr` fromIntegral (bytesOffset b)
                              , fromIntegral (bytesSize b)
                              )

-- We can't derive Eq and Ord instances for Bytes because we don't want to
-- compare pointer values but the actual bytes (similarly to what ByteString
-- does).  See #16457
instance Eq Bytes where
   (==) = eqBytes

instance Ord Bytes where
   compare = compareBytes

-- | Byte-wise equality, with two short cuts before falling back to memcmp.
eqBytes :: Bytes -> Bytes -> Bool
eqBytes a@(Bytes fp off len) b@(Bytes fp' off' len')
  | len /= len'              = False -- short cut on length
  | fp == fp' && off == off' = True  -- short cut for the same bytes
  | otherwise                = compareBytes a b == EQ

-- | Byte-wise (lexicographic) comparison via C @memcmp@; ties on the
-- common prefix are broken by length.
compareBytes :: Bytes -> Bytes -> Ordering
compareBytes (Bytes _   _    0)    (Bytes _   _    0)    = EQ -- short cut for empty Bytes
compareBytes (Bytes fp1 off1 len1) (Bytes fp2 off2 len2) =
    unsafePerformIO $
      withForeignPtr fp1 $ \p1 ->
      withForeignPtr fp2 $ \p2 -> do
        i <- memcmp (p1 `plusPtr` fromIntegral off1)
                    (p2 `plusPtr` fromIntegral off2)
                    (fromIntegral (min len1 len2))
        return $! (i `compare` 0) <> (len1 `compare` len2)

foreign import ccall unsafe "memcmp"
  memcmp :: Ptr a -> Ptr b -> CSize -> IO CInt

-- | Pattern in Haskell given in @{}@
data Pat
  = LitP Lit                        -- ^ @{ 5 or \'c\' }@
  | VarP Name                       -- ^ @{ x }@
  | TupP [Pat]                      -- ^ @{ (p1,p2) }@
  | UnboxedTupP [Pat]               -- ^ @{ (\# p1,p2 \#) }@
  | UnboxedSumP Pat SumAlt SumArity -- ^ @{ (\#|p|\#) }@
  | ConP Name [Type] [Pat]          -- ^ @data T1 = C1 t1 t2; {C1 \@ty1 p1 p2} = e@
  | InfixP Pat Name Pat             -- ^ @foo ({x :+ y}) = e@
  | UInfixP Pat Name Pat            -- ^ @foo ({x :+ y}) = e@
                                    --
                                    -- See "Language.Haskell.TH.Syntax#infix"
  | ParensP Pat                     -- ^ @{(p)}@
                                    --
                                    -- See "Language.Haskell.TH.Syntax#infix"
  | TildeP Pat                      -- ^ @{ ~p }@
  | BangP Pat                       -- ^ @{ !p }@
  | AsP Name Pat                    -- ^ @{ x \@ p }@
  | WildP                           -- ^ @{ _ }@
  | RecP Name [FieldPat]            -- ^ @f (Pt { pointx = x }) = g x@
  | ListP [ Pat ]                   -- ^ @{ [1,2,3] }@
  | SigP Pat Type                   -- ^ @{ p :: t }@
  | ViewP Exp Pat                   -- ^ @{ e -> p }@
  deriving( Show, Eq, Ord, Data, Generic )

type FieldPat = (Name,Pat)

data Match = Match Pat Body [Dec] -- ^ @case e of { pat -> body where decs }@
    deriving( Show, Eq, Ord, Data, Generic )

data Clause = Clause [Pat] Body [Dec]
                                  -- ^ @f { p1 p2 = body where decs }@
    deriving( Show, Eq, Ord, Data, Generic )

data Exp
  = VarE Name                          -- ^ @{ x }@
  | ConE Name                          -- ^ @data T1 = C1 t1 t2; p = {C1} e1 e2@
  | LitE Lit                           -- ^ @{ 5 or \'c\'}@
  | AppE Exp Exp                       -- ^ @{ f x }@
  | AppTypeE Exp Type                  -- ^ @{ f \@Int }@

  | InfixE (Maybe Exp) Exp (Maybe Exp) -- ^ @{x + y} or {(x+)} or {(+ x)} or {(+)}@
    -- It's a bit gruesome to use an Exp as the operator when a Name
    -- would suffice. Historically, Exp was used to make it easier to
    -- distinguish between infix constructors and non-constructors.
    -- This is a bit overkill, since one could just as well call
    -- `startsConId` or `startsConSym` (from `GHC.Lexeme`) on a Name.
    -- Unfortunately, changing this design now would involve lots of
    -- code churn for consumers of the TH API, so we continue to use
    -- an Exp as the operator and perform an extra check during conversion
    -- to ensure that the Exp is a constructor or a variable (#16895).

  | UInfixE Exp Exp Exp                -- ^ @{x + y}@
                                       --
                                       -- See "Language.Haskell.TH.Syntax#infix"
  | ParensE Exp                        -- ^ @{ (e) }@
                                       --
                                       -- See "Language.Haskell.TH.Syntax#infix"
  | LamE [Pat] Exp                     -- ^ @{ \\ p1 p2 -> e }@
  | LamCaseE [Match]                   -- ^ @{ \\case m1; m2 }@
  | LamCasesE [Clause]                 -- ^ @{ \\cases m1; m2 }@
  | TupE [Maybe Exp]                   -- ^ @{ (e1,e2) }@
                                       --
                                       -- The 'Maybe' is necessary for handling
                                       -- tuple sections.
                                       --
                                       -- > (1,)
                                       --
                                       -- translates to
                                       --
                                       -- > TupE [Just (LitE (IntegerL 1)),Nothing]

  | UnboxedTupE [Maybe Exp]            -- ^ @{ (\# e1,e2 \#) }@
                                       --
                                       -- The 'Maybe' is necessary for handling
                                       -- tuple sections.
                                       --
                                       -- > (# 'c', #)
                                       --
                                       -- translates to
                                       --
                                       -- > UnboxedTupE [Just (LitE (CharL 'c')),Nothing]

  | UnboxedSumE Exp SumAlt SumArity    -- ^ @{ (\#|e|\#) }@
  | CondE Exp Exp Exp                  -- ^ @{ if e1 then e2 else e3 }@
  | MultiIfE [(Guard, Exp)]            -- ^ @{ if | g1 -> e1 | g2 -> e2 }@
  | LetE [Dec] Exp                     -- ^ @{ let { x=e1; y=e2 } in e3 }@
  | CaseE Exp [Match]                  -- ^ @{ case e of m1; m2 }@
  | DoE (Maybe ModName) [Stmt]         -- ^ @{ do { p <- e1; e2 } }@ or a qualified do if
                                       -- the module name is present
  | MDoE (Maybe ModName) [Stmt]        -- ^ @{ mdo { x <- e1 y; y <- e2 x; } }@ or a qualified
                                       -- mdo if the module name is present
  | CompE [Stmt]                       -- ^ @{ [ (x,y) | x <- xs, y <- ys ] }@
      --
      -- The result expression of the comprehension is
      -- the /last/ of the @'Stmt'@s, and should be a 'NoBindS'.
      --
      -- E.g. translation:
      --
      -- > [ f x | x <- xs ]
      --
      -- > CompE [BindS (VarP x) (VarE xs), NoBindS (AppE (VarE f) (VarE x))]

  | ArithSeqE Range                    -- ^ @{ [ 1 ,2 .. 10 ] }@
  | ListE [ Exp ]                      -- ^ @{ [1,2,3] }@
  | SigE Exp Type                      -- ^ @{ e :: t }@
  | RecConE Name [FieldExp]            -- ^ @{ T { x = y, z = w } }@
  | RecUpdE Exp [FieldExp]             -- ^ @{ (f x) { z = w } }@
  | StaticE Exp                        -- ^ @{ static e }@
  | UnboundVarE Name                   -- ^ @{ _x }@
                                       --
                                       -- This is used for holes or unresolved
                                       -- identifiers in AST quotes. Note that
                                       -- it could either have a variable name
                                       -- or constructor name.
  | LabelE String                      -- ^ @{ #x }@ ( Overloaded label )
  | ImplicitParamVarE String           -- ^ @{ ?x }@ ( Implicit parameter )
  | GetFieldE Exp String               -- ^ @{ exp.field }@ ( Overloaded Record Dot )
  | ProjectionE (NonEmpty String)      -- ^ @(.x)@ or @(.x.y)@ (Record projections)
  deriving( Show, Eq, Ord, Data, Generic )

type FieldExp = (Name,Exp)

-- Omitted: implicit parameters

data Body
  = GuardedB [(Guard,Exp)]   -- ^ @f p { | e1 = e2
                             --      | e3 = e4 }
                             --  where ds@
  | NormalB Exp              -- ^ @f p { = e } where ds@
  deriving( Show, Eq, Ord, Data, Generic )

data Guard
  = NormalG Exp -- ^ @f x { | odd x } = x@
  | PatG [Stmt] -- ^ @f x { | Just y <- x, Just z <- y } = z@
  deriving( Show, Eq, Ord, Data, Generic )

data Stmt
  = BindS Pat Exp -- ^ @p <- e@
  | LetS [ Dec ]  -- ^ @{ let { x=e1; y=e2 } }@
  | NoBindS Exp   -- ^ @e@
  | ParS [[Stmt]] -- ^ @x <- e1 | s2, s3 | s4@ (in 'CompE')
  | RecS [Stmt]   -- ^ @rec { s1; s2 }@
  deriving( Show, Eq, Ord, Data, Generic )

data Range = FromR Exp | FromThenR Exp Exp
           | FromToR Exp Exp | FromThenToR Exp Exp Exp
          deriving( Show, Eq, Ord, Data, Generic )

data Dec
  = FunD Name [Clause]            -- ^ @{ f p1 p2 = b where decs }@
  | ValD Pat Body [Dec]           -- ^ @{ p = b where decs }@
  | DataD Cxt Name [TyVarBndr ()]
          (Maybe Kind)            -- Kind signature (allowed only for GADTs)
          [Con] [DerivClause]
                                  -- ^ @{ data Cxt x => T x = A x | B (T x)
                                  --       deriving (Z,W)
                                  --       deriving stock Eq }@
  | NewtypeD Cxt Name [TyVarBndr ()]
             (Maybe Kind)         -- Kind signature
             Con [DerivClause]    -- ^ @{ newtype Cxt x => T x = A (B x)
                                  --       deriving (Z,W Q)
                                  --       deriving stock Eq }@
  | TySynD Name [TyVarBndr ()] Type -- ^ @{ type T x = (x,x) }@
  | ClassD Cxt Name [TyVarBndr ()]
         [FunDep] [Dec]           -- ^ @{ class Eq a => Ord a where ds }@
  | InstanceD (Maybe Overlap) Cxt Type [Dec]
                                  -- ^ @{ instance {\-\# OVERLAPS \#-\}
                                  --        Show w => Show [w] where ds }@
  | SigD Name Type                -- ^ @{ length :: [a] -> Int }@
  | KiSigD Name Kind              -- ^ @{ type TypeRep :: k -> Type }@
  | ForeignD Foreign              -- ^ @{ foreign import ... }
                                  --  { foreign export ... }@

  | InfixD Fixity Name            -- ^ @{ infix 3 foo }@
  | DefaultD [Type]               -- ^ @{ default (Integer, Double) }@

  -- | pragmas
  | PragmaD Pragma                -- ^ @{ {\-\# INLINE [1] foo \#-\} }@

  -- | data families (may also appear in [Dec] of 'ClassD' and 'InstanceD')
  | DataFamilyD Name [TyVarBndr ()]
                (Maybe Kind)
                                  -- ^ @{ data family T a b c :: * }@
  | DataInstD Cxt (Maybe [TyVarBndr ()]) Type
              (Maybe Kind)        -- Kind signature
              [Con] [DerivClause] -- ^ @{ data instance Cxt x => T [x]
                                  --       = A x | B (T x)
                                  --       deriving (Z,W)
                                  --       deriving stock Eq }@

  | NewtypeInstD Cxt (Maybe [TyVarBndr ()])
                 Type             -- Quantified type vars
                 (Maybe Kind)     -- Kind signature
                 Con [DerivClause] -- ^ @{ newtype instance Cxt x => T [x]
                                  --        = A (B x)
                                  --        deriving (Z,W)
                                  --        deriving stock Eq }@
  | TySynInstD TySynEqn           -- ^ @{ type instance ... }@

  -- | open type families (may also appear in [Dec] of 'ClassD' and 'InstanceD')
  | OpenTypeFamilyD TypeFamilyHead
                                  -- ^ @{ type family T a b c = (r :: *) | r -> a b }@

  | ClosedTypeFamilyD TypeFamilyHead [TySynEqn]
                                  -- ^ @{ type family F a b = (r :: *) | r -> a where ... }@

  | RoleAnnotD Name [Role]        -- ^ @{ type role T nominal representational }@
  | StandaloneDerivD (Maybe DerivStrategy) Cxt Type
                                  -- ^ @{ deriving stock instance Ord a => Ord (Foo a) }@
  | DefaultSigD Name Type         -- ^ @{ default size :: Data a => a -> Int }@

  -- | Pattern Synonyms
  | PatSynD Name PatSynArgs PatSynDir Pat
                                  -- ^ @{ pattern P v1 v2 .. vn <- p }@  unidirectional           or
                                  --   @{ pattern P v1 v2 .. vn = p }@   implicit bidirectional   or
                                  --   @{ pattern P v1 v2 .. vn <- p
                                  --        where P v1 v2 .. vn = e }@   explicit bidirectional
                                  --
                                  -- also, besides prefix pattern synonyms, both
                                  -- infix and record pattern synonyms are
                                  -- supported. See 'PatSynArgs' for details

  | PatSynSigD Name PatSynType    -- ^ A pattern synonym's type signature.

  | ImplicitParamBindD String Exp
      -- ^ @{ ?x = expr }@
      --
      -- Implicit parameter binding declaration. Can only be used in let
      -- and where clauses which consist entirely of implicit bindings.
  deriving( Show, Eq, Ord, Data, Generic )

-- | Varieties of allowed instance overlap.
data Overlap = Overlappable -- ^ May be overlapped by more specific instances
             | Overlapping  -- ^ May overlap a more general instance
             | Overlaps     -- ^ Both 'Overlapping' and 'Overlappable'
             | Incoherent   -- ^ Both 'Overlapping' and 'Overlappable', and
                            -- pick an arbitrary one if multiple choices are
                            -- available.
  deriving( Show, Eq, Ord, Data, Generic )

-- | A single @deriving@ clause at the end of a datatype.
data DerivClause = DerivClause (Maybe DerivStrategy) Cxt
    -- ^ @{ deriving stock (Eq, Ord) }@
  deriving( Show, Eq, Ord, Data, Generic )

-- | What the user explicitly requests when deriving an instance.
data DerivStrategy = StockStrategy    -- ^ A \"standard\" derived instance
                   | AnyclassStrategy -- ^ @-XDeriveAnyClass@
                   | NewtypeStrategy  -- ^ @-XGeneralizedNewtypeDeriving@
                   | ViaStrategy Type -- ^ @-XDerivingVia@
  deriving( Show, Eq, Ord, Data, Generic )

-- | A pattern synonym's type. Note that a pattern synonym's /fully/
-- specified type has a peculiar shape coming with two forall
-- quantifiers and two constraint contexts. For example, consider the
-- pattern synonym
--
-- > pattern P x1 x2 ... xn = <some-pattern>
--
-- P's complete type is of the following form
--
-- > pattern P :: forall universals.   required constraints
-- >           => forall existentials. provided constraints
-- >           => t1 -> t2 -> ... -> tn -> t
--
-- consisting of four parts:
--
--   1. the (possibly empty lists of) universally quantified type
--      variables and required constraints on them.
--   2.
--      the (possibly empty lists of) existentially quantified
--      type variables and the provided constraints on them.
--   3. the types @t1@, @t2@, .., @tn@ of @x1@, @x2@, .., @xn@, respectively
--   4. the type @t@ of @\<some-pattern\>@, mentioning only universals.
--
-- Pattern synonym types interact with TH when (a) reifying a pattern
-- synonym, (b) pretty printing, or (c) specifying a pattern synonym's
-- type signature explicitly:
--
-- * Reification always returns a pattern synonym's /fully/ specified
--   type in abstract syntax.
--
-- * Pretty printing via 'Language.Haskell.TH.Ppr.pprPatSynType' abbreviates
--   a pattern synonym's type unambiguously in concrete syntax: The rule of
--   thumb is to print initial empty universals and the required
--   context as @() =>@, if existentials and a provided context
--   follow. If only universals and their required context, but no
--   existentials are specified, only the universals and their
--   required context are printed. If both or none are specified, so
--   both (or none) are printed.
--
-- * When specifying a pattern synonym's type explicitly with
--   'PatSynSigD' either one of the universals, the existentials, or
--   their contexts may be left empty.
--
-- See the GHC user's guide for more information on pattern synonyms
-- and their types:
-- <https://downloads.haskell.org/ghc/latest/docs/users_guide/exts/pattern_synonyms.html>.
type PatSynType = Type

-- | Common elements of 'OpenTypeFamilyD' and 'ClosedTypeFamilyD'. By
-- analogy with "head" for type classes and type class instances as
-- defined in /Type classes: an exploration of the design space/, the
-- @TypeFamilyHead@ is defined to be the elements of the declaration
-- between @type family@ and @where@.
data TypeFamilyHead =
  TypeFamilyHead Name [TyVarBndr ()] FamilyResultSig (Maybe InjectivityAnn)
  deriving( Show, Eq, Ord, Data, Generic )

-- | One equation of a type family instance or closed type family. The
-- arguments are the left-hand-side type and the right-hand-side result.
--
-- For instance, if you had the following type family:
--
-- @
-- type family Foo (a :: k) :: k where
--   forall k (a :: k). Foo \@k a = a
-- @
--
-- The @Foo \@k a = a@ equation would be represented as follows:
--
-- @
-- 'TySynEqn' ('Just' ['PlainTV' k, 'KindedTV' a ('VarT' k)])
--            ('AppT' ('AppKindT' ('ConT' ''Foo) ('VarT' k)) ('VarT' a))
--            ('VarT' a)
-- @
data TySynEqn = TySynEqn (Maybe [TyVarBndr ()]) Type Type
  deriving( Show, Eq, Ord, Data, Generic )

data FunDep = FunDep [Name] [Name]
  deriving( Show, Eq, Ord, Data, Generic )

data Foreign = ImportF Callconv Safety String Name Type
             | ExportF Callconv        String Name Type
         deriving( Show, Eq, Ord, Data, Generic )

-- keep Callconv in sync with module ForeignCall in ghc/compiler/GHC/Types/ForeignCall.hs
data Callconv = CCall | StdCall | CApi | Prim | JavaScript
          deriving( Show, Eq, Ord, Data, Generic )

data Safety = Unsafe | Safe | Interruptible
        deriving( Show, Eq, Ord, Data, Generic )

data Pragma = InlineP         Name Inline RuleMatch Phases
            | OpaqueP         Name
            | SpecialiseP     Name Type (Maybe Inline) Phases
            | SpecialiseInstP Type
            | RuleP           String (Maybe [TyVarBndr ()]) [RuleBndr] Exp Exp Phases
            | AnnP            AnnTarget Exp
            | LineP           Int String
            | CompleteP       [Name] (Maybe Name)
                -- ^ @{ {\-\# COMPLETE C_1, ..., C_i [ :: T ] \#-} }@
        deriving( Show, Eq, Ord, Data, Generic )

data Inline = NoInline
            | Inline
            | Inlinable
            deriving (Show, Eq, Ord, Data, Generic)

data RuleMatch = ConLike
               | FunLike
               deriving (Show, Eq, Ord, Data, Generic)

data Phases = AllPhases
            | FromPhase Int
            | BeforePhase Int
            deriving (Show, Eq, Ord, Data, Generic)

data RuleBndr = RuleVar Name
              | TypedRuleVar Name Type
              deriving (Show, Eq, Ord, Data, Generic)

data AnnTarget = ModuleAnnotation
               | TypeAnnotation Name
               | ValueAnnotation Name
               deriving (Show, Eq, Ord, Data, Generic)

type Cxt = [Pred]               -- ^ @(Eq a, Ord b)@

-- | Since the advent of @ConstraintKinds@, constraints are really just types.
-- Equality constraints use the 'EqualityT' constructor. Constraints may also
-- be tuples of other constraints.
type Pred = Type

data SourceUnpackedness
  = NoSourceUnpackedness -- ^ @C a@
  | SourceNoUnpack       -- ^ @C { {\-\# NOUNPACK \#-\} } a@
  | SourceUnpack         -- ^ @C { {\-\# UNPACK \#-\} } a@
        deriving (Show, Eq, Ord, Data, Generic)

data SourceStrictness = NoSourceStrictness -- ^ @C a@
                      | SourceLazy         -- ^ @C {~}a@
                      | SourceStrict       -- ^ @C {!}a@
        deriving (Show, Eq, Ord, Data, Generic)

-- | Unlike 'SourceStrictness' and 'SourceUnpackedness', 'DecidedStrictness'
-- refers to the strictness that the compiler chooses for a data constructor
-- field, which may be different from what is written in source code. See
-- 'reifyConStrictness' for more information.
data DecidedStrictness = DecidedLazy
                       | DecidedStrict
                       | DecidedUnpack
        deriving (Show, Eq, Ord, Data, Generic)

-- | A single data constructor.
--
-- The constructors for 'Con' can roughly be divided up into two categories:
-- those for constructors with \"vanilla\" syntax ('NormalC', 'RecC', and
-- 'InfixC'), and those for constructors with GADT syntax ('GadtC' and
-- 'RecGadtC'). The 'ForallC' constructor, which quantifies additional type
-- variables and class contexts, can surround either variety of constructor.
-- However, the type variables that it quantifies are different depending
-- on what constructor syntax is used:
--
-- * If a 'ForallC' surrounds a constructor with vanilla syntax, then the
--   'ForallC' will only quantify /existential/ type variables. For example:
--
-- @
-- data Foo a = forall b. MkFoo a b
-- @
--
-- In @MkFoo@, 'ForallC' will quantify @b@, but not @a@.
--
-- * If a 'ForallC' surrounds a constructor with GADT syntax, then the
--   'ForallC' will quantify /all/ type variables used in the constructor.
--   For example:
--
-- @
-- data Bar a b where
--   MkBar :: (a ~ b) => c -> MkBar a b
-- @
--
-- In @MkBar@, 'ForallC' will quantify @a@, @b@, and @c@.
--
-- Multiplicity annotations for data types are currently not supported
-- in Template Haskell (i.e. all fields represented by Template Haskell
-- will be linear).
data Con = NormalC Name [BangType]       -- ^ @C Int a@
         | RecC Name [VarBangType]       -- ^ @C { v :: Int, w :: a }@
         | InfixC BangType Name BangType -- ^ @Int :+ a@
         | ForallC [TyVarBndr Specificity] Cxt Con -- ^ @forall a. Eq a => C [a]@
         | GadtC [Name] [BangType] Type  -- See Note [GADT return type]
                                         -- ^ @C :: a -> b -> T b Int@
         | RecGadtC [Name] [VarBangType] Type -- See Note [GADT return type]
                                         -- ^ @C :: { v :: Int } -> T b Int@
        deriving (Show, Eq, Ord, Data, Generic)

-- Note [GADT return type]
-- ~~~~~~~~~~~~~~~~~~~~~~~
-- The return type of a GADT constructor does not necessarily match the name of
-- the data type:
--
-- type S = T
--
-- data T a where
--   MkT :: S Int
--
--
-- type S a = T
--
-- data T a where
--   MkT :: S Char Int
--
--
-- type Id a = a
-- type S a = T
--
-- data T a where
--   MkT :: Id (S Char Int)
--
--
-- That is why we allow the return type stored by a constructor to be an
-- arbitrary type. See also #11341

data Bang = Bang SourceUnpackedness SourceStrictness
         -- ^ @C { {\-\# UNPACK \#-\} !}a@
        deriving (Show, Eq, Ord, Data, Generic)

type BangType    = (Bang, Type)
type VarBangType = (Name, Bang, Type)

-- | As of @template-haskell-2.11.0.0@, 'Strict' has been replaced by 'Bang'.
type Strict = Bang

-- | As of @template-haskell-2.11.0.0@, 'StrictType' has been replaced by
-- 'BangType'.
type StrictType = BangType

-- | As of @template-haskell-2.11.0.0@, 'VarStrictType' has been replaced by
-- 'VarBangType'.
type VarStrictType = VarBangType

-- | A pattern synonym's directionality.
data PatSynDir
  = Unidir             -- ^ @pattern P x {<-} p@
  | ImplBidir          -- ^ @pattern P x {=} p@
  | ExplBidir [Clause] -- ^ @pattern P x {<-} p where P x = e@
  deriving( Show, Eq, Ord, Data, Generic )

-- | A pattern synonym's argument type.
data PatSynArgs = PrefixPatSyn [Name] -- ^ @pattern P {x y z} = p@ | InfixPatSyn Name Name -- ^ @pattern {x P y} = p@ | RecordPatSyn [Name] -- ^ @pattern P { {x,y,z} } = p@ deriving( Show, Eq, Ord, Data, Generic ) data Type = ForallT [TyVarBndr Specificity] Cxt Type -- ^ @forall \. \ => \@ | ForallVisT [TyVarBndr ()] Type -- ^ @forall \ -> \@ | AppT Type Type -- ^ @T a b@ | AppKindT Type Kind -- ^ @T \@k t@ | SigT Type Kind -- ^ @t :: k@ | VarT Name -- ^ @a@ | ConT Name -- ^ @T@ | PromotedT Name -- ^ @'T@ | InfixT Type Name Type -- ^ @T + T@ | UInfixT Type Name Type -- ^ @T + T@ -- -- See "Language.Haskell.TH.Syntax#infix" | PromotedInfixT Type Name Type -- ^ @T :+: T@ | PromotedUInfixT Type Name Type -- ^ @T :+: T@ -- -- See "Language.Haskell.TH.Syntax#infix" | ParensT Type -- ^ @(T)@ -- See Note [Representing concrete syntax in types] | TupleT Int -- ^ @(,), (,,), etc.@ | UnboxedTupleT Int -- ^ @(\#,\#), (\#,,\#), etc.@ | UnboxedSumT SumArity -- ^ @(\#|\#), (\#||\#), etc.@ | ArrowT -- ^ @->@ | MulArrowT -- ^ @%n ->@ -- -- Generalised arrow type with multiplicity argument | EqualityT -- ^ @~@ | ListT -- ^ @[]@ | PromotedTupleT Int -- ^ @'(), '(,), '(,,), etc.@ | PromotedNilT -- ^ @'[]@ | PromotedConsT -- ^ @(':)@ | StarT -- ^ @*@ | ConstraintT -- ^ @Constraint@ | LitT TyLit -- ^ @0,1,2, etc.@ | WildCardT -- ^ @_@ | ImplicitParamT String Type -- ^ @?x :: t@ deriving( Show, Eq, Ord, Data, Generic ) data Specificity = SpecifiedSpec -- ^ @a@ | InferredSpec -- ^ @{a}@ deriving( Show, Eq, Ord, Data, Generic ) data TyVarBndr flag = PlainTV Name flag -- ^ @a@ | KindedTV Name flag Kind -- ^ @(a :: k)@ deriving( Show, Eq, Ord, Data, Generic, Functor ) -- | Type family result signature data FamilyResultSig = NoSig -- ^ no signature | KindSig Kind -- ^ @k@ | TyVarSig (TyVarBndr ()) -- ^ @= r, = (r :: k)@ deriving( Show, Eq, Ord, Data, Generic ) -- | Injectivity annotation data InjectivityAnn = InjectivityAnn Name [Name] deriving ( Show, Eq, Ord, Data, Generic ) data TyLit = 
NumTyLit Integer -- ^ @2@ | StrTyLit String -- ^ @\"Hello\"@ | CharTyLit Char -- ^ @\'C\'@, @since 4.16.0.0 deriving ( Show, Eq, Ord, Data, Generic ) -- | Role annotations data Role = NominalR -- ^ @nominal@ | RepresentationalR -- ^ @representational@ | PhantomR -- ^ @phantom@ | InferR -- ^ @_@ deriving( Show, Eq, Ord, Data, Generic ) -- | Annotation target for reifyAnnotations data AnnLookup = AnnLookupModule Module | AnnLookupName Name deriving( Show, Eq, Ord, Data, Generic ) -- | To avoid duplication between kinds and types, they -- are defined to be the same. Naturally, you would never -- have a type be 'StarT' and you would never have a kind -- be 'SigT', but many of the other constructors are shared. -- Note that the kind @Bool@ is denoted with 'ConT', not -- 'PromotedT'. Similarly, tuple kinds are made with 'TupleT', -- not 'PromotedTupleT'. type Kind = Type {- Note [Representing concrete syntax in types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Haskell has a rich concrete syntax for types, including t1 -> t2, (t1,t2), [t], and so on In TH we represent all of this using AppT, with a distinguished type constructor at the head. So, Type TH representation ----------------------------------------------- t1 -> t2 ArrowT `AppT` t2 `AppT` t2 [t] ListT `AppT` t (t1,t2) TupleT 2 `AppT` t1 `AppT` t2 '(t1,t2) PromotedTupleT 2 `AppT` t1 `AppT` t2 But if the original HsSyn used prefix application, we won't use these special TH constructors. For example [] t ConT "[]" `AppT` t (->) t ConT "->" `AppT` t In this way we can faithfully represent in TH whether the original HsType used concrete syntax or not. The one case that doesn't fit this pattern is that of promoted lists '[ Maybe, IO ] PromotedListT 2 `AppT` t1 `AppT` t2 but it's very smelly because there really is no type constructor corresponding to PromotedListT. 
So we encode HsExplicitListTy with PromotedConsT and PromotedNilT (which *do* have underlying type constructors): '[ Maybe, IO ] PromotedConsT `AppT` Maybe `AppT` (PromotedConsT `AppT` IO `AppT` PromotedNilT) -} -- | A location at which to attach Haddock documentation. -- Note that adding documentation to a 'Name' defined oustide of the current -- module will cause an error. data DocLoc = ModuleDoc -- ^ At the current module's header. | DeclDoc Name -- ^ At a declaration, not necessarily top level. | ArgDoc Name Int -- ^ At a specific argument of a function, indexed by its -- position. | InstDoc Type -- ^ At a class or family instance. deriving ( Show, Eq, Ord, Data, Generic ) ----------------------------------------------------- -- Internal helper functions ----------------------------------------------------- cmpEq :: Ordering -> Bool cmpEq EQ = True cmpEq _ = False thenCmp :: Ordering -> Ordering -> Ordering thenCmp EQ o2 = o2 thenCmp o1 _ = o1 ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/PrimOps.hs-boot0000644000000000000000000000010514470055370022135 0ustar0000000000000000module GHC.Builtin.PrimOps where import GHC.Prelude () data PrimOp ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Types.hs-boot0000644000000000000000000000430214472375231021656 0ustar0000000000000000module GHC.Builtin.Types where import {-# SOURCE #-} GHC.Core.TyCon ( TyCon ) import {-# SOURCE #-} GHC.Core.TyCo.Rep (Type, Kind) import {-# SOURCE #-} GHC.Core.DataCon ( DataCon ) import GHC.Types.Basic (Arity, TupleSort, Boxity, ConTag) import {-# SOURCE #-} GHC.Types.Name (Name) listTyCon :: TyCon typeSymbolKind :: Type charTy :: Type mkBoxedTupleTy :: [Type] -> Type coercibleTyCon, heqTyCon :: TyCon unitTy :: Type liftedTypeKindTyConName :: Name liftedTypeKind, unliftedTypeKind, zeroBitTypeKind :: Kind liftedTypeKindTyCon, unliftedTypeKindTyCon :: TyCon liftedRepTyCon, unliftedRepTyCon :: TyCon constraintKind :: Kind runtimeRepTyCon, levityTyCon, vecCountTyCon, vecElemTyCon :: TyCon 
runtimeRepTy, levityTy :: Type boxedRepDataConTyCon, liftedDataConTyCon :: TyCon vecRepDataConTyCon, tupleRepDataConTyCon :: TyCon liftedRepTy, unliftedRepTy, zeroBitRepTy :: Type liftedDataConTy, unliftedDataConTy :: Type intRepDataConTy, int8RepDataConTy, int16RepDataConTy, int32RepDataConTy, int64RepDataConTy, wordRepDataConTy, word8RepDataConTy, word16RepDataConTy, word32RepDataConTy, word64RepDataConTy, addrRepDataConTy, floatRepDataConTy, doubleRepDataConTy :: Type vec2DataConTy, vec4DataConTy, vec8DataConTy, vec16DataConTy, vec32DataConTy, vec64DataConTy :: Type int8ElemRepDataConTy, int16ElemRepDataConTy, int32ElemRepDataConTy, int64ElemRepDataConTy, word8ElemRepDataConTy, word16ElemRepDataConTy, word32ElemRepDataConTy, word64ElemRepDataConTy, floatElemRepDataConTy, doubleElemRepDataConTy :: Type anyTypeOfKind :: Kind -> Type unboxedTupleKind :: [Type] -> Type multiplicityTyCon :: TyCon multiplicityTy :: Type oneDataConTy :: Type oneDataConTyCon :: TyCon manyDataConTy :: Type manyDataConTyCon :: TyCon unrestrictedFunTyCon :: TyCon multMulTyCon :: TyCon tupleTyConName :: TupleSort -> Arity -> Name integerTy, naturalTy :: Type promotedTupleDataCon :: Boxity -> Arity -> TyCon tupleDataCon :: Boxity -> Arity -> DataCon tupleTyCon :: Boxity -> Arity -> TyCon cTupleDataCon :: Arity -> DataCon cTupleDataConName :: Arity -> Name cTupleTyConName :: Arity -> Name cTupleSelIdName :: ConTag -> Arity -> Name sumDataCon :: ConTag -> Arity -> DataCon sumTyCon :: Arity -> TyCon ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Types/Prim.hs-boot0000644000000000000000000000011714472375231022565 0ustar0000000000000000module GHC.Builtin.Types.Prim where import GHC.Core.TyCon tYPETyCon :: TyCon ghc-lib-parser-9.4.7.20230826/compiler/GHC/Builtin/Uniques.hs-boot0000644000000000000000000000216614472375231022211 0ustar0000000000000000module GHC.Builtin.Uniques where import GHC.Prelude import GHC.Types.Unique import {-# SOURCE #-} GHC.Types.Name import GHC.Types.Basic import 
GHC.Data.FastString -- Needed by GHC.Builtin.Types knownUniqueName :: Unique -> Maybe Name mkSumTyConUnique :: Arity -> Unique mkSumDataConUnique :: ConTagZ -> Arity -> Unique mkCTupleTyConUnique :: Arity -> Unique mkCTupleDataConUnique :: Arity -> Unique mkTupleTyConUnique :: Boxity -> Arity -> Unique mkTupleDataConUnique :: Boxity -> Arity -> Unique mkAlphaTyVarUnique :: Int -> Unique mkPreludeClassUnique :: Int -> Unique mkPrimOpIdUnique :: Int -> Unique mkPrimOpWrapperUnique :: Int -> Unique mkPreludeMiscIdUnique :: Int -> Unique mkPseudoUniqueE, mkBuiltinUnique :: Int -> Unique mkRegSingleUnique, mkRegPairUnique, mkRegSubUnique, mkRegClassUnique :: Int -> Unique mkVarOccUnique, mkDataOccUnique, mkTvOccUnique, mkTcOccUnique :: FastString -> Unique initExitJoinUnique :: Unique mkPreludeTyConUnique :: Int -> Unique tyConRepNameUnique :: Unique -> Unique mkPreludeDataConUnique :: Int -> Unique dataConTyRepNameUnique, dataConWorkerUnique :: Unique -> Unique ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/BlockId.hs-boot0000644000000000000000000000026014470055370021163 0ustar0000000000000000module GHC.Cmm.BlockId (BlockId, mkBlockId) where import GHC.Cmm.Dataflow.Label (Label) import GHC.Types.Unique (Unique) type BlockId = Label mkBlockId :: Unique -> BlockId ghc-lib-parser-9.4.7.20230826/compiler/GHC/Cmm/CLabel.hs-boot0000644000000000000000000000022214472375231020777 0ustar0000000000000000module GHC.Cmm.CLabel where import GHC.Utils.Outputable import GHC.Platform data CLabel pprCLabel :: Platform -> LabelStyle -> CLabel -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Coercion.hs-boot0000644000000000000000000000411114472375231021573 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} module GHC.Core.Coercion where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCo.Rep import {-# SOURCE #-} GHC.Core.TyCon import GHC.Types.Basic ( LeftOrRight ) import GHC.Core.Coercion.Axiom import GHC.Types.Var import GHC.Data.Pair import GHC.Utils.Misc mkReflCo :: 
Role -> Type -> Coercion mkTyConAppCo :: HasDebugCallStack => Role -> TyCon -> [Coercion] -> Coercion mkAppCo :: Coercion -> Coercion -> Coercion mkForAllCo :: TyCoVar -> Coercion -> Coercion -> Coercion mkFunCo :: Role -> CoercionN -> Coercion -> Coercion -> Coercion mkCoVarCo :: CoVar -> Coercion mkAxiomInstCo :: CoAxiom Branched -> BranchIndex -> [Coercion] -> Coercion mkPhantomCo :: Coercion -> Type -> Type -> Coercion mkUnivCo :: UnivCoProvenance -> Role -> Type -> Type -> Coercion mkSymCo :: Coercion -> Coercion mkTransCo :: Coercion -> Coercion -> Coercion mkNthCo :: HasDebugCallStack => Role -> Int -> Coercion -> Coercion mkLRCo :: LeftOrRight -> Coercion -> Coercion mkInstCo :: Coercion -> Coercion -> Coercion mkGReflCo :: Role -> Type -> MCoercionN -> Coercion mkNomReflCo :: Type -> Coercion mkKindCo :: Coercion -> Coercion mkSubCo :: HasDebugCallStack => Coercion -> Coercion mkProofIrrelCo :: Role -> Coercion -> Coercion -> Coercion -> Coercion mkAxiomRuleCo :: CoAxiomRule -> [Coercion] -> Coercion isGReflCo :: Coercion -> Bool isReflCo :: Coercion -> Bool isReflexiveCo :: Coercion -> Bool decomposePiCos :: HasDebugCallStack => Coercion -> Pair Type -> [Type] -> ([Coercion], Coercion) coVarKindsTypesRole :: HasDebugCallStack => CoVar -> (Kind, Kind, Type, Type, Role) coVarRole :: CoVar -> Role mkCoercionType :: Role -> Type -> Type -> Type data LiftingContext liftCoSubst :: HasDebugCallStack => Role -> LiftingContext -> Type -> Coercion seqCo :: Coercion -> () coercionKind :: Coercion -> Pair Type coercionLKind :: Coercion -> Type coercionRKind :: Coercion -> Type coercionType :: Coercion -> Type topNormaliseNewType_maybe :: Type -> Maybe (Coercion, Type) -- used to look through newtypes to the right of -- function arrows, in 'GHC.Core.Type.getRuntimeArgTys' ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/DataCon.hs-boot0000644000000000000000000000241214472375231021345 0ustar0000000000000000module GHC.Core.DataCon where import GHC.Prelude import {-# 
SOURCE #-} GHC.Types.Var( Id, TyVar, TyCoVar, InvisTVBinder ) import {-# SOURCE #-} GHC.Types.Name( Name, NamedThing ) import {-# SOURCE #-} GHC.Core.TyCon( TyCon ) import GHC.Types.FieldLabel ( FieldLabel ) import GHC.Types.Unique ( Uniquable ) import GHC.Utils.Outputable ( Outputable, OutputableBndr ) import GHC.Types.Basic (Arity) import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type, ThetaType, Scaled ) data DataCon data DataConRep data EqSpec dataConName :: DataCon -> Name dataConWorkId :: DataCon -> Id dataConTyCon :: DataCon -> TyCon dataConExTyCoVars :: DataCon -> [TyCoVar] dataConUserTyVars :: DataCon -> [TyVar] dataConUserTyVarBinders :: DataCon -> [InvisTVBinder] dataConSourceArity :: DataCon -> Arity dataConFieldLabels :: DataCon -> [FieldLabel] dataConInstOrigArgTys :: DataCon -> [Type] -> [Scaled Type] dataConStupidTheta :: DataCon -> ThetaType dataConFullSig :: DataCon -> ([TyVar], [TyCoVar], [EqSpec], ThetaType, [Scaled Type], Type) isUnboxedSumDataCon :: DataCon -> Bool instance Eq DataCon instance Uniquable DataCon instance NamedThing DataCon instance Outputable DataCon instance OutputableBndr DataCon dataConWrapId :: DataCon -> Id promoteDataCon :: DataCon -> TyCon ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/CallerCC.hs-boot0000644000000000000000000000027514472375231022213 0ustar0000000000000000module GHC.Core.Opt.CallerCC where import GHC.Prelude -- Necessary due to import in GHC.Driver.Session. 
data CallerCcFilter parseCallerCcFilter :: String -> Either String CallerCcFilter ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/ConstantFold.hs-boot0000644000000000000000000000025614470055370023175 0ustar0000000000000000module GHC.Core.Opt.ConstantFold where import GHC.Prelude import GHC.Core import GHC.Builtin.PrimOps import GHC.Types.Name primOpRules :: Name -> PrimOp -> Maybe CoreRule ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Opt/Monad.hs-boot0000644000000000000000000000151614472375231021640 0ustar0000000000000000-- Created this hs-boot file to remove circular dependencies from the use of -- Plugins. Plugins needs CoreToDo and CoreM types to define core-to-core -- transformations. -- However GHC.Core.Opt.Monad does much more than defining these, and because Plugins are -- activated in various modules, the imports become circular. To solve this I -- extracted CoreToDo and CoreM into this file. -- I needed to write the whole definition of these types, otherwise it created -- a data-newtype conflict. 
module GHC.Core.Opt.Monad ( CoreToDo, CoreM ) where import GHC.Prelude import GHC.Data.IOEnv ( IOEnv ) type CoreIOEnv = IOEnv CoreReader data CoreReader newtype CoreWriter = CoreWriter { cw_simpl_count :: SimplCount } data SimplCount newtype CoreM a = CoreM { unCoreM :: CoreIOEnv (a, CoreWriter) } instance Monad CoreM data CoreToDo ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/SimpleOpt.hs-boot0000644000000000000000000000040714470055370021747 0ustar0000000000000000module GHC.Core.SimpleOpt where import GHC.Core import {-# SOURCE #-} GHC.Core.Unfold import GHC.Utils.Misc (HasDebugCallStack) data SimpleOpts so_uf_opts :: SimpleOpts -> UnfoldingOpts simpleOptExpr :: HasDebugCallStack => SimpleOpts -> CoreExpr -> CoreExpr ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/Ppr.hs-boot0000644000000000000000000000047014472375231021455 0ustar0000000000000000module GHC.Core.TyCo.Ppr where import {-# SOURCE #-} GHC.Types.Var ( TyVar ) import {-# SOURCE #-} GHC.Core.TyCo.Rep (Type, Kind, Coercion, TyLit) import GHC.Utils.Outputable ( SDoc ) pprType :: Type -> SDoc pprKind :: Kind -> SDoc pprCo :: Coercion -> SDoc pprTyLit :: TyLit -> SDoc pprTyVar :: TyVar -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCo/Rep.hs-boot0000644000000000000000000000131414472375231021440 0ustar0000000000000000{-# LANGUAGE NoPolyKinds #-} module GHC.Core.TyCo.Rep where import GHC.Utils.Outputable ( Outputable ) import Data.Data ( Data ) import {-# SOURCE #-} GHC.Types.Var( Var, ArgFlag, AnonArgFlag ) import {-# SOURCE #-} GHC.Core.TyCon ( TyCon ) data Type data Coercion data UnivCoProvenance data TyLit data TyCoBinder data MCoercion data Scaled a type Mult = Type type PredType = Type type Kind = Type type ThetaType = [PredType] type CoercionN = Coercion type MCoercionN = MCoercion mkFunTyMany :: AnonArgFlag -> Type -> Type -> Type mkForAllTy :: Var -> ArgFlag -> Type -> Type mkNakedTyConTy :: TyCon -> Type instance Data Type -- To support Data instances in GHC.Core.Coercion.Axiom 
instance Outputable Type ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/TyCon.hs-boot0000644000000000000000000000073414472375231021075 0ustar0000000000000000module GHC.Core.TyCon where import GHC.Prelude import GHC.Types.Unique ( Uniquable ) import {-# SOURCE #-} GHC.Types.Name import GHC.Utils.Outputable data TyCon instance Uniquable TyCon instance Outputable TyCon type TyConRepName = Name isTupleTyCon :: TyCon -> Bool isUnboxedTupleTyCon :: TyCon -> Bool isFunTyCon :: TyCon -> Bool tyConRepName_maybe :: TyCon -> Maybe TyConRepName mkPrelTyConRepName :: Name -> TyConRepName tyConName :: TyCon -> Name ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Type.hs-boot0000644000000000000000000000162514472375231020762 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} module GHC.Core.Type where import GHC.Prelude import {-# SOURCE #-} GHC.Core.TyCon import {-# SOURCE #-} GHC.Core.TyCo.Rep( Type, Coercion ) import GHC.Utils.Misc isPredTy :: HasDebugCallStack => Type -> Bool isCoercionTy :: Type -> Bool mkAppTy :: Type -> Type -> Type mkCastTy :: Type -> Coercion -> Type mkTyConTy :: TyCon -> Type mkTyConApp :: TyCon -> [Type] -> Type piResultTy :: HasDebugCallStack => Type -> Type -> Type coreView :: Type -> Maybe Type tcView :: Type -> Maybe Type isRuntimeRepTy :: Type -> Bool isLevityTy :: Type -> Bool isMultiplicityTy :: Type -> Bool isLiftedTypeKind :: Type -> Bool mkTYPEapp :: Type -> Type splitTyConApp_maybe :: HasDebugCallStack => Type -> Maybe (TyCon, [Type]) tyConAppTyCon_maybe :: Type -> Maybe TyCon getLevity :: HasDebugCallStack => Type -> Type partitionInvisibleTypes :: TyCon -> [Type] -> ([Type], [Type]) ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Unfold.hs-boot0000644000000000000000000000106514470055370021263 0ustar0000000000000000module GHC.Core.Unfold where import GHC.Prelude data UnfoldingOpts defaultUnfoldingOpts :: UnfoldingOpts updateCreationThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateUseThreshold :: Int -> UnfoldingOpts -> 
UnfoldingOpts updateFunAppDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateDictDiscount :: Int -> UnfoldingOpts -> UnfoldingOpts updateVeryAggressive :: Bool -> UnfoldingOpts -> UnfoldingOpts updateCaseThreshold :: Int -> UnfoldingOpts -> UnfoldingOpts updateCaseScaling :: Int -> UnfoldingOpts -> UnfoldingOpts ghc-lib-parser-9.4.7.20230826/compiler/GHC/Core/Utils.hs-boot0000644000000000000000000000017714472375231021142 0ustar0000000000000000module GHC.Core.Utils where import GHC.Core.Multiplicity import GHC.Core.Type mkFunctionType :: Mult -> Type -> Type -> Type ghc-lib-parser-9.4.7.20230826/compiler/GHC/CoreToIface.hs-boot0000644000000000000000000000141014472375231021264 0ustar0000000000000000module GHC.CoreToIface where import {-# SOURCE #-} GHC.Core.TyCo.Rep ( Type, TyLit, Coercion ) import {-# SOURCE #-} GHC.Iface.Type( IfaceType, IfaceTyCon, IfaceBndr , IfaceCoercion, IfaceTyLit, IfaceAppArgs ) import GHC.Types.Var ( VarBndr, TyCoVar ) import GHC.Types.Var.Env ( TidyEnv ) import GHC.Core.TyCon ( TyCon ) import GHC.Types.Var.Set( VarSet ) -- For GHC.Core.TyCo.Rep toIfaceTypeX :: VarSet -> Type -> IfaceType toIfaceTyLit :: TyLit -> IfaceTyLit toIfaceForAllBndr :: (VarBndr TyCoVar flag) -> (VarBndr IfaceBndr flag) toIfaceTyCon :: TyCon -> IfaceTyCon toIfaceTcArgs :: TyCon -> [Type] -> IfaceAppArgs toIfaceCoercionX :: VarSet -> Coercion -> IfaceCoercion tidyToIfaceTcArgs :: TidyEnv -> TyCon -> [Type] -> IfaceAppArgs ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Hooks.hs-boot0000644000000000000000000000030214470055370021453 0ustar0000000000000000module GHC.Driver.Hooks where import GHC.Prelude () data Hooks emptyHooks :: Hooks class HasHooks m where getHooks :: m Hooks class ContainsHooks a where extractHooks :: a -> Hooks ghc-lib-parser-9.4.7.20230826/compiler/GHC/Driver/Plugins.hs-boot0000644000000000000000000000041014470055370022011 0ustar0000000000000000-- The plugins datatype is stored in DynFlags, so it needs to be -- exposed without importing all 
of its implementation. module GHC.Driver.Plugins where import GHC.Prelude () data Plugin data Plugins emptyPlugins :: Plugins data LoadedPlugin data StaticPlugin ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Expr.hs-boot0000644000000000000000000000255714472375231020446 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable module GHC.Hs.Expr where import GHC.Utils.Outputable ( SDoc, Outputable ) import Language.Haskell.Syntax.Pat ( LPat ) import {-# SOURCE #-} GHC.Hs.Pat () -- for Outputable import GHC.Types.Basic ( SpliceExplicitFlag(..)) import Language.Haskell.Syntax.Expr ( HsExpr, LHsExpr , HsCmd , MatchGroup , GRHSs , HsSplice ) import GHC.Hs.Extension ( OutputableBndrId, GhcPass ) instance (OutputableBndrId p) => Outputable (HsExpr (GhcPass p)) instance (OutputableBndrId p) => Outputable (HsCmd (GhcPass p)) pprLExpr :: (OutputableBndrId p) => LHsExpr (GhcPass p) -> SDoc pprExpr :: (OutputableBndrId p) => HsExpr (GhcPass p) -> SDoc pprSplice :: (OutputableBndrId p) => HsSplice (GhcPass p) -> SDoc pprSpliceDecl :: (OutputableBndrId p) => HsSplice (GhcPass p) -> SpliceExplicitFlag -> SDoc pprPatBind :: forall bndr p . 
(OutputableBndrId bndr, OutputableBndrId p) => LPat (GhcPass bndr) -> GRHSs (GhcPass p) (LHsExpr (GhcPass p)) -> SDoc pprFunBind :: (OutputableBndrId idR) => MatchGroup (GhcPass idR) (LHsExpr (GhcPass idR)) -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Hs/Pat.hs-boot0000644000000000000000000000104114470055370020234 0ustar0000000000000000{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE UndecidableInstances #-} -- Wrinkle in Note [Trees That Grow] -- in module Language.Haskell.Syntax.Extension {-# OPTIONS_GHC -Wno-orphans #-} -- Outputable module GHC.Hs.Pat where import GHC.Utils.Outputable import GHC.Hs.Extension ( OutputableBndrId, GhcPass ) import Language.Haskell.Syntax.Pat instance (OutputableBndrId p) => Outputable (Pat (GhcPass p)) pprLPat :: (OutputableBndrId p) => LPat (GhcPass p) -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Iface/Type.hs-boot0000644000000000000000000000053014472375231021073 0ustar0000000000000000module GHC.Iface.Type ( IfaceType, IfaceTyCon, IfaceBndr , IfaceCoercion, IfaceTyLit, IfaceAppArgs ) where -- Empty import to influence the compilation ordering. 
-- See Note [Depend on GHC.Num.Integer] in GHC.Base import GHC.Base () data IfaceAppArgs data IfaceType data IfaceTyCon data IfaceTyLit data IfaceCoercion data IfaceBndr ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Errors/Hole/FitTypes.hs-boot0000644000000000000000000000147314472375231023432 0ustar0000000000000000-- This boot file is in place to break the loop where: -- + GHC.Tc.Types needs 'HoleFitPlugin', -- + which needs 'GHC.Tc.Errors.Hole.FitTypes' -- + which needs 'GHC.Tc.Types' module GHC.Tc.Errors.Hole.FitTypes where import GHC.Base (Int, Maybe) import GHC.Types.Var (Id) import GHC.Types.Name (Name) import GHC.Types.Name.Reader (GlobalRdrElt) import GHC.Tc.Utils.TcType (TcType) import GHC.Hs.Doc (HsDocString) import GHC.Utils.Outputable (SDoc) data HoleFitCandidate = IdHFCand Id | NameHFCand Name | GreHFCand GlobalRdrElt data HoleFitPlugin data HoleFit = HoleFit { hfId :: Id , hfCand :: HoleFitCandidate , hfType :: TcType , hfRefLvl :: Int , hfWrap :: [TcType] , hfMatches :: [TcType] , hfDoc :: Maybe [HsDocString] } | RawHoleFit SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types.hs-boot0000644000000000000000000000057014472375231020621 0ustar0000000000000000module GHC.Tc.Types where import GHC.Tc.Utils.TcType import GHC.Types.SrcLoc import GHC.Utils.Outputable data TcLclEnv data SelfBootInfo data TcIdSigInfo instance Outputable TcIdSigInfo setLclEnvTcLevel :: TcLclEnv -> TcLevel -> TcLclEnv getLclEnvTcLevel :: TcLclEnv -> TcLevel setLclEnvLoc :: TcLclEnv -> RealSrcSpan -> TcLclEnv getLclEnvLoc :: TcLclEnv -> RealSrcSpan ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Types/Origin.hs-boot0000644000000000000000000000030714472375231022046 0ustar0000000000000000module GHC.Tc.Types.Origin where import GHC.Stack ( HasCallStack ) data SkolemInfoAnon data SkolemInfo data FixedRuntimeRepContext data FixedRuntimeRepOrigin unkSkol :: HasCallStack => SkolemInfo 
ghc-lib-parser-9.4.7.20230826/compiler/GHC/Tc/Utils/TcType.hs-boot0000644000000000000000000000103414472375231022021 0ustar0000000000000000module GHC.Tc.Utils.TcType where import GHC.Utils.Outputable( SDoc ) import GHC.Prelude ( Bool ) import {-# SOURCE #-} GHC.Types.Var ( TcTyVar ) import {-# SOURCE #-} GHC.Core.TyCo.Rep import GHC.Utils.Misc ( HasDebugCallStack ) import GHC.Stack data MetaDetails data TcTyVarDetails pprTcTyVarDetails :: TcTyVarDetails -> SDoc vanillaSkolemTvUnk :: HasCallStack => TcTyVarDetails isMetaTyVar :: TcTyVar -> Bool isTyConableTyVar :: TcTyVar -> Bool isConcreteTyVar :: TcTyVar -> Bool tcEqType :: HasDebugCallStack => Type -> Type -> Bool ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id.hs-boot0000644000000000000000000000022214470055371020577 0ustar0000000000000000module GHC.Types.Id where import GHC.Prelude () import {-# SOURCE #-} GHC.Types.Name import {-# SOURCE #-} GHC.Types.Var idName :: Id -> Name ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id/Info.hs-boot0000644000000000000000000000034314470055371021476 0ustar0000000000000000module GHC.Types.Id.Info where import GHC.Prelude import GHC.Utils.Outputable data IdInfo data IdDetails vanillaIdInfo :: IdInfo coVarDetails :: IdDetails isCoVarDetails :: IdDetails -> Bool pprIdDetails :: IdDetails -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Id/Make.hs-boot0000644000000000000000000000041714470055371021462 0ustar0000000000000000module GHC.Types.Id.Make where import GHC.Types.Name( Name ) import GHC.Types.Var( Id ) import GHC.Core.Class( Class ) import {-# SOURCE #-} GHC.Core.DataCon( DataCon ) data DataConBoxer mkDataConWorkId :: Name -> DataCon -> Id mkDictSelId :: Name -> Class -> Id ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name.hs-boot0000644000000000000000000000114314472375231021130 0ustar0000000000000000module GHC.Types.Name ( module GHC.Types.Name, module GHC.Types.Name.Occurrence ) where import GHC.Prelude (Eq) import {-# SOURCE #-} GHC.Types.Name.Occurrence 
import GHC.Types.Unique import GHC.Utils.Outputable import Data.Data (Data) data Name instance Eq Name instance Data Name instance Uniquable Name instance Outputable Name class NamedThing a where getOccName :: a -> OccName getName :: a -> Name getOccName n = nameOccName (getName n) nameUnique :: Name -> Unique setNameUnique :: Name -> Unique -> Name nameOccName :: Name -> OccName tidyNameOcc :: Name -> OccName -> Name ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Name/Occurrence.hs-boot0000644000000000000000000000042314472375231023220 0ustar0000000000000000module GHC.Types.Name.Occurrence where import GHC.Prelude (String) import GHC.Data.FastString data OccName class HasOccName name where occName :: name -> OccName occNameString :: OccName -> String mkRecFldSelOcc :: String -> OccName mkVarOccFS :: FastString -> OccName ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/TyThing.hs-boot0000644000000000000000000000025614470055371021640 0ustar0000000000000000module GHC.Types.TyThing where import {-# SOURCE #-} GHC.Core.TyCon import {-# SOURCE #-} GHC.Types.Var data TyThing mkATyCon :: TyCon -> TyThing mkAnId :: Id -> TyThing ghc-lib-parser-9.4.7.20230826/compiler/GHC/Types/Var.hs-boot0000644000000000000000000000127714472375231021010 0ustar0000000000000000{-# LANGUAGE NoPolyKinds #-} module GHC.Types.Var where import GHC.Prelude () import {-# SOURCE #-} GHC.Types.Name -- We compile this GHC with -XNoImplicitPrelude, so if there are no imports -- it does not seem to depend on anything. But it does! We must, for -- example, compile GHC.Types in the ghc-prim library first. So this -- otherwise-unnecessary import tells the build system that this module -- depends on GhcPrelude, which ensures that GHC.Type is built first. 
data ArgFlag data AnonArgFlag data Var instance NamedThing Var data VarBndr var argf data Specificity type TyVar = Var type Id = Var type TyCoVar = Id type TcTyVar = Var type InvisTVBinder = VarBndr TyVar Specificity ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Module/Name.hs-boot0000644000000000000000000000011314472375231022164 0ustar0000000000000000module GHC.Unit.Module.Name where import GHC.Prelude () data ModuleName ghc-lib-parser-9.4.7.20230826/compiler/GHC/Unit/Types.hs-boot0000644000000000000000000000070314472375231021170 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} module GHC.Unit.Types where import GHC.Prelude () import {-# SOURCE #-} GHC.Utils.Outputable import {-# SOURCE #-} GHC.Unit.Module.Name ( ModuleName ) import Data.Kind (Type) data UnitId data GenModule (unit :: Type) data GenUnit (uid :: Type) type Module = GenModule Unit type Unit = GenUnit UnitId moduleName :: GenModule a -> ModuleName moduleUnit :: GenModule a -> a pprModule :: Module -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/GHC/Utils/Outputable.hs-boot0000644000000000000000000000017014472375231022367 0ustar0000000000000000module GHC.Utils.Outputable where import GHC.Prelude data SDoc data PprStyle data SDocContext text :: String -> SDoc ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Expr.hs-boot0000644000000000000000000000120414472375231024053 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE TypeFamilies #-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Expr where import Language.Haskell.Syntax.Extension ( XRec ) import Data.Kind ( Type ) type role HsExpr nominal type role MatchGroup nominal nominal type role GRHSs nominal nominal type role HsSplice nominal data HsExpr (i :: Type) data HsSplice (i :: Type) data MatchGroup (a :: Type) (body :: Type) data GRHSs (a :: Type) (body :: Type) type family SyntaxExpr (i :: Type) 
type LHsExpr a = XRec a (HsExpr a) ghc-lib-parser-9.4.7.20230826/compiler/Language/Haskell/Syntax/Pat.hs-boot0000644000000000000000000000053514470055371023665 0ustar0000000000000000{-# LANGUAGE KindSignatures #-} {-# LANGUAGE RoleAnnotations #-} {-# LANGUAGE TypeFamilies #-} -- See Note [Language.Haskell.Syntax.* Hierarchy] for why not GHC.Hs.* module Language.Haskell.Syntax.Pat where import Language.Haskell.Syntax.Extension ( XRec ) import Data.Kind type role Pat nominal data Pat (i :: Type) type LPat i = XRec i (Pat i) ghc-lib-parser-9.4.7.20230826/libraries/ghc-heap/cbits/HeapPrim.cmm0000644000000000000000000000032614472400004022360 0ustar0000000000000000#include "Cmm.h" Ghclib_aToWordzh (P_ clos) { return (clos); } Ghclib_reallyUnsafePtrEqualityUpToTag (W_ clos1, W_ clos2) { clos1 = UNTAG(clos1); clos2 = UNTAG(clos2); return (clos1 == clos2); } ghc-lib-parser-9.4.7.20230826/compiler/cbits/genSym.c0000644000000000000000000000151014472400113017742 0ustar0000000000000000#include #include #include "Unique.h" #include // These global variables have been moved into the RTS. It allows them to be // shared with plugins even if two different instances of the GHC library are // loaded at the same time (#19940) // // The CPP is thus about the RTS version GHC is linked against, and not the // version of the GHC being built. #if !MIN_VERSION_GLASGOW_HASKELL(9,3,0,0) HsInt ghc_unique_counter = 0; HsInt ghc_unique_inc = 1; #endif #define UNIQUE_BITS (sizeof (HsInt) * 8 - UNIQUE_TAG_BITS) #define UNIQUE_MASK ((1ULL << UNIQUE_BITS) - 1) HsInt ghc_lib_parser_genSym(void) { HsInt u = atomic_inc((StgWord *)&ghc_unique_counter, ghc_unique_inc) & UNIQUE_MASK; // Uh oh! We will overflow next time a unique is requested. ASSERT(u != UNIQUE_MASK); return u; } ghc-lib-parser-9.4.7.20230826/compiler/cbits/cutils.c0000644000000000000000000000106514472400113020010 0ustar0000000000000000/* These utility routines are used various places in the GHC library. 
*/ #include #include void ghc_lib_parser_enableTimingStats( void ) /* called from the driver */ { RtsFlags.GcFlags.giveStats = ONELINE_GC_STATS; } void ghc_lib_parser_setHeapSize( HsInt size ) { RtsFlags.GcFlags.heapSizeSuggestion = size / BLOCK_SIZE; if (RtsFlags.GcFlags.maxHeapSize != 0 && RtsFlags.GcFlags.heapSizeSuggestion > RtsFlags.GcFlags.maxHeapSize) { RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion; } } ghc-lib-parser-9.4.7.20230826/compiler/cbits/keepCAFsForGHCi.c0000644000000000000000000000241114470055371021276 0ustar0000000000000000#include #include // Note [keepCAFsForGHCi] // ~~~~~~~~~~~~~~~~~~~~~~ // This file is only included in the dynamic library. // It contains an __attribute__((constructor)) function (run prior to main()) // which sets the keepCAFs flag in the RTS, before any Haskell code is run. // This is required so that GHCi can use dynamic libraries instead of HSxyz.o // files. // // For static builds we have to guarantee that the linker loads this object file // to ensure the constructor gets run and not discarded. If the object is part of // an archive and not otherwise referenced the linker would ignore the object. // To avoid this: // * When initializing a GHC session in initGhcMonad we assert keeping cafs has been // enabled by calling keepCAFsForGHCi. // * This causes the GHC module from the ghc package to carry a reference to this object // file. // * Which in turn ensures the linker doesn't discard this object file, causing // the constructor to be run, allowing the assertion to succeed in the first place // as keepCAFs will have been set already during initialization of constructors. 
bool keepCAFsForGHCi(void) __attribute__((constructor)); bool keepCAFsForGHCi(void) { bool was_set = keepCAFs; setKeepCAFs(); return was_set; } ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/lib/settings0000644000000000000000000000353714472400073020457 0ustar0000000000000000[("GCC extra via C opts", "") ,("C compiler command", "/usr/bin/gcc") ,("C compiler flags", "--target=x86_64-apple-darwin ") ,("C++ compiler command", "/usr/bin/g++") ,("C++ compiler flags", "--target=x86_64-apple-darwin ") ,("C compiler link flags", "--target=x86_64-apple-darwin -Wl,-no_fixup_chains") ,("C compiler supports -no-pie", "NO") ,("Haskell CPP command", "/usr/bin/gcc") ,("Haskell CPP flags", "-E -undef -traditional -Wno-invalid-pp-token -Wno-unicode -Wno-trigraphs") ,("ld command", "ld") ,("ld flags", "") ,("ld supports compact unwind", "YES") ,("ld supports build-id", "NO") ,("ld supports filelist", "YES") ,("ld is GNU ld", "NO") ,("Merge objects command", "ld") ,("Merge objects flags", "-r") ,("ar command", "/usr/bin/ar") ,("ar flags", "qcls") ,("ar supports at file", "NO") ,("ar supports -L", "NO") ,("ranlib command", "/usr/bin/ranlib") ,("otool command", "otool") ,("install_name_tool command", "install_name_tool") ,("touch command", "touch") ,("dllwrap command", "/bin/false") ,("windres command", "/bin/false") ,("libtool command", "libtool") ,("unlit command", "$topdir/bin/unlit") ,("cross compiling", "NO") ,("target platform string", "x86_64-apple-darwin") ,("target os", "OSDarwin") ,("target arch", "ArchX86_64") ,("target word size", "8") ,("target word big endian", "NO") ,("target has GNU nonexec stack", "NO") ,("target has .ident directive", "YES") ,("target has subsections via symbols", "YES") ,("target has RTS linker", "YES") ,("target has libm", "YES") ,("Unregisterised", "NO") ,("LLVM target", "x86_64-apple-darwin") ,("LLVM llc command", "llc") ,("LLVM opt command", "opt") ,("LLVM clang command", "clang") ,("Use inplace MinGW toolchain", "NO") ,("Use interpreter", "YES") 
,("Support SMP", "YES") ,("RTS ways", "v thr") ,("Tables next to code", "YES") ,("Leading underscore", "YES") ,("Use LibFFI", "NO") ,("RTS expects libdw", "NO") ] ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/lib/llvm-targets0000644000000000000000000002546414472400073021243 0ustar0000000000000000[("i386-unknown-windows", ("e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32", "pentium4", "")) ,("i686-unknown-windows", ("e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32", "pentium4", "")) ,("x86_64-unknown-windows", ("e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("arm-unknown-linux-gnueabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm7tdmi", "-vfp2 -vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 -fp64 -d32 -neon -sha2 -aes -dotprod -fp16fml -bf16 -mve.fp -fpregs +strict-align")) ,("arm-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("arm-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("armv6-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1136jf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("armv6-unknown-linux-musleabihf", 
("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1136jf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("armv6l-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("armv6l-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("armv7-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7a-unknown-linux-gnueabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7a-unknown-linux-musleabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp 
-fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7a-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7a-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7l-unknown-linux-gnueabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7l-unknown-linux-musleabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7l-unknown-linux-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("armv7l-unknown-linux-musleabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("aarch64-unknown-linux-gnu", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64-unknown-linux-musl", 
("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("aarch64-unknown-linux", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("i386-unknown-linux-gnu", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i386-unknown-linux-musl", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i386-unknown-linux", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i686-unknown-linux-gnu", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i686-unknown-linux-musl", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("i686-unknown-linux", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "pentium4", "")) ,("x86_64-unknown-linux-gnu", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-linux-musl", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-linux", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-linux-android", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "+sse4.2 +popcnt +cx16")) ,("armv7-unknown-linux-androideabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("aarch64-unknown-linux-android", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon +outline-atomics")) ,("armv7a-unknown-linux-androideabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 
+vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("powerpc64le-unknown-linux-gnu", ("e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512", "ppc64le", "")) ,("powerpc64le-unknown-linux-musl", ("e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512", "ppc64le", "+secure-plt")) ,("powerpc64le-unknown-linux", ("e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512", "ppc64le", "")) ,("s390x-ibm-linux", ("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64", "z10", "")) ,("riscv64-unknown-linux-gnu", ("e-m:e-p:64:64-i64:64-i128:128-n64-S128", "", "+m +a +f +d +c +relax -save-restore")) ,("riscv64-unknown-linux", ("e-m:e-p:64:64-i64:64-i128:128-n64-S128", "", "+m +a +f +d +c +relax -save-restore")) ,("i386-apple-darwin", ("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:128-n8:16:32-S128", "yonah", "")) ,("x86_64-apple-darwin", ("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "core2", "")) ,("arm64-apple-darwin", ("e-m:o-i64:64-i128:128-n32:64-S128", "apple-a7", "+fp-armv8 +neon +crypto +zcm +zcz +sha2 +aes")) ,("armv7-apple-ios", ("e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32", "generic", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml")) ,("aarch64-apple-ios", ("e-m:o-i64:64-i128:128-n32:64-S128", "apple-a7", "+fp-armv8 +neon +crypto +zcm +zcz +sha2 +aes")) ,("i386-apple-ios", ("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:128-n8:16:32-S128", "yonah", "")) ,("x86_64-apple-ios", ("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "core2", "")) ,("x86_64-portbld-freebsd", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("x86_64-unknown-freebsd", 
("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("aarch64-unknown-freebsd", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("armv6-unknown-freebsd-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm1176jzf-s", "+vfp2 +vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 -d32 -neon -sha2 -aes -fp16fml +strict-align")) ,("armv7-unknown-freebsd-gnueabihf", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "cortex-a8", "+vfp2 +vfp2sp +vfp3 +vfp3d16 +vfp3d16sp +vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 +fp64 +d32 +neon -sha2 -aes -fp16fml +strict-align")) ,("aarch64-unknown-netbsd", ("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128", "generic", "+neon")) ,("x86_64-unknown-openbsd", ("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", "x86-64", "")) ,("i386-unknown-openbsd", ("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128", "i586", "")) ,("arm-unknown-nto-qnx-eabi", ("e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64", "arm7tdmi", "-vfp2 -vfp2sp -vfp3 -vfp3d16 -vfp3d16sp -vfp3sp -fp16 -vfp4 -vfp4d16 -vfp4d16sp -vfp4sp -fp-armv8 -fp-armv8d16 -fp-armv8d16sp -fp-armv8sp -fullfp16 -fp64 -d32 -neon -sha2 -aes -dotprod -fp16fml -bf16 -mve.fp -fpregs +strict-align")) ] ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/lib/llvm-passes0000644000000000000000000000012014472400073021046 0ustar0000000000000000[ (0, "-mem2reg -globalopt -lower-expect"), (1, "-O1 -globalopt"), (2, "-O2") ] ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/rts/build/include/ghcautoconf.h0000644000000000000000000004347314472400073024134 0ustar0000000000000000#if !defined(__GHCAUTOCONF_H__) #define __GHCAUTOCONF_H__ /* mk/config.h. Generated from config.h.in by configure. */ /* mk/config.h.in. 
Generated from configure.ac by autoheader. */ /* Define if building universal (internal helper macro) */ /* #undef AC_APPLE_UNIVERSAL_BUILD */ /* The alignment of a `char'. */ #define ALIGNMENT_CHAR 1 /* The alignment of a `double'. */ #define ALIGNMENT_DOUBLE 8 /* The alignment of a `float'. */ #define ALIGNMENT_FLOAT 4 /* The alignment of a `int'. */ #define ALIGNMENT_INT 4 /* The alignment of a `int16_t'. */ #define ALIGNMENT_INT16_T 2 /* The alignment of a `int32_t'. */ #define ALIGNMENT_INT32_T 4 /* The alignment of a `int64_t'. */ #define ALIGNMENT_INT64_T 8 /* The alignment of a `int8_t'. */ #define ALIGNMENT_INT8_T 1 /* The alignment of a `long'. */ #define ALIGNMENT_LONG 8 /* The alignment of a `long long'. */ #define ALIGNMENT_LONG_LONG 8 /* The alignment of a `short'. */ #define ALIGNMENT_SHORT 2 /* The alignment of a `uint16_t'. */ #define ALIGNMENT_UINT16_T 2 /* The alignment of a `uint32_t'. */ #define ALIGNMENT_UINT32_T 4 /* The alignment of a `uint64_t'. */ #define ALIGNMENT_UINT64_T 8 /* The alignment of a `uint8_t'. */ #define ALIGNMENT_UINT8_T 1 /* The alignment of a `unsigned char'. */ #define ALIGNMENT_UNSIGNED_CHAR 1 /* The alignment of a `unsigned int'. */ #define ALIGNMENT_UNSIGNED_INT 4 /* The alignment of a `unsigned long'. */ #define ALIGNMENT_UNSIGNED_LONG 8 /* The alignment of a `unsigned long long'. */ #define ALIGNMENT_UNSIGNED_LONG_LONG 8 /* The alignment of a `unsigned short'. */ #define ALIGNMENT_UNSIGNED_SHORT 2 /* The alignment of a `void *'. */ #define ALIGNMENT_VOID_P 8 /* Define (to 1) if C compiler has an LLVM back end */ #define CC_LLVM_BACKEND 1 /* Define to 1 if __thread is supported */ #define CC_SUPPORTS_TLS 1 /* Define to 1 if using 'alloca.c'. */ /* #undef C_ALLOCA */ /* Enable Native I/O manager as default. 
*/ /* #undef DEFAULT_NATIVE_IO_MANAGER */ /* Define to 1 if your processor stores words of floats with the most significant byte first */ /* #undef FLOAT_WORDS_BIGENDIAN */ /* Has visibility hidden */ #define HAS_VISIBILITY_HIDDEN 1 /* Define to 1 if you have 'alloca', as a function or macro. */ #define HAVE_ALLOCA 1 /* Define to 1 if works. */ #define HAVE_ALLOCA_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_BFD_H */ /* Does C compiler support __atomic primitives? */ #define HAVE_C11_ATOMICS 1 /* Define to 1 if you have the `clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define to 1 if you have the `ctime_r' function. */ #define HAVE_CTIME_R 1 /* Define to 1 if you have the header file. */ #define HAVE_CTYPE_H 1 /* Define to 1 if you have the declaration of `ctime_r', and to 0 if you don't. */ #define HAVE_DECL_CTIME_R 1 /* Define to 1 if you have the declaration of `environ', and to 0 if you don't. */ #define HAVE_DECL_ENVIRON 0 /* Define to 1 if you have the declaration of `MADV_DONTNEED', and to 0 if you don't. */ /* #undef HAVE_DECL_MADV_DONTNEED */ /* Define to 1 if you have the declaration of `MADV_FREE', and to 0 if you don't. */ /* #undef HAVE_DECL_MADV_FREE */ /* Define to 1 if you have the declaration of `MAP_NORESERVE', and to 0 if you don't. */ /* #undef HAVE_DECL_MAP_NORESERVE */ /* Define to 1 if you have the header file. */ #define HAVE_DIRENT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the `dlinfo' function. */ /* #undef HAVE_DLINFO */ /* Define to 1 if you have the header file. */ /* #undef HAVE_ELFUTILS_LIBDW_H */ /* Define to 1 if you have the header file. */ #define HAVE_ERRNO_H 1 /* Define to 1 if you have the `eventfd' function. */ /* #undef HAVE_EVENTFD */ /* Define to 1 if you have the header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_FFI_H */ /* Define to 1 if you have the `fork' function. 
*/ #define HAVE_FORK 1 /* Define to 1 if you have the `getclock' function. */ /* #undef HAVE_GETCLOCK */ /* Define to 1 if you have the `GetModuleFileName' function. */ /* #undef HAVE_GETMODULEFILENAME */ /* Define to 1 if you have the `getrusage' function. */ #define HAVE_GETRUSAGE 1 /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the header file. */ #define HAVE_GRP_H 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `bfd' library (-lbfd). */ /* #undef HAVE_LIBBFD */ /* Define to 1 if you have the `dl' library (-ldl). */ #define HAVE_LIBDL 1 /* Define to 1 if you have the `iberty' library (-liberty). */ /* #undef HAVE_LIBIBERTY */ /* Define to 1 if you need to link with libm */ #define HAVE_LIBM 1 /* Define to 1 if you have the `mingwex' library (-lmingwex). */ /* #undef HAVE_LIBMINGWEX */ /* Define to 1 if you have libnuma */ #define HAVE_LIBNUMA 0 /* Define to 1 if you have the `pthread' library (-lpthread). */ #define HAVE_LIBPTHREAD 1 /* Define to 1 if you have the `rt' library (-lrt). */ /* #undef HAVE_LIBRT */ /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_LOCALE_H 1 /* Define to 1 if the system has the type `long long'. */ #define HAVE_LONG_LONG 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_MINIX_CONFIG_H */ /* Define to 1 if you have the header file. */ #define HAVE_NLIST_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_NUMAIF_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_NUMA_H */ /* Define to 1 if we have printf$LDBLStub (Apple Mac OS >= 10.4, PPC). */ #define HAVE_PRINTF_LDBLSTUB 0 /* Define to 1 if you have the `pthread_condattr_setclock' function. */ /* #undef HAVE_PTHREAD_CONDATTR_SETCLOCK */ /* Define to 1 if you have the header file. 
*/ #define HAVE_PTHREAD_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_PTHREAD_NP_H */ /* Define to 1 if you have the glibc version of pthread_setname_np */ /* #undef HAVE_PTHREAD_SETNAME_NP */ /* Define to 1 if you have the Darwin version of pthread_setname_np */ #define HAVE_PTHREAD_SETNAME_NP_DARWIN 1 /* Define to 1 if you have the NetBSD version of pthread_setname_np */ /* #undef HAVE_PTHREAD_SETNAME_NP_NETBSD */ /* Define to 1 if you have pthread_set_name_np */ /* #undef HAVE_PTHREAD_SET_NAME_NP */ /* Define to 1 if you have the header file. */ #define HAVE_PWD_H 1 /* Define to 1 if you have the `sched_getaffinity' function. */ /* #undef HAVE_SCHED_GETAFFINITY */ /* Define to 1 if you have the header file. */ #define HAVE_SCHED_H 1 /* Define to 1 if you have the `sched_setaffinity' function. */ /* #undef HAVE_SCHED_SETAFFINITY */ /* Define to 1 if you have the `setitimer' function. */ #define HAVE_SETITIMER 1 /* Define to 1 if you have the `setlocale' function. */ #define HAVE_SETLOCALE 1 /* Define to 1 if you have the `siginterrupt' function. */ #define HAVE_SIGINTERRUPT 1 /* Define to 1 if you have the header file. */ #define HAVE_SIGNAL_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDIO_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if Apple-style dead-stripping is supported. */ #define HAVE_SUBSECTIONS_VIA_SYMBOLS 1 /* Define to 1 if you have the `sysconf' function. */ #define HAVE_SYSCONF 1 /* Define to 1 if you have libffi. */ /* #undef HAVE_SYSTEM_LIBFFI */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_CPUSET_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_EVENTFD_H */ /* Define to 1 if you have the header file. 
*/ #define HAVE_SYS_MMAN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_PARAM_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_RESOURCE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_SELECT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIMEB_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_TIMERFD_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_TIMERS_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIMES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_UTSNAME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_WAIT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_TERMIOS_H 1 /* Define to 1 if you have the `timer_settime' function. */ /* #undef HAVE_TIMER_SETTIME */ /* Define to 1 if you have the `times' function. */ #define HAVE_TIMES 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the `uselocale' function. */ #define HAVE_USELOCALE 1 /* Define to 1 if you have the header file. */ #define HAVE_UTIME_H 1 /* Define to 1 if you have the `vfork' function. */ #define HAVE_VFORK 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_VFORK_H */ /* Define to 1 if you have the header file. */ #define HAVE_WCHAR_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if you have the `WinExec' function. */ /* #undef HAVE_WINEXEC */ /* Define to 1 if you have the header file. */ /* #undef HAVE_WINSOCK_H */ /* Define to 1 if `fork' works. */ #define HAVE_WORKING_FORK 1 /* Define to 1 if `vfork' works. 
*/ #define HAVE_WORKING_VFORK 1 /* Define to 1 if C symbols have a leading underscore added by the compiler. */ #define LEADING_UNDERSCORE 1 /* Define to 1 if we need -latomic. */ #define NEED_ATOMIC_LIB 0 /* Define 1 if we need to link code using pthreads with -lpthread */ #define NEED_PTHREAD_LIB 0 /* Define to the address where bug reports for this package should be sent. */ /* #undef PACKAGE_BUGREPORT */ /* Define to the full name of this package. */ /* #undef PACKAGE_NAME */ /* Define to the full name and version of this package. */ /* #undef PACKAGE_STRING */ /* Define to the one symbol short name of this package. */ /* #undef PACKAGE_TARNAME */ /* Define to the home page for this package. */ /* #undef PACKAGE_URL */ /* Define to the version of this package. */ /* #undef PACKAGE_VERSION */ /* Use mmap in the runtime linker */ #define RTS_LINKER_USE_MMAP 1 /* The size of `char', as computed by sizeof. */ #define SIZEOF_CHAR 1 /* The size of `double', as computed by sizeof. */ #define SIZEOF_DOUBLE 8 /* The size of `float', as computed by sizeof. */ #define SIZEOF_FLOAT 4 /* The size of `int', as computed by sizeof. */ #define SIZEOF_INT 4 /* The size of `int16_t', as computed by sizeof. */ #define SIZEOF_INT16_T 2 /* The size of `int32_t', as computed by sizeof. */ #define SIZEOF_INT32_T 4 /* The size of `int64_t', as computed by sizeof. */ #define SIZEOF_INT64_T 8 /* The size of `int8_t', as computed by sizeof. */ #define SIZEOF_INT8_T 1 /* The size of `long', as computed by sizeof. */ #define SIZEOF_LONG 8 /* The size of `long long', as computed by sizeof. */ #define SIZEOF_LONG_LONG 8 /* The size of `short', as computed by sizeof. */ #define SIZEOF_SHORT 2 /* The size of `uint16_t', as computed by sizeof. */ #define SIZEOF_UINT16_T 2 /* The size of `uint32_t', as computed by sizeof. */ #define SIZEOF_UINT32_T 4 /* The size of `uint64_t', as computed by sizeof. */ #define SIZEOF_UINT64_T 8 /* The size of `uint8_t', as computed by sizeof. 
*/ #define SIZEOF_UINT8_T 1 /* The size of `unsigned char', as computed by sizeof. */ #define SIZEOF_UNSIGNED_CHAR 1 /* The size of `unsigned int', as computed by sizeof. */ #define SIZEOF_UNSIGNED_INT 4 /* The size of `unsigned long', as computed by sizeof. */ #define SIZEOF_UNSIGNED_LONG 8 /* The size of `unsigned long long', as computed by sizeof. */ #define SIZEOF_UNSIGNED_LONG_LONG 8 /* The size of `unsigned short', as computed by sizeof. */ #define SIZEOF_UNSIGNED_SHORT 2 /* The size of `void *', as computed by sizeof. */ #define SIZEOF_VOID_P 8 /* If using the C implementation of alloca, define if you know the direction of stack growth for your system; otherwise it will be automatically deduced at runtime. STACK_DIRECTION > 0 => grows toward higher addresses STACK_DIRECTION < 0 => grows toward lower addresses STACK_DIRECTION = 0 => direction of growth unknown */ /* #undef STACK_DIRECTION */ /* Define to 1 if all of the C90 standard headers exist (not just the ones required in a freestanding environment). This macro is provided for backward compatibility; new code need not use it. */ #define STDC_HEADERS 1 /* Define to 1 if info tables are laid out next to code */ #define TABLES_NEXT_TO_CODE 1 /* Define to 1 if you can safely include both and . This macro is obsolete. */ #define TIME_WITH_SYS_TIME 1 /* Compile-in ASSERTs in all ways. */ /* #undef USE_ASSERTS_ALL_WAYS */ /* Enable single heap address space support */ #define USE_LARGE_ADDRESS_SPACE 1 /* Set to 1 to use libdw */ #define USE_LIBDW 0 /* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # define _ALL_SOURCE 1 #endif /* Enable general extensions on macOS. */ #ifndef _DARWIN_C_SOURCE # define _DARWIN_C_SOURCE 1 #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # define __EXTENSIONS__ 1 #endif /* Enable GNU extensions on systems that have them. 
*/ #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif /* Enable X/Open compliant socket functions that do not require linking with -lxnet on HP-UX 11.11. */ #ifndef _HPUX_ALT_XOPEN_SOCKET_API # define _HPUX_ALT_XOPEN_SOCKET_API 1 #endif /* Identify the host operating system as Minix. This macro does not affect the system headers' behavior. A future release of Autoconf may stop defining this macro. */ #ifndef _MINIX /* # undef _MINIX */ #endif /* Enable general extensions on NetBSD. Enable NetBSD compatibility extensions on Minix. */ #ifndef _NETBSD_SOURCE # define _NETBSD_SOURCE 1 #endif /* Enable OpenBSD compatibility extensions on NetBSD. Oddly enough, this does nothing on OpenBSD. */ #ifndef _OPENBSD_SOURCE # define _OPENBSD_SOURCE 1 #endif /* Define to 1 if needed for POSIX-compatible behavior. */ #ifndef _POSIX_SOURCE /* # undef _POSIX_SOURCE */ #endif /* Define to 2 if needed for POSIX-compatible behavior. */ #ifndef _POSIX_1_SOURCE /* # undef _POSIX_1_SOURCE */ #endif /* Enable POSIX-compatible threading on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # define _POSIX_PTHREAD_SEMANTICS 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-5:2014. */ #ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__ # define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-1:2014. */ #ifndef __STDC_WANT_IEC_60559_BFP_EXT__ # define __STDC_WANT_IEC_60559_BFP_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-2:2015. */ #ifndef __STDC_WANT_IEC_60559_DFP_EXT__ # define __STDC_WANT_IEC_60559_DFP_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-4:2015. */ #ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__ # define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TS 18661-3:2015. */ #ifndef __STDC_WANT_IEC_60559_TYPES_EXT__ # define __STDC_WANT_IEC_60559_TYPES_EXT__ 1 #endif /* Enable extensions specified by ISO/IEC TR 24731-2:2010. 
*/ #ifndef __STDC_WANT_LIB_EXT2__ # define __STDC_WANT_LIB_EXT2__ 1 #endif /* Enable extensions specified by ISO/IEC 24747:2009. */ #ifndef __STDC_WANT_MATH_SPEC_FUNCS__ # define __STDC_WANT_MATH_SPEC_FUNCS__ 1 #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # define _TANDEM_SOURCE 1 #endif /* Enable X/Open extensions. Define to 500 only if necessary to make mbstate_t available. */ #ifndef _XOPEN_SOURCE /* # undef _XOPEN_SOURCE */ #endif /* Define to 1 if we can use timer_create(CLOCK_REALTIME,...) */ /* #undef USE_TIMER_CREATE */ /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN /* # undef WORDS_BIGENDIAN */ # endif #endif /* Number of bits in a file offset, on hosts where this is settable. */ /* #undef _FILE_OFFSET_BITS */ /* Define for large files, on AIX-style hosts. */ /* #undef _LARGE_FILES */ /* ARM pre v6 */ /* #undef arm_HOST_ARCH_PRE_ARMv6 */ /* ARM pre v7 */ /* #undef arm_HOST_ARCH_PRE_ARMv7 */ /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* Define as a signed integer type capable of holding a process identifier. */ /* #undef pid_t */ /* The maximum supported LLVM version number */ #define sUPPORTED_LLVM_VERSION_MAX (15) /* The minimum supported LLVM version number */ #define sUPPORTED_LLVM_VERSION_MIN (10) /* Define to `unsigned int' if does not define. */ /* #undef size_t */ /* Define as `fork' if `vfork' does not work. 
*/ /* #undef vfork */ #endif /* __GHCAUTOCONF_H__ */ ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/rts/build/include/ghcplatform.h0000644000000000000000000000111514472400073024125 0ustar0000000000000000#if !defined(__GHCPLATFORM_H__) #define __GHCPLATFORM_H__ #define BuildPlatform_TYPE x86_64_apple_darwin #define HostPlatform_TYPE x86_64_apple_darwin #define x86_64_apple_darwin_BUILD 1 #define x86_64_apple_darwin_HOST 1 #define x86_64_BUILD_ARCH 1 #define x86_64_HOST_ARCH 1 #define BUILD_ARCH "x86_64" #define HOST_ARCH "x86_64" #define darwin_BUILD_OS 1 #define darwin_HOST_OS 1 #define BUILD_OS "darwin" #define HOST_OS "darwin" #define apple_BUILD_VENDOR 1 #define apple_HOST_VENDOR 1 #define BUILD_VENDOR "apple" #define HOST_VENDOR "apple" #endif /* __GHCPLATFORM_H__ */ ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/rts/build/include/GhclibDerivedConstants.h0000644000000000000000000007644014472400112026216 0ustar0000000000000000/* This file is created automatically. Do not edit by hand.*/ #define HS_CONSTANTS "291,1,2,4096,252,9,0,8,16,24,32,40,48,56,64,72,80,84,88,92,96,100,104,112,120,128,136,144,152,168,184,200,216,232,248,280,312,344,376,408,440,504,568,632,696,760,824,832,840,848,856,864,872,888,904,-24,-16,-8,24,0,8,48,46,96,72,8,48,8,8,16,8,64,8,16,8,0,72,56,8,16,0,8,8,0,8,0,96,112,16,8,16,0,4,4,24,20,4,15,7,1,-16,255,0,255,7,10,6,6,1,6,6,6,6,6,0,16384,21,1024,8,4,8,8,6,3,30,1152921503533105152,0,1152921504606846976,1" #define CONTROL_GROUP_CONST_291 291 #define STD_HDR_SIZE 1 #define PROF_HDR_SIZE 2 #define STACK_DIRTY 1 #define BLOCK_SIZE 4096 #define MBLOCK_SIZE 1048576 #define BLOCKS_PER_MBLOCK 252 #define TICKY_BIN_COUNT 9 #define OFFSET_StgRegTable_rR1 0 #define OFFSET_StgRegTable_rR2 8 #define OFFSET_StgRegTable_rR3 16 #define OFFSET_StgRegTable_rR4 24 #define OFFSET_StgRegTable_rR5 32 #define OFFSET_StgRegTable_rR6 40 #define OFFSET_StgRegTable_rR7 48 #define OFFSET_StgRegTable_rR8 56 #define OFFSET_StgRegTable_rR9 64 #define OFFSET_StgRegTable_rR10 72 
#define OFFSET_StgRegTable_rF1 80 #define OFFSET_StgRegTable_rF2 84 #define OFFSET_StgRegTable_rF3 88 #define OFFSET_StgRegTable_rF4 92 #define OFFSET_StgRegTable_rF5 96 #define OFFSET_StgRegTable_rF6 100 #define OFFSET_StgRegTable_rD1 104 #define OFFSET_StgRegTable_rD2 112 #define OFFSET_StgRegTable_rD3 120 #define OFFSET_StgRegTable_rD4 128 #define OFFSET_StgRegTable_rD5 136 #define OFFSET_StgRegTable_rD6 144 #define OFFSET_StgRegTable_rXMM1 152 #define OFFSET_StgRegTable_rXMM2 168 #define OFFSET_StgRegTable_rXMM3 184 #define OFFSET_StgRegTable_rXMM4 200 #define OFFSET_StgRegTable_rXMM5 216 #define OFFSET_StgRegTable_rXMM6 232 #define OFFSET_StgRegTable_rYMM1 248 #define OFFSET_StgRegTable_rYMM2 280 #define OFFSET_StgRegTable_rYMM3 312 #define OFFSET_StgRegTable_rYMM4 344 #define OFFSET_StgRegTable_rYMM5 376 #define OFFSET_StgRegTable_rYMM6 408 #define OFFSET_StgRegTable_rZMM1 440 #define OFFSET_StgRegTable_rZMM2 504 #define OFFSET_StgRegTable_rZMM3 568 #define OFFSET_StgRegTable_rZMM4 632 #define OFFSET_StgRegTable_rZMM5 696 #define OFFSET_StgRegTable_rZMM6 760 #define OFFSET_StgRegTable_rL1 824 #define OFFSET_StgRegTable_rSp 832 #define OFFSET_StgRegTable_rSpLim 840 #define OFFSET_StgRegTable_rHp 848 #define OFFSET_StgRegTable_rHpLim 856 #define OFFSET_StgRegTable_rCCCS 864 #define OFFSET_StgRegTable_rCurrentTSO 872 #define OFFSET_StgRegTable_rCurrentNursery 888 #define OFFSET_StgRegTable_rHpAlloc 904 #define OFFSET_StgRegTable_rRet 912 #define REP_StgRegTable_rRet b64 #define StgRegTable_rRet(__ptr__) REP_StgRegTable_rRet[__ptr__+OFFSET_StgRegTable_rRet] #define OFFSET_StgRegTable_rNursery 880 #define REP_StgRegTable_rNursery b64 #define StgRegTable_rNursery(__ptr__) REP_StgRegTable_rNursery[__ptr__+OFFSET_StgRegTable_rNursery] #define OFFSET_stgEagerBlackholeInfo -24 #define OFFSET_stgGCEnter1 -16 #define OFFSET_stgGCFun -8 #define OFFSET_Capability_r 24 #define OFFSET_Capability_lock 1224 #define OFFSET_Capability_no 944 #define REP_Capability_no b32 #define 
Capability_no(__ptr__) REP_Capability_no[__ptr__+OFFSET_Capability_no] #define OFFSET_Capability_mut_lists 1016 #define REP_Capability_mut_lists b64 #define Capability_mut_lists(__ptr__) REP_Capability_mut_lists[__ptr__+OFFSET_Capability_mut_lists] #define OFFSET_Capability_context_switch 1192 #define REP_Capability_context_switch b32 #define Capability_context_switch(__ptr__) REP_Capability_context_switch[__ptr__+OFFSET_Capability_context_switch] #define OFFSET_Capability_interrupt 1196 #define REP_Capability_interrupt b32 #define Capability_interrupt(__ptr__) REP_Capability_interrupt[__ptr__+OFFSET_Capability_interrupt] #define OFFSET_Capability_sparks 1328 #define REP_Capability_sparks b64 #define Capability_sparks(__ptr__) REP_Capability_sparks[__ptr__+OFFSET_Capability_sparks] #define OFFSET_Capability_total_allocated 1200 #define REP_Capability_total_allocated b64 #define Capability_total_allocated(__ptr__) REP_Capability_total_allocated[__ptr__+OFFSET_Capability_total_allocated] #define OFFSET_Capability_weak_ptr_list_hd 1176 #define REP_Capability_weak_ptr_list_hd b64 #define Capability_weak_ptr_list_hd(__ptr__) REP_Capability_weak_ptr_list_hd[__ptr__+OFFSET_Capability_weak_ptr_list_hd] #define OFFSET_Capability_weak_ptr_list_tl 1184 #define REP_Capability_weak_ptr_list_tl b64 #define Capability_weak_ptr_list_tl(__ptr__) REP_Capability_weak_ptr_list_tl[__ptr__+OFFSET_Capability_weak_ptr_list_tl] #define OFFSET_bdescr_start 0 #define REP_bdescr_start b64 #define bdescr_start(__ptr__) REP_bdescr_start[__ptr__+OFFSET_bdescr_start] #define OFFSET_bdescr_free 8 #define REP_bdescr_free b64 #define bdescr_free(__ptr__) REP_bdescr_free[__ptr__+OFFSET_bdescr_free] #define OFFSET_bdescr_blocks 48 #define REP_bdescr_blocks b32 #define bdescr_blocks(__ptr__) REP_bdescr_blocks[__ptr__+OFFSET_bdescr_blocks] #define OFFSET_bdescr_gen_no 40 #define REP_bdescr_gen_no b16 #define bdescr_gen_no(__ptr__) REP_bdescr_gen_no[__ptr__+OFFSET_bdescr_gen_no] #define 
OFFSET_bdescr_link 16 #define REP_bdescr_link b64 #define bdescr_link(__ptr__) REP_bdescr_link[__ptr__+OFFSET_bdescr_link] #define OFFSET_bdescr_flags 46 #define REP_bdescr_flags b16 #define bdescr_flags(__ptr__) REP_bdescr_flags[__ptr__+OFFSET_bdescr_flags] #define SIZEOF_generation 384 #define OFFSET_generation_n_new_large_words 56 #define REP_generation_n_new_large_words b64 #define generation_n_new_large_words(__ptr__) REP_generation_n_new_large_words[__ptr__+OFFSET_generation_n_new_large_words] #define OFFSET_generation_weak_ptr_list 112 #define REP_generation_weak_ptr_list b64 #define generation_weak_ptr_list(__ptr__) REP_generation_weak_ptr_list[__ptr__+OFFSET_generation_weak_ptr_list] #define SIZEOF_CostCentreStack 96 #define OFFSET_CostCentreStack_ccsID 0 #define REP_CostCentreStack_ccsID b64 #define CostCentreStack_ccsID(__ptr__) REP_CostCentreStack_ccsID[__ptr__+OFFSET_CostCentreStack_ccsID] #define OFFSET_CostCentreStack_mem_alloc 72 #define REP_CostCentreStack_mem_alloc b64 #define CostCentreStack_mem_alloc(__ptr__) REP_CostCentreStack_mem_alloc[__ptr__+OFFSET_CostCentreStack_mem_alloc] #define OFFSET_CostCentreStack_scc_count 48 #define REP_CostCentreStack_scc_count b64 #define CostCentreStack_scc_count(__ptr__) REP_CostCentreStack_scc_count[__ptr__+OFFSET_CostCentreStack_scc_count] #define OFFSET_CostCentreStack_prevStack 16 #define REP_CostCentreStack_prevStack b64 #define CostCentreStack_prevStack(__ptr__) REP_CostCentreStack_prevStack[__ptr__+OFFSET_CostCentreStack_prevStack] #define OFFSET_CostCentre_ccID 0 #define REP_CostCentre_ccID b64 #define CostCentre_ccID(__ptr__) REP_CostCentre_ccID[__ptr__+OFFSET_CostCentre_ccID] #define OFFSET_CostCentre_link 56 #define REP_CostCentre_link b64 #define CostCentre_link(__ptr__) REP_CostCentre_link[__ptr__+OFFSET_CostCentre_link] #define OFFSET_StgHeader_info 0 #define REP_StgHeader_info b64 #define StgHeader_info(__ptr__) REP_StgHeader_info[__ptr__+OFFSET_StgHeader_info] #define OFFSET_StgHeader_ccs 8 
#define REP_StgHeader_ccs b64 #define StgHeader_ccs(__ptr__) REP_StgHeader_ccs[__ptr__+OFFSET_StgHeader_ccs] #define OFFSET_StgHeader_ldvw 16 #define REP_StgHeader_ldvw b64 #define StgHeader_ldvw(__ptr__) REP_StgHeader_ldvw[__ptr__+OFFSET_StgHeader_ldvw] #define SIZEOF_StgSMPThunkHeader 8 #define OFFSET_StgClosure_payload 0 #define StgClosure_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgClosure_payload + WDS(__ix__)] #define OFFSET_StgEntCounter_allocs 64 #define REP_StgEntCounter_allocs b64 #define StgEntCounter_allocs(__ptr__) REP_StgEntCounter_allocs[__ptr__+OFFSET_StgEntCounter_allocs] #define OFFSET_StgEntCounter_allocd 16 #define REP_StgEntCounter_allocd b64 #define StgEntCounter_allocd(__ptr__) REP_StgEntCounter_allocd[__ptr__+OFFSET_StgEntCounter_allocd] #define OFFSET_StgEntCounter_registeredp 0 #define REP_StgEntCounter_registeredp b64 #define StgEntCounter_registeredp(__ptr__) REP_StgEntCounter_registeredp[__ptr__+OFFSET_StgEntCounter_registeredp] #define OFFSET_StgEntCounter_link 72 #define REP_StgEntCounter_link b64 #define StgEntCounter_link(__ptr__) REP_StgEntCounter_link[__ptr__+OFFSET_StgEntCounter_link] #define OFFSET_StgEntCounter_entry_count 56 #define REP_StgEntCounter_entry_count b64 #define StgEntCounter_entry_count(__ptr__) REP_StgEntCounter_entry_count[__ptr__+OFFSET_StgEntCounter_entry_count] #define SIZEOF_StgUpdateFrame_NoHdr 8 #define SIZEOF_StgUpdateFrame (SIZEOF_StgHeader+8) #define SIZEOF_StgCatchFrame_NoHdr 16 #define SIZEOF_StgCatchFrame (SIZEOF_StgHeader+16) #define SIZEOF_StgStopFrame_NoHdr 0 #define SIZEOF_StgStopFrame (SIZEOF_StgHeader+0) #define SIZEOF_StgMutArrPtrs_NoHdr 16 #define SIZEOF_StgMutArrPtrs (SIZEOF_StgHeader+16) #define OFFSET_StgMutArrPtrs_ptrs 0 #define REP_StgMutArrPtrs_ptrs b64 #define StgMutArrPtrs_ptrs(__ptr__) REP_StgMutArrPtrs_ptrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgMutArrPtrs_ptrs] #define OFFSET_StgMutArrPtrs_size 8 #define REP_StgMutArrPtrs_size b64 #define StgMutArrPtrs_size(__ptr__) 
REP_StgMutArrPtrs_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgMutArrPtrs_size] #define SIZEOF_StgSmallMutArrPtrs_NoHdr 8 #define SIZEOF_StgSmallMutArrPtrs (SIZEOF_StgHeader+8) #define OFFSET_StgSmallMutArrPtrs_ptrs 0 #define REP_StgSmallMutArrPtrs_ptrs b64 #define StgSmallMutArrPtrs_ptrs(__ptr__) REP_StgSmallMutArrPtrs_ptrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgSmallMutArrPtrs_ptrs] #define SIZEOF_StgArrBytes_NoHdr 8 #define SIZEOF_StgArrBytes (SIZEOF_StgHeader+8) #define OFFSET_StgArrBytes_bytes 0 #define REP_StgArrBytes_bytes b64 #define StgArrBytes_bytes(__ptr__) REP_StgArrBytes_bytes[__ptr__+SIZEOF_StgHeader+OFFSET_StgArrBytes_bytes] #define OFFSET_StgArrBytes_payload 8 #define StgArrBytes_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgArrBytes_payload + WDS(__ix__)] #define OFFSET_StgTSO__link 0 #define REP_StgTSO__link b64 #define StgTSO__link(__ptr__) REP_StgTSO__link[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO__link] #define OFFSET_StgTSO_global_link 8 #define REP_StgTSO_global_link b64 #define StgTSO_global_link(__ptr__) REP_StgTSO_global_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_global_link] #define OFFSET_StgTSO_what_next 24 #define REP_StgTSO_what_next b16 #define StgTSO_what_next(__ptr__) REP_StgTSO_what_next[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_what_next] #define OFFSET_StgTSO_why_blocked 26 #define REP_StgTSO_why_blocked b16 #define StgTSO_why_blocked(__ptr__) REP_StgTSO_why_blocked[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_why_blocked] #define OFFSET_StgTSO_block_info 32 #define REP_StgTSO_block_info b64 #define StgTSO_block_info(__ptr__) REP_StgTSO_block_info[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_block_info] #define OFFSET_StgTSO_blocked_exceptions 80 #define REP_StgTSO_blocked_exceptions b64 #define StgTSO_blocked_exceptions(__ptr__) REP_StgTSO_blocked_exceptions[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_blocked_exceptions] #define OFFSET_StgTSO_id 40 #define REP_StgTSO_id b64 #define StgTSO_id(__ptr__) 
REP_StgTSO_id[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_id] #define OFFSET_StgTSO_cap 64 #define REP_StgTSO_cap b64 #define StgTSO_cap(__ptr__) REP_StgTSO_cap[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_cap] #define OFFSET_StgTSO_saved_errno 48 #define REP_StgTSO_saved_errno b32 #define StgTSO_saved_errno(__ptr__) REP_StgTSO_saved_errno[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_saved_errno] #define OFFSET_StgTSO_trec 72 #define REP_StgTSO_trec b64 #define StgTSO_trec(__ptr__) REP_StgTSO_trec[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_trec] #define OFFSET_StgTSO_flags 28 #define REP_StgTSO_flags b32 #define StgTSO_flags(__ptr__) REP_StgTSO_flags[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_flags] #define OFFSET_StgTSO_dirty 52 #define REP_StgTSO_dirty b32 #define StgTSO_dirty(__ptr__) REP_StgTSO_dirty[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_dirty] #define OFFSET_StgTSO_bq 88 #define REP_StgTSO_bq b64 #define StgTSO_bq(__ptr__) REP_StgTSO_bq[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_bq] #define OFFSET_StgTSO_alloc_limit 96 #define REP_StgTSO_alloc_limit b64 #define StgTSO_alloc_limit(__ptr__) REP_StgTSO_alloc_limit[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_alloc_limit] #define OFFSET_StgTSO_cccs 112 #define REP_StgTSO_cccs b64 #define StgTSO_cccs(__ptr__) REP_StgTSO_cccs[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_cccs] #define OFFSET_StgTSO_stackobj 16 #define REP_StgTSO_stackobj b64 #define StgTSO_stackobj(__ptr__) REP_StgTSO_stackobj[__ptr__+SIZEOF_StgHeader+OFFSET_StgTSO_stackobj] #define OFFSET_StgStack_sp 8 #define REP_StgStack_sp b64 #define StgStack_sp(__ptr__) REP_StgStack_sp[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_sp] #define OFFSET_StgStack_stack 16 #define OFFSET_StgStack_stack_size 0 #define REP_StgStack_stack_size b32 #define StgStack_stack_size(__ptr__) REP_StgStack_stack_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_stack_size] #define OFFSET_StgStack_dirty 4 #define REP_StgStack_dirty b8 #define StgStack_dirty(__ptr__) 
REP_StgStack_dirty[__ptr__+SIZEOF_StgHeader+OFFSET_StgStack_dirty] #define SIZEOF_StgTSOProfInfo 8 #define OFFSET_StgUpdateFrame_updatee 0 #define REP_StgUpdateFrame_updatee b64 #define StgUpdateFrame_updatee(__ptr__) REP_StgUpdateFrame_updatee[__ptr__+SIZEOF_StgHeader+OFFSET_StgUpdateFrame_updatee] #define OFFSET_StgCatchFrame_handler 8 #define REP_StgCatchFrame_handler b64 #define StgCatchFrame_handler(__ptr__) REP_StgCatchFrame_handler[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchFrame_handler] #define OFFSET_StgCatchFrame_exceptions_blocked 0 #define REP_StgCatchFrame_exceptions_blocked b64 #define StgCatchFrame_exceptions_blocked(__ptr__) REP_StgCatchFrame_exceptions_blocked[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchFrame_exceptions_blocked] #define SIZEOF_StgPAP_NoHdr 16 #define SIZEOF_StgPAP (SIZEOF_StgHeader+16) #define OFFSET_StgPAP_n_args 4 #define REP_StgPAP_n_args b32 #define StgPAP_n_args(__ptr__) REP_StgPAP_n_args[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_n_args] #define OFFSET_StgPAP_fun 8 #define REP_StgPAP_fun gcptr #define StgPAP_fun(__ptr__) REP_StgPAP_fun[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_fun] #define OFFSET_StgPAP_arity 0 #define REP_StgPAP_arity b32 #define StgPAP_arity(__ptr__) REP_StgPAP_arity[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_arity] #define OFFSET_StgPAP_payload 16 #define StgPAP_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgPAP_payload + WDS(__ix__)] #define SIZEOF_StgAP_NoThunkHdr 16 #define SIZEOF_StgAP_NoHdr 24 #define SIZEOF_StgAP (SIZEOF_StgHeader+24) #define OFFSET_StgAP_n_args 12 #define REP_StgAP_n_args b32 #define StgAP_n_args(__ptr__) REP_StgAP_n_args[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_n_args] #define OFFSET_StgAP_fun 16 #define REP_StgAP_fun gcptr #define StgAP_fun(__ptr__) REP_StgAP_fun[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_fun] #define OFFSET_StgAP_payload 24 #define StgAP_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_payload + WDS(__ix__)] #define SIZEOF_StgAP_STACK_NoThunkHdr 16 #define 
SIZEOF_StgAP_STACK_NoHdr 24 #define SIZEOF_StgAP_STACK (SIZEOF_StgHeader+24) #define OFFSET_StgAP_STACK_size 8 #define REP_StgAP_STACK_size b64 #define StgAP_STACK_size(__ptr__) REP_StgAP_STACK_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_STACK_size] #define OFFSET_StgAP_STACK_fun 16 #define REP_StgAP_STACK_fun gcptr #define StgAP_STACK_fun(__ptr__) REP_StgAP_STACK_fun[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_STACK_fun] #define OFFSET_StgAP_STACK_payload 24 #define StgAP_STACK_payload(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgAP_STACK_payload + WDS(__ix__)] #define SIZEOF_StgSelector_NoThunkHdr 8 #define SIZEOF_StgSelector_NoHdr 16 #define SIZEOF_StgSelector (SIZEOF_StgHeader+16) #define OFFSET_StgInd_indirectee 0 #define REP_StgInd_indirectee gcptr #define StgInd_indirectee(__ptr__) REP_StgInd_indirectee[__ptr__+SIZEOF_StgHeader+OFFSET_StgInd_indirectee] #define SIZEOF_StgMutVar_NoHdr 8 #define SIZEOF_StgMutVar (SIZEOF_StgHeader+8) #define OFFSET_StgMutVar_var 0 #define REP_StgMutVar_var b64 #define StgMutVar_var(__ptr__) REP_StgMutVar_var[__ptr__+SIZEOF_StgHeader+OFFSET_StgMutVar_var] #define SIZEOF_StgAtomicallyFrame_NoHdr 16 #define SIZEOF_StgAtomicallyFrame (SIZEOF_StgHeader+16) #define OFFSET_StgAtomicallyFrame_code 0 #define REP_StgAtomicallyFrame_code b64 #define StgAtomicallyFrame_code(__ptr__) REP_StgAtomicallyFrame_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgAtomicallyFrame_code] #define OFFSET_StgAtomicallyFrame_result 8 #define REP_StgAtomicallyFrame_result b64 #define StgAtomicallyFrame_result(__ptr__) REP_StgAtomicallyFrame_result[__ptr__+SIZEOF_StgHeader+OFFSET_StgAtomicallyFrame_result] #define OFFSET_StgTRecHeader_enclosing_trec 0 #define REP_StgTRecHeader_enclosing_trec b64 #define StgTRecHeader_enclosing_trec(__ptr__) REP_StgTRecHeader_enclosing_trec[__ptr__+SIZEOF_StgHeader+OFFSET_StgTRecHeader_enclosing_trec] #define SIZEOF_StgCatchSTMFrame_NoHdr 16 #define SIZEOF_StgCatchSTMFrame (SIZEOF_StgHeader+16) #define 
OFFSET_StgCatchSTMFrame_handler 8 #define REP_StgCatchSTMFrame_handler b64 #define StgCatchSTMFrame_handler(__ptr__) REP_StgCatchSTMFrame_handler[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchSTMFrame_handler] #define OFFSET_StgCatchSTMFrame_code 0 #define REP_StgCatchSTMFrame_code b64 #define StgCatchSTMFrame_code(__ptr__) REP_StgCatchSTMFrame_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchSTMFrame_code] #define SIZEOF_StgCatchRetryFrame_NoHdr 24 #define SIZEOF_StgCatchRetryFrame (SIZEOF_StgHeader+24) #define OFFSET_StgCatchRetryFrame_running_alt_code 0 #define REP_StgCatchRetryFrame_running_alt_code b64 #define StgCatchRetryFrame_running_alt_code(__ptr__) REP_StgCatchRetryFrame_running_alt_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchRetryFrame_running_alt_code] #define OFFSET_StgCatchRetryFrame_first_code 8 #define REP_StgCatchRetryFrame_first_code b64 #define StgCatchRetryFrame_first_code(__ptr__) REP_StgCatchRetryFrame_first_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchRetryFrame_first_code] #define OFFSET_StgCatchRetryFrame_alt_code 16 #define REP_StgCatchRetryFrame_alt_code b64 #define StgCatchRetryFrame_alt_code(__ptr__) REP_StgCatchRetryFrame_alt_code[__ptr__+SIZEOF_StgHeader+OFFSET_StgCatchRetryFrame_alt_code] #define OFFSET_StgTVarWatchQueue_closure 0 #define REP_StgTVarWatchQueue_closure b64 #define StgTVarWatchQueue_closure(__ptr__) REP_StgTVarWatchQueue_closure[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVarWatchQueue_closure] #define OFFSET_StgTVarWatchQueue_next_queue_entry 8 #define REP_StgTVarWatchQueue_next_queue_entry b64 #define StgTVarWatchQueue_next_queue_entry(__ptr__) REP_StgTVarWatchQueue_next_queue_entry[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVarWatchQueue_next_queue_entry] #define OFFSET_StgTVarWatchQueue_prev_queue_entry 16 #define REP_StgTVarWatchQueue_prev_queue_entry b64 #define StgTVarWatchQueue_prev_queue_entry(__ptr__) REP_StgTVarWatchQueue_prev_queue_entry[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVarWatchQueue_prev_queue_entry] #define 
SIZEOF_StgTVar_NoHdr 24 #define SIZEOF_StgTVar (SIZEOF_StgHeader+24) #define OFFSET_StgTVar_current_value 0 #define REP_StgTVar_current_value b64 #define StgTVar_current_value(__ptr__) REP_StgTVar_current_value[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVar_current_value] #define OFFSET_StgTVar_first_watch_queue_entry 8 #define REP_StgTVar_first_watch_queue_entry b64 #define StgTVar_first_watch_queue_entry(__ptr__) REP_StgTVar_first_watch_queue_entry[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVar_first_watch_queue_entry] #define OFFSET_StgTVar_num_updates 16 #define REP_StgTVar_num_updates b64 #define StgTVar_num_updates(__ptr__) REP_StgTVar_num_updates[__ptr__+SIZEOF_StgHeader+OFFSET_StgTVar_num_updates] #define SIZEOF_StgWeak_NoHdr 40 #define SIZEOF_StgWeak (SIZEOF_StgHeader+40) #define OFFSET_StgWeak_link 32 #define REP_StgWeak_link b64 #define StgWeak_link(__ptr__) REP_StgWeak_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_link] #define OFFSET_StgWeak_key 8 #define REP_StgWeak_key b64 #define StgWeak_key(__ptr__) REP_StgWeak_key[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_key] #define OFFSET_StgWeak_value 16 #define REP_StgWeak_value b64 #define StgWeak_value(__ptr__) REP_StgWeak_value[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_value] #define OFFSET_StgWeak_finalizer 24 #define REP_StgWeak_finalizer b64 #define StgWeak_finalizer(__ptr__) REP_StgWeak_finalizer[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_finalizer] #define OFFSET_StgWeak_cfinalizers 0 #define REP_StgWeak_cfinalizers b64 #define StgWeak_cfinalizers(__ptr__) REP_StgWeak_cfinalizers[__ptr__+SIZEOF_StgHeader+OFFSET_StgWeak_cfinalizers] #define SIZEOF_StgCFinalizerList_NoHdr 40 #define SIZEOF_StgCFinalizerList (SIZEOF_StgHeader+40) #define OFFSET_StgCFinalizerList_link 0 #define REP_StgCFinalizerList_link b64 #define StgCFinalizerList_link(__ptr__) REP_StgCFinalizerList_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_link] #define OFFSET_StgCFinalizerList_fptr 8 #define REP_StgCFinalizerList_fptr b64 #define 
StgCFinalizerList_fptr(__ptr__) REP_StgCFinalizerList_fptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_fptr] #define OFFSET_StgCFinalizerList_ptr 16 #define REP_StgCFinalizerList_ptr b64 #define StgCFinalizerList_ptr(__ptr__) REP_StgCFinalizerList_ptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_ptr] #define OFFSET_StgCFinalizerList_eptr 24 #define REP_StgCFinalizerList_eptr b64 #define StgCFinalizerList_eptr(__ptr__) REP_StgCFinalizerList_eptr[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_eptr] #define OFFSET_StgCFinalizerList_flag 32 #define REP_StgCFinalizerList_flag b64 #define StgCFinalizerList_flag(__ptr__) REP_StgCFinalizerList_flag[__ptr__+SIZEOF_StgHeader+OFFSET_StgCFinalizerList_flag] #define SIZEOF_StgMVar_NoHdr 24 #define SIZEOF_StgMVar (SIZEOF_StgHeader+24) #define OFFSET_StgMVar_head 0 #define REP_StgMVar_head b64 #define StgMVar_head(__ptr__) REP_StgMVar_head[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVar_head] #define OFFSET_StgMVar_tail 8 #define REP_StgMVar_tail b64 #define StgMVar_tail(__ptr__) REP_StgMVar_tail[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVar_tail] #define OFFSET_StgMVar_value 16 #define REP_StgMVar_value b64 #define StgMVar_value(__ptr__) REP_StgMVar_value[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVar_value] #define SIZEOF_StgMVarTSOQueue_NoHdr 16 #define SIZEOF_StgMVarTSOQueue (SIZEOF_StgHeader+16) #define OFFSET_StgMVarTSOQueue_link 0 #define REP_StgMVarTSOQueue_link b64 #define StgMVarTSOQueue_link(__ptr__) REP_StgMVarTSOQueue_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVarTSOQueue_link] #define OFFSET_StgMVarTSOQueue_tso 8 #define REP_StgMVarTSOQueue_tso b64 #define StgMVarTSOQueue_tso(__ptr__) REP_StgMVarTSOQueue_tso[__ptr__+SIZEOF_StgHeader+OFFSET_StgMVarTSOQueue_tso] #define SIZEOF_StgBCO_NoHdr 32 #define SIZEOF_StgBCO (SIZEOF_StgHeader+32) #define OFFSET_StgBCO_instrs 0 #define REP_StgBCO_instrs b64 #define StgBCO_instrs(__ptr__) REP_StgBCO_instrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_instrs] #define OFFSET_StgBCO_literals 8 
#define REP_StgBCO_literals b64 #define StgBCO_literals(__ptr__) REP_StgBCO_literals[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_literals] #define OFFSET_StgBCO_ptrs 16 #define REP_StgBCO_ptrs b64 #define StgBCO_ptrs(__ptr__) REP_StgBCO_ptrs[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_ptrs] #define OFFSET_StgBCO_arity 24 #define REP_StgBCO_arity b32 #define StgBCO_arity(__ptr__) REP_StgBCO_arity[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_arity] #define OFFSET_StgBCO_size 28 #define REP_StgBCO_size b32 #define StgBCO_size(__ptr__) REP_StgBCO_size[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_size] #define OFFSET_StgBCO_bitmap 32 #define StgBCO_bitmap(__ptr__,__ix__) W_[__ptr__+SIZEOF_StgHeader+OFFSET_StgBCO_bitmap + WDS(__ix__)] #define SIZEOF_StgStableName_NoHdr 8 #define SIZEOF_StgStableName (SIZEOF_StgHeader+8) #define OFFSET_StgStableName_sn 0 #define REP_StgStableName_sn b64 #define StgStableName_sn(__ptr__) REP_StgStableName_sn[__ptr__+SIZEOF_StgHeader+OFFSET_StgStableName_sn] #define SIZEOF_StgBlockingQueue_NoHdr 32 #define SIZEOF_StgBlockingQueue (SIZEOF_StgHeader+32) #define OFFSET_StgBlockingQueue_bh 8 #define REP_StgBlockingQueue_bh b64 #define StgBlockingQueue_bh(__ptr__) REP_StgBlockingQueue_bh[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_bh] #define OFFSET_StgBlockingQueue_owner 16 #define REP_StgBlockingQueue_owner b64 #define StgBlockingQueue_owner(__ptr__) REP_StgBlockingQueue_owner[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_owner] #define OFFSET_StgBlockingQueue_queue 24 #define REP_StgBlockingQueue_queue b64 #define StgBlockingQueue_queue(__ptr__) REP_StgBlockingQueue_queue[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_queue] #define OFFSET_StgBlockingQueue_link 0 #define REP_StgBlockingQueue_link b64 #define StgBlockingQueue_link(__ptr__) REP_StgBlockingQueue_link[__ptr__+SIZEOF_StgHeader+OFFSET_StgBlockingQueue_link] #define SIZEOF_MessageBlackHole_NoHdr 24 #define SIZEOF_MessageBlackHole (SIZEOF_StgHeader+24) #define OFFSET_MessageBlackHole_link 0 
#define REP_MessageBlackHole_link b64 #define MessageBlackHole_link(__ptr__) REP_MessageBlackHole_link[__ptr__+SIZEOF_StgHeader+OFFSET_MessageBlackHole_link] #define OFFSET_MessageBlackHole_tso 8 #define REP_MessageBlackHole_tso b64 #define MessageBlackHole_tso(__ptr__) REP_MessageBlackHole_tso[__ptr__+SIZEOF_StgHeader+OFFSET_MessageBlackHole_tso] #define OFFSET_MessageBlackHole_bh 16 #define REP_MessageBlackHole_bh b64 #define MessageBlackHole_bh(__ptr__) REP_MessageBlackHole_bh[__ptr__+SIZEOF_StgHeader+OFFSET_MessageBlackHole_bh] #define SIZEOF_StgCompactNFData_NoHdr 72 #define SIZEOF_StgCompactNFData (SIZEOF_StgHeader+72) #define OFFSET_StgCompactNFData_totalW 0 #define REP_StgCompactNFData_totalW b64 #define StgCompactNFData_totalW(__ptr__) REP_StgCompactNFData_totalW[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_totalW] #define OFFSET_StgCompactNFData_autoBlockW 8 #define REP_StgCompactNFData_autoBlockW b64 #define StgCompactNFData_autoBlockW(__ptr__) REP_StgCompactNFData_autoBlockW[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_autoBlockW] #define OFFSET_StgCompactNFData_nursery 32 #define REP_StgCompactNFData_nursery b64 #define StgCompactNFData_nursery(__ptr__) REP_StgCompactNFData_nursery[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_nursery] #define OFFSET_StgCompactNFData_last 40 #define REP_StgCompactNFData_last b64 #define StgCompactNFData_last(__ptr__) REP_StgCompactNFData_last[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_last] #define OFFSET_StgCompactNFData_hp 16 #define REP_StgCompactNFData_hp b64 #define StgCompactNFData_hp(__ptr__) REP_StgCompactNFData_hp[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_hp] #define OFFSET_StgCompactNFData_hpLim 24 #define REP_StgCompactNFData_hpLim b64 #define StgCompactNFData_hpLim(__ptr__) REP_StgCompactNFData_hpLim[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_hpLim] #define OFFSET_StgCompactNFData_hash 48 #define REP_StgCompactNFData_hash b64 #define StgCompactNFData_hash(__ptr__) 
REP_StgCompactNFData_hash[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_hash] #define OFFSET_StgCompactNFData_result 56 #define REP_StgCompactNFData_result b64 #define StgCompactNFData_result(__ptr__) REP_StgCompactNFData_result[__ptr__+SIZEOF_StgHeader+OFFSET_StgCompactNFData_result] #define SIZEOF_StgCompactNFDataBlock 24 #define OFFSET_StgCompactNFDataBlock_self 0 #define REP_StgCompactNFDataBlock_self b64 #define StgCompactNFDataBlock_self(__ptr__) REP_StgCompactNFDataBlock_self[__ptr__+OFFSET_StgCompactNFDataBlock_self] #define OFFSET_StgCompactNFDataBlock_owner 8 #define REP_StgCompactNFDataBlock_owner b64 #define StgCompactNFDataBlock_owner(__ptr__) REP_StgCompactNFDataBlock_owner[__ptr__+OFFSET_StgCompactNFDataBlock_owner] #define OFFSET_StgCompactNFDataBlock_next 16 #define REP_StgCompactNFDataBlock_next b64 #define StgCompactNFDataBlock_next(__ptr__) REP_StgCompactNFDataBlock_next[__ptr__+OFFSET_StgCompactNFDataBlock_next] #define OFFSET_RtsFlags_ProfFlags_doHeapProfile 280 #define REP_RtsFlags_ProfFlags_doHeapProfile b32 #define RtsFlags_ProfFlags_doHeapProfile(__ptr__) REP_RtsFlags_ProfFlags_doHeapProfile[__ptr__+OFFSET_RtsFlags_ProfFlags_doHeapProfile] #define OFFSET_RtsFlags_ProfFlags_showCCSOnException 301 #define REP_RtsFlags_ProfFlags_showCCSOnException b8 #define RtsFlags_ProfFlags_showCCSOnException(__ptr__) REP_RtsFlags_ProfFlags_showCCSOnException[__ptr__+OFFSET_RtsFlags_ProfFlags_showCCSOnException] #define OFFSET_RtsFlags_DebugFlags_apply 245 #define REP_RtsFlags_DebugFlags_apply b8 #define RtsFlags_DebugFlags_apply(__ptr__) REP_RtsFlags_DebugFlags_apply[__ptr__+OFFSET_RtsFlags_DebugFlags_apply] #define OFFSET_RtsFlags_DebugFlags_sanity 239 #define REP_RtsFlags_DebugFlags_sanity b8 #define RtsFlags_DebugFlags_sanity(__ptr__) REP_RtsFlags_DebugFlags_sanity[__ptr__+OFFSET_RtsFlags_DebugFlags_sanity] #define OFFSET_RtsFlags_DebugFlags_weak 234 #define REP_RtsFlags_DebugFlags_weak b8 #define RtsFlags_DebugFlags_weak(__ptr__) 
REP_RtsFlags_DebugFlags_weak[__ptr__+OFFSET_RtsFlags_DebugFlags_weak] #define OFFSET_RtsFlags_GcFlags_initialStkSize 16 #define REP_RtsFlags_GcFlags_initialStkSize b32 #define RtsFlags_GcFlags_initialStkSize(__ptr__) REP_RtsFlags_GcFlags_initialStkSize[__ptr__+OFFSET_RtsFlags_GcFlags_initialStkSize] #define OFFSET_RtsFlags_MiscFlags_tickInterval 200 #define REP_RtsFlags_MiscFlags_tickInterval b64 #define RtsFlags_MiscFlags_tickInterval(__ptr__) REP_RtsFlags_MiscFlags_tickInterval[__ptr__+OFFSET_RtsFlags_MiscFlags_tickInterval] #define SIZEOF_StgFunInfoExtraFwd 32 #define OFFSET_StgFunInfoExtraFwd_slow_apply 24 #define REP_StgFunInfoExtraFwd_slow_apply b64 #define StgFunInfoExtraFwd_slow_apply(__ptr__) REP_StgFunInfoExtraFwd_slow_apply[__ptr__+OFFSET_StgFunInfoExtraFwd_slow_apply] #define OFFSET_StgFunInfoExtraFwd_fun_type 0 #define REP_StgFunInfoExtraFwd_fun_type b32 #define StgFunInfoExtraFwd_fun_type(__ptr__) REP_StgFunInfoExtraFwd_fun_type[__ptr__+OFFSET_StgFunInfoExtraFwd_fun_type] #define OFFSET_StgFunInfoExtraFwd_arity 4 #define REP_StgFunInfoExtraFwd_arity b32 #define StgFunInfoExtraFwd_arity(__ptr__) REP_StgFunInfoExtraFwd_arity[__ptr__+OFFSET_StgFunInfoExtraFwd_arity] #define OFFSET_StgFunInfoExtraFwd_bitmap 16 #define REP_StgFunInfoExtraFwd_bitmap b64 #define StgFunInfoExtraFwd_bitmap(__ptr__) REP_StgFunInfoExtraFwd_bitmap[__ptr__+OFFSET_StgFunInfoExtraFwd_bitmap] #define SIZEOF_StgFunInfoExtraRev 24 #define OFFSET_StgFunInfoExtraRev_slow_apply_offset 0 #define REP_StgFunInfoExtraRev_slow_apply_offset b32 #define StgFunInfoExtraRev_slow_apply_offset(__ptr__) REP_StgFunInfoExtraRev_slow_apply_offset[__ptr__+OFFSET_StgFunInfoExtraRev_slow_apply_offset] #define OFFSET_StgFunInfoExtraRev_fun_type 16 #define REP_StgFunInfoExtraRev_fun_type b32 #define StgFunInfoExtraRev_fun_type(__ptr__) REP_StgFunInfoExtraRev_fun_type[__ptr__+OFFSET_StgFunInfoExtraRev_fun_type] #define OFFSET_StgFunInfoExtraRev_arity 20 #define REP_StgFunInfoExtraRev_arity b32 #define 
StgFunInfoExtraRev_arity(__ptr__) REP_StgFunInfoExtraRev_arity[__ptr__+OFFSET_StgFunInfoExtraRev_arity] #define OFFSET_StgFunInfoExtraRev_bitmap 8 #define REP_StgFunInfoExtraRev_bitmap b64 #define StgFunInfoExtraRev_bitmap(__ptr__) REP_StgFunInfoExtraRev_bitmap[__ptr__+OFFSET_StgFunInfoExtraRev_bitmap] #define OFFSET_StgFunInfoExtraRev_bitmap_offset 8 #define REP_StgFunInfoExtraRev_bitmap_offset b32 #define StgFunInfoExtraRev_bitmap_offset(__ptr__) REP_StgFunInfoExtraRev_bitmap_offset[__ptr__+OFFSET_StgFunInfoExtraRev_bitmap_offset] #define OFFSET_StgLargeBitmap_size 0 #define REP_StgLargeBitmap_size b64 #define StgLargeBitmap_size(__ptr__) REP_StgLargeBitmap_size[__ptr__+OFFSET_StgLargeBitmap_size] #define OFFSET_StgLargeBitmap_bitmap 8 #define SIZEOF_snEntry 24 #define OFFSET_snEntry_sn_obj 16 #define REP_snEntry_sn_obj b64 #define snEntry_sn_obj(__ptr__) REP_snEntry_sn_obj[__ptr__+OFFSET_snEntry_sn_obj] #define OFFSET_snEntry_addr 0 #define REP_snEntry_addr b64 #define snEntry_addr(__ptr__) REP_snEntry_addr[__ptr__+OFFSET_snEntry_addr] #define SIZEOF_spEntry 8 #define OFFSET_spEntry_addr 0 #define REP_spEntry_addr b64 #define spEntry_addr(__ptr__) REP_spEntry_addr[__ptr__+OFFSET_spEntry_addr] ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-can-fail.hs-incl0000644000000000000000000002504314472400112025262 0ustar0000000000000000primOpCanFail Int8QuotOp = True primOpCanFail Int8RemOp = True primOpCanFail Int8QuotRemOp = True primOpCanFail Word8QuotOp = True primOpCanFail Word8RemOp = True primOpCanFail Word8QuotRemOp = True primOpCanFail Int16QuotOp = True primOpCanFail Int16RemOp = True primOpCanFail Int16QuotRemOp = True primOpCanFail Word16QuotOp = True primOpCanFail Word16RemOp = True primOpCanFail Word16QuotRemOp = True primOpCanFail Int32QuotOp = True primOpCanFail Int32RemOp = True primOpCanFail Int32QuotRemOp = True primOpCanFail Word32QuotOp = True primOpCanFail Word32RemOp = True primOpCanFail Word32QuotRemOp = True primOpCanFail 
Int64QuotOp = True primOpCanFail Int64RemOp = True primOpCanFail Word64QuotOp = True primOpCanFail Word64RemOp = True primOpCanFail IntQuotOp = True primOpCanFail IntRemOp = True primOpCanFail IntQuotRemOp = True primOpCanFail WordQuotOp = True primOpCanFail WordRemOp = True primOpCanFail WordQuotRemOp = True primOpCanFail WordQuotRem2Op = True primOpCanFail DoubleDivOp = True primOpCanFail DoubleLogOp = True primOpCanFail DoubleLog1POp = True primOpCanFail DoubleAsinOp = True primOpCanFail DoubleAcosOp = True primOpCanFail FloatDivOp = True primOpCanFail FloatLogOp = True primOpCanFail FloatLog1POp = True primOpCanFail FloatAsinOp = True primOpCanFail FloatAcosOp = True primOpCanFail ReadArrayOp = True primOpCanFail WriteArrayOp = True primOpCanFail IndexArrayOp = True primOpCanFail CopyArrayOp = True primOpCanFail CopyMutableArrayOp = True primOpCanFail CloneArrayOp = True primOpCanFail CloneMutableArrayOp = True primOpCanFail FreezeArrayOp = True primOpCanFail ThawArrayOp = True primOpCanFail CasArrayOp = True primOpCanFail ReadSmallArrayOp = True primOpCanFail WriteSmallArrayOp = True primOpCanFail IndexSmallArrayOp = True primOpCanFail CopySmallArrayOp = True primOpCanFail CopySmallMutableArrayOp = True primOpCanFail CloneSmallArrayOp = True primOpCanFail CloneSmallMutableArrayOp = True primOpCanFail FreezeSmallArrayOp = True primOpCanFail ThawSmallArrayOp = True primOpCanFail CasSmallArrayOp = True primOpCanFail IndexByteArrayOp_Char = True primOpCanFail IndexByteArrayOp_WideChar = True primOpCanFail IndexByteArrayOp_Int = True primOpCanFail IndexByteArrayOp_Word = True primOpCanFail IndexByteArrayOp_Addr = True primOpCanFail IndexByteArrayOp_Float = True primOpCanFail IndexByteArrayOp_Double = True primOpCanFail IndexByteArrayOp_StablePtr = True primOpCanFail IndexByteArrayOp_Int8 = True primOpCanFail IndexByteArrayOp_Int16 = True primOpCanFail IndexByteArrayOp_Int32 = True primOpCanFail IndexByteArrayOp_Int64 = True primOpCanFail IndexByteArrayOp_Word8 = 
True primOpCanFail IndexByteArrayOp_Word16 = True primOpCanFail IndexByteArrayOp_Word32 = True primOpCanFail IndexByteArrayOp_Word64 = True primOpCanFail IndexByteArrayOp_Word8AsChar = True primOpCanFail IndexByteArrayOp_Word8AsWideChar = True primOpCanFail IndexByteArrayOp_Word8AsInt = True primOpCanFail IndexByteArrayOp_Word8AsWord = True primOpCanFail IndexByteArrayOp_Word8AsAddr = True primOpCanFail IndexByteArrayOp_Word8AsFloat = True primOpCanFail IndexByteArrayOp_Word8AsDouble = True primOpCanFail IndexByteArrayOp_Word8AsStablePtr = True primOpCanFail IndexByteArrayOp_Word8AsInt16 = True primOpCanFail IndexByteArrayOp_Word8AsInt32 = True primOpCanFail IndexByteArrayOp_Word8AsInt64 = True primOpCanFail IndexByteArrayOp_Word8AsWord16 = True primOpCanFail IndexByteArrayOp_Word8AsWord32 = True primOpCanFail IndexByteArrayOp_Word8AsWord64 = True primOpCanFail ReadByteArrayOp_Char = True primOpCanFail ReadByteArrayOp_WideChar = True primOpCanFail ReadByteArrayOp_Int = True primOpCanFail ReadByteArrayOp_Word = True primOpCanFail ReadByteArrayOp_Addr = True primOpCanFail ReadByteArrayOp_Float = True primOpCanFail ReadByteArrayOp_Double = True primOpCanFail ReadByteArrayOp_StablePtr = True primOpCanFail ReadByteArrayOp_Int8 = True primOpCanFail ReadByteArrayOp_Int16 = True primOpCanFail ReadByteArrayOp_Int32 = True primOpCanFail ReadByteArrayOp_Int64 = True primOpCanFail ReadByteArrayOp_Word8 = True primOpCanFail ReadByteArrayOp_Word16 = True primOpCanFail ReadByteArrayOp_Word32 = True primOpCanFail ReadByteArrayOp_Word64 = True primOpCanFail ReadByteArrayOp_Word8AsChar = True primOpCanFail ReadByteArrayOp_Word8AsWideChar = True primOpCanFail ReadByteArrayOp_Word8AsInt = True primOpCanFail ReadByteArrayOp_Word8AsWord = True primOpCanFail ReadByteArrayOp_Word8AsAddr = True primOpCanFail ReadByteArrayOp_Word8AsFloat = True primOpCanFail ReadByteArrayOp_Word8AsDouble = True primOpCanFail ReadByteArrayOp_Word8AsStablePtr = True primOpCanFail ReadByteArrayOp_Word8AsInt16 
= True primOpCanFail ReadByteArrayOp_Word8AsInt32 = True primOpCanFail ReadByteArrayOp_Word8AsInt64 = True primOpCanFail ReadByteArrayOp_Word8AsWord16 = True primOpCanFail ReadByteArrayOp_Word8AsWord32 = True primOpCanFail ReadByteArrayOp_Word8AsWord64 = True primOpCanFail WriteByteArrayOp_Char = True primOpCanFail WriteByteArrayOp_WideChar = True primOpCanFail WriteByteArrayOp_Int = True primOpCanFail WriteByteArrayOp_Word = True primOpCanFail WriteByteArrayOp_Addr = True primOpCanFail WriteByteArrayOp_Float = True primOpCanFail WriteByteArrayOp_Double = True primOpCanFail WriteByteArrayOp_StablePtr = True primOpCanFail WriteByteArrayOp_Int8 = True primOpCanFail WriteByteArrayOp_Int16 = True primOpCanFail WriteByteArrayOp_Int32 = True primOpCanFail WriteByteArrayOp_Int64 = True primOpCanFail WriteByteArrayOp_Word8 = True primOpCanFail WriteByteArrayOp_Word16 = True primOpCanFail WriteByteArrayOp_Word32 = True primOpCanFail WriteByteArrayOp_Word64 = True primOpCanFail WriteByteArrayOp_Word8AsChar = True primOpCanFail WriteByteArrayOp_Word8AsWideChar = True primOpCanFail WriteByteArrayOp_Word8AsInt = True primOpCanFail WriteByteArrayOp_Word8AsWord = True primOpCanFail WriteByteArrayOp_Word8AsAddr = True primOpCanFail WriteByteArrayOp_Word8AsFloat = True primOpCanFail WriteByteArrayOp_Word8AsDouble = True primOpCanFail WriteByteArrayOp_Word8AsStablePtr = True primOpCanFail WriteByteArrayOp_Word8AsInt16 = True primOpCanFail WriteByteArrayOp_Word8AsInt32 = True primOpCanFail WriteByteArrayOp_Word8AsInt64 = True primOpCanFail WriteByteArrayOp_Word8AsWord16 = True primOpCanFail WriteByteArrayOp_Word8AsWord32 = True primOpCanFail WriteByteArrayOp_Word8AsWord64 = True primOpCanFail CompareByteArraysOp = True primOpCanFail CopyByteArrayOp = True primOpCanFail CopyMutableByteArrayOp = True primOpCanFail CopyByteArrayToAddrOp = True primOpCanFail CopyMutableByteArrayToAddrOp = True primOpCanFail CopyAddrToByteArrayOp = True primOpCanFail SetByteArrayOp = True primOpCanFail 
AtomicReadByteArrayOp_Int = True primOpCanFail AtomicWriteByteArrayOp_Int = True primOpCanFail CasByteArrayOp_Int = True primOpCanFail CasByteArrayOp_Int8 = True primOpCanFail CasByteArrayOp_Int16 = True primOpCanFail CasByteArrayOp_Int32 = True primOpCanFail CasByteArrayOp_Int64 = True primOpCanFail FetchAddByteArrayOp_Int = True primOpCanFail FetchSubByteArrayOp_Int = True primOpCanFail FetchAndByteArrayOp_Int = True primOpCanFail FetchNandByteArrayOp_Int = True primOpCanFail FetchOrByteArrayOp_Int = True primOpCanFail FetchXorByteArrayOp_Int = True primOpCanFail IndexOffAddrOp_Char = True primOpCanFail IndexOffAddrOp_WideChar = True primOpCanFail IndexOffAddrOp_Int = True primOpCanFail IndexOffAddrOp_Word = True primOpCanFail IndexOffAddrOp_Addr = True primOpCanFail IndexOffAddrOp_Float = True primOpCanFail IndexOffAddrOp_Double = True primOpCanFail IndexOffAddrOp_StablePtr = True primOpCanFail IndexOffAddrOp_Int8 = True primOpCanFail IndexOffAddrOp_Int16 = True primOpCanFail IndexOffAddrOp_Int32 = True primOpCanFail IndexOffAddrOp_Int64 = True primOpCanFail IndexOffAddrOp_Word8 = True primOpCanFail IndexOffAddrOp_Word16 = True primOpCanFail IndexOffAddrOp_Word32 = True primOpCanFail IndexOffAddrOp_Word64 = True primOpCanFail ReadOffAddrOp_Char = True primOpCanFail ReadOffAddrOp_WideChar = True primOpCanFail ReadOffAddrOp_Int = True primOpCanFail ReadOffAddrOp_Word = True primOpCanFail ReadOffAddrOp_Addr = True primOpCanFail ReadOffAddrOp_Float = True primOpCanFail ReadOffAddrOp_Double = True primOpCanFail ReadOffAddrOp_StablePtr = True primOpCanFail ReadOffAddrOp_Int8 = True primOpCanFail ReadOffAddrOp_Int16 = True primOpCanFail ReadOffAddrOp_Int32 = True primOpCanFail ReadOffAddrOp_Int64 = True primOpCanFail ReadOffAddrOp_Word8 = True primOpCanFail ReadOffAddrOp_Word16 = True primOpCanFail ReadOffAddrOp_Word32 = True primOpCanFail ReadOffAddrOp_Word64 = True primOpCanFail WriteOffAddrOp_Char = True primOpCanFail WriteOffAddrOp_WideChar = True primOpCanFail 
WriteOffAddrOp_Int = True primOpCanFail WriteOffAddrOp_Word = True primOpCanFail WriteOffAddrOp_Addr = True primOpCanFail WriteOffAddrOp_Float = True primOpCanFail WriteOffAddrOp_Double = True primOpCanFail WriteOffAddrOp_StablePtr = True primOpCanFail WriteOffAddrOp_Int8 = True primOpCanFail WriteOffAddrOp_Int16 = True primOpCanFail WriteOffAddrOp_Int32 = True primOpCanFail WriteOffAddrOp_Int64 = True primOpCanFail WriteOffAddrOp_Word8 = True primOpCanFail WriteOffAddrOp_Word16 = True primOpCanFail WriteOffAddrOp_Word32 = True primOpCanFail WriteOffAddrOp_Word64 = True primOpCanFail InterlockedExchange_Addr = True primOpCanFail InterlockedExchange_Word = True primOpCanFail CasAddrOp_Addr = True primOpCanFail CasAddrOp_Word = True primOpCanFail CasAddrOp_Word8 = True primOpCanFail CasAddrOp_Word16 = True primOpCanFail CasAddrOp_Word32 = True primOpCanFail CasAddrOp_Word64 = True primOpCanFail FetchAddAddrOp_Word = True primOpCanFail FetchSubAddrOp_Word = True primOpCanFail FetchAndAddrOp_Word = True primOpCanFail FetchNandAddrOp_Word = True primOpCanFail FetchOrAddrOp_Word = True primOpCanFail FetchXorAddrOp_Word = True primOpCanFail AtomicReadAddrOp_Word = True primOpCanFail AtomicWriteAddrOp_Word = True primOpCanFail AtomicModifyMutVar2Op = True primOpCanFail AtomicModifyMutVar_Op = True primOpCanFail RaiseOp = True primOpCanFail ReallyUnsafePtrEqualityOp = True primOpCanFail (VecInsertOp _ _ _) = True primOpCanFail (VecDivOp _ _ _) = True primOpCanFail (VecQuotOp _ _ _) = True primOpCanFail (VecRemOp _ _ _) = True primOpCanFail (VecIndexByteArrayOp _ _ _) = True primOpCanFail (VecReadByteArrayOp _ _ _) = True primOpCanFail (VecWriteByteArrayOp _ _ _) = True primOpCanFail (VecIndexOffAddrOp _ _ _) = True primOpCanFail (VecReadOffAddrOp _ _ _) = True primOpCanFail (VecWriteOffAddrOp _ _ _) = True primOpCanFail (VecIndexScalarByteArrayOp _ _ _) = True primOpCanFail (VecReadScalarByteArrayOp _ _ _) = True primOpCanFail (VecWriteScalarByteArrayOp _ _ _) = True 
primOpCanFail (VecIndexScalarOffAddrOp _ _ _) = True primOpCanFail (VecReadScalarOffAddrOp _ _ _) = True primOpCanFail (VecWriteScalarOffAddrOp _ _ _) = True primOpCanFail _ = False ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-code-size.hs-incl0000644000000000000000000000653014472400112025472 0ustar0000000000000000primOpCodeSize OrdOp = 0 primOpCodeSize Int8ToWord8Op = 0 primOpCodeSize Word8ToInt8Op = 0 primOpCodeSize Int16ToWord16Op = 0 primOpCodeSize Word16ToInt16Op = 0 primOpCodeSize Int32ToWord32Op = 0 primOpCodeSize Word32ToInt32Op = 0 primOpCodeSize Int64ToWord64Op = 0 primOpCodeSize Word64ToInt64Op = 0 primOpCodeSize IntAddCOp = 2 primOpCodeSize IntSubCOp = 2 primOpCodeSize ChrOp = 0 primOpCodeSize IntToWordOp = 0 primOpCodeSize WordAddCOp = 2 primOpCodeSize WordSubCOp = 2 primOpCodeSize WordAdd2Op = 2 primOpCodeSize WordToIntOp = 0 primOpCodeSize DoubleExpOp = primOpCodeSizeForeignCall primOpCodeSize DoubleExpM1Op = primOpCodeSizeForeignCall primOpCodeSize DoubleLogOp = primOpCodeSizeForeignCall primOpCodeSize DoubleLog1POp = primOpCodeSizeForeignCall primOpCodeSize DoubleSqrtOp = primOpCodeSizeForeignCall primOpCodeSize DoubleSinOp = primOpCodeSizeForeignCall primOpCodeSize DoubleCosOp = primOpCodeSizeForeignCall primOpCodeSize DoubleTanOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAsinOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAcosOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAtanOp = primOpCodeSizeForeignCall primOpCodeSize DoubleSinhOp = primOpCodeSizeForeignCall primOpCodeSize DoubleCoshOp = primOpCodeSizeForeignCall primOpCodeSize DoubleTanhOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAsinhOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAcoshOp = primOpCodeSizeForeignCall primOpCodeSize DoubleAtanhOp = primOpCodeSizeForeignCall primOpCodeSize DoublePowerOp = primOpCodeSizeForeignCall primOpCodeSize FloatExpOp = primOpCodeSizeForeignCall primOpCodeSize FloatExpM1Op = primOpCodeSizeForeignCall 
primOpCodeSize FloatLogOp = primOpCodeSizeForeignCall primOpCodeSize FloatLog1POp = primOpCodeSizeForeignCall primOpCodeSize FloatSqrtOp = primOpCodeSizeForeignCall primOpCodeSize FloatSinOp = primOpCodeSizeForeignCall primOpCodeSize FloatCosOp = primOpCodeSizeForeignCall primOpCodeSize FloatTanOp = primOpCodeSizeForeignCall primOpCodeSize FloatAsinOp = primOpCodeSizeForeignCall primOpCodeSize FloatAcosOp = primOpCodeSizeForeignCall primOpCodeSize FloatAtanOp = primOpCodeSizeForeignCall primOpCodeSize FloatSinhOp = primOpCodeSizeForeignCall primOpCodeSize FloatCoshOp = primOpCodeSizeForeignCall primOpCodeSize FloatTanhOp = primOpCodeSizeForeignCall primOpCodeSize FloatAsinhOp = primOpCodeSizeForeignCall primOpCodeSize FloatAcoshOp = primOpCodeSizeForeignCall primOpCodeSize FloatAtanhOp = primOpCodeSizeForeignCall primOpCodeSize FloatPowerOp = primOpCodeSizeForeignCall primOpCodeSize WriteArrayOp = 2 primOpCodeSize CopyByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyMutableByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyByteArrayToAddrOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyMutableByteArrayToAddrOp = primOpCodeSizeForeignCall + 4 primOpCodeSize CopyAddrToByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize SetByteArrayOp = primOpCodeSizeForeignCall + 4 primOpCodeSize AddrToIntOp = 0 primOpCodeSize IntToAddrOp = 0 primOpCodeSize WriteMutVarOp = primOpCodeSizeForeignCall primOpCodeSize TouchOp = 0 primOpCodeSize ParOp = primOpCodeSizeForeignCall primOpCodeSize SparkOp = primOpCodeSizeForeignCall primOpCodeSize AddrToAnyOp = 0 primOpCodeSize AnyToAddrOp = 0 primOpCodeSize _ = primOpCodeSizeDefault ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-commutable.hs-incl0000644000000000000000000000337414472400111025742 0ustar0000000000000000commutableOp CharEqOp = True commutableOp CharNeOp = True commutableOp Int8AddOp = True commutableOp Int8MulOp = True commutableOp Word8AddOp = True commutableOp 
Word8MulOp = True commutableOp Word8AndOp = True commutableOp Word8OrOp = True commutableOp Word8XorOp = True commutableOp Int16AddOp = True commutableOp Int16MulOp = True commutableOp Word16AddOp = True commutableOp Word16MulOp = True commutableOp Word16AndOp = True commutableOp Word16OrOp = True commutableOp Word16XorOp = True commutableOp Int32AddOp = True commutableOp Int32MulOp = True commutableOp Word32AddOp = True commutableOp Word32MulOp = True commutableOp Word32AndOp = True commutableOp Word32OrOp = True commutableOp Word32XorOp = True commutableOp Int64AddOp = True commutableOp Int64MulOp = True commutableOp Word64AddOp = True commutableOp Word64MulOp = True commutableOp Word64AndOp = True commutableOp Word64OrOp = True commutableOp Word64XorOp = True commutableOp IntAddOp = True commutableOp IntMulOp = True commutableOp IntMulMayOfloOp = True commutableOp IntAndOp = True commutableOp IntOrOp = True commutableOp IntXorOp = True commutableOp IntAddCOp = True commutableOp IntEqOp = True commutableOp IntNeOp = True commutableOp WordAddOp = True commutableOp WordAddCOp = True commutableOp WordAdd2Op = True commutableOp WordMulOp = True commutableOp WordMul2Op = True commutableOp WordAndOp = True commutableOp WordOrOp = True commutableOp WordXorOp = True commutableOp DoubleEqOp = True commutableOp DoubleNeOp = True commutableOp DoubleAddOp = True commutableOp DoubleMulOp = True commutableOp FloatEqOp = True commutableOp FloatNeOp = True commutableOp FloatAddOp = True commutableOp FloatMulOp = True commutableOp (VecAddOp _ _ _) = True commutableOp (VecMulOp _ _ _) = True commutableOp _ = False ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-data-decl.hs-incl0000644000000000000000000003472714472400111025436 0ustar0000000000000000data PrimOp = CharGtOp | CharGeOp | CharEqOp | CharNeOp | CharLtOp | CharLeOp | OrdOp | Int8ToIntOp | IntToInt8Op | Int8NegOp | Int8AddOp | Int8SubOp | Int8MulOp | Int8QuotOp | Int8RemOp | Int8QuotRemOp | Int8SllOp | 
Int8SraOp | Int8SrlOp | Int8ToWord8Op | Int8EqOp | Int8GeOp | Int8GtOp | Int8LeOp | Int8LtOp | Int8NeOp | Word8ToWordOp | WordToWord8Op | Word8AddOp | Word8SubOp | Word8MulOp | Word8QuotOp | Word8RemOp | Word8QuotRemOp | Word8AndOp | Word8OrOp | Word8XorOp | Word8NotOp | Word8SllOp | Word8SrlOp | Word8ToInt8Op | Word8EqOp | Word8GeOp | Word8GtOp | Word8LeOp | Word8LtOp | Word8NeOp | Int16ToIntOp | IntToInt16Op | Int16NegOp | Int16AddOp | Int16SubOp | Int16MulOp | Int16QuotOp | Int16RemOp | Int16QuotRemOp | Int16SllOp | Int16SraOp | Int16SrlOp | Int16ToWord16Op | Int16EqOp | Int16GeOp | Int16GtOp | Int16LeOp | Int16LtOp | Int16NeOp | Word16ToWordOp | WordToWord16Op | Word16AddOp | Word16SubOp | Word16MulOp | Word16QuotOp | Word16RemOp | Word16QuotRemOp | Word16AndOp | Word16OrOp | Word16XorOp | Word16NotOp | Word16SllOp | Word16SrlOp | Word16ToInt16Op | Word16EqOp | Word16GeOp | Word16GtOp | Word16LeOp | Word16LtOp | Word16NeOp | Int32ToIntOp | IntToInt32Op | Int32NegOp | Int32AddOp | Int32SubOp | Int32MulOp | Int32QuotOp | Int32RemOp | Int32QuotRemOp | Int32SllOp | Int32SraOp | Int32SrlOp | Int32ToWord32Op | Int32EqOp | Int32GeOp | Int32GtOp | Int32LeOp | Int32LtOp | Int32NeOp | Word32ToWordOp | WordToWord32Op | Word32AddOp | Word32SubOp | Word32MulOp | Word32QuotOp | Word32RemOp | Word32QuotRemOp | Word32AndOp | Word32OrOp | Word32XorOp | Word32NotOp | Word32SllOp | Word32SrlOp | Word32ToInt32Op | Word32EqOp | Word32GeOp | Word32GtOp | Word32LeOp | Word32LtOp | Word32NeOp | Int64ToIntOp | IntToInt64Op | Int64NegOp | Int64AddOp | Int64SubOp | Int64MulOp | Int64QuotOp | Int64RemOp | Int64SllOp | Int64SraOp | Int64SrlOp | Int64ToWord64Op | Int64EqOp | Int64GeOp | Int64GtOp | Int64LeOp | Int64LtOp | Int64NeOp | Word64ToWordOp | WordToWord64Op | Word64AddOp | Word64SubOp | Word64MulOp | Word64QuotOp | Word64RemOp | Word64AndOp | Word64OrOp | Word64XorOp | Word64NotOp | Word64SllOp | Word64SrlOp | Word64ToInt64Op | Word64EqOp | Word64GeOp | Word64GtOp | Word64LeOp | 
Word64LtOp | Word64NeOp | IntAddOp | IntSubOp | IntMulOp | IntMul2Op | IntMulMayOfloOp | IntQuotOp | IntRemOp | IntQuotRemOp | IntAndOp | IntOrOp | IntXorOp | IntNotOp | IntNegOp | IntAddCOp | IntSubCOp | IntGtOp | IntGeOp | IntEqOp | IntNeOp | IntLtOp | IntLeOp | ChrOp | IntToWordOp | IntToFloatOp | IntToDoubleOp | WordToFloatOp | WordToDoubleOp | IntSllOp | IntSraOp | IntSrlOp | WordAddOp | WordAddCOp | WordSubCOp | WordAdd2Op | WordSubOp | WordMulOp | WordMul2Op | WordQuotOp | WordRemOp | WordQuotRemOp | WordQuotRem2Op | WordAndOp | WordOrOp | WordXorOp | WordNotOp | WordSllOp | WordSrlOp | WordToIntOp | WordGtOp | WordGeOp | WordEqOp | WordNeOp | WordLtOp | WordLeOp | PopCnt8Op | PopCnt16Op | PopCnt32Op | PopCnt64Op | PopCntOp | Pdep8Op | Pdep16Op | Pdep32Op | Pdep64Op | PdepOp | Pext8Op | Pext16Op | Pext32Op | Pext64Op | PextOp | Clz8Op | Clz16Op | Clz32Op | Clz64Op | ClzOp | Ctz8Op | Ctz16Op | Ctz32Op | Ctz64Op | CtzOp | BSwap16Op | BSwap32Op | BSwap64Op | BSwapOp | BRev8Op | BRev16Op | BRev32Op | BRev64Op | BRevOp | Narrow8IntOp | Narrow16IntOp | Narrow32IntOp | Narrow8WordOp | Narrow16WordOp | Narrow32WordOp | DoubleGtOp | DoubleGeOp | DoubleEqOp | DoubleNeOp | DoubleLtOp | DoubleLeOp | DoubleAddOp | DoubleSubOp | DoubleMulOp | DoubleDivOp | DoubleNegOp | DoubleFabsOp | DoubleToIntOp | DoubleToFloatOp | DoubleExpOp | DoubleExpM1Op | DoubleLogOp | DoubleLog1POp | DoubleSqrtOp | DoubleSinOp | DoubleCosOp | DoubleTanOp | DoubleAsinOp | DoubleAcosOp | DoubleAtanOp | DoubleSinhOp | DoubleCoshOp | DoubleTanhOp | DoubleAsinhOp | DoubleAcoshOp | DoubleAtanhOp | DoublePowerOp | DoubleDecode_2IntOp | DoubleDecode_Int64Op | FloatGtOp | FloatGeOp | FloatEqOp | FloatNeOp | FloatLtOp | FloatLeOp | FloatAddOp | FloatSubOp | FloatMulOp | FloatDivOp | FloatNegOp | FloatFabsOp | FloatToIntOp | FloatExpOp | FloatExpM1Op | FloatLogOp | FloatLog1POp | FloatSqrtOp | FloatSinOp | FloatCosOp | FloatTanOp | FloatAsinOp | FloatAcosOp | FloatAtanOp | FloatSinhOp | FloatCoshOp | 
FloatTanhOp | FloatAsinhOp | FloatAcoshOp | FloatAtanhOp | FloatPowerOp | FloatToDoubleOp | FloatDecode_IntOp | NewArrayOp | ReadArrayOp | WriteArrayOp | SizeofArrayOp | SizeofMutableArrayOp | IndexArrayOp | UnsafeFreezeArrayOp | UnsafeThawArrayOp | CopyArrayOp | CopyMutableArrayOp | CloneArrayOp | CloneMutableArrayOp | FreezeArrayOp | ThawArrayOp | CasArrayOp | NewSmallArrayOp | ShrinkSmallMutableArrayOp_Char | ReadSmallArrayOp | WriteSmallArrayOp | SizeofSmallArrayOp | SizeofSmallMutableArrayOp | GetSizeofSmallMutableArrayOp | IndexSmallArrayOp | UnsafeFreezeSmallArrayOp | UnsafeThawSmallArrayOp | CopySmallArrayOp | CopySmallMutableArrayOp | CloneSmallArrayOp | CloneSmallMutableArrayOp | FreezeSmallArrayOp | ThawSmallArrayOp | CasSmallArrayOp | NewByteArrayOp_Char | NewPinnedByteArrayOp_Char | NewAlignedPinnedByteArrayOp_Char | MutableByteArrayIsPinnedOp | ByteArrayIsPinnedOp | ByteArrayContents_Char | MutableByteArrayContents_Char | ShrinkMutableByteArrayOp_Char | ResizeMutableByteArrayOp_Char | UnsafeFreezeByteArrayOp | SizeofByteArrayOp | SizeofMutableByteArrayOp | GetSizeofMutableByteArrayOp | IndexByteArrayOp_Char | IndexByteArrayOp_WideChar | IndexByteArrayOp_Int | IndexByteArrayOp_Word | IndexByteArrayOp_Addr | IndexByteArrayOp_Float | IndexByteArrayOp_Double | IndexByteArrayOp_StablePtr | IndexByteArrayOp_Int8 | IndexByteArrayOp_Int16 | IndexByteArrayOp_Int32 | IndexByteArrayOp_Int64 | IndexByteArrayOp_Word8 | IndexByteArrayOp_Word16 | IndexByteArrayOp_Word32 | IndexByteArrayOp_Word64 | IndexByteArrayOp_Word8AsChar | IndexByteArrayOp_Word8AsWideChar | IndexByteArrayOp_Word8AsInt | IndexByteArrayOp_Word8AsWord | IndexByteArrayOp_Word8AsAddr | IndexByteArrayOp_Word8AsFloat | IndexByteArrayOp_Word8AsDouble | IndexByteArrayOp_Word8AsStablePtr | IndexByteArrayOp_Word8AsInt16 | IndexByteArrayOp_Word8AsInt32 | IndexByteArrayOp_Word8AsInt64 | IndexByteArrayOp_Word8AsWord16 | IndexByteArrayOp_Word8AsWord32 | IndexByteArrayOp_Word8AsWord64 | ReadByteArrayOp_Char | 
ReadByteArrayOp_WideChar | ReadByteArrayOp_Int | ReadByteArrayOp_Word | ReadByteArrayOp_Addr | ReadByteArrayOp_Float | ReadByteArrayOp_Double | ReadByteArrayOp_StablePtr | ReadByteArrayOp_Int8 | ReadByteArrayOp_Int16 | ReadByteArrayOp_Int32 | ReadByteArrayOp_Int64 | ReadByteArrayOp_Word8 | ReadByteArrayOp_Word16 | ReadByteArrayOp_Word32 | ReadByteArrayOp_Word64 | ReadByteArrayOp_Word8AsChar | ReadByteArrayOp_Word8AsWideChar | ReadByteArrayOp_Word8AsInt | ReadByteArrayOp_Word8AsWord | ReadByteArrayOp_Word8AsAddr | ReadByteArrayOp_Word8AsFloat | ReadByteArrayOp_Word8AsDouble | ReadByteArrayOp_Word8AsStablePtr | ReadByteArrayOp_Word8AsInt16 | ReadByteArrayOp_Word8AsInt32 | ReadByteArrayOp_Word8AsInt64 | ReadByteArrayOp_Word8AsWord16 | ReadByteArrayOp_Word8AsWord32 | ReadByteArrayOp_Word8AsWord64 | WriteByteArrayOp_Char | WriteByteArrayOp_WideChar | WriteByteArrayOp_Int | WriteByteArrayOp_Word | WriteByteArrayOp_Addr | WriteByteArrayOp_Float | WriteByteArrayOp_Double | WriteByteArrayOp_StablePtr | WriteByteArrayOp_Int8 | WriteByteArrayOp_Int16 | WriteByteArrayOp_Int32 | WriteByteArrayOp_Int64 | WriteByteArrayOp_Word8 | WriteByteArrayOp_Word16 | WriteByteArrayOp_Word32 | WriteByteArrayOp_Word64 | WriteByteArrayOp_Word8AsChar | WriteByteArrayOp_Word8AsWideChar | WriteByteArrayOp_Word8AsInt | WriteByteArrayOp_Word8AsWord | WriteByteArrayOp_Word8AsAddr | WriteByteArrayOp_Word8AsFloat | WriteByteArrayOp_Word8AsDouble | WriteByteArrayOp_Word8AsStablePtr | WriteByteArrayOp_Word8AsInt16 | WriteByteArrayOp_Word8AsInt32 | WriteByteArrayOp_Word8AsInt64 | WriteByteArrayOp_Word8AsWord16 | WriteByteArrayOp_Word8AsWord32 | WriteByteArrayOp_Word8AsWord64 | CompareByteArraysOp | CopyByteArrayOp | CopyMutableByteArrayOp | CopyByteArrayToAddrOp | CopyMutableByteArrayToAddrOp | CopyAddrToByteArrayOp | SetByteArrayOp | AtomicReadByteArrayOp_Int | AtomicWriteByteArrayOp_Int | CasByteArrayOp_Int | CasByteArrayOp_Int8 | CasByteArrayOp_Int16 | CasByteArrayOp_Int32 | CasByteArrayOp_Int64 | 
FetchAddByteArrayOp_Int | FetchSubByteArrayOp_Int | FetchAndByteArrayOp_Int | FetchNandByteArrayOp_Int | FetchOrByteArrayOp_Int | FetchXorByteArrayOp_Int | AddrAddOp | AddrSubOp | AddrRemOp | AddrToIntOp | IntToAddrOp | AddrGtOp | AddrGeOp | AddrEqOp | AddrNeOp | AddrLtOp | AddrLeOp | IndexOffAddrOp_Char | IndexOffAddrOp_WideChar | IndexOffAddrOp_Int | IndexOffAddrOp_Word | IndexOffAddrOp_Addr | IndexOffAddrOp_Float | IndexOffAddrOp_Double | IndexOffAddrOp_StablePtr | IndexOffAddrOp_Int8 | IndexOffAddrOp_Int16 | IndexOffAddrOp_Int32 | IndexOffAddrOp_Int64 | IndexOffAddrOp_Word8 | IndexOffAddrOp_Word16 | IndexOffAddrOp_Word32 | IndexOffAddrOp_Word64 | ReadOffAddrOp_Char | ReadOffAddrOp_WideChar | ReadOffAddrOp_Int | ReadOffAddrOp_Word | ReadOffAddrOp_Addr | ReadOffAddrOp_Float | ReadOffAddrOp_Double | ReadOffAddrOp_StablePtr | ReadOffAddrOp_Int8 | ReadOffAddrOp_Int16 | ReadOffAddrOp_Int32 | ReadOffAddrOp_Int64 | ReadOffAddrOp_Word8 | ReadOffAddrOp_Word16 | ReadOffAddrOp_Word32 | ReadOffAddrOp_Word64 | WriteOffAddrOp_Char | WriteOffAddrOp_WideChar | WriteOffAddrOp_Int | WriteOffAddrOp_Word | WriteOffAddrOp_Addr | WriteOffAddrOp_Float | WriteOffAddrOp_Double | WriteOffAddrOp_StablePtr | WriteOffAddrOp_Int8 | WriteOffAddrOp_Int16 | WriteOffAddrOp_Int32 | WriteOffAddrOp_Int64 | WriteOffAddrOp_Word8 | WriteOffAddrOp_Word16 | WriteOffAddrOp_Word32 | WriteOffAddrOp_Word64 | InterlockedExchange_Addr | InterlockedExchange_Word | CasAddrOp_Addr | CasAddrOp_Word | CasAddrOp_Word8 | CasAddrOp_Word16 | CasAddrOp_Word32 | CasAddrOp_Word64 | FetchAddAddrOp_Word | FetchSubAddrOp_Word | FetchAndAddrOp_Word | FetchNandAddrOp_Word | FetchOrAddrOp_Word | FetchXorAddrOp_Word | AtomicReadAddrOp_Word | AtomicWriteAddrOp_Word | NewMutVarOp | ReadMutVarOp | WriteMutVarOp | AtomicModifyMutVar2Op | AtomicModifyMutVar_Op | CasMutVarOp | CatchOp | RaiseOp | RaiseIOOp | MaskAsyncExceptionsOp | MaskUninterruptibleOp | UnmaskAsyncExceptionsOp | MaskStatus | AtomicallyOp | RetryOp | CatchRetryOp | 
CatchSTMOp | NewTVarOp | ReadTVarOp | ReadTVarIOOp | WriteTVarOp | NewMVarOp | TakeMVarOp | TryTakeMVarOp | PutMVarOp | TryPutMVarOp | ReadMVarOp | TryReadMVarOp | IsEmptyMVarOp | NewIOPortOp | ReadIOPortOp | WriteIOPortOp | DelayOp | WaitReadOp | WaitWriteOp | ForkOp | ForkOnOp | KillThreadOp | YieldOp | MyThreadIdOp | LabelThreadOp | IsCurrentThreadBoundOp | NoDuplicateOp | ThreadStatusOp | MkWeakOp | MkWeakNoFinalizerOp | AddCFinalizerToWeakOp | DeRefWeakOp | FinalizeWeakOp | TouchOp | MakeStablePtrOp | DeRefStablePtrOp | EqStablePtrOp | MakeStableNameOp | StableNameToIntOp | CompactNewOp | CompactResizeOp | CompactContainsOp | CompactContainsAnyOp | CompactGetFirstBlockOp | CompactGetNextBlockOp | CompactAllocateBlockOp | CompactFixupPointersOp | CompactAdd | CompactAddWithSharing | CompactSize | ReallyUnsafePtrEqualityOp | ParOp | SparkOp | SeqOp | GetSparkOp | NumSparks | KeepAliveOp | DataToTagOp | TagToEnumOp | AddrToAnyOp | AnyToAddrOp | MkApUpd0_Op | NewBCOOp | UnpackClosureOp | ClosureSizeOp | GetApStackValOp | GetCCSOfOp | GetCurrentCCSOp | ClearCCSOp | WhereFromOp | TraceEventOp | TraceEventBinaryOp | TraceMarkerOp | SetThreadAllocationCounter | VecBroadcastOp PrimOpVecCat Length Width | VecPackOp PrimOpVecCat Length Width | VecUnpackOp PrimOpVecCat Length Width | VecInsertOp PrimOpVecCat Length Width | VecAddOp PrimOpVecCat Length Width | VecSubOp PrimOpVecCat Length Width | VecMulOp PrimOpVecCat Length Width | VecDivOp PrimOpVecCat Length Width | VecQuotOp PrimOpVecCat Length Width | VecRemOp PrimOpVecCat Length Width | VecNegOp PrimOpVecCat Length Width | VecIndexByteArrayOp PrimOpVecCat Length Width | VecReadByteArrayOp PrimOpVecCat Length Width | VecWriteByteArrayOp PrimOpVecCat Length Width | VecIndexOffAddrOp PrimOpVecCat Length Width | VecReadOffAddrOp PrimOpVecCat Length Width | VecWriteOffAddrOp PrimOpVecCat Length Width | VecIndexScalarByteArrayOp PrimOpVecCat Length Width | VecReadScalarByteArrayOp PrimOpVecCat Length Width | 
VecWriteScalarByteArrayOp PrimOpVecCat Length Width | VecIndexScalarOffAddrOp PrimOpVecCat Length Width | VecReadScalarOffAddrOp PrimOpVecCat Length Width | VecWriteScalarOffAddrOp PrimOpVecCat Length Width | PrefetchByteArrayOp3 | PrefetchMutableByteArrayOp3 | PrefetchAddrOp3 | PrefetchValueOp3 | PrefetchByteArrayOp2 | PrefetchMutableByteArrayOp2 | PrefetchAddrOp2 | PrefetchValueOp2 | PrefetchByteArrayOp1 | PrefetchMutableByteArrayOp1 | PrefetchAddrOp1 | PrefetchValueOp1 | PrefetchByteArrayOp0 | PrefetchMutableByteArrayOp0 | PrefetchAddrOp0 | PrefetchValueOp0 ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-fixity.hs-incl0000644000000000000000000000223714472400111025123 0ustar0000000000000000primOpFixity IntAddOp = Just (Fixity NoSourceText 6 InfixL) primOpFixity IntSubOp = Just (Fixity NoSourceText 6 InfixL) primOpFixity IntMulOp = Just (Fixity NoSourceText 7 InfixL) primOpFixity IntGtOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity IntGeOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity IntEqOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity IntNeOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity IntLtOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity IntLeOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleGtOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleGeOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleEqOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleNeOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleLtOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleLeOp = Just (Fixity NoSourceText 4 InfixN) primOpFixity DoubleAddOp = Just (Fixity NoSourceText 6 InfixL) primOpFixity DoubleSubOp = Just (Fixity NoSourceText 6 InfixL) primOpFixity DoubleMulOp = Just (Fixity NoSourceText 7 InfixL) primOpFixity DoubleDivOp = Just (Fixity NoSourceText 7 InfixL) primOpFixity _ = Nothing 
ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-has-side-effects.hs-incl0000644000000000000000000002757014472400111026730 0ustar0000000000000000primOpHasSideEffects NewArrayOp = True primOpHasSideEffects ReadArrayOp = True primOpHasSideEffects WriteArrayOp = True primOpHasSideEffects UnsafeFreezeArrayOp = True primOpHasSideEffects UnsafeThawArrayOp = True primOpHasSideEffects CopyArrayOp = True primOpHasSideEffects CopyMutableArrayOp = True primOpHasSideEffects CloneArrayOp = True primOpHasSideEffects CloneMutableArrayOp = True primOpHasSideEffects FreezeArrayOp = True primOpHasSideEffects ThawArrayOp = True primOpHasSideEffects CasArrayOp = True primOpHasSideEffects NewSmallArrayOp = True primOpHasSideEffects ShrinkSmallMutableArrayOp_Char = True primOpHasSideEffects ReadSmallArrayOp = True primOpHasSideEffects WriteSmallArrayOp = True primOpHasSideEffects UnsafeFreezeSmallArrayOp = True primOpHasSideEffects UnsafeThawSmallArrayOp = True primOpHasSideEffects CopySmallArrayOp = True primOpHasSideEffects CopySmallMutableArrayOp = True primOpHasSideEffects CloneSmallArrayOp = True primOpHasSideEffects CloneSmallMutableArrayOp = True primOpHasSideEffects FreezeSmallArrayOp = True primOpHasSideEffects ThawSmallArrayOp = True primOpHasSideEffects CasSmallArrayOp = True primOpHasSideEffects NewByteArrayOp_Char = True primOpHasSideEffects NewPinnedByteArrayOp_Char = True primOpHasSideEffects NewAlignedPinnedByteArrayOp_Char = True primOpHasSideEffects ShrinkMutableByteArrayOp_Char = True primOpHasSideEffects ResizeMutableByteArrayOp_Char = True primOpHasSideEffects UnsafeFreezeByteArrayOp = True primOpHasSideEffects ReadByteArrayOp_Char = True primOpHasSideEffects ReadByteArrayOp_WideChar = True primOpHasSideEffects ReadByteArrayOp_Int = True primOpHasSideEffects ReadByteArrayOp_Word = True primOpHasSideEffects ReadByteArrayOp_Addr = True primOpHasSideEffects ReadByteArrayOp_Float = True primOpHasSideEffects ReadByteArrayOp_Double = True 
primOpHasSideEffects ReadByteArrayOp_StablePtr = True primOpHasSideEffects ReadByteArrayOp_Int8 = True primOpHasSideEffects ReadByteArrayOp_Int16 = True primOpHasSideEffects ReadByteArrayOp_Int32 = True primOpHasSideEffects ReadByteArrayOp_Int64 = True primOpHasSideEffects ReadByteArrayOp_Word8 = True primOpHasSideEffects ReadByteArrayOp_Word16 = True primOpHasSideEffects ReadByteArrayOp_Word32 = True primOpHasSideEffects ReadByteArrayOp_Word64 = True primOpHasSideEffects ReadByteArrayOp_Word8AsChar = True primOpHasSideEffects ReadByteArrayOp_Word8AsWideChar = True primOpHasSideEffects ReadByteArrayOp_Word8AsInt = True primOpHasSideEffects ReadByteArrayOp_Word8AsWord = True primOpHasSideEffects ReadByteArrayOp_Word8AsAddr = True primOpHasSideEffects ReadByteArrayOp_Word8AsFloat = True primOpHasSideEffects ReadByteArrayOp_Word8AsDouble = True primOpHasSideEffects ReadByteArrayOp_Word8AsStablePtr = True primOpHasSideEffects ReadByteArrayOp_Word8AsInt16 = True primOpHasSideEffects ReadByteArrayOp_Word8AsInt32 = True primOpHasSideEffects ReadByteArrayOp_Word8AsInt64 = True primOpHasSideEffects ReadByteArrayOp_Word8AsWord16 = True primOpHasSideEffects ReadByteArrayOp_Word8AsWord32 = True primOpHasSideEffects ReadByteArrayOp_Word8AsWord64 = True primOpHasSideEffects WriteByteArrayOp_Char = True primOpHasSideEffects WriteByteArrayOp_WideChar = True primOpHasSideEffects WriteByteArrayOp_Int = True primOpHasSideEffects WriteByteArrayOp_Word = True primOpHasSideEffects WriteByteArrayOp_Addr = True primOpHasSideEffects WriteByteArrayOp_Float = True primOpHasSideEffects WriteByteArrayOp_Double = True primOpHasSideEffects WriteByteArrayOp_StablePtr = True primOpHasSideEffects WriteByteArrayOp_Int8 = True primOpHasSideEffects WriteByteArrayOp_Int16 = True primOpHasSideEffects WriteByteArrayOp_Int32 = True primOpHasSideEffects WriteByteArrayOp_Int64 = True primOpHasSideEffects WriteByteArrayOp_Word8 = True primOpHasSideEffects WriteByteArrayOp_Word16 = True primOpHasSideEffects 
WriteByteArrayOp_Word32 = True primOpHasSideEffects WriteByteArrayOp_Word64 = True primOpHasSideEffects WriteByteArrayOp_Word8AsChar = True primOpHasSideEffects WriteByteArrayOp_Word8AsWideChar = True primOpHasSideEffects WriteByteArrayOp_Word8AsInt = True primOpHasSideEffects WriteByteArrayOp_Word8AsWord = True primOpHasSideEffects WriteByteArrayOp_Word8AsAddr = True primOpHasSideEffects WriteByteArrayOp_Word8AsFloat = True primOpHasSideEffects WriteByteArrayOp_Word8AsDouble = True primOpHasSideEffects WriteByteArrayOp_Word8AsStablePtr = True primOpHasSideEffects WriteByteArrayOp_Word8AsInt16 = True primOpHasSideEffects WriteByteArrayOp_Word8AsInt32 = True primOpHasSideEffects WriteByteArrayOp_Word8AsInt64 = True primOpHasSideEffects WriteByteArrayOp_Word8AsWord16 = True primOpHasSideEffects WriteByteArrayOp_Word8AsWord32 = True primOpHasSideEffects WriteByteArrayOp_Word8AsWord64 = True primOpHasSideEffects CopyByteArrayOp = True primOpHasSideEffects CopyMutableByteArrayOp = True primOpHasSideEffects CopyByteArrayToAddrOp = True primOpHasSideEffects CopyMutableByteArrayToAddrOp = True primOpHasSideEffects CopyAddrToByteArrayOp = True primOpHasSideEffects SetByteArrayOp = True primOpHasSideEffects AtomicReadByteArrayOp_Int = True primOpHasSideEffects AtomicWriteByteArrayOp_Int = True primOpHasSideEffects CasByteArrayOp_Int = True primOpHasSideEffects CasByteArrayOp_Int8 = True primOpHasSideEffects CasByteArrayOp_Int16 = True primOpHasSideEffects CasByteArrayOp_Int32 = True primOpHasSideEffects CasByteArrayOp_Int64 = True primOpHasSideEffects FetchAddByteArrayOp_Int = True primOpHasSideEffects FetchSubByteArrayOp_Int = True primOpHasSideEffects FetchAndByteArrayOp_Int = True primOpHasSideEffects FetchNandByteArrayOp_Int = True primOpHasSideEffects FetchOrByteArrayOp_Int = True primOpHasSideEffects FetchXorByteArrayOp_Int = True primOpHasSideEffects ReadOffAddrOp_Char = True primOpHasSideEffects ReadOffAddrOp_WideChar = True primOpHasSideEffects ReadOffAddrOp_Int = 
True primOpHasSideEffects ReadOffAddrOp_Word = True primOpHasSideEffects ReadOffAddrOp_Addr = True primOpHasSideEffects ReadOffAddrOp_Float = True primOpHasSideEffects ReadOffAddrOp_Double = True primOpHasSideEffects ReadOffAddrOp_StablePtr = True primOpHasSideEffects ReadOffAddrOp_Int8 = True primOpHasSideEffects ReadOffAddrOp_Int16 = True primOpHasSideEffects ReadOffAddrOp_Int32 = True primOpHasSideEffects ReadOffAddrOp_Int64 = True primOpHasSideEffects ReadOffAddrOp_Word8 = True primOpHasSideEffects ReadOffAddrOp_Word16 = True primOpHasSideEffects ReadOffAddrOp_Word32 = True primOpHasSideEffects ReadOffAddrOp_Word64 = True primOpHasSideEffects WriteOffAddrOp_Char = True primOpHasSideEffects WriteOffAddrOp_WideChar = True primOpHasSideEffects WriteOffAddrOp_Int = True primOpHasSideEffects WriteOffAddrOp_Word = True primOpHasSideEffects WriteOffAddrOp_Addr = True primOpHasSideEffects WriteOffAddrOp_Float = True primOpHasSideEffects WriteOffAddrOp_Double = True primOpHasSideEffects WriteOffAddrOp_StablePtr = True primOpHasSideEffects WriteOffAddrOp_Int8 = True primOpHasSideEffects WriteOffAddrOp_Int16 = True primOpHasSideEffects WriteOffAddrOp_Int32 = True primOpHasSideEffects WriteOffAddrOp_Int64 = True primOpHasSideEffects WriteOffAddrOp_Word8 = True primOpHasSideEffects WriteOffAddrOp_Word16 = True primOpHasSideEffects WriteOffAddrOp_Word32 = True primOpHasSideEffects WriteOffAddrOp_Word64 = True primOpHasSideEffects InterlockedExchange_Addr = True primOpHasSideEffects InterlockedExchange_Word = True primOpHasSideEffects CasAddrOp_Addr = True primOpHasSideEffects CasAddrOp_Word = True primOpHasSideEffects CasAddrOp_Word8 = True primOpHasSideEffects CasAddrOp_Word16 = True primOpHasSideEffects CasAddrOp_Word32 = True primOpHasSideEffects CasAddrOp_Word64 = True primOpHasSideEffects FetchAddAddrOp_Word = True primOpHasSideEffects FetchSubAddrOp_Word = True primOpHasSideEffects FetchAndAddrOp_Word = True primOpHasSideEffects FetchNandAddrOp_Word = True 
primOpHasSideEffects FetchOrAddrOp_Word = True primOpHasSideEffects FetchXorAddrOp_Word = True primOpHasSideEffects AtomicReadAddrOp_Word = True primOpHasSideEffects AtomicWriteAddrOp_Word = True primOpHasSideEffects NewMutVarOp = True primOpHasSideEffects ReadMutVarOp = True primOpHasSideEffects WriteMutVarOp = True primOpHasSideEffects AtomicModifyMutVar2Op = True primOpHasSideEffects AtomicModifyMutVar_Op = True primOpHasSideEffects CasMutVarOp = True primOpHasSideEffects CatchOp = True primOpHasSideEffects RaiseIOOp = True primOpHasSideEffects MaskAsyncExceptionsOp = True primOpHasSideEffects MaskUninterruptibleOp = True primOpHasSideEffects UnmaskAsyncExceptionsOp = True primOpHasSideEffects MaskStatus = True primOpHasSideEffects AtomicallyOp = True primOpHasSideEffects RetryOp = True primOpHasSideEffects CatchRetryOp = True primOpHasSideEffects CatchSTMOp = True primOpHasSideEffects NewTVarOp = True primOpHasSideEffects ReadTVarOp = True primOpHasSideEffects ReadTVarIOOp = True primOpHasSideEffects WriteTVarOp = True primOpHasSideEffects NewMVarOp = True primOpHasSideEffects TakeMVarOp = True primOpHasSideEffects TryTakeMVarOp = True primOpHasSideEffects PutMVarOp = True primOpHasSideEffects TryPutMVarOp = True primOpHasSideEffects ReadMVarOp = True primOpHasSideEffects TryReadMVarOp = True primOpHasSideEffects IsEmptyMVarOp = True primOpHasSideEffects NewIOPortOp = True primOpHasSideEffects ReadIOPortOp = True primOpHasSideEffects WriteIOPortOp = True primOpHasSideEffects DelayOp = True primOpHasSideEffects WaitReadOp = True primOpHasSideEffects WaitWriteOp = True primOpHasSideEffects ForkOp = True primOpHasSideEffects ForkOnOp = True primOpHasSideEffects KillThreadOp = True primOpHasSideEffects YieldOp = True primOpHasSideEffects MyThreadIdOp = True primOpHasSideEffects LabelThreadOp = True primOpHasSideEffects IsCurrentThreadBoundOp = True primOpHasSideEffects NoDuplicateOp = True primOpHasSideEffects ThreadStatusOp = True primOpHasSideEffects MkWeakOp = 
True primOpHasSideEffects MkWeakNoFinalizerOp = True primOpHasSideEffects AddCFinalizerToWeakOp = True primOpHasSideEffects DeRefWeakOp = True primOpHasSideEffects FinalizeWeakOp = True primOpHasSideEffects TouchOp = True primOpHasSideEffects MakeStablePtrOp = True primOpHasSideEffects DeRefStablePtrOp = True primOpHasSideEffects EqStablePtrOp = True primOpHasSideEffects MakeStableNameOp = True primOpHasSideEffects CompactNewOp = True primOpHasSideEffects CompactResizeOp = True primOpHasSideEffects CompactAllocateBlockOp = True primOpHasSideEffects CompactFixupPointersOp = True primOpHasSideEffects CompactAdd = True primOpHasSideEffects CompactAddWithSharing = True primOpHasSideEffects CompactSize = True primOpHasSideEffects ParOp = True primOpHasSideEffects SparkOp = True primOpHasSideEffects GetSparkOp = True primOpHasSideEffects NumSparks = True primOpHasSideEffects NewBCOOp = True primOpHasSideEffects TraceEventOp = True primOpHasSideEffects TraceEventBinaryOp = True primOpHasSideEffects TraceMarkerOp = True primOpHasSideEffects SetThreadAllocationCounter = True primOpHasSideEffects (VecReadByteArrayOp _ _ _) = True primOpHasSideEffects (VecWriteByteArrayOp _ _ _) = True primOpHasSideEffects (VecReadOffAddrOp _ _ _) = True primOpHasSideEffects (VecWriteOffAddrOp _ _ _) = True primOpHasSideEffects (VecReadScalarByteArrayOp _ _ _) = True primOpHasSideEffects (VecWriteScalarByteArrayOp _ _ _) = True primOpHasSideEffects (VecReadScalarOffAddrOp _ _ _) = True primOpHasSideEffects (VecWriteScalarOffAddrOp _ _ _) = True primOpHasSideEffects PrefetchByteArrayOp3 = True primOpHasSideEffects PrefetchMutableByteArrayOp3 = True primOpHasSideEffects PrefetchAddrOp3 = True primOpHasSideEffects PrefetchValueOp3 = True primOpHasSideEffects PrefetchByteArrayOp2 = True primOpHasSideEffects PrefetchMutableByteArrayOp2 = True primOpHasSideEffects PrefetchAddrOp2 = True primOpHasSideEffects PrefetchValueOp2 = True primOpHasSideEffects PrefetchByteArrayOp1 = True 
primOpHasSideEffects PrefetchMutableByteArrayOp1 = True primOpHasSideEffects PrefetchAddrOp1 = True primOpHasSideEffects PrefetchValueOp1 = True primOpHasSideEffects PrefetchByteArrayOp0 = True primOpHasSideEffects PrefetchMutableByteArrayOp0 = True primOpHasSideEffects PrefetchAddrOp0 = True primOpHasSideEffects PrefetchValueOp0 = True primOpHasSideEffects _ = False ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-list.hs-incl0000644000000000000000000011210714472400111024560 0ustar0000000000000000 [CharGtOp , CharGeOp , CharEqOp , CharNeOp , CharLtOp , CharLeOp , OrdOp , Int8ToIntOp , IntToInt8Op , Int8NegOp , Int8AddOp , Int8SubOp , Int8MulOp , Int8QuotOp , Int8RemOp , Int8QuotRemOp , Int8SllOp , Int8SraOp , Int8SrlOp , Int8ToWord8Op , Int8EqOp , Int8GeOp , Int8GtOp , Int8LeOp , Int8LtOp , Int8NeOp , Word8ToWordOp , WordToWord8Op , Word8AddOp , Word8SubOp , Word8MulOp , Word8QuotOp , Word8RemOp , Word8QuotRemOp , Word8AndOp , Word8OrOp , Word8XorOp , Word8NotOp , Word8SllOp , Word8SrlOp , Word8ToInt8Op , Word8EqOp , Word8GeOp , Word8GtOp , Word8LeOp , Word8LtOp , Word8NeOp , Int16ToIntOp , IntToInt16Op , Int16NegOp , Int16AddOp , Int16SubOp , Int16MulOp , Int16QuotOp , Int16RemOp , Int16QuotRemOp , Int16SllOp , Int16SraOp , Int16SrlOp , Int16ToWord16Op , Int16EqOp , Int16GeOp , Int16GtOp , Int16LeOp , Int16LtOp , Int16NeOp , Word16ToWordOp , WordToWord16Op , Word16AddOp , Word16SubOp , Word16MulOp , Word16QuotOp , Word16RemOp , Word16QuotRemOp , Word16AndOp , Word16OrOp , Word16XorOp , Word16NotOp , Word16SllOp , Word16SrlOp , Word16ToInt16Op , Word16EqOp , Word16GeOp , Word16GtOp , Word16LeOp , Word16LtOp , Word16NeOp , Int32ToIntOp , IntToInt32Op , Int32NegOp , Int32AddOp , Int32SubOp , Int32MulOp , Int32QuotOp , Int32RemOp , Int32QuotRemOp , Int32SllOp , Int32SraOp , Int32SrlOp , Int32ToWord32Op , Int32EqOp , Int32GeOp , Int32GtOp , Int32LeOp , Int32LtOp , Int32NeOp , Word32ToWordOp , WordToWord32Op , Word32AddOp , Word32SubOp , Word32MulOp , 
Word32QuotOp , Word32RemOp , Word32QuotRemOp , Word32AndOp , Word32OrOp , Word32XorOp , Word32NotOp , Word32SllOp , Word32SrlOp , Word32ToInt32Op , Word32EqOp , Word32GeOp , Word32GtOp , Word32LeOp , Word32LtOp , Word32NeOp , Int64ToIntOp , IntToInt64Op , Int64NegOp , Int64AddOp , Int64SubOp , Int64MulOp , Int64QuotOp , Int64RemOp , Int64SllOp , Int64SraOp , Int64SrlOp , Int64ToWord64Op , Int64EqOp , Int64GeOp , Int64GtOp , Int64LeOp , Int64LtOp , Int64NeOp , Word64ToWordOp , WordToWord64Op , Word64AddOp , Word64SubOp , Word64MulOp , Word64QuotOp , Word64RemOp , Word64AndOp , Word64OrOp , Word64XorOp , Word64NotOp , Word64SllOp , Word64SrlOp , Word64ToInt64Op , Word64EqOp , Word64GeOp , Word64GtOp , Word64LeOp , Word64LtOp , Word64NeOp , IntAddOp , IntSubOp , IntMulOp , IntMul2Op , IntMulMayOfloOp , IntQuotOp , IntRemOp , IntQuotRemOp , IntAndOp , IntOrOp , IntXorOp , IntNotOp , IntNegOp , IntAddCOp , IntSubCOp , IntGtOp , IntGeOp , IntEqOp , IntNeOp , IntLtOp , IntLeOp , ChrOp , IntToWordOp , IntToFloatOp , IntToDoubleOp , WordToFloatOp , WordToDoubleOp , IntSllOp , IntSraOp , IntSrlOp , WordAddOp , WordAddCOp , WordSubCOp , WordAdd2Op , WordSubOp , WordMulOp , WordMul2Op , WordQuotOp , WordRemOp , WordQuotRemOp , WordQuotRem2Op , WordAndOp , WordOrOp , WordXorOp , WordNotOp , WordSllOp , WordSrlOp , WordToIntOp , WordGtOp , WordGeOp , WordEqOp , WordNeOp , WordLtOp , WordLeOp , PopCnt8Op , PopCnt16Op , PopCnt32Op , PopCnt64Op , PopCntOp , Pdep8Op , Pdep16Op , Pdep32Op , Pdep64Op , PdepOp , Pext8Op , Pext16Op , Pext32Op , Pext64Op , PextOp , Clz8Op , Clz16Op , Clz32Op , Clz64Op , ClzOp , Ctz8Op , Ctz16Op , Ctz32Op , Ctz64Op , CtzOp , BSwap16Op , BSwap32Op , BSwap64Op , BSwapOp , BRev8Op , BRev16Op , BRev32Op , BRev64Op , BRevOp , Narrow8IntOp , Narrow16IntOp , Narrow32IntOp , Narrow8WordOp , Narrow16WordOp , Narrow32WordOp , DoubleGtOp , DoubleGeOp , DoubleEqOp , DoubleNeOp , DoubleLtOp , DoubleLeOp , DoubleAddOp , DoubleSubOp , DoubleMulOp , DoubleDivOp , 
DoubleNegOp , DoubleFabsOp , DoubleToIntOp , DoubleToFloatOp , DoubleExpOp , DoubleExpM1Op , DoubleLogOp , DoubleLog1POp , DoubleSqrtOp , DoubleSinOp , DoubleCosOp , DoubleTanOp , DoubleAsinOp , DoubleAcosOp , DoubleAtanOp , DoubleSinhOp , DoubleCoshOp , DoubleTanhOp , DoubleAsinhOp , DoubleAcoshOp , DoubleAtanhOp , DoublePowerOp , DoubleDecode_2IntOp , DoubleDecode_Int64Op , FloatGtOp , FloatGeOp , FloatEqOp , FloatNeOp , FloatLtOp , FloatLeOp , FloatAddOp , FloatSubOp , FloatMulOp , FloatDivOp , FloatNegOp , FloatFabsOp , FloatToIntOp , FloatExpOp , FloatExpM1Op , FloatLogOp , FloatLog1POp , FloatSqrtOp , FloatSinOp , FloatCosOp , FloatTanOp , FloatAsinOp , FloatAcosOp , FloatAtanOp , FloatSinhOp , FloatCoshOp , FloatTanhOp , FloatAsinhOp , FloatAcoshOp , FloatAtanhOp , FloatPowerOp , FloatToDoubleOp , FloatDecode_IntOp , NewArrayOp , ReadArrayOp , WriteArrayOp , SizeofArrayOp , SizeofMutableArrayOp , IndexArrayOp , UnsafeFreezeArrayOp , UnsafeThawArrayOp , CopyArrayOp , CopyMutableArrayOp , CloneArrayOp , CloneMutableArrayOp , FreezeArrayOp , ThawArrayOp , CasArrayOp , NewSmallArrayOp , ShrinkSmallMutableArrayOp_Char , ReadSmallArrayOp , WriteSmallArrayOp , SizeofSmallArrayOp , SizeofSmallMutableArrayOp , GetSizeofSmallMutableArrayOp , IndexSmallArrayOp , UnsafeFreezeSmallArrayOp , UnsafeThawSmallArrayOp , CopySmallArrayOp , CopySmallMutableArrayOp , CloneSmallArrayOp , CloneSmallMutableArrayOp , FreezeSmallArrayOp , ThawSmallArrayOp , CasSmallArrayOp , NewByteArrayOp_Char , NewPinnedByteArrayOp_Char , NewAlignedPinnedByteArrayOp_Char , MutableByteArrayIsPinnedOp , ByteArrayIsPinnedOp , ByteArrayContents_Char , MutableByteArrayContents_Char , ShrinkMutableByteArrayOp_Char , ResizeMutableByteArrayOp_Char , UnsafeFreezeByteArrayOp , SizeofByteArrayOp , SizeofMutableByteArrayOp , GetSizeofMutableByteArrayOp , IndexByteArrayOp_Char , IndexByteArrayOp_WideChar , IndexByteArrayOp_Int , IndexByteArrayOp_Word , IndexByteArrayOp_Addr , IndexByteArrayOp_Float , 
IndexByteArrayOp_Double , IndexByteArrayOp_StablePtr , IndexByteArrayOp_Int8 , IndexByteArrayOp_Int16 , IndexByteArrayOp_Int32 , IndexByteArrayOp_Int64 , IndexByteArrayOp_Word8 , IndexByteArrayOp_Word16 , IndexByteArrayOp_Word32 , IndexByteArrayOp_Word64 , IndexByteArrayOp_Word8AsChar , IndexByteArrayOp_Word8AsWideChar , IndexByteArrayOp_Word8AsInt , IndexByteArrayOp_Word8AsWord , IndexByteArrayOp_Word8AsAddr , IndexByteArrayOp_Word8AsFloat , IndexByteArrayOp_Word8AsDouble , IndexByteArrayOp_Word8AsStablePtr , IndexByteArrayOp_Word8AsInt16 , IndexByteArrayOp_Word8AsInt32 , IndexByteArrayOp_Word8AsInt64 , IndexByteArrayOp_Word8AsWord16 , IndexByteArrayOp_Word8AsWord32 , IndexByteArrayOp_Word8AsWord64 , ReadByteArrayOp_Char , ReadByteArrayOp_WideChar , ReadByteArrayOp_Int , ReadByteArrayOp_Word , ReadByteArrayOp_Addr , ReadByteArrayOp_Float , ReadByteArrayOp_Double , ReadByteArrayOp_StablePtr , ReadByteArrayOp_Int8 , ReadByteArrayOp_Int16 , ReadByteArrayOp_Int32 , ReadByteArrayOp_Int64 , ReadByteArrayOp_Word8 , ReadByteArrayOp_Word16 , ReadByteArrayOp_Word32 , ReadByteArrayOp_Word64 , ReadByteArrayOp_Word8AsChar , ReadByteArrayOp_Word8AsWideChar , ReadByteArrayOp_Word8AsInt , ReadByteArrayOp_Word8AsWord , ReadByteArrayOp_Word8AsAddr , ReadByteArrayOp_Word8AsFloat , ReadByteArrayOp_Word8AsDouble , ReadByteArrayOp_Word8AsStablePtr , ReadByteArrayOp_Word8AsInt16 , ReadByteArrayOp_Word8AsInt32 , ReadByteArrayOp_Word8AsInt64 , ReadByteArrayOp_Word8AsWord16 , ReadByteArrayOp_Word8AsWord32 , ReadByteArrayOp_Word8AsWord64 , WriteByteArrayOp_Char , WriteByteArrayOp_WideChar , WriteByteArrayOp_Int , WriteByteArrayOp_Word , WriteByteArrayOp_Addr , WriteByteArrayOp_Float , WriteByteArrayOp_Double , WriteByteArrayOp_StablePtr , WriteByteArrayOp_Int8 , WriteByteArrayOp_Int16 , WriteByteArrayOp_Int32 , WriteByteArrayOp_Int64 , WriteByteArrayOp_Word8 , WriteByteArrayOp_Word16 , WriteByteArrayOp_Word32 , WriteByteArrayOp_Word64 , WriteByteArrayOp_Word8AsChar , 
WriteByteArrayOp_Word8AsWideChar , WriteByteArrayOp_Word8AsInt , WriteByteArrayOp_Word8AsWord , WriteByteArrayOp_Word8AsAddr , WriteByteArrayOp_Word8AsFloat , WriteByteArrayOp_Word8AsDouble , WriteByteArrayOp_Word8AsStablePtr , WriteByteArrayOp_Word8AsInt16 , WriteByteArrayOp_Word8AsInt32 , WriteByteArrayOp_Word8AsInt64 , WriteByteArrayOp_Word8AsWord16 , WriteByteArrayOp_Word8AsWord32 , WriteByteArrayOp_Word8AsWord64 , CompareByteArraysOp , CopyByteArrayOp , CopyMutableByteArrayOp , CopyByteArrayToAddrOp , CopyMutableByteArrayToAddrOp , CopyAddrToByteArrayOp , SetByteArrayOp , AtomicReadByteArrayOp_Int , AtomicWriteByteArrayOp_Int , CasByteArrayOp_Int , CasByteArrayOp_Int8 , CasByteArrayOp_Int16 , CasByteArrayOp_Int32 , CasByteArrayOp_Int64 , FetchAddByteArrayOp_Int , FetchSubByteArrayOp_Int , FetchAndByteArrayOp_Int , FetchNandByteArrayOp_Int , FetchOrByteArrayOp_Int , FetchXorByteArrayOp_Int , AddrAddOp , AddrSubOp , AddrRemOp , AddrToIntOp , IntToAddrOp , AddrGtOp , AddrGeOp , AddrEqOp , AddrNeOp , AddrLtOp , AddrLeOp , IndexOffAddrOp_Char , IndexOffAddrOp_WideChar , IndexOffAddrOp_Int , IndexOffAddrOp_Word , IndexOffAddrOp_Addr , IndexOffAddrOp_Float , IndexOffAddrOp_Double , IndexOffAddrOp_StablePtr , IndexOffAddrOp_Int8 , IndexOffAddrOp_Int16 , IndexOffAddrOp_Int32 , IndexOffAddrOp_Int64 , IndexOffAddrOp_Word8 , IndexOffAddrOp_Word16 , IndexOffAddrOp_Word32 , IndexOffAddrOp_Word64 , ReadOffAddrOp_Char , ReadOffAddrOp_WideChar , ReadOffAddrOp_Int , ReadOffAddrOp_Word , ReadOffAddrOp_Addr , ReadOffAddrOp_Float , ReadOffAddrOp_Double , ReadOffAddrOp_StablePtr , ReadOffAddrOp_Int8 , ReadOffAddrOp_Int16 , ReadOffAddrOp_Int32 , ReadOffAddrOp_Int64 , ReadOffAddrOp_Word8 , ReadOffAddrOp_Word16 , ReadOffAddrOp_Word32 , ReadOffAddrOp_Word64 , WriteOffAddrOp_Char , WriteOffAddrOp_WideChar , WriteOffAddrOp_Int , WriteOffAddrOp_Word , WriteOffAddrOp_Addr , WriteOffAddrOp_Float , WriteOffAddrOp_Double , WriteOffAddrOp_StablePtr , WriteOffAddrOp_Int8 , WriteOffAddrOp_Int16 
, WriteOffAddrOp_Int32 , WriteOffAddrOp_Int64 , WriteOffAddrOp_Word8 , WriteOffAddrOp_Word16 , WriteOffAddrOp_Word32 , WriteOffAddrOp_Word64 , InterlockedExchange_Addr , InterlockedExchange_Word , CasAddrOp_Addr , CasAddrOp_Word , CasAddrOp_Word8 , CasAddrOp_Word16 , CasAddrOp_Word32 , CasAddrOp_Word64 , FetchAddAddrOp_Word , FetchSubAddrOp_Word , FetchAndAddrOp_Word , FetchNandAddrOp_Word , FetchOrAddrOp_Word , FetchXorAddrOp_Word , AtomicReadAddrOp_Word , AtomicWriteAddrOp_Word , NewMutVarOp , ReadMutVarOp , WriteMutVarOp , AtomicModifyMutVar2Op , AtomicModifyMutVar_Op , CasMutVarOp , CatchOp , RaiseOp , RaiseIOOp , MaskAsyncExceptionsOp , MaskUninterruptibleOp , UnmaskAsyncExceptionsOp , MaskStatus , AtomicallyOp , RetryOp , CatchRetryOp , CatchSTMOp , NewTVarOp , ReadTVarOp , ReadTVarIOOp , WriteTVarOp , NewMVarOp , TakeMVarOp , TryTakeMVarOp , PutMVarOp , TryPutMVarOp , ReadMVarOp , TryReadMVarOp , IsEmptyMVarOp , NewIOPortOp , ReadIOPortOp , WriteIOPortOp , DelayOp , WaitReadOp , WaitWriteOp , ForkOp , ForkOnOp , KillThreadOp , YieldOp , MyThreadIdOp , LabelThreadOp , IsCurrentThreadBoundOp , NoDuplicateOp , ThreadStatusOp , MkWeakOp , MkWeakNoFinalizerOp , AddCFinalizerToWeakOp , DeRefWeakOp , FinalizeWeakOp , TouchOp , MakeStablePtrOp , DeRefStablePtrOp , EqStablePtrOp , MakeStableNameOp , StableNameToIntOp , CompactNewOp , CompactResizeOp , CompactContainsOp , CompactContainsAnyOp , CompactGetFirstBlockOp , CompactGetNextBlockOp , CompactAllocateBlockOp , CompactFixupPointersOp , CompactAdd , CompactAddWithSharing , CompactSize , ReallyUnsafePtrEqualityOp , ParOp , SparkOp , SeqOp , GetSparkOp , NumSparks , KeepAliveOp , DataToTagOp , TagToEnumOp , AddrToAnyOp , AnyToAddrOp , MkApUpd0_Op , NewBCOOp , UnpackClosureOp , ClosureSizeOp , GetApStackValOp , GetCCSOfOp , GetCurrentCCSOp , ClearCCSOp , WhereFromOp , TraceEventOp , TraceEventBinaryOp , TraceMarkerOp , SetThreadAllocationCounter , (VecBroadcastOp IntVec 16 W8) , (VecBroadcastOp IntVec 8 W16) , 
(VecBroadcastOp IntVec 4 W32) , (VecBroadcastOp IntVec 2 W64) , (VecBroadcastOp IntVec 32 W8) , (VecBroadcastOp IntVec 16 W16) , (VecBroadcastOp IntVec 8 W32) , (VecBroadcastOp IntVec 4 W64) , (VecBroadcastOp IntVec 64 W8) , (VecBroadcastOp IntVec 32 W16) , (VecBroadcastOp IntVec 16 W32) , (VecBroadcastOp IntVec 8 W64) , (VecBroadcastOp WordVec 16 W8) , (VecBroadcastOp WordVec 8 W16) , (VecBroadcastOp WordVec 4 W32) , (VecBroadcastOp WordVec 2 W64) , (VecBroadcastOp WordVec 32 W8) , (VecBroadcastOp WordVec 16 W16) , (VecBroadcastOp WordVec 8 W32) , (VecBroadcastOp WordVec 4 W64) , (VecBroadcastOp WordVec 64 W8) , (VecBroadcastOp WordVec 32 W16) , (VecBroadcastOp WordVec 16 W32) , (VecBroadcastOp WordVec 8 W64) , (VecBroadcastOp FloatVec 4 W32) , (VecBroadcastOp FloatVec 2 W64) , (VecBroadcastOp FloatVec 8 W32) , (VecBroadcastOp FloatVec 4 W64) , (VecBroadcastOp FloatVec 16 W32) , (VecBroadcastOp FloatVec 8 W64) , (VecPackOp IntVec 16 W8) , (VecPackOp IntVec 8 W16) , (VecPackOp IntVec 4 W32) , (VecPackOp IntVec 2 W64) , (VecPackOp IntVec 32 W8) , (VecPackOp IntVec 16 W16) , (VecPackOp IntVec 8 W32) , (VecPackOp IntVec 4 W64) , (VecPackOp IntVec 64 W8) , (VecPackOp IntVec 32 W16) , (VecPackOp IntVec 16 W32) , (VecPackOp IntVec 8 W64) , (VecPackOp WordVec 16 W8) , (VecPackOp WordVec 8 W16) , (VecPackOp WordVec 4 W32) , (VecPackOp WordVec 2 W64) , (VecPackOp WordVec 32 W8) , (VecPackOp WordVec 16 W16) , (VecPackOp WordVec 8 W32) , (VecPackOp WordVec 4 W64) , (VecPackOp WordVec 64 W8) , (VecPackOp WordVec 32 W16) , (VecPackOp WordVec 16 W32) , (VecPackOp WordVec 8 W64) , (VecPackOp FloatVec 4 W32) , (VecPackOp FloatVec 2 W64) , (VecPackOp FloatVec 8 W32) , (VecPackOp FloatVec 4 W64) , (VecPackOp FloatVec 16 W32) , (VecPackOp FloatVec 8 W64) , (VecUnpackOp IntVec 16 W8) , (VecUnpackOp IntVec 8 W16) , (VecUnpackOp IntVec 4 W32) , (VecUnpackOp IntVec 2 W64) , (VecUnpackOp IntVec 32 W8) , (VecUnpackOp IntVec 16 W16) , (VecUnpackOp IntVec 8 W32) , (VecUnpackOp IntVec 4 W64) 
, (VecUnpackOp IntVec 64 W8) , (VecUnpackOp IntVec 32 W16) , (VecUnpackOp IntVec 16 W32) , (VecUnpackOp IntVec 8 W64) , (VecUnpackOp WordVec 16 W8) , (VecUnpackOp WordVec 8 W16) , (VecUnpackOp WordVec 4 W32) , (VecUnpackOp WordVec 2 W64) , (VecUnpackOp WordVec 32 W8) , (VecUnpackOp WordVec 16 W16) , (VecUnpackOp WordVec 8 W32) , (VecUnpackOp WordVec 4 W64) , (VecUnpackOp WordVec 64 W8) , (VecUnpackOp WordVec 32 W16) , (VecUnpackOp WordVec 16 W32) , (VecUnpackOp WordVec 8 W64) , (VecUnpackOp FloatVec 4 W32) , (VecUnpackOp FloatVec 2 W64) , (VecUnpackOp FloatVec 8 W32) , (VecUnpackOp FloatVec 4 W64) , (VecUnpackOp FloatVec 16 W32) , (VecUnpackOp FloatVec 8 W64) , (VecInsertOp IntVec 16 W8) , (VecInsertOp IntVec 8 W16) , (VecInsertOp IntVec 4 W32) , (VecInsertOp IntVec 2 W64) , (VecInsertOp IntVec 32 W8) , (VecInsertOp IntVec 16 W16) , (VecInsertOp IntVec 8 W32) , (VecInsertOp IntVec 4 W64) , (VecInsertOp IntVec 64 W8) , (VecInsertOp IntVec 32 W16) , (VecInsertOp IntVec 16 W32) , (VecInsertOp IntVec 8 W64) , (VecInsertOp WordVec 16 W8) , (VecInsertOp WordVec 8 W16) , (VecInsertOp WordVec 4 W32) , (VecInsertOp WordVec 2 W64) , (VecInsertOp WordVec 32 W8) , (VecInsertOp WordVec 16 W16) , (VecInsertOp WordVec 8 W32) , (VecInsertOp WordVec 4 W64) , (VecInsertOp WordVec 64 W8) , (VecInsertOp WordVec 32 W16) , (VecInsertOp WordVec 16 W32) , (VecInsertOp WordVec 8 W64) , (VecInsertOp FloatVec 4 W32) , (VecInsertOp FloatVec 2 W64) , (VecInsertOp FloatVec 8 W32) , (VecInsertOp FloatVec 4 W64) , (VecInsertOp FloatVec 16 W32) , (VecInsertOp FloatVec 8 W64) , (VecAddOp IntVec 16 W8) , (VecAddOp IntVec 8 W16) , (VecAddOp IntVec 4 W32) , (VecAddOp IntVec 2 W64) , (VecAddOp IntVec 32 W8) , (VecAddOp IntVec 16 W16) , (VecAddOp IntVec 8 W32) , (VecAddOp IntVec 4 W64) , (VecAddOp IntVec 64 W8) , (VecAddOp IntVec 32 W16) , (VecAddOp IntVec 16 W32) , (VecAddOp IntVec 8 W64) , (VecAddOp WordVec 16 W8) , (VecAddOp WordVec 8 W16) , (VecAddOp WordVec 4 W32) , (VecAddOp WordVec 2 W64) , 
(VecAddOp WordVec 32 W8) , (VecAddOp WordVec 16 W16) , (VecAddOp WordVec 8 W32) , (VecAddOp WordVec 4 W64) , (VecAddOp WordVec 64 W8) , (VecAddOp WordVec 32 W16) , (VecAddOp WordVec 16 W32) , (VecAddOp WordVec 8 W64) , (VecAddOp FloatVec 4 W32) , (VecAddOp FloatVec 2 W64) , (VecAddOp FloatVec 8 W32) , (VecAddOp FloatVec 4 W64) , (VecAddOp FloatVec 16 W32) , (VecAddOp FloatVec 8 W64) , (VecSubOp IntVec 16 W8) , (VecSubOp IntVec 8 W16) , (VecSubOp IntVec 4 W32) , (VecSubOp IntVec 2 W64) , (VecSubOp IntVec 32 W8) , (VecSubOp IntVec 16 W16) , (VecSubOp IntVec 8 W32) , (VecSubOp IntVec 4 W64) , (VecSubOp IntVec 64 W8) , (VecSubOp IntVec 32 W16) , (VecSubOp IntVec 16 W32) , (VecSubOp IntVec 8 W64) , (VecSubOp WordVec 16 W8) , (VecSubOp WordVec 8 W16) , (VecSubOp WordVec 4 W32) , (VecSubOp WordVec 2 W64) , (VecSubOp WordVec 32 W8) , (VecSubOp WordVec 16 W16) , (VecSubOp WordVec 8 W32) , (VecSubOp WordVec 4 W64) , (VecSubOp WordVec 64 W8) , (VecSubOp WordVec 32 W16) , (VecSubOp WordVec 16 W32) , (VecSubOp WordVec 8 W64) , (VecSubOp FloatVec 4 W32) , (VecSubOp FloatVec 2 W64) , (VecSubOp FloatVec 8 W32) , (VecSubOp FloatVec 4 W64) , (VecSubOp FloatVec 16 W32) , (VecSubOp FloatVec 8 W64) , (VecMulOp IntVec 16 W8) , (VecMulOp IntVec 8 W16) , (VecMulOp IntVec 4 W32) , (VecMulOp IntVec 2 W64) , (VecMulOp IntVec 32 W8) , (VecMulOp IntVec 16 W16) , (VecMulOp IntVec 8 W32) , (VecMulOp IntVec 4 W64) , (VecMulOp IntVec 64 W8) , (VecMulOp IntVec 32 W16) , (VecMulOp IntVec 16 W32) , (VecMulOp IntVec 8 W64) , (VecMulOp WordVec 16 W8) , (VecMulOp WordVec 8 W16) , (VecMulOp WordVec 4 W32) , (VecMulOp WordVec 2 W64) , (VecMulOp WordVec 32 W8) , (VecMulOp WordVec 16 W16) , (VecMulOp WordVec 8 W32) , (VecMulOp WordVec 4 W64) , (VecMulOp WordVec 64 W8) , (VecMulOp WordVec 32 W16) , (VecMulOp WordVec 16 W32) , (VecMulOp WordVec 8 W64) , (VecMulOp FloatVec 4 W32) , (VecMulOp FloatVec 2 W64) , (VecMulOp FloatVec 8 W32) , (VecMulOp FloatVec 4 W64) , (VecMulOp FloatVec 16 W32) , (VecMulOp 
FloatVec 8 W64) , (VecDivOp FloatVec 4 W32) , (VecDivOp FloatVec 2 W64) , (VecDivOp FloatVec 8 W32) , (VecDivOp FloatVec 4 W64) , (VecDivOp FloatVec 16 W32) , (VecDivOp FloatVec 8 W64) , (VecQuotOp IntVec 16 W8) , (VecQuotOp IntVec 8 W16) , (VecQuotOp IntVec 4 W32) , (VecQuotOp IntVec 2 W64) , (VecQuotOp IntVec 32 W8) , (VecQuotOp IntVec 16 W16) , (VecQuotOp IntVec 8 W32) , (VecQuotOp IntVec 4 W64) , (VecQuotOp IntVec 64 W8) , (VecQuotOp IntVec 32 W16) , (VecQuotOp IntVec 16 W32) , (VecQuotOp IntVec 8 W64) , (VecQuotOp WordVec 16 W8) , (VecQuotOp WordVec 8 W16) , (VecQuotOp WordVec 4 W32) , (VecQuotOp WordVec 2 W64) , (VecQuotOp WordVec 32 W8) , (VecQuotOp WordVec 16 W16) , (VecQuotOp WordVec 8 W32) , (VecQuotOp WordVec 4 W64) , (VecQuotOp WordVec 64 W8) , (VecQuotOp WordVec 32 W16) , (VecQuotOp WordVec 16 W32) , (VecQuotOp WordVec 8 W64) , (VecRemOp IntVec 16 W8) , (VecRemOp IntVec 8 W16) , (VecRemOp IntVec 4 W32) , (VecRemOp IntVec 2 W64) , (VecRemOp IntVec 32 W8) , (VecRemOp IntVec 16 W16) , (VecRemOp IntVec 8 W32) , (VecRemOp IntVec 4 W64) , (VecRemOp IntVec 64 W8) , (VecRemOp IntVec 32 W16) , (VecRemOp IntVec 16 W32) , (VecRemOp IntVec 8 W64) , (VecRemOp WordVec 16 W8) , (VecRemOp WordVec 8 W16) , (VecRemOp WordVec 4 W32) , (VecRemOp WordVec 2 W64) , (VecRemOp WordVec 32 W8) , (VecRemOp WordVec 16 W16) , (VecRemOp WordVec 8 W32) , (VecRemOp WordVec 4 W64) , (VecRemOp WordVec 64 W8) , (VecRemOp WordVec 32 W16) , (VecRemOp WordVec 16 W32) , (VecRemOp WordVec 8 W64) , (VecNegOp IntVec 16 W8) , (VecNegOp IntVec 8 W16) , (VecNegOp IntVec 4 W32) , (VecNegOp IntVec 2 W64) , (VecNegOp IntVec 32 W8) , (VecNegOp IntVec 16 W16) , (VecNegOp IntVec 8 W32) , (VecNegOp IntVec 4 W64) , (VecNegOp IntVec 64 W8) , (VecNegOp IntVec 32 W16) , (VecNegOp IntVec 16 W32) , (VecNegOp IntVec 8 W64) , (VecNegOp FloatVec 4 W32) , (VecNegOp FloatVec 2 W64) , (VecNegOp FloatVec 8 W32) , (VecNegOp FloatVec 4 W64) , (VecNegOp FloatVec 16 W32) , (VecNegOp FloatVec 8 W64) , (VecIndexByteArrayOp 
IntVec 16 W8) , (VecIndexByteArrayOp IntVec 8 W16) , (VecIndexByteArrayOp IntVec 4 W32) , (VecIndexByteArrayOp IntVec 2 W64) , (VecIndexByteArrayOp IntVec 32 W8) , (VecIndexByteArrayOp IntVec 16 W16) , (VecIndexByteArrayOp IntVec 8 W32) , (VecIndexByteArrayOp IntVec 4 W64) , (VecIndexByteArrayOp IntVec 64 W8) , (VecIndexByteArrayOp IntVec 32 W16) , (VecIndexByteArrayOp IntVec 16 W32) , (VecIndexByteArrayOp IntVec 8 W64) , (VecIndexByteArrayOp WordVec 16 W8) , (VecIndexByteArrayOp WordVec 8 W16) , (VecIndexByteArrayOp WordVec 4 W32) , (VecIndexByteArrayOp WordVec 2 W64) , (VecIndexByteArrayOp WordVec 32 W8) , (VecIndexByteArrayOp WordVec 16 W16) , (VecIndexByteArrayOp WordVec 8 W32) , (VecIndexByteArrayOp WordVec 4 W64) , (VecIndexByteArrayOp WordVec 64 W8) , (VecIndexByteArrayOp WordVec 32 W16) , (VecIndexByteArrayOp WordVec 16 W32) , (VecIndexByteArrayOp WordVec 8 W64) , (VecIndexByteArrayOp FloatVec 4 W32) , (VecIndexByteArrayOp FloatVec 2 W64) , (VecIndexByteArrayOp FloatVec 8 W32) , (VecIndexByteArrayOp FloatVec 4 W64) , (VecIndexByteArrayOp FloatVec 16 W32) , (VecIndexByteArrayOp FloatVec 8 W64) , (VecReadByteArrayOp IntVec 16 W8) , (VecReadByteArrayOp IntVec 8 W16) , (VecReadByteArrayOp IntVec 4 W32) , (VecReadByteArrayOp IntVec 2 W64) , (VecReadByteArrayOp IntVec 32 W8) , (VecReadByteArrayOp IntVec 16 W16) , (VecReadByteArrayOp IntVec 8 W32) , (VecReadByteArrayOp IntVec 4 W64) , (VecReadByteArrayOp IntVec 64 W8) , (VecReadByteArrayOp IntVec 32 W16) , (VecReadByteArrayOp IntVec 16 W32) , (VecReadByteArrayOp IntVec 8 W64) , (VecReadByteArrayOp WordVec 16 W8) , (VecReadByteArrayOp WordVec 8 W16) , (VecReadByteArrayOp WordVec 4 W32) , (VecReadByteArrayOp WordVec 2 W64) , (VecReadByteArrayOp WordVec 32 W8) , (VecReadByteArrayOp WordVec 16 W16) , (VecReadByteArrayOp WordVec 8 W32) , (VecReadByteArrayOp WordVec 4 W64) , (VecReadByteArrayOp WordVec 64 W8) , (VecReadByteArrayOp WordVec 32 W16) , (VecReadByteArrayOp WordVec 16 W32) , (VecReadByteArrayOp WordVec 8 W64) 
, (VecReadByteArrayOp FloatVec 4 W32) , (VecReadByteArrayOp FloatVec 2 W64) , (VecReadByteArrayOp FloatVec 8 W32) , (VecReadByteArrayOp FloatVec 4 W64) , (VecReadByteArrayOp FloatVec 16 W32) , (VecReadByteArrayOp FloatVec 8 W64) , (VecWriteByteArrayOp IntVec 16 W8) , (VecWriteByteArrayOp IntVec 8 W16) , (VecWriteByteArrayOp IntVec 4 W32) , (VecWriteByteArrayOp IntVec 2 W64) , (VecWriteByteArrayOp IntVec 32 W8) , (VecWriteByteArrayOp IntVec 16 W16) , (VecWriteByteArrayOp IntVec 8 W32) , (VecWriteByteArrayOp IntVec 4 W64) , (VecWriteByteArrayOp IntVec 64 W8) , (VecWriteByteArrayOp IntVec 32 W16) , (VecWriteByteArrayOp IntVec 16 W32) , (VecWriteByteArrayOp IntVec 8 W64) , (VecWriteByteArrayOp WordVec 16 W8) , (VecWriteByteArrayOp WordVec 8 W16) , (VecWriteByteArrayOp WordVec 4 W32) , (VecWriteByteArrayOp WordVec 2 W64) , (VecWriteByteArrayOp WordVec 32 W8) , (VecWriteByteArrayOp WordVec 16 W16) , (VecWriteByteArrayOp WordVec 8 W32) , (VecWriteByteArrayOp WordVec 4 W64) , (VecWriteByteArrayOp WordVec 64 W8) , (VecWriteByteArrayOp WordVec 32 W16) , (VecWriteByteArrayOp WordVec 16 W32) , (VecWriteByteArrayOp WordVec 8 W64) , (VecWriteByteArrayOp FloatVec 4 W32) , (VecWriteByteArrayOp FloatVec 2 W64) , (VecWriteByteArrayOp FloatVec 8 W32) , (VecWriteByteArrayOp FloatVec 4 W64) , (VecWriteByteArrayOp FloatVec 16 W32) , (VecWriteByteArrayOp FloatVec 8 W64) , (VecIndexOffAddrOp IntVec 16 W8) , (VecIndexOffAddrOp IntVec 8 W16) , (VecIndexOffAddrOp IntVec 4 W32) , (VecIndexOffAddrOp IntVec 2 W64) , (VecIndexOffAddrOp IntVec 32 W8) , (VecIndexOffAddrOp IntVec 16 W16) , (VecIndexOffAddrOp IntVec 8 W32) , (VecIndexOffAddrOp IntVec 4 W64) , (VecIndexOffAddrOp IntVec 64 W8) , (VecIndexOffAddrOp IntVec 32 W16) , (VecIndexOffAddrOp IntVec 16 W32) , (VecIndexOffAddrOp IntVec 8 W64) , (VecIndexOffAddrOp WordVec 16 W8) , (VecIndexOffAddrOp WordVec 8 W16) , (VecIndexOffAddrOp WordVec 4 W32) , (VecIndexOffAddrOp WordVec 2 W64) , (VecIndexOffAddrOp WordVec 32 W8) , (VecIndexOffAddrOp 
WordVec 16 W16) , (VecIndexOffAddrOp WordVec 8 W32) , (VecIndexOffAddrOp WordVec 4 W64) , (VecIndexOffAddrOp WordVec 64 W8) , (VecIndexOffAddrOp WordVec 32 W16) , (VecIndexOffAddrOp WordVec 16 W32) , (VecIndexOffAddrOp WordVec 8 W64) , (VecIndexOffAddrOp FloatVec 4 W32) , (VecIndexOffAddrOp FloatVec 2 W64) , (VecIndexOffAddrOp FloatVec 8 W32) , (VecIndexOffAddrOp FloatVec 4 W64) , (VecIndexOffAddrOp FloatVec 16 W32) , (VecIndexOffAddrOp FloatVec 8 W64) , (VecReadOffAddrOp IntVec 16 W8) , (VecReadOffAddrOp IntVec 8 W16) , (VecReadOffAddrOp IntVec 4 W32) , (VecReadOffAddrOp IntVec 2 W64) , (VecReadOffAddrOp IntVec 32 W8) , (VecReadOffAddrOp IntVec 16 W16) , (VecReadOffAddrOp IntVec 8 W32) , (VecReadOffAddrOp IntVec 4 W64) , (VecReadOffAddrOp IntVec 64 W8) , (VecReadOffAddrOp IntVec 32 W16) , (VecReadOffAddrOp IntVec 16 W32) , (VecReadOffAddrOp IntVec 8 W64) , (VecReadOffAddrOp WordVec 16 W8) , (VecReadOffAddrOp WordVec 8 W16) , (VecReadOffAddrOp WordVec 4 W32) , (VecReadOffAddrOp WordVec 2 W64) , (VecReadOffAddrOp WordVec 32 W8) , (VecReadOffAddrOp WordVec 16 W16) , (VecReadOffAddrOp WordVec 8 W32) , (VecReadOffAddrOp WordVec 4 W64) , (VecReadOffAddrOp WordVec 64 W8) , (VecReadOffAddrOp WordVec 32 W16) , (VecReadOffAddrOp WordVec 16 W32) , (VecReadOffAddrOp WordVec 8 W64) , (VecReadOffAddrOp FloatVec 4 W32) , (VecReadOffAddrOp FloatVec 2 W64) , (VecReadOffAddrOp FloatVec 8 W32) , (VecReadOffAddrOp FloatVec 4 W64) , (VecReadOffAddrOp FloatVec 16 W32) , (VecReadOffAddrOp FloatVec 8 W64) , (VecWriteOffAddrOp IntVec 16 W8) , (VecWriteOffAddrOp IntVec 8 W16) , (VecWriteOffAddrOp IntVec 4 W32) , (VecWriteOffAddrOp IntVec 2 W64) , (VecWriteOffAddrOp IntVec 32 W8) , (VecWriteOffAddrOp IntVec 16 W16) , (VecWriteOffAddrOp IntVec 8 W32) , (VecWriteOffAddrOp IntVec 4 W64) , (VecWriteOffAddrOp IntVec 64 W8) , (VecWriteOffAddrOp IntVec 32 W16) , (VecWriteOffAddrOp IntVec 16 W32) , (VecWriteOffAddrOp IntVec 8 W64) , (VecWriteOffAddrOp WordVec 16 W8) , (VecWriteOffAddrOp WordVec 8 
W16) , (VecWriteOffAddrOp WordVec 4 W32) , (VecWriteOffAddrOp WordVec 2 W64) , (VecWriteOffAddrOp WordVec 32 W8) , (VecWriteOffAddrOp WordVec 16 W16) , (VecWriteOffAddrOp WordVec 8 W32) , (VecWriteOffAddrOp WordVec 4 W64) , (VecWriteOffAddrOp WordVec 64 W8) , (VecWriteOffAddrOp WordVec 32 W16) , (VecWriteOffAddrOp WordVec 16 W32) , (VecWriteOffAddrOp WordVec 8 W64) , (VecWriteOffAddrOp FloatVec 4 W32) , (VecWriteOffAddrOp FloatVec 2 W64) , (VecWriteOffAddrOp FloatVec 8 W32) , (VecWriteOffAddrOp FloatVec 4 W64) , (VecWriteOffAddrOp FloatVec 16 W32) , (VecWriteOffAddrOp FloatVec 8 W64) , (VecIndexScalarByteArrayOp IntVec 16 W8) , (VecIndexScalarByteArrayOp IntVec 8 W16) , (VecIndexScalarByteArrayOp IntVec 4 W32) , (VecIndexScalarByteArrayOp IntVec 2 W64) , (VecIndexScalarByteArrayOp IntVec 32 W8) , (VecIndexScalarByteArrayOp IntVec 16 W16) , (VecIndexScalarByteArrayOp IntVec 8 W32) , (VecIndexScalarByteArrayOp IntVec 4 W64) , (VecIndexScalarByteArrayOp IntVec 64 W8) , (VecIndexScalarByteArrayOp IntVec 32 W16) , (VecIndexScalarByteArrayOp IntVec 16 W32) , (VecIndexScalarByteArrayOp IntVec 8 W64) , (VecIndexScalarByteArrayOp WordVec 16 W8) , (VecIndexScalarByteArrayOp WordVec 8 W16) , (VecIndexScalarByteArrayOp WordVec 4 W32) , (VecIndexScalarByteArrayOp WordVec 2 W64) , (VecIndexScalarByteArrayOp WordVec 32 W8) , (VecIndexScalarByteArrayOp WordVec 16 W16) , (VecIndexScalarByteArrayOp WordVec 8 W32) , (VecIndexScalarByteArrayOp WordVec 4 W64) , (VecIndexScalarByteArrayOp WordVec 64 W8) , (VecIndexScalarByteArrayOp WordVec 32 W16) , (VecIndexScalarByteArrayOp WordVec 16 W32) , (VecIndexScalarByteArrayOp WordVec 8 W64) , (VecIndexScalarByteArrayOp FloatVec 4 W32) , (VecIndexScalarByteArrayOp FloatVec 2 W64) , (VecIndexScalarByteArrayOp FloatVec 8 W32) , (VecIndexScalarByteArrayOp FloatVec 4 W64) , (VecIndexScalarByteArrayOp FloatVec 16 W32) , (VecIndexScalarByteArrayOp FloatVec 8 W64) , (VecReadScalarByteArrayOp IntVec 16 W8) , (VecReadScalarByteArrayOp IntVec 8 W16) , 
(VecReadScalarByteArrayOp IntVec 4 W32) , (VecReadScalarByteArrayOp IntVec 2 W64) , (VecReadScalarByteArrayOp IntVec 32 W8) , (VecReadScalarByteArrayOp IntVec 16 W16) , (VecReadScalarByteArrayOp IntVec 8 W32) , (VecReadScalarByteArrayOp IntVec 4 W64) , (VecReadScalarByteArrayOp IntVec 64 W8) , (VecReadScalarByteArrayOp IntVec 32 W16) , (VecReadScalarByteArrayOp IntVec 16 W32) , (VecReadScalarByteArrayOp IntVec 8 W64) , (VecReadScalarByteArrayOp WordVec 16 W8) , (VecReadScalarByteArrayOp WordVec 8 W16) , (VecReadScalarByteArrayOp WordVec 4 W32) , (VecReadScalarByteArrayOp WordVec 2 W64) , (VecReadScalarByteArrayOp WordVec 32 W8) , (VecReadScalarByteArrayOp WordVec 16 W16) , (VecReadScalarByteArrayOp WordVec 8 W32) , (VecReadScalarByteArrayOp WordVec 4 W64) , (VecReadScalarByteArrayOp WordVec 64 W8) , (VecReadScalarByteArrayOp WordVec 32 W16) , (VecReadScalarByteArrayOp WordVec 16 W32) , (VecReadScalarByteArrayOp WordVec 8 W64) , (VecReadScalarByteArrayOp FloatVec 4 W32) , (VecReadScalarByteArrayOp FloatVec 2 W64) , (VecReadScalarByteArrayOp FloatVec 8 W32) , (VecReadScalarByteArrayOp FloatVec 4 W64) , (VecReadScalarByteArrayOp FloatVec 16 W32) , (VecReadScalarByteArrayOp FloatVec 8 W64) , (VecWriteScalarByteArrayOp IntVec 16 W8) , (VecWriteScalarByteArrayOp IntVec 8 W16) , (VecWriteScalarByteArrayOp IntVec 4 W32) , (VecWriteScalarByteArrayOp IntVec 2 W64) , (VecWriteScalarByteArrayOp IntVec 32 W8) , (VecWriteScalarByteArrayOp IntVec 16 W16) , (VecWriteScalarByteArrayOp IntVec 8 W32) , (VecWriteScalarByteArrayOp IntVec 4 W64) , (VecWriteScalarByteArrayOp IntVec 64 W8) , (VecWriteScalarByteArrayOp IntVec 32 W16) , (VecWriteScalarByteArrayOp IntVec 16 W32) , (VecWriteScalarByteArrayOp IntVec 8 W64) , (VecWriteScalarByteArrayOp WordVec 16 W8) , (VecWriteScalarByteArrayOp WordVec 8 W16) , (VecWriteScalarByteArrayOp WordVec 4 W32) , (VecWriteScalarByteArrayOp WordVec 2 W64) , (VecWriteScalarByteArrayOp WordVec 32 W8) , (VecWriteScalarByteArrayOp WordVec 16 W16) , 
(VecWriteScalarByteArrayOp WordVec 8 W32) , (VecWriteScalarByteArrayOp WordVec 4 W64) , (VecWriteScalarByteArrayOp WordVec 64 W8) , (VecWriteScalarByteArrayOp WordVec 32 W16) , (VecWriteScalarByteArrayOp WordVec 16 W32) , (VecWriteScalarByteArrayOp WordVec 8 W64) , (VecWriteScalarByteArrayOp FloatVec 4 W32) , (VecWriteScalarByteArrayOp FloatVec 2 W64) , (VecWriteScalarByteArrayOp FloatVec 8 W32) , (VecWriteScalarByteArrayOp FloatVec 4 W64) , (VecWriteScalarByteArrayOp FloatVec 16 W32) , (VecWriteScalarByteArrayOp FloatVec 8 W64) , (VecIndexScalarOffAddrOp IntVec 16 W8) , (VecIndexScalarOffAddrOp IntVec 8 W16) , (VecIndexScalarOffAddrOp IntVec 4 W32) , (VecIndexScalarOffAddrOp IntVec 2 W64) , (VecIndexScalarOffAddrOp IntVec 32 W8) , (VecIndexScalarOffAddrOp IntVec 16 W16) , (VecIndexScalarOffAddrOp IntVec 8 W32) , (VecIndexScalarOffAddrOp IntVec 4 W64) , (VecIndexScalarOffAddrOp IntVec 64 W8) , (VecIndexScalarOffAddrOp IntVec 32 W16) , (VecIndexScalarOffAddrOp IntVec 16 W32) , (VecIndexScalarOffAddrOp IntVec 8 W64) , (VecIndexScalarOffAddrOp WordVec 16 W8) , (VecIndexScalarOffAddrOp WordVec 8 W16) , (VecIndexScalarOffAddrOp WordVec 4 W32) , (VecIndexScalarOffAddrOp WordVec 2 W64) , (VecIndexScalarOffAddrOp WordVec 32 W8) , (VecIndexScalarOffAddrOp WordVec 16 W16) , (VecIndexScalarOffAddrOp WordVec 8 W32) , (VecIndexScalarOffAddrOp WordVec 4 W64) , (VecIndexScalarOffAddrOp WordVec 64 W8) , (VecIndexScalarOffAddrOp WordVec 32 W16) , (VecIndexScalarOffAddrOp WordVec 16 W32) , (VecIndexScalarOffAddrOp WordVec 8 W64) , (VecIndexScalarOffAddrOp FloatVec 4 W32) , (VecIndexScalarOffAddrOp FloatVec 2 W64) , (VecIndexScalarOffAddrOp FloatVec 8 W32) , (VecIndexScalarOffAddrOp FloatVec 4 W64) , (VecIndexScalarOffAddrOp FloatVec 16 W32) , (VecIndexScalarOffAddrOp FloatVec 8 W64) , (VecReadScalarOffAddrOp IntVec 16 W8) , (VecReadScalarOffAddrOp IntVec 8 W16) , (VecReadScalarOffAddrOp IntVec 4 W32) , (VecReadScalarOffAddrOp IntVec 2 W64) , (VecReadScalarOffAddrOp IntVec 32 W8) , 
(VecReadScalarOffAddrOp IntVec 16 W16) , (VecReadScalarOffAddrOp IntVec 8 W32) , (VecReadScalarOffAddrOp IntVec 4 W64) , (VecReadScalarOffAddrOp IntVec 64 W8) , (VecReadScalarOffAddrOp IntVec 32 W16) , (VecReadScalarOffAddrOp IntVec 16 W32) , (VecReadScalarOffAddrOp IntVec 8 W64) , (VecReadScalarOffAddrOp WordVec 16 W8) , (VecReadScalarOffAddrOp WordVec 8 W16) , (VecReadScalarOffAddrOp WordVec 4 W32) , (VecReadScalarOffAddrOp WordVec 2 W64) , (VecReadScalarOffAddrOp WordVec 32 W8) , (VecReadScalarOffAddrOp WordVec 16 W16) , (VecReadScalarOffAddrOp WordVec 8 W32) , (VecReadScalarOffAddrOp WordVec 4 W64) , (VecReadScalarOffAddrOp WordVec 64 W8) , (VecReadScalarOffAddrOp WordVec 32 W16) , (VecReadScalarOffAddrOp WordVec 16 W32) , (VecReadScalarOffAddrOp WordVec 8 W64) , (VecReadScalarOffAddrOp FloatVec 4 W32) , (VecReadScalarOffAddrOp FloatVec 2 W64) , (VecReadScalarOffAddrOp FloatVec 8 W32) , (VecReadScalarOffAddrOp FloatVec 4 W64) , (VecReadScalarOffAddrOp FloatVec 16 W32) , (VecReadScalarOffAddrOp FloatVec 8 W64) , (VecWriteScalarOffAddrOp IntVec 16 W8) , (VecWriteScalarOffAddrOp IntVec 8 W16) , (VecWriteScalarOffAddrOp IntVec 4 W32) , (VecWriteScalarOffAddrOp IntVec 2 W64) , (VecWriteScalarOffAddrOp IntVec 32 W8) , (VecWriteScalarOffAddrOp IntVec 16 W16) , (VecWriteScalarOffAddrOp IntVec 8 W32) , (VecWriteScalarOffAddrOp IntVec 4 W64) , (VecWriteScalarOffAddrOp IntVec 64 W8) , (VecWriteScalarOffAddrOp IntVec 32 W16) , (VecWriteScalarOffAddrOp IntVec 16 W32) , (VecWriteScalarOffAddrOp IntVec 8 W64) , (VecWriteScalarOffAddrOp WordVec 16 W8) , (VecWriteScalarOffAddrOp WordVec 8 W16) , (VecWriteScalarOffAddrOp WordVec 4 W32) , (VecWriteScalarOffAddrOp WordVec 2 W64) , (VecWriteScalarOffAddrOp WordVec 32 W8) , (VecWriteScalarOffAddrOp WordVec 16 W16) , (VecWriteScalarOffAddrOp WordVec 8 W32) , (VecWriteScalarOffAddrOp WordVec 4 W64) , (VecWriteScalarOffAddrOp WordVec 64 W8) , (VecWriteScalarOffAddrOp WordVec 32 W16) , (VecWriteScalarOffAddrOp WordVec 16 W32) , 
(VecWriteScalarOffAddrOp WordVec 8 W64) , (VecWriteScalarOffAddrOp FloatVec 4 W32) , (VecWriteScalarOffAddrOp FloatVec 2 W64) , (VecWriteScalarOffAddrOp FloatVec 8 W32) , (VecWriteScalarOffAddrOp FloatVec 4 W64) , (VecWriteScalarOffAddrOp FloatVec 16 W32) , (VecWriteScalarOffAddrOp FloatVec 8 W64) , PrefetchByteArrayOp3 , PrefetchMutableByteArrayOp3 , PrefetchAddrOp3 , PrefetchValueOp3 , PrefetchByteArrayOp2 , PrefetchMutableByteArrayOp2 , PrefetchAddrOp2 , PrefetchValueOp2 , PrefetchByteArrayOp1 , PrefetchMutableByteArrayOp1 , PrefetchAddrOp1 , PrefetchValueOp1 , PrefetchByteArrayOp0 , PrefetchMutableByteArrayOp0 , PrefetchAddrOp0 , PrefetchValueOp0 ] ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-out-of-line.hs-incl0000644000000000000000000000770614472400111025753 0ustar0000000000000000primOpOutOfLine DoubleDecode_2IntOp = True primOpOutOfLine DoubleDecode_Int64Op = True primOpOutOfLine FloatDecode_IntOp = True primOpOutOfLine NewArrayOp = True primOpOutOfLine UnsafeThawArrayOp = True primOpOutOfLine CopyArrayOp = True primOpOutOfLine CopyMutableArrayOp = True primOpOutOfLine CloneArrayOp = True primOpOutOfLine CloneMutableArrayOp = True primOpOutOfLine FreezeArrayOp = True primOpOutOfLine ThawArrayOp = True primOpOutOfLine CasArrayOp = True primOpOutOfLine NewSmallArrayOp = True primOpOutOfLine ShrinkSmallMutableArrayOp_Char = True primOpOutOfLine UnsafeThawSmallArrayOp = True primOpOutOfLine CopySmallArrayOp = True primOpOutOfLine CopySmallMutableArrayOp = True primOpOutOfLine CloneSmallArrayOp = True primOpOutOfLine CloneSmallMutableArrayOp = True primOpOutOfLine FreezeSmallArrayOp = True primOpOutOfLine ThawSmallArrayOp = True primOpOutOfLine CasSmallArrayOp = True primOpOutOfLine NewByteArrayOp_Char = True primOpOutOfLine NewPinnedByteArrayOp_Char = True primOpOutOfLine NewAlignedPinnedByteArrayOp_Char = True primOpOutOfLine MutableByteArrayIsPinnedOp = True primOpOutOfLine ByteArrayIsPinnedOp = True primOpOutOfLine 
ShrinkMutableByteArrayOp_Char = True primOpOutOfLine ResizeMutableByteArrayOp_Char = True primOpOutOfLine NewMutVarOp = True primOpOutOfLine AtomicModifyMutVar2Op = True primOpOutOfLine AtomicModifyMutVar_Op = True primOpOutOfLine CasMutVarOp = True primOpOutOfLine CatchOp = True primOpOutOfLine RaiseOp = True primOpOutOfLine RaiseIOOp = True primOpOutOfLine MaskAsyncExceptionsOp = True primOpOutOfLine MaskUninterruptibleOp = True primOpOutOfLine UnmaskAsyncExceptionsOp = True primOpOutOfLine MaskStatus = True primOpOutOfLine AtomicallyOp = True primOpOutOfLine RetryOp = True primOpOutOfLine CatchRetryOp = True primOpOutOfLine CatchSTMOp = True primOpOutOfLine NewTVarOp = True primOpOutOfLine ReadTVarOp = True primOpOutOfLine ReadTVarIOOp = True primOpOutOfLine WriteTVarOp = True primOpOutOfLine NewMVarOp = True primOpOutOfLine TakeMVarOp = True primOpOutOfLine TryTakeMVarOp = True primOpOutOfLine PutMVarOp = True primOpOutOfLine TryPutMVarOp = True primOpOutOfLine ReadMVarOp = True primOpOutOfLine TryReadMVarOp = True primOpOutOfLine IsEmptyMVarOp = True primOpOutOfLine NewIOPortOp = True primOpOutOfLine ReadIOPortOp = True primOpOutOfLine WriteIOPortOp = True primOpOutOfLine DelayOp = True primOpOutOfLine WaitReadOp = True primOpOutOfLine WaitWriteOp = True primOpOutOfLine ForkOp = True primOpOutOfLine ForkOnOp = True primOpOutOfLine KillThreadOp = True primOpOutOfLine YieldOp = True primOpOutOfLine LabelThreadOp = True primOpOutOfLine IsCurrentThreadBoundOp = True primOpOutOfLine NoDuplicateOp = True primOpOutOfLine ThreadStatusOp = True primOpOutOfLine MkWeakOp = True primOpOutOfLine MkWeakNoFinalizerOp = True primOpOutOfLine AddCFinalizerToWeakOp = True primOpOutOfLine DeRefWeakOp = True primOpOutOfLine FinalizeWeakOp = True primOpOutOfLine MakeStablePtrOp = True primOpOutOfLine DeRefStablePtrOp = True primOpOutOfLine MakeStableNameOp = True primOpOutOfLine CompactNewOp = True primOpOutOfLine CompactResizeOp = True primOpOutOfLine CompactContainsOp = True 
primOpOutOfLine CompactContainsAnyOp = True primOpOutOfLine CompactGetFirstBlockOp = True primOpOutOfLine CompactGetNextBlockOp = True primOpOutOfLine CompactAllocateBlockOp = True primOpOutOfLine CompactFixupPointersOp = True primOpOutOfLine CompactAdd = True primOpOutOfLine CompactAddWithSharing = True primOpOutOfLine CompactSize = True primOpOutOfLine GetSparkOp = True primOpOutOfLine NumSparks = True primOpOutOfLine KeepAliveOp = True primOpOutOfLine MkApUpd0_Op = True primOpOutOfLine NewBCOOp = True primOpOutOfLine UnpackClosureOp = True primOpOutOfLine ClosureSizeOp = True primOpOutOfLine GetApStackValOp = True primOpOutOfLine ClearCCSOp = True primOpOutOfLine WhereFromOp = True primOpOutOfLine TraceEventOp = True primOpOutOfLine TraceEventBinaryOp = True primOpOutOfLine TraceMarkerOp = True primOpOutOfLine SetThreadAllocationCounter = True primOpOutOfLine _ = False ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-primop-info.hs-incl0000644000000000000000000064223114472400111026052 0ustar0000000000000000primOpInfo CharGtOp = mkCompare (fsLit "gtChar#") charPrimTy primOpInfo CharGeOp = mkCompare (fsLit "geChar#") charPrimTy primOpInfo CharEqOp = mkCompare (fsLit "eqChar#") charPrimTy primOpInfo CharNeOp = mkCompare (fsLit "neChar#") charPrimTy primOpInfo CharLtOp = mkCompare (fsLit "ltChar#") charPrimTy primOpInfo CharLeOp = mkCompare (fsLit "leChar#") charPrimTy primOpInfo OrdOp = mkGenPrimOp (fsLit "ord#") [] [charPrimTy] (intPrimTy) primOpInfo Int8ToIntOp = mkGenPrimOp (fsLit "int8ToInt#") [] [int8PrimTy] (intPrimTy) primOpInfo IntToInt8Op = mkGenPrimOp (fsLit "intToInt8#") [] [intPrimTy] (int8PrimTy) primOpInfo Int8NegOp = mkGenPrimOp (fsLit "negateInt8#") [] [int8PrimTy] (int8PrimTy) primOpInfo Int8AddOp = mkGenPrimOp (fsLit "plusInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8SubOp = mkGenPrimOp (fsLit "subInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8MulOp = mkGenPrimOp (fsLit "timesInt8#") [] 
[int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8QuotOp = mkGenPrimOp (fsLit "quotInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8RemOp = mkGenPrimOp (fsLit "remInt8#") [] [int8PrimTy, int8PrimTy] (int8PrimTy) primOpInfo Int8QuotRemOp = mkGenPrimOp (fsLit "quotRemInt8#") [] [int8PrimTy, int8PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy])) primOpInfo Int8SllOp = mkGenPrimOp (fsLit "uncheckedShiftLInt8#") [] [int8PrimTy, intPrimTy] (int8PrimTy) primOpInfo Int8SraOp = mkGenPrimOp (fsLit "uncheckedShiftRAInt8#") [] [int8PrimTy, intPrimTy] (int8PrimTy) primOpInfo Int8SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLInt8#") [] [int8PrimTy, intPrimTy] (int8PrimTy) primOpInfo Int8ToWord8Op = mkGenPrimOp (fsLit "int8ToWord8#") [] [int8PrimTy] (word8PrimTy) primOpInfo Int8EqOp = mkCompare (fsLit "eqInt8#") int8PrimTy primOpInfo Int8GeOp = mkCompare (fsLit "geInt8#") int8PrimTy primOpInfo Int8GtOp = mkCompare (fsLit "gtInt8#") int8PrimTy primOpInfo Int8LeOp = mkCompare (fsLit "leInt8#") int8PrimTy primOpInfo Int8LtOp = mkCompare (fsLit "ltInt8#") int8PrimTy primOpInfo Int8NeOp = mkCompare (fsLit "neInt8#") int8PrimTy primOpInfo Word8ToWordOp = mkGenPrimOp (fsLit "word8ToWord#") [] [word8PrimTy] (wordPrimTy) primOpInfo WordToWord8Op = mkGenPrimOp (fsLit "wordToWord8#") [] [wordPrimTy] (word8PrimTy) primOpInfo Word8AddOp = mkGenPrimOp (fsLit "plusWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8SubOp = mkGenPrimOp (fsLit "subWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8MulOp = mkGenPrimOp (fsLit "timesWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8QuotOp = mkGenPrimOp (fsLit "quotWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8RemOp = mkGenPrimOp (fsLit "remWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8QuotRemOp = mkGenPrimOp (fsLit "quotRemWord8#") [] [word8PrimTy, word8PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy])) primOpInfo Word8AndOp 
= mkGenPrimOp (fsLit "andWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8OrOp = mkGenPrimOp (fsLit "orWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8XorOp = mkGenPrimOp (fsLit "xorWord8#") [] [word8PrimTy, word8PrimTy] (word8PrimTy) primOpInfo Word8NotOp = mkGenPrimOp (fsLit "notWord8#") [] [word8PrimTy] (word8PrimTy) primOpInfo Word8SllOp = mkGenPrimOp (fsLit "uncheckedShiftLWord8#") [] [word8PrimTy, intPrimTy] (word8PrimTy) primOpInfo Word8SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLWord8#") [] [word8PrimTy, intPrimTy] (word8PrimTy) primOpInfo Word8ToInt8Op = mkGenPrimOp (fsLit "word8ToInt8#") [] [word8PrimTy] (int8PrimTy) primOpInfo Word8EqOp = mkCompare (fsLit "eqWord8#") word8PrimTy primOpInfo Word8GeOp = mkCompare (fsLit "geWord8#") word8PrimTy primOpInfo Word8GtOp = mkCompare (fsLit "gtWord8#") word8PrimTy primOpInfo Word8LeOp = mkCompare (fsLit "leWord8#") word8PrimTy primOpInfo Word8LtOp = mkCompare (fsLit "ltWord8#") word8PrimTy primOpInfo Word8NeOp = mkCompare (fsLit "neWord8#") word8PrimTy primOpInfo Int16ToIntOp = mkGenPrimOp (fsLit "int16ToInt#") [] [int16PrimTy] (intPrimTy) primOpInfo IntToInt16Op = mkGenPrimOp (fsLit "intToInt16#") [] [intPrimTy] (int16PrimTy) primOpInfo Int16NegOp = mkGenPrimOp (fsLit "negateInt16#") [] [int16PrimTy] (int16PrimTy) primOpInfo Int16AddOp = mkGenPrimOp (fsLit "plusInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16SubOp = mkGenPrimOp (fsLit "subInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16MulOp = mkGenPrimOp (fsLit "timesInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16QuotOp = mkGenPrimOp (fsLit "quotInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16RemOp = mkGenPrimOp (fsLit "remInt16#") [] [int16PrimTy, int16PrimTy] (int16PrimTy) primOpInfo Int16QuotRemOp = mkGenPrimOp (fsLit "quotRemInt16#") [] [int16PrimTy, int16PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy])) primOpInfo Int16SllOp = 
mkGenPrimOp (fsLit "uncheckedShiftLInt16#") [] [int16PrimTy, intPrimTy] (int16PrimTy) primOpInfo Int16SraOp = mkGenPrimOp (fsLit "uncheckedShiftRAInt16#") [] [int16PrimTy, intPrimTy] (int16PrimTy) primOpInfo Int16SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLInt16#") [] [int16PrimTy, intPrimTy] (int16PrimTy) primOpInfo Int16ToWord16Op = mkGenPrimOp (fsLit "int16ToWord16#") [] [int16PrimTy] (word16PrimTy) primOpInfo Int16EqOp = mkCompare (fsLit "eqInt16#") int16PrimTy primOpInfo Int16GeOp = mkCompare (fsLit "geInt16#") int16PrimTy primOpInfo Int16GtOp = mkCompare (fsLit "gtInt16#") int16PrimTy primOpInfo Int16LeOp = mkCompare (fsLit "leInt16#") int16PrimTy primOpInfo Int16LtOp = mkCompare (fsLit "ltInt16#") int16PrimTy primOpInfo Int16NeOp = mkCompare (fsLit "neInt16#") int16PrimTy primOpInfo Word16ToWordOp = mkGenPrimOp (fsLit "word16ToWord#") [] [word16PrimTy] (wordPrimTy) primOpInfo WordToWord16Op = mkGenPrimOp (fsLit "wordToWord16#") [] [wordPrimTy] (word16PrimTy) primOpInfo Word16AddOp = mkGenPrimOp (fsLit "plusWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16SubOp = mkGenPrimOp (fsLit "subWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16MulOp = mkGenPrimOp (fsLit "timesWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16QuotOp = mkGenPrimOp (fsLit "quotWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16RemOp = mkGenPrimOp (fsLit "remWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16QuotRemOp = mkGenPrimOp (fsLit "quotRemWord16#") [] [word16PrimTy, word16PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy])) primOpInfo Word16AndOp = mkGenPrimOp (fsLit "andWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16OrOp = mkGenPrimOp (fsLit "orWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16XorOp = mkGenPrimOp (fsLit "xorWord16#") [] [word16PrimTy, word16PrimTy] (word16PrimTy) primOpInfo Word16NotOp = 
mkGenPrimOp (fsLit "notWord16#") [] [word16PrimTy] (word16PrimTy) primOpInfo Word16SllOp = mkGenPrimOp (fsLit "uncheckedShiftLWord16#") [] [word16PrimTy, intPrimTy] (word16PrimTy) primOpInfo Word16SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRLWord16#") [] [word16PrimTy, intPrimTy] (word16PrimTy) primOpInfo Word16ToInt16Op = mkGenPrimOp (fsLit "word16ToInt16#") [] [word16PrimTy] (int16PrimTy) primOpInfo Word16EqOp = mkCompare (fsLit "eqWord16#") word16PrimTy primOpInfo Word16GeOp = mkCompare (fsLit "geWord16#") word16PrimTy primOpInfo Word16GtOp = mkCompare (fsLit "gtWord16#") word16PrimTy primOpInfo Word16LeOp = mkCompare (fsLit "leWord16#") word16PrimTy primOpInfo Word16LtOp = mkCompare (fsLit "ltWord16#") word16PrimTy primOpInfo Word16NeOp = mkCompare (fsLit "neWord16#") word16PrimTy primOpInfo Int32ToIntOp = mkGenPrimOp (fsLit "int32ToInt#") [] [int32PrimTy] (intPrimTy) primOpInfo IntToInt32Op = mkGenPrimOp (fsLit "intToInt32#") [] [intPrimTy] (int32PrimTy) primOpInfo Int32NegOp = mkGenPrimOp (fsLit "negateInt32#") [] [int32PrimTy] (int32PrimTy) primOpInfo Int32AddOp = mkGenPrimOp (fsLit "plusInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32SubOp = mkGenPrimOp (fsLit "subInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32MulOp = mkGenPrimOp (fsLit "timesInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32QuotOp = mkGenPrimOp (fsLit "quotInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32RemOp = mkGenPrimOp (fsLit "remInt32#") [] [int32PrimTy, int32PrimTy] (int32PrimTy) primOpInfo Int32QuotRemOp = mkGenPrimOp (fsLit "quotRemInt32#") [] [int32PrimTy, int32PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy])) primOpInfo Int32SllOp = mkGenPrimOp (fsLit "uncheckedShiftLInt32#") [] [int32PrimTy, intPrimTy] (int32PrimTy) primOpInfo Int32SraOp = mkGenPrimOp (fsLit "uncheckedShiftRAInt32#") [] [int32PrimTy, intPrimTy] (int32PrimTy) primOpInfo Int32SrlOp = mkGenPrimOp (fsLit 
"uncheckedShiftRLInt32#") [] [int32PrimTy, intPrimTy] (int32PrimTy) primOpInfo Int32ToWord32Op = mkGenPrimOp (fsLit "int32ToWord32#") [] [int32PrimTy] (word32PrimTy) primOpInfo Int32EqOp = mkCompare (fsLit "eqInt32#") int32PrimTy primOpInfo Int32GeOp = mkCompare (fsLit "geInt32#") int32PrimTy primOpInfo Int32GtOp = mkCompare (fsLit "gtInt32#") int32PrimTy primOpInfo Int32LeOp = mkCompare (fsLit "leInt32#") int32PrimTy primOpInfo Int32LtOp = mkCompare (fsLit "ltInt32#") int32PrimTy primOpInfo Int32NeOp = mkCompare (fsLit "neInt32#") int32PrimTy primOpInfo Word32ToWordOp = mkGenPrimOp (fsLit "word32ToWord#") [] [word32PrimTy] (wordPrimTy) primOpInfo WordToWord32Op = mkGenPrimOp (fsLit "wordToWord32#") [] [wordPrimTy] (word32PrimTy) primOpInfo Word32AddOp = mkGenPrimOp (fsLit "plusWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32SubOp = mkGenPrimOp (fsLit "subWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32MulOp = mkGenPrimOp (fsLit "timesWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32QuotOp = mkGenPrimOp (fsLit "quotWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32RemOp = mkGenPrimOp (fsLit "remWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32QuotRemOp = mkGenPrimOp (fsLit "quotRemWord32#") [] [word32PrimTy, word32PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy])) primOpInfo Word32AndOp = mkGenPrimOp (fsLit "andWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32OrOp = mkGenPrimOp (fsLit "orWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32XorOp = mkGenPrimOp (fsLit "xorWord32#") [] [word32PrimTy, word32PrimTy] (word32PrimTy) primOpInfo Word32NotOp = mkGenPrimOp (fsLit "notWord32#") [] [word32PrimTy] (word32PrimTy) primOpInfo Word32SllOp = mkGenPrimOp (fsLit "uncheckedShiftLWord32#") [] [word32PrimTy, intPrimTy] (word32PrimTy) primOpInfo Word32SrlOp = mkGenPrimOp (fsLit 
"uncheckedShiftRLWord32#") [] [word32PrimTy, intPrimTy] (word32PrimTy) primOpInfo Word32ToInt32Op = mkGenPrimOp (fsLit "word32ToInt32#") [] [word32PrimTy] (int32PrimTy) primOpInfo Word32EqOp = mkCompare (fsLit "eqWord32#") word32PrimTy primOpInfo Word32GeOp = mkCompare (fsLit "geWord32#") word32PrimTy primOpInfo Word32GtOp = mkCompare (fsLit "gtWord32#") word32PrimTy primOpInfo Word32LeOp = mkCompare (fsLit "leWord32#") word32PrimTy primOpInfo Word32LtOp = mkCompare (fsLit "ltWord32#") word32PrimTy primOpInfo Word32NeOp = mkCompare (fsLit "neWord32#") word32PrimTy primOpInfo Int64ToIntOp = mkGenPrimOp (fsLit "int64ToInt#") [] [int64PrimTy] (intPrimTy) primOpInfo IntToInt64Op = mkGenPrimOp (fsLit "intToInt64#") [] [intPrimTy] (int64PrimTy) primOpInfo Int64NegOp = mkGenPrimOp (fsLit "negateInt64#") [] [int64PrimTy] (int64PrimTy) primOpInfo Int64AddOp = mkGenPrimOp (fsLit "plusInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64SubOp = mkGenPrimOp (fsLit "subInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64MulOp = mkGenPrimOp (fsLit "timesInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64QuotOp = mkGenPrimOp (fsLit "quotInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64RemOp = mkGenPrimOp (fsLit "remInt64#") [] [int64PrimTy, int64PrimTy] (int64PrimTy) primOpInfo Int64SllOp = mkGenPrimOp (fsLit "uncheckedIShiftL64#") [] [int64PrimTy, intPrimTy] (int64PrimTy) primOpInfo Int64SraOp = mkGenPrimOp (fsLit "uncheckedIShiftRA64#") [] [int64PrimTy, intPrimTy] (int64PrimTy) primOpInfo Int64SrlOp = mkGenPrimOp (fsLit "uncheckedIShiftRL64#") [] [int64PrimTy, intPrimTy] (int64PrimTy) primOpInfo Int64ToWord64Op = mkGenPrimOp (fsLit "int64ToWord64#") [] [int64PrimTy] (word64PrimTy) primOpInfo Int64EqOp = mkCompare (fsLit "eqInt64#") int64PrimTy primOpInfo Int64GeOp = mkCompare (fsLit "geInt64#") int64PrimTy primOpInfo Int64GtOp = mkCompare (fsLit "gtInt64#") int64PrimTy primOpInfo Int64LeOp = mkCompare (fsLit 
"leInt64#") int64PrimTy primOpInfo Int64LtOp = mkCompare (fsLit "ltInt64#") int64PrimTy primOpInfo Int64NeOp = mkCompare (fsLit "neInt64#") int64PrimTy primOpInfo Word64ToWordOp = mkGenPrimOp (fsLit "word64ToWord#") [] [word64PrimTy] (wordPrimTy) primOpInfo WordToWord64Op = mkGenPrimOp (fsLit "wordToWord64#") [] [wordPrimTy] (word64PrimTy) primOpInfo Word64AddOp = mkGenPrimOp (fsLit "plusWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64SubOp = mkGenPrimOp (fsLit "subWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64MulOp = mkGenPrimOp (fsLit "timesWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64QuotOp = mkGenPrimOp (fsLit "quotWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64RemOp = mkGenPrimOp (fsLit "remWord64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64AndOp = mkGenPrimOp (fsLit "and64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64OrOp = mkGenPrimOp (fsLit "or64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64XorOp = mkGenPrimOp (fsLit "xor64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo Word64NotOp = mkGenPrimOp (fsLit "not64#") [] [word64PrimTy] (word64PrimTy) primOpInfo Word64SllOp = mkGenPrimOp (fsLit "uncheckedShiftL64#") [] [word64PrimTy, intPrimTy] (word64PrimTy) primOpInfo Word64SrlOp = mkGenPrimOp (fsLit "uncheckedShiftRL64#") [] [word64PrimTy, intPrimTy] (word64PrimTy) primOpInfo Word64ToInt64Op = mkGenPrimOp (fsLit "word64ToInt64#") [] [word64PrimTy] (int64PrimTy) primOpInfo Word64EqOp = mkCompare (fsLit "eqWord64#") word64PrimTy primOpInfo Word64GeOp = mkCompare (fsLit "geWord64#") word64PrimTy primOpInfo Word64GtOp = mkCompare (fsLit "gtWord64#") word64PrimTy primOpInfo Word64LeOp = mkCompare (fsLit "leWord64#") word64PrimTy primOpInfo Word64LtOp = mkCompare (fsLit "ltWord64#") word64PrimTy primOpInfo Word64NeOp = mkCompare (fsLit "neWord64#") word64PrimTy primOpInfo 
IntAddOp = mkGenPrimOp (fsLit "+#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntSubOp = mkGenPrimOp (fsLit "-#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntMulOp = mkGenPrimOp (fsLit "*#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntMul2Op = mkGenPrimOp (fsLit "timesInt2#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy, intPrimTy])) primOpInfo IntMulMayOfloOp = mkGenPrimOp (fsLit "mulIntMayOflo#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntQuotOp = mkGenPrimOp (fsLit "quotInt#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntRemOp = mkGenPrimOp (fsLit "remInt#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntQuotRemOp = mkGenPrimOp (fsLit "quotRemInt#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo IntAndOp = mkGenPrimOp (fsLit "andI#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntOrOp = mkGenPrimOp (fsLit "orI#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntXorOp = mkGenPrimOp (fsLit "xorI#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntNotOp = mkGenPrimOp (fsLit "notI#") [] [intPrimTy] (intPrimTy) primOpInfo IntNegOp = mkGenPrimOp (fsLit "negateInt#") [] [intPrimTy] (intPrimTy) primOpInfo IntAddCOp = mkGenPrimOp (fsLit "addIntC#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo IntSubCOp = mkGenPrimOp (fsLit "subIntC#") [] [intPrimTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo IntGtOp = mkCompare (fsLit ">#") intPrimTy primOpInfo IntGeOp = mkCompare (fsLit ">=#") intPrimTy primOpInfo IntEqOp = mkCompare (fsLit "==#") intPrimTy primOpInfo IntNeOp = mkCompare (fsLit "/=#") intPrimTy primOpInfo IntLtOp = mkCompare (fsLit "<#") intPrimTy primOpInfo IntLeOp = mkCompare (fsLit "<=#") intPrimTy primOpInfo ChrOp = mkGenPrimOp (fsLit "chr#") [] [intPrimTy] (charPrimTy) primOpInfo IntToWordOp = mkGenPrimOp (fsLit "int2Word#") [] [intPrimTy] (wordPrimTy) primOpInfo IntToFloatOp = mkGenPrimOp 
(fsLit "int2Float#") [] [intPrimTy] (floatPrimTy) primOpInfo IntToDoubleOp = mkGenPrimOp (fsLit "int2Double#") [] [intPrimTy] (doublePrimTy) primOpInfo WordToFloatOp = mkGenPrimOp (fsLit "word2Float#") [] [wordPrimTy] (floatPrimTy) primOpInfo WordToDoubleOp = mkGenPrimOp (fsLit "word2Double#") [] [wordPrimTy] (doublePrimTy) primOpInfo IntSllOp = mkGenPrimOp (fsLit "uncheckedIShiftL#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntSraOp = mkGenPrimOp (fsLit "uncheckedIShiftRA#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo IntSrlOp = mkGenPrimOp (fsLit "uncheckedIShiftRL#") [] [intPrimTy, intPrimTy] (intPrimTy) primOpInfo WordAddOp = mkGenPrimOp (fsLit "plusWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordAddCOp = mkGenPrimOp (fsLit "addWordC#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, intPrimTy])) primOpInfo WordSubCOp = mkGenPrimOp (fsLit "subWordC#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, intPrimTy])) primOpInfo WordAdd2Op = mkGenPrimOp (fsLit "plusWord2#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordSubOp = mkGenPrimOp (fsLit "minusWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordMulOp = mkGenPrimOp (fsLit "timesWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordMul2Op = mkGenPrimOp (fsLit "timesWord2#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordQuotOp = mkGenPrimOp (fsLit "quotWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordRemOp = mkGenPrimOp (fsLit "remWord#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordQuotRemOp = mkGenPrimOp (fsLit "quotRemWord#") [] [wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordQuotRem2Op = mkGenPrimOp (fsLit "quotRemWord2#") [] [wordPrimTy, wordPrimTy, wordPrimTy] ((mkTupleTy Unboxed [wordPrimTy, wordPrimTy])) primOpInfo WordAndOp = mkGenPrimOp (fsLit "and#") [] [wordPrimTy, 
wordPrimTy] (wordPrimTy) primOpInfo WordOrOp = mkGenPrimOp (fsLit "or#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordXorOp = mkGenPrimOp (fsLit "xor#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo WordNotOp = mkGenPrimOp (fsLit "not#") [] [wordPrimTy] (wordPrimTy) primOpInfo WordSllOp = mkGenPrimOp (fsLit "uncheckedShiftL#") [] [wordPrimTy, intPrimTy] (wordPrimTy) primOpInfo WordSrlOp = mkGenPrimOp (fsLit "uncheckedShiftRL#") [] [wordPrimTy, intPrimTy] (wordPrimTy) primOpInfo WordToIntOp = mkGenPrimOp (fsLit "word2Int#") [] [wordPrimTy] (intPrimTy) primOpInfo WordGtOp = mkCompare (fsLit "gtWord#") wordPrimTy primOpInfo WordGeOp = mkCompare (fsLit "geWord#") wordPrimTy primOpInfo WordEqOp = mkCompare (fsLit "eqWord#") wordPrimTy primOpInfo WordNeOp = mkCompare (fsLit "neWord#") wordPrimTy primOpInfo WordLtOp = mkCompare (fsLit "ltWord#") wordPrimTy primOpInfo WordLeOp = mkCompare (fsLit "leWord#") wordPrimTy primOpInfo PopCnt8Op = mkGenPrimOp (fsLit "popCnt8#") [] [wordPrimTy] (wordPrimTy) primOpInfo PopCnt16Op = mkGenPrimOp (fsLit "popCnt16#") [] [wordPrimTy] (wordPrimTy) primOpInfo PopCnt32Op = mkGenPrimOp (fsLit "popCnt32#") [] [wordPrimTy] (wordPrimTy) primOpInfo PopCnt64Op = mkGenPrimOp (fsLit "popCnt64#") [] [word64PrimTy] (wordPrimTy) primOpInfo PopCntOp = mkGenPrimOp (fsLit "popCnt#") [] [wordPrimTy] (wordPrimTy) primOpInfo Pdep8Op = mkGenPrimOp (fsLit "pdep8#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pdep16Op = mkGenPrimOp (fsLit "pdep16#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pdep32Op = mkGenPrimOp (fsLit "pdep32#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pdep64Op = mkGenPrimOp (fsLit "pdep64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo PdepOp = mkGenPrimOp (fsLit "pdep#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext8Op = mkGenPrimOp (fsLit "pext8#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext16Op = mkGenPrimOp (fsLit "pext16#") [] [wordPrimTy, wordPrimTy] 
(wordPrimTy) primOpInfo Pext32Op = mkGenPrimOp (fsLit "pext32#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Pext64Op = mkGenPrimOp (fsLit "pext64#") [] [word64PrimTy, word64PrimTy] (word64PrimTy) primOpInfo PextOp = mkGenPrimOp (fsLit "pext#") [] [wordPrimTy, wordPrimTy] (wordPrimTy) primOpInfo Clz8Op = mkGenPrimOp (fsLit "clz8#") [] [wordPrimTy] (wordPrimTy) primOpInfo Clz16Op = mkGenPrimOp (fsLit "clz16#") [] [wordPrimTy] (wordPrimTy) primOpInfo Clz32Op = mkGenPrimOp (fsLit "clz32#") [] [wordPrimTy] (wordPrimTy) primOpInfo Clz64Op = mkGenPrimOp (fsLit "clz64#") [] [word64PrimTy] (wordPrimTy) primOpInfo ClzOp = mkGenPrimOp (fsLit "clz#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz8Op = mkGenPrimOp (fsLit "ctz8#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz16Op = mkGenPrimOp (fsLit "ctz16#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz32Op = mkGenPrimOp (fsLit "ctz32#") [] [wordPrimTy] (wordPrimTy) primOpInfo Ctz64Op = mkGenPrimOp (fsLit "ctz64#") [] [word64PrimTy] (wordPrimTy) primOpInfo CtzOp = mkGenPrimOp (fsLit "ctz#") [] [wordPrimTy] (wordPrimTy) primOpInfo BSwap16Op = mkGenPrimOp (fsLit "byteSwap16#") [] [wordPrimTy] (wordPrimTy) primOpInfo BSwap32Op = mkGenPrimOp (fsLit "byteSwap32#") [] [wordPrimTy] (wordPrimTy) primOpInfo BSwap64Op = mkGenPrimOp (fsLit "byteSwap64#") [] [word64PrimTy] (word64PrimTy) primOpInfo BSwapOp = mkGenPrimOp (fsLit "byteSwap#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev8Op = mkGenPrimOp (fsLit "bitReverse8#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev16Op = mkGenPrimOp (fsLit "bitReverse16#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev32Op = mkGenPrimOp (fsLit "bitReverse32#") [] [wordPrimTy] (wordPrimTy) primOpInfo BRev64Op = mkGenPrimOp (fsLit "bitReverse64#") [] [word64PrimTy] (word64PrimTy) primOpInfo BRevOp = mkGenPrimOp (fsLit "bitReverse#") [] [wordPrimTy] (wordPrimTy) primOpInfo Narrow8IntOp = mkGenPrimOp (fsLit "narrow8Int#") [] [intPrimTy] (intPrimTy) primOpInfo Narrow16IntOp = mkGenPrimOp (fsLit 
"narrow16Int#") [] [intPrimTy] (intPrimTy) primOpInfo Narrow32IntOp = mkGenPrimOp (fsLit "narrow32Int#") [] [intPrimTy] (intPrimTy) primOpInfo Narrow8WordOp = mkGenPrimOp (fsLit "narrow8Word#") [] [wordPrimTy] (wordPrimTy) primOpInfo Narrow16WordOp = mkGenPrimOp (fsLit "narrow16Word#") [] [wordPrimTy] (wordPrimTy) primOpInfo Narrow32WordOp = mkGenPrimOp (fsLit "narrow32Word#") [] [wordPrimTy] (wordPrimTy) primOpInfo DoubleGtOp = mkCompare (fsLit ">##") doublePrimTy primOpInfo DoubleGeOp = mkCompare (fsLit ">=##") doublePrimTy primOpInfo DoubleEqOp = mkCompare (fsLit "==##") doublePrimTy primOpInfo DoubleNeOp = mkCompare (fsLit "/=##") doublePrimTy primOpInfo DoubleLtOp = mkCompare (fsLit "<##") doublePrimTy primOpInfo DoubleLeOp = mkCompare (fsLit "<=##") doublePrimTy primOpInfo DoubleAddOp = mkGenPrimOp (fsLit "+##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleSubOp = mkGenPrimOp (fsLit "-##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleMulOp = mkGenPrimOp (fsLit "*##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleDivOp = mkGenPrimOp (fsLit "/##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleNegOp = mkGenPrimOp (fsLit "negateDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleFabsOp = mkGenPrimOp (fsLit "fabsDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleToIntOp = mkGenPrimOp (fsLit "double2Int#") [] [doublePrimTy] (intPrimTy) primOpInfo DoubleToFloatOp = mkGenPrimOp (fsLit "double2Float#") [] [doublePrimTy] (floatPrimTy) primOpInfo DoubleExpOp = mkGenPrimOp (fsLit "expDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleExpM1Op = mkGenPrimOp (fsLit "expm1Double#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleLogOp = mkGenPrimOp (fsLit "logDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleLog1POp = mkGenPrimOp (fsLit "log1pDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleSqrtOp = mkGenPrimOp (fsLit "sqrtDouble#") [] [doublePrimTy] 
(doublePrimTy) primOpInfo DoubleSinOp = mkGenPrimOp (fsLit "sinDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleCosOp = mkGenPrimOp (fsLit "cosDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleTanOp = mkGenPrimOp (fsLit "tanDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAsinOp = mkGenPrimOp (fsLit "asinDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAcosOp = mkGenPrimOp (fsLit "acosDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAtanOp = mkGenPrimOp (fsLit "atanDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleSinhOp = mkGenPrimOp (fsLit "sinhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleCoshOp = mkGenPrimOp (fsLit "coshDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleTanhOp = mkGenPrimOp (fsLit "tanhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAsinhOp = mkGenPrimOp (fsLit "asinhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAcoshOp = mkGenPrimOp (fsLit "acoshDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoubleAtanhOp = mkGenPrimOp (fsLit "atanhDouble#") [] [doublePrimTy] (doublePrimTy) primOpInfo DoublePowerOp = mkGenPrimOp (fsLit "**##") [] [doublePrimTy, doublePrimTy] (doublePrimTy) primOpInfo DoubleDecode_2IntOp = mkGenPrimOp (fsLit "decodeDouble_2Int#") [] [doublePrimTy] ((mkTupleTy Unboxed [intPrimTy, wordPrimTy, wordPrimTy, intPrimTy])) primOpInfo DoubleDecode_Int64Op = mkGenPrimOp (fsLit "decodeDouble_Int64#") [] [doublePrimTy] ((mkTupleTy Unboxed [int64PrimTy, intPrimTy])) primOpInfo FloatGtOp = mkCompare (fsLit "gtFloat#") floatPrimTy primOpInfo FloatGeOp = mkCompare (fsLit "geFloat#") floatPrimTy primOpInfo FloatEqOp = mkCompare (fsLit "eqFloat#") floatPrimTy primOpInfo FloatNeOp = mkCompare (fsLit "neFloat#") floatPrimTy primOpInfo FloatLtOp = mkCompare (fsLit "ltFloat#") floatPrimTy primOpInfo FloatLeOp = mkCompare (fsLit "leFloat#") floatPrimTy primOpInfo FloatAddOp = mkGenPrimOp (fsLit "plusFloat#") [] [floatPrimTy, 
floatPrimTy] (floatPrimTy) primOpInfo FloatSubOp = mkGenPrimOp (fsLit "minusFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatMulOp = mkGenPrimOp (fsLit "timesFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatDivOp = mkGenPrimOp (fsLit "divideFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatNegOp = mkGenPrimOp (fsLit "negateFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatFabsOp = mkGenPrimOp (fsLit "fabsFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatToIntOp = mkGenPrimOp (fsLit "float2Int#") [] [floatPrimTy] (intPrimTy) primOpInfo FloatExpOp = mkGenPrimOp (fsLit "expFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatExpM1Op = mkGenPrimOp (fsLit "expm1Float#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatLogOp = mkGenPrimOp (fsLit "logFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatLog1POp = mkGenPrimOp (fsLit "log1pFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatSqrtOp = mkGenPrimOp (fsLit "sqrtFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatSinOp = mkGenPrimOp (fsLit "sinFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatCosOp = mkGenPrimOp (fsLit "cosFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatTanOp = mkGenPrimOp (fsLit "tanFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAsinOp = mkGenPrimOp (fsLit "asinFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAcosOp = mkGenPrimOp (fsLit "acosFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAtanOp = mkGenPrimOp (fsLit "atanFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatSinhOp = mkGenPrimOp (fsLit "sinhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatCoshOp = mkGenPrimOp (fsLit "coshFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatTanhOp = mkGenPrimOp (fsLit "tanhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAsinhOp = mkGenPrimOp (fsLit "asinhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatAcoshOp = mkGenPrimOp (fsLit "acoshFloat#") [] 
[floatPrimTy] (floatPrimTy) primOpInfo FloatAtanhOp = mkGenPrimOp (fsLit "atanhFloat#") [] [floatPrimTy] (floatPrimTy) primOpInfo FloatPowerOp = mkGenPrimOp (fsLit "powerFloat#") [] [floatPrimTy, floatPrimTy] (floatPrimTy) primOpInfo FloatToDoubleOp = mkGenPrimOp (fsLit "float2Double#") [] [floatPrimTy] (doublePrimTy) primOpInfo FloatDecode_IntOp = mkGenPrimOp (fsLit "decodeFloat_Int#") [] [floatPrimTy] ((mkTupleTy Unboxed [intPrimTy, intPrimTy])) primOpInfo NewArrayOp = mkGenPrimOp (fsLit "newArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadArrayOp = mkGenPrimOp (fsLit "readArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteArrayOp = mkGenPrimOp (fsLit "writeArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SizeofArrayOp = mkGenPrimOp (fsLit "sizeofArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy] (intPrimTy) primOpInfo SizeofMutableArrayOp = mkGenPrimOp (fsLit "sizeofMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy] (intPrimTy) primOpInfo IndexArrayOp = mkGenPrimOp (fsLit "indexArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy] ((mkTupleTy Unboxed [levPolyAlphaTy])) primOpInfo UnsafeFreezeArrayOp = mkGenPrimOp (fsLit "unsafeFreezeArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkArrayPrimTy levPolyAlphaTy])) 
primOpInfo UnsafeThawArrayOp = mkGenPrimOp (fsLit "unsafeThawArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CopyArrayOp = mkGenPrimOp (fsLit "copyArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableArrayOp = mkGenPrimOp (fsLit "copyMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CloneArrayOp = mkGenPrimOp (fsLit "cloneArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy] (mkArrayPrimTy levPolyAlphaTy) primOpInfo CloneMutableArrayOp = mkGenPrimOp (fsLit "cloneMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo FreezeArrayOp = mkGenPrimOp (fsLit "freezeArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkArrayPrimTy levPolyAlphaTy])) primOpInfo ThawArrayOp = mkGenPrimOp (fsLit "thawArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CasArrayOp = mkGenPrimOp (fsLit "casArray#") [levity1TyVarInf, deltaTyVarSpec, 
levPolyAlphaTyVarSpec] [mkMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo NewSmallArrayOp = mkGenPrimOp (fsLit "newSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo ShrinkSmallMutableArrayOp_Char = mkGenPrimOp (fsLit "shrinkSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ReadSmallArrayOp = mkGenPrimOp (fsLit "readSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteSmallArrayOp = mkGenPrimOp (fsLit "writeSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SizeofSmallArrayOp = mkGenPrimOp (fsLit "sizeofSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy] (intPrimTy) primOpInfo SizeofSmallMutableArrayOp = mkGenPrimOp (fsLit "sizeofSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy] (intPrimTy) primOpInfo GetSizeofSmallMutableArrayOp = mkGenPrimOp (fsLit "getSizeofSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo IndexSmallArrayOp = mkGenPrimOp (fsLit "indexSmallArray#") [levity1TyVarInf, 
levPolyAlphaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy] ((mkTupleTy Unboxed [levPolyAlphaTy])) primOpInfo UnsafeFreezeSmallArrayOp = mkGenPrimOp (fsLit "unsafeFreezeSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallArrayPrimTy levPolyAlphaTy])) primOpInfo UnsafeThawSmallArrayOp = mkGenPrimOp (fsLit "unsafeThawSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CopySmallArrayOp = mkGenPrimOp (fsLit "copySmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopySmallMutableArrayOp = mkGenPrimOp (fsLit "copySmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CloneSmallArrayOp = mkGenPrimOp (fsLit "cloneSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy] (mkSmallArrayPrimTy levPolyAlphaTy) primOpInfo CloneSmallMutableArrayOp = mkGenPrimOp (fsLit "cloneSmallMutableArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo FreezeSmallArrayOp = mkGenPrimOp (fsLit "freezeSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] 
[mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallArrayPrimTy levPolyAlphaTy])) primOpInfo ThawSmallArrayOp = mkGenPrimOp (fsLit "thawSmallArray#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [mkSmallArrayPrimTy levPolyAlphaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy])) primOpInfo CasSmallArrayOp = mkGenPrimOp (fsLit "casSmallArray#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkSmallMutableArrayPrimTy deltaTy levPolyAlphaTy, intPrimTy, levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo NewByteArrayOp_Char = mkGenPrimOp (fsLit "newByteArray#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo NewPinnedByteArrayOp_Char = mkGenPrimOp (fsLit "newPinnedByteArray#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo NewAlignedPinnedByteArrayOp_Char = mkGenPrimOp (fsLit "newAlignedPinnedByteArray#") [deltaTyVarSpec] [intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo MutableByteArrayIsPinnedOp = mkGenPrimOp (fsLit "isMutableByteArrayPinned#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (intPrimTy) primOpInfo ByteArrayIsPinnedOp = mkGenPrimOp (fsLit "isByteArrayPinned#") [] [byteArrayPrimTy] (intPrimTy) primOpInfo ByteArrayContents_Char = mkGenPrimOp (fsLit "byteArrayContents#") [] [byteArrayPrimTy] (addrPrimTy) primOpInfo MutableByteArrayContents_Char = mkGenPrimOp (fsLit "mutableByteArrayContents#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (addrPrimTy) primOpInfo 
ShrinkMutableByteArrayOp_Char = mkGenPrimOp (fsLit "shrinkMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ResizeMutableByteArrayOp_Char = mkGenPrimOp (fsLit "resizeMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutableByteArrayPrimTy deltaTy])) primOpInfo UnsafeFreezeByteArrayOp = mkGenPrimOp (fsLit "unsafeFreezeByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, byteArrayPrimTy])) primOpInfo SizeofByteArrayOp = mkGenPrimOp (fsLit "sizeofByteArray#") [] [byteArrayPrimTy] (intPrimTy) primOpInfo SizeofMutableByteArrayOp = mkGenPrimOp (fsLit "sizeofMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy] (intPrimTy) primOpInfo GetSizeofMutableByteArrayOp = mkGenPrimOp (fsLit "getSizeofMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo IndexByteArrayOp_Char = mkGenPrimOp (fsLit "indexCharArray#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_WideChar = mkGenPrimOp (fsLit "indexWideCharArray#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_Int = mkGenPrimOp (fsLit "indexIntArray#") [] [byteArrayPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexByteArrayOp_Word = mkGenPrimOp (fsLit "indexWordArray#") [] [byteArrayPrimTy, intPrimTy] (wordPrimTy) primOpInfo IndexByteArrayOp_Addr = mkGenPrimOp (fsLit "indexAddrArray#") [] [byteArrayPrimTy, intPrimTy] (addrPrimTy) primOpInfo IndexByteArrayOp_Float = mkGenPrimOp (fsLit "indexFloatArray#") [] [byteArrayPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexByteArrayOp_Double = mkGenPrimOp (fsLit "indexDoubleArray#") [] [byteArrayPrimTy, intPrimTy] (doublePrimTy) primOpInfo 
IndexByteArrayOp_StablePtr = mkGenPrimOp (fsLit "indexStablePtrArray#") [alphaTyVarSpec] [byteArrayPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexByteArrayOp_Int8 = mkGenPrimOp (fsLit "indexInt8Array#") [] [byteArrayPrimTy, intPrimTy] (int8PrimTy) primOpInfo IndexByteArrayOp_Int16 = mkGenPrimOp (fsLit "indexInt16Array#") [] [byteArrayPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexByteArrayOp_Int32 = mkGenPrimOp (fsLit "indexInt32Array#") [] [byteArrayPrimTy, intPrimTy] (int32PrimTy) primOpInfo IndexByteArrayOp_Int64 = mkGenPrimOp (fsLit "indexInt64Array#") [] [byteArrayPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexByteArrayOp_Word8 = mkGenPrimOp (fsLit "indexWord8Array#") [] [byteArrayPrimTy, intPrimTy] (word8PrimTy) primOpInfo IndexByteArrayOp_Word16 = mkGenPrimOp (fsLit "indexWord16Array#") [] [byteArrayPrimTy, intPrimTy] (word16PrimTy) primOpInfo IndexByteArrayOp_Word32 = mkGenPrimOp (fsLit "indexWord32Array#") [] [byteArrayPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexByteArrayOp_Word64 = mkGenPrimOp (fsLit "indexWord64Array#") [] [byteArrayPrimTy, intPrimTy] (word64PrimTy) primOpInfo IndexByteArrayOp_Word8AsChar = mkGenPrimOp (fsLit "indexWord8ArrayAsChar#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_Word8AsWideChar = mkGenPrimOp (fsLit "indexWord8ArrayAsWideChar#") [] [byteArrayPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexByteArrayOp_Word8AsInt = mkGenPrimOp (fsLit "indexWord8ArrayAsInt#") [] [byteArrayPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexByteArrayOp_Word8AsWord = mkGenPrimOp (fsLit "indexWord8ArrayAsWord#") [] [byteArrayPrimTy, intPrimTy] (wordPrimTy) primOpInfo IndexByteArrayOp_Word8AsAddr = mkGenPrimOp (fsLit "indexWord8ArrayAsAddr#") [] [byteArrayPrimTy, intPrimTy] (addrPrimTy) primOpInfo IndexByteArrayOp_Word8AsFloat = mkGenPrimOp (fsLit "indexWord8ArrayAsFloat#") [] [byteArrayPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexByteArrayOp_Word8AsDouble = mkGenPrimOp (fsLit 
"indexWord8ArrayAsDouble#") [] [byteArrayPrimTy, intPrimTy] (doublePrimTy) primOpInfo IndexByteArrayOp_Word8AsStablePtr = mkGenPrimOp (fsLit "indexWord8ArrayAsStablePtr#") [alphaTyVarSpec] [byteArrayPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexByteArrayOp_Word8AsInt16 = mkGenPrimOp (fsLit "indexWord8ArrayAsInt16#") [] [byteArrayPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexByteArrayOp_Word8AsInt32 = mkGenPrimOp (fsLit "indexWord8ArrayAsInt32#") [] [byteArrayPrimTy, intPrimTy] (int32PrimTy) primOpInfo IndexByteArrayOp_Word8AsInt64 = mkGenPrimOp (fsLit "indexWord8ArrayAsInt64#") [] [byteArrayPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexByteArrayOp_Word8AsWord16 = mkGenPrimOp (fsLit "indexWord8ArrayAsWord16#") [] [byteArrayPrimTy, intPrimTy] (word16PrimTy) primOpInfo IndexByteArrayOp_Word8AsWord32 = mkGenPrimOp (fsLit "indexWord8ArrayAsWord32#") [] [byteArrayPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexByteArrayOp_Word8AsWord64 = mkGenPrimOp (fsLit "indexWord8ArrayAsWord64#") [] [byteArrayPrimTy, intPrimTy] (word64PrimTy) primOpInfo ReadByteArrayOp_Char = mkGenPrimOp (fsLit "readCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_WideChar = mkGenPrimOp (fsLit "readWideCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_Int = mkGenPrimOp (fsLit "readIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadByteArrayOp_Word = mkGenPrimOp (fsLit "readWordArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadByteArrayOp_Addr = mkGenPrimOp (fsLit 
"readAddrArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadByteArrayOp_Float = mkGenPrimOp (fsLit "readFloatArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) primOpInfo ReadByteArrayOp_Double = mkGenPrimOp (fsLit "readDoubleArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadByteArrayOp_StablePtr = mkGenPrimOp (fsLit "readStablePtrArray#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadByteArrayOp_Int8 = mkGenPrimOp (fsLit "readInt8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8PrimTy])) primOpInfo ReadByteArrayOp_Int16 = mkGenPrimOp (fsLit "readInt16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadByteArrayOp_Int32 = mkGenPrimOp (fsLit "readInt32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadByteArrayOp_Int64 = mkGenPrimOp (fsLit "readInt64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadByteArrayOp_Word8 = mkGenPrimOp (fsLit "readWord8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8PrimTy])) primOpInfo ReadByteArrayOp_Word16 = mkGenPrimOp (fsLit 
"readWord16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadByteArrayOp_Word32 = mkGenPrimOp (fsLit "readWord32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadByteArrayOp_Word64 = mkGenPrimOp (fsLit "readWord64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo ReadByteArrayOp_Word8AsChar = mkGenPrimOp (fsLit "readWord8ArrayAsChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_Word8AsWideChar = mkGenPrimOp (fsLit "readWord8ArrayAsWideChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadByteArrayOp_Word8AsInt = mkGenPrimOp (fsLit "readWord8ArrayAsInt#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord = mkGenPrimOp (fsLit "readWord8ArrayAsWord#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadByteArrayOp_Word8AsAddr = mkGenPrimOp (fsLit "readWord8ArrayAsAddr#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadByteArrayOp_Word8AsFloat = mkGenPrimOp (fsLit "readWord8ArrayAsFloat#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) 
primOpInfo ReadByteArrayOp_Word8AsDouble = mkGenPrimOp (fsLit "readWord8ArrayAsDouble#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadByteArrayOp_Word8AsStablePtr = mkGenPrimOp (fsLit "readWord8ArrayAsStablePtr#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadByteArrayOp_Word8AsInt16 = mkGenPrimOp (fsLit "readWord8ArrayAsInt16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadByteArrayOp_Word8AsInt32 = mkGenPrimOp (fsLit "readWord8ArrayAsInt32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadByteArrayOp_Word8AsInt64 = mkGenPrimOp (fsLit "readWord8ArrayAsInt64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord16 = mkGenPrimOp (fsLit "readWord8ArrayAsWord16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord32 = mkGenPrimOp (fsLit "readWord8ArrayAsWord32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadByteArrayOp_Word8AsWord64 = mkGenPrimOp (fsLit "readWord8ArrayAsWord64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo WriteByteArrayOp_Char = mkGenPrimOp (fsLit "writeCharArray#") [deltaTyVarSpec] 
[mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_WideChar = mkGenPrimOp (fsLit "writeWideCharArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int = mkGenPrimOp (fsLit "writeIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word = mkGenPrimOp (fsLit "writeWordArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Addr = mkGenPrimOp (fsLit "writeAddrArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Float = mkGenPrimOp (fsLit "writeFloatArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Double = mkGenPrimOp (fsLit "writeDoubleArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_StablePtr = mkGenPrimOp (fsLit "writeStablePtrArray#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int8 = mkGenPrimOp (fsLit "writeInt8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int16 = mkGenPrimOp (fsLit "writeInt16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int32 = mkGenPrimOp (fsLit "writeInt32Array#") 
[deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Int64 = mkGenPrimOp (fsLit "writeInt64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8 = mkGenPrimOp (fsLit "writeWord8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word16 = mkGenPrimOp (fsLit "writeWord16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word32 = mkGenPrimOp (fsLit "writeWord32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word64 = mkGenPrimOp (fsLit "writeWord64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsChar = mkGenPrimOp (fsLit "writeWord8ArrayAsChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWideChar = mkGenPrimOp (fsLit "writeWord8ArrayAsWideChar#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt = mkGenPrimOp (fsLit "writeWord8ArrayAsInt#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord = mkGenPrimOp (fsLit "writeWord8ArrayAsWord#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo 
WriteByteArrayOp_Word8AsAddr = mkGenPrimOp (fsLit "writeWord8ArrayAsAddr#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsFloat = mkGenPrimOp (fsLit "writeWord8ArrayAsFloat#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsDouble = mkGenPrimOp (fsLit "writeWord8ArrayAsDouble#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsStablePtr = mkGenPrimOp (fsLit "writeWord8ArrayAsStablePtr#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt16 = mkGenPrimOp (fsLit "writeWord8ArrayAsInt16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt32 = mkGenPrimOp (fsLit "writeWord8ArrayAsInt32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsInt64 = mkGenPrimOp (fsLit "writeWord8ArrayAsInt64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord16 = mkGenPrimOp (fsLit "writeWord8ArrayAsWord16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord32 = mkGenPrimOp (fsLit "writeWord8ArrayAsWord32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteByteArrayOp_Word8AsWord64 = mkGenPrimOp 
(fsLit "writeWord8ArrayAsWord64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CompareByteArraysOp = mkGenPrimOp (fsLit "compareByteArrays#") [] [byteArrayPrimTy, intPrimTy, byteArrayPrimTy, intPrimTy, intPrimTy] (intPrimTy) primOpInfo CopyByteArrayOp = mkGenPrimOp (fsLit "copyByteArray#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableByteArrayOp = mkGenPrimOp (fsLit "copyMutableByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyByteArrayToAddrOp = mkGenPrimOp (fsLit "copyByteArrayToAddr#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyMutableByteArrayToAddrOp = mkGenPrimOp (fsLit "copyMutableByteArrayToAddr#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo CopyAddrToByteArrayOp = mkGenPrimOp (fsLit "copyAddrToByteArray#") [deltaTyVarSpec] [addrPrimTy, mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SetByteArrayOp = mkGenPrimOp (fsLit "setByteArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo AtomicReadByteArrayOp_Int = mkGenPrimOp (fsLit "atomicReadIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo AtomicWriteByteArrayOp_Int = mkGenPrimOp (fsLit "atomicWriteIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy 
deltaTy] (mkStatePrimTy deltaTy) primOpInfo CasByteArrayOp_Int = mkGenPrimOp (fsLit "casIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo CasByteArrayOp_Int8 = mkGenPrimOp (fsLit "casInt8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8PrimTy, int8PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8PrimTy])) primOpInfo CasByteArrayOp_Int16 = mkGenPrimOp (fsLit "casInt16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16PrimTy, int16PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo CasByteArrayOp_Int32 = mkGenPrimOp (fsLit "casInt32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32PrimTy, int32PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo CasByteArrayOp_Int64 = mkGenPrimOp (fsLit "casInt64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64PrimTy, int64PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo FetchAddByteArrayOp_Int = mkGenPrimOp (fsLit "fetchAddIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchSubByteArrayOp_Int = mkGenPrimOp (fsLit "fetchSubIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchAndByteArrayOp_Int = mkGenPrimOp (fsLit "fetchAndIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchNandByteArrayOp_Int = mkGenPrimOp (fsLit "fetchNandIntArray#") 
[deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchOrByteArrayOp_Int = mkGenPrimOp (fsLit "fetchOrIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo FetchXorByteArrayOp_Int = mkGenPrimOp (fsLit "fetchXorIntArray#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo AddrAddOp = mkGenPrimOp (fsLit "plusAddr#") [] [addrPrimTy, intPrimTy] (addrPrimTy) primOpInfo AddrSubOp = mkGenPrimOp (fsLit "minusAddr#") [] [addrPrimTy, addrPrimTy] (intPrimTy) primOpInfo AddrRemOp = mkGenPrimOp (fsLit "remAddr#") [] [addrPrimTy, intPrimTy] (intPrimTy) primOpInfo AddrToIntOp = mkGenPrimOp (fsLit "addr2Int#") [] [addrPrimTy] (intPrimTy) primOpInfo IntToAddrOp = mkGenPrimOp (fsLit "int2Addr#") [] [intPrimTy] (addrPrimTy) primOpInfo AddrGtOp = mkCompare (fsLit "gtAddr#") addrPrimTy primOpInfo AddrGeOp = mkCompare (fsLit "geAddr#") addrPrimTy primOpInfo AddrEqOp = mkCompare (fsLit "eqAddr#") addrPrimTy primOpInfo AddrNeOp = mkCompare (fsLit "neAddr#") addrPrimTy primOpInfo AddrLtOp = mkCompare (fsLit "ltAddr#") addrPrimTy primOpInfo AddrLeOp = mkCompare (fsLit "leAddr#") addrPrimTy primOpInfo IndexOffAddrOp_Char = mkGenPrimOp (fsLit "indexCharOffAddr#") [] [addrPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexOffAddrOp_WideChar = mkGenPrimOp (fsLit "indexWideCharOffAddr#") [] [addrPrimTy, intPrimTy] (charPrimTy) primOpInfo IndexOffAddrOp_Int = mkGenPrimOp (fsLit "indexIntOffAddr#") [] [addrPrimTy, intPrimTy] (intPrimTy) primOpInfo IndexOffAddrOp_Word = mkGenPrimOp (fsLit "indexWordOffAddr#") [] [addrPrimTy, intPrimTy] (wordPrimTy) primOpInfo IndexOffAddrOp_Addr = mkGenPrimOp (fsLit "indexAddrOffAddr#") [] [addrPrimTy, intPrimTy] 
(addrPrimTy) primOpInfo IndexOffAddrOp_Float = mkGenPrimOp (fsLit "indexFloatOffAddr#") [] [addrPrimTy, intPrimTy] (floatPrimTy) primOpInfo IndexOffAddrOp_Double = mkGenPrimOp (fsLit "indexDoubleOffAddr#") [] [addrPrimTy, intPrimTy] (doublePrimTy) primOpInfo IndexOffAddrOp_StablePtr = mkGenPrimOp (fsLit "indexStablePtrOffAddr#") [alphaTyVarSpec] [addrPrimTy, intPrimTy] (mkStablePtrPrimTy alphaTy) primOpInfo IndexOffAddrOp_Int8 = mkGenPrimOp (fsLit "indexInt8OffAddr#") [] [addrPrimTy, intPrimTy] (int8PrimTy) primOpInfo IndexOffAddrOp_Int16 = mkGenPrimOp (fsLit "indexInt16OffAddr#") [] [addrPrimTy, intPrimTy] (int16PrimTy) primOpInfo IndexOffAddrOp_Int32 = mkGenPrimOp (fsLit "indexInt32OffAddr#") [] [addrPrimTy, intPrimTy] (int32PrimTy) primOpInfo IndexOffAddrOp_Int64 = mkGenPrimOp (fsLit "indexInt64OffAddr#") [] [addrPrimTy, intPrimTy] (int64PrimTy) primOpInfo IndexOffAddrOp_Word8 = mkGenPrimOp (fsLit "indexWord8OffAddr#") [] [addrPrimTy, intPrimTy] (word8PrimTy) primOpInfo IndexOffAddrOp_Word16 = mkGenPrimOp (fsLit "indexWord16OffAddr#") [] [addrPrimTy, intPrimTy] (word16PrimTy) primOpInfo IndexOffAddrOp_Word32 = mkGenPrimOp (fsLit "indexWord32OffAddr#") [] [addrPrimTy, intPrimTy] (word32PrimTy) primOpInfo IndexOffAddrOp_Word64 = mkGenPrimOp (fsLit "indexWord64OffAddr#") [] [addrPrimTy, intPrimTy] (word64PrimTy) primOpInfo ReadOffAddrOp_Char = mkGenPrimOp (fsLit "readCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadOffAddrOp_WideChar = mkGenPrimOp (fsLit "readWideCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, charPrimTy])) primOpInfo ReadOffAddrOp_Int = mkGenPrimOp (fsLit "readIntOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadOffAddrOp_Word = mkGenPrimOp (fsLit "readWordOffAddr#") 
[deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo ReadOffAddrOp_Addr = mkGenPrimOp (fsLit "readAddrOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ReadOffAddrOp_Float = mkGenPrimOp (fsLit "readFloatOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatPrimTy])) primOpInfo ReadOffAddrOp_Double = mkGenPrimOp (fsLit "readDoubleOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doublePrimTy])) primOpInfo ReadOffAddrOp_StablePtr = mkGenPrimOp (fsLit "readStablePtrOffAddr#") [deltaTyVarSpec, alphaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkStablePtrPrimTy alphaTy])) primOpInfo ReadOffAddrOp_Int8 = mkGenPrimOp (fsLit "readInt8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8PrimTy])) primOpInfo ReadOffAddrOp_Int16 = mkGenPrimOp (fsLit "readInt16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16PrimTy])) primOpInfo ReadOffAddrOp_Int32 = mkGenPrimOp (fsLit "readInt32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32PrimTy])) primOpInfo ReadOffAddrOp_Int64 = mkGenPrimOp (fsLit "readInt64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64PrimTy])) primOpInfo ReadOffAddrOp_Word8 = mkGenPrimOp (fsLit "readWord8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8PrimTy])) primOpInfo ReadOffAddrOp_Word16 = mkGenPrimOp (fsLit "readWord16OffAddr#") 
[deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo ReadOffAddrOp_Word32 = mkGenPrimOp (fsLit "readWord32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo ReadOffAddrOp_Word64 = mkGenPrimOp (fsLit "readWord64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo WriteOffAddrOp_Char = mkGenPrimOp (fsLit "writeCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_WideChar = mkGenPrimOp (fsLit "writeWideCharOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, charPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int = mkGenPrimOp (fsLit "writeIntOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word = mkGenPrimOp (fsLit "writeWordOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Addr = mkGenPrimOp (fsLit "writeAddrOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Float = mkGenPrimOp (fsLit "writeFloatOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Double = mkGenPrimOp (fsLit "writeDoubleOffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doublePrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_StablePtr = mkGenPrimOp (fsLit "writeStablePtrOffAddr#") [alphaTyVarSpec, deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStablePtrPrimTy alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int8 = 
mkGenPrimOp (fsLit "writeInt8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int16 = mkGenPrimOp (fsLit "writeInt16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int32 = mkGenPrimOp (fsLit "writeInt32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Int64 = mkGenPrimOp (fsLit "writeInt64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word8 = mkGenPrimOp (fsLit "writeWord8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word16 = mkGenPrimOp (fsLit "writeWord16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word32 = mkGenPrimOp (fsLit "writeWord32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WriteOffAddrOp_Word64 = mkGenPrimOp (fsLit "writeWord64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo InterlockedExchange_Addr = mkGenPrimOp (fsLit "atomicExchangeAddrAddr#") [deltaTyVarSpec] [addrPrimTy, addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo InterlockedExchange_Word = mkGenPrimOp (fsLit "atomicExchangeWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo CasAddrOp_Addr = mkGenPrimOp (fsLit "atomicCasAddrAddr#") [deltaTyVarSpec] [addrPrimTy, addrPrimTy, addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) 
primOpInfo CasAddrOp_Word = mkGenPrimOp (fsLit "atomicCasWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo CasAddrOp_Word8 = mkGenPrimOp (fsLit "atomicCasWord8Addr#") [deltaTyVarSpec] [addrPrimTy, word8PrimTy, word8PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8PrimTy])) primOpInfo CasAddrOp_Word16 = mkGenPrimOp (fsLit "atomicCasWord16Addr#") [deltaTyVarSpec] [addrPrimTy, word16PrimTy, word16PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16PrimTy])) primOpInfo CasAddrOp_Word32 = mkGenPrimOp (fsLit "atomicCasWord32Addr#") [deltaTyVarSpec] [addrPrimTy, word32PrimTy, word32PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32PrimTy])) primOpInfo CasAddrOp_Word64 = mkGenPrimOp (fsLit "atomicCasWord64Addr#") [deltaTyVarSpec] [addrPrimTy, word64PrimTy, word64PrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64PrimTy])) primOpInfo FetchAddAddrOp_Word = mkGenPrimOp (fsLit "fetchAddWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchSubAddrOp_Word = mkGenPrimOp (fsLit "fetchSubWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchAndAddrOp_Word = mkGenPrimOp (fsLit "fetchAndWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchNandAddrOp_Word = mkGenPrimOp (fsLit "fetchNandWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchOrAddrOp_Word = mkGenPrimOp (fsLit "fetchOrWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed 
[mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo FetchXorAddrOp_Word = mkGenPrimOp (fsLit "fetchXorWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo AtomicReadAddrOp_Word = mkGenPrimOp (fsLit "atomicReadWordAddr#") [deltaTyVarSpec] [addrPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, wordPrimTy])) primOpInfo AtomicWriteAddrOp_Word = mkGenPrimOp (fsLit "atomicWriteWordAddr#") [deltaTyVarSpec] [addrPrimTy, wordPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo NewMutVarOp = mkGenPrimOp (fsLit "newMutVar#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMutVarPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadMutVarOp = mkGenPrimOp (fsLit "readMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteMutVarOp = mkGenPrimOp (fsLit "writeMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo AtomicModifyMutVar2Op = mkGenPrimOp (fsLit "atomicModifyMutVar2#") [deltaTyVarSpec, alphaTyVarSpec, gammaTyVarSpec] [mkMutVarPrimTy deltaTy alphaTy, (mkVisFunTyMany (alphaTy) (gammaTy)), mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy, gammaTy])) primOpInfo AtomicModifyMutVar_Op = mkGenPrimOp (fsLit "atomicModifyMutVar_#") [deltaTyVarSpec, alphaTyVarSpec] [mkMutVarPrimTy deltaTy alphaTy, (mkVisFunTyMany (alphaTy) (alphaTy)), mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy, alphaTy])) primOpInfo CasMutVarOp = mkGenPrimOp (fsLit "casMutVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMutVarPrimTy deltaTy 
levPolyAlphaTy, levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo CatchOp = mkGenPrimOp (fsLit "catch#") [runtimeRep1TyVarInf, levity2TyVarInf, openAlphaTyVarSpec, levPolyBetaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), (mkVisFunTyMany (levPolyBetaTy) ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo RaiseOp = mkGenPrimOp (fsLit "raise#") [levity1TyVarInf, runtimeRep2TyVarInf, levPolyAlphaTyVarSpec, openBetaTyVarSpec] [levPolyAlphaTy] (openBetaTy) primOpInfo RaiseIOOp = mkGenPrimOp (fsLit "raiseIO#") [levity1TyVarInf, runtimeRep2TyVarInf, levPolyAlphaTyVarSpec, openBetaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openBetaTy])) primOpInfo MaskAsyncExceptionsOp = mkGenPrimOp (fsLit "maskAsyncExceptions#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo MaskUninterruptibleOp = mkGenPrimOp (fsLit "maskUninterruptible#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo UnmaskAsyncExceptionsOp = mkGenPrimOp (fsLit "unmaskAsyncExceptions#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy])) primOpInfo 
MaskStatus = mkGenPrimOp (fsLit "getMaskingState#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo AtomicallyOp = mkGenPrimOp (fsLit "atomically#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo RetryOp = mkGenPrimOp (fsLit "retry#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo CatchRetryOp = mkGenPrimOp (fsLit "catchRetry#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo CatchSTMOp = mkGenPrimOp (fsLit "catchSTM#") [levity1TyVarInf, levPolyAlphaTyVarSpec, betaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))), (mkVisFunTyMany (betaTy) ((mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy]))))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo NewTVarOp = mkGenPrimOp (fsLit "newTVar#") [levity1TyVarInf, levPolyAlphaTyVarSpec, deltaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkTVarPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadTVarOp = mkGenPrimOp (fsLit "readTVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkTVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo 
ReadTVarIOOp = mkGenPrimOp (fsLit "readTVarIO#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkTVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteTVarOp = mkGenPrimOp (fsLit "writeTVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkTVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo NewMVarOp = mkGenPrimOp (fsLit "newMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkMVarPrimTy deltaTy levPolyAlphaTy])) primOpInfo TakeMVarOp = mkGenPrimOp (fsLit "takeMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo TryTakeMVarOp = mkGenPrimOp (fsLit "tryTakeMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo PutMVarOp = mkGenPrimOp (fsLit "putMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo TryPutMVarOp = mkGenPrimOp (fsLit "tryPutMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo ReadMVarOp = mkGenPrimOp (fsLit "readMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo TryReadMVarOp = mkGenPrimOp (fsLit "tryReadMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy 
levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, levPolyAlphaTy])) primOpInfo IsEmptyMVarOp = mkGenPrimOp (fsLit "isEmptyMVar#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkMVarPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo NewIOPortOp = mkGenPrimOp (fsLit "newIOPort#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, mkIOPortPrimTy deltaTy levPolyAlphaTy])) primOpInfo ReadIOPortOp = mkGenPrimOp (fsLit "readIOPort#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkIOPortPrimTy deltaTy levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, levPolyAlphaTy])) primOpInfo WriteIOPortOp = mkGenPrimOp (fsLit "writeIOPort#") [levity1TyVarInf, deltaTyVarSpec, levPolyAlphaTyVarSpec] [mkIOPortPrimTy deltaTy levPolyAlphaTy, levPolyAlphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo DelayOp = mkGenPrimOp (fsLit "delay#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WaitReadOp = mkGenPrimOp (fsLit "waitRead#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo WaitWriteOp = mkGenPrimOp (fsLit "waitWrite#") [deltaTyVarSpec] [intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ForkOp = mkGenPrimOp (fsLit "fork#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, threadIdPrimTy])) primOpInfo ForkOnOp = mkGenPrimOp (fsLit "forkOn#") [runtimeRep1TyVarInf, openAlphaTyVarSpec] [intPrimTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, openAlphaTy]))), mkStatePrimTy realWorldTy] 
((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, threadIdPrimTy])) primOpInfo KillThreadOp = mkGenPrimOp (fsLit "killThread#") [alphaTyVarSpec] [threadIdPrimTy, alphaTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo YieldOp = mkGenPrimOp (fsLit "yield#") [] [mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo MyThreadIdOp = mkGenPrimOp (fsLit "myThreadId#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, threadIdPrimTy])) primOpInfo LabelThreadOp = mkGenPrimOp (fsLit "labelThread#") [] [threadIdPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo IsCurrentThreadBoundOp = mkGenPrimOp (fsLit "isCurrentThreadBound#") [] [mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo NoDuplicateOp = mkGenPrimOp (fsLit "noDuplicate#") [deltaTyVarSpec] [mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo ThreadStatusOp = mkGenPrimOp (fsLit "threadStatus#") [] [threadIdPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, intPrimTy, intPrimTy])) primOpInfo MkWeakOp = mkGenPrimOp (fsLit "mkWeak#") [levity1TyVarInf, levity2TyVarInf, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec, gammaTyVarSpec] [levPolyAlphaTy, levPolyBetaTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, gammaTy]))), mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkWeakPrimTy levPolyBetaTy])) primOpInfo MkWeakNoFinalizerOp = mkGenPrimOp (fsLit "mkWeakNoFinalizer#") [levity1TyVarInf, levity2TyVarInf, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec] [levPolyAlphaTy, levPolyBetaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkWeakPrimTy levPolyBetaTy])) primOpInfo AddCFinalizerToWeakOp = mkGenPrimOp (fsLit "addCFinalizerToWeak#") [levity2TyVarInf, levPolyBetaTyVarSpec] [addrPrimTy, addrPrimTy, intPrimTy, addrPrimTy, mkWeakPrimTy 
levPolyBetaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo DeRefWeakOp = mkGenPrimOp (fsLit "deRefWeak#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkWeakPrimTy levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, levPolyAlphaTy])) primOpInfo FinalizeWeakOp = mkGenPrimOp (fsLit "finalizeWeak#") [levity1TyVarInf, levPolyAlphaTyVarSpec, betaTyVarSpec] [mkWeakPrimTy levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, betaTy])))])) primOpInfo TouchOp = mkGenPrimOp (fsLit "touch#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo MakeStablePtrOp = mkGenPrimOp (fsLit "makeStablePtr#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkStablePtrPrimTy levPolyAlphaTy])) primOpInfo DeRefStablePtrOp = mkGenPrimOp (fsLit "deRefStablePtr#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStablePtrPrimTy levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, levPolyAlphaTy])) primOpInfo EqStablePtrOp = mkGenPrimOp (fsLit "eqStablePtr#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStablePtrPrimTy levPolyAlphaTy, mkStablePtrPrimTy levPolyAlphaTy] (intPrimTy) primOpInfo MakeStableNameOp = mkGenPrimOp (fsLit "makeStableName#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, mkStableNamePrimTy levPolyAlphaTy])) primOpInfo StableNameToIntOp = mkGenPrimOp (fsLit "stableNameToInt#") [levity1TyVarInf, levPolyAlphaTyVarSpec] [mkStableNamePrimTy levPolyAlphaTy] (intPrimTy) primOpInfo CompactNewOp = mkGenPrimOp (fsLit "compactNew#") [] [wordPrimTy, mkStatePrimTy realWorldTy] 
((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, compactPrimTy])) primOpInfo CompactResizeOp = mkGenPrimOp (fsLit "compactResize#") [] [compactPrimTy, wordPrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo CompactContainsOp = mkGenPrimOp (fsLit "compactContains#") [alphaTyVarSpec] [compactPrimTy, alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo CompactContainsAnyOp = mkGenPrimOp (fsLit "compactContainsAny#") [alphaTyVarSpec] [alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, intPrimTy])) primOpInfo CompactGetFirstBlockOp = mkGenPrimOp (fsLit "compactGetFirstBlock#") [] [compactPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy, wordPrimTy])) primOpInfo CompactGetNextBlockOp = mkGenPrimOp (fsLit "compactGetNextBlock#") [] [compactPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy, wordPrimTy])) primOpInfo CompactAllocateBlockOp = mkGenPrimOp (fsLit "compactAllocateBlock#") [] [wordPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy])) primOpInfo CompactFixupPointersOp = mkGenPrimOp (fsLit "compactFixupPointers#") [] [addrPrimTy, addrPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, compactPrimTy, addrPrimTy])) primOpInfo CompactAdd = mkGenPrimOp (fsLit "compactAdd#") [alphaTyVarSpec] [compactPrimTy, alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy])) primOpInfo CompactAddWithSharing = mkGenPrimOp (fsLit "compactAddWithSharing#") [alphaTyVarSpec] [compactPrimTy, alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, alphaTy])) primOpInfo CompactSize = mkGenPrimOp (fsLit "compactSize#") [] [compactPrimTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, wordPrimTy])) primOpInfo 
ReallyUnsafePtrEqualityOp = mkGenPrimOp (fsLit "reallyUnsafePtrEquality#") [levity1TyVarInf, levity2TyVarInf, levPolyAlphaTyVarSpec, levPolyBetaTyVarSpec] [levPolyAlphaTy, levPolyBetaTy] (intPrimTy) primOpInfo ParOp = mkGenPrimOp (fsLit "par#") [alphaTyVarSpec] [alphaTy] (intPrimTy) primOpInfo SparkOp = mkGenPrimOp (fsLit "spark#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy])) primOpInfo SeqOp = mkGenPrimOp (fsLit "seq#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy])) primOpInfo GetSparkOp = mkGenPrimOp (fsLit "getSpark#") [deltaTyVarSpec, alphaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy, alphaTy])) primOpInfo NumSparks = mkGenPrimOp (fsLit "numSparks#") [deltaTyVarSpec] [mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, intPrimTy])) primOpInfo KeepAliveOp = mkGenPrimOp (fsLit "keepAlive#") [levity1TyVarInf, runtimeRep2TyVarInf, levPolyAlphaTyVarSpec, openBetaTyVarSpec] [levPolyAlphaTy, mkStatePrimTy realWorldTy, (mkVisFunTyMany (mkStatePrimTy realWorldTy) (openBetaTy))] (openBetaTy) primOpInfo DataToTagOp = mkGenPrimOp (fsLit "dataToTag#") [alphaTyVarSpec] [alphaTy] (intPrimTy) primOpInfo TagToEnumOp = mkGenPrimOp (fsLit "tagToEnum#") [alphaTyVarSpec] [intPrimTy] (alphaTy) primOpInfo AddrToAnyOp = mkGenPrimOp (fsLit "addrToAny#") [alphaTyVarSpec] [addrPrimTy] ((mkTupleTy Unboxed [alphaTy])) primOpInfo AnyToAddrOp = mkGenPrimOp (fsLit "anyToAddr#") [alphaTyVarSpec] [alphaTy, mkStatePrimTy realWorldTy] ((mkTupleTy Unboxed [mkStatePrimTy realWorldTy, addrPrimTy])) primOpInfo MkApUpd0_Op = mkGenPrimOp (fsLit "mkApUpd0#") [alphaTyVarSpec] [bcoPrimTy] ((mkTupleTy Unboxed [alphaTy])) primOpInfo NewBCOOp = mkGenPrimOp (fsLit "newBCO#") [alphaTyVarSpec, deltaTyVarSpec] [byteArrayPrimTy, byteArrayPrimTy, mkArrayPrimTy alphaTy, intPrimTy, byteArrayPrimTy, 
mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, bcoPrimTy])) primOpInfo UnpackClosureOp = mkGenPrimOp (fsLit "unpackClosure#") [alphaTyVarSpec, betaTyVarSpec] [alphaTy] ((mkTupleTy Unboxed [addrPrimTy, byteArrayPrimTy, mkArrayPrimTy betaTy])) primOpInfo ClosureSizeOp = mkGenPrimOp (fsLit "closureSize#") [alphaTyVarSpec] [alphaTy] (intPrimTy) primOpInfo GetApStackValOp = mkGenPrimOp (fsLit "getApStackVal#") [alphaTyVarSpec, betaTyVarSpec] [alphaTy, intPrimTy] ((mkTupleTy Unboxed [intPrimTy, betaTy])) primOpInfo GetCCSOfOp = mkGenPrimOp (fsLit "getCCSOf#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo GetCurrentCCSOp = mkGenPrimOp (fsLit "getCurrentCCS#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo ClearCCSOp = mkGenPrimOp (fsLit "clearCCS#") [deltaTyVarSpec, alphaTyVarSpec] [(mkVisFunTyMany (mkStatePrimTy deltaTy) ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy]))), mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, alphaTy])) primOpInfo WhereFromOp = mkGenPrimOp (fsLit "whereFrom#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, addrPrimTy])) primOpInfo TraceEventOp = mkGenPrimOp (fsLit "traceEvent#") [deltaTyVarSpec] [addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo TraceEventBinaryOp = mkGenPrimOp (fsLit "traceBinaryEvent#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo TraceMarkerOp = mkGenPrimOp (fsLit "traceMarker#") [deltaTyVarSpec] [addrPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo SetThreadAllocationCounter = mkGenPrimOp (fsLit "setThreadAllocationCounter#") [] [int64PrimTy, mkStatePrimTy realWorldTy] (mkStatePrimTy realWorldTy) primOpInfo (VecBroadcastOp IntVec 16 W8) = mkGenPrimOp 
(fsLit "broadcastInt8X16#") [] [int8PrimTy] (int8X16PrimTy) primOpInfo (VecBroadcastOp IntVec 8 W16) = mkGenPrimOp (fsLit "broadcastInt16X8#") [] [int16PrimTy] (int16X8PrimTy) primOpInfo (VecBroadcastOp IntVec 4 W32) = mkGenPrimOp (fsLit "broadcastInt32X4#") [] [int32PrimTy] (int32X4PrimTy) primOpInfo (VecBroadcastOp IntVec 2 W64) = mkGenPrimOp (fsLit "broadcastInt64X2#") [] [int64PrimTy] (int64X2PrimTy) primOpInfo (VecBroadcastOp IntVec 32 W8) = mkGenPrimOp (fsLit "broadcastInt8X32#") [] [int8PrimTy] (int8X32PrimTy) primOpInfo (VecBroadcastOp IntVec 16 W16) = mkGenPrimOp (fsLit "broadcastInt16X16#") [] [int16PrimTy] (int16X16PrimTy) primOpInfo (VecBroadcastOp IntVec 8 W32) = mkGenPrimOp (fsLit "broadcastInt32X8#") [] [int32PrimTy] (int32X8PrimTy) primOpInfo (VecBroadcastOp IntVec 4 W64) = mkGenPrimOp (fsLit "broadcastInt64X4#") [] [int64PrimTy] (int64X4PrimTy) primOpInfo (VecBroadcastOp IntVec 64 W8) = mkGenPrimOp (fsLit "broadcastInt8X64#") [] [int8PrimTy] (int8X64PrimTy) primOpInfo (VecBroadcastOp IntVec 32 W16) = mkGenPrimOp (fsLit "broadcastInt16X32#") [] [int16PrimTy] (int16X32PrimTy) primOpInfo (VecBroadcastOp IntVec 16 W32) = mkGenPrimOp (fsLit "broadcastInt32X16#") [] [int32PrimTy] (int32X16PrimTy) primOpInfo (VecBroadcastOp IntVec 8 W64) = mkGenPrimOp (fsLit "broadcastInt64X8#") [] [int64PrimTy] (int64X8PrimTy) primOpInfo (VecBroadcastOp WordVec 16 W8) = mkGenPrimOp (fsLit "broadcastWord8X16#") [] [word8PrimTy] (word8X16PrimTy) primOpInfo (VecBroadcastOp WordVec 8 W16) = mkGenPrimOp (fsLit "broadcastWord16X8#") [] [word16PrimTy] (word16X8PrimTy) primOpInfo (VecBroadcastOp WordVec 4 W32) = mkGenPrimOp (fsLit "broadcastWord32X4#") [] [word32PrimTy] (word32X4PrimTy) primOpInfo (VecBroadcastOp WordVec 2 W64) = mkGenPrimOp (fsLit "broadcastWord64X2#") [] [word64PrimTy] (word64X2PrimTy) primOpInfo (VecBroadcastOp WordVec 32 W8) = mkGenPrimOp (fsLit "broadcastWord8X32#") [] [word8PrimTy] (word8X32PrimTy) primOpInfo (VecBroadcastOp WordVec 16 W16) = mkGenPrimOp 
(fsLit "broadcastWord16X16#") [] [word16PrimTy] (word16X16PrimTy) primOpInfo (VecBroadcastOp WordVec 8 W32) = mkGenPrimOp (fsLit "broadcastWord32X8#") [] [word32PrimTy] (word32X8PrimTy) primOpInfo (VecBroadcastOp WordVec 4 W64) = mkGenPrimOp (fsLit "broadcastWord64X4#") [] [word64PrimTy] (word64X4PrimTy) primOpInfo (VecBroadcastOp WordVec 64 W8) = mkGenPrimOp (fsLit "broadcastWord8X64#") [] [word8PrimTy] (word8X64PrimTy) primOpInfo (VecBroadcastOp WordVec 32 W16) = mkGenPrimOp (fsLit "broadcastWord16X32#") [] [word16PrimTy] (word16X32PrimTy) primOpInfo (VecBroadcastOp WordVec 16 W32) = mkGenPrimOp (fsLit "broadcastWord32X16#") [] [word32PrimTy] (word32X16PrimTy) primOpInfo (VecBroadcastOp WordVec 8 W64) = mkGenPrimOp (fsLit "broadcastWord64X8#") [] [word64PrimTy] (word64X8PrimTy) primOpInfo (VecBroadcastOp FloatVec 4 W32) = mkGenPrimOp (fsLit "broadcastFloatX4#") [] [floatPrimTy] (floatX4PrimTy) primOpInfo (VecBroadcastOp FloatVec 2 W64) = mkGenPrimOp (fsLit "broadcastDoubleX2#") [] [doublePrimTy] (doubleX2PrimTy) primOpInfo (VecBroadcastOp FloatVec 8 W32) = mkGenPrimOp (fsLit "broadcastFloatX8#") [] [floatPrimTy] (floatX8PrimTy) primOpInfo (VecBroadcastOp FloatVec 4 W64) = mkGenPrimOp (fsLit "broadcastDoubleX4#") [] [doublePrimTy] (doubleX4PrimTy) primOpInfo (VecBroadcastOp FloatVec 16 W32) = mkGenPrimOp (fsLit "broadcastFloatX16#") [] [floatPrimTy] (floatX16PrimTy) primOpInfo (VecBroadcastOp FloatVec 8 W64) = mkGenPrimOp (fsLit "broadcastDoubleX8#") [] [doublePrimTy] (doubleX8PrimTy) primOpInfo (VecPackOp IntVec 16 W8) = mkGenPrimOp (fsLit "packInt8X16#") [] [(mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])] (int8X16PrimTy) primOpInfo (VecPackOp IntVec 8 W16) = mkGenPrimOp (fsLit "packInt16X8#") [] [(mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, 
int16PrimTy, int16PrimTy])] (int16X8PrimTy) primOpInfo (VecPackOp IntVec 4 W32) = mkGenPrimOp (fsLit "packInt32X4#") [] [(mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])] (int32X4PrimTy) primOpInfo (VecPackOp IntVec 2 W64) = mkGenPrimOp (fsLit "packInt64X2#") [] [(mkTupleTy Unboxed [int64PrimTy, int64PrimTy])] (int64X2PrimTy) primOpInfo (VecPackOp IntVec 32 W8) = mkGenPrimOp (fsLit "packInt8X32#") [] [(mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])] (int8X32PrimTy) primOpInfo (VecPackOp IntVec 16 W16) = mkGenPrimOp (fsLit "packInt16X16#") [] [(mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])] (int16X16PrimTy) primOpInfo (VecPackOp IntVec 8 W32) = mkGenPrimOp (fsLit "packInt32X8#") [] [(mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])] (int32X8PrimTy) primOpInfo (VecPackOp IntVec 4 W64) = mkGenPrimOp (fsLit "packInt64X4#") [] [(mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])] (int64X4PrimTy) primOpInfo (VecPackOp IntVec 64 W8) = mkGenPrimOp (fsLit "packInt8X64#") [] [(mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, 
int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])] (int8X64PrimTy) primOpInfo (VecPackOp IntVec 32 W16) = mkGenPrimOp (fsLit "packInt16X32#") [] [(mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])] (int16X32PrimTy) primOpInfo (VecPackOp IntVec 16 W32) = mkGenPrimOp (fsLit "packInt32X16#") [] [(mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])] (int32X16PrimTy) primOpInfo (VecPackOp IntVec 8 W64) = mkGenPrimOp (fsLit "packInt64X8#") [] [(mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])] (int64X8PrimTy) primOpInfo (VecPackOp WordVec 16 W8) = mkGenPrimOp (fsLit "packWord8X16#") [] [(mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])] (word8X16PrimTy) primOpInfo (VecPackOp WordVec 8 W16) = mkGenPrimOp (fsLit "packWord16X8#") [] [(mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, 
word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])] (word16X8PrimTy) primOpInfo (VecPackOp WordVec 4 W32) = mkGenPrimOp (fsLit "packWord32X4#") [] [(mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])] (word32X4PrimTy) primOpInfo (VecPackOp WordVec 2 W64) = mkGenPrimOp (fsLit "packWord64X2#") [] [(mkTupleTy Unboxed [word64PrimTy, word64PrimTy])] (word64X2PrimTy) primOpInfo (VecPackOp WordVec 32 W8) = mkGenPrimOp (fsLit "packWord8X32#") [] [(mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])] (word8X32PrimTy) primOpInfo (VecPackOp WordVec 16 W16) = mkGenPrimOp (fsLit "packWord16X16#") [] [(mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])] (word16X16PrimTy) primOpInfo (VecPackOp WordVec 8 W32) = mkGenPrimOp (fsLit "packWord32X8#") [] [(mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])] (word32X8PrimTy) primOpInfo (VecPackOp WordVec 4 W64) = mkGenPrimOp (fsLit "packWord64X4#") [] [(mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])] (word64X4PrimTy) primOpInfo (VecPackOp WordVec 64 W8) = mkGenPrimOp (fsLit "packWord8X64#") [] [(mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, 
word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])] (word8X64PrimTy) primOpInfo (VecPackOp WordVec 32 W16) = mkGenPrimOp (fsLit "packWord16X32#") [] [(mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])] (word16X32PrimTy) primOpInfo (VecPackOp WordVec 16 W32) = mkGenPrimOp (fsLit "packWord32X16#") [] [(mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])] (word32X16PrimTy) primOpInfo (VecPackOp WordVec 8 W64) = mkGenPrimOp (fsLit "packWord64X8#") [] [(mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])] (word64X8PrimTy) primOpInfo (VecPackOp FloatVec 4 W32) = mkGenPrimOp (fsLit "packFloatX4#") [] [(mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])] (floatX4PrimTy) primOpInfo (VecPackOp FloatVec 2 W64) = 
mkGenPrimOp (fsLit "packDoubleX2#") [] [(mkTupleTy Unboxed [doublePrimTy, doublePrimTy])] (doubleX2PrimTy) primOpInfo (VecPackOp FloatVec 8 W32) = mkGenPrimOp (fsLit "packFloatX8#") [] [(mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])] (floatX8PrimTy) primOpInfo (VecPackOp FloatVec 4 W64) = mkGenPrimOp (fsLit "packDoubleX4#") [] [(mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])] (doubleX4PrimTy) primOpInfo (VecPackOp FloatVec 16 W32) = mkGenPrimOp (fsLit "packFloatX16#") [] [(mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])] (floatX16PrimTy) primOpInfo (VecPackOp FloatVec 8 W64) = mkGenPrimOp (fsLit "packDoubleX8#") [] [(mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])] (doubleX8PrimTy) primOpInfo (VecUnpackOp IntVec 16 W8) = mkGenPrimOp (fsLit "unpackInt8X16#") [] [int8X16PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])) primOpInfo (VecUnpackOp IntVec 8 W16) = mkGenPrimOp (fsLit "unpackInt16X8#") [] [int16X8PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])) primOpInfo (VecUnpackOp IntVec 4 W32) = mkGenPrimOp (fsLit "unpackInt32X4#") [] [int32X4PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])) primOpInfo (VecUnpackOp IntVec 2 W64) = mkGenPrimOp (fsLit "unpackInt64X2#") [] [int64X2PrimTy] ((mkTupleTy Unboxed [int64PrimTy, int64PrimTy])) primOpInfo (VecUnpackOp IntVec 32 W8) = mkGenPrimOp (fsLit "unpackInt8X32#") [] 
[int8X32PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy])) primOpInfo (VecUnpackOp IntVec 16 W16) = mkGenPrimOp (fsLit "unpackInt16X16#") [] [int16X16PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])) primOpInfo (VecUnpackOp IntVec 8 W32) = mkGenPrimOp (fsLit "unpackInt32X8#") [] [int32X8PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])) primOpInfo (VecUnpackOp IntVec 4 W64) = mkGenPrimOp (fsLit "unpackInt64X4#") [] [int64X4PrimTy] ((mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])) primOpInfo (VecUnpackOp IntVec 64 W8) = mkGenPrimOp (fsLit "unpackInt8X64#") [] [int8X64PrimTy] ((mkTupleTy Unboxed [int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, int8PrimTy, 
int8PrimTy, int8PrimTy, int8PrimTy])) primOpInfo (VecUnpackOp IntVec 32 W16) = mkGenPrimOp (fsLit "unpackInt16X32#") [] [int16X32PrimTy] ((mkTupleTy Unboxed [int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy, int16PrimTy])) primOpInfo (VecUnpackOp IntVec 16 W32) = mkGenPrimOp (fsLit "unpackInt32X16#") [] [int32X16PrimTy] ((mkTupleTy Unboxed [int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy, int32PrimTy])) primOpInfo (VecUnpackOp IntVec 8 W64) = mkGenPrimOp (fsLit "unpackInt64X8#") [] [int64X8PrimTy] ((mkTupleTy Unboxed [int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy, int64PrimTy])) primOpInfo (VecUnpackOp WordVec 16 W8) = mkGenPrimOp (fsLit "unpackWord8X16#") [] [word8X16PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])) primOpInfo (VecUnpackOp WordVec 8 W16) = mkGenPrimOp (fsLit "unpackWord16X8#") [] [word16X8PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])) primOpInfo (VecUnpackOp WordVec 4 W32) = mkGenPrimOp (fsLit "unpackWord32X4#") [] [word32X4PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])) primOpInfo (VecUnpackOp WordVec 2 W64) = mkGenPrimOp (fsLit "unpackWord64X2#") [] [word64X2PrimTy] ((mkTupleTy Unboxed 
[word64PrimTy, word64PrimTy])) primOpInfo (VecUnpackOp WordVec 32 W8) = mkGenPrimOp (fsLit "unpackWord8X32#") [] [word8X32PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])) primOpInfo (VecUnpackOp WordVec 16 W16) = mkGenPrimOp (fsLit "unpackWord16X16#") [] [word16X16PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])) primOpInfo (VecUnpackOp WordVec 8 W32) = mkGenPrimOp (fsLit "unpackWord32X8#") [] [word32X8PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])) primOpInfo (VecUnpackOp WordVec 4 W64) = mkGenPrimOp (fsLit "unpackWord64X4#") [] [word64X4PrimTy] ((mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])) primOpInfo (VecUnpackOp WordVec 64 W8) = mkGenPrimOp (fsLit "unpackWord8X64#") [] [word8X64PrimTy] ((mkTupleTy Unboxed [word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, 
word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy, word8PrimTy])) primOpInfo (VecUnpackOp WordVec 32 W16) = mkGenPrimOp (fsLit "unpackWord16X32#") [] [word16X32PrimTy] ((mkTupleTy Unboxed [word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy, word16PrimTy])) primOpInfo (VecUnpackOp WordVec 16 W32) = mkGenPrimOp (fsLit "unpackWord32X16#") [] [word32X16PrimTy] ((mkTupleTy Unboxed [word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy, word32PrimTy])) primOpInfo (VecUnpackOp WordVec 8 W64) = mkGenPrimOp (fsLit "unpackWord64X8#") [] [word64X8PrimTy] ((mkTupleTy Unboxed [word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy, word64PrimTy])) primOpInfo (VecUnpackOp FloatVec 4 W32) = mkGenPrimOp (fsLit "unpackFloatX4#") [] [floatX4PrimTy] ((mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])) primOpInfo (VecUnpackOp FloatVec 2 W64) = mkGenPrimOp (fsLit "unpackDoubleX2#") [] [doubleX2PrimTy] ((mkTupleTy Unboxed [doublePrimTy, doublePrimTy])) primOpInfo (VecUnpackOp FloatVec 8 W32) = mkGenPrimOp (fsLit "unpackFloatX8#") [] [floatX8PrimTy] ((mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])) 
primOpInfo (VecUnpackOp FloatVec 4 W64) = mkGenPrimOp (fsLit "unpackDoubleX4#") [] [doubleX4PrimTy] ((mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])) primOpInfo (VecUnpackOp FloatVec 16 W32) = mkGenPrimOp (fsLit "unpackFloatX16#") [] [floatX16PrimTy] ((mkTupleTy Unboxed [floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy, floatPrimTy])) primOpInfo (VecUnpackOp FloatVec 8 W64) = mkGenPrimOp (fsLit "unpackDoubleX8#") [] [doubleX8PrimTy] ((mkTupleTy Unboxed [doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy, doublePrimTy])) primOpInfo (VecInsertOp IntVec 16 W8) = mkGenPrimOp (fsLit "insertInt8X16#") [] [int8X16PrimTy, int8PrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecInsertOp IntVec 8 W16) = mkGenPrimOp (fsLit "insertInt16X8#") [] [int16X8PrimTy, int16PrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecInsertOp IntVec 4 W32) = mkGenPrimOp (fsLit "insertInt32X4#") [] [int32X4PrimTy, int32PrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecInsertOp IntVec 2 W64) = mkGenPrimOp (fsLit "insertInt64X2#") [] [int64X2PrimTy, int64PrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecInsertOp IntVec 32 W8) = mkGenPrimOp (fsLit "insertInt8X32#") [] [int8X32PrimTy, int8PrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecInsertOp IntVec 16 W16) = mkGenPrimOp (fsLit "insertInt16X16#") [] [int16X16PrimTy, int16PrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecInsertOp IntVec 8 W32) = mkGenPrimOp (fsLit "insertInt32X8#") [] [int32X8PrimTy, int32PrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecInsertOp IntVec 4 W64) = mkGenPrimOp (fsLit "insertInt64X4#") [] [int64X4PrimTy, int64PrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecInsertOp IntVec 64 W8) = mkGenPrimOp (fsLit "insertInt8X64#") [] [int8X64PrimTy, int8PrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecInsertOp 
IntVec 32 W16) = mkGenPrimOp (fsLit "insertInt16X32#") [] [int16X32PrimTy, int16PrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecInsertOp IntVec 16 W32) = mkGenPrimOp (fsLit "insertInt32X16#") [] [int32X16PrimTy, int32PrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecInsertOp IntVec 8 W64) = mkGenPrimOp (fsLit "insertInt64X8#") [] [int64X8PrimTy, int64PrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecInsertOp WordVec 16 W8) = mkGenPrimOp (fsLit "insertWord8X16#") [] [word8X16PrimTy, word8PrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecInsertOp WordVec 8 W16) = mkGenPrimOp (fsLit "insertWord16X8#") [] [word16X8PrimTy, word16PrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecInsertOp WordVec 4 W32) = mkGenPrimOp (fsLit "insertWord32X4#") [] [word32X4PrimTy, word32PrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecInsertOp WordVec 2 W64) = mkGenPrimOp (fsLit "insertWord64X2#") [] [word64X2PrimTy, word64PrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecInsertOp WordVec 32 W8) = mkGenPrimOp (fsLit "insertWord8X32#") [] [word8X32PrimTy, word8PrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecInsertOp WordVec 16 W16) = mkGenPrimOp (fsLit "insertWord16X16#") [] [word16X16PrimTy, word16PrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecInsertOp WordVec 8 W32) = mkGenPrimOp (fsLit "insertWord32X8#") [] [word32X8PrimTy, word32PrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecInsertOp WordVec 4 W64) = mkGenPrimOp (fsLit "insertWord64X4#") [] [word64X4PrimTy, word64PrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecInsertOp WordVec 64 W8) = mkGenPrimOp (fsLit "insertWord8X64#") [] [word8X64PrimTy, word8PrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecInsertOp WordVec 32 W16) = mkGenPrimOp (fsLit "insertWord16X32#") [] [word16X32PrimTy, word16PrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecInsertOp WordVec 16 W32) = mkGenPrimOp (fsLit "insertWord32X16#") [] [word32X16PrimTy, word32PrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecInsertOp WordVec 8 W64) = 
mkGenPrimOp (fsLit "insertWord64X8#") [] [word64X8PrimTy, word64PrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecInsertOp FloatVec 4 W32) = mkGenPrimOp (fsLit "insertFloatX4#") [] [floatX4PrimTy, floatPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecInsertOp FloatVec 2 W64) = mkGenPrimOp (fsLit "insertDoubleX2#") [] [doubleX2PrimTy, doublePrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecInsertOp FloatVec 8 W32) = mkGenPrimOp (fsLit "insertFloatX8#") [] [floatX8PrimTy, floatPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecInsertOp FloatVec 4 W64) = mkGenPrimOp (fsLit "insertDoubleX4#") [] [doubleX4PrimTy, doublePrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecInsertOp FloatVec 16 W32) = mkGenPrimOp (fsLit "insertFloatX16#") [] [floatX16PrimTy, floatPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecInsertOp FloatVec 8 W64) = mkGenPrimOp (fsLit "insertDoubleX8#") [] [doubleX8PrimTy, doublePrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecAddOp IntVec 16 W8) = mkGenPrimOp (fsLit "plusInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecAddOp IntVec 8 W16) = mkGenPrimOp (fsLit "plusInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecAddOp IntVec 4 W32) = mkGenPrimOp (fsLit "plusInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecAddOp IntVec 2 W64) = mkGenPrimOp (fsLit "plusInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecAddOp IntVec 32 W8) = mkGenPrimOp (fsLit "plusInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecAddOp IntVec 16 W16) = mkGenPrimOp (fsLit "plusInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecAddOp IntVec 8 W32) = mkGenPrimOp (fsLit "plusInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecAddOp IntVec 4 W64) = mkGenPrimOp (fsLit "plusInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecAddOp IntVec 64 W8) = mkGenPrimOp (fsLit "plusInt8X64#") [] 
[int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecAddOp IntVec 32 W16) = mkGenPrimOp (fsLit "plusInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecAddOp IntVec 16 W32) = mkGenPrimOp (fsLit "plusInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecAddOp IntVec 8 W64) = mkGenPrimOp (fsLit "plusInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecAddOp WordVec 16 W8) = mkGenPrimOp (fsLit "plusWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecAddOp WordVec 8 W16) = mkGenPrimOp (fsLit "plusWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecAddOp WordVec 4 W32) = mkGenPrimOp (fsLit "plusWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecAddOp WordVec 2 W64) = mkGenPrimOp (fsLit "plusWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecAddOp WordVec 32 W8) = mkGenPrimOp (fsLit "plusWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecAddOp WordVec 16 W16) = mkGenPrimOp (fsLit "plusWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecAddOp WordVec 8 W32) = mkGenPrimOp (fsLit "plusWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecAddOp WordVec 4 W64) = mkGenPrimOp (fsLit "plusWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecAddOp WordVec 64 W8) = mkGenPrimOp (fsLit "plusWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecAddOp WordVec 32 W16) = mkGenPrimOp (fsLit "plusWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecAddOp WordVec 16 W32) = mkGenPrimOp (fsLit "plusWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecAddOp WordVec 8 W64) = mkGenPrimOp (fsLit "plusWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecAddOp FloatVec 4 W32) 
= mkGenPrimOp (fsLit "plusFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecAddOp FloatVec 2 W64) = mkGenPrimOp (fsLit "plusDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecAddOp FloatVec 8 W32) = mkGenPrimOp (fsLit "plusFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecAddOp FloatVec 4 W64) = mkGenPrimOp (fsLit "plusDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecAddOp FloatVec 16 W32) = mkGenPrimOp (fsLit "plusFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecAddOp FloatVec 8 W64) = mkGenPrimOp (fsLit "plusDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecSubOp IntVec 16 W8) = mkGenPrimOp (fsLit "minusInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecSubOp IntVec 8 W16) = mkGenPrimOp (fsLit "minusInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecSubOp IntVec 4 W32) = mkGenPrimOp (fsLit "minusInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecSubOp IntVec 2 W64) = mkGenPrimOp (fsLit "minusInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecSubOp IntVec 32 W8) = mkGenPrimOp (fsLit "minusInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecSubOp IntVec 16 W16) = mkGenPrimOp (fsLit "minusInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecSubOp IntVec 8 W32) = mkGenPrimOp (fsLit "minusInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecSubOp IntVec 4 W64) = mkGenPrimOp (fsLit "minusInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecSubOp IntVec 64 W8) = mkGenPrimOp (fsLit "minusInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecSubOp IntVec 32 W16) = mkGenPrimOp (fsLit "minusInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecSubOp IntVec 16 W32) 
= mkGenPrimOp (fsLit "minusInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecSubOp IntVec 8 W64) = mkGenPrimOp (fsLit "minusInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecSubOp WordVec 16 W8) = mkGenPrimOp (fsLit "minusWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecSubOp WordVec 8 W16) = mkGenPrimOp (fsLit "minusWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecSubOp WordVec 4 W32) = mkGenPrimOp (fsLit "minusWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecSubOp WordVec 2 W64) = mkGenPrimOp (fsLit "minusWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecSubOp WordVec 32 W8) = mkGenPrimOp (fsLit "minusWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecSubOp WordVec 16 W16) = mkGenPrimOp (fsLit "minusWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecSubOp WordVec 8 W32) = mkGenPrimOp (fsLit "minusWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecSubOp WordVec 4 W64) = mkGenPrimOp (fsLit "minusWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecSubOp WordVec 64 W8) = mkGenPrimOp (fsLit "minusWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecSubOp WordVec 32 W16) = mkGenPrimOp (fsLit "minusWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecSubOp WordVec 16 W32) = mkGenPrimOp (fsLit "minusWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecSubOp WordVec 8 W64) = mkGenPrimOp (fsLit "minusWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecSubOp FloatVec 4 W32) = mkGenPrimOp (fsLit "minusFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecSubOp FloatVec 2 W64) = mkGenPrimOp (fsLit "minusDoubleX2#") [] [doubleX2PrimTy, 
doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecSubOp FloatVec 8 W32) = mkGenPrimOp (fsLit "minusFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecSubOp FloatVec 4 W64) = mkGenPrimOp (fsLit "minusDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecSubOp FloatVec 16 W32) = mkGenPrimOp (fsLit "minusFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecSubOp FloatVec 8 W64) = mkGenPrimOp (fsLit "minusDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecMulOp IntVec 16 W8) = mkGenPrimOp (fsLit "timesInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecMulOp IntVec 8 W16) = mkGenPrimOp (fsLit "timesInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecMulOp IntVec 4 W32) = mkGenPrimOp (fsLit "timesInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecMulOp IntVec 2 W64) = mkGenPrimOp (fsLit "timesInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecMulOp IntVec 32 W8) = mkGenPrimOp (fsLit "timesInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecMulOp IntVec 16 W16) = mkGenPrimOp (fsLit "timesInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecMulOp IntVec 8 W32) = mkGenPrimOp (fsLit "timesInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecMulOp IntVec 4 W64) = mkGenPrimOp (fsLit "timesInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecMulOp IntVec 64 W8) = mkGenPrimOp (fsLit "timesInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecMulOp IntVec 32 W16) = mkGenPrimOp (fsLit "timesInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecMulOp IntVec 16 W32) = mkGenPrimOp (fsLit "timesInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecMulOp IntVec 8 W64) = mkGenPrimOp (fsLit "timesInt64X8#") [] 
[int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecMulOp WordVec 16 W8) = mkGenPrimOp (fsLit "timesWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecMulOp WordVec 8 W16) = mkGenPrimOp (fsLit "timesWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecMulOp WordVec 4 W32) = mkGenPrimOp (fsLit "timesWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecMulOp WordVec 2 W64) = mkGenPrimOp (fsLit "timesWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecMulOp WordVec 32 W8) = mkGenPrimOp (fsLit "timesWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecMulOp WordVec 16 W16) = mkGenPrimOp (fsLit "timesWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecMulOp WordVec 8 W32) = mkGenPrimOp (fsLit "timesWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecMulOp WordVec 4 W64) = mkGenPrimOp (fsLit "timesWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecMulOp WordVec 64 W8) = mkGenPrimOp (fsLit "timesWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecMulOp WordVec 32 W16) = mkGenPrimOp (fsLit "timesWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecMulOp WordVec 16 W32) = mkGenPrimOp (fsLit "timesWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecMulOp WordVec 8 W64) = mkGenPrimOp (fsLit "timesWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecMulOp FloatVec 4 W32) = mkGenPrimOp (fsLit "timesFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecMulOp FloatVec 2 W64) = mkGenPrimOp (fsLit "timesDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecMulOp FloatVec 8 W32) = mkGenPrimOp (fsLit "timesFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecMulOp 
FloatVec 4 W64) = mkGenPrimOp (fsLit "timesDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecMulOp FloatVec 16 W32) = mkGenPrimOp (fsLit "timesFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecMulOp FloatVec 8 W64) = mkGenPrimOp (fsLit "timesDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecDivOp FloatVec 4 W32) = mkGenPrimOp (fsLit "divideFloatX4#") [] [floatX4PrimTy, floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecDivOp FloatVec 2 W64) = mkGenPrimOp (fsLit "divideDoubleX2#") [] [doubleX2PrimTy, doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecDivOp FloatVec 8 W32) = mkGenPrimOp (fsLit "divideFloatX8#") [] [floatX8PrimTy, floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecDivOp FloatVec 4 W64) = mkGenPrimOp (fsLit "divideDoubleX4#") [] [doubleX4PrimTy, doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecDivOp FloatVec 16 W32) = mkGenPrimOp (fsLit "divideFloatX16#") [] [floatX16PrimTy, floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecDivOp FloatVec 8 W64) = mkGenPrimOp (fsLit "divideDoubleX8#") [] [doubleX8PrimTy, doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecQuotOp IntVec 16 W8) = mkGenPrimOp (fsLit "quotInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecQuotOp IntVec 8 W16) = mkGenPrimOp (fsLit "quotInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecQuotOp IntVec 4 W32) = mkGenPrimOp (fsLit "quotInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecQuotOp IntVec 2 W64) = mkGenPrimOp (fsLit "quotInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecQuotOp IntVec 32 W8) = mkGenPrimOp (fsLit "quotInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecQuotOp IntVec 16 W16) = mkGenPrimOp (fsLit "quotInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecQuotOp IntVec 8 W32) = mkGenPrimOp (fsLit "quotInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] 
(int32X8PrimTy) primOpInfo (VecQuotOp IntVec 4 W64) = mkGenPrimOp (fsLit "quotInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecQuotOp IntVec 64 W8) = mkGenPrimOp (fsLit "quotInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecQuotOp IntVec 32 W16) = mkGenPrimOp (fsLit "quotInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecQuotOp IntVec 16 W32) = mkGenPrimOp (fsLit "quotInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecQuotOp IntVec 8 W64) = mkGenPrimOp (fsLit "quotInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecQuotOp WordVec 16 W8) = mkGenPrimOp (fsLit "quotWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecQuotOp WordVec 8 W16) = mkGenPrimOp (fsLit "quotWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecQuotOp WordVec 4 W32) = mkGenPrimOp (fsLit "quotWord32X4#") [] [word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecQuotOp WordVec 2 W64) = mkGenPrimOp (fsLit "quotWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecQuotOp WordVec 32 W8) = mkGenPrimOp (fsLit "quotWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecQuotOp WordVec 16 W16) = mkGenPrimOp (fsLit "quotWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecQuotOp WordVec 8 W32) = mkGenPrimOp (fsLit "quotWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecQuotOp WordVec 4 W64) = mkGenPrimOp (fsLit "quotWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecQuotOp WordVec 64 W8) = mkGenPrimOp (fsLit "quotWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecQuotOp WordVec 32 W16) = mkGenPrimOp (fsLit "quotWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecQuotOp WordVec 16 W32) = mkGenPrimOp (fsLit 
"quotWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecQuotOp WordVec 8 W64) = mkGenPrimOp (fsLit "quotWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecRemOp IntVec 16 W8) = mkGenPrimOp (fsLit "remInt8X16#") [] [int8X16PrimTy, int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecRemOp IntVec 8 W16) = mkGenPrimOp (fsLit "remInt16X8#") [] [int16X8PrimTy, int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecRemOp IntVec 4 W32) = mkGenPrimOp (fsLit "remInt32X4#") [] [int32X4PrimTy, int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecRemOp IntVec 2 W64) = mkGenPrimOp (fsLit "remInt64X2#") [] [int64X2PrimTy, int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecRemOp IntVec 32 W8) = mkGenPrimOp (fsLit "remInt8X32#") [] [int8X32PrimTy, int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecRemOp IntVec 16 W16) = mkGenPrimOp (fsLit "remInt16X16#") [] [int16X16PrimTy, int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecRemOp IntVec 8 W32) = mkGenPrimOp (fsLit "remInt32X8#") [] [int32X8PrimTy, int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecRemOp IntVec 4 W64) = mkGenPrimOp (fsLit "remInt64X4#") [] [int64X4PrimTy, int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecRemOp IntVec 64 W8) = mkGenPrimOp (fsLit "remInt8X64#") [] [int8X64PrimTy, int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecRemOp IntVec 32 W16) = mkGenPrimOp (fsLit "remInt16X32#") [] [int16X32PrimTy, int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecRemOp IntVec 16 W32) = mkGenPrimOp (fsLit "remInt32X16#") [] [int32X16PrimTy, int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecRemOp IntVec 8 W64) = mkGenPrimOp (fsLit "remInt64X8#") [] [int64X8PrimTy, int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecRemOp WordVec 16 W8) = mkGenPrimOp (fsLit "remWord8X16#") [] [word8X16PrimTy, word8X16PrimTy] (word8X16PrimTy) primOpInfo (VecRemOp WordVec 8 W16) = mkGenPrimOp (fsLit "remWord16X8#") [] [word16X8PrimTy, word16X8PrimTy] (word16X8PrimTy) primOpInfo (VecRemOp WordVec 4 W32) = mkGenPrimOp (fsLit "remWord32X4#") [] 
[word32X4PrimTy, word32X4PrimTy] (word32X4PrimTy) primOpInfo (VecRemOp WordVec 2 W64) = mkGenPrimOp (fsLit "remWord64X2#") [] [word64X2PrimTy, word64X2PrimTy] (word64X2PrimTy) primOpInfo (VecRemOp WordVec 32 W8) = mkGenPrimOp (fsLit "remWord8X32#") [] [word8X32PrimTy, word8X32PrimTy] (word8X32PrimTy) primOpInfo (VecRemOp WordVec 16 W16) = mkGenPrimOp (fsLit "remWord16X16#") [] [word16X16PrimTy, word16X16PrimTy] (word16X16PrimTy) primOpInfo (VecRemOp WordVec 8 W32) = mkGenPrimOp (fsLit "remWord32X8#") [] [word32X8PrimTy, word32X8PrimTy] (word32X8PrimTy) primOpInfo (VecRemOp WordVec 4 W64) = mkGenPrimOp (fsLit "remWord64X4#") [] [word64X4PrimTy, word64X4PrimTy] (word64X4PrimTy) primOpInfo (VecRemOp WordVec 64 W8) = mkGenPrimOp (fsLit "remWord8X64#") [] [word8X64PrimTy, word8X64PrimTy] (word8X64PrimTy) primOpInfo (VecRemOp WordVec 32 W16) = mkGenPrimOp (fsLit "remWord16X32#") [] [word16X32PrimTy, word16X32PrimTy] (word16X32PrimTy) primOpInfo (VecRemOp WordVec 16 W32) = mkGenPrimOp (fsLit "remWord32X16#") [] [word32X16PrimTy, word32X16PrimTy] (word32X16PrimTy) primOpInfo (VecRemOp WordVec 8 W64) = mkGenPrimOp (fsLit "remWord64X8#") [] [word64X8PrimTy, word64X8PrimTy] (word64X8PrimTy) primOpInfo (VecNegOp IntVec 16 W8) = mkGenPrimOp (fsLit "negateInt8X16#") [] [int8X16PrimTy] (int8X16PrimTy) primOpInfo (VecNegOp IntVec 8 W16) = mkGenPrimOp (fsLit "negateInt16X8#") [] [int16X8PrimTy] (int16X8PrimTy) primOpInfo (VecNegOp IntVec 4 W32) = mkGenPrimOp (fsLit "negateInt32X4#") [] [int32X4PrimTy] (int32X4PrimTy) primOpInfo (VecNegOp IntVec 2 W64) = mkGenPrimOp (fsLit "negateInt64X2#") [] [int64X2PrimTy] (int64X2PrimTy) primOpInfo (VecNegOp IntVec 32 W8) = mkGenPrimOp (fsLit "negateInt8X32#") [] [int8X32PrimTy] (int8X32PrimTy) primOpInfo (VecNegOp IntVec 16 W16) = mkGenPrimOp (fsLit "negateInt16X16#") [] [int16X16PrimTy] (int16X16PrimTy) primOpInfo (VecNegOp IntVec 8 W32) = mkGenPrimOp (fsLit "negateInt32X8#") [] [int32X8PrimTy] (int32X8PrimTy) primOpInfo (VecNegOp IntVec 4 
W64) = mkGenPrimOp (fsLit "negateInt64X4#") [] [int64X4PrimTy] (int64X4PrimTy) primOpInfo (VecNegOp IntVec 64 W8) = mkGenPrimOp (fsLit "negateInt8X64#") [] [int8X64PrimTy] (int8X64PrimTy) primOpInfo (VecNegOp IntVec 32 W16) = mkGenPrimOp (fsLit "negateInt16X32#") [] [int16X32PrimTy] (int16X32PrimTy) primOpInfo (VecNegOp IntVec 16 W32) = mkGenPrimOp (fsLit "negateInt32X16#") [] [int32X16PrimTy] (int32X16PrimTy) primOpInfo (VecNegOp IntVec 8 W64) = mkGenPrimOp (fsLit "negateInt64X8#") [] [int64X8PrimTy] (int64X8PrimTy) primOpInfo (VecNegOp FloatVec 4 W32) = mkGenPrimOp (fsLit "negateFloatX4#") [] [floatX4PrimTy] (floatX4PrimTy) primOpInfo (VecNegOp FloatVec 2 W64) = mkGenPrimOp (fsLit "negateDoubleX2#") [] [doubleX2PrimTy] (doubleX2PrimTy) primOpInfo (VecNegOp FloatVec 8 W32) = mkGenPrimOp (fsLit "negateFloatX8#") [] [floatX8PrimTy] (floatX8PrimTy) primOpInfo (VecNegOp FloatVec 4 W64) = mkGenPrimOp (fsLit "negateDoubleX4#") [] [doubleX4PrimTy] (doubleX4PrimTy) primOpInfo (VecNegOp FloatVec 16 W32) = mkGenPrimOp (fsLit "negateFloatX16#") [] [floatX16PrimTy] (floatX16PrimTy) primOpInfo (VecNegOp FloatVec 8 W64) = mkGenPrimOp (fsLit "negateDoubleX8#") [] [doubleX8PrimTy] (doubleX8PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8X16Array#") [] [byteArrayPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16X8Array#") [] [byteArrayPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32X4Array#") [] [byteArrayPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64X2Array#") [] [byteArrayPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8X32Array#") [] [byteArrayPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16X16Array#") [] [byteArrayPrimTy, 
intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32X8Array#") [] [byteArrayPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "indexInt64X4Array#") [] [byteArrayPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8X64Array#") [] [byteArrayPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16X32Array#") [] [byteArrayPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32X16Array#") [] [byteArrayPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64X8Array#") [] [byteArrayPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8X16Array#") [] [byteArrayPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16X8Array#") [] [byteArrayPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32X4Array#") [] [byteArrayPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64X2Array#") [] [byteArrayPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8X32Array#") [] [byteArrayPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16X16Array#") [] [byteArrayPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32X8Array#") [] [byteArrayPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64X4Array#") [] [byteArrayPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo 
(VecIndexByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8X64Array#") [] [byteArrayPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16X32Array#") [] [byteArrayPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32X16Array#") [] [byteArrayPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64X8Array#") [] [byteArrayPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatX4Array#") [] [byteArrayPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleX2Array#") [] [byteArrayPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatX8Array#") [] [byteArrayPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleX4Array#") [] [byteArrayPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatX16Array#") [] [byteArrayPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleX8Array#") [] [byteArrayPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy 
deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo 
(VecReadByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64X4Array#") [deltaTyVarSpec] 
[mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo (VecReadByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleX2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed 
[mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatX16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] 
(mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64X2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp 
WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64X4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8X64Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16X32Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32X16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64X8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit 
"writeDoubleX2Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleX4Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatX16Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleX8Array#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecIndexOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8X16OffAddr#") [] [addrPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16X8OffAddr#") [] [addrPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32X4OffAddr#") [] [addrPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64X2OffAddr#") [] [addrPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8X32OffAddr#") [] [addrPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16X16OffAddr#") [] [addrPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32X8OffAddr#") [] [addrPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 4 W64) = 
mkGenPrimOp (fsLit "indexInt64X4OffAddr#") [] [addrPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8X64OffAddr#") [] [addrPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16X32OffAddr#") [] [addrPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32X16OffAddr#") [] [addrPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64X8OffAddr#") [] [addrPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8X16OffAddr#") [] [addrPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16X8OffAddr#") [] [addrPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32X4OffAddr#") [] [addrPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64X2OffAddr#") [] [addrPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8X32OffAddr#") [] [addrPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16X16OffAddr#") [] [addrPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32X8OffAddr#") [] [addrPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64X4OffAddr#") [] [addrPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8X64OffAddr#") [] [addrPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16X32OffAddr#") [] [addrPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo 
(VecIndexOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32X16OffAddr#") [] [addrPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64X8OffAddr#") [] [addrPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatX4OffAddr#") [] [addrPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleX2OffAddr#") [] [addrPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatX8OffAddr#") [] [addrPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleX4OffAddr#") [] [addrPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatX16OffAddr#") [] [addrPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleX8OffAddr#") [] [addrPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit 
"readInt8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy 
deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, 
word32X16PrimTy])) primOpInfo (VecReadOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleX2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatX16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32X4OffAddr#") 
[deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X16PrimTy, 
mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64X2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64X4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8X64OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16X32OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32X16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy 
deltaTy) primOpInfo (VecWriteOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64X8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleX2OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleX4OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatX16OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleX8OffAddr#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8ArrayAsInt8X16#") [] [byteArrayPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16ArrayAsInt16X8#") [] [byteArrayPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32ArrayAsInt32X4#") [] [byteArrayPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64ArrayAsInt64X2#") [] [byteArrayPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo 
(VecIndexScalarByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8ArrayAsInt8X32#") [] [byteArrayPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16ArrayAsInt16X16#") [] [byteArrayPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32ArrayAsInt32X8#") [] [byteArrayPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "indexInt64ArrayAsInt64X4#") [] [byteArrayPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8ArrayAsInt8X64#") [] [byteArrayPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16ArrayAsInt16X32#") [] [byteArrayPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "indexInt32ArrayAsInt32X16#") [] [byteArrayPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64ArrayAsInt64X8#") [] [byteArrayPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8ArrayAsWord8X16#") [] [byteArrayPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16ArrayAsWord16X8#") [] [byteArrayPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32ArrayAsWord32X4#") [] [byteArrayPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64ArrayAsWord64X2#") [] [byteArrayPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8ArrayAsWord8X32#") [] [byteArrayPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo 
(VecIndexScalarByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16ArrayAsWord16X16#") [] [byteArrayPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32ArrayAsWord32X8#") [] [byteArrayPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64ArrayAsWord64X4#") [] [byteArrayPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8ArrayAsWord8X64#") [] [byteArrayPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16ArrayAsWord16X32#") [] [byteArrayPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32ArrayAsWord32X16#") [] [byteArrayPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexScalarByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64ArrayAsWord64X8#") [] [byteArrayPrimTy, intPrimTy] (word64X8PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatArrayAsFloatX4#") [] [byteArrayPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleArrayAsDoubleX2#") [] [byteArrayPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatArrayAsFloatX8#") [] [byteArrayPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleArrayAsDoubleX4#") [] [byteArrayPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatArrayAsFloatX16#") [] [byteArrayPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexScalarByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleArrayAsDoubleX8#") [] [byteArrayPrimTy, intPrimTy] (doubleX8PrimTy) 
primOpInfo (VecReadScalarByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8ArrayAsInt8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16ArrayAsInt16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32ArrayAsInt32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64ArrayAsInt64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8ArrayAsInt8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16ArrayAsInt16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32ArrayAsInt32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64ArrayAsInt64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo 
(VecReadScalarByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8ArrayAsInt8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16ArrayAsInt16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32ArrayAsInt32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64ArrayAsInt64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8ArrayAsWord8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16ArrayAsWord16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32ArrayAsWord32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64ArrayAsWord64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo 
(VecReadScalarByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8ArrayAsWord8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16ArrayAsWord16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32ArrayAsWord32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64ArrayAsWord64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8ArrayAsWord8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16ArrayAsWord16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "readWord32ArrayAsWord32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo (VecReadScalarByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64ArrayAsWord64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, 
word64X8PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatArrayAsFloatX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleArrayAsDoubleX2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatArrayAsFloatX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleArrayAsDoubleX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatArrayAsFloatX16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadScalarByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleArrayAsDoubleX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteScalarByteArrayOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8ArrayAsInt8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16ArrayAsInt16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo 
(VecWriteScalarByteArrayOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32ArrayAsInt32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64ArrayAsInt64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8ArrayAsInt8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16ArrayAsInt16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32ArrayAsInt32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64ArrayAsInt64X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8ArrayAsInt8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16ArrayAsInt16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32ArrayAsInt32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int32X16PrimTy, 
mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64ArrayAsInt64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8ArrayAsWord8X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16ArrayAsWord16X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32ArrayAsWord32X4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64ArrayAsWord64X2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8ArrayAsWord8X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16ArrayAsWord16X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32ArrayAsWord32X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64ArrayAsWord64X4#") 
[deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 64 W8) = mkGenPrimOp (fsLit "writeWord8ArrayAsWord8X64#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16ArrayAsWord16X32#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32ArrayAsWord32X16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64ArrayAsWord64X8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatArrayAsFloatX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleArrayAsDoubleX2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatArrayAsFloatX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleArrayAsDoubleX4#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo 
(VecWriteScalarByteArrayOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatArrayAsFloatX16#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarByteArrayOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleArrayAsDoubleX8#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "indexInt8OffAddrAsInt8X16#") [] [addrPrimTy, intPrimTy] (int8X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "indexInt16OffAddrAsInt16X8#") [] [addrPrimTy, intPrimTy] (int16X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "indexInt32OffAddrAsInt32X4#") [] [addrPrimTy, intPrimTy] (int32X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "indexInt64OffAddrAsInt64X2#") [] [addrPrimTy, intPrimTy] (int64X2PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "indexInt8OffAddrAsInt8X32#") [] [addrPrimTy, intPrimTy] (int8X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "indexInt16OffAddrAsInt16X16#") [] [addrPrimTy, intPrimTy] (int16X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "indexInt32OffAddrAsInt32X8#") [] [addrPrimTy, intPrimTy] (int32X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "indexInt64OffAddrAsInt64X4#") [] [addrPrimTy, intPrimTy] (int64X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "indexInt8OffAddrAsInt8X64#") [] [addrPrimTy, intPrimTy] (int8X64PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "indexInt16OffAddrAsInt16X32#") [] [addrPrimTy, intPrimTy] (int16X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit 
"indexInt32OffAddrAsInt32X16#") [] [addrPrimTy, intPrimTy] (int32X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "indexInt64OffAddrAsInt64X8#") [] [addrPrimTy, intPrimTy] (int64X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord8X16#") [] [addrPrimTy, intPrimTy] (word8X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "indexWord16OffAddrAsWord16X8#") [] [addrPrimTy, intPrimTy] (word16X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "indexWord32OffAddrAsWord32X4#") [] [addrPrimTy, intPrimTy] (word32X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "indexWord64OffAddrAsWord64X2#") [] [addrPrimTy, intPrimTy] (word64X2PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord8X32#") [] [addrPrimTy, intPrimTy] (word8X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "indexWord16OffAddrAsWord16X16#") [] [addrPrimTy, intPrimTy] (word16X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "indexWord32OffAddrAsWord32X8#") [] [addrPrimTy, intPrimTy] (word32X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "indexWord64OffAddrAsWord64X4#") [] [addrPrimTy, intPrimTy] (word64X4PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "indexWord8OffAddrAsWord8X64#") [] [addrPrimTy, intPrimTy] (word8X64PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "indexWord16OffAddrAsWord16X32#") [] [addrPrimTy, intPrimTy] (word16X32PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "indexWord32OffAddrAsWord32X16#") [] [addrPrimTy, intPrimTy] (word32X16PrimTy) primOpInfo (VecIndexScalarOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "indexWord64OffAddrAsWord64X8#") [] [addrPrimTy, intPrimTy] 
(word64X8PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "indexFloatOffAddrAsFloatX4#") [] [addrPrimTy, intPrimTy] (floatX4PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "indexDoubleOffAddrAsDoubleX2#") [] [addrPrimTy, intPrimTy] (doubleX2PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "indexFloatOffAddrAsFloatX8#") [] [addrPrimTy, intPrimTy] (floatX8PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "indexDoubleOffAddrAsDoubleX4#") [] [addrPrimTy, intPrimTy] (doubleX4PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "indexFloatOffAddrAsFloatX16#") [] [addrPrimTy, intPrimTy] (floatX16PrimTy) primOpInfo (VecIndexScalarOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "indexDoubleOffAddrAsDoubleX8#") [] [addrPrimTy, intPrimTy] (doubleX8PrimTy) primOpInfo (VecReadScalarOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "readInt8OffAddrAsInt8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "readInt16OffAddrAsInt16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "readInt32OffAddrAsInt32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "readInt64OffAddrAsInt64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X2PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "readInt8OffAddrAsInt8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed 
[mkStatePrimTy deltaTy, int8X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "readInt16OffAddrAsInt16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "readInt32OffAddrAsInt32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "readInt64OffAddrAsInt64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "readInt8OffAddrAsInt8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int8X64PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "readInt16OffAddrAsInt16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int16X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "readInt32OffAddrAsInt32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int32X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "readInt64OffAddrAsInt64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, int64X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "readWord8OffAddrAsWord8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "readWord16OffAddrAsWord16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, 
mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "readWord32OffAddrAsWord32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "readWord64OffAddrAsWord64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X2PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "readWord8OffAddrAsWord8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "readWord16OffAddrAsWord16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "readWord32OffAddrAsWord32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "readWord64OffAddrAsWord64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X4PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit "readWord8OffAddrAsWord8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word8X64PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "readWord16OffAddrAsWord16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word16X32PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit 
"readWord32OffAddrAsWord32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word32X16PrimTy])) primOpInfo (VecReadScalarOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "readWord64OffAddrAsWord64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, word64X8PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "readFloatOffAddrAsFloatX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX4PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "readDoubleOffAddrAsDoubleX2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX2PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "readFloatOffAddrAsFloatX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX8PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "readDoubleOffAddrAsDoubleX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX4PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "readFloatOffAddrAsFloatX16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, floatX16PrimTy])) primOpInfo (VecReadScalarOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "readDoubleOffAddrAsDoubleX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] ((mkTupleTy Unboxed [mkStatePrimTy deltaTy, doubleX8PrimTy])) primOpInfo (VecWriteScalarOffAddrOp IntVec 16 W8) = mkGenPrimOp (fsLit "writeInt8OffAddrAsInt8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo 
(VecWriteScalarOffAddrOp IntVec 8 W16) = mkGenPrimOp (fsLit "writeInt16OffAddrAsInt16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 4 W32) = mkGenPrimOp (fsLit "writeInt32OffAddrAsInt32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 2 W64) = mkGenPrimOp (fsLit "writeInt64OffAddrAsInt64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 32 W8) = mkGenPrimOp (fsLit "writeInt8OffAddrAsInt8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 16 W16) = mkGenPrimOp (fsLit "writeInt16OffAddrAsInt16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 8 W32) = mkGenPrimOp (fsLit "writeInt32OffAddrAsInt32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 4 W64) = mkGenPrimOp (fsLit "writeInt64OffAddrAsInt64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 64 W8) = mkGenPrimOp (fsLit "writeInt8OffAddrAsInt8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 32 W16) = mkGenPrimOp (fsLit "writeInt16OffAddrAsInt16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 16 W32) = mkGenPrimOp (fsLit "writeInt32OffAddrAsInt32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int32X16PrimTy, 
mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp IntVec 8 W64) = mkGenPrimOp (fsLit "writeInt64OffAddrAsInt64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, int64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 16 W8) = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord8X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 8 W16) = mkGenPrimOp (fsLit "writeWord16OffAddrAsWord16X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 4 W32) = mkGenPrimOp (fsLit "writeWord32OffAddrAsWord32X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 2 W64) = mkGenPrimOp (fsLit "writeWord64OffAddrAsWord64X2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 32 W8) = mkGenPrimOp (fsLit "writeWord8OffAddrAsWord8X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 16 W16) = mkGenPrimOp (fsLit "writeWord16OffAddrAsWord16X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 8 W32) = mkGenPrimOp (fsLit "writeWord32OffAddrAsWord32X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 4 W64) = mkGenPrimOp (fsLit "writeWord64OffAddrAsWord64X4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 64 W8) = mkGenPrimOp (fsLit 
"writeWord8OffAddrAsWord8X64#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word8X64PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 32 W16) = mkGenPrimOp (fsLit "writeWord16OffAddrAsWord16X32#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word16X32PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 16 W32) = mkGenPrimOp (fsLit "writeWord32OffAddrAsWord32X16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word32X16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp WordVec 8 W64) = mkGenPrimOp (fsLit "writeWord64OffAddrAsWord64X8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, word64X8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 4 W32) = mkGenPrimOp (fsLit "writeFloatOffAddrAsFloatX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 2 W64) = mkGenPrimOp (fsLit "writeDoubleOffAddrAsDoubleX2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX2PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 8 W32) = mkGenPrimOp (fsLit "writeFloatOffAddrAsFloatX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX8PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 4 W64) = mkGenPrimOp (fsLit "writeDoubleOffAddrAsDoubleX4#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX4PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 16 W32) = mkGenPrimOp (fsLit "writeFloatOffAddrAsFloatX16#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, floatX16PrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo (VecWriteScalarOffAddrOp FloatVec 8 W64) = mkGenPrimOp (fsLit "writeDoubleOffAddrAsDoubleX8#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, doubleX8PrimTy, mkStatePrimTy deltaTy] 
(mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp3 = mkGenPrimOp (fsLit "prefetchByteArray3#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp3 = mkGenPrimOp (fsLit "prefetchMutableByteArray3#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp3 = mkGenPrimOp (fsLit "prefetchAddr3#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp3 = mkGenPrimOp (fsLit "prefetchValue3#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp2 = mkGenPrimOp (fsLit "prefetchByteArray2#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp2 = mkGenPrimOp (fsLit "prefetchMutableByteArray2#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp2 = mkGenPrimOp (fsLit "prefetchAddr2#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp2 = mkGenPrimOp (fsLit "prefetchValue2#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp1 = mkGenPrimOp (fsLit "prefetchByteArray1#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp1 = mkGenPrimOp (fsLit "prefetchMutableByteArray1#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp1 = mkGenPrimOp (fsLit "prefetchAddr1#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp1 = mkGenPrimOp (fsLit "prefetchValue1#") [alphaTyVarSpec, 
deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchByteArrayOp0 = mkGenPrimOp (fsLit "prefetchByteArray0#") [deltaTyVarSpec] [byteArrayPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchMutableByteArrayOp0 = mkGenPrimOp (fsLit "prefetchMutableByteArray0#") [deltaTyVarSpec] [mkMutableByteArrayPrimTy deltaTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchAddrOp0 = mkGenPrimOp (fsLit "prefetchAddr0#") [deltaTyVarSpec] [addrPrimTy, intPrimTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) primOpInfo PrefetchValueOp0 = mkGenPrimOp (fsLit "prefetchValue0#") [alphaTyVarSpec, deltaTyVarSpec] [alphaTy, mkStatePrimTy deltaTy] (mkStatePrimTy deltaTy) ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-strictness.hs-incl0000644000000000000000000000347414472400111026014 0ustar0000000000000000primOpStrictness CatchOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , lazyApply2Dmd , topDmd] topDiv primOpStrictness RaiseOp = \ _arity -> mkClosedDmdSig [topDmd] botDiv primOpStrictness RaiseIOOp = \ _arity -> mkClosedDmdSig [topDmd, topDmd] exnDiv primOpStrictness MaskAsyncExceptionsOp = \ _arity -> mkClosedDmdSig [strictOnceApply1Dmd,topDmd] topDiv primOpStrictness MaskUninterruptibleOp = \ _arity -> mkClosedDmdSig [strictOnceApply1Dmd,topDmd] topDiv primOpStrictness UnmaskAsyncExceptionsOp = \ _arity -> mkClosedDmdSig [strictOnceApply1Dmd,topDmd] topDiv primOpStrictness AtomicallyOp = \ _arity -> mkClosedDmdSig [strictManyApply1Dmd,topDmd] topDiv primOpStrictness RetryOp = \ _arity -> mkClosedDmdSig [topDmd] botDiv primOpStrictness CatchRetryOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , lazyApply1Dmd , topDmd ] topDiv primOpStrictness CatchSTMOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , lazyApply2Dmd , topDmd ] topDiv primOpStrictness ForkOp = \ _arity -> mkClosedDmdSig [ lazyApply1Dmd , topDmd ] topDiv primOpStrictness ForkOnOp = \ _arity -> 
mkClosedDmdSig [ topDmd , lazyApply1Dmd , topDmd ] topDiv primOpStrictness KeepAliveOp = \ _arity -> mkClosedDmdSig [topDmd, topDmd, strictOnceApply1Dmd] topDiv primOpStrictness DataToTagOp = \ _arity -> mkClosedDmdSig [evalDmd] topDiv primOpStrictness _ = \ arity -> mkClosedDmdSig (replicate arity topDmd) topDiv ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-tag.hs-incl0000644000000000000000000014652014472400111024366 0ustar0000000000000000maxPrimOpTag :: Int maxPrimOpTag = 1303 primOpTag :: PrimOp -> Int primOpTag CharGtOp = 0 primOpTag CharGeOp = 1 primOpTag CharEqOp = 2 primOpTag CharNeOp = 3 primOpTag CharLtOp = 4 primOpTag CharLeOp = 5 primOpTag OrdOp = 6 primOpTag Int8ToIntOp = 7 primOpTag IntToInt8Op = 8 primOpTag Int8NegOp = 9 primOpTag Int8AddOp = 10 primOpTag Int8SubOp = 11 primOpTag Int8MulOp = 12 primOpTag Int8QuotOp = 13 primOpTag Int8RemOp = 14 primOpTag Int8QuotRemOp = 15 primOpTag Int8SllOp = 16 primOpTag Int8SraOp = 17 primOpTag Int8SrlOp = 18 primOpTag Int8ToWord8Op = 19 primOpTag Int8EqOp = 20 primOpTag Int8GeOp = 21 primOpTag Int8GtOp = 22 primOpTag Int8LeOp = 23 primOpTag Int8LtOp = 24 primOpTag Int8NeOp = 25 primOpTag Word8ToWordOp = 26 primOpTag WordToWord8Op = 27 primOpTag Word8AddOp = 28 primOpTag Word8SubOp = 29 primOpTag Word8MulOp = 30 primOpTag Word8QuotOp = 31 primOpTag Word8RemOp = 32 primOpTag Word8QuotRemOp = 33 primOpTag Word8AndOp = 34 primOpTag Word8OrOp = 35 primOpTag Word8XorOp = 36 primOpTag Word8NotOp = 37 primOpTag Word8SllOp = 38 primOpTag Word8SrlOp = 39 primOpTag Word8ToInt8Op = 40 primOpTag Word8EqOp = 41 primOpTag Word8GeOp = 42 primOpTag Word8GtOp = 43 primOpTag Word8LeOp = 44 primOpTag Word8LtOp = 45 primOpTag Word8NeOp = 46 primOpTag Int16ToIntOp = 47 primOpTag IntToInt16Op = 48 primOpTag Int16NegOp = 49 primOpTag Int16AddOp = 50 primOpTag Int16SubOp = 51 primOpTag Int16MulOp = 52 primOpTag Int16QuotOp = 53 primOpTag Int16RemOp = 54 primOpTag Int16QuotRemOp = 55 primOpTag Int16SllOp = 56 
primOpTag Int16SraOp = 57 primOpTag Int16SrlOp = 58 primOpTag Int16ToWord16Op = 59 primOpTag Int16EqOp = 60 primOpTag Int16GeOp = 61 primOpTag Int16GtOp = 62 primOpTag Int16LeOp = 63 primOpTag Int16LtOp = 64 primOpTag Int16NeOp = 65 primOpTag Word16ToWordOp = 66 primOpTag WordToWord16Op = 67 primOpTag Word16AddOp = 68 primOpTag Word16SubOp = 69 primOpTag Word16MulOp = 70 primOpTag Word16QuotOp = 71 primOpTag Word16RemOp = 72 primOpTag Word16QuotRemOp = 73 primOpTag Word16AndOp = 74 primOpTag Word16OrOp = 75 primOpTag Word16XorOp = 76 primOpTag Word16NotOp = 77 primOpTag Word16SllOp = 78 primOpTag Word16SrlOp = 79 primOpTag Word16ToInt16Op = 80 primOpTag Word16EqOp = 81 primOpTag Word16GeOp = 82 primOpTag Word16GtOp = 83 primOpTag Word16LeOp = 84 primOpTag Word16LtOp = 85 primOpTag Word16NeOp = 86 primOpTag Int32ToIntOp = 87 primOpTag IntToInt32Op = 88 primOpTag Int32NegOp = 89 primOpTag Int32AddOp = 90 primOpTag Int32SubOp = 91 primOpTag Int32MulOp = 92 primOpTag Int32QuotOp = 93 primOpTag Int32RemOp = 94 primOpTag Int32QuotRemOp = 95 primOpTag Int32SllOp = 96 primOpTag Int32SraOp = 97 primOpTag Int32SrlOp = 98 primOpTag Int32ToWord32Op = 99 primOpTag Int32EqOp = 100 primOpTag Int32GeOp = 101 primOpTag Int32GtOp = 102 primOpTag Int32LeOp = 103 primOpTag Int32LtOp = 104 primOpTag Int32NeOp = 105 primOpTag Word32ToWordOp = 106 primOpTag WordToWord32Op = 107 primOpTag Word32AddOp = 108 primOpTag Word32SubOp = 109 primOpTag Word32MulOp = 110 primOpTag Word32QuotOp = 111 primOpTag Word32RemOp = 112 primOpTag Word32QuotRemOp = 113 primOpTag Word32AndOp = 114 primOpTag Word32OrOp = 115 primOpTag Word32XorOp = 116 primOpTag Word32NotOp = 117 primOpTag Word32SllOp = 118 primOpTag Word32SrlOp = 119 primOpTag Word32ToInt32Op = 120 primOpTag Word32EqOp = 121 primOpTag Word32GeOp = 122 primOpTag Word32GtOp = 123 primOpTag Word32LeOp = 124 primOpTag Word32LtOp = 125 primOpTag Word32NeOp = 126 primOpTag Int64ToIntOp = 127 primOpTag IntToInt64Op = 128 primOpTag Int64NegOp = 129 
primOpTag Int64AddOp = 130 primOpTag Int64SubOp = 131 primOpTag Int64MulOp = 132 primOpTag Int64QuotOp = 133 primOpTag Int64RemOp = 134 primOpTag Int64SllOp = 135 primOpTag Int64SraOp = 136 primOpTag Int64SrlOp = 137 primOpTag Int64ToWord64Op = 138 primOpTag Int64EqOp = 139 primOpTag Int64GeOp = 140 primOpTag Int64GtOp = 141 primOpTag Int64LeOp = 142 primOpTag Int64LtOp = 143 primOpTag Int64NeOp = 144 primOpTag Word64ToWordOp = 145 primOpTag WordToWord64Op = 146 primOpTag Word64AddOp = 147 primOpTag Word64SubOp = 148 primOpTag Word64MulOp = 149 primOpTag Word64QuotOp = 150 primOpTag Word64RemOp = 151 primOpTag Word64AndOp = 152 primOpTag Word64OrOp = 153 primOpTag Word64XorOp = 154 primOpTag Word64NotOp = 155 primOpTag Word64SllOp = 156 primOpTag Word64SrlOp = 157 primOpTag Word64ToInt64Op = 158 primOpTag Word64EqOp = 159 primOpTag Word64GeOp = 160 primOpTag Word64GtOp = 161 primOpTag Word64LeOp = 162 primOpTag Word64LtOp = 163 primOpTag Word64NeOp = 164 primOpTag IntAddOp = 165 primOpTag IntSubOp = 166 primOpTag IntMulOp = 167 primOpTag IntMul2Op = 168 primOpTag IntMulMayOfloOp = 169 primOpTag IntQuotOp = 170 primOpTag IntRemOp = 171 primOpTag IntQuotRemOp = 172 primOpTag IntAndOp = 173 primOpTag IntOrOp = 174 primOpTag IntXorOp = 175 primOpTag IntNotOp = 176 primOpTag IntNegOp = 177 primOpTag IntAddCOp = 178 primOpTag IntSubCOp = 179 primOpTag IntGtOp = 180 primOpTag IntGeOp = 181 primOpTag IntEqOp = 182 primOpTag IntNeOp = 183 primOpTag IntLtOp = 184 primOpTag IntLeOp = 185 primOpTag ChrOp = 186 primOpTag IntToWordOp = 187 primOpTag IntToFloatOp = 188 primOpTag IntToDoubleOp = 189 primOpTag WordToFloatOp = 190 primOpTag WordToDoubleOp = 191 primOpTag IntSllOp = 192 primOpTag IntSraOp = 193 primOpTag IntSrlOp = 194 primOpTag WordAddOp = 195 primOpTag WordAddCOp = 196 primOpTag WordSubCOp = 197 primOpTag WordAdd2Op = 198 primOpTag WordSubOp = 199 primOpTag WordMulOp = 200 primOpTag WordMul2Op = 201 primOpTag WordQuotOp = 202 primOpTag WordRemOp = 203 primOpTag 
WordQuotRemOp = 204 primOpTag WordQuotRem2Op = 205 primOpTag WordAndOp = 206 primOpTag WordOrOp = 207 primOpTag WordXorOp = 208 primOpTag WordNotOp = 209 primOpTag WordSllOp = 210 primOpTag WordSrlOp = 211 primOpTag WordToIntOp = 212 primOpTag WordGtOp = 213 primOpTag WordGeOp = 214 primOpTag WordEqOp = 215 primOpTag WordNeOp = 216 primOpTag WordLtOp = 217 primOpTag WordLeOp = 218 primOpTag PopCnt8Op = 219 primOpTag PopCnt16Op = 220 primOpTag PopCnt32Op = 221 primOpTag PopCnt64Op = 222 primOpTag PopCntOp = 223 primOpTag Pdep8Op = 224 primOpTag Pdep16Op = 225 primOpTag Pdep32Op = 226 primOpTag Pdep64Op = 227 primOpTag PdepOp = 228 primOpTag Pext8Op = 229 primOpTag Pext16Op = 230 primOpTag Pext32Op = 231 primOpTag Pext64Op = 232 primOpTag PextOp = 233 primOpTag Clz8Op = 234 primOpTag Clz16Op = 235 primOpTag Clz32Op = 236 primOpTag Clz64Op = 237 primOpTag ClzOp = 238 primOpTag Ctz8Op = 239 primOpTag Ctz16Op = 240 primOpTag Ctz32Op = 241 primOpTag Ctz64Op = 242 primOpTag CtzOp = 243 primOpTag BSwap16Op = 244 primOpTag BSwap32Op = 245 primOpTag BSwap64Op = 246 primOpTag BSwapOp = 247 primOpTag BRev8Op = 248 primOpTag BRev16Op = 249 primOpTag BRev32Op = 250 primOpTag BRev64Op = 251 primOpTag BRevOp = 252 primOpTag Narrow8IntOp = 253 primOpTag Narrow16IntOp = 254 primOpTag Narrow32IntOp = 255 primOpTag Narrow8WordOp = 256 primOpTag Narrow16WordOp = 257 primOpTag Narrow32WordOp = 258 primOpTag DoubleGtOp = 259 primOpTag DoubleGeOp = 260 primOpTag DoubleEqOp = 261 primOpTag DoubleNeOp = 262 primOpTag DoubleLtOp = 263 primOpTag DoubleLeOp = 264 primOpTag DoubleAddOp = 265 primOpTag DoubleSubOp = 266 primOpTag DoubleMulOp = 267 primOpTag DoubleDivOp = 268 primOpTag DoubleNegOp = 269 primOpTag DoubleFabsOp = 270 primOpTag DoubleToIntOp = 271 primOpTag DoubleToFloatOp = 272 primOpTag DoubleExpOp = 273 primOpTag DoubleExpM1Op = 274 primOpTag DoubleLogOp = 275 primOpTag DoubleLog1POp = 276 primOpTag DoubleSqrtOp = 277 primOpTag DoubleSinOp = 278 primOpTag DoubleCosOp = 279 
primOpTag DoubleTanOp = 280 primOpTag DoubleAsinOp = 281 primOpTag DoubleAcosOp = 282 primOpTag DoubleAtanOp = 283 primOpTag DoubleSinhOp = 284 primOpTag DoubleCoshOp = 285 primOpTag DoubleTanhOp = 286 primOpTag DoubleAsinhOp = 287 primOpTag DoubleAcoshOp = 288 primOpTag DoubleAtanhOp = 289 primOpTag DoublePowerOp = 290 primOpTag DoubleDecode_2IntOp = 291 primOpTag DoubleDecode_Int64Op = 292 primOpTag FloatGtOp = 293 primOpTag FloatGeOp = 294 primOpTag FloatEqOp = 295 primOpTag FloatNeOp = 296 primOpTag FloatLtOp = 297 primOpTag FloatLeOp = 298 primOpTag FloatAddOp = 299 primOpTag FloatSubOp = 300 primOpTag FloatMulOp = 301 primOpTag FloatDivOp = 302 primOpTag FloatNegOp = 303 primOpTag FloatFabsOp = 304 primOpTag FloatToIntOp = 305 primOpTag FloatExpOp = 306 primOpTag FloatExpM1Op = 307 primOpTag FloatLogOp = 308 primOpTag FloatLog1POp = 309 primOpTag FloatSqrtOp = 310 primOpTag FloatSinOp = 311 primOpTag FloatCosOp = 312 primOpTag FloatTanOp = 313 primOpTag FloatAsinOp = 314 primOpTag FloatAcosOp = 315 primOpTag FloatAtanOp = 316 primOpTag FloatSinhOp = 317 primOpTag FloatCoshOp = 318 primOpTag FloatTanhOp = 319 primOpTag FloatAsinhOp = 320 primOpTag FloatAcoshOp = 321 primOpTag FloatAtanhOp = 322 primOpTag FloatPowerOp = 323 primOpTag FloatToDoubleOp = 324 primOpTag FloatDecode_IntOp = 325 primOpTag NewArrayOp = 326 primOpTag ReadArrayOp = 327 primOpTag WriteArrayOp = 328 primOpTag SizeofArrayOp = 329 primOpTag SizeofMutableArrayOp = 330 primOpTag IndexArrayOp = 331 primOpTag UnsafeFreezeArrayOp = 332 primOpTag UnsafeThawArrayOp = 333 primOpTag CopyArrayOp = 334 primOpTag CopyMutableArrayOp = 335 primOpTag CloneArrayOp = 336 primOpTag CloneMutableArrayOp = 337 primOpTag FreezeArrayOp = 338 primOpTag ThawArrayOp = 339 primOpTag CasArrayOp = 340 primOpTag NewSmallArrayOp = 341 primOpTag ShrinkSmallMutableArrayOp_Char = 342 primOpTag ReadSmallArrayOp = 343 primOpTag WriteSmallArrayOp = 344 primOpTag SizeofSmallArrayOp = 345 primOpTag SizeofSmallMutableArrayOp = 346 
primOpTag GetSizeofSmallMutableArrayOp = 347 primOpTag IndexSmallArrayOp = 348 primOpTag UnsafeFreezeSmallArrayOp = 349 primOpTag UnsafeThawSmallArrayOp = 350 primOpTag CopySmallArrayOp = 351 primOpTag CopySmallMutableArrayOp = 352 primOpTag CloneSmallArrayOp = 353 primOpTag CloneSmallMutableArrayOp = 354 primOpTag FreezeSmallArrayOp = 355 primOpTag ThawSmallArrayOp = 356 primOpTag CasSmallArrayOp = 357 primOpTag NewByteArrayOp_Char = 358 primOpTag NewPinnedByteArrayOp_Char = 359 primOpTag NewAlignedPinnedByteArrayOp_Char = 360 primOpTag MutableByteArrayIsPinnedOp = 361 primOpTag ByteArrayIsPinnedOp = 362 primOpTag ByteArrayContents_Char = 363 primOpTag MutableByteArrayContents_Char = 364 primOpTag ShrinkMutableByteArrayOp_Char = 365 primOpTag ResizeMutableByteArrayOp_Char = 366 primOpTag UnsafeFreezeByteArrayOp = 367 primOpTag SizeofByteArrayOp = 368 primOpTag SizeofMutableByteArrayOp = 369 primOpTag GetSizeofMutableByteArrayOp = 370 primOpTag IndexByteArrayOp_Char = 371 primOpTag IndexByteArrayOp_WideChar = 372 primOpTag IndexByteArrayOp_Int = 373 primOpTag IndexByteArrayOp_Word = 374 primOpTag IndexByteArrayOp_Addr = 375 primOpTag IndexByteArrayOp_Float = 376 primOpTag IndexByteArrayOp_Double = 377 primOpTag IndexByteArrayOp_StablePtr = 378 primOpTag IndexByteArrayOp_Int8 = 379 primOpTag IndexByteArrayOp_Int16 = 380 primOpTag IndexByteArrayOp_Int32 = 381 primOpTag IndexByteArrayOp_Int64 = 382 primOpTag IndexByteArrayOp_Word8 = 383 primOpTag IndexByteArrayOp_Word16 = 384 primOpTag IndexByteArrayOp_Word32 = 385 primOpTag IndexByteArrayOp_Word64 = 386 primOpTag IndexByteArrayOp_Word8AsChar = 387 primOpTag IndexByteArrayOp_Word8AsWideChar = 388 primOpTag IndexByteArrayOp_Word8AsInt = 389 primOpTag IndexByteArrayOp_Word8AsWord = 390 primOpTag IndexByteArrayOp_Word8AsAddr = 391 primOpTag IndexByteArrayOp_Word8AsFloat = 392 primOpTag IndexByteArrayOp_Word8AsDouble = 393 primOpTag IndexByteArrayOp_Word8AsStablePtr = 394 primOpTag IndexByteArrayOp_Word8AsInt16 = 395 
primOpTag IndexByteArrayOp_Word8AsInt32 = 396 primOpTag IndexByteArrayOp_Word8AsInt64 = 397 primOpTag IndexByteArrayOp_Word8AsWord16 = 398 primOpTag IndexByteArrayOp_Word8AsWord32 = 399 primOpTag IndexByteArrayOp_Word8AsWord64 = 400 primOpTag ReadByteArrayOp_Char = 401 primOpTag ReadByteArrayOp_WideChar = 402 primOpTag ReadByteArrayOp_Int = 403 primOpTag ReadByteArrayOp_Word = 404 primOpTag ReadByteArrayOp_Addr = 405 primOpTag ReadByteArrayOp_Float = 406 primOpTag ReadByteArrayOp_Double = 407 primOpTag ReadByteArrayOp_StablePtr = 408 primOpTag ReadByteArrayOp_Int8 = 409 primOpTag ReadByteArrayOp_Int16 = 410 primOpTag ReadByteArrayOp_Int32 = 411 primOpTag ReadByteArrayOp_Int64 = 412 primOpTag ReadByteArrayOp_Word8 = 413 primOpTag ReadByteArrayOp_Word16 = 414 primOpTag ReadByteArrayOp_Word32 = 415 primOpTag ReadByteArrayOp_Word64 = 416 primOpTag ReadByteArrayOp_Word8AsChar = 417 primOpTag ReadByteArrayOp_Word8AsWideChar = 418 primOpTag ReadByteArrayOp_Word8AsInt = 419 primOpTag ReadByteArrayOp_Word8AsWord = 420 primOpTag ReadByteArrayOp_Word8AsAddr = 421 primOpTag ReadByteArrayOp_Word8AsFloat = 422 primOpTag ReadByteArrayOp_Word8AsDouble = 423 primOpTag ReadByteArrayOp_Word8AsStablePtr = 424 primOpTag ReadByteArrayOp_Word8AsInt16 = 425 primOpTag ReadByteArrayOp_Word8AsInt32 = 426 primOpTag ReadByteArrayOp_Word8AsInt64 = 427 primOpTag ReadByteArrayOp_Word8AsWord16 = 428 primOpTag ReadByteArrayOp_Word8AsWord32 = 429 primOpTag ReadByteArrayOp_Word8AsWord64 = 430 primOpTag WriteByteArrayOp_Char = 431 primOpTag WriteByteArrayOp_WideChar = 432 primOpTag WriteByteArrayOp_Int = 433 primOpTag WriteByteArrayOp_Word = 434 primOpTag WriteByteArrayOp_Addr = 435 primOpTag WriteByteArrayOp_Float = 436 primOpTag WriteByteArrayOp_Double = 437 primOpTag WriteByteArrayOp_StablePtr = 438 primOpTag WriteByteArrayOp_Int8 = 439 primOpTag WriteByteArrayOp_Int16 = 440 primOpTag WriteByteArrayOp_Int32 = 441 primOpTag WriteByteArrayOp_Int64 = 442 primOpTag WriteByteArrayOp_Word8 = 443 
primOpTag WriteByteArrayOp_Word16 = 444 primOpTag WriteByteArrayOp_Word32 = 445 primOpTag WriteByteArrayOp_Word64 = 446 primOpTag WriteByteArrayOp_Word8AsChar = 447 primOpTag WriteByteArrayOp_Word8AsWideChar = 448 primOpTag WriteByteArrayOp_Word8AsInt = 449 primOpTag WriteByteArrayOp_Word8AsWord = 450 primOpTag WriteByteArrayOp_Word8AsAddr = 451 primOpTag WriteByteArrayOp_Word8AsFloat = 452 primOpTag WriteByteArrayOp_Word8AsDouble = 453 primOpTag WriteByteArrayOp_Word8AsStablePtr = 454 primOpTag WriteByteArrayOp_Word8AsInt16 = 455 primOpTag WriteByteArrayOp_Word8AsInt32 = 456 primOpTag WriteByteArrayOp_Word8AsInt64 = 457 primOpTag WriteByteArrayOp_Word8AsWord16 = 458 primOpTag WriteByteArrayOp_Word8AsWord32 = 459 primOpTag WriteByteArrayOp_Word8AsWord64 = 460 primOpTag CompareByteArraysOp = 461 primOpTag CopyByteArrayOp = 462 primOpTag CopyMutableByteArrayOp = 463 primOpTag CopyByteArrayToAddrOp = 464 primOpTag CopyMutableByteArrayToAddrOp = 465 primOpTag CopyAddrToByteArrayOp = 466 primOpTag SetByteArrayOp = 467 primOpTag AtomicReadByteArrayOp_Int = 468 primOpTag AtomicWriteByteArrayOp_Int = 469 primOpTag CasByteArrayOp_Int = 470 primOpTag CasByteArrayOp_Int8 = 471 primOpTag CasByteArrayOp_Int16 = 472 primOpTag CasByteArrayOp_Int32 = 473 primOpTag CasByteArrayOp_Int64 = 474 primOpTag FetchAddByteArrayOp_Int = 475 primOpTag FetchSubByteArrayOp_Int = 476 primOpTag FetchAndByteArrayOp_Int = 477 primOpTag FetchNandByteArrayOp_Int = 478 primOpTag FetchOrByteArrayOp_Int = 479 primOpTag FetchXorByteArrayOp_Int = 480 primOpTag AddrAddOp = 481 primOpTag AddrSubOp = 482 primOpTag AddrRemOp = 483 primOpTag AddrToIntOp = 484 primOpTag IntToAddrOp = 485 primOpTag AddrGtOp = 486 primOpTag AddrGeOp = 487 primOpTag AddrEqOp = 488 primOpTag AddrNeOp = 489 primOpTag AddrLtOp = 490 primOpTag AddrLeOp = 491 primOpTag IndexOffAddrOp_Char = 492 primOpTag IndexOffAddrOp_WideChar = 493 primOpTag IndexOffAddrOp_Int = 494 primOpTag IndexOffAddrOp_Word = 495 primOpTag IndexOffAddrOp_Addr = 
496 primOpTag IndexOffAddrOp_Float = 497 primOpTag IndexOffAddrOp_Double = 498 primOpTag IndexOffAddrOp_StablePtr = 499 primOpTag IndexOffAddrOp_Int8 = 500 primOpTag IndexOffAddrOp_Int16 = 501 primOpTag IndexOffAddrOp_Int32 = 502 primOpTag IndexOffAddrOp_Int64 = 503 primOpTag IndexOffAddrOp_Word8 = 504 primOpTag IndexOffAddrOp_Word16 = 505 primOpTag IndexOffAddrOp_Word32 = 506 primOpTag IndexOffAddrOp_Word64 = 507 primOpTag ReadOffAddrOp_Char = 508 primOpTag ReadOffAddrOp_WideChar = 509 primOpTag ReadOffAddrOp_Int = 510 primOpTag ReadOffAddrOp_Word = 511 primOpTag ReadOffAddrOp_Addr = 512 primOpTag ReadOffAddrOp_Float = 513 primOpTag ReadOffAddrOp_Double = 514 primOpTag ReadOffAddrOp_StablePtr = 515 primOpTag ReadOffAddrOp_Int8 = 516 primOpTag ReadOffAddrOp_Int16 = 517 primOpTag ReadOffAddrOp_Int32 = 518 primOpTag ReadOffAddrOp_Int64 = 519 primOpTag ReadOffAddrOp_Word8 = 520 primOpTag ReadOffAddrOp_Word16 = 521 primOpTag ReadOffAddrOp_Word32 = 522 primOpTag ReadOffAddrOp_Word64 = 523 primOpTag WriteOffAddrOp_Char = 524 primOpTag WriteOffAddrOp_WideChar = 525 primOpTag WriteOffAddrOp_Int = 526 primOpTag WriteOffAddrOp_Word = 527 primOpTag WriteOffAddrOp_Addr = 528 primOpTag WriteOffAddrOp_Float = 529 primOpTag WriteOffAddrOp_Double = 530 primOpTag WriteOffAddrOp_StablePtr = 531 primOpTag WriteOffAddrOp_Int8 = 532 primOpTag WriteOffAddrOp_Int16 = 533 primOpTag WriteOffAddrOp_Int32 = 534 primOpTag WriteOffAddrOp_Int64 = 535 primOpTag WriteOffAddrOp_Word8 = 536 primOpTag WriteOffAddrOp_Word16 = 537 primOpTag WriteOffAddrOp_Word32 = 538 primOpTag WriteOffAddrOp_Word64 = 539 primOpTag InterlockedExchange_Addr = 540 primOpTag InterlockedExchange_Word = 541 primOpTag CasAddrOp_Addr = 542 primOpTag CasAddrOp_Word = 543 primOpTag CasAddrOp_Word8 = 544 primOpTag CasAddrOp_Word16 = 545 primOpTag CasAddrOp_Word32 = 546 primOpTag CasAddrOp_Word64 = 547 primOpTag FetchAddAddrOp_Word = 548 primOpTag FetchSubAddrOp_Word = 549 primOpTag FetchAndAddrOp_Word = 550 primOpTag 
FetchNandAddrOp_Word = 551 primOpTag FetchOrAddrOp_Word = 552 primOpTag FetchXorAddrOp_Word = 553 primOpTag AtomicReadAddrOp_Word = 554 primOpTag AtomicWriteAddrOp_Word = 555 primOpTag NewMutVarOp = 556 primOpTag ReadMutVarOp = 557 primOpTag WriteMutVarOp = 558 primOpTag AtomicModifyMutVar2Op = 559 primOpTag AtomicModifyMutVar_Op = 560 primOpTag CasMutVarOp = 561 primOpTag CatchOp = 562 primOpTag RaiseOp = 563 primOpTag RaiseIOOp = 564 primOpTag MaskAsyncExceptionsOp = 565 primOpTag MaskUninterruptibleOp = 566 primOpTag UnmaskAsyncExceptionsOp = 567 primOpTag MaskStatus = 568 primOpTag AtomicallyOp = 569 primOpTag RetryOp = 570 primOpTag CatchRetryOp = 571 primOpTag CatchSTMOp = 572 primOpTag NewTVarOp = 573 primOpTag ReadTVarOp = 574 primOpTag ReadTVarIOOp = 575 primOpTag WriteTVarOp = 576 primOpTag NewMVarOp = 577 primOpTag TakeMVarOp = 578 primOpTag TryTakeMVarOp = 579 primOpTag PutMVarOp = 580 primOpTag TryPutMVarOp = 581 primOpTag ReadMVarOp = 582 primOpTag TryReadMVarOp = 583 primOpTag IsEmptyMVarOp = 584 primOpTag NewIOPortOp = 585 primOpTag ReadIOPortOp = 586 primOpTag WriteIOPortOp = 587 primOpTag DelayOp = 588 primOpTag WaitReadOp = 589 primOpTag WaitWriteOp = 590 primOpTag ForkOp = 591 primOpTag ForkOnOp = 592 primOpTag KillThreadOp = 593 primOpTag YieldOp = 594 primOpTag MyThreadIdOp = 595 primOpTag LabelThreadOp = 596 primOpTag IsCurrentThreadBoundOp = 597 primOpTag NoDuplicateOp = 598 primOpTag ThreadStatusOp = 599 primOpTag MkWeakOp = 600 primOpTag MkWeakNoFinalizerOp = 601 primOpTag AddCFinalizerToWeakOp = 602 primOpTag DeRefWeakOp = 603 primOpTag FinalizeWeakOp = 604 primOpTag TouchOp = 605 primOpTag MakeStablePtrOp = 606 primOpTag DeRefStablePtrOp = 607 primOpTag EqStablePtrOp = 608 primOpTag MakeStableNameOp = 609 primOpTag StableNameToIntOp = 610 primOpTag CompactNewOp = 611 primOpTag CompactResizeOp = 612 primOpTag CompactContainsOp = 613 primOpTag CompactContainsAnyOp = 614 primOpTag CompactGetFirstBlockOp = 615 primOpTag CompactGetNextBlockOp 
= 616 primOpTag CompactAllocateBlockOp = 617 primOpTag CompactFixupPointersOp = 618 primOpTag CompactAdd = 619 primOpTag CompactAddWithSharing = 620 primOpTag CompactSize = 621 primOpTag ReallyUnsafePtrEqualityOp = 622 primOpTag ParOp = 623 primOpTag SparkOp = 624 primOpTag SeqOp = 625 primOpTag GetSparkOp = 626 primOpTag NumSparks = 627 primOpTag KeepAliveOp = 628 primOpTag DataToTagOp = 629 primOpTag TagToEnumOp = 630 primOpTag AddrToAnyOp = 631 primOpTag AnyToAddrOp = 632 primOpTag MkApUpd0_Op = 633 primOpTag NewBCOOp = 634 primOpTag UnpackClosureOp = 635 primOpTag ClosureSizeOp = 636 primOpTag GetApStackValOp = 637 primOpTag GetCCSOfOp = 638 primOpTag GetCurrentCCSOp = 639 primOpTag ClearCCSOp = 640 primOpTag WhereFromOp = 641 primOpTag TraceEventOp = 642 primOpTag TraceEventBinaryOp = 643 primOpTag TraceMarkerOp = 644 primOpTag SetThreadAllocationCounter = 645 primOpTag (VecBroadcastOp IntVec 16 W8) = 646 primOpTag (VecBroadcastOp IntVec 8 W16) = 647 primOpTag (VecBroadcastOp IntVec 4 W32) = 648 primOpTag (VecBroadcastOp IntVec 2 W64) = 649 primOpTag (VecBroadcastOp IntVec 32 W8) = 650 primOpTag (VecBroadcastOp IntVec 16 W16) = 651 primOpTag (VecBroadcastOp IntVec 8 W32) = 652 primOpTag (VecBroadcastOp IntVec 4 W64) = 653 primOpTag (VecBroadcastOp IntVec 64 W8) = 654 primOpTag (VecBroadcastOp IntVec 32 W16) = 655 primOpTag (VecBroadcastOp IntVec 16 W32) = 656 primOpTag (VecBroadcastOp IntVec 8 W64) = 657 primOpTag (VecBroadcastOp WordVec 16 W8) = 658 primOpTag (VecBroadcastOp WordVec 8 W16) = 659 primOpTag (VecBroadcastOp WordVec 4 W32) = 660 primOpTag (VecBroadcastOp WordVec 2 W64) = 661 primOpTag (VecBroadcastOp WordVec 32 W8) = 662 primOpTag (VecBroadcastOp WordVec 16 W16) = 663 primOpTag (VecBroadcastOp WordVec 8 W32) = 664 primOpTag (VecBroadcastOp WordVec 4 W64) = 665 primOpTag (VecBroadcastOp WordVec 64 W8) = 666 primOpTag (VecBroadcastOp WordVec 32 W16) = 667 primOpTag (VecBroadcastOp WordVec 16 W32) = 668 primOpTag (VecBroadcastOp WordVec 8 W64) = 669 
primOpTag (VecBroadcastOp FloatVec 4 W32) = 670 primOpTag (VecBroadcastOp FloatVec 2 W64) = 671 primOpTag (VecBroadcastOp FloatVec 8 W32) = 672 primOpTag (VecBroadcastOp FloatVec 4 W64) = 673 primOpTag (VecBroadcastOp FloatVec 16 W32) = 674 primOpTag (VecBroadcastOp FloatVec 8 W64) = 675 primOpTag (VecPackOp IntVec 16 W8) = 676 primOpTag (VecPackOp IntVec 8 W16) = 677 primOpTag (VecPackOp IntVec 4 W32) = 678 primOpTag (VecPackOp IntVec 2 W64) = 679 primOpTag (VecPackOp IntVec 32 W8) = 680 primOpTag (VecPackOp IntVec 16 W16) = 681 primOpTag (VecPackOp IntVec 8 W32) = 682 primOpTag (VecPackOp IntVec 4 W64) = 683 primOpTag (VecPackOp IntVec 64 W8) = 684 primOpTag (VecPackOp IntVec 32 W16) = 685 primOpTag (VecPackOp IntVec 16 W32) = 686 primOpTag (VecPackOp IntVec 8 W64) = 687 primOpTag (VecPackOp WordVec 16 W8) = 688 primOpTag (VecPackOp WordVec 8 W16) = 689 primOpTag (VecPackOp WordVec 4 W32) = 690 primOpTag (VecPackOp WordVec 2 W64) = 691 primOpTag (VecPackOp WordVec 32 W8) = 692 primOpTag (VecPackOp WordVec 16 W16) = 693 primOpTag (VecPackOp WordVec 8 W32) = 694 primOpTag (VecPackOp WordVec 4 W64) = 695 primOpTag (VecPackOp WordVec 64 W8) = 696 primOpTag (VecPackOp WordVec 32 W16) = 697 primOpTag (VecPackOp WordVec 16 W32) = 698 primOpTag (VecPackOp WordVec 8 W64) = 699 primOpTag (VecPackOp FloatVec 4 W32) = 700 primOpTag (VecPackOp FloatVec 2 W64) = 701 primOpTag (VecPackOp FloatVec 8 W32) = 702 primOpTag (VecPackOp FloatVec 4 W64) = 703 primOpTag (VecPackOp FloatVec 16 W32) = 704 primOpTag (VecPackOp FloatVec 8 W64) = 705 primOpTag (VecUnpackOp IntVec 16 W8) = 706 primOpTag (VecUnpackOp IntVec 8 W16) = 707 primOpTag (VecUnpackOp IntVec 4 W32) = 708 primOpTag (VecUnpackOp IntVec 2 W64) = 709 primOpTag (VecUnpackOp IntVec 32 W8) = 710 primOpTag (VecUnpackOp IntVec 16 W16) = 711 primOpTag (VecUnpackOp IntVec 8 W32) = 712 primOpTag (VecUnpackOp IntVec 4 W64) = 713 primOpTag (VecUnpackOp IntVec 64 W8) = 714 primOpTag (VecUnpackOp IntVec 32 W16) = 715 primOpTag 
(VecUnpackOp IntVec 16 W32) = 716 primOpTag (VecUnpackOp IntVec 8 W64) = 717 primOpTag (VecUnpackOp WordVec 16 W8) = 718 primOpTag (VecUnpackOp WordVec 8 W16) = 719 primOpTag (VecUnpackOp WordVec 4 W32) = 720 primOpTag (VecUnpackOp WordVec 2 W64) = 721 primOpTag (VecUnpackOp WordVec 32 W8) = 722 primOpTag (VecUnpackOp WordVec 16 W16) = 723 primOpTag (VecUnpackOp WordVec 8 W32) = 724 primOpTag (VecUnpackOp WordVec 4 W64) = 725 primOpTag (VecUnpackOp WordVec 64 W8) = 726 primOpTag (VecUnpackOp WordVec 32 W16) = 727 primOpTag (VecUnpackOp WordVec 16 W32) = 728 primOpTag (VecUnpackOp WordVec 8 W64) = 729 primOpTag (VecUnpackOp FloatVec 4 W32) = 730 primOpTag (VecUnpackOp FloatVec 2 W64) = 731 primOpTag (VecUnpackOp FloatVec 8 W32) = 732 primOpTag (VecUnpackOp FloatVec 4 W64) = 733 primOpTag (VecUnpackOp FloatVec 16 W32) = 734 primOpTag (VecUnpackOp FloatVec 8 W64) = 735 primOpTag (VecInsertOp IntVec 16 W8) = 736 primOpTag (VecInsertOp IntVec 8 W16) = 737 primOpTag (VecInsertOp IntVec 4 W32) = 738 primOpTag (VecInsertOp IntVec 2 W64) = 739 primOpTag (VecInsertOp IntVec 32 W8) = 740 primOpTag (VecInsertOp IntVec 16 W16) = 741 primOpTag (VecInsertOp IntVec 8 W32) = 742 primOpTag (VecInsertOp IntVec 4 W64) = 743 primOpTag (VecInsertOp IntVec 64 W8) = 744 primOpTag (VecInsertOp IntVec 32 W16) = 745 primOpTag (VecInsertOp IntVec 16 W32) = 746 primOpTag (VecInsertOp IntVec 8 W64) = 747 primOpTag (VecInsertOp WordVec 16 W8) = 748 primOpTag (VecInsertOp WordVec 8 W16) = 749 primOpTag (VecInsertOp WordVec 4 W32) = 750 primOpTag (VecInsertOp WordVec 2 W64) = 751 primOpTag (VecInsertOp WordVec 32 W8) = 752 primOpTag (VecInsertOp WordVec 16 W16) = 753 primOpTag (VecInsertOp WordVec 8 W32) = 754 primOpTag (VecInsertOp WordVec 4 W64) = 755 primOpTag (VecInsertOp WordVec 64 W8) = 756 primOpTag (VecInsertOp WordVec 32 W16) = 757 primOpTag (VecInsertOp WordVec 16 W32) = 758 primOpTag (VecInsertOp WordVec 8 W64) = 759 primOpTag (VecInsertOp FloatVec 4 W32) = 760 primOpTag (VecInsertOp 
FloatVec 2 W64) = 761 primOpTag (VecInsertOp FloatVec 8 W32) = 762 primOpTag (VecInsertOp FloatVec 4 W64) = 763 primOpTag (VecInsertOp FloatVec 16 W32) = 764 primOpTag (VecInsertOp FloatVec 8 W64) = 765 primOpTag (VecAddOp IntVec 16 W8) = 766 primOpTag (VecAddOp IntVec 8 W16) = 767 primOpTag (VecAddOp IntVec 4 W32) = 768 primOpTag (VecAddOp IntVec 2 W64) = 769 primOpTag (VecAddOp IntVec 32 W8) = 770 primOpTag (VecAddOp IntVec 16 W16) = 771 primOpTag (VecAddOp IntVec 8 W32) = 772 primOpTag (VecAddOp IntVec 4 W64) = 773 primOpTag (VecAddOp IntVec 64 W8) = 774 primOpTag (VecAddOp IntVec 32 W16) = 775 primOpTag (VecAddOp IntVec 16 W32) = 776 primOpTag (VecAddOp IntVec 8 W64) = 777 primOpTag (VecAddOp WordVec 16 W8) = 778 primOpTag (VecAddOp WordVec 8 W16) = 779 primOpTag (VecAddOp WordVec 4 W32) = 780 primOpTag (VecAddOp WordVec 2 W64) = 781 primOpTag (VecAddOp WordVec 32 W8) = 782 primOpTag (VecAddOp WordVec 16 W16) = 783 primOpTag (VecAddOp WordVec 8 W32) = 784 primOpTag (VecAddOp WordVec 4 W64) = 785 primOpTag (VecAddOp WordVec 64 W8) = 786 primOpTag (VecAddOp WordVec 32 W16) = 787 primOpTag (VecAddOp WordVec 16 W32) = 788 primOpTag (VecAddOp WordVec 8 W64) = 789 primOpTag (VecAddOp FloatVec 4 W32) = 790 primOpTag (VecAddOp FloatVec 2 W64) = 791 primOpTag (VecAddOp FloatVec 8 W32) = 792 primOpTag (VecAddOp FloatVec 4 W64) = 793 primOpTag (VecAddOp FloatVec 16 W32) = 794 primOpTag (VecAddOp FloatVec 8 W64) = 795 primOpTag (VecSubOp IntVec 16 W8) = 796 primOpTag (VecSubOp IntVec 8 W16) = 797 primOpTag (VecSubOp IntVec 4 W32) = 798 primOpTag (VecSubOp IntVec 2 W64) = 799 primOpTag (VecSubOp IntVec 32 W8) = 800 primOpTag (VecSubOp IntVec 16 W16) = 801 primOpTag (VecSubOp IntVec 8 W32) = 802 primOpTag (VecSubOp IntVec 4 W64) = 803 primOpTag (VecSubOp IntVec 64 W8) = 804 primOpTag (VecSubOp IntVec 32 W16) = 805 primOpTag (VecSubOp IntVec 16 W32) = 806 primOpTag (VecSubOp IntVec 8 W64) = 807 primOpTag (VecSubOp WordVec 16 W8) = 808 primOpTag (VecSubOp WordVec 8 W16) = 809 
primOpTag (VecSubOp WordVec 4 W32) = 810 primOpTag (VecSubOp WordVec 2 W64) = 811 primOpTag (VecSubOp WordVec 32 W8) = 812 primOpTag (VecSubOp WordVec 16 W16) = 813 primOpTag (VecSubOp WordVec 8 W32) = 814 primOpTag (VecSubOp WordVec 4 W64) = 815 primOpTag (VecSubOp WordVec 64 W8) = 816 primOpTag (VecSubOp WordVec 32 W16) = 817 primOpTag (VecSubOp WordVec 16 W32) = 818 primOpTag (VecSubOp WordVec 8 W64) = 819 primOpTag (VecSubOp FloatVec 4 W32) = 820 primOpTag (VecSubOp FloatVec 2 W64) = 821 primOpTag (VecSubOp FloatVec 8 W32) = 822 primOpTag (VecSubOp FloatVec 4 W64) = 823 primOpTag (VecSubOp FloatVec 16 W32) = 824 primOpTag (VecSubOp FloatVec 8 W64) = 825 primOpTag (VecMulOp IntVec 16 W8) = 826 primOpTag (VecMulOp IntVec 8 W16) = 827 primOpTag (VecMulOp IntVec 4 W32) = 828 primOpTag (VecMulOp IntVec 2 W64) = 829 primOpTag (VecMulOp IntVec 32 W8) = 830 primOpTag (VecMulOp IntVec 16 W16) = 831 primOpTag (VecMulOp IntVec 8 W32) = 832 primOpTag (VecMulOp IntVec 4 W64) = 833 primOpTag (VecMulOp IntVec 64 W8) = 834 primOpTag (VecMulOp IntVec 32 W16) = 835 primOpTag (VecMulOp IntVec 16 W32) = 836 primOpTag (VecMulOp IntVec 8 W64) = 837 primOpTag (VecMulOp WordVec 16 W8) = 838 primOpTag (VecMulOp WordVec 8 W16) = 839 primOpTag (VecMulOp WordVec 4 W32) = 840 primOpTag (VecMulOp WordVec 2 W64) = 841 primOpTag (VecMulOp WordVec 32 W8) = 842 primOpTag (VecMulOp WordVec 16 W16) = 843 primOpTag (VecMulOp WordVec 8 W32) = 844 primOpTag (VecMulOp WordVec 4 W64) = 845 primOpTag (VecMulOp WordVec 64 W8) = 846 primOpTag (VecMulOp WordVec 32 W16) = 847 primOpTag (VecMulOp WordVec 16 W32) = 848 primOpTag (VecMulOp WordVec 8 W64) = 849 primOpTag (VecMulOp FloatVec 4 W32) = 850 primOpTag (VecMulOp FloatVec 2 W64) = 851 primOpTag (VecMulOp FloatVec 8 W32) = 852 primOpTag (VecMulOp FloatVec 4 W64) = 853 primOpTag (VecMulOp FloatVec 16 W32) = 854 primOpTag (VecMulOp FloatVec 8 W64) = 855 primOpTag (VecDivOp FloatVec 4 W32) = 856 primOpTag (VecDivOp FloatVec 2 W64) = 857 primOpTag 
(VecDivOp FloatVec 8 W32) = 858 primOpTag (VecDivOp FloatVec 4 W64) = 859 primOpTag (VecDivOp FloatVec 16 W32) = 860 primOpTag (VecDivOp FloatVec 8 W64) = 861 primOpTag (VecQuotOp IntVec 16 W8) = 862 primOpTag (VecQuotOp IntVec 8 W16) = 863 primOpTag (VecQuotOp IntVec 4 W32) = 864 primOpTag (VecQuotOp IntVec 2 W64) = 865 primOpTag (VecQuotOp IntVec 32 W8) = 866 primOpTag (VecQuotOp IntVec 16 W16) = 867 primOpTag (VecQuotOp IntVec 8 W32) = 868 primOpTag (VecQuotOp IntVec 4 W64) = 869 primOpTag (VecQuotOp IntVec 64 W8) = 870 primOpTag (VecQuotOp IntVec 32 W16) = 871 primOpTag (VecQuotOp IntVec 16 W32) = 872 primOpTag (VecQuotOp IntVec 8 W64) = 873 primOpTag (VecQuotOp WordVec 16 W8) = 874 primOpTag (VecQuotOp WordVec 8 W16) = 875 primOpTag (VecQuotOp WordVec 4 W32) = 876 primOpTag (VecQuotOp WordVec 2 W64) = 877 primOpTag (VecQuotOp WordVec 32 W8) = 878 primOpTag (VecQuotOp WordVec 16 W16) = 879 primOpTag (VecQuotOp WordVec 8 W32) = 880 primOpTag (VecQuotOp WordVec 4 W64) = 881 primOpTag (VecQuotOp WordVec 64 W8) = 882 primOpTag (VecQuotOp WordVec 32 W16) = 883 primOpTag (VecQuotOp WordVec 16 W32) = 884 primOpTag (VecQuotOp WordVec 8 W64) = 885 primOpTag (VecRemOp IntVec 16 W8) = 886 primOpTag (VecRemOp IntVec 8 W16) = 887 primOpTag (VecRemOp IntVec 4 W32) = 888 primOpTag (VecRemOp IntVec 2 W64) = 889 primOpTag (VecRemOp IntVec 32 W8) = 890 primOpTag (VecRemOp IntVec 16 W16) = 891 primOpTag (VecRemOp IntVec 8 W32) = 892 primOpTag (VecRemOp IntVec 4 W64) = 893 primOpTag (VecRemOp IntVec 64 W8) = 894 primOpTag (VecRemOp IntVec 32 W16) = 895 primOpTag (VecRemOp IntVec 16 W32) = 896 primOpTag (VecRemOp IntVec 8 W64) = 897 primOpTag (VecRemOp WordVec 16 W8) = 898 primOpTag (VecRemOp WordVec 8 W16) = 899 primOpTag (VecRemOp WordVec 4 W32) = 900 primOpTag (VecRemOp WordVec 2 W64) = 901 primOpTag (VecRemOp WordVec 32 W8) = 902 primOpTag (VecRemOp WordVec 16 W16) = 903 primOpTag (VecRemOp WordVec 8 W32) = 904 primOpTag (VecRemOp WordVec 4 W64) = 905 primOpTag (VecRemOp 
WordVec 64 W8) = 906 primOpTag (VecRemOp WordVec 32 W16) = 907 primOpTag (VecRemOp WordVec 16 W32) = 908 primOpTag (VecRemOp WordVec 8 W64) = 909 primOpTag (VecNegOp IntVec 16 W8) = 910 primOpTag (VecNegOp IntVec 8 W16) = 911 primOpTag (VecNegOp IntVec 4 W32) = 912 primOpTag (VecNegOp IntVec 2 W64) = 913 primOpTag (VecNegOp IntVec 32 W8) = 914 primOpTag (VecNegOp IntVec 16 W16) = 915 primOpTag (VecNegOp IntVec 8 W32) = 916 primOpTag (VecNegOp IntVec 4 W64) = 917 primOpTag (VecNegOp IntVec 64 W8) = 918 primOpTag (VecNegOp IntVec 32 W16) = 919 primOpTag (VecNegOp IntVec 16 W32) = 920 primOpTag (VecNegOp IntVec 8 W64) = 921 primOpTag (VecNegOp FloatVec 4 W32) = 922 primOpTag (VecNegOp FloatVec 2 W64) = 923 primOpTag (VecNegOp FloatVec 8 W32) = 924 primOpTag (VecNegOp FloatVec 4 W64) = 925 primOpTag (VecNegOp FloatVec 16 W32) = 926 primOpTag (VecNegOp FloatVec 8 W64) = 927 primOpTag (VecIndexByteArrayOp IntVec 16 W8) = 928 primOpTag (VecIndexByteArrayOp IntVec 8 W16) = 929 primOpTag (VecIndexByteArrayOp IntVec 4 W32) = 930 primOpTag (VecIndexByteArrayOp IntVec 2 W64) = 931 primOpTag (VecIndexByteArrayOp IntVec 32 W8) = 932 primOpTag (VecIndexByteArrayOp IntVec 16 W16) = 933 primOpTag (VecIndexByteArrayOp IntVec 8 W32) = 934 primOpTag (VecIndexByteArrayOp IntVec 4 W64) = 935 primOpTag (VecIndexByteArrayOp IntVec 64 W8) = 936 primOpTag (VecIndexByteArrayOp IntVec 32 W16) = 937 primOpTag (VecIndexByteArrayOp IntVec 16 W32) = 938 primOpTag (VecIndexByteArrayOp IntVec 8 W64) = 939 primOpTag (VecIndexByteArrayOp WordVec 16 W8) = 940 primOpTag (VecIndexByteArrayOp WordVec 8 W16) = 941 primOpTag (VecIndexByteArrayOp WordVec 4 W32) = 942 primOpTag (VecIndexByteArrayOp WordVec 2 W64) = 943 primOpTag (VecIndexByteArrayOp WordVec 32 W8) = 944 primOpTag (VecIndexByteArrayOp WordVec 16 W16) = 945 primOpTag (VecIndexByteArrayOp WordVec 8 W32) = 946 primOpTag (VecIndexByteArrayOp WordVec 4 W64) = 947 primOpTag (VecIndexByteArrayOp WordVec 64 W8) = 948 primOpTag (VecIndexByteArrayOp 
WordVec 32 W16) = 949 primOpTag (VecIndexByteArrayOp WordVec 16 W32) = 950 primOpTag (VecIndexByteArrayOp WordVec 8 W64) = 951 primOpTag (VecIndexByteArrayOp FloatVec 4 W32) = 952 primOpTag (VecIndexByteArrayOp FloatVec 2 W64) = 953 primOpTag (VecIndexByteArrayOp FloatVec 8 W32) = 954 primOpTag (VecIndexByteArrayOp FloatVec 4 W64) = 955 primOpTag (VecIndexByteArrayOp FloatVec 16 W32) = 956 primOpTag (VecIndexByteArrayOp FloatVec 8 W64) = 957 primOpTag (VecReadByteArrayOp IntVec 16 W8) = 958 primOpTag (VecReadByteArrayOp IntVec 8 W16) = 959 primOpTag (VecReadByteArrayOp IntVec 4 W32) = 960 primOpTag (VecReadByteArrayOp IntVec 2 W64) = 961 primOpTag (VecReadByteArrayOp IntVec 32 W8) = 962 primOpTag (VecReadByteArrayOp IntVec 16 W16) = 963 primOpTag (VecReadByteArrayOp IntVec 8 W32) = 964 primOpTag (VecReadByteArrayOp IntVec 4 W64) = 965 primOpTag (VecReadByteArrayOp IntVec 64 W8) = 966 primOpTag (VecReadByteArrayOp IntVec 32 W16) = 967 primOpTag (VecReadByteArrayOp IntVec 16 W32) = 968 primOpTag (VecReadByteArrayOp IntVec 8 W64) = 969 primOpTag (VecReadByteArrayOp WordVec 16 W8) = 970 primOpTag (VecReadByteArrayOp WordVec 8 W16) = 971 primOpTag (VecReadByteArrayOp WordVec 4 W32) = 972 primOpTag (VecReadByteArrayOp WordVec 2 W64) = 973 primOpTag (VecReadByteArrayOp WordVec 32 W8) = 974 primOpTag (VecReadByteArrayOp WordVec 16 W16) = 975 primOpTag (VecReadByteArrayOp WordVec 8 W32) = 976 primOpTag (VecReadByteArrayOp WordVec 4 W64) = 977 primOpTag (VecReadByteArrayOp WordVec 64 W8) = 978 primOpTag (VecReadByteArrayOp WordVec 32 W16) = 979 primOpTag (VecReadByteArrayOp WordVec 16 W32) = 980 primOpTag (VecReadByteArrayOp WordVec 8 W64) = 981 primOpTag (VecReadByteArrayOp FloatVec 4 W32) = 982 primOpTag (VecReadByteArrayOp FloatVec 2 W64) = 983 primOpTag (VecReadByteArrayOp FloatVec 8 W32) = 984 primOpTag (VecReadByteArrayOp FloatVec 4 W64) = 985 primOpTag (VecReadByteArrayOp FloatVec 16 W32) = 986 primOpTag (VecReadByteArrayOp FloatVec 8 W64) = 987 primOpTag 
(VecWriteByteArrayOp IntVec 16 W8) = 988 primOpTag (VecWriteByteArrayOp IntVec 8 W16) = 989 primOpTag (VecWriteByteArrayOp IntVec 4 W32) = 990 primOpTag (VecWriteByteArrayOp IntVec 2 W64) = 991 primOpTag (VecWriteByteArrayOp IntVec 32 W8) = 992 primOpTag (VecWriteByteArrayOp IntVec 16 W16) = 993 primOpTag (VecWriteByteArrayOp IntVec 8 W32) = 994 primOpTag (VecWriteByteArrayOp IntVec 4 W64) = 995 primOpTag (VecWriteByteArrayOp IntVec 64 W8) = 996 primOpTag (VecWriteByteArrayOp IntVec 32 W16) = 997 primOpTag (VecWriteByteArrayOp IntVec 16 W32) = 998 primOpTag (VecWriteByteArrayOp IntVec 8 W64) = 999 primOpTag (VecWriteByteArrayOp WordVec 16 W8) = 1000 primOpTag (VecWriteByteArrayOp WordVec 8 W16) = 1001 primOpTag (VecWriteByteArrayOp WordVec 4 W32) = 1002 primOpTag (VecWriteByteArrayOp WordVec 2 W64) = 1003 primOpTag (VecWriteByteArrayOp WordVec 32 W8) = 1004 primOpTag (VecWriteByteArrayOp WordVec 16 W16) = 1005 primOpTag (VecWriteByteArrayOp WordVec 8 W32) = 1006 primOpTag (VecWriteByteArrayOp WordVec 4 W64) = 1007 primOpTag (VecWriteByteArrayOp WordVec 64 W8) = 1008 primOpTag (VecWriteByteArrayOp WordVec 32 W16) = 1009 primOpTag (VecWriteByteArrayOp WordVec 16 W32) = 1010 primOpTag (VecWriteByteArrayOp WordVec 8 W64) = 1011 primOpTag (VecWriteByteArrayOp FloatVec 4 W32) = 1012 primOpTag (VecWriteByteArrayOp FloatVec 2 W64) = 1013 primOpTag (VecWriteByteArrayOp FloatVec 8 W32) = 1014 primOpTag (VecWriteByteArrayOp FloatVec 4 W64) = 1015 primOpTag (VecWriteByteArrayOp FloatVec 16 W32) = 1016 primOpTag (VecWriteByteArrayOp FloatVec 8 W64) = 1017 primOpTag (VecIndexOffAddrOp IntVec 16 W8) = 1018 primOpTag (VecIndexOffAddrOp IntVec 8 W16) = 1019 primOpTag (VecIndexOffAddrOp IntVec 4 W32) = 1020 primOpTag (VecIndexOffAddrOp IntVec 2 W64) = 1021 primOpTag (VecIndexOffAddrOp IntVec 32 W8) = 1022 primOpTag (VecIndexOffAddrOp IntVec 16 W16) = 1023 primOpTag (VecIndexOffAddrOp IntVec 8 W32) = 1024 primOpTag (VecIndexOffAddrOp IntVec 4 W64) = 1025 primOpTag (VecIndexOffAddrOp 
IntVec 64 W8) = 1026 primOpTag (VecIndexOffAddrOp IntVec 32 W16) = 1027 primOpTag (VecIndexOffAddrOp IntVec 16 W32) = 1028 primOpTag (VecIndexOffAddrOp IntVec 8 W64) = 1029 primOpTag (VecIndexOffAddrOp WordVec 16 W8) = 1030 primOpTag (VecIndexOffAddrOp WordVec 8 W16) = 1031 primOpTag (VecIndexOffAddrOp WordVec 4 W32) = 1032 primOpTag (VecIndexOffAddrOp WordVec 2 W64) = 1033 primOpTag (VecIndexOffAddrOp WordVec 32 W8) = 1034 primOpTag (VecIndexOffAddrOp WordVec 16 W16) = 1035 primOpTag (VecIndexOffAddrOp WordVec 8 W32) = 1036 primOpTag (VecIndexOffAddrOp WordVec 4 W64) = 1037 primOpTag (VecIndexOffAddrOp WordVec 64 W8) = 1038 primOpTag (VecIndexOffAddrOp WordVec 32 W16) = 1039 primOpTag (VecIndexOffAddrOp WordVec 16 W32) = 1040 primOpTag (VecIndexOffAddrOp WordVec 8 W64) = 1041 primOpTag (VecIndexOffAddrOp FloatVec 4 W32) = 1042 primOpTag (VecIndexOffAddrOp FloatVec 2 W64) = 1043 primOpTag (VecIndexOffAddrOp FloatVec 8 W32) = 1044 primOpTag (VecIndexOffAddrOp FloatVec 4 W64) = 1045 primOpTag (VecIndexOffAddrOp FloatVec 16 W32) = 1046 primOpTag (VecIndexOffAddrOp FloatVec 8 W64) = 1047 primOpTag (VecReadOffAddrOp IntVec 16 W8) = 1048 primOpTag (VecReadOffAddrOp IntVec 8 W16) = 1049 primOpTag (VecReadOffAddrOp IntVec 4 W32) = 1050 primOpTag (VecReadOffAddrOp IntVec 2 W64) = 1051 primOpTag (VecReadOffAddrOp IntVec 32 W8) = 1052 primOpTag (VecReadOffAddrOp IntVec 16 W16) = 1053 primOpTag (VecReadOffAddrOp IntVec 8 W32) = 1054 primOpTag (VecReadOffAddrOp IntVec 4 W64) = 1055 primOpTag (VecReadOffAddrOp IntVec 64 W8) = 1056 primOpTag (VecReadOffAddrOp IntVec 32 W16) = 1057 primOpTag (VecReadOffAddrOp IntVec 16 W32) = 1058 primOpTag (VecReadOffAddrOp IntVec 8 W64) = 1059 primOpTag (VecReadOffAddrOp WordVec 16 W8) = 1060 primOpTag (VecReadOffAddrOp WordVec 8 W16) = 1061 primOpTag (VecReadOffAddrOp WordVec 4 W32) = 1062 primOpTag (VecReadOffAddrOp WordVec 2 W64) = 1063 primOpTag (VecReadOffAddrOp WordVec 32 W8) = 1064 primOpTag (VecReadOffAddrOp WordVec 16 W16) = 1065 
primOpTag (VecReadOffAddrOp WordVec 8 W32) = 1066 primOpTag (VecReadOffAddrOp WordVec 4 W64) = 1067 primOpTag (VecReadOffAddrOp WordVec 64 W8) = 1068 primOpTag (VecReadOffAddrOp WordVec 32 W16) = 1069 primOpTag (VecReadOffAddrOp WordVec 16 W32) = 1070 primOpTag (VecReadOffAddrOp WordVec 8 W64) = 1071 primOpTag (VecReadOffAddrOp FloatVec 4 W32) = 1072 primOpTag (VecReadOffAddrOp FloatVec 2 W64) = 1073 primOpTag (VecReadOffAddrOp FloatVec 8 W32) = 1074 primOpTag (VecReadOffAddrOp FloatVec 4 W64) = 1075 primOpTag (VecReadOffAddrOp FloatVec 16 W32) = 1076 primOpTag (VecReadOffAddrOp FloatVec 8 W64) = 1077 primOpTag (VecWriteOffAddrOp IntVec 16 W8) = 1078 primOpTag (VecWriteOffAddrOp IntVec 8 W16) = 1079 primOpTag (VecWriteOffAddrOp IntVec 4 W32) = 1080 primOpTag (VecWriteOffAddrOp IntVec 2 W64) = 1081 primOpTag (VecWriteOffAddrOp IntVec 32 W8) = 1082 primOpTag (VecWriteOffAddrOp IntVec 16 W16) = 1083 primOpTag (VecWriteOffAddrOp IntVec 8 W32) = 1084 primOpTag (VecWriteOffAddrOp IntVec 4 W64) = 1085 primOpTag (VecWriteOffAddrOp IntVec 64 W8) = 1086 primOpTag (VecWriteOffAddrOp IntVec 32 W16) = 1087 primOpTag (VecWriteOffAddrOp IntVec 16 W32) = 1088 primOpTag (VecWriteOffAddrOp IntVec 8 W64) = 1089 primOpTag (VecWriteOffAddrOp WordVec 16 W8) = 1090 primOpTag (VecWriteOffAddrOp WordVec 8 W16) = 1091 primOpTag (VecWriteOffAddrOp WordVec 4 W32) = 1092 primOpTag (VecWriteOffAddrOp WordVec 2 W64) = 1093 primOpTag (VecWriteOffAddrOp WordVec 32 W8) = 1094 primOpTag (VecWriteOffAddrOp WordVec 16 W16) = 1095 primOpTag (VecWriteOffAddrOp WordVec 8 W32) = 1096 primOpTag (VecWriteOffAddrOp WordVec 4 W64) = 1097 primOpTag (VecWriteOffAddrOp WordVec 64 W8) = 1098 primOpTag (VecWriteOffAddrOp WordVec 32 W16) = 1099 primOpTag (VecWriteOffAddrOp WordVec 16 W32) = 1100 primOpTag (VecWriteOffAddrOp WordVec 8 W64) = 1101 primOpTag (VecWriteOffAddrOp FloatVec 4 W32) = 1102 primOpTag (VecWriteOffAddrOp FloatVec 2 W64) = 1103 primOpTag (VecWriteOffAddrOp FloatVec 8 W32) = 1104 primOpTag 
(VecWriteOffAddrOp FloatVec 4 W64) = 1105 primOpTag (VecWriteOffAddrOp FloatVec 16 W32) = 1106 primOpTag (VecWriteOffAddrOp FloatVec 8 W64) = 1107 primOpTag (VecIndexScalarByteArrayOp IntVec 16 W8) = 1108 primOpTag (VecIndexScalarByteArrayOp IntVec 8 W16) = 1109 primOpTag (VecIndexScalarByteArrayOp IntVec 4 W32) = 1110 primOpTag (VecIndexScalarByteArrayOp IntVec 2 W64) = 1111 primOpTag (VecIndexScalarByteArrayOp IntVec 32 W8) = 1112 primOpTag (VecIndexScalarByteArrayOp IntVec 16 W16) = 1113 primOpTag (VecIndexScalarByteArrayOp IntVec 8 W32) = 1114 primOpTag (VecIndexScalarByteArrayOp IntVec 4 W64) = 1115 primOpTag (VecIndexScalarByteArrayOp IntVec 64 W8) = 1116 primOpTag (VecIndexScalarByteArrayOp IntVec 32 W16) = 1117 primOpTag (VecIndexScalarByteArrayOp IntVec 16 W32) = 1118 primOpTag (VecIndexScalarByteArrayOp IntVec 8 W64) = 1119 primOpTag (VecIndexScalarByteArrayOp WordVec 16 W8) = 1120 primOpTag (VecIndexScalarByteArrayOp WordVec 8 W16) = 1121 primOpTag (VecIndexScalarByteArrayOp WordVec 4 W32) = 1122 primOpTag (VecIndexScalarByteArrayOp WordVec 2 W64) = 1123 primOpTag (VecIndexScalarByteArrayOp WordVec 32 W8) = 1124 primOpTag (VecIndexScalarByteArrayOp WordVec 16 W16) = 1125 primOpTag (VecIndexScalarByteArrayOp WordVec 8 W32) = 1126 primOpTag (VecIndexScalarByteArrayOp WordVec 4 W64) = 1127 primOpTag (VecIndexScalarByteArrayOp WordVec 64 W8) = 1128 primOpTag (VecIndexScalarByteArrayOp WordVec 32 W16) = 1129 primOpTag (VecIndexScalarByteArrayOp WordVec 16 W32) = 1130 primOpTag (VecIndexScalarByteArrayOp WordVec 8 W64) = 1131 primOpTag (VecIndexScalarByteArrayOp FloatVec 4 W32) = 1132 primOpTag (VecIndexScalarByteArrayOp FloatVec 2 W64) = 1133 primOpTag (VecIndexScalarByteArrayOp FloatVec 8 W32) = 1134 primOpTag (VecIndexScalarByteArrayOp FloatVec 4 W64) = 1135 primOpTag (VecIndexScalarByteArrayOp FloatVec 16 W32) = 1136 primOpTag (VecIndexScalarByteArrayOp FloatVec 8 W64) = 1137 primOpTag (VecReadScalarByteArrayOp IntVec 16 W8) = 1138 primOpTag 
(VecReadScalarByteArrayOp IntVec 8 W16) = 1139 primOpTag (VecReadScalarByteArrayOp IntVec 4 W32) = 1140 primOpTag (VecReadScalarByteArrayOp IntVec 2 W64) = 1141 primOpTag (VecReadScalarByteArrayOp IntVec 32 W8) = 1142 primOpTag (VecReadScalarByteArrayOp IntVec 16 W16) = 1143 primOpTag (VecReadScalarByteArrayOp IntVec 8 W32) = 1144 primOpTag (VecReadScalarByteArrayOp IntVec 4 W64) = 1145 primOpTag (VecReadScalarByteArrayOp IntVec 64 W8) = 1146 primOpTag (VecReadScalarByteArrayOp IntVec 32 W16) = 1147 primOpTag (VecReadScalarByteArrayOp IntVec 16 W32) = 1148 primOpTag (VecReadScalarByteArrayOp IntVec 8 W64) = 1149 primOpTag (VecReadScalarByteArrayOp WordVec 16 W8) = 1150 primOpTag (VecReadScalarByteArrayOp WordVec 8 W16) = 1151 primOpTag (VecReadScalarByteArrayOp WordVec 4 W32) = 1152 primOpTag (VecReadScalarByteArrayOp WordVec 2 W64) = 1153 primOpTag (VecReadScalarByteArrayOp WordVec 32 W8) = 1154 primOpTag (VecReadScalarByteArrayOp WordVec 16 W16) = 1155 primOpTag (VecReadScalarByteArrayOp WordVec 8 W32) = 1156 primOpTag (VecReadScalarByteArrayOp WordVec 4 W64) = 1157 primOpTag (VecReadScalarByteArrayOp WordVec 64 W8) = 1158 primOpTag (VecReadScalarByteArrayOp WordVec 32 W16) = 1159 primOpTag (VecReadScalarByteArrayOp WordVec 16 W32) = 1160 primOpTag (VecReadScalarByteArrayOp WordVec 8 W64) = 1161 primOpTag (VecReadScalarByteArrayOp FloatVec 4 W32) = 1162 primOpTag (VecReadScalarByteArrayOp FloatVec 2 W64) = 1163 primOpTag (VecReadScalarByteArrayOp FloatVec 8 W32) = 1164 primOpTag (VecReadScalarByteArrayOp FloatVec 4 W64) = 1165 primOpTag (VecReadScalarByteArrayOp FloatVec 16 W32) = 1166 primOpTag (VecReadScalarByteArrayOp FloatVec 8 W64) = 1167 primOpTag (VecWriteScalarByteArrayOp IntVec 16 W8) = 1168 primOpTag (VecWriteScalarByteArrayOp IntVec 8 W16) = 1169 primOpTag (VecWriteScalarByteArrayOp IntVec 4 W32) = 1170 primOpTag (VecWriteScalarByteArrayOp IntVec 2 W64) = 1171 primOpTag (VecWriteScalarByteArrayOp IntVec 32 W8) = 1172 primOpTag 
(VecWriteScalarByteArrayOp IntVec 16 W16) = 1173 primOpTag (VecWriteScalarByteArrayOp IntVec 8 W32) = 1174 primOpTag (VecWriteScalarByteArrayOp IntVec 4 W64) = 1175 primOpTag (VecWriteScalarByteArrayOp IntVec 64 W8) = 1176 primOpTag (VecWriteScalarByteArrayOp IntVec 32 W16) = 1177 primOpTag (VecWriteScalarByteArrayOp IntVec 16 W32) = 1178 primOpTag (VecWriteScalarByteArrayOp IntVec 8 W64) = 1179 primOpTag (VecWriteScalarByteArrayOp WordVec 16 W8) = 1180 primOpTag (VecWriteScalarByteArrayOp WordVec 8 W16) = 1181 primOpTag (VecWriteScalarByteArrayOp WordVec 4 W32) = 1182 primOpTag (VecWriteScalarByteArrayOp WordVec 2 W64) = 1183 primOpTag (VecWriteScalarByteArrayOp WordVec 32 W8) = 1184 primOpTag (VecWriteScalarByteArrayOp WordVec 16 W16) = 1185 primOpTag (VecWriteScalarByteArrayOp WordVec 8 W32) = 1186 primOpTag (VecWriteScalarByteArrayOp WordVec 4 W64) = 1187 primOpTag (VecWriteScalarByteArrayOp WordVec 64 W8) = 1188 primOpTag (VecWriteScalarByteArrayOp WordVec 32 W16) = 1189 primOpTag (VecWriteScalarByteArrayOp WordVec 16 W32) = 1190 primOpTag (VecWriteScalarByteArrayOp WordVec 8 W64) = 1191 primOpTag (VecWriteScalarByteArrayOp FloatVec 4 W32) = 1192 primOpTag (VecWriteScalarByteArrayOp FloatVec 2 W64) = 1193 primOpTag (VecWriteScalarByteArrayOp FloatVec 8 W32) = 1194 primOpTag (VecWriteScalarByteArrayOp FloatVec 4 W64) = 1195 primOpTag (VecWriteScalarByteArrayOp FloatVec 16 W32) = 1196 primOpTag (VecWriteScalarByteArrayOp FloatVec 8 W64) = 1197 primOpTag (VecIndexScalarOffAddrOp IntVec 16 W8) = 1198 primOpTag (VecIndexScalarOffAddrOp IntVec 8 W16) = 1199 primOpTag (VecIndexScalarOffAddrOp IntVec 4 W32) = 1200 primOpTag (VecIndexScalarOffAddrOp IntVec 2 W64) = 1201 primOpTag (VecIndexScalarOffAddrOp IntVec 32 W8) = 1202 primOpTag (VecIndexScalarOffAddrOp IntVec 16 W16) = 1203 primOpTag (VecIndexScalarOffAddrOp IntVec 8 W32) = 1204 primOpTag (VecIndexScalarOffAddrOp IntVec 4 W64) = 1205 primOpTag (VecIndexScalarOffAddrOp IntVec 64 W8) = 1206 primOpTag 
(VecIndexScalarOffAddrOp IntVec 32 W16) = 1207 primOpTag (VecIndexScalarOffAddrOp IntVec 16 W32) = 1208 primOpTag (VecIndexScalarOffAddrOp IntVec 8 W64) = 1209 primOpTag (VecIndexScalarOffAddrOp WordVec 16 W8) = 1210 primOpTag (VecIndexScalarOffAddrOp WordVec 8 W16) = 1211 primOpTag (VecIndexScalarOffAddrOp WordVec 4 W32) = 1212 primOpTag (VecIndexScalarOffAddrOp WordVec 2 W64) = 1213 primOpTag (VecIndexScalarOffAddrOp WordVec 32 W8) = 1214 primOpTag (VecIndexScalarOffAddrOp WordVec 16 W16) = 1215 primOpTag (VecIndexScalarOffAddrOp WordVec 8 W32) = 1216 primOpTag (VecIndexScalarOffAddrOp WordVec 4 W64) = 1217 primOpTag (VecIndexScalarOffAddrOp WordVec 64 W8) = 1218 primOpTag (VecIndexScalarOffAddrOp WordVec 32 W16) = 1219 primOpTag (VecIndexScalarOffAddrOp WordVec 16 W32) = 1220 primOpTag (VecIndexScalarOffAddrOp WordVec 8 W64) = 1221 primOpTag (VecIndexScalarOffAddrOp FloatVec 4 W32) = 1222 primOpTag (VecIndexScalarOffAddrOp FloatVec 2 W64) = 1223 primOpTag (VecIndexScalarOffAddrOp FloatVec 8 W32) = 1224 primOpTag (VecIndexScalarOffAddrOp FloatVec 4 W64) = 1225 primOpTag (VecIndexScalarOffAddrOp FloatVec 16 W32) = 1226 primOpTag (VecIndexScalarOffAddrOp FloatVec 8 W64) = 1227 primOpTag (VecReadScalarOffAddrOp IntVec 16 W8) = 1228 primOpTag (VecReadScalarOffAddrOp IntVec 8 W16) = 1229 primOpTag (VecReadScalarOffAddrOp IntVec 4 W32) = 1230 primOpTag (VecReadScalarOffAddrOp IntVec 2 W64) = 1231 primOpTag (VecReadScalarOffAddrOp IntVec 32 W8) = 1232 primOpTag (VecReadScalarOffAddrOp IntVec 16 W16) = 1233 primOpTag (VecReadScalarOffAddrOp IntVec 8 W32) = 1234 primOpTag (VecReadScalarOffAddrOp IntVec 4 W64) = 1235 primOpTag (VecReadScalarOffAddrOp IntVec 64 W8) = 1236 primOpTag (VecReadScalarOffAddrOp IntVec 32 W16) = 1237 primOpTag (VecReadScalarOffAddrOp IntVec 16 W32) = 1238 primOpTag (VecReadScalarOffAddrOp IntVec 8 W64) = 1239 primOpTag (VecReadScalarOffAddrOp WordVec 16 W8) = 1240 primOpTag (VecReadScalarOffAddrOp WordVec 8 W16) = 1241 primOpTag 
(VecReadScalarOffAddrOp WordVec 4 W32) = 1242 primOpTag (VecReadScalarOffAddrOp WordVec 2 W64) = 1243 primOpTag (VecReadScalarOffAddrOp WordVec 32 W8) = 1244 primOpTag (VecReadScalarOffAddrOp WordVec 16 W16) = 1245 primOpTag (VecReadScalarOffAddrOp WordVec 8 W32) = 1246 primOpTag (VecReadScalarOffAddrOp WordVec 4 W64) = 1247 primOpTag (VecReadScalarOffAddrOp WordVec 64 W8) = 1248 primOpTag (VecReadScalarOffAddrOp WordVec 32 W16) = 1249 primOpTag (VecReadScalarOffAddrOp WordVec 16 W32) = 1250 primOpTag (VecReadScalarOffAddrOp WordVec 8 W64) = 1251 primOpTag (VecReadScalarOffAddrOp FloatVec 4 W32) = 1252 primOpTag (VecReadScalarOffAddrOp FloatVec 2 W64) = 1253 primOpTag (VecReadScalarOffAddrOp FloatVec 8 W32) = 1254 primOpTag (VecReadScalarOffAddrOp FloatVec 4 W64) = 1255 primOpTag (VecReadScalarOffAddrOp FloatVec 16 W32) = 1256 primOpTag (VecReadScalarOffAddrOp FloatVec 8 W64) = 1257 primOpTag (VecWriteScalarOffAddrOp IntVec 16 W8) = 1258 primOpTag (VecWriteScalarOffAddrOp IntVec 8 W16) = 1259 primOpTag (VecWriteScalarOffAddrOp IntVec 4 W32) = 1260 primOpTag (VecWriteScalarOffAddrOp IntVec 2 W64) = 1261 primOpTag (VecWriteScalarOffAddrOp IntVec 32 W8) = 1262 primOpTag (VecWriteScalarOffAddrOp IntVec 16 W16) = 1263 primOpTag (VecWriteScalarOffAddrOp IntVec 8 W32) = 1264 primOpTag (VecWriteScalarOffAddrOp IntVec 4 W64) = 1265 primOpTag (VecWriteScalarOffAddrOp IntVec 64 W8) = 1266 primOpTag (VecWriteScalarOffAddrOp IntVec 32 W16) = 1267 primOpTag (VecWriteScalarOffAddrOp IntVec 16 W32) = 1268 primOpTag (VecWriteScalarOffAddrOp IntVec 8 W64) = 1269 primOpTag (VecWriteScalarOffAddrOp WordVec 16 W8) = 1270 primOpTag (VecWriteScalarOffAddrOp WordVec 8 W16) = 1271 primOpTag (VecWriteScalarOffAddrOp WordVec 4 W32) = 1272 primOpTag (VecWriteScalarOffAddrOp WordVec 2 W64) = 1273 primOpTag (VecWriteScalarOffAddrOp WordVec 32 W8) = 1274 primOpTag (VecWriteScalarOffAddrOp WordVec 16 W16) = 1275 primOpTag (VecWriteScalarOffAddrOp WordVec 8 W32) = 1276 primOpTag 
(VecWriteScalarOffAddrOp WordVec 4 W64) = 1277 primOpTag (VecWriteScalarOffAddrOp WordVec 64 W8) = 1278 primOpTag (VecWriteScalarOffAddrOp WordVec 32 W16) = 1279 primOpTag (VecWriteScalarOffAddrOp WordVec 16 W32) = 1280 primOpTag (VecWriteScalarOffAddrOp WordVec 8 W64) = 1281 primOpTag (VecWriteScalarOffAddrOp FloatVec 4 W32) = 1282 primOpTag (VecWriteScalarOffAddrOp FloatVec 2 W64) = 1283 primOpTag (VecWriteScalarOffAddrOp FloatVec 8 W32) = 1284 primOpTag (VecWriteScalarOffAddrOp FloatVec 4 W64) = 1285 primOpTag (VecWriteScalarOffAddrOp FloatVec 16 W32) = 1286 primOpTag (VecWriteScalarOffAddrOp FloatVec 8 W64) = 1287 primOpTag PrefetchByteArrayOp3 = 1288 primOpTag PrefetchMutableByteArrayOp3 = 1289 primOpTag PrefetchAddrOp3 = 1290 primOpTag PrefetchValueOp3 = 1291 primOpTag PrefetchByteArrayOp2 = 1292 primOpTag PrefetchMutableByteArrayOp2 = 1293 primOpTag PrefetchAddrOp2 = 1294 primOpTag PrefetchValueOp2 = 1295 primOpTag PrefetchByteArrayOp1 = 1296 primOpTag PrefetchMutableByteArrayOp1 = 1297 primOpTag PrefetchAddrOp1 = 1298 primOpTag PrefetchValueOp1 = 1299 primOpTag PrefetchByteArrayOp0 = 1300 primOpTag PrefetchMutableByteArrayOp0 = 1301 primOpTag PrefetchAddrOp0 = 1302 primOpTag PrefetchValueOp0 = 1303 ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-vector-tycons.hs-incl0000644000000000000000000000131014472400111026415 0ustar0000000000000000 , int8X16PrimTyCon , int16X8PrimTyCon , int32X4PrimTyCon , int64X2PrimTyCon , int8X32PrimTyCon , int16X16PrimTyCon , int32X8PrimTyCon , int64X4PrimTyCon , int8X64PrimTyCon , int16X32PrimTyCon , int32X16PrimTyCon , int64X8PrimTyCon , word8X16PrimTyCon , word16X8PrimTyCon , word32X4PrimTyCon , word64X2PrimTyCon , word8X32PrimTyCon , word16X16PrimTyCon , word32X8PrimTyCon , word64X4PrimTyCon , word8X64PrimTyCon , word16X32PrimTyCon , word32X16PrimTyCon , word64X8PrimTyCon , floatX4PrimTyCon , doubleX2PrimTyCon , floatX8PrimTyCon , doubleX4PrimTyCon , floatX16PrimTyCon , doubleX8PrimTyCon 
ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-vector-tys-exports.hs-incl0000644000000000000000000000237214472400111027430 0ustar0000000000000000 int8X16PrimTy, int8X16PrimTyCon, int16X8PrimTy, int16X8PrimTyCon, int32X4PrimTy, int32X4PrimTyCon, int64X2PrimTy, int64X2PrimTyCon, int8X32PrimTy, int8X32PrimTyCon, int16X16PrimTy, int16X16PrimTyCon, int32X8PrimTy, int32X8PrimTyCon, int64X4PrimTy, int64X4PrimTyCon, int8X64PrimTy, int8X64PrimTyCon, int16X32PrimTy, int16X32PrimTyCon, int32X16PrimTy, int32X16PrimTyCon, int64X8PrimTy, int64X8PrimTyCon, word8X16PrimTy, word8X16PrimTyCon, word16X8PrimTy, word16X8PrimTyCon, word32X4PrimTy, word32X4PrimTyCon, word64X2PrimTy, word64X2PrimTyCon, word8X32PrimTy, word8X32PrimTyCon, word16X16PrimTy, word16X16PrimTyCon, word32X8PrimTy, word32X8PrimTyCon, word64X4PrimTy, word64X4PrimTyCon, word8X64PrimTy, word8X64PrimTyCon, word16X32PrimTy, word16X32PrimTyCon, word32X16PrimTy, word32X16PrimTyCon, word64X8PrimTy, word64X8PrimTyCon, floatX4PrimTy, floatX4PrimTyCon, doubleX2PrimTy, doubleX2PrimTyCon, floatX8PrimTy, floatX8PrimTyCon, doubleX4PrimTy, doubleX4PrimTyCon, floatX16PrimTy, floatX16PrimTyCon, doubleX8PrimTy, doubleX8PrimTyCon, ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-vector-tys.hs-incl0000644000000000000000000002366414472400111025735 0ustar0000000000000000int8X16PrimTyConName :: Name int8X16PrimTyConName = mkPrimTc (fsLit "Int8X16#") int8X16PrimTyConKey int8X16PrimTyCon int8X16PrimTy :: Type int8X16PrimTy = mkTyConTy int8X16PrimTyCon int8X16PrimTyCon :: TyCon int8X16PrimTyCon = pcPrimTyCon0 int8X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, int8ElemRepDataConTy]) int16X8PrimTyConName :: Name int16X8PrimTyConName = mkPrimTc (fsLit "Int16X8#") int16X8PrimTyConKey int16X8PrimTyCon int16X8PrimTy :: Type int16X8PrimTy = mkTyConTy int16X8PrimTyCon int16X8PrimTyCon :: TyCon int16X8PrimTyCon = pcPrimTyCon0 int16X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, 
int16ElemRepDataConTy]) int32X4PrimTyConName :: Name int32X4PrimTyConName = mkPrimTc (fsLit "Int32X4#") int32X4PrimTyConKey int32X4PrimTyCon int32X4PrimTy :: Type int32X4PrimTy = mkTyConTy int32X4PrimTyCon int32X4PrimTyCon :: TyCon int32X4PrimTyCon = pcPrimTyCon0 int32X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, int32ElemRepDataConTy]) int64X2PrimTyConName :: Name int64X2PrimTyConName = mkPrimTc (fsLit "Int64X2#") int64X2PrimTyConKey int64X2PrimTyCon int64X2PrimTy :: Type int64X2PrimTy = mkTyConTy int64X2PrimTyCon int64X2PrimTyCon :: TyCon int64X2PrimTyCon = pcPrimTyCon0 int64X2PrimTyConName (TyConApp vecRepDataConTyCon [vec2DataConTy, int64ElemRepDataConTy]) int8X32PrimTyConName :: Name int8X32PrimTyConName = mkPrimTc (fsLit "Int8X32#") int8X32PrimTyConKey int8X32PrimTyCon int8X32PrimTy :: Type int8X32PrimTy = mkTyConTy int8X32PrimTyCon int8X32PrimTyCon :: TyCon int8X32PrimTyCon = pcPrimTyCon0 int8X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, int8ElemRepDataConTy]) int16X16PrimTyConName :: Name int16X16PrimTyConName = mkPrimTc (fsLit "Int16X16#") int16X16PrimTyConKey int16X16PrimTyCon int16X16PrimTy :: Type int16X16PrimTy = mkTyConTy int16X16PrimTyCon int16X16PrimTyCon :: TyCon int16X16PrimTyCon = pcPrimTyCon0 int16X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, int16ElemRepDataConTy]) int32X8PrimTyConName :: Name int32X8PrimTyConName = mkPrimTc (fsLit "Int32X8#") int32X8PrimTyConKey int32X8PrimTyCon int32X8PrimTy :: Type int32X8PrimTy = mkTyConTy int32X8PrimTyCon int32X8PrimTyCon :: TyCon int32X8PrimTyCon = pcPrimTyCon0 int32X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, int32ElemRepDataConTy]) int64X4PrimTyConName :: Name int64X4PrimTyConName = mkPrimTc (fsLit "Int64X4#") int64X4PrimTyConKey int64X4PrimTyCon int64X4PrimTy :: Type int64X4PrimTy = mkTyConTy int64X4PrimTyCon int64X4PrimTyCon :: TyCon int64X4PrimTyCon = pcPrimTyCon0 int64X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, 
int64ElemRepDataConTy]) int8X64PrimTyConName :: Name int8X64PrimTyConName = mkPrimTc (fsLit "Int8X64#") int8X64PrimTyConKey int8X64PrimTyCon int8X64PrimTy :: Type int8X64PrimTy = mkTyConTy int8X64PrimTyCon int8X64PrimTyCon :: TyCon int8X64PrimTyCon = pcPrimTyCon0 int8X64PrimTyConName (TyConApp vecRepDataConTyCon [vec64DataConTy, int8ElemRepDataConTy]) int16X32PrimTyConName :: Name int16X32PrimTyConName = mkPrimTc (fsLit "Int16X32#") int16X32PrimTyConKey int16X32PrimTyCon int16X32PrimTy :: Type int16X32PrimTy = mkTyConTy int16X32PrimTyCon int16X32PrimTyCon :: TyCon int16X32PrimTyCon = pcPrimTyCon0 int16X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, int16ElemRepDataConTy]) int32X16PrimTyConName :: Name int32X16PrimTyConName = mkPrimTc (fsLit "Int32X16#") int32X16PrimTyConKey int32X16PrimTyCon int32X16PrimTy :: Type int32X16PrimTy = mkTyConTy int32X16PrimTyCon int32X16PrimTyCon :: TyCon int32X16PrimTyCon = pcPrimTyCon0 int32X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, int32ElemRepDataConTy]) int64X8PrimTyConName :: Name int64X8PrimTyConName = mkPrimTc (fsLit "Int64X8#") int64X8PrimTyConKey int64X8PrimTyCon int64X8PrimTy :: Type int64X8PrimTy = mkTyConTy int64X8PrimTyCon int64X8PrimTyCon :: TyCon int64X8PrimTyCon = pcPrimTyCon0 int64X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, int64ElemRepDataConTy]) word8X16PrimTyConName :: Name word8X16PrimTyConName = mkPrimTc (fsLit "Word8X16#") word8X16PrimTyConKey word8X16PrimTyCon word8X16PrimTy :: Type word8X16PrimTy = mkTyConTy word8X16PrimTyCon word8X16PrimTyCon :: TyCon word8X16PrimTyCon = pcPrimTyCon0 word8X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, word8ElemRepDataConTy]) word16X8PrimTyConName :: Name word16X8PrimTyConName = mkPrimTc (fsLit "Word16X8#") word16X8PrimTyConKey word16X8PrimTyCon word16X8PrimTy :: Type word16X8PrimTy = mkTyConTy word16X8PrimTyCon word16X8PrimTyCon :: TyCon word16X8PrimTyCon = pcPrimTyCon0 word16X8PrimTyConName (TyConApp 
vecRepDataConTyCon [vec8DataConTy, word16ElemRepDataConTy]) word32X4PrimTyConName :: Name word32X4PrimTyConName = mkPrimTc (fsLit "Word32X4#") word32X4PrimTyConKey word32X4PrimTyCon word32X4PrimTy :: Type word32X4PrimTy = mkTyConTy word32X4PrimTyCon word32X4PrimTyCon :: TyCon word32X4PrimTyCon = pcPrimTyCon0 word32X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, word32ElemRepDataConTy]) word64X2PrimTyConName :: Name word64X2PrimTyConName = mkPrimTc (fsLit "Word64X2#") word64X2PrimTyConKey word64X2PrimTyCon word64X2PrimTy :: Type word64X2PrimTy = mkTyConTy word64X2PrimTyCon word64X2PrimTyCon :: TyCon word64X2PrimTyCon = pcPrimTyCon0 word64X2PrimTyConName (TyConApp vecRepDataConTyCon [vec2DataConTy, word64ElemRepDataConTy]) word8X32PrimTyConName :: Name word8X32PrimTyConName = mkPrimTc (fsLit "Word8X32#") word8X32PrimTyConKey word8X32PrimTyCon word8X32PrimTy :: Type word8X32PrimTy = mkTyConTy word8X32PrimTyCon word8X32PrimTyCon :: TyCon word8X32PrimTyCon = pcPrimTyCon0 word8X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, word8ElemRepDataConTy]) word16X16PrimTyConName :: Name word16X16PrimTyConName = mkPrimTc (fsLit "Word16X16#") word16X16PrimTyConKey word16X16PrimTyCon word16X16PrimTy :: Type word16X16PrimTy = mkTyConTy word16X16PrimTyCon word16X16PrimTyCon :: TyCon word16X16PrimTyCon = pcPrimTyCon0 word16X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, word16ElemRepDataConTy]) word32X8PrimTyConName :: Name word32X8PrimTyConName = mkPrimTc (fsLit "Word32X8#") word32X8PrimTyConKey word32X8PrimTyCon word32X8PrimTy :: Type word32X8PrimTy = mkTyConTy word32X8PrimTyCon word32X8PrimTyCon :: TyCon word32X8PrimTyCon = pcPrimTyCon0 word32X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, word32ElemRepDataConTy]) word64X4PrimTyConName :: Name word64X4PrimTyConName = mkPrimTc (fsLit "Word64X4#") word64X4PrimTyConKey word64X4PrimTyCon word64X4PrimTy :: Type word64X4PrimTy = mkTyConTy word64X4PrimTyCon word64X4PrimTyCon :: TyCon 
word64X4PrimTyCon = pcPrimTyCon0 word64X4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, word64ElemRepDataConTy]) word8X64PrimTyConName :: Name word8X64PrimTyConName = mkPrimTc (fsLit "Word8X64#") word8X64PrimTyConKey word8X64PrimTyCon word8X64PrimTy :: Type word8X64PrimTy = mkTyConTy word8X64PrimTyCon word8X64PrimTyCon :: TyCon word8X64PrimTyCon = pcPrimTyCon0 word8X64PrimTyConName (TyConApp vecRepDataConTyCon [vec64DataConTy, word8ElemRepDataConTy]) word16X32PrimTyConName :: Name word16X32PrimTyConName = mkPrimTc (fsLit "Word16X32#") word16X32PrimTyConKey word16X32PrimTyCon word16X32PrimTy :: Type word16X32PrimTy = mkTyConTy word16X32PrimTyCon word16X32PrimTyCon :: TyCon word16X32PrimTyCon = pcPrimTyCon0 word16X32PrimTyConName (TyConApp vecRepDataConTyCon [vec32DataConTy, word16ElemRepDataConTy]) word32X16PrimTyConName :: Name word32X16PrimTyConName = mkPrimTc (fsLit "Word32X16#") word32X16PrimTyConKey word32X16PrimTyCon word32X16PrimTy :: Type word32X16PrimTy = mkTyConTy word32X16PrimTyCon word32X16PrimTyCon :: TyCon word32X16PrimTyCon = pcPrimTyCon0 word32X16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, word32ElemRepDataConTy]) word64X8PrimTyConName :: Name word64X8PrimTyConName = mkPrimTc (fsLit "Word64X8#") word64X8PrimTyConKey word64X8PrimTyCon word64X8PrimTy :: Type word64X8PrimTy = mkTyConTy word64X8PrimTyCon word64X8PrimTyCon :: TyCon word64X8PrimTyCon = pcPrimTyCon0 word64X8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, word64ElemRepDataConTy]) floatX4PrimTyConName :: Name floatX4PrimTyConName = mkPrimTc (fsLit "FloatX4#") floatX4PrimTyConKey floatX4PrimTyCon floatX4PrimTy :: Type floatX4PrimTy = mkTyConTy floatX4PrimTyCon floatX4PrimTyCon :: TyCon floatX4PrimTyCon = pcPrimTyCon0 floatX4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, floatElemRepDataConTy]) doubleX2PrimTyConName :: Name doubleX2PrimTyConName = mkPrimTc (fsLit "DoubleX2#") doubleX2PrimTyConKey doubleX2PrimTyCon doubleX2PrimTy :: Type 
doubleX2PrimTy = mkTyConTy doubleX2PrimTyCon doubleX2PrimTyCon :: TyCon doubleX2PrimTyCon = pcPrimTyCon0 doubleX2PrimTyConName (TyConApp vecRepDataConTyCon [vec2DataConTy, doubleElemRepDataConTy]) floatX8PrimTyConName :: Name floatX8PrimTyConName = mkPrimTc (fsLit "FloatX8#") floatX8PrimTyConKey floatX8PrimTyCon floatX8PrimTy :: Type floatX8PrimTy = mkTyConTy floatX8PrimTyCon floatX8PrimTyCon :: TyCon floatX8PrimTyCon = pcPrimTyCon0 floatX8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, floatElemRepDataConTy]) doubleX4PrimTyConName :: Name doubleX4PrimTyConName = mkPrimTc (fsLit "DoubleX4#") doubleX4PrimTyConKey doubleX4PrimTyCon doubleX4PrimTy :: Type doubleX4PrimTy = mkTyConTy doubleX4PrimTyCon doubleX4PrimTyCon :: TyCon doubleX4PrimTyCon = pcPrimTyCon0 doubleX4PrimTyConName (TyConApp vecRepDataConTyCon [vec4DataConTy, doubleElemRepDataConTy]) floatX16PrimTyConName :: Name floatX16PrimTyConName = mkPrimTc (fsLit "FloatX16#") floatX16PrimTyConKey floatX16PrimTyCon floatX16PrimTy :: Type floatX16PrimTy = mkTyConTy floatX16PrimTyCon floatX16PrimTyCon :: TyCon floatX16PrimTyCon = pcPrimTyCon0 floatX16PrimTyConName (TyConApp vecRepDataConTyCon [vec16DataConTy, floatElemRepDataConTy]) doubleX8PrimTyConName :: Name doubleX8PrimTyConName = mkPrimTc (fsLit "DoubleX8#") doubleX8PrimTyConKey doubleX8PrimTyCon doubleX8PrimTy :: Type doubleX8PrimTy = mkTyConTy doubleX8PrimTyCon doubleX8PrimTyCon :: TyCon doubleX8PrimTyCon = pcPrimTyCon0 doubleX8PrimTyConName (TyConApp vecRepDataConTyCon [vec8DataConTy, doubleElemRepDataConTy]) ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-vector-uniques.hs-incl0000644000000000000000000000446214472400111026602 0ustar0000000000000000int8X16PrimTyConKey :: Unique int8X16PrimTyConKey = mkPreludeTyConUnique 300 int16X8PrimTyConKey :: Unique int16X8PrimTyConKey = mkPreludeTyConUnique 301 int32X4PrimTyConKey :: Unique int32X4PrimTyConKey = mkPreludeTyConUnique 302 int64X2PrimTyConKey :: Unique int64X2PrimTyConKey = 
mkPreludeTyConUnique 303 int8X32PrimTyConKey :: Unique int8X32PrimTyConKey = mkPreludeTyConUnique 304 int16X16PrimTyConKey :: Unique int16X16PrimTyConKey = mkPreludeTyConUnique 305 int32X8PrimTyConKey :: Unique int32X8PrimTyConKey = mkPreludeTyConUnique 306 int64X4PrimTyConKey :: Unique int64X4PrimTyConKey = mkPreludeTyConUnique 307 int8X64PrimTyConKey :: Unique int8X64PrimTyConKey = mkPreludeTyConUnique 308 int16X32PrimTyConKey :: Unique int16X32PrimTyConKey = mkPreludeTyConUnique 309 int32X16PrimTyConKey :: Unique int32X16PrimTyConKey = mkPreludeTyConUnique 310 int64X8PrimTyConKey :: Unique int64X8PrimTyConKey = mkPreludeTyConUnique 311 word8X16PrimTyConKey :: Unique word8X16PrimTyConKey = mkPreludeTyConUnique 312 word16X8PrimTyConKey :: Unique word16X8PrimTyConKey = mkPreludeTyConUnique 313 word32X4PrimTyConKey :: Unique word32X4PrimTyConKey = mkPreludeTyConUnique 314 word64X2PrimTyConKey :: Unique word64X2PrimTyConKey = mkPreludeTyConUnique 315 word8X32PrimTyConKey :: Unique word8X32PrimTyConKey = mkPreludeTyConUnique 316 word16X16PrimTyConKey :: Unique word16X16PrimTyConKey = mkPreludeTyConUnique 317 word32X8PrimTyConKey :: Unique word32X8PrimTyConKey = mkPreludeTyConUnique 318 word64X4PrimTyConKey :: Unique word64X4PrimTyConKey = mkPreludeTyConUnique 319 word8X64PrimTyConKey :: Unique word8X64PrimTyConKey = mkPreludeTyConUnique 320 word16X32PrimTyConKey :: Unique word16X32PrimTyConKey = mkPreludeTyConUnique 321 word32X16PrimTyConKey :: Unique word32X16PrimTyConKey = mkPreludeTyConUnique 322 word64X8PrimTyConKey :: Unique word64X8PrimTyConKey = mkPreludeTyConUnique 323 floatX4PrimTyConKey :: Unique floatX4PrimTyConKey = mkPreludeTyConUnique 324 doubleX2PrimTyConKey :: Unique doubleX2PrimTyConKey = mkPreludeTyConUnique 325 floatX8PrimTyConKey :: Unique floatX8PrimTyConKey = mkPreludeTyConUnique 326 doubleX4PrimTyConKey :: Unique doubleX4PrimTyConKey = mkPreludeTyConUnique 327 floatX16PrimTyConKey :: Unique floatX16PrimTyConKey = mkPreludeTyConUnique 328 
doubleX8PrimTyConKey :: Unique doubleX8PrimTyConKey = mkPreludeTyConUnique 329 ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/compiler/build/primop-docs.hs-incl0000644000000000000000000032404114472400111024537 0ustar0000000000000000primOpDocs = [ ("*#","Low word of signed integer multiply.") , ("timesInt2#","Return a triple (isHighNeeded,high,low) where high and low are respectively\n the high and low bits of the double-word result. isHighNeeded is a cheap way\n to test if the high word is a sign-extension of the low word (isHighNeeded =\n 0#) or not (isHighNeeded = 1#).") , ("mulIntMayOflo#","Return non-zero if there is any possibility that the upper word of a\n signed integer multiply might contain useful information. Return\n zero only if you are completely sure that no overflow can occur.\n On a 32-bit platform, the recommended implementation is to do a\n 32 x 32 -> 64 signed multiply, and subtract result[63:32] from\n (result[31] >>signed 31). If this is zero, meaning that the\n upper word is merely a sign extension of the lower one, no\n overflow can occur.\n\n On a 64-bit platform it is not always possible to\n acquire the top 64 bits of the result. Therefore, a recommended\n implementation is to take the absolute value of both operands, and\n return 0 iff bits[63:31] of them are zero, since that means that their\n magnitudes fit within 31 bits, so the magnitude of the product must fit\n into 62 bits.\n\n If in doubt, return non-zero, but do make an effort to create the\n correct answer for small args, since otherwise the performance of\n @(*) :: Integer -> Integer -> Integer@ will be poor.\n ") , ("quotInt#","Rounds towards zero. The behavior is undefined if the second argument is\n zero.\n ") , ("remInt#","Satisfies @(quotInt\\# x y) *\\# y +\\# (remInt\\# x y) == x@. 
The\n behavior is undefined if the second argument is zero.\n ") , ("quotRemInt#","Rounds towards zero.") , ("andI#","Bitwise \"and\".") , ("orI#","Bitwise \"or\".") , ("xorI#","Bitwise \"xor\".") , ("notI#","Bitwise \"not\", also known as the binary complement.") , ("negateInt#","Unary negation.\n Since the negative @Int#@ range extends one further than the\n positive range, @negateInt#@ of the most negative number is an\n identity operation. This way, @negateInt#@ is always its own inverse.") , ("addIntC#","Add signed integers reporting overflow.\n First member of result is the sum truncated to an @Int#@;\n second member is zero if the true sum fits in an @Int#@,\n nonzero if overflow occurred (the sum is either too large\n or too small to fit in an @Int#@).") , ("subIntC#","Subtract signed integers reporting overflow.\n First member of result is the difference truncated to an @Int#@;\n second member is zero if the true difference fits in an @Int#@,\n nonzero if overflow occurred (the difference is either too large\n or too small to fit in an @Int#@).") , ("int2Float#","Convert an @Int#@ to the corresponding @Float#@ with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @int2Float# 1# == 1.0#@") , ("int2Double#","Convert an @Int#@ to the corresponding @Double#@ with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @int2Double# 1# == 1.0##@") , ("word2Float#","Convert an @Word#@ to the corresponding @Float#@ with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @word2Float# 1## == 1.0#@") , ("word2Double#","Convert an @Word#@ to the corresponding @Double#@ with the same\n integral value (up to truncation due to floating-point precision). e.g.\n @word2Double# 1## == 1.0##@") , ("uncheckedIShiftL#","Shift left. Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , ("uncheckedIShiftRA#","Shift right arithmetic. 
Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , ("uncheckedIShiftRL#","Shift right logical. Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , ("addWordC#","Add unsigned integers reporting overflow.\n The first element of the pair is the result. The second element is\n the carry flag, which is nonzero on overflow. See also @plusWord2#@.") , ("subWordC#","Subtract unsigned integers reporting overflow.\n The first element of the pair is the result. The second element is\n the carry flag, which is nonzero on overflow.") , ("plusWord2#","Add unsigned integers, with the high part (carry) in the first\n component of the returned pair and the low part in the second\n component of the pair. See also @addWordC#@.") , ("quotRemWord2#"," Takes high word of dividend, then low word of dividend, then divisor.\n Requires that high word < divisor.") , ("uncheckedShiftL#","Shift left logical. Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , ("uncheckedShiftRL#","Shift right logical. 
Result undefined if shift amount is not\n in the range 0 to word size - 1 inclusive.") , ("popCnt8#","Count the number of set bits in the lower 8 bits of a word.") , ("popCnt16#","Count the number of set bits in the lower 16 bits of a word.") , ("popCnt32#","Count the number of set bits in the lower 32 bits of a word.") , ("popCnt64#","Count the number of set bits in a 64-bit word.") , ("popCnt#","Count the number of set bits in a word.") , ("pdep8#","Deposit bits to lower 8 bits of a word at locations specified by a mask.") , ("pdep16#","Deposit bits to lower 16 bits of a word at locations specified by a mask.") , ("pdep32#","Deposit bits to lower 32 bits of a word at locations specified by a mask.") , ("pdep64#","Deposit bits to a word at locations specified by a mask.") , ("pdep#","Deposit bits to a word at locations specified by a mask.") , ("pext8#","Extract bits from lower 8 bits of a word at locations specified by a mask.") , ("pext16#","Extract bits from lower 16 bits of a word at locations specified by a mask.") , ("pext32#","Extract bits from lower 32 bits of a word at locations specified by a mask.") , ("pext64#","Extract bits from a word at locations specified by a mask.") , ("pext#","Extract bits from a word at locations specified by a mask.") , ("clz8#","Count leading zeros in the lower 8 bits of a word.") , ("clz16#","Count leading zeros in the lower 16 bits of a word.") , ("clz32#","Count leading zeros in the lower 32 bits of a word.") , ("clz64#","Count leading zeros in a 64-bit word.") , ("clz#","Count leading zeros in a word.") , ("ctz8#","Count trailing zeros in the lower 8 bits of a word.") , ("ctz16#","Count trailing zeros in the lower 16 bits of a word.") , ("ctz32#","Count trailing zeros in the lower 32 bits of a word.") , ("ctz64#","Count trailing zeros in a 64-bit word.") , ("ctz#","Count trailing zeros in a word.") , ("byteSwap16#","Swap bytes in the lower 16 bits of a word. The higher bytes are undefined. 
") , ("byteSwap32#","Swap bytes in the lower 32 bits of a word. The higher bytes are undefined. ") , ("byteSwap64#","Swap bytes in a 64 bits of a word.") , ("byteSwap#","Swap bytes in a word.") , ("bitReverse8#","Reverse the order of the bits in a 8-bit word.") , ("bitReverse16#","Reverse the order of the bits in a 16-bit word.") , ("bitReverse32#","Reverse the order of the bits in a 32-bit word.") , ("bitReverse64#","Reverse the order of the bits in a 64-bit word.") , ("bitReverse#","Reverse the order of the bits in a word.") , ("double2Int#","Truncates a @Double#@ value to the nearest @Int#@.\n Results are undefined if the truncation if truncation yields\n a value outside the range of @Int#@.") , ("**##","Exponentiation.") , ("decodeDouble_2Int#","Convert to integer.\n First component of the result is -1 or 1, indicating the sign of the\n mantissa. The next two are the high and low 32 bits of the mantissa\n respectively, and the last is the exponent.") , ("decodeDouble_Int64#","Decode @Double\\#@ into mantissa and base-2 exponent.") , ("float2Int#","Truncates a @Float#@ value to the nearest @Int#@.\n Results are undefined if the truncation if truncation yields\n a value outside the range of @Int#@.") , ("decodeFloat_Int#","Convert to integers.\n First @Int\\#@ in result is the mantissa; second is the exponent.") , ("newArray#","Create a new mutable array with the specified number of elements,\n in the specified state thread,\n with each element containing the specified initial value.") , ("readArray#","Read from specified index of mutable array. Result is not yet evaluated.") , ("writeArray#","Write to specified index of mutable array.") , ("sizeofArray#","Return the number of elements in the array.") , ("sizeofMutableArray#","Return the number of elements in the array.") , ("indexArray#","Read from the specified index of an immutable array. The result is packaged\n into an unboxed unary tuple; the result itself is not yet\n evaluated. 
Pattern matching on the tuple forces the indexing of the\n array to happen but does not evaluate the element itself. Evaluating\n the thunk prevents additional thunks from building up on the\n heap. Avoiding these thunks, in turn, reduces references to the\n argument array, allowing it to be garbage collected more promptly.") , ("unsafeFreezeArray#","Make a mutable array immutable, without copying.") , ("unsafeThawArray#","Make an immutable array mutable, without copying.") , ("copyArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. Both arrays must fully contain the\n specified ranges, but this is not checked. The two arrays must not\n be the same array in different states, but this is not checked\n either.") , ("copyMutableArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. Both arrays must fully contain the\n specified ranges, but this is not checked. In the case where\n the source and destination are the same array the source and\n destination regions may overlap.") , ("cloneArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , ("cloneMutableArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. 
The provided array must fully contain the specified\n range, but this is not checked.") , ("freezeArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , ("thawArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , ("casArray#","Given an array, an offset, the expected old value, and\n the new value, perform an atomic compare and swap (i.e. write the new\n value if the current value and the old value are the same pointer).\n Returns 0 if the swap succeeds and 1 if it fails. Additionally, returns\n the element at the offset after the operation completes. This means that\n on a success the new value is returned, and on a failure the actual old\n value (not the expected one) is returned. Implies a full memory barrier.\n The use of a pointer equality on a boxed value makes this function harder\n to use correctly than @casIntArray\\#@. All of the difficulties\n of using @reallyUnsafePtrEquality\\#@ correctly apply to\n @casArray\\#@ as well.\n ") , ("newSmallArray#","Create a new mutable array with the specified number of elements,\n in the specified state thread,\n with each element containing the specified initial value.") , ("shrinkSmallMutableArray#","Shrink mutable array to new specified size, in\n the specified state thread. The new size argument must be less than or\n equal to the current size as reported by @getSizeofSmallMutableArray\\#@.") , ("readSmallArray#","Read from specified index of mutable array. 
Result is not yet evaluated.") , ("writeSmallArray#","Write to specified index of mutable array.") , ("sizeofSmallArray#","Return the number of elements in the array.") , ("sizeofSmallMutableArray#","Return the number of elements in the array. Note that this is deprecated\n as it is unsafe in the presence of shrink and resize operations on the\n same small mutable array.") , ("getSizeofSmallMutableArray#","Return the number of elements in the array.") , ("indexSmallArray#","Read from specified index of immutable array. Result is packaged into\n an unboxed singleton; the result itself is not yet evaluated.") , ("unsafeFreezeSmallArray#","Make a mutable array immutable, without copying.") , ("unsafeThawSmallArray#","Make an immutable array mutable, without copying.") , ("copySmallArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. Both arrays must fully contain the\n specified ranges, but this is not checked. The two arrays must not\n be the same array in different states, but this is not checked\n either.") , ("copySmallMutableArray#","Given a source array, an offset into the source array, a\n destination array, an offset into the destination array, and a\n number of elements to copy, copy the elements from the source array\n to the destination array. The source and destination arrays can\n refer to the same array. Both arrays must fully contain the\n specified ranges, but this is not checked.\n The regions are allowed to overlap, although this is only possible when the same\n array is provided as both the source and the destination. ") , ("cloneSmallArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. 
The provided array must fully contain the specified\n range, but this is not checked.") , ("cloneSmallMutableArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , ("freezeSmallArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , ("thawSmallArray#","Given a source array, an offset into the source array, and a number\n of elements to copy, create a new array with the elements from the\n source array. The provided array must fully contain the specified\n range, but this is not checked.") , ("casSmallArray#","Unsafe, machine-level atomic compare and swap on an element within an array.\n See the documentation of @casArray\\#@.") , ("ByteArray#","\n A boxed, unlifted datatype representing a region of raw memory in the garbage-collected heap,\n which is not scanned for pointers during garbage collection.\n\n It is created by freezing a 'MutableByteArray#' with 'unsafeFreezeByteArray#'.\n Freezing is essentially a no-op, as MutableByteArray# and ByteArray# share the same heap structure under the hood.\n\n The immutable and mutable variants are commonly used for scenarios requiring high-performance data structures,\n like Text, Primitive Vector, Unboxed Array, and ShortByteString.\n \n Another application of fundamental importance is 'Integer', which is backed by 'ByteArray#'.\n \n The representation on the heap of a Byte Array is:\n \n > +------------+-----------------+-----------------------+\n > | | | |\n > | HEADER | SIZE (in bytes) | PAYLOAD |\n > | | | |\n > +------------+-----------------+-----------------------+\n \n To obtain a pointer to actual payload (e.g., for FFI 
purposes) use 'byteArrayContents#' or 'mutableByteArrayContents#'.\n \n Alternatively, enabling the UnliftedFFITypes extension\n allows to mention 'ByteArray#' and 'MutableByteArray#' in FFI type signatures directly.\n") , ("MutableByteArray#"," A mutable ByteAray#. It can be created in three ways:\n\n * 'newByteArray#': Create an unpinned array.\n * 'newPinnedByteArray#': This will create a pinned array,\n * 'newAlignedPinnedByteArray#': This will create a pinned array, with a custom alignment.\n\n Unpinned arrays can be moved around during garbage collection, so you must not store or pass pointers to these values\n if there is a chance for the garbage collector to kick in. That said, even unpinned arrays can be passed to unsafe FFI calls,\n because no garbage collection happens during these unsafe calls\n (see [Guaranteed Call Safety](https://ghc.gitlab.haskell.org/ghc/doc/users_guide/exts/ffi.html#guaranteed-call-safety)\n in the GHC Manual). For safe FFI calls, byte arrays must be not only pinned, but also kept alive by means of the keepAlive# function\n for the duration of a call (that's because garbage collection cannot move a pinned array, but is free to scrap it altogether).\n") , ("newByteArray#","Create a new mutable byte array of specified size (in bytes), in\n the specified state thread. 
The size of the memory underlying the\n array will be rounded up to the platform's word size.") , ("newPinnedByteArray#","Like 'newByteArray#' but GC guarantees not to move it.") , ("newAlignedPinnedByteArray#","Like 'newPinnedByteArray#' but allow specifying an arbitrary\n alignment, which must be a power of two.") , ("isMutableByteArrayPinned#","Determine whether a @MutableByteArray\\#@ is guaranteed not to move\n during GC.") , ("isByteArrayPinned#","Determine whether a @ByteArray\\#@ is guaranteed not to move during GC.") , ("byteArrayContents#","Intended for use with pinned arrays; otherwise very unsafe!") , ("mutableByteArrayContents#","Intended for use with pinned arrays; otherwise very unsafe!") , ("shrinkMutableByteArray#","Shrink mutable byte array to new specified size (in bytes), in\n the specified state thread. The new size argument must be less than or\n equal to the current size as reported by @getSizeofMutableByteArray\\#@.") , ("resizeMutableByteArray#","Resize (unpinned) mutable byte array to new specified size (in bytes).\n The returned @MutableByteArray\\#@ is either the original\n @MutableByteArray\\#@ resized in-place or, if not possible, a newly\n allocated (unpinned) @MutableByteArray\\#@ (with the original content\n copied over).\n\n To avoid undefined behaviour, the original @MutableByteArray\\#@ shall\n not be accessed anymore after a @resizeMutableByteArray\\#@ has been\n performed. Moreover, no reference to the old one should be kept in order\n to allow garbage collection of the original @MutableByteArray\\#@ in\n case a new @MutableByteArray\\#@ had to be allocated.") , ("unsafeFreezeByteArray#","Make a mutable byte array immutable, without copying.") , ("sizeofByteArray#","Return the size of the array in bytes.") , ("sizeofMutableByteArray#","Return the size of the array in bytes. 
Note that this is deprecated as it is\n unsafe in the presence of shrink and resize operations on the same mutable byte\n array.") , ("getSizeofMutableByteArray#","Return the number of elements in the array.") , ("indexCharArray#","Read a 8-bit character; offset in bytes.") , ("indexWideCharArray#","Read a 32-bit character; offset in 4-byte words.") , ("indexIntArray#","Read a word-sized integer; offset in machine words.") , ("indexWordArray#","Read a word-sized unsigned integer; offset in machine words.") , ("indexAddrArray#","Read a machine address; offset in machine words.") , ("indexFloatArray#","Read a single-precision floating-point value; offset in 4-byte words.") , ("indexDoubleArray#","Read a double-precision floating-point value; offset in 8-byte words.") , ("indexStablePtrArray#","Read a @StablePtr#@ value; offset in machine words.") , ("indexInt8Array#","Read a 8-bit signed integer; offset in bytes.") , ("indexInt16Array#","Read a 16-bit signed integer; offset in 2-byte words.") , ("indexInt32Array#","Read a 32-bit signed integer; offset in 4-byte words.") , ("indexInt64Array#","Read a 64-bit signed integer; offset in 8-byte words.") , ("indexWord8Array#","Read a 8-bit unsigned integer; offset in bytes.") , ("indexWord16Array#","Read a 16-bit unsigned integer; offset in 2-byte words.") , ("indexWord32Array#","Read a 32-bit unsigned integer; offset in 4-byte words.") , ("indexWord64Array#","Read a 64-bit unsigned integer; offset in 8-byte words.") , ("indexWord8ArrayAsChar#","Read a 8-bit character; offset in bytes.") , ("indexWord8ArrayAsWideChar#","Read a 32-bit character; offset in bytes.") , ("indexWord8ArrayAsInt#","Read a word-sized integer; offset in bytes.") , ("indexWord8ArrayAsWord#","Read a word-sized unsigned integer; offset in bytes.") , ("indexWord8ArrayAsAddr#","Read a machine address; offset in bytes.") , ("indexWord8ArrayAsFloat#","Read a single-precision floating-point value; offset in bytes.") , ("indexWord8ArrayAsDouble#","Read a 
double-precision floating-point value; offset in bytes.") , ("indexWord8ArrayAsStablePtr#","Read a @StablePtr#@ value; offset in bytes.") , ("indexWord8ArrayAsInt16#","Read a 16-bit signed integer; offset in bytes.") , ("indexWord8ArrayAsInt32#","Read a 32-bit signed integer; offset in bytes.") , ("indexWord8ArrayAsInt64#","Read a 64-bit signed integer; offset in bytes.") , ("indexWord8ArrayAsWord16#","Read a 16-bit unsigned integer; offset in bytes.") , ("indexWord8ArrayAsWord32#","Read a 32-bit unsigned integer; offset in bytes.") , ("indexWord8ArrayAsWord64#","Read a 64-bit unsigned integer; offset in bytes.") , ("readCharArray#","Read a 8-bit character; offset in bytes.") , ("readWideCharArray#","Read a 32-bit character; offset in 4-byte words.") , ("readIntArray#","Read a word-sized integer; offset in machine words.") , ("readWordArray#","Read a word-sized unsigned integer; offset in machine words.") , ("readAddrArray#","Read a machine address; offset in machine words.") , ("readFloatArray#","Read a single-precision floating-point value; offset in 4-byte words.") , ("readDoubleArray#","Read a double-precision floating-point value; offset in 8-byte words.") , ("readStablePtrArray#","Read a @StablePtr#@ value; offset in machine words.") , ("readInt8Array#","Read a 8-bit signed integer; offset in bytes.") , ("readInt16Array#","Read a 16-bit signed integer; offset in 2-byte words.") , ("readInt32Array#","Read a 32-bit signed integer; offset in 4-byte words.") , ("readInt64Array#","Read a 64-bit signed integer; offset in 8-byte words.") , ("readWord8Array#","Read a 8-bit unsigned integer; offset in bytes.") , ("readWord16Array#","Read a 16-bit unsigned integer; offset in 2-byte words.") , ("readWord32Array#","Read a 32-bit unsigned integer; offset in 4-byte words.") , ("readWord64Array#","Read a 64-bit unsigned integer; offset in 8-byte words.") , ("readWord8ArrayAsChar#","Read a 8-bit character; offset in bytes.") , ("readWord8ArrayAsWideChar#","Read a 32-bit 
character; offset in bytes.") , ("readWord8ArrayAsInt#","Read a word-sized integer; offset in bytes.") , ("readWord8ArrayAsWord#","Read a word-sized unsigned integer; offset in bytes.") , ("readWord8ArrayAsAddr#","Read a machine address; offset in bytes.") , ("readWord8ArrayAsFloat#","Read a single-precision floating-point value; offset in bytes.") , ("readWord8ArrayAsDouble#","Read a double-precision floating-point value; offset in bytes.") , ("readWord8ArrayAsStablePtr#","Read a @StablePtr#@ value; offset in bytes.") , ("readWord8ArrayAsInt16#","Read a 16-bit signed integer; offset in bytes.") , ("readWord8ArrayAsInt32#","Read a 32-bit signed integer; offset in bytes.") , ("readWord8ArrayAsInt64#","Read a 64-bit signed integer; offset in bytes.") , ("readWord8ArrayAsWord16#","Read a 16-bit unsigned integer; offset in bytes.") , ("readWord8ArrayAsWord32#","Read a 32-bit unsigned integer; offset in bytes.") , ("readWord8ArrayAsWord64#","Read a 64-bit unsigned integer; offset in bytes.") , ("writeCharArray#","Write a 8-bit character; offset in bytes.") , ("writeWideCharArray#","Write a 32-bit character; offset in 4-byte words.") , ("writeIntArray#","Write a word-sized integer; offset in machine words.") , ("writeWordArray#","Write a word-sized unsigned integer; offset in machine words.") , ("writeAddrArray#","Write a machine address; offset in machine words.") , ("writeFloatArray#","Write a single-precision floating-point value; offset in 4-byte words.") , ("writeDoubleArray#","Write a double-precision floating-point value; offset in 8-byte words.") , ("writeStablePtrArray#","Write a @StablePtr#@ value; offset in machine words.") , ("writeInt8Array#","Write a 8-bit signed integer; offset in bytes.") , ("writeInt16Array#","Write a 16-bit signed integer; offset in 2-byte words.") , ("writeInt32Array#","Write a 32-bit signed integer; offset in 4-byte words.") , ("writeInt64Array#","Write a 64-bit signed integer; offset in 8-byte words.") , ("writeWord8Array#","Write a 
8-bit unsigned integer; offset in bytes.") , ("writeWord16Array#","Write a 16-bit unsigned integer; offset in 2-byte words.") , ("writeWord32Array#","Write a 32-bit unsigned integer; offset in 4-byte words.") , ("writeWord64Array#","Write a 64-bit unsigned integer; offset in 8-byte words.") , ("writeWord8ArrayAsChar#","Write a 8-bit character; offset in bytes.") , ("writeWord8ArrayAsWideChar#","Write a 32-bit character; offset in bytes.") , ("writeWord8ArrayAsInt#","Write a word-sized integer; offset in bytes.") , ("writeWord8ArrayAsWord#","Write a word-sized unsigned integer; offset in bytes.") , ("writeWord8ArrayAsAddr#","Write a machine address; offset in bytes.") , ("writeWord8ArrayAsFloat#","Write a single-precision floating-point value; offset in bytes.") , ("writeWord8ArrayAsDouble#","Write a double-precision floating-point value; offset in bytes.") , ("writeWord8ArrayAsStablePtr#","Write a @StablePtr#@ value; offset in bytes.") , ("writeWord8ArrayAsInt16#","Write a 16-bit signed integer; offset in bytes.") , ("writeWord8ArrayAsInt32#","Write a 32-bit signed integer; offset in bytes.") , ("writeWord8ArrayAsInt64#","Write a 64-bit signed integer; offset in bytes.") , ("writeWord8ArrayAsWord16#","Write a 16-bit unsigned integer; offset in bytes.") , ("writeWord8ArrayAsWord32#","Write a 32-bit unsigned integer; offset in bytes.") , ("writeWord8ArrayAsWord64#","Write a 64-bit unsigned integer; offset in bytes.") , ("compareByteArrays#","@compareByteArrays# src1 src1_ofs src2 src2_ofs n@ compares\n @n@ bytes starting at offset @src1_ofs@ in the first\n @ByteArray#@ @src1@ to the range of @n@ bytes\n (i.e. same length) starting at offset @src2_ofs@ of the second\n @ByteArray#@ @src2@. Both arrays must fully contain the\n specified ranges, but this is not checked. 
Returns an @Int#@\n less than, equal to, or greater than zero if the range is found,\n respectively, to be byte-wise lexicographically less than, to\n match, or be greater than the second range.") , ("copyByteArray#","@copyByteArray# src src_ofs dst dst_ofs n@ copies the range\n starting at offset @src_ofs@ of length @n@ from the\n @ByteArray#@ @src@ to the @MutableByteArray#@ @dst@\n starting at offset @dst_ofs@. Both arrays must fully contain\n the specified ranges, but this is not checked. The two arrays must\n not be the same array in different states, but this is not checked\n either.") , ("copyMutableByteArray#","Copy a range of the first MutableByteArray\\# to the specified region in the second MutableByteArray\\#.\n Both arrays must fully contain the specified ranges, but this is not checked. The regions are\n allowed to overlap, although this is only possible when the same array is provided\n as both the source and the destination.") , ("copyByteArrayToAddr#","Copy a range of the ByteArray\\# to the memory range starting at the Addr\\#.\n The ByteArray\\# and the memory region at Addr\\# must fully contain the\n specified ranges, but this is not checked. The Addr\\# must not point into the\n ByteArray\\# (e.g. if the ByteArray\\# were pinned), but this is not checked\n either.") , ("copyMutableByteArrayToAddr#","Copy a range of the MutableByteArray\\# to the memory range starting at the\n Addr\\#. The MutableByteArray\\# and the memory region at Addr\\# must fully\n contain the specified ranges, but this is not checked. The Addr\\# must not\n point into the MutableByteArray\\# (e.g. if the MutableByteArray\\# were\n pinned), but this is not checked either.") , ("copyAddrToByteArray#","Copy a memory range starting at the Addr\\# to the specified range in the\n MutableByteArray\\#. The memory region at Addr\\# and the ByteArray\\# must fully\n contain the specified ranges, but this is not checked. 
The Addr\\# must not\n point into the MutableByteArray\\# (e.g. if the MutableByteArray\\# were pinned),\n but this is not checked either.") , ("setByteArray#","@setByteArray# ba off len c@ sets the byte range @[off, off+len)@ of\n the @MutableByteArray#@ to the byte @c@.") , ("atomicReadIntArray#","Given an array and an offset in machine words, read an element. The\n index is assumed to be in bounds. Implies a full memory barrier.") , ("atomicWriteIntArray#","Given an array and an offset in machine words, write an element. The\n index is assumed to be in bounds. Implies a full memory barrier.") , ("casIntArray#","Given an array, an offset in machine words, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , ("casInt8Array#","Given an array, an offset in bytes, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , ("casInt16Array#","Given an array, an offset in 16 bit units, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , ("casInt32Array#","Given an array, an offset in 32 bit units, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. 
Implies a full memory\n barrier.") , ("casInt64Array#","Given an array, an offset in 64 bit units, the expected old value, and\n the new value, perform an atomic compare and swap i.e. write the new\n value if the current value matches the provided old value. Returns\n the value of the element before the operation. Implies a full memory\n barrier.") , ("fetchAddIntArray#","Given an array, and offset in machine words, and a value to add,\n atomically add the value to the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchSubIntArray#","Given an array, and offset in machine words, and a value to subtract,\n atomically subtract the value from the element. Returns the value of\n the element before the operation. Implies a full memory barrier.") , ("fetchAndIntArray#","Given an array, and offset in machine words, and a value to AND,\n atomically AND the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchNandIntArray#","Given an array, and offset in machine words, and a value to NAND,\n atomically NAND the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchOrIntArray#","Given an array, and offset in machine words, and a value to OR,\n atomically OR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchXorIntArray#","Given an array, and offset in machine words, and a value to XOR,\n atomically XOR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("Addr#"," An arbitrary machine address assumed to point outside\n the garbage-collected heap. ") , ("nullAddr#"," The null address. 
") , ("minusAddr#","Result is meaningless if two @Addr\\#@s are so far apart that their\n difference doesn't fit in an @Int\\#@.") , ("remAddr#","Return the remainder when the @Addr\\#@ arg, treated like an @Int\\#@,\n is divided by the @Int\\#@ arg.") , ("addr2Int#","Coerce directly from address to int.") , ("int2Addr#","Coerce directly from int to address.") , ("indexCharOffAddr#","Reads 8-bit character; offset in bytes.") , ("indexWideCharOffAddr#","Reads 31-bit character; offset in 4-byte words.") , ("readCharOffAddr#","Reads 8-bit character; offset in bytes.") , ("readWideCharOffAddr#","Reads 31-bit character; offset in 4-byte words.") , ("atomicExchangeAddrAddr#","The atomic exchange operation. Atomically exchanges the value at the first address\n with the Addr# given as second argument. Implies a read barrier.") , ("atomicExchangeWordAddr#","The atomic exchange operation. Atomically exchanges the value at the address\n with the given value. Returns the old value. Implies a read barrier.") , ("atomicCasAddrAddr#"," Compare and swap on a word-sized memory location.\n\n Use as: \\s -> atomicCasAddrAddr# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , ("atomicCasWordAddr#"," Compare and swap on a word-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , ("atomicCasWord8Addr#"," Compare and swap on a 8 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr8# location expected desired s\n\n This version always returns the old value read. 
This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , ("atomicCasWord16Addr#"," Compare and swap on a 16 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr16# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , ("atomicCasWord32Addr#"," Compare and swap on a 32 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr32# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , ("atomicCasWord64Addr#"," Compare and swap on a 64 bit-sized and aligned memory location.\n\n Use as: \\s -> atomicCasWordAddr64# location expected desired s\n\n This version always returns the old value read. This follows the normal\n protocol for CAS operations (and matches the underlying instruction on\n most architectures).\n\n Implies a full memory barrier.") , ("fetchAddWordAddr#","Given an address, and a value to add,\n atomically add the value to the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchSubWordAddr#","Given an address, and a value to subtract,\n atomically subtract the value from the element. Returns the value of\n the element before the operation. Implies a full memory barrier.") , ("fetchAndWordAddr#","Given an address, and a value to AND,\n atomically AND the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchNandWordAddr#","Given an address, and a value to NAND,\n atomically NAND the value into the element. 
Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchOrWordAddr#","Given an address, and a value to OR,\n atomically OR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("fetchXorWordAddr#","Given an address, and a value to XOR,\n atomically XOR the value into the element. Returns the value of the\n element before the operation. Implies a full memory barrier.") , ("atomicReadWordAddr#","Given an address, read a machine word. Implies a full memory barrier.") , ("atomicWriteWordAddr#","Given an address, write a machine word. Implies a full memory barrier.") , ("MutVar#","A @MutVar\\#@ behaves like a single-element mutable array.") , ("newMutVar#","Create @MutVar\\#@ with specified initial value in specified state thread.") , ("readMutVar#","Read contents of @MutVar\\#@. Result is not yet evaluated.") , ("writeMutVar#","Write contents of @MutVar\\#@.") , ("atomicModifyMutVar2#"," Modify the contents of a @MutVar\\#@, returning the previous\n contents and the result of applying the given function to the\n previous contents. Note that this isn't strictly\n speaking the correct type for this function; it should really be\n @MutVar\\# s a -> (a -> (a,b)) -> State\\# s -> (\\# State\\# s, a, (a, b) \\#)@,\n but we don't know about pairs here. ") , ("atomicModifyMutVar_#"," Modify the contents of a @MutVar\\#@, returning the previous\n contents and the result of applying the given function to the\n previous contents. ") , ("casMutVar#"," Compare-and-swap: perform a pointer equality test between\n the first value passed to this function and the value\n stored inside the @MutVar\\#@. 
If the pointers are equal,\n replace the stored value with the second value passed to this\n function, otherwise do nothing.\n Returns the final value stored inside the @MutVar\\#@.\n The @Int\\#@ indicates whether a swap took place,\n with @1\\#@ meaning that we didn't swap, and @0\\#@\n that we did.\n Implies a full memory barrier.\n Because the comparison is done on the level of pointers,\n all of the difficulties of using\n @reallyUnsafePtrEquality\\#@ correctly apply to\n @casMutVar\\#@ as well.\n ") , ("newTVar#","Create a new @TVar\\#@ holding a specified initial value.") , ("readTVar#","Read contents of @TVar\\#@ inside an STM transaction,\n i.e. within a call to @atomically\\#@.\n Does not force evaluation of the result.") , ("readTVarIO#","Read contents of @TVar\\#@ outside an STM transaction.\n Does not force evaluation of the result.") , ("writeTVar#","Write contents of @TVar\\#@.") , ("MVar#"," A shared mutable variable (/not/ the same as a @MutVar\\#@!).\n (Note: in a non-concurrent implementation, @(MVar\\# a)@ can be\n represented by @(MutVar\\# (Maybe a))@.) 
") , ("newMVar#","Create new @MVar\\#@; initially empty.") , ("takeMVar#","If @MVar\\#@ is empty, block until it becomes full.\n Then remove and return its contents, and set it empty.") , ("tryTakeMVar#","If @MVar\\#@ is empty, immediately return with integer 0 and value undefined.\n Otherwise, return with integer 1 and contents of @MVar\\#@, and set @MVar\\#@ empty.") , ("putMVar#","If @MVar\\#@ is full, block until it becomes empty.\n Then store value arg as its new contents.") , ("tryPutMVar#","If @MVar\\#@ is full, immediately return with integer 0.\n Otherwise, store value arg as @MVar\\#@'s new contents, and return with integer 1.") , ("readMVar#","If @MVar\\#@ is empty, block until it becomes full.\n Then read its contents without modifying the MVar, without possibility\n of intervention from other threads.") , ("tryReadMVar#","If @MVar\\#@ is empty, immediately return with integer 0 and value undefined.\n Otherwise, return with integer 1 and contents of @MVar\\#@.") , ("isEmptyMVar#","Return 1 if @MVar\\#@ is empty; 0 otherwise.") , ("IOPort#"," A shared I/O port is almost the same as a @MVar\\#@!).\n The main difference is that IOPort has no deadlock detection or\n deadlock breaking code that forcibly releases the lock. ") , ("newIOPort#","Create new @IOPort\\#@; initially empty.") , ("readIOPort#","If @IOPort\\#@ is empty, block until it becomes full.\n Then remove and return its contents, and set it empty.\n Throws an @IOPortException@ if another thread is already\n waiting to read this @IOPort\\#@.") , ("writeIOPort#","If @IOPort\\#@ is full, immediately return with integer 0,\n throwing an @IOPortException@.\n Otherwise, store value arg as @IOPort\\#@'s new contents,\n and return with integer 1. 
") , ("delay#","Sleep specified number of microseconds.") , ("waitRead#","Block until input is available on specified file descriptor.") , ("waitWrite#","Block until output is possible on specified file descriptor.") , ("State#"," @State\\#@ is the primitive, unlifted type of states. It has\n one type parameter, thus @State\\# RealWorld@, or @State\\# s@,\n where s is a type variable. The only purpose of the type parameter\n is to keep different state threads separate. It is represented by\n nothing at all. ") , ("RealWorld"," @RealWorld@ is deeply magical. It is /primitive/, but it is not\n /unlifted/ (hence @ptrArg@). We never manipulate values of type\n @RealWorld@; it's only used in the type system, to parameterise @State\\#@. ") , ("ThreadId#","(In a non-concurrent implementation, this can be a singleton\n type, whose (unique) value is returned by @myThreadId\\#@. The\n other operations can be omitted.)") , ("mkWeak#"," @mkWeak# k v finalizer s@ creates a weak reference to value @k@,\n with an associated reference to some value @v@. If @k@ is still\n alive then @v@ can be retrieved using @deRefWeak#@. Note that\n the type of @k@ must be represented by a pointer (i.e. of kind @TYPE 'LiftedRep@ or @TYPE 'UnliftedRep@). ") , ("addCFinalizerToWeak#"," @addCFinalizerToWeak# fptr ptr flag eptr w@ attaches a C\n function pointer @fptr@ to a weak pointer @w@ as a finalizer. If\n @flag@ is zero, @fptr@ will be called with one argument,\n @ptr@. Otherwise, it will be called with two arguments,\n @eptr@ and @ptr@. @addCFinalizerToWeak#@ returns\n 1 on success, or 0 if @w@ is already dead. ") , ("finalizeWeak#"," Finalize a weak pointer. The return value is an unboxed tuple\n containing the new state of the world and an \"unboxed Maybe\",\n represented by an @Int#@ and a (possibly invalid) finalization\n action. An @Int#@ of @1@ indicates that the finalizer is valid. The\n return value @b@ from the finalizer should be ignored. 
") , ("compactNew#"," Create a new CNF with a single compact block. The argument is\n the capacity of the compact block (in bytes, not words).\n The capacity is rounded up to a multiple of the allocator block size\n and is capped to one mega block. ") , ("compactResize#"," Set the new allocation size of the CNF. This value (in bytes)\n determines the capacity of each compact block in the CNF. It\n does not retroactively affect existing compact blocks in the CNF. ") , ("compactContains#"," Returns 1\\# if the object is contained in the CNF, 0\\# otherwise. ") , ("compactContainsAny#"," Returns 1\\# if the object is in any CNF at all, 0\\# otherwise. ") , ("compactGetFirstBlock#"," Returns the address and the utilized size (in bytes) of the\n first compact block of a CNF.") , ("compactGetNextBlock#"," Given a CNF and the address of one its compact blocks, returns the\n next compact block and its utilized size, or @nullAddr\\#@ if the\n argument was the last compact block in the CNF. ") , ("compactAllocateBlock#"," Attempt to allocate a compact block with the capacity (in\n bytes) given by the first argument. The @Addr\\#@ is a pointer\n to previous compact block of the CNF or @nullAddr\\#@ to create a\n new CNF with a single compact block.\n\n The resulting block is not known to the GC until\n @compactFixupPointers\\#@ is called on it, and care must be taken\n so that the address does not escape or memory will be leaked.\n ") , ("compactFixupPointers#"," Given the pointer to the first block of a CNF and the\n address of the root object in the old address space, fix up\n the internal pointers inside the CNF to account for\n a different position in memory than when it was serialized.\n This method must be called exactly once after importing\n a serialized CNF. It returns the new CNF and the new adjusted\n root address. 
") , ("compactAdd#"," Recursively add a closure and its transitive closure to a\n @Compact\\#@ (a CNF), evaluating any unevaluated components\n at the same time. Note: @compactAdd\\#@ is not thread-safe, so\n only one thread may call @compactAdd\\#@ with a particular\n @Compact\\#@ at any given time. The primop does not\n enforce any mutual exclusion; the caller is expected to\n arrange this. ") , ("compactAddWithSharing#"," Like @compactAdd\\#@, but retains sharing and cycles\n during compaction. ") , ("compactSize#"," Return the total capacity (in bytes) of all the compact blocks\n in the CNF. ") , ("reallyUnsafePtrEquality#"," Returns @1\\#@ if the given pointers are equal and @0\\#@ otherwise. ") , ("numSparks#"," Returns the number of sparks in the local spark pool. ") , ("keepAlive#"," \\tt{keepAlive# x s k} keeps the value \\tt{x} alive during the execution\n of the computation \\tt{k}.\n\n Note that the result type here isn't quite as unrestricted as the\n polymorphic type might suggest; ticket \\#21868 for details. ") , ("BCO"," Primitive bytecode type. ") , ("addrToAny#"," Convert an @Addr\\#@ to a followable Any type. ") , ("anyToAddr#"," Retrieve the address of any Haskell value. This is\n essentially an @unsafeCoerce\\#@, but if implemented as such\n the core lint pass complains and fails to compile.\n As a primop, it is opaque to core/stg, and only appears\n in cmm (where the copy propagation pass will get rid of it).\n Note that \"a\" must be a value, not a thunk! It's too late\n for strictness analysis to enforce this, so you're on your\n own to guarantee this. Also note that @Addr\\#@ is not a GC\n pointer - up to you to guarantee that it does not become\n a dangling pointer immediately after you get it.") , ("mkApUpd0#"," Wrap a BCO in a @AP_UPD@ thunk which will be updated with the value of\n the BCO when evaluated. ") , ("newBCO#"," @newBCO\\# instrs lits ptrs arity bitmap@ creates a new bytecode object. 
The\n resulting object encodes a function of the given arity with the instructions\n encoded in @instrs@, and a static reference table usage bitmap given by\n @bitmap@. ") , ("unpackClosure#"," @unpackClosure\\# closure@ copies the closure and pointers in the\n payload of the given closure into two new arrays, and returns a pointer to\n the first word of the closure's info table, a non-pointer array for the raw\n bytes of the closure, and a pointer array for the pointers in the payload. ") , ("closureSize#"," @closureSize\\# closure@ returns the size of the given closure in\n machine words. ") , ("getCurrentCCS#"," Returns the current @CostCentreStack@ (value is @NULL@ if\n not profiling). Takes a dummy argument which can be used to\n avoid the call to @getCurrentCCS\\#@ being floated out by the\n simplifier, which would result in an uninformative stack\n (\"CAF\"). ") , ("clearCCS#"," Run the supplied IO action with an empty CCS. For example, this\n is used by the interpreter to run an interpreted computation\n without the call stack showing that it was invoked from GHC. ") , ("whereFrom#"," Returns the @InfoProvEnt @ for the info table of the given object\n (value is @NULL@ if the table does not exist or there is no information\n about the closure).") , ("FUN","The builtin function type, written in infix form as @a # m -> b@.\n Values of this type are functions taking inputs of type @a@ and\n producing outputs of type @b@. The multiplicity of the input is\n @m@.\n\n Note that @FUN m a b@ permits representation polymorphism in both\n @a@ and @b@, so that types like @Int\\# -> Int\\#@ can still be\n well-kinded.\n ") , ("realWorld#"," The token used in the implementation of the IO monad as a state monad.\n It does not pass any information at runtime.\n See also @GHC.Magic.runRW\\#@. 
") , ("void#"," This is an alias for the unboxed unit tuple constructor.\n In earlier versions of GHC, @void\\#@ was a value\n of the primitive type @Void\\#@, which is now defined to be @(\\# \\#)@.\n ") , ("Proxy#"," The type constructor @Proxy#@ is used to bear witness to some\n type variable. It's used when you want to pass around proxy values\n for doing things like modelling type applications. A @Proxy#@\n is not only unboxed, it also has a polymorphic kind, and has no\n runtime representation, being totally free. ") , ("proxy#"," Witness for an unboxed @Proxy#@ value, which has no runtime\n representation. ") , ("seq"," The value of @seq a b@ is bottom if @a@ is bottom, and\n otherwise equal to @b@. In other words, it evaluates the first\n argument @a@ to weak head normal form (WHNF). @seq@ is usually\n introduced to improve performance by avoiding unneeded laziness.\n\n A note on evaluation order: the expression @seq a b@ does\n /not/ guarantee that @a@ will be evaluated before @b@.\n The only guarantee given by @seq@ is that the both @a@\n and @b@ will be evaluated before @seq@ returns a value.\n In particular, this means that @b@ may be evaluated before\n @a@. If you need to guarantee a specific order of evaluation,\n you must use the function @pseq@ from the \"parallel\" package. ") , ("unsafeCoerce#"," The function @unsafeCoerce\\#@ allows you to side-step the typechecker entirely. That\n is, it allows you to coerce any type into any other type. If you use this function,\n you had better get it right, otherwise segmentation faults await. It is generally\n used when you want to write a program that you know is well-typed, but where Haskell's\n type system is not expressive enough to prove that it is well typed.\n\n The following uses of @unsafeCoerce\\#@ are supposed to work (i.e. 
not lead to\n spurious compile-time or run-time crashes):\n\n * Casting any lifted type to @Any@\n\n * Casting @Any@ back to the real type\n\n * Casting an unboxed type to another unboxed type of the same size.\n (Casting between floating-point and integral types does not work.\n See the @GHC.Float@ module for functions to do work.)\n\n * Casting between two types that have the same runtime representation. One case is when\n the two types differ only in \"phantom\" type parameters, for example\n @Ptr Int@ to @Ptr Float@, or @[Int]@ to @[Float]@ when the list is\n known to be empty. Also, a @newtype@ of a type @T@ has the same representation\n at runtime as @T@.\n\n Other uses of @unsafeCoerce\\#@ are undefined. In particular, you should not use\n @unsafeCoerce\\#@ to cast a T to an algebraic data type D, unless T is also\n an algebraic data type. For example, do not cast @Int->Int@ to @Bool@, even if\n you later cast that @Bool@ back to @Int->Int@ before applying it. The reasons\n have to do with GHC's internal representation details (for the cognoscenti, data values\n can be entered but function closures cannot). If you want a safe type to cast things\n to, use @Any@, which is not an algebraic data type.\n\n ") , ("traceEvent#"," Emits an event via the RTS tracing framework. The contents\n of the event is the zero-terminated byte string passed as the first\n argument. The event will be emitted either to the @.eventlog@ file,\n or to stderr, depending on the runtime RTS flags. ") , ("traceBinaryEvent#"," Emits an event via the RTS tracing framework. The contents\n of the event is the binary object passed as the first argument with\n the given length passed as the second argument. The event will be\n emitted to the @.eventlog@ file. ") , ("traceMarker#"," Emits a marker event via the RTS tracing framework. The contents\n of the event is the zero-terminated byte string passed as the first\n argument. 
The event will be emitted either to the @.eventlog@ file,\n or to stderr, depending on the runtime RTS flags. ") , ("setThreadAllocationCounter#"," Sets the allocation counter for the current thread to the given value. ") , ("StackSnapshot#"," Haskell representation of a @StgStack*@ that was created (cloned)\n with a function in @GHC.Stack.CloneStack@. Please check the\n documentation in this module for more detailed explanations. ") , ("coerce"," The function @coerce@ allows you to safely convert between values of\n types that have the same representation with no run-time overhead. In the\n simplest case you can use it instead of a newtype constructor, to go from\n the newtype's concrete type to the abstract type. But it also works in\n more complicated settings, e.g. converting a list of newtypes to a list of\n concrete types.\n\n This function is representation-polymorphic, but the\n @RuntimeRep@ type argument is marked as @Inferred@, meaning\n that it is not available for visible type application. This means\n the typechecker will accept @coerce @Int @Age 42@.\n ") , ("broadcastInt8X16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt16X8#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt32X4#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt64X2#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt8X32#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt16X16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt32X8#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt64X4#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt8X64#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt16X32#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt32X16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastInt64X8#"," Broadcast a scalar to all elements of a vector. 
") , ("broadcastWord8X16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord16X8#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord32X4#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord64X2#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord8X32#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord16X16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord32X8#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord64X4#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord8X64#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord16X32#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord32X16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastWord64X8#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastFloatX4#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastDoubleX2#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastFloatX8#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastDoubleX4#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastFloatX16#"," Broadcast a scalar to all elements of a vector. ") , ("broadcastDoubleX8#"," Broadcast a scalar to all elements of a vector. ") , ("packInt8X16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt16X8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt32X4#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt64X2#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt8X32#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt16X16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt32X8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt64X4#"," Pack the elements of an unboxed tuple into a vector. 
") , ("packInt8X64#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt16X32#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt32X16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packInt64X8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord8X16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord16X8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord32X4#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord64X2#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord8X32#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord16X16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord32X8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord64X4#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord8X64#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord16X32#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord32X16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packWord64X8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packFloatX4#"," Pack the elements of an unboxed tuple into a vector. ") , ("packDoubleX2#"," Pack the elements of an unboxed tuple into a vector. ") , ("packFloatX8#"," Pack the elements of an unboxed tuple into a vector. ") , ("packDoubleX4#"," Pack the elements of an unboxed tuple into a vector. ") , ("packFloatX16#"," Pack the elements of an unboxed tuple into a vector. ") , ("packDoubleX8#"," Pack the elements of an unboxed tuple into a vector. ") , ("unpackInt8X16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt16X8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt32X4#"," Unpack the elements of a vector into an unboxed tuple. 
#") , ("unpackInt64X2#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt8X32#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt16X16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt32X8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt64X4#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt8X64#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt16X32#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt32X16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackInt64X8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord8X16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord16X8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord32X4#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord64X2#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord8X32#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord16X16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord32X8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord64X4#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord8X64#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord16X32#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord32X16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackWord64X8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackFloatX4#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackDoubleX2#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackFloatX8#"," Unpack the elements of a vector into an unboxed tuple. 
#") , ("unpackDoubleX4#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackFloatX16#"," Unpack the elements of a vector into an unboxed tuple. #") , ("unpackDoubleX8#"," Unpack the elements of a vector into an unboxed tuple. #") , ("insertInt8X16#"," Insert a scalar at the given position in a vector. ") , ("insertInt16X8#"," Insert a scalar at the given position in a vector. ") , ("insertInt32X4#"," Insert a scalar at the given position in a vector. ") , ("insertInt64X2#"," Insert a scalar at the given position in a vector. ") , ("insertInt8X32#"," Insert a scalar at the given position in a vector. ") , ("insertInt16X16#"," Insert a scalar at the given position in a vector. ") , ("insertInt32X8#"," Insert a scalar at the given position in a vector. ") , ("insertInt64X4#"," Insert a scalar at the given position in a vector. ") , ("insertInt8X64#"," Insert a scalar at the given position in a vector. ") , ("insertInt16X32#"," Insert a scalar at the given position in a vector. ") , ("insertInt32X16#"," Insert a scalar at the given position in a vector. ") , ("insertInt64X8#"," Insert a scalar at the given position in a vector. ") , ("insertWord8X16#"," Insert a scalar at the given position in a vector. ") , ("insertWord16X8#"," Insert a scalar at the given position in a vector. ") , ("insertWord32X4#"," Insert a scalar at the given position in a vector. ") , ("insertWord64X2#"," Insert a scalar at the given position in a vector. ") , ("insertWord8X32#"," Insert a scalar at the given position in a vector. ") , ("insertWord16X16#"," Insert a scalar at the given position in a vector. ") , ("insertWord32X8#"," Insert a scalar at the given position in a vector. ") , ("insertWord64X4#"," Insert a scalar at the given position in a vector. ") , ("insertWord8X64#"," Insert a scalar at the given position in a vector. ") , ("insertWord16X32#"," Insert a scalar at the given position in a vector. 
") , ("insertWord32X16#"," Insert a scalar at the given position in a vector. ") , ("insertWord64X8#"," Insert a scalar at the given position in a vector. ") , ("insertFloatX4#"," Insert a scalar at the given position in a vector. ") , ("insertDoubleX2#"," Insert a scalar at the given position in a vector. ") , ("insertFloatX8#"," Insert a scalar at the given position in a vector. ") , ("insertDoubleX4#"," Insert a scalar at the given position in a vector. ") , ("insertFloatX16#"," Insert a scalar at the given position in a vector. ") , ("insertDoubleX8#"," Insert a scalar at the given position in a vector. ") , ("plusInt8X16#"," Add two vectors element-wise. ") , ("plusInt16X8#"," Add two vectors element-wise. ") , ("plusInt32X4#"," Add two vectors element-wise. ") , ("plusInt64X2#"," Add two vectors element-wise. ") , ("plusInt8X32#"," Add two vectors element-wise. ") , ("plusInt16X16#"," Add two vectors element-wise. ") , ("plusInt32X8#"," Add two vectors element-wise. ") , ("plusInt64X4#"," Add two vectors element-wise. ") , ("plusInt8X64#"," Add two vectors element-wise. ") , ("plusInt16X32#"," Add two vectors element-wise. ") , ("plusInt32X16#"," Add two vectors element-wise. ") , ("plusInt64X8#"," Add two vectors element-wise. ") , ("plusWord8X16#"," Add two vectors element-wise. ") , ("plusWord16X8#"," Add two vectors element-wise. ") , ("plusWord32X4#"," Add two vectors element-wise. ") , ("plusWord64X2#"," Add two vectors element-wise. ") , ("plusWord8X32#"," Add two vectors element-wise. ") , ("plusWord16X16#"," Add two vectors element-wise. ") , ("plusWord32X8#"," Add two vectors element-wise. ") , ("plusWord64X4#"," Add two vectors element-wise. ") , ("plusWord8X64#"," Add two vectors element-wise. ") , ("plusWord16X32#"," Add two vectors element-wise. ") , ("plusWord32X16#"," Add two vectors element-wise. ") , ("plusWord64X8#"," Add two vectors element-wise. ") , ("plusFloatX4#"," Add two vectors element-wise. 
") , ("plusDoubleX2#"," Add two vectors element-wise. ") , ("plusFloatX8#"," Add two vectors element-wise. ") , ("plusDoubleX4#"," Add two vectors element-wise. ") , ("plusFloatX16#"," Add two vectors element-wise. ") , ("plusDoubleX8#"," Add two vectors element-wise. ") , ("minusInt8X16#"," Subtract two vectors element-wise. ") , ("minusInt16X8#"," Subtract two vectors element-wise. ") , ("minusInt32X4#"," Subtract two vectors element-wise. ") , ("minusInt64X2#"," Subtract two vectors element-wise. ") , ("minusInt8X32#"," Subtract two vectors element-wise. ") , ("minusInt16X16#"," Subtract two vectors element-wise. ") , ("minusInt32X8#"," Subtract two vectors element-wise. ") , ("minusInt64X4#"," Subtract two vectors element-wise. ") , ("minusInt8X64#"," Subtract two vectors element-wise. ") , ("minusInt16X32#"," Subtract two vectors element-wise. ") , ("minusInt32X16#"," Subtract two vectors element-wise. ") , ("minusInt64X8#"," Subtract two vectors element-wise. ") , ("minusWord8X16#"," Subtract two vectors element-wise. ") , ("minusWord16X8#"," Subtract two vectors element-wise. ") , ("minusWord32X4#"," Subtract two vectors element-wise. ") , ("minusWord64X2#"," Subtract two vectors element-wise. ") , ("minusWord8X32#"," Subtract two vectors element-wise. ") , ("minusWord16X16#"," Subtract two vectors element-wise. ") , ("minusWord32X8#"," Subtract two vectors element-wise. ") , ("minusWord64X4#"," Subtract two vectors element-wise. ") , ("minusWord8X64#"," Subtract two vectors element-wise. ") , ("minusWord16X32#"," Subtract two vectors element-wise. ") , ("minusWord32X16#"," Subtract two vectors element-wise. ") , ("minusWord64X8#"," Subtract two vectors element-wise. ") , ("minusFloatX4#"," Subtract two vectors element-wise. ") , ("minusDoubleX2#"," Subtract two vectors element-wise. ") , ("minusFloatX8#"," Subtract two vectors element-wise. ") , ("minusDoubleX4#"," Subtract two vectors element-wise. 
") , ("minusFloatX16#"," Subtract two vectors element-wise. ") , ("minusDoubleX8#"," Subtract two vectors element-wise. ") , ("timesInt8X16#"," Multiply two vectors element-wise. ") , ("timesInt16X8#"," Multiply two vectors element-wise. ") , ("timesInt32X4#"," Multiply two vectors element-wise. ") , ("timesInt64X2#"," Multiply two vectors element-wise. ") , ("timesInt8X32#"," Multiply two vectors element-wise. ") , ("timesInt16X16#"," Multiply two vectors element-wise. ") , ("timesInt32X8#"," Multiply two vectors element-wise. ") , ("timesInt64X4#"," Multiply two vectors element-wise. ") , ("timesInt8X64#"," Multiply two vectors element-wise. ") , ("timesInt16X32#"," Multiply two vectors element-wise. ") , ("timesInt32X16#"," Multiply two vectors element-wise. ") , ("timesInt64X8#"," Multiply two vectors element-wise. ") , ("timesWord8X16#"," Multiply two vectors element-wise. ") , ("timesWord16X8#"," Multiply two vectors element-wise. ") , ("timesWord32X4#"," Multiply two vectors element-wise. ") , ("timesWord64X2#"," Multiply two vectors element-wise. ") , ("timesWord8X32#"," Multiply two vectors element-wise. ") , ("timesWord16X16#"," Multiply two vectors element-wise. ") , ("timesWord32X8#"," Multiply two vectors element-wise. ") , ("timesWord64X4#"," Multiply two vectors element-wise. ") , ("timesWord8X64#"," Multiply two vectors element-wise. ") , ("timesWord16X32#"," Multiply two vectors element-wise. ") , ("timesWord32X16#"," Multiply two vectors element-wise. ") , ("timesWord64X8#"," Multiply two vectors element-wise. ") , ("timesFloatX4#"," Multiply two vectors element-wise. ") , ("timesDoubleX2#"," Multiply two vectors element-wise. ") , ("timesFloatX8#"," Multiply two vectors element-wise. ") , ("timesDoubleX4#"," Multiply two vectors element-wise. ") , ("timesFloatX16#"," Multiply two vectors element-wise. ") , ("timesDoubleX8#"," Multiply two vectors element-wise. ") , ("divideFloatX4#"," Divide two vectors element-wise. 
") , ("divideDoubleX2#"," Divide two vectors element-wise. ") , ("divideFloatX8#"," Divide two vectors element-wise. ") , ("divideDoubleX4#"," Divide two vectors element-wise. ") , ("divideFloatX16#"," Divide two vectors element-wise. ") , ("divideDoubleX8#"," Divide two vectors element-wise. ") , ("quotInt8X16#"," Rounds towards zero element-wise. ") , ("quotInt16X8#"," Rounds towards zero element-wise. ") , ("quotInt32X4#"," Rounds towards zero element-wise. ") , ("quotInt64X2#"," Rounds towards zero element-wise. ") , ("quotInt8X32#"," Rounds towards zero element-wise. ") , ("quotInt16X16#"," Rounds towards zero element-wise. ") , ("quotInt32X8#"," Rounds towards zero element-wise. ") , ("quotInt64X4#"," Rounds towards zero element-wise. ") , ("quotInt8X64#"," Rounds towards zero element-wise. ") , ("quotInt16X32#"," Rounds towards zero element-wise. ") , ("quotInt32X16#"," Rounds towards zero element-wise. ") , ("quotInt64X8#"," Rounds towards zero element-wise. ") , ("quotWord8X16#"," Rounds towards zero element-wise. ") , ("quotWord16X8#"," Rounds towards zero element-wise. ") , ("quotWord32X4#"," Rounds towards zero element-wise. ") , ("quotWord64X2#"," Rounds towards zero element-wise. ") , ("quotWord8X32#"," Rounds towards zero element-wise. ") , ("quotWord16X16#"," Rounds towards zero element-wise. ") , ("quotWord32X8#"," Rounds towards zero element-wise. ") , ("quotWord64X4#"," Rounds towards zero element-wise. ") , ("quotWord8X64#"," Rounds towards zero element-wise. ") , ("quotWord16X32#"," Rounds towards zero element-wise. ") , ("quotWord32X16#"," Rounds towards zero element-wise. ") , ("quotWord64X8#"," Rounds towards zero element-wise. ") , ("remInt8X16#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt16X8#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt32X4#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. 
") , ("remInt64X2#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt8X32#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt16X16#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt32X8#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt64X4#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt8X64#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt16X32#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt32X16#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remInt64X8#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord8X16#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord16X8#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord32X4#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord64X2#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord8X32#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord16X16#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord32X8#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord64X4#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord8X64#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord16X32#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord32X16#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("remWord64X8#"," Satisfies @(quot\\# x y) times\\# y plus\\# (rem\\# x y) == x@. ") , ("negateInt8X16#"," Negate element-wise. ") , ("negateInt16X8#"," Negate element-wise. ") , ("negateInt32X4#"," Negate element-wise. 
") , ("negateInt64X2#"," Negate element-wise. ") , ("negateInt8X32#"," Negate element-wise. ") , ("negateInt16X16#"," Negate element-wise. ") , ("negateInt32X8#"," Negate element-wise. ") , ("negateInt64X4#"," Negate element-wise. ") , ("negateInt8X64#"," Negate element-wise. ") , ("negateInt16X32#"," Negate element-wise. ") , ("negateInt32X16#"," Negate element-wise. ") , ("negateInt64X8#"," Negate element-wise. ") , ("negateFloatX4#"," Negate element-wise. ") , ("negateDoubleX2#"," Negate element-wise. ") , ("negateFloatX8#"," Negate element-wise. ") , ("negateDoubleX4#"," Negate element-wise. ") , ("negateFloatX16#"," Negate element-wise. ") , ("negateDoubleX8#"," Negate element-wise. ") , ("indexInt8X16Array#"," Read a vector from specified index of immutable array. ") , ("indexInt16X8Array#"," Read a vector from specified index of immutable array. ") , ("indexInt32X4Array#"," Read a vector from specified index of immutable array. ") , ("indexInt64X2Array#"," Read a vector from specified index of immutable array. ") , ("indexInt8X32Array#"," Read a vector from specified index of immutable array. ") , ("indexInt16X16Array#"," Read a vector from specified index of immutable array. ") , ("indexInt32X8Array#"," Read a vector from specified index of immutable array. ") , ("indexInt64X4Array#"," Read a vector from specified index of immutable array. ") , ("indexInt8X64Array#"," Read a vector from specified index of immutable array. ") , ("indexInt16X32Array#"," Read a vector from specified index of immutable array. ") , ("indexInt32X16Array#"," Read a vector from specified index of immutable array. ") , ("indexInt64X8Array#"," Read a vector from specified index of immutable array. ") , ("indexWord8X16Array#"," Read a vector from specified index of immutable array. ") , ("indexWord16X8Array#"," Read a vector from specified index of immutable array. ") , ("indexWord32X4Array#"," Read a vector from specified index of immutable array. 
") , ("indexWord64X2Array#"," Read a vector from specified index of immutable array. ") , ("indexWord8X32Array#"," Read a vector from specified index of immutable array. ") , ("indexWord16X16Array#"," Read a vector from specified index of immutable array. ") , ("indexWord32X8Array#"," Read a vector from specified index of immutable array. ") , ("indexWord64X4Array#"," Read a vector from specified index of immutable array. ") , ("indexWord8X64Array#"," Read a vector from specified index of immutable array. ") , ("indexWord16X32Array#"," Read a vector from specified index of immutable array. ") , ("indexWord32X16Array#"," Read a vector from specified index of immutable array. ") , ("indexWord64X8Array#"," Read a vector from specified index of immutable array. ") , ("indexFloatX4Array#"," Read a vector from specified index of immutable array. ") , ("indexDoubleX2Array#"," Read a vector from specified index of immutable array. ") , ("indexFloatX8Array#"," Read a vector from specified index of immutable array. ") , ("indexDoubleX4Array#"," Read a vector from specified index of immutable array. ") , ("indexFloatX16Array#"," Read a vector from specified index of immutable array. ") , ("indexDoubleX8Array#"," Read a vector from specified index of immutable array. ") , ("readInt8X16Array#"," Read a vector from specified index of mutable array. ") , ("readInt16X8Array#"," Read a vector from specified index of mutable array. ") , ("readInt32X4Array#"," Read a vector from specified index of mutable array. ") , ("readInt64X2Array#"," Read a vector from specified index of mutable array. ") , ("readInt8X32Array#"," Read a vector from specified index of mutable array. ") , ("readInt16X16Array#"," Read a vector from specified index of mutable array. ") , ("readInt32X8Array#"," Read a vector from specified index of mutable array. ") , ("readInt64X4Array#"," Read a vector from specified index of mutable array. 
") , ("readInt8X64Array#"," Read a vector from specified index of mutable array. ") , ("readInt16X32Array#"," Read a vector from specified index of mutable array. ") , ("readInt32X16Array#"," Read a vector from specified index of mutable array. ") , ("readInt64X8Array#"," Read a vector from specified index of mutable array. ") , ("readWord8X16Array#"," Read a vector from specified index of mutable array. ") , ("readWord16X8Array#"," Read a vector from specified index of mutable array. ") , ("readWord32X4Array#"," Read a vector from specified index of mutable array. ") , ("readWord64X2Array#"," Read a vector from specified index of mutable array. ") , ("readWord8X32Array#"," Read a vector from specified index of mutable array. ") , ("readWord16X16Array#"," Read a vector from specified index of mutable array. ") , ("readWord32X8Array#"," Read a vector from specified index of mutable array. ") , ("readWord64X4Array#"," Read a vector from specified index of mutable array. ") , ("readWord8X64Array#"," Read a vector from specified index of mutable array. ") , ("readWord16X32Array#"," Read a vector from specified index of mutable array. ") , ("readWord32X16Array#"," Read a vector from specified index of mutable array. ") , ("readWord64X8Array#"," Read a vector from specified index of mutable array. ") , ("readFloatX4Array#"," Read a vector from specified index of mutable array. ") , ("readDoubleX2Array#"," Read a vector from specified index of mutable array. ") , ("readFloatX8Array#"," Read a vector from specified index of mutable array. ") , ("readDoubleX4Array#"," Read a vector from specified index of mutable array. ") , ("readFloatX16Array#"," Read a vector from specified index of mutable array. ") , ("readDoubleX8Array#"," Read a vector from specified index of mutable array. ") , ("writeInt8X16Array#"," Write a vector to specified index of mutable array. ") , ("writeInt16X8Array#"," Write a vector to specified index of mutable array. 
") , ("writeInt32X4Array#"," Write a vector to specified index of mutable array. ") , ("writeInt64X2Array#"," Write a vector to specified index of mutable array. ") , ("writeInt8X32Array#"," Write a vector to specified index of mutable array. ") , ("writeInt16X16Array#"," Write a vector to specified index of mutable array. ") , ("writeInt32X8Array#"," Write a vector to specified index of mutable array. ") , ("writeInt64X4Array#"," Write a vector to specified index of mutable array. ") , ("writeInt8X64Array#"," Write a vector to specified index of mutable array. ") , ("writeInt16X32Array#"," Write a vector to specified index of mutable array. ") , ("writeInt32X16Array#"," Write a vector to specified index of mutable array. ") , ("writeInt64X8Array#"," Write a vector to specified index of mutable array. ") , ("writeWord8X16Array#"," Write a vector to specified index of mutable array. ") , ("writeWord16X8Array#"," Write a vector to specified index of mutable array. ") , ("writeWord32X4Array#"," Write a vector to specified index of mutable array. ") , ("writeWord64X2Array#"," Write a vector to specified index of mutable array. ") , ("writeWord8X32Array#"," Write a vector to specified index of mutable array. ") , ("writeWord16X16Array#"," Write a vector to specified index of mutable array. ") , ("writeWord32X8Array#"," Write a vector to specified index of mutable array. ") , ("writeWord64X4Array#"," Write a vector to specified index of mutable array. ") , ("writeWord8X64Array#"," Write a vector to specified index of mutable array. ") , ("writeWord16X32Array#"," Write a vector to specified index of mutable array. ") , ("writeWord32X16Array#"," Write a vector to specified index of mutable array. ") , ("writeWord64X8Array#"," Write a vector to specified index of mutable array. ") , ("writeFloatX4Array#"," Write a vector to specified index of mutable array. ") , ("writeDoubleX2Array#"," Write a vector to specified index of mutable array. 
") , ("writeFloatX8Array#"," Write a vector to specified index of mutable array. ") , ("writeDoubleX4Array#"," Write a vector to specified index of mutable array. ") , ("writeFloatX16Array#"," Write a vector to specified index of mutable array. ") , ("writeDoubleX8Array#"," Write a vector to specified index of mutable array. ") , ("indexInt8X16OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt16X8OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt32X4OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt64X2OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt8X32OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt16X16OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt32X8OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt64X4OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt8X64OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt16X32OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt32X16OffAddr#"," Reads vector; offset in bytes. ") , ("indexInt64X8OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord8X16OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord16X8OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord32X4OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord64X2OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord8X32OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord16X16OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord32X8OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord64X4OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord8X64OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord16X32OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord32X16OffAddr#"," Reads vector; offset in bytes. ") , ("indexWord64X8OffAddr#"," Reads vector; offset in bytes. ") , ("indexFloatX4OffAddr#"," Reads vector; offset in bytes. ") , ("indexDoubleX2OffAddr#"," Reads vector; offset in bytes. 
") , ("indexFloatX8OffAddr#"," Reads vector; offset in bytes. ") , ("indexDoubleX4OffAddr#"," Reads vector; offset in bytes. ") , ("indexFloatX16OffAddr#"," Reads vector; offset in bytes. ") , ("indexDoubleX8OffAddr#"," Reads vector; offset in bytes. ") , ("readInt8X16OffAddr#"," Reads vector; offset in bytes. ") , ("readInt16X8OffAddr#"," Reads vector; offset in bytes. ") , ("readInt32X4OffAddr#"," Reads vector; offset in bytes. ") , ("readInt64X2OffAddr#"," Reads vector; offset in bytes. ") , ("readInt8X32OffAddr#"," Reads vector; offset in bytes. ") , ("readInt16X16OffAddr#"," Reads vector; offset in bytes. ") , ("readInt32X8OffAddr#"," Reads vector; offset in bytes. ") , ("readInt64X4OffAddr#"," Reads vector; offset in bytes. ") , ("readInt8X64OffAddr#"," Reads vector; offset in bytes. ") , ("readInt16X32OffAddr#"," Reads vector; offset in bytes. ") , ("readInt32X16OffAddr#"," Reads vector; offset in bytes. ") , ("readInt64X8OffAddr#"," Reads vector; offset in bytes. ") , ("readWord8X16OffAddr#"," Reads vector; offset in bytes. ") , ("readWord16X8OffAddr#"," Reads vector; offset in bytes. ") , ("readWord32X4OffAddr#"," Reads vector; offset in bytes. ") , ("readWord64X2OffAddr#"," Reads vector; offset in bytes. ") , ("readWord8X32OffAddr#"," Reads vector; offset in bytes. ") , ("readWord16X16OffAddr#"," Reads vector; offset in bytes. ") , ("readWord32X8OffAddr#"," Reads vector; offset in bytes. ") , ("readWord64X4OffAddr#"," Reads vector; offset in bytes. ") , ("readWord8X64OffAddr#"," Reads vector; offset in bytes. ") , ("readWord16X32OffAddr#"," Reads vector; offset in bytes. ") , ("readWord32X16OffAddr#"," Reads vector; offset in bytes. ") , ("readWord64X8OffAddr#"," Reads vector; offset in bytes. ") , ("readFloatX4OffAddr#"," Reads vector; offset in bytes. ") , ("readDoubleX2OffAddr#"," Reads vector; offset in bytes. ") , ("readFloatX8OffAddr#"," Reads vector; offset in bytes. ") , ("readDoubleX4OffAddr#"," Reads vector; offset in bytes. 
") , ("readFloatX16OffAddr#"," Reads vector; offset in bytes. ") , ("readDoubleX8OffAddr#"," Reads vector; offset in bytes. ") , ("writeInt8X16OffAddr#"," Write vector; offset in bytes. ") , ("writeInt16X8OffAddr#"," Write vector; offset in bytes. ") , ("writeInt32X4OffAddr#"," Write vector; offset in bytes. ") , ("writeInt64X2OffAddr#"," Write vector; offset in bytes. ") , ("writeInt8X32OffAddr#"," Write vector; offset in bytes. ") , ("writeInt16X16OffAddr#"," Write vector; offset in bytes. ") , ("writeInt32X8OffAddr#"," Write vector; offset in bytes. ") , ("writeInt64X4OffAddr#"," Write vector; offset in bytes. ") , ("writeInt8X64OffAddr#"," Write vector; offset in bytes. ") , ("writeInt16X32OffAddr#"," Write vector; offset in bytes. ") , ("writeInt32X16OffAddr#"," Write vector; offset in bytes. ") , ("writeInt64X8OffAddr#"," Write vector; offset in bytes. ") , ("writeWord8X16OffAddr#"," Write vector; offset in bytes. ") , ("writeWord16X8OffAddr#"," Write vector; offset in bytes. ") , ("writeWord32X4OffAddr#"," Write vector; offset in bytes. ") , ("writeWord64X2OffAddr#"," Write vector; offset in bytes. ") , ("writeWord8X32OffAddr#"," Write vector; offset in bytes. ") , ("writeWord16X16OffAddr#"," Write vector; offset in bytes. ") , ("writeWord32X8OffAddr#"," Write vector; offset in bytes. ") , ("writeWord64X4OffAddr#"," Write vector; offset in bytes. ") , ("writeWord8X64OffAddr#"," Write vector; offset in bytes. ") , ("writeWord16X32OffAddr#"," Write vector; offset in bytes. ") , ("writeWord32X16OffAddr#"," Write vector; offset in bytes. ") , ("writeWord64X8OffAddr#"," Write vector; offset in bytes. ") , ("writeFloatX4OffAddr#"," Write vector; offset in bytes. ") , ("writeDoubleX2OffAddr#"," Write vector; offset in bytes. ") , ("writeFloatX8OffAddr#"," Write vector; offset in bytes. ") , ("writeDoubleX4OffAddr#"," Write vector; offset in bytes. ") , ("writeFloatX16OffAddr#"," Write vector; offset in bytes. 
") , ("writeDoubleX8OffAddr#"," Write vector; offset in bytes. ") , ("indexInt8ArrayAsInt8X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt16ArrayAsInt16X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt32ArrayAsInt32X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt64ArrayAsInt64X2#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt8ArrayAsInt8X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt16ArrayAsInt16X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt32ArrayAsInt32X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt64ArrayAsInt64X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt8ArrayAsInt8X64#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt16ArrayAsInt16X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt32ArrayAsInt32X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexInt64ArrayAsInt64X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord8ArrayAsWord8X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord16ArrayAsWord16X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. 
") , ("indexWord32ArrayAsWord32X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord64ArrayAsWord64X2#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord8ArrayAsWord8X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord16ArrayAsWord16X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord32ArrayAsWord32X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord64ArrayAsWord64X4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord8ArrayAsWord8X64#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord16ArrayAsWord16X32#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord32ArrayAsWord32X16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexWord64ArrayAsWord64X8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexFloatArrayAsFloatX4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexDoubleArrayAsDoubleX2#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexFloatArrayAsFloatX8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexDoubleArrayAsDoubleX4#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. 
") , ("indexFloatArrayAsFloatX16#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("indexDoubleArrayAsDoubleX8#"," Read a vector from specified index of immutable array of scalars; offset is in scalar elements. ") , ("readInt8ArrayAsInt8X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt16ArrayAsInt16X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt32ArrayAsInt32X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt64ArrayAsInt64X2#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt8ArrayAsInt8X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt16ArrayAsInt16X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt32ArrayAsInt32X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt64ArrayAsInt64X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt8ArrayAsInt8X64#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt16ArrayAsInt16X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt32ArrayAsInt32X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readInt64ArrayAsInt64X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord8ArrayAsWord8X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. 
") , ("readWord16ArrayAsWord16X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord32ArrayAsWord32X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord64ArrayAsWord64X2#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord8ArrayAsWord8X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord16ArrayAsWord16X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord32ArrayAsWord32X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord64ArrayAsWord64X4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord8ArrayAsWord8X64#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord16ArrayAsWord16X32#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord32ArrayAsWord32X16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readWord64ArrayAsWord64X8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readFloatArrayAsFloatX4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readDoubleArrayAsDoubleX2#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readFloatArrayAsFloatX8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readDoubleArrayAsDoubleX4#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. 
") , ("readFloatArrayAsFloatX16#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("readDoubleArrayAsDoubleX8#"," Read a vector from specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt8ArrayAsInt8X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt16ArrayAsInt16X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt32ArrayAsInt32X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt64ArrayAsInt64X2#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt8ArrayAsInt8X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt16ArrayAsInt16X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt32ArrayAsInt32X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt64ArrayAsInt64X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt8ArrayAsInt8X64#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt16ArrayAsInt16X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt32ArrayAsInt32X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeInt64ArrayAsInt64X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord8ArrayAsWord8X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. 
") , ("writeWord16ArrayAsWord16X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord32ArrayAsWord32X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord64ArrayAsWord64X2#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord8ArrayAsWord8X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord16ArrayAsWord16X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord32ArrayAsWord32X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord64ArrayAsWord64X4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord8ArrayAsWord8X64#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord16ArrayAsWord16X32#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord32ArrayAsWord32X16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeWord64ArrayAsWord64X8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeFloatArrayAsFloatX4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeDoubleArrayAsDoubleX2#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeFloatArrayAsFloatX8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeDoubleArrayAsDoubleX4#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. 
") , ("writeFloatArrayAsFloatX16#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("writeDoubleArrayAsDoubleX8#"," Write a vector to specified index of mutable array of scalars; offset is in scalar elements. ") , ("indexInt8OffAddrAsInt8X16#"," Reads vector; offset in scalar elements. ") , ("indexInt16OffAddrAsInt16X8#"," Reads vector; offset in scalar elements. ") , ("indexInt32OffAddrAsInt32X4#"," Reads vector; offset in scalar elements. ") , ("indexInt64OffAddrAsInt64X2#"," Reads vector; offset in scalar elements. ") , ("indexInt8OffAddrAsInt8X32#"," Reads vector; offset in scalar elements. ") , ("indexInt16OffAddrAsInt16X16#"," Reads vector; offset in scalar elements. ") , ("indexInt32OffAddrAsInt32X8#"," Reads vector; offset in scalar elements. ") , ("indexInt64OffAddrAsInt64X4#"," Reads vector; offset in scalar elements. ") , ("indexInt8OffAddrAsInt8X64#"," Reads vector; offset in scalar elements. ") , ("indexInt16OffAddrAsInt16X32#"," Reads vector; offset in scalar elements. ") , ("indexInt32OffAddrAsInt32X16#"," Reads vector; offset in scalar elements. ") , ("indexInt64OffAddrAsInt64X8#"," Reads vector; offset in scalar elements. ") , ("indexWord8OffAddrAsWord8X16#"," Reads vector; offset in scalar elements. ") , ("indexWord16OffAddrAsWord16X8#"," Reads vector; offset in scalar elements. ") , ("indexWord32OffAddrAsWord32X4#"," Reads vector; offset in scalar elements. ") , ("indexWord64OffAddrAsWord64X2#"," Reads vector; offset in scalar elements. ") , ("indexWord8OffAddrAsWord8X32#"," Reads vector; offset in scalar elements. ") , ("indexWord16OffAddrAsWord16X16#"," Reads vector; offset in scalar elements. ") , ("indexWord32OffAddrAsWord32X8#"," Reads vector; offset in scalar elements. ") , ("indexWord64OffAddrAsWord64X4#"," Reads vector; offset in scalar elements. ") , ("indexWord8OffAddrAsWord8X64#"," Reads vector; offset in scalar elements. 
") , ("indexWord16OffAddrAsWord16X32#"," Reads vector; offset in scalar elements. ") , ("indexWord32OffAddrAsWord32X16#"," Reads vector; offset in scalar elements. ") , ("indexWord64OffAddrAsWord64X8#"," Reads vector; offset in scalar elements. ") , ("indexFloatOffAddrAsFloatX4#"," Reads vector; offset in scalar elements. ") , ("indexDoubleOffAddrAsDoubleX2#"," Reads vector; offset in scalar elements. ") , ("indexFloatOffAddrAsFloatX8#"," Reads vector; offset in scalar elements. ") , ("indexDoubleOffAddrAsDoubleX4#"," Reads vector; offset in scalar elements. ") , ("indexFloatOffAddrAsFloatX16#"," Reads vector; offset in scalar elements. ") , ("indexDoubleOffAddrAsDoubleX8#"," Reads vector; offset in scalar elements. ") , ("readInt8OffAddrAsInt8X16#"," Reads vector; offset in scalar elements. ") , ("readInt16OffAddrAsInt16X8#"," Reads vector; offset in scalar elements. ") , ("readInt32OffAddrAsInt32X4#"," Reads vector; offset in scalar elements. ") , ("readInt64OffAddrAsInt64X2#"," Reads vector; offset in scalar elements. ") , ("readInt8OffAddrAsInt8X32#"," Reads vector; offset in scalar elements. ") , ("readInt16OffAddrAsInt16X16#"," Reads vector; offset in scalar elements. ") , ("readInt32OffAddrAsInt32X8#"," Reads vector; offset in scalar elements. ") , ("readInt64OffAddrAsInt64X4#"," Reads vector; offset in scalar elements. ") , ("readInt8OffAddrAsInt8X64#"," Reads vector; offset in scalar elements. ") , ("readInt16OffAddrAsInt16X32#"," Reads vector; offset in scalar elements. ") , ("readInt32OffAddrAsInt32X16#"," Reads vector; offset in scalar elements. ") , ("readInt64OffAddrAsInt64X8#"," Reads vector; offset in scalar elements. ") , ("readWord8OffAddrAsWord8X16#"," Reads vector; offset in scalar elements. ") , ("readWord16OffAddrAsWord16X8#"," Reads vector; offset in scalar elements. ") , ("readWord32OffAddrAsWord32X4#"," Reads vector; offset in scalar elements. ") , ("readWord64OffAddrAsWord64X2#"," Reads vector; offset in scalar elements. 
") , ("readWord8OffAddrAsWord8X32#"," Reads vector; offset in scalar elements. ") , ("readWord16OffAddrAsWord16X16#"," Reads vector; offset in scalar elements. ") , ("readWord32OffAddrAsWord32X8#"," Reads vector; offset in scalar elements. ") , ("readWord64OffAddrAsWord64X4#"," Reads vector; offset in scalar elements. ") , ("readWord8OffAddrAsWord8X64#"," Reads vector; offset in scalar elements. ") , ("readWord16OffAddrAsWord16X32#"," Reads vector; offset in scalar elements. ") , ("readWord32OffAddrAsWord32X16#"," Reads vector; offset in scalar elements. ") , ("readWord64OffAddrAsWord64X8#"," Reads vector; offset in scalar elements. ") , ("readFloatOffAddrAsFloatX4#"," Reads vector; offset in scalar elements. ") , ("readDoubleOffAddrAsDoubleX2#"," Reads vector; offset in scalar elements. ") , ("readFloatOffAddrAsFloatX8#"," Reads vector; offset in scalar elements. ") , ("readDoubleOffAddrAsDoubleX4#"," Reads vector; offset in scalar elements. ") , ("readFloatOffAddrAsFloatX16#"," Reads vector; offset in scalar elements. ") , ("readDoubleOffAddrAsDoubleX8#"," Reads vector; offset in scalar elements. ") , ("writeInt8OffAddrAsInt8X16#"," Write vector; offset in scalar elements. ") , ("writeInt16OffAddrAsInt16X8#"," Write vector; offset in scalar elements. ") , ("writeInt32OffAddrAsInt32X4#"," Write vector; offset in scalar elements. ") , ("writeInt64OffAddrAsInt64X2#"," Write vector; offset in scalar elements. ") , ("writeInt8OffAddrAsInt8X32#"," Write vector; offset in scalar elements. ") , ("writeInt16OffAddrAsInt16X16#"," Write vector; offset in scalar elements. ") , ("writeInt32OffAddrAsInt32X8#"," Write vector; offset in scalar elements. ") , ("writeInt64OffAddrAsInt64X4#"," Write vector; offset in scalar elements. ") , ("writeInt8OffAddrAsInt8X64#"," Write vector; offset in scalar elements. ") , ("writeInt16OffAddrAsInt16X32#"," Write vector; offset in scalar elements. ") , ("writeInt32OffAddrAsInt32X16#"," Write vector; offset in scalar elements. 
") , ("writeInt64OffAddrAsInt64X8#"," Write vector; offset in scalar elements. ") , ("writeWord8OffAddrAsWord8X16#"," Write vector; offset in scalar elements. ") , ("writeWord16OffAddrAsWord16X8#"," Write vector; offset in scalar elements. ") , ("writeWord32OffAddrAsWord32X4#"," Write vector; offset in scalar elements. ") , ("writeWord64OffAddrAsWord64X2#"," Write vector; offset in scalar elements. ") , ("writeWord8OffAddrAsWord8X32#"," Write vector; offset in scalar elements. ") , ("writeWord16OffAddrAsWord16X16#"," Write vector; offset in scalar elements. ") , ("writeWord32OffAddrAsWord32X8#"," Write vector; offset in scalar elements. ") , ("writeWord64OffAddrAsWord64X4#"," Write vector; offset in scalar elements. ") , ("writeWord8OffAddrAsWord8X64#"," Write vector; offset in scalar elements. ") , ("writeWord16OffAddrAsWord16X32#"," Write vector; offset in scalar elements. ") , ("writeWord32OffAddrAsWord32X16#"," Write vector; offset in scalar elements. ") , ("writeWord64OffAddrAsWord64X8#"," Write vector; offset in scalar elements. ") , ("writeFloatOffAddrAsFloatX4#"," Write vector; offset in scalar elements. ") , ("writeDoubleOffAddrAsDoubleX2#"," Write vector; offset in scalar elements. ") , ("writeFloatOffAddrAsFloatX8#"," Write vector; offset in scalar elements. ") , ("writeDoubleOffAddrAsDoubleX4#"," Write vector; offset in scalar elements. ") , ("writeFloatOffAddrAsFloatX16#"," Write vector; offset in scalar elements. ") , ("writeDoubleOffAddrAsDoubleX8#"," Write vector; offset in scalar elements. 
") ] ghc-lib-parser-9.4.7.20230826/ghc-lib/stage0/libraries/ghc-boot/build/GHC/Platform/Host.hs0000644000000000000000000000040114472400073026324 0ustar0000000000000000module GHC.Platform.Host where import GHC.Platform.ArchOS hostPlatformArch :: Arch hostPlatformArch = ArchX86_64 hostPlatformOS :: OS hostPlatformOS = OSDarwin hostPlatformArchOS :: ArchOS hostPlatformArchOS = ArchOS hostPlatformArch hostPlatformOS ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser.y0000644000000000000000000065154414472400112017300 0ustar0000000000000000-- -*-haskell-*- -- --------------------------------------------------------------------------- -- (c) The University of Glasgow 1997-2003 --- -- The GHC grammar. -- -- Author(s): Simon Marlow, Sven Panne 1997, 1998, 1999 -- --------------------------------------------------------------------------- { {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE ScopedTypeVariables #-} -- | This module provides the generated Happy parser for Haskell. It exports -- a number of parsers which may be used in any library that uses the GHC API. -- A common usage pattern is to initialize the parser state with a given string -- and then parse that string: -- -- @ -- runParser :: ParserOpts -> String -> P a -> ParseResult a -- runParser opts str parser = unP parser parseState -- where -- filename = "\" -- location = mkRealSrcLoc (mkFastString filename) 1 1 -- buffer = stringToStringBuffer str -- parseState = initParserState opts buffer location -- @ module GHC.Parser ( parseModule, parseSignature, parseImport, parseStatement, parseBackpack , parseDeclaration, parseExpression, parsePattern , parseTypeSignature , parseStmt, parseIdentifier , parseType, parseHeader , parseModuleNoHaddock ) where -- base import Control.Monad ( unless, liftM, when, (<=<) ) import GHC.Exts import Data.Maybe ( maybeToList ) import Data.List.NonEmpty ( NonEmpty(..) 
) import qualified Data.List.NonEmpty as NE import qualified Prelude -- for happy-generated code import GHC.Hs import GHC.Driver.Backpack.Syntax import GHC.Unit.Info import GHC.Unit.Module import GHC.Unit.Module.Warnings import GHC.Data.OrdList import GHC.Data.BooleanFormula ( BooleanFormula(..), LBooleanFormula, mkTrue ) import GHC.Data.FastString import GHC.Data.Maybe ( orElse ) import GHC.Utils.Outputable import GHC.Utils.Error import GHC.Utils.Misc ( looksLikePackageName, fstOf3, sndOf3, thdOf3 ) import GHC.Utils.Panic import GHC.Prelude import qualified GHC.Data.Strict as Strict import GHC.Types.Name.Reader import GHC.Types.Name.Occurrence ( varName, dataName, tcClsName, tvName, occNameFS, mkVarOcc, occNameString) import GHC.Types.SrcLoc import GHC.Types.Basic import GHC.Types.Error ( GhcHint(..) ) import GHC.Types.Fixity import GHC.Types.ForeignCall import GHC.Types.SourceFile import GHC.Types.SourceText import GHC.Types.PkgQual import GHC.Core.Type ( unrestrictedFunTyCon, Specificity(..) ) import GHC.Core.Class ( FunDep ) import GHC.Core.DataCon ( DataCon, dataConName ) import GHC.Parser.PostProcess import GHC.Parser.PostProcess.Haddock import GHC.Parser.Lexer import GHC.Parser.HaddockLex import GHC.Parser.Annotation import GHC.Parser.Errors.Types import GHC.Parser.Errors.Ppr () import GHC.Builtin.Types ( unitTyCon, unitDataCon, sumTyCon, tupleTyCon, tupleDataCon, nilDataCon, unboxedUnitTyCon, unboxedUnitDataCon, listTyCon_RDR, consDataCon_RDR) import qualified Data.Semigroup as Semi } %expect 0 -- shift/reduce conflicts {- Note [shift/reduce conflicts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 'happy' tool turns this grammar into an efficient parser that follows the shift-reduce parsing model. There's a parse stack that contains items parsed so far (both terminals and non-terminals). 
Every next token produced by the lexer results in one of two actions: SHIFT: push the token onto the parse stack REDUCE: pop a few items off the parse stack and combine them with a function (reduction rule) However, sometimes it's unclear which of the two actions to take. Consider this code example: if x then y else f z There are two ways to parse it: (if x then y else f) z if x then y else (f z) How is this determined? At some point, the parser gets to the following state: parse stack: 'if' exp 'then' exp 'else' "f" next token: "z" Scenario A (simplified): 1. REDUCE, parse stack: 'if' exp 'then' exp 'else' exp next token: "z" (Note that "f" reduced to exp here) 2. REDUCE, parse stack: exp next token: "z" 3. SHIFT, parse stack: exp "z" next token: ... 4. REDUCE, parse stack: exp next token: ... This way we get: (if x then y else f) z Scenario B (simplified): 1. SHIFT, parse stack: 'if' exp 'then' exp 'else' "f" "z" next token: ... 2. REDUCE, parse stack: 'if' exp 'then' exp 'else' exp next token: ... 3. REDUCE, parse stack: exp next token: ... This way we get: if x then y else (f z) The end result is determined by the chosen action. When Happy detects this, it reports a shift/reduce conflict. At the top of the file, we have the following directive: %expect 0 It means that we expect no unresolved shift/reduce conflicts in this grammar. If you modify the grammar and get shift/reduce conflicts, follow the steps below to resolve them. STEP ONE is to figure out what causes the conflict. That's where the -i flag comes in handy: happy -agc --strict compiler/GHC/Parser.y -idetailed-info By analysing the output of this command, in a new file `detailed-info`, you can figure out which reduction rule causes the issue. At the top of the generated report, you will see a line like this: state 147 contains 67 shift/reduce conflicts. Scroll down to section State 147 (in your case it could be a different state). 
The start of the section lists the reduction rules that can fire and shows their context: exp10 -> fexp . (rule 492) fexp -> fexp . aexp (rule 498) fexp -> fexp . PREFIX_AT atype (rule 499) And then, for every token, it tells you the parsing action: ']' reduce using rule 492 '::' reduce using rule 492 '(' shift, and enter state 178 QVARID shift, and enter state 44 DO shift, and enter state 182 ... But if you look closer, some of these tokens also have another parsing action in parentheses: QVARID shift, and enter state 44 (reduce using rule 492) That's how you know rule 492 is causing trouble. Scroll back to the top to see what this rule is: ---------------------------------- Grammar ---------------------------------- ... ... exp10 -> fexp (492) optSemi -> ';' (493) ... ... Hence the shift/reduce conflict is caused by this parser production: exp10 :: { ECP } : '-' fexp { ... } | fexp { ... } -- problematic rule STEP TWO is to mark the problematic rule with the %shift pragma. This signals to 'happy' that any shift/reduce conflicts involving this rule must be resolved in favor of a shift. There's currently no dedicated pragma to resolve in favor of the reduce. STEP THREE is to add a dedicated Note for this specific conflict, as is done for all other conflicts below. -} {- Note [%shift: rule_activation -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: rule -> STRING . rule_activation rule_foralls infixexp '=' exp Example: {-# RULES "name" [0] f = rhs #-} Ambiguity: If we reduced, then we'd get an empty activation rule, and [0] would be parsed as part of the left-hand side expression. We shift, so [0] is parsed as an activation rule. -} {- Note [%shift: rule_foralls -> 'forall' rule_vars '.'] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: rule_foralls -> 'forall' rule_vars '.' . 'forall' rule_vars '.' rule_foralls -> 'forall' rule_vars '.' . Example: {-# RULES "name" forall a1. forall a2. 
lhs = rhs #-} Ambiguity: Same as in Note [%shift: rule_foralls -> {- empty -}] but for the second 'forall'. -} {- Note [%shift: rule_foralls -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: rule -> STRING rule_activation . rule_foralls infixexp '=' exp Example: {-# RULES "name" forall a1. lhs = rhs #-} Ambiguity: If we reduced, then we would get an empty rule_foralls; the 'forall', being a valid term-level identifier, would be parsed as part of the left-hand side expression. We shift, so the 'forall' is parsed as part of rule_foralls. -} {- Note [%shift: type -> btype] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: context -> btype . type -> btype . type -> btype . '->' ctype type -> btype . '->.' ctype Example: a :: Maybe Integer -> Bool Ambiguity: If we reduced, we would get: (a :: Maybe Integer) -> Bool We shift to get this instead: a :: (Maybe Integer -> Bool) -} {- Note [%shift: infixtype -> ftype] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: infixtype -> ftype . infixtype -> ftype . tyop infixtype ftype -> ftype . tyarg ftype -> ftype . PREFIX_AT tyarg Example: a :: Maybe Integer Ambiguity: If we reduced, we would get: (a :: Maybe) Integer We shift to get this instead: a :: (Maybe Integer) -} {- Note [%shift: atype -> tyvar] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: atype -> tyvar . tv_bndr_no_braces -> '(' tyvar . '::' kind ')' Example: class C a where type D a = (a :: Type ... Ambiguity: If we reduced, we could specify a default for an associated type like this: class C a where type D a type D a = (a :: Type) But we shift in order to allow injectivity signatures like this: class C a where type D a = (r :: Type) | r -> a -} {- Note [%shift: exp -> infixexp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: exp -> infixexp . '::' sigtype exp -> infixexp . '-<' exp exp -> infixexp . '>-' exp exp -> infixexp . '-<<' exp exp -> infixexp . '>>-' exp exp -> infixexp . infixexp -> infixexp . 
qop exp10p Examples: 1) if x then y else z -< e 2) if x then y else z :: T 3) if x then y else z + 1 -- (NB: '+' is in VARSYM) Ambiguity: If we reduced, we would get: 1) (if x then y else z) -< e 2) (if x then y else z) :: T 3) (if x then y else z) + 1 We shift to get this instead: 1) if x then y else (z -< e) 2) if x then y else (z :: T) 3) if x then y else (z + 1) -} {- Note [%shift: exp10 -> '-' fexp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: exp10 -> '-' fexp . fexp -> fexp . aexp fexp -> fexp . PREFIX_AT atype Examples & Ambiguity: Same as in Note [%shift: exp10 -> fexp], but with a '-' in front. -} {- Note [%shift: exp10 -> fexp] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: exp10 -> fexp . fexp -> fexp . aexp fexp -> fexp . PREFIX_AT atype Examples: 1) if x then y else f z 2) if x then y else f @z Ambiguity: If we reduced, we would get: 1) (if x then y else f) z 2) (if x then y else f) @z We shift to get this instead: 1) if x then y else (f z) 2) if x then y else (f @z) -} {- Note [%shift: aexp2 -> ipvar] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: aexp2 -> ipvar . dbind -> ipvar . '=' exp Example: let ?x = ... Ambiguity: If we reduced, ?x would be parsed as the LHS of a normal binding, eventually producing an error. We shift, so it is parsed as the LHS of an implicit binding. -} {- Note [%shift: aexp2 -> TH_TY_QUOTE] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: aexp2 -> TH_TY_QUOTE . tyvar aexp2 -> TH_TY_QUOTE . gtycon aexp2 -> TH_TY_QUOTE . Examples: 1) x = '' 2) x = ''a 3) x = ''T Ambiguity: If we reduced, the '' would result in reportEmptyDoubleQuotes even when followed by a type variable or a type constructor. But the only reason this reduction rule exists is to improve error messages. Naturally, we shift instead, so that ''a and ''T work as expected. -} {- Note [%shift: tup_tail -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: tup_exprs -> commas . tup_tail sysdcon_nolist -> '(' commas . ')' sysdcon_nolist -> '(#' commas . 
'#)' commas -> commas . ',' Example: (,,) Ambiguity: A tuple section with no components is indistinguishable from the Haskell98 data constructor for a tuple. If we reduced, (,,) would be parsed as a tuple section. We shift, so (,,) is parsed as a data constructor. This is preferable because we want to accept (,,) without -XTupleSections. See also Note [ExplicitTuple] in GHC.Hs.Expr. -} {- Note [%shift: qtyconop -> qtyconsym] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: oqtycon -> '(' qtyconsym . ')' qtyconop -> qtyconsym . Example: foo :: (:%) Ambiguity: If we reduced, (:%) would be parsed as a parenthehsized infix type expression without arguments, resulting in the 'failOpFewArgs' error. We shift, so it is parsed as a type constructor. -} {- Note [%shift: special_id -> 'group'] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: transformqual -> 'then' 'group' . 'using' exp transformqual -> 'then' 'group' . 'by' exp 'using' exp special_id -> 'group' . Example: [ ... | then group by dept using groupWith , then take 5 ] Ambiguity: If we reduced, 'group' would be parsed as a term-level identifier, just as 'take' in the other clause. We shift, so it is parsed as part of the 'group by' clause introduced by the -XTransformListComp extension. -} {- Note [%shift: activation -> {- empty -}] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Context: sigdecl -> '{-# INLINE' . activation qvarcon '#-}' activation -> {- empty -} activation -> explicit_activation Example: {-# INLINE [0] Something #-} Ambiguity: We don't know whether the '[' is the start of the activation or the beginning of the [] data constructor. We parse this as having '[0]' activation for inlining 'Something', rather than empty activation and inlining '[0] Something'. -} {- Note [Parser API Annotations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A lot of the productions are now cluttered with calls to aa,am,acs,acsA etc. 
These are helper functions to make sure that the locations of the various keywords such as do / let / in are captured for use by tools that want to do source to source conversions, such as refactorers or structured editors. The helper functions are defined at the bottom of this file. See https://gitlab.haskell.org/ghc/ghc/wikis/api-annotations and https://gitlab.haskell.org/ghc/ghc/wikis/ghc-ast-annotations for some background. -} {- Note [Parsing lists] ~~~~~~~~~~~~~~~~~~~~~~~ You might be wondering why we spend so much effort encoding our lists this way: importdecls : importdecls ';' importdecl | importdecls ';' | importdecl | {- empty -} This might seem like an awfully roundabout way to declare a list; plus, to add insult to injury you have to reverse the results at the end. The answer is that left recursion prevents us from running out of stack space when parsing long sequences. See: https://www.haskell.org/happy/doc/html/sec-sequences.html for more guidance. By adding/removing branches, you can affect what lists are accepted. Here are the most common patterns, rewritten as regular expressions for clarity: -- Equivalent to: ';'* (x ';'+)* x? 
(can be empty, permits leading/trailing semis) xs : xs ';' x | xs ';' | x | {- empty -} -- Equivalent to x (';' x)* ';'* (non-empty, permits trailing semis) xs : xs ';' x | xs ';' | x -- Equivalent to ';'* alts (';' alts)* ';'* (non-empty, permits leading/trailing semis) alts : alts1 | ';' alts alts1 : alts1 ';' alt | alts1 ';' | alt -- Equivalent to x (',' x)+ (non-empty, no trailing semis) xs : x | x ',' xs -} %token '_' { L _ ITunderscore } -- Haskell keywords 'as' { L _ ITas } 'case' { L _ ITcase } 'class' { L _ ITclass } 'data' { L _ ITdata } 'default' { L _ ITdefault } 'deriving' { L _ ITderiving } 'else' { L _ ITelse } 'hiding' { L _ IThiding } 'if' { L _ ITif } 'import' { L _ ITimport } 'in' { L _ ITin } 'infix' { L _ ITinfix } 'infixl' { L _ ITinfixl } 'infixr' { L _ ITinfixr } 'instance' { L _ ITinstance } 'let' { L _ ITlet } 'module' { L _ ITmodule } 'newtype' { L _ ITnewtype } 'of' { L _ ITof } 'qualified' { L _ ITqualified } 'then' { L _ ITthen } 'type' { L _ ITtype } 'where' { L _ ITwhere } 'forall' { L _ (ITforall _) } -- GHC extension keywords 'foreign' { L _ ITforeign } 'export' { L _ ITexport } 'label' { L _ ITlabel } 'dynamic' { L _ ITdynamic } 'safe' { L _ ITsafe } 'interruptible' { L _ ITinterruptible } 'unsafe' { L _ ITunsafe } 'family' { L _ ITfamily } 'role' { L _ ITrole } 'stdcall' { L _ ITstdcallconv } 'ccall' { L _ ITccallconv } 'capi' { L _ ITcapiconv } 'prim' { L _ ITprimcallconv } 'javascript' { L _ ITjavascriptcallconv } 'proc' { L _ ITproc } -- for arrow notation extension 'rec' { L _ ITrec } -- for arrow notation extension 'group' { L _ ITgroup } -- for list transform extension 'by' { L _ ITby } -- for list transform extension 'using' { L _ ITusing } -- for list transform extension 'pattern' { L _ ITpattern } -- for pattern synonyms 'static' { L _ ITstatic } -- for static pointers extension 'stock' { L _ ITstock } -- for DerivingStrategies extension 'anyclass' { L _ ITanyclass } -- for DerivingStrategies extension 'via' { L _ ITvia 
} -- for DerivingStrategies extension 'unit' { L _ ITunit } 'signature' { L _ ITsignature } 'dependency' { L _ ITdependency } '{-# INLINE' { L _ (ITinline_prag _ _ _) } -- INLINE or INLINABLE '{-# OPAQUE' { L _ (ITopaque_prag _) } '{-# SPECIALISE' { L _ (ITspec_prag _) } '{-# SPECIALISE_INLINE' { L _ (ITspec_inline_prag _ _) } '{-# SOURCE' { L _ (ITsource_prag _) } '{-# RULES' { L _ (ITrules_prag _) } '{-# SCC' { L _ (ITscc_prag _)} '{-# DEPRECATED' { L _ (ITdeprecated_prag _) } '{-# WARNING' { L _ (ITwarning_prag _) } '{-# UNPACK' { L _ (ITunpack_prag _) } '{-# NOUNPACK' { L _ (ITnounpack_prag _) } '{-# ANN' { L _ (ITann_prag _) } '{-# MINIMAL' { L _ (ITminimal_prag _) } '{-# CTYPE' { L _ (ITctype _) } '{-# OVERLAPPING' { L _ (IToverlapping_prag _) } '{-# OVERLAPPABLE' { L _ (IToverlappable_prag _) } '{-# OVERLAPS' { L _ (IToverlaps_prag _) } '{-# INCOHERENT' { L _ (ITincoherent_prag _) } '{-# COMPLETE' { L _ (ITcomplete_prag _) } '#-}' { L _ ITclose_prag } '..' { L _ ITdotdot } -- reserved symbols ':' { L _ ITcolon } '::' { L _ (ITdcolon _) } '=' { L _ ITequal } '\\' { L _ ITlam } 'lcase' { L _ ITlcase } 'lcases' { L _ ITlcases } '|' { L _ ITvbar } '<-' { L _ (ITlarrow _) } '->' { L _ (ITrarrow _) } '->.' { L _ ITlolly } TIGHT_INFIX_AT { L _ ITat } '=>' { L _ (ITdarrow _) } '-' { L _ ITminus } PREFIX_TILDE { L _ ITtilde } PREFIX_BANG { L _ ITbang } PREFIX_MINUS { L _ ITprefixminus } '*' { L _ (ITstar _) } '-<' { L _ (ITlarrowtail _) } -- for arrow notation '>-' { L _ (ITrarrowtail _) } -- for arrow notation '-<<' { L _ (ITLarrowtail _) } -- for arrow notation '>>-' { L _ (ITRarrowtail _) } -- for arrow notation '.' 
{ L _ ITdot } PREFIX_PROJ { L _ (ITproj True) } -- RecordDotSyntax TIGHT_INFIX_PROJ { L _ (ITproj False) } -- RecordDotSyntax PREFIX_AT { L _ ITtypeApp } PREFIX_PERCENT { L _ ITpercent } -- for linear types '{' { L _ ITocurly } -- special symbols '}' { L _ ITccurly } vocurly { L _ ITvocurly } -- virtual open curly (from layout) vccurly { L _ ITvccurly } -- virtual close curly (from layout) '[' { L _ ITobrack } ']' { L _ ITcbrack } '(' { L _ IToparen } ')' { L _ ITcparen } '(#' { L _ IToubxparen } '#)' { L _ ITcubxparen } '(|' { L _ (IToparenbar _) } '|)' { L _ (ITcparenbar _) } ';' { L _ ITsemi } ',' { L _ ITcomma } '`' { L _ ITbackquote } SIMPLEQUOTE { L _ ITsimpleQuote } -- 'x VARID { L _ (ITvarid _) } -- identifiers CONID { L _ (ITconid _) } VARSYM { L _ (ITvarsym _) } CONSYM { L _ (ITconsym _) } QVARID { L _ (ITqvarid _) } QCONID { L _ (ITqconid _) } QVARSYM { L _ (ITqvarsym _) } QCONSYM { L _ (ITqconsym _) } -- QualifiedDo DO { L _ (ITdo _) } MDO { L _ (ITmdo _) } IPDUPVARID { L _ (ITdupipvarid _) } -- GHC extension LABELVARID { L _ (ITlabelvarid _) } CHAR { L _ (ITchar _ _) } STRING { L _ (ITstring _ _) } INTEGER { L _ (ITinteger _) } RATIONAL { L _ (ITrational _) } PRIMCHAR { L _ (ITprimchar _ _) } PRIMSTRING { L _ (ITprimstring _ _) } PRIMINTEGER { L _ (ITprimint _ _) } PRIMWORD { L _ (ITprimword _ _) } PRIMFLOAT { L _ (ITprimfloat _) } PRIMDOUBLE { L _ (ITprimdouble _) } -- Template Haskell '[|' { L _ (ITopenExpQuote _ _) } '[p|' { L _ ITopenPatQuote } '[t|' { L _ ITopenTypQuote } '[d|' { L _ ITopenDecQuote } '|]' { L _ (ITcloseQuote _) } '[||' { L _ (ITopenTExpQuote _) } '||]' { L _ ITcloseTExpQuote } PREFIX_DOLLAR { L _ ITdollar } PREFIX_DOLLAR_DOLLAR { L _ ITdollardollar } TH_TY_QUOTE { L _ ITtyQuote } -- ''T TH_QUASIQUOTE { L _ (ITquasiQuote _) } TH_QQUASIQUOTE { L _ (ITqQuasiQuote _) } %monad { P } { >>= } { return } %lexer { (lexer True) } { L _ ITeof } -- Replace 'lexer' above with 'lexerDbg' -- to dump the tokens fed to the parser. 
%tokentype { (Located Token) } -- Exported parsers %name parseModuleNoHaddock module %name parseSignature signature %name parseImport importdecl %name parseStatement e_stmt %name parseDeclaration topdecl %name parseExpression exp %name parsePattern pat %name parseTypeSignature sigdecl %name parseStmt maybe_stmt %name parseIdentifier identifier %name parseType ktype %name parseBackpack backpack %partial parseHeader header %% ----------------------------------------------------------------------------- -- Identifiers; one of the entry points identifier :: { LocatedN RdrName } : qvar { $1 } | qcon { $1 } | qvarop { $1 } | qconop { $1 } | '(' '->' ')' {% amsrn (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnn NameParens (glAA $1) (glAA $2) (glAA $3) []) } | '->' {% amsrn (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnnRArrow (glAA $1) []) } ----------------------------------------------------------------------------- -- Backpack stuff backpack :: { [LHsUnit PackageName] } : implicit_top units close { fromOL $2 } | '{' units '}' { fromOL $2 } units :: { OrdList (LHsUnit PackageName) } : units ';' unit { $1 `appOL` unitOL $3 } | units ';' { $1 } | unit { unitOL $1 } unit :: { LHsUnit PackageName } : 'unit' pkgname 'where' unitbody { sL1 $1 $ HsUnit { hsunitName = $2 , hsunitBody = fromOL $4 } } unitid :: { LHsUnitId PackageName } : pkgname { sL1 $1 $ HsUnitId $1 [] } | pkgname '[' msubsts ']' { sLL $1 $> $ HsUnitId $1 (fromOL $3) } msubsts :: { OrdList (LHsModuleSubst PackageName) } : msubsts ',' msubst { $1 `appOL` unitOL $3 } | msubsts ',' { $1 } | msubst { unitOL $1 } msubst :: { LHsModuleSubst PackageName } : modid '=' moduleid { sLL (reLoc $1) $> $ (reLoc $1, $3) } | modid VARSYM modid VARSYM { sLL (reLoc $1) $> $ (reLoc $1, sLL $2 $> $ HsModuleVar (reLoc $3)) } moduleid :: { LHsModuleId PackageName } : VARSYM modid VARSYM { sLL $1 $> $ HsModuleVar (reLoc $2) } | unitid ':' modid { sLL $1 (reLoc $>) $ HsModuleId $1 (reLoc $3) } pkgname :: { Located 
PackageName } : STRING { sL1 $1 $ PackageName (getSTRING $1) } | litpkgname { sL1 $1 $ PackageName (unLoc $1) } litpkgname_segment :: { Located FastString } : VARID { sL1 $1 $ getVARID $1 } | CONID { sL1 $1 $ getCONID $1 } | special_id { $1 } -- Parse a minus sign regardless of whether -XLexicalNegation is turned on or off. -- See Note [Minus tokens] in GHC.Parser.Lexer HYPHEN :: { [AddEpAnn] } : '-' { [mj AnnMinus $1 ] } | PREFIX_MINUS { [mj AnnMinus $1 ] } | VARSYM {% if (getVARSYM $1 == fsLit "-") then return [mj AnnMinus $1] else do { addError $ mkPlainErrorMsgEnvelope (getLoc $1) $ PsErrExpectedHyphen ; return [] } } litpkgname :: { Located FastString } : litpkgname_segment { $1 } -- a bit of a hack, means p - b is parsed same as p-b, enough for now. | litpkgname_segment HYPHEN litpkgname { sLL $1 $> $ appendFS (unLoc $1) (consFS '-' (unLoc $3)) } mayberns :: { Maybe [LRenaming] } : {- empty -} { Nothing } | '(' rns ')' { Just (fromOL $2) } rns :: { OrdList LRenaming } : rns ',' rn { $1 `appOL` unitOL $3 } | rns ',' { $1 } | rn { unitOL $1 } rn :: { LRenaming } : modid 'as' modid { sLL (reLoc $1) (reLoc $>) $ Renaming (reLoc $1) (Just (reLoc $3)) } | modid { sL1 (reLoc $1) $ Renaming (reLoc $1) Nothing } unitbody :: { OrdList (LHsUnitDecl PackageName) } : '{' unitdecls '}' { $2 } | vocurly unitdecls close { $2 } unitdecls :: { OrdList (LHsUnitDecl PackageName) } : unitdecls ';' unitdecl { $1 `appOL` unitOL $3 } | unitdecls ';' { $1 } | unitdecl { unitOL $1 } unitdecl :: { LHsUnitDecl PackageName } : 'module' maybe_src modid maybemodwarning maybeexports 'where' body -- XXX not accurate { sL1 $1 $ DeclD (case snd $2 of NotBoot -> HsSrcFile IsBoot -> HsBootFile) (reLoc $3) (sL1 $1 (HsModule noAnn (thdOf3 $7) (Just $3) $5 (fst $ sndOf3 $7) (snd $ sndOf3 $7) $4 Nothing)) } | 'signature' modid maybemodwarning maybeexports 'where' body { sL1 $1 $ DeclD HsigFile (reLoc $2) (sL1 $1 (HsModule noAnn (thdOf3 $6) (Just $2) $4 (fst $ sndOf3 $6) (snd $ sndOf3 $6) $3 
Nothing)) } | 'dependency' unitid mayberns { sL1 $1 $ IncludeD (IncludeDecl { idUnitId = $2 , idModRenaming = $3 , idSignatureInclude = False }) } | 'dependency' 'signature' unitid { sL1 $1 $ IncludeD (IncludeDecl { idUnitId = $3 , idModRenaming = Nothing , idSignatureInclude = True }) } ----------------------------------------------------------------------------- -- Module Header -- The place for module deprecation is really too restrictive, but if it -- was allowed at its natural place just before 'module', we get an ugly -- s/r conflict with the second alternative. Another solution would be the -- introduction of a new pragma DEPRECATED_MODULE, but this is not very nice, -- either, and DEPRECATED is only expected to be used by people who really -- know what they are doing. :-) signature :: { Located HsModule } : 'signature' modid maybemodwarning maybeexports 'where' body {% fileSrcSpan >>= \ loc -> acs (\cs-> (L loc (HsModule (EpAnn (spanAsAnchor loc) (AnnsModule [mj AnnSignature $1, mj AnnWhere $5] (fstOf3 $6)) cs) (thdOf3 $6) (Just $2) $4 (fst $ sndOf3 $6) (snd $ sndOf3 $6) $3 Nothing)) ) } module :: { Located HsModule } : 'module' modid maybemodwarning maybeexports 'where' body {% fileSrcSpan >>= \ loc -> acsFinal (\cs -> (L loc (HsModule (EpAnn (spanAsAnchor loc) (AnnsModule [mj AnnModule $1, mj AnnWhere $5] (fstOf3 $6)) cs) (thdOf3 $6) (Just $2) $4 (fst $ sndOf3 $6) (snd $ sndOf3 $6) $3 Nothing) )) } | body2 {% fileSrcSpan >>= \ loc -> acsFinal (\cs -> (L loc (HsModule (EpAnn (spanAsAnchor loc) (AnnsModule [] (fstOf3 $1)) cs) (thdOf3 $1) Nothing Nothing (fst $ sndOf3 $1) (snd $ sndOf3 $1) Nothing Nothing))) } missing_module_keyword :: { () } : {- empty -} {% pushModuleContext } implicit_top :: { () } : {- empty -} {% pushModuleContext } maybemodwarning :: { Maybe (LocatedP (WarningTxt GhcPs)) } : '{-# DEPRECATED' strings '#-}' {% fmap Just $ amsrp (sLL $1 $> $ DeprecatedTxt (sL1 $1 $ getDEPRECATED_PRAGs $1) (map stringLiteralToHsDocWst $ snd $ unLoc $2)) 
(AnnPragma (mo $1) (mc $3) (fst $ unLoc $2)) } | '{-# WARNING' strings '#-}' {% fmap Just $ amsrp (sLL $1 $> $ WarningTxt (sL1 $1 $ getWARNING_PRAGs $1) (map stringLiteralToHsDocWst $ snd $ unLoc $2)) (AnnPragma (mo $1) (mc $3) (fst $ unLoc $2))} | {- empty -} { Nothing } body :: { (AnnList ,([LImportDecl GhcPs], [LHsDecl GhcPs]) ,LayoutInfo) } : '{' top '}' { (AnnList Nothing (Just $ moc $1) (Just $ mcc $3) [] (fst $2) , snd $2, ExplicitBraces) } | vocurly top close { (AnnList Nothing Nothing Nothing [] (fst $2) , snd $2, VirtualBraces (getVOCURLY $1)) } body2 :: { (AnnList ,([LImportDecl GhcPs], [LHsDecl GhcPs]) ,LayoutInfo) } : '{' top '}' { (AnnList Nothing (Just $ moc $1) (Just $ mcc $3) [] (fst $2) , snd $2, ExplicitBraces) } | missing_module_keyword top close { (AnnList Nothing Nothing Nothing [] [], snd $2, VirtualBraces leftmostColumn) } top :: { ([TrailingAnn] ,([LImportDecl GhcPs], [LHsDecl GhcPs])) } : semis top1 { (reverse $1, $2) } top1 :: { ([LImportDecl GhcPs], [LHsDecl GhcPs]) } : importdecls_semi topdecls_cs_semi { (reverse $1, cvTopDecls $2) } | importdecls_semi topdecls_cs { (reverse $1, cvTopDecls $2) } | importdecls { (reverse $1, []) } ----------------------------------------------------------------------------- -- Module declaration & imports only header :: { Located HsModule } : 'module' modid maybemodwarning maybeexports 'where' header_body {% fileSrcSpan >>= \ loc -> acs (\cs -> (L loc (HsModule (EpAnn (spanAsAnchor loc) (AnnsModule [mj AnnModule $1,mj AnnWhere $5] (AnnList Nothing Nothing Nothing [] [])) cs) NoLayoutInfo (Just $2) $4 $6 [] $3 Nothing ))) } | 'signature' modid maybemodwarning maybeexports 'where' header_body {% fileSrcSpan >>= \ loc -> acs (\cs -> (L loc (HsModule (EpAnn (spanAsAnchor loc) (AnnsModule [mj AnnModule $1,mj AnnWhere $5] (AnnList Nothing Nothing Nothing [] [])) cs) NoLayoutInfo (Just $2) $4 $6 [] $3 Nothing ))) } | header_body2 {% fileSrcSpan >>= \ loc -> return (L loc (HsModule noAnn NoLayoutInfo Nothing 
Nothing $1 [] Nothing Nothing)) } header_body :: { [LImportDecl GhcPs] } : '{' header_top { $2 } | vocurly header_top { $2 } header_body2 :: { [LImportDecl GhcPs] } : '{' header_top { $2 } | missing_module_keyword header_top { $2 } header_top :: { [LImportDecl GhcPs] } : semis header_top_importdecls { $2 } header_top_importdecls :: { [LImportDecl GhcPs] } : importdecls_semi { $1 } | importdecls { $1 } ----------------------------------------------------------------------------- -- The Export List maybeexports :: { (Maybe (LocatedL [LIE GhcPs])) } : '(' exportlist ')' {% fmap Just $ amsrl (sLL $1 $> (fromOL $ snd $2)) (AnnList Nothing (Just $ mop $1) (Just $ mcp $3) (fst $2) []) } | {- empty -} { Nothing } exportlist :: { ([AddEpAnn], OrdList (LIE GhcPs)) } : exportlist1 { ([], $1) } | {- empty -} { ([], nilOL) } -- trailing comma: | exportlist1 ',' {% case $1 of SnocOL hs t -> do t' <- addTrailingCommaA t (gl $2) return ([], snocOL hs t')} | ',' { ([mj AnnComma $1], nilOL) } exportlist1 :: { OrdList (LIE GhcPs) } : exportlist1 ',' export {% let ls = $1 in if isNilOL ls then return (ls `appOL` $3) else case ls of SnocOL hs t -> do t' <- addTrailingCommaA t (gl $2) return (snocOL hs t' `appOL` $3)} | export { $1 } -- No longer allow things like [] and (,,,) to be exported -- They are built in syntax, always available export :: { OrdList (LIE GhcPs) } : qcname_ext export_subspec {% mkModuleImpExp (fst $ unLoc $2) $1 (snd $ unLoc $2) >>= \ie -> fmap (unitOL . reLocA) (return (sLL (reLoc $1) $> ie)) } | 'module' modid {% fmap (unitOL . 
reLocA) (acs (\cs -> sLL $1 (reLoc $>) (IEModuleContents (EpAnn (glR $1) [mj AnnModule $1] cs) $2))) } | 'pattern' qcon { unitOL (reLocA (sLL $1 (reLocN $>) (IEVar noExtField (sLLa $1 (reLocN $>) (IEPattern (glAA $1) $2))))) } export_subspec :: { Located ([AddEpAnn],ImpExpSubSpec) } : {- empty -} { sL0 ([],ImpExpAbs) } | '(' qcnames ')' {% mkImpExpSubSpec (reverse (snd $2)) >>= \(as,ie) -> return $ sLL $1 $> (as ++ [mop $1,mcp $3] ++ fst $2, ie) } qcnames :: { ([AddEpAnn], [LocatedA ImpExpQcSpec]) } : {- empty -} { ([],[]) } | qcnames1 { $1 } qcnames1 :: { ([AddEpAnn], [LocatedA ImpExpQcSpec]) } -- A reversed list : qcnames1 ',' qcname_ext_w_wildcard {% case (snd $1) of (l@(L la ImpExpQcWildcard):t) -> do { l' <- addTrailingCommaA l (gl $2) ; return ([mj AnnDotdot (reLoc l), mj AnnComma $2] ,(snd (unLoc $3) : l' : t)) } (l:t) -> do { l' <- addTrailingCommaA l (gl $2) ; return (fst $1 ++ fst (unLoc $3) , snd (unLoc $3) : l' : t)} } -- Annotations re-added in mkImpExpSubSpec | qcname_ext_w_wildcard { (fst (unLoc $1),[snd (unLoc $1)]) } -- Variable, data constructor or wildcard -- or tagged type constructor qcname_ext_w_wildcard :: { Located ([AddEpAnn], LocatedA ImpExpQcSpec) } : qcname_ext { sL1A $1 ([],$1) } | '..' { sL1 $1 ([mj AnnDotdot $1], sL1a $1 ImpExpQcWildcard) } qcname_ext :: { LocatedA ImpExpQcSpec } : qcname { reLocA $ sL1N $1 (ImpExpQcName $1) } | 'type' oqtycon {% do { n <- mkTypeImpExp $2 ; return $ sLLa $1 (reLocN $>) (ImpExpQcType (glAA $1) n) }} qcname :: { LocatedN RdrName } -- Variable or type constructor : qvar { $1 } -- Things which look like functions -- Note: This includes record selectors but -- also (-.->), see #11432 | oqtycon_no_varcon { $1 } -- see Note [Type constructors in export list] ----------------------------------------------------------------------------- -- Import Declarations -- importdecls and topdecls must contain at least one declaration; -- top handles the fact that these may be optional. 
-- One or more semicolons semis1 :: { Located [TrailingAnn] } semis1 : semis1 ';' { sLL $1 $> $ if isZeroWidthSpan (gl $2) then (unLoc $1) else (AddSemiAnn (glAA $2) : (unLoc $1)) } | ';' { sL1 $1 $ msemi $1 } -- Zero or more semicolons semis :: { [TrailingAnn] } semis : semis ';' { if isZeroWidthSpan (gl $2) then $1 else (AddSemiAnn (glAA $2) : $1) } | {- empty -} { [] } -- No trailing semicolons, non-empty importdecls :: { [LImportDecl GhcPs] } importdecls : importdecls_semi importdecl { $2 : $1 } -- May have trailing semicolons, can be empty importdecls_semi :: { [LImportDecl GhcPs] } importdecls_semi : importdecls_semi importdecl semis1 {% do { i <- amsAl $2 (comb2 (reLoc $2) $3) (reverse $ unLoc $3) ; return (i : $1)} } | {- empty -} { [] } importdecl :: { LImportDecl GhcPs } : 'import' maybe_src maybe_safe optqualified maybe_pkg modid optqualified maybeas maybeimpspec {% do { ; let { ; mPreQual = unLoc $4 ; mPostQual = unLoc $7 } ; checkImportDecl mPreQual mPostQual ; let anns = EpAnnImportDecl { importDeclAnnImport = glAA $1 , importDeclAnnPragma = fst $ fst $2 , importDeclAnnSafe = fst $3 , importDeclAnnQualified = fst $ importDeclQualifiedStyle mPreQual mPostQual , importDeclAnnPackage = fst $5 , importDeclAnnAs = fst $8 } ; fmap reLocA $ acs (\cs -> L (comb5 $1 (reLoc $6) $7 (snd $8) $9) $ ImportDecl { ideclExt = EpAnn (glR $1) anns cs , ideclSourceSrc = snd $ fst $2 , ideclName = $6, ideclPkgQual = snd $5 , ideclSource = snd $2, ideclSafe = snd $3 , ideclQualified = snd $ importDeclQualifiedStyle mPreQual mPostQual , ideclImplicit = False , ideclAs = unLoc (snd $8) , ideclHiding = unLoc $9 }) } } maybe_src :: { ((Maybe (EpaLocation,EpaLocation),SourceText),IsBootInterface) } : '{-# SOURCE' '#-}' { ((Just (glAA $1,glAA $2),getSOURCE_PRAGs $1) , IsBoot) } | {- empty -} { ((Nothing,NoSourceText),NotBoot) } maybe_safe :: { (Maybe EpaLocation,Bool) } : 'safe' { (Just (glAA $1),True) } | {- empty -} { (Nothing, False) } maybe_pkg :: { (Maybe EpaLocation, 
RawPkgQual) } : STRING {% do { let { pkgFS = getSTRING $1 } ; unless (looksLikePackageName (unpackFS pkgFS)) $ addError $ mkPlainErrorMsgEnvelope (getLoc $1) $ (PsErrInvalidPackageName pkgFS) ; return (Just (glAA $1), RawPkgQual (StringLiteral (getSTRINGs $1) pkgFS Nothing)) } } | {- empty -} { (Nothing,NoRawPkgQual) } optqualified :: { Located (Maybe EpaLocation) } : 'qualified' { sL1 $1 (Just (glAA $1)) } | {- empty -} { noLoc Nothing } maybeas :: { (Maybe EpaLocation,Located (Maybe (LocatedA ModuleName))) } : 'as' modid { (Just (glAA $1) ,sLL $1 (reLoc $>) (Just $2)) } | {- empty -} { (Nothing,noLoc Nothing) } maybeimpspec :: { Located (Maybe (Bool, LocatedL [LIE GhcPs])) } : impspec {% let (b, ie) = unLoc $1 in checkImportSpec ie >>= \checkedIe -> return (L (gl $1) (Just (b, checkedIe))) } | {- empty -} { noLoc Nothing } impspec :: { Located (Bool, LocatedL [LIE GhcPs]) } : '(' exportlist ')' {% do { es <- amsrl (sLL $1 $> $ fromOL $ snd $2) (AnnList Nothing (Just $ mop $1) (Just $ mcp $3) (fst $2) []) ; return $ sLL $1 $> (False, es)} } | 'hiding' '(' exportlist ')' {% do { es <- amsrl (sLL $1 $> $ fromOL $ snd $3) (AnnList Nothing (Just $ mop $2) (Just $ mcp $4) (mj AnnHiding $1:fst $3) []) ; return $ sLL $1 $> (True, es)} } ----------------------------------------------------------------------------- -- Fixity Declarations prec :: { Maybe (Located (SourceText,Int)) } : {- empty -} { Nothing } | INTEGER { Just (sL1 $1 (getINTEGERs $1,fromInteger (il_value (getINTEGER $1)))) } infix :: { Located FixityDirection } : 'infix' { sL1 $1 InfixN } | 'infixl' { sL1 $1 InfixL } | 'infixr' { sL1 $1 InfixR } ops :: { Located (OrdList (LocatedN RdrName)) } : ops ',' op {% case (unLoc $1) of SnocOL hs t -> do t' <- addTrailingCommaN t (gl $2) return (sLL $1 (reLocN $>) (snocOL hs t' `appOL` unitOL $3)) } | op { sL1N $1 (unitOL $1) } ----------------------------------------------------------------------------- -- Top-Level Declarations -- No trailing semicolons, non-empty 
topdecls :: { OrdList (LHsDecl GhcPs) } : topdecls_semi topdecl { $1 `snocOL` $2 } -- May have trailing semicolons, can be empty topdecls_semi :: { OrdList (LHsDecl GhcPs) } : topdecls_semi topdecl semis1 {% do { t <- amsAl $2 (comb2 (reLoc $2) $3) (reverse $ unLoc $3) ; return ($1 `snocOL` t) }} | {- empty -} { nilOL } ----------------------------------------------------------------------------- -- Each topdecl accumulates prior comments -- No trailing semicolons, non-empty topdecls_cs :: { OrdList (LHsDecl GhcPs) } : topdecls_cs_semi topdecl_cs { $1 `snocOL` $2 } -- May have trailing semicolons, can be empty topdecls_cs_semi :: { OrdList (LHsDecl GhcPs) } : topdecls_cs_semi topdecl_cs semis1 {% do { t <- amsAl $2 (comb2 (reLoc $2) $3) (reverse $ unLoc $3) ; return ($1 `snocOL` t) }} | {- empty -} { nilOL } -- Each topdecl accumulates prior comments topdecl_cs :: { LHsDecl GhcPs } topdecl_cs : topdecl {% commentsPA $1 } ----------------------------------------------------------------------------- topdecl :: { LHsDecl GhcPs } : cl_decl { sL1 $1 (TyClD noExtField (unLoc $1)) } | ty_decl { sL1 $1 (TyClD noExtField (unLoc $1)) } | standalone_kind_sig { sL1 $1 (KindSigD noExtField (unLoc $1)) } | inst_decl { sL1 $1 (InstD noExtField (unLoc $1)) } | stand_alone_deriving { sL1 $1 (DerivD noExtField (unLoc $1)) } | role_annot { sL1 $1 (RoleAnnotD noExtField (unLoc $1)) } | 'default' '(' comma_types0 ')' {% acsA (\cs -> sLL $1 $> (DefD noExtField (DefaultDecl (EpAnn (glR $1) [mj AnnDefault $1,mop $2,mcp $4] cs) $3))) } | 'foreign' fdecl {% acsA (\cs -> sLL $1 $> ((snd $ unLoc $2) (EpAnn (glR $1) (mj AnnForeign $1:(fst $ unLoc $2)) cs))) } | '{-# DEPRECATED' deprecations '#-}' {% acsA (\cs -> sLL $1 $> $ WarningD noExtField (Warnings (EpAnn (glR $1) [mo $1,mc $3] cs) (getDEPRECATED_PRAGs $1) (fromOL $2))) } | '{-# WARNING' warnings '#-}' {% acsA (\cs -> sLL $1 $> $ WarningD noExtField (Warnings (EpAnn (glR $1) [mo $1,mc $3] cs) (getWARNING_PRAGs $1) (fromOL $2))) } | '{-# 
RULES' rules '#-}' {% acsA (\cs -> sLL $1 $> $ RuleD noExtField (HsRules (EpAnn (glR $1) [mo $1,mc $3] cs) (getRULES_PRAGs $1) (reverse $2))) } | annotation { $1 } | decl_no_th { $1 } -- Template Haskell Extension -- The $(..) form is one possible form of infixexp -- but we treat an arbitrary expression just as if -- it had a $(..) wrapped around it | infixexp {% runPV (unECP $1) >>= \ $1 -> do { d <- mkSpliceDecl $1 ; commentsPA d }} -- Type classes -- cl_decl :: { LTyClDecl GhcPs } : 'class' tycl_hdr fds where_cls {% (mkClassDecl (comb4 $1 $2 $3 $4) $2 $3 (sndOf3 $ unLoc $4) (thdOf3 $ unLoc $4)) (mj AnnClass $1:(fst $ unLoc $3)++(fstOf3 $ unLoc $4)) } -- Type declarations (toplevel) -- ty_decl :: { LTyClDecl GhcPs } -- ordinary type synonyms : 'type' type '=' ktype -- Note ktype, not sigtype, on the right of '=' -- We allow an explicit for-all but we don't insert one -- in type Foo a = (b,b) -- Instead we just say b is out of scope -- -- Note the use of type for the head; this allows -- infix type constructors to be declared {% mkTySynonym (comb2A $1 $4) $2 $4 [mj AnnType $1,mj AnnEqual $3] } -- type family declarations | 'type' 'family' type opt_tyfam_kind_sig opt_injective_info where_type_family -- Note the use of type for the head; this allows -- infix type constructors to be declared {% mkFamDecl (comb5 $1 (reLoc $3) $4 $5 $6) (snd $ unLoc $6) TopLevel $3 (snd $ unLoc $4) (snd $ unLoc $5) (mj AnnType $1:mj AnnFamily $2:(fst $ unLoc $4) ++ (fst $ unLoc $5) ++ (fst $ unLoc $6)) } -- ordinary data type or newtype declaration | data_or_newtype capi_ctype tycl_hdr constrs maybe_derivings {% mkTyData (comb4 $1 $3 $4 $5) (snd $ unLoc $1) $2 $3 Nothing (reverse (snd $ unLoc $4)) (fmap reverse $5) ((fst $ unLoc $1):(fst $ unLoc $4)) } -- We need the location on tycl_hdr in case -- constrs and deriving are both empty -- ordinary GADT declaration | data_or_newtype capi_ctype tycl_hdr opt_kind_sig gadt_constrlist maybe_derivings {% mkTyData (comb4 $1 $3 $5 $6) (snd $ 
unLoc $1) $2 $3 (snd $ unLoc $4) (snd $ unLoc $5) (fmap reverse $6) ((fst $ unLoc $1):(fst $ unLoc $4)++(fst $ unLoc $5)) } -- We need the location on tycl_hdr in case -- constrs and deriving are both empty -- data/newtype family | 'data' 'family' type opt_datafam_kind_sig {% mkFamDecl (comb3 $1 $2 $4) DataFamily TopLevel $3 (snd $ unLoc $4) Nothing (mj AnnData $1:mj AnnFamily $2:(fst $ unLoc $4)) } -- standalone kind signature standalone_kind_sig :: { LStandaloneKindSig GhcPs } : 'type' sks_vars '::' sigktype {% mkStandaloneKindSig (comb2A $1 $4) (L (gl $2) $ unLoc $2) $4 [mj AnnType $1,mu AnnDcolon $3]} -- See also: sig_vars sks_vars :: { Located [LocatedN RdrName] } -- Returned in reverse order : sks_vars ',' oqtycon {% case unLoc $1 of (h:t) -> do h' <- addTrailingCommaN h (gl $2) return (sLL $1 (reLocN $>) ($3 : h' : t)) } | oqtycon { sL1N $1 [$1] } inst_decl :: { LInstDecl GhcPs } : 'instance' overlap_pragma inst_type where_inst {% do { (binds, sigs, _, ats, adts, _) <- cvBindsAndSigs (snd $ unLoc $4) ; let anns = (mj AnnInstance $1 : (fst $ unLoc $4)) ; let cid cs = ClsInstDecl { cid_ext = (EpAnn (glR $1) anns cs, NoAnnSortKey) , cid_poly_ty = $3, cid_binds = binds , cid_sigs = mkClassOpSigs sigs , cid_tyfam_insts = ats , cid_overlap_mode = $2 , cid_datafam_insts = adts } ; acsA (\cs -> L (comb3 $1 (reLoc $3) $4) (ClsInstD { cid_d_ext = noExtField, cid_inst = cid cs })) } } -- type instance declarations | 'type' 'instance' ty_fam_inst_eqn {% mkTyFamInst (comb2A $1 $3) (unLoc $3) (mj AnnType $1:mj AnnInstance $2:[]) } -- data/newtype instance declaration | data_or_newtype 'instance' capi_ctype datafam_inst_hdr constrs maybe_derivings {% mkDataFamInst (comb4 $1 $4 $5 $6) (snd $ unLoc $1) $3 (unLoc $4) Nothing (reverse (snd $ unLoc $5)) (fmap reverse $6) ((fst $ unLoc $1):mj AnnInstance $2:(fst $ unLoc $5)) } -- GADT instance declaration | data_or_newtype 'instance' capi_ctype datafam_inst_hdr opt_kind_sig gadt_constrlist maybe_derivings {% mkDataFamInst (comb4 
$1 $4 $6 $7) (snd $ unLoc $1) $3 (unLoc $4) (snd $ unLoc $5) (snd $ unLoc $6) (fmap reverse $7) ((fst $ unLoc $1):mj AnnInstance $2 :(fst $ unLoc $5)++(fst $ unLoc $6)) } overlap_pragma :: { Maybe (LocatedP OverlapMode) } : '{-# OVERLAPPABLE' '#-}' {% fmap Just $ amsrp (sLL $1 $> (Overlappable (getOVERLAPPABLE_PRAGs $1))) (AnnPragma (mo $1) (mc $2) []) } | '{-# OVERLAPPING' '#-}' {% fmap Just $ amsrp (sLL $1 $> (Overlapping (getOVERLAPPING_PRAGs $1))) (AnnPragma (mo $1) (mc $2) []) } | '{-# OVERLAPS' '#-}' {% fmap Just $ amsrp (sLL $1 $> (Overlaps (getOVERLAPS_PRAGs $1))) (AnnPragma (mo $1) (mc $2) []) } | '{-# INCOHERENT' '#-}' {% fmap Just $ amsrp (sLL $1 $> (Incoherent (getINCOHERENT_PRAGs $1))) (AnnPragma (mo $1) (mc $2) []) } | {- empty -} { Nothing } deriv_strategy_no_via :: { LDerivStrategy GhcPs } : 'stock' {% acsA (\cs -> sL1 $1 (StockStrategy (EpAnn (glR $1) [mj AnnStock $1] cs))) } | 'anyclass' {% acsA (\cs -> sL1 $1 (AnyclassStrategy (EpAnn (glR $1) [mj AnnAnyclass $1] cs))) } | 'newtype' {% acsA (\cs -> sL1 $1 (NewtypeStrategy (EpAnn (glR $1) [mj AnnNewtype $1] cs))) } deriv_strategy_via :: { LDerivStrategy GhcPs } : 'via' sigktype {% acsA (\cs -> sLLlA $1 $> (ViaStrategy (XViaStrategyPs (EpAnn (glR $1) [mj AnnVia $1] cs) $2))) } deriv_standalone_strategy :: { Maybe (LDerivStrategy GhcPs) } : 'stock' {% fmap Just $ acsA (\cs -> sL1 $1 (StockStrategy (EpAnn (glR $1) [mj AnnStock $1] cs))) } | 'anyclass' {% fmap Just $ acsA (\cs -> sL1 $1 (AnyclassStrategy (EpAnn (glR $1) [mj AnnAnyclass $1] cs))) } | 'newtype' {% fmap Just $ acsA (\cs -> sL1 $1 (NewtypeStrategy (EpAnn (glR $1) [mj AnnNewtype $1] cs))) } | deriv_strategy_via { Just $1 } | {- empty -} { Nothing } -- Injective type families opt_injective_info :: { Located ([AddEpAnn], Maybe (LInjectivityAnn GhcPs)) } : {- empty -} { noLoc ([], Nothing) } | '|' injectivity_cond { sLL $1 (reLoc $>) ([mj AnnVbar $1] , Just ($2)) } injectivity_cond :: { LInjectivityAnn GhcPs } : tyvarid '->' inj_varids {% acsA 
(\cs -> sLL (reLocN $1) $> (InjectivityAnn (EpAnn (glNR $1) [mu AnnRarrow $2] cs) $1 (reverse (unLoc $3)))) } inj_varids :: { Located [LocatedN RdrName] } : inj_varids tyvarid { sLL $1 (reLocN $>) ($2 : unLoc $1) } | tyvarid { sL1N $1 [$1] } -- Closed type families where_type_family :: { Located ([AddEpAnn],FamilyInfo GhcPs) } : {- empty -} { noLoc ([],OpenTypeFamily) } | 'where' ty_fam_inst_eqn_list { sLL $1 $> (mj AnnWhere $1:(fst $ unLoc $2) ,ClosedTypeFamily (fmap reverse $ snd $ unLoc $2)) } ty_fam_inst_eqn_list :: { Located ([AddEpAnn],Maybe [LTyFamInstEqn GhcPs]) } : '{' ty_fam_inst_eqns '}' { sLL $1 $> ([moc $1,mcc $3] ,Just (unLoc $2)) } | vocurly ty_fam_inst_eqns close { let (L loc _) = $2 in L loc ([],Just (unLoc $2)) } | '{' '..' '}' { sLL $1 $> ([moc $1,mj AnnDotdot $2 ,mcc $3],Nothing) } | vocurly '..' close { let (L loc _) = $2 in L loc ([mj AnnDotdot $2],Nothing) } ty_fam_inst_eqns :: { Located [LTyFamInstEqn GhcPs] } : ty_fam_inst_eqns ';' ty_fam_inst_eqn {% let (L loc eqn) = $3 in case unLoc $1 of [] -> return (sLLlA $1 $> (L loc eqn : unLoc $1)) (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (sLLlA $1 $> ($3 : h' : t)) } | ty_fam_inst_eqns ';' {% case unLoc $1 of [] -> return (sLL $1 $> (unLoc $1)) (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (sLL $1 $> (h':t)) } | ty_fam_inst_eqn { sLLAA $1 $> [$1] } | {- empty -} { noLoc [] } ty_fam_inst_eqn :: { LTyFamInstEqn GhcPs } : 'forall' tv_bndrs '.' 
type '=' ktype {% do { hintExplicitForall $1 ; tvbs <- fromSpecTyVarBndrs $2 ; let loc = comb2A $1 $> ; cs <- getCommentsFor loc ; mkTyFamInstEqn loc (mkHsOuterExplicit (EpAnn (glR $1) (mu AnnForall $1, mj AnnDot $3) cs) tvbs) $4 $6 [mj AnnEqual $5] }} | type '=' ktype {% mkTyFamInstEqn (comb2A (reLoc $1) $>) mkHsOuterImplicit $1 $3 (mj AnnEqual $2:[]) } -- Note the use of type for the head; this allows -- infix type constructors and type patterns -- Associated type family declarations -- -- * They have a different syntax than on the toplevel (no family special -- identifier). -- -- * They also need to be separate from instances; otherwise, data family -- declarations without a kind signature cause parsing conflicts with empty -- data declarations. -- at_decl_cls :: { LHsDecl GhcPs } : -- data family declarations, with optional 'family' keyword 'data' opt_family type opt_datafam_kind_sig {% liftM mkTyClD (mkFamDecl (comb3 $1 (reLoc $3) $4) DataFamily NotTopLevel $3 (snd $ unLoc $4) Nothing (mj AnnData $1:$2++(fst $ unLoc $4))) } -- type family declarations, with optional 'family' keyword -- (can't use opt_instance because you get shift/reduce errors | 'type' type opt_at_kind_inj_sig {% liftM mkTyClD (mkFamDecl (comb3 $1 (reLoc $2) $3) OpenTypeFamily NotTopLevel $2 (fst . snd $ unLoc $3) (snd . snd $ unLoc $3) (mj AnnType $1:(fst $ unLoc $3)) )} | 'type' 'family' type opt_at_kind_inj_sig {% liftM mkTyClD (mkFamDecl (comb3 $1 (reLoc $3) $4) OpenTypeFamily NotTopLevel $3 (fst . snd $ unLoc $4) (snd . 
snd $ unLoc $4) (mj AnnType $1:mj AnnFamily $2:(fst $ unLoc $4)))} -- default type instances, with optional 'instance' keyword | 'type' ty_fam_inst_eqn {% liftM mkInstD (mkTyFamInst (comb2A $1 $2) (unLoc $2) [mj AnnType $1]) } | 'type' 'instance' ty_fam_inst_eqn {% liftM mkInstD (mkTyFamInst (comb2A $1 $3) (unLoc $3) (mj AnnType $1:mj AnnInstance $2:[]) )} opt_family :: { [AddEpAnn] } : {- empty -} { [] } | 'family' { [mj AnnFamily $1] } opt_instance :: { [AddEpAnn] } : {- empty -} { [] } | 'instance' { [mj AnnInstance $1] } -- Associated type instances -- at_decl_inst :: { LInstDecl GhcPs } -- type instance declarations, with optional 'instance' keyword : 'type' opt_instance ty_fam_inst_eqn -- Note the use of type for the head; this allows -- infix type constructors and type patterns {% mkTyFamInst (comb2A $1 $3) (unLoc $3) (mj AnnType $1:$2) } -- data/newtype instance declaration, with optional 'instance' keyword | data_or_newtype opt_instance capi_ctype datafam_inst_hdr constrs maybe_derivings {% mkDataFamInst (comb4 $1 $4 $5 $6) (snd $ unLoc $1) $3 (unLoc $4) Nothing (reverse (snd $ unLoc $5)) (fmap reverse $6) ((fst $ unLoc $1):$2++(fst $ unLoc $5)) } -- GADT instance declaration, with optional 'instance' keyword | data_or_newtype opt_instance capi_ctype datafam_inst_hdr opt_kind_sig gadt_constrlist maybe_derivings {% mkDataFamInst (comb4 $1 $4 $6 $7) (snd $ unLoc $1) $3 (unLoc $4) (snd $ unLoc $5) (snd $ unLoc $6) (fmap reverse $7) ((fst $ unLoc $1):$2++(fst $ unLoc $5)++(fst $ unLoc $6)) } data_or_newtype :: { Located (AddEpAnn, NewOrData) } : 'data' { sL1 $1 (mj AnnData $1,DataType) } | 'newtype' { sL1 $1 (mj AnnNewtype $1,NewType) } -- Family result/return kind signatures opt_kind_sig :: { Located ([AddEpAnn], Maybe (LHsKind GhcPs)) } : { noLoc ([] , Nothing) } | '::' kind { sLL $1 (reLoc $>) ([mu AnnDcolon $1], Just $2) } opt_datafam_kind_sig :: { Located ([AddEpAnn], LFamilyResultSig GhcPs) } : { noLoc ([] , noLocA (NoSig noExtField) )} | '::' kind { sLL 
$1 (reLoc $>) ([mu AnnDcolon $1], sLLa $1 (reLoc $>) (KindSig noExtField $2))} opt_tyfam_kind_sig :: { Located ([AddEpAnn], LFamilyResultSig GhcPs) } : { noLoc ([] , noLocA (NoSig noExtField) )} | '::' kind { sLL $1 (reLoc $>) ([mu AnnDcolon $1], sLLa $1 (reLoc $>) (KindSig noExtField $2))} | '=' tv_bndr {% do { tvb <- fromSpecTyVarBndr $2 ; return $ sLL $1 (reLoc $>) ([mj AnnEqual $1], sLLa $1 (reLoc $>) (TyVarSig noExtField tvb))} } opt_at_kind_inj_sig :: { Located ([AddEpAnn], ( LFamilyResultSig GhcPs , Maybe (LInjectivityAnn GhcPs)))} : { noLoc ([], (noLocA (NoSig noExtField), Nothing)) } | '::' kind { sLL $1 (reLoc $>) ( [mu AnnDcolon $1] , (sL1a (reLoc $>) (KindSig noExtField $2), Nothing)) } | '=' tv_bndr_no_braces '|' injectivity_cond {% do { tvb <- fromSpecTyVarBndr $2 ; return $ sLL $1 (reLoc $>) ([mj AnnEqual $1, mj AnnVbar $3] , (sLLa $1 (reLoc $2) (TyVarSig noExtField tvb), Just $4))} } -- tycl_hdr parses the header of a class or data type decl, -- which takes the form -- T a b -- Eq a => T a -- (Eq a, Ord b) => T a b -- T Int [a] -- for associated types -- Rather a lot of inlining here, else we get reduce/reduce errors tycl_hdr :: { Located (Maybe (LHsContext GhcPs), LHsType GhcPs) } : context '=>' type {% acs (\cs -> (sLLAA $1 $> (Just (addTrailingDarrowC $1 $2 cs), $3))) } | type { sL1A $1 (Nothing, $1) } datafam_inst_hdr :: { Located (Maybe (LHsContext GhcPs), HsOuterFamEqnTyVarBndrs GhcPs, LHsType GhcPs) } : 'forall' tv_bndrs '.' context '=>' type {% hintExplicitForall $1 >> fromSpecTyVarBndrs $2 >>= \tvbs -> (acs (\cs -> (sLL $1 (reLoc $>) (Just ( addTrailingDarrowC $4 $5 cs) , mkHsOuterExplicit (EpAnn (glR $1) (mu AnnForall $1, mj AnnDot $3) emptyComments) tvbs, $6)))) } | 'forall' tv_bndrs '.' 
type {% do { hintExplicitForall $1 ; tvbs <- fromSpecTyVarBndrs $2 ; let loc = comb2 $1 (reLoc $>) ; cs <- getCommentsFor loc ; return (sL loc (Nothing, mkHsOuterExplicit (EpAnn (glR $1) (mu AnnForall $1, mj AnnDot $3) cs) tvbs, $4)) } } | context '=>' type {% acs (\cs -> (sLLAA $1 $>(Just (addTrailingDarrowC $1 $2 cs), mkHsOuterImplicit, $3))) } | type { sL1A $1 (Nothing, mkHsOuterImplicit, $1) } capi_ctype :: { Maybe (LocatedP CType) } capi_ctype : '{-# CTYPE' STRING STRING '#-}' {% fmap Just $ amsrp (sLL $1 $> (CType (getCTYPEs $1) (Just (Header (getSTRINGs $2) (getSTRING $2))) (getSTRINGs $3,getSTRING $3))) (AnnPragma (mo $1) (mc $4) [mj AnnHeader $2,mj AnnVal $3]) } | '{-# CTYPE' STRING '#-}' {% fmap Just $ amsrp (sLL $1 $> (CType (getCTYPEs $1) Nothing (getSTRINGs $2, getSTRING $2))) (AnnPragma (mo $1) (mc $3) [mj AnnVal $2]) } | { Nothing } ----------------------------------------------------------------------------- -- Stand-alone deriving -- Glasgow extension: stand-alone deriving declarations stand_alone_deriving :: { LDerivDecl GhcPs } : 'deriving' deriv_standalone_strategy 'instance' overlap_pragma inst_type {% do { let { err = text "in the stand-alone deriving instance" <> colon <+> quotes (ppr $5) } ; acsA (\cs -> sLL $1 (reLoc $>) (DerivDecl (EpAnn (glR $1) [mj AnnDeriving $1, mj AnnInstance $3] cs) (mkHsWildCardBndrs $5) $2 $4)) }} ----------------------------------------------------------------------------- -- Role annotations role_annot :: { LRoleAnnotDecl GhcPs } role_annot : 'type' 'role' oqtycon maybe_roles {% mkRoleAnnotDecl (comb3N $1 $4 $3) $3 (reverse (unLoc $4)) [mj AnnType $1,mj AnnRole $2] } -- Reversed! 
maybe_roles :: { Located [Located (Maybe FastString)] } maybe_roles : {- empty -} { noLoc [] } | roles { $1 } roles :: { Located [Located (Maybe FastString)] } roles : role { sLL $1 $> [$1] } | roles role { sLL $1 $> $ $2 : unLoc $1 } -- read it in as a varid for better error messages role :: { Located (Maybe FastString) } role : VARID { sL1 $1 $ Just $ getVARID $1 } | '_' { sL1 $1 Nothing } -- Pattern synonyms -- Glasgow extension: pattern synonyms pattern_synonym_decl :: { LHsDecl GhcPs } : 'pattern' pattern_synonym_lhs '=' pat {% let (name, args, as ) = $2 in acsA (\cs -> sLL $1 (reLoc $>) . ValD noExtField $ mkPatSynBind name args $4 ImplicitBidirectional (EpAnn (glR $1) (as ++ [mj AnnPattern $1, mj AnnEqual $3]) cs)) } | 'pattern' pattern_synonym_lhs '<-' pat {% let (name, args, as) = $2 in acsA (\cs -> sLL $1 (reLoc $>) . ValD noExtField $ mkPatSynBind name args $4 Unidirectional (EpAnn (glR $1) (as ++ [mj AnnPattern $1,mu AnnLarrow $3]) cs)) } | 'pattern' pattern_synonym_lhs '<-' pat where_decls {% do { let (name, args, as) = $2 ; mg <- mkPatSynMatchGroup name $5 ; acsA (\cs -> sLL $1 (reLoc $>) . 
ValD noExtField $ mkPatSynBind name args $4 (ExplicitBidirectional mg) (EpAnn (glR $1) (as ++ [mj AnnPattern $1,mu AnnLarrow $3]) cs)) }} pattern_synonym_lhs :: { (LocatedN RdrName, HsPatSynDetails GhcPs, [AddEpAnn]) } : con vars0 { ($1, PrefixCon noTypeArgs $2, []) } | varid conop varid { ($2, InfixCon $1 $3, []) } | con '{' cvars1 '}' { ($1, RecCon $3, [moc $2, mcc $4] ) } vars0 :: { [LocatedN RdrName] } : {- empty -} { [] } | varid vars0 { $1 : $2 } cvars1 :: { [RecordPatSynField GhcPs] } : var { [RecordPatSynField (mkFieldOcc $1) $1] } | var ',' cvars1 {% do { h <- addTrailingCommaN $1 (gl $2) ; return ((RecordPatSynField (mkFieldOcc h) h) : $3 )}} where_decls :: { LocatedL (OrdList (LHsDecl GhcPs)) } : 'where' '{' decls '}' {% amsrl (sLL $1 $> (snd $ unLoc $3)) (AnnList (Just $ glR $3) (Just $ moc $2) (Just $ mcc $4) [mj AnnWhere $1] (fst $ unLoc $3)) } | 'where' vocurly decls close {% amsrl (sLL $1 $3 (snd $ unLoc $3)) (AnnList (Just $ glR $3) Nothing Nothing [mj AnnWhere $1] (fst $ unLoc $3))} pattern_synonym_sig :: { LSig GhcPs } : 'pattern' con_list '::' sigtype {% acsA (\cs -> sLL $1 (reLoc $>) $ PatSynSig (EpAnn (glR $1) (AnnSig (mu AnnDcolon $3) [mj AnnPattern $1]) cs) (unLoc $2) $4) } qvarcon :: { LocatedN RdrName } : qvar { $1 } | qcon { $1 } ----------------------------------------------------------------------------- -- Nested declarations -- Declaration in class bodies -- decl_cls :: { LHsDecl GhcPs } decl_cls : at_decl_cls { $1 } | decl { $1 } -- A 'default' signature used with the generic-programming extension | 'default' infixexp '::' sigtype {% runPV (unECP $2) >>= \ $2 -> do { v <- checkValSigLhs $2 ; let err = text "in default signature" <> colon <+> quotes (ppr $2) ; acsA (\cs -> sLL $1 (reLoc $>) $ SigD noExtField $ ClassOpSig (EpAnn (glR $1) (AnnSig (mu AnnDcolon $3) [mj AnnDefault $1]) cs) True [v] $4) }} decls_cls :: { Located ([AddEpAnn],OrdList (LHsDecl GhcPs)) } -- Reversed : decls_cls ';' decl_cls {% if isNilOL (snd $ unLoc $1) then 
return (sLLlA $1 $> ((fst $ unLoc $1) ++ (mz AnnSemi $2) , unitOL $3)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (sLLlA $1 $> (fst $ unLoc $1 , snocOL hs t' `appOL` unitOL $3)) } | decls_cls ';' {% if isNilOL (snd $ unLoc $1) then return (sLL $1 $> ( (fst $ unLoc $1) ++ (mz AnnSemi $2) ,snd $ unLoc $1)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (sLL $1 $> (fst $ unLoc $1 , snocOL hs t')) } | decl_cls { sL1A $1 ([], unitOL $1) } | {- empty -} { noLoc ([],nilOL) } decllist_cls :: { Located ([AddEpAnn] , OrdList (LHsDecl GhcPs) , LayoutInfo) } -- Reversed : '{' decls_cls '}' { sLL $1 $> (moc $1:mcc $3:(fst $ unLoc $2) ,snd $ unLoc $2, ExplicitBraces) } | vocurly decls_cls close { let { L l (anns, decls) = $2 } in L l (anns, decls, VirtualBraces (getVOCURLY $1)) } -- Class body -- where_cls :: { Located ([AddEpAnn] ,(OrdList (LHsDecl GhcPs)) -- Reversed ,LayoutInfo) } -- No implicit parameters -- May have type declarations : 'where' decllist_cls { sLL $1 $> (mj AnnWhere $1:(fstOf3 $ unLoc $2) ,sndOf3 $ unLoc $2,thdOf3 $ unLoc $2) } | {- empty -} { noLoc ([],nilOL,NoLayoutInfo) } -- Declarations in instance bodies -- decl_inst :: { Located (OrdList (LHsDecl GhcPs)) } decl_inst : at_decl_inst { sL1A $1 (unitOL (sL1 $1 (InstD noExtField (unLoc $1)))) } | decl { sL1A $1 (unitOL $1) } decls_inst :: { Located ([AddEpAnn],OrdList (LHsDecl GhcPs)) } -- Reversed : decls_inst ';' decl_inst {% if isNilOL (snd $ unLoc $1) then return (sLL $1 $> ((fst $ unLoc $1) ++ (mz AnnSemi $2) , unLoc $3)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (sLL $1 $> (fst $ unLoc $1 , snocOL hs t' `appOL` unLoc $3)) } | decls_inst ';' {% if isNilOL (snd $ unLoc $1) then return (sLL $1 $> ((fst $ unLoc $1) ++ (mz AnnSemi $2) ,snd $ unLoc $1)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (sLL $1 $> (fst $ unLoc $1 , snocOL hs 
t')) } | decl_inst { sL1 $1 ([],unLoc $1) } | {- empty -} { noLoc ([],nilOL) } decllist_inst :: { Located ([AddEpAnn] , OrdList (LHsDecl GhcPs)) } -- Reversed : '{' decls_inst '}' { sLL $1 $> (moc $1:mcc $3:(fst $ unLoc $2),snd $ unLoc $2) } | vocurly decls_inst close { L (gl $2) (unLoc $2) } -- Instance body -- where_inst :: { Located ([AddEpAnn] , OrdList (LHsDecl GhcPs)) } -- Reversed -- No implicit parameters -- May have type declarations : 'where' decllist_inst { sLL $1 $> (mj AnnWhere $1:(fst $ unLoc $2) ,(snd $ unLoc $2)) } | {- empty -} { noLoc ([],nilOL) } -- Declarations in binding groups other than classes and instances -- decls :: { Located ([TrailingAnn], OrdList (LHsDecl GhcPs)) } : decls ';' decl {% if isNilOL (snd $ unLoc $1) then return (sLLlA $1 $> ((fst $ unLoc $1) ++ (msemi $2) , unitOL $3)) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) let { this = unitOL $3; rest = snocOL hs t'; these = rest `appOL` this } return (rest `seq` this `seq` these `seq` (sLLlA $1 $> (fst $ unLoc $1, these))) } | decls ';' {% if isNilOL (snd $ unLoc $1) then return (sLL $1 $> (((fst $ unLoc $1) ++ (msemi $2) ,snd $ unLoc $1))) else case (snd $ unLoc $1) of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (sLL $1 $> (fst $ unLoc $1 , snocOL hs t')) } | decl { sL1A $1 ([], unitOL $1) } | {- empty -} { noLoc ([],nilOL) } decllist :: { Located (AnnList,Located (OrdList (LHsDecl GhcPs))) } : '{' decls '}' { sLL $1 $> (AnnList (Just $ glR $2) (Just $ moc $1) (Just $ mcc $3) [] (fst $ unLoc $2) ,sL1 $2 $ snd $ unLoc $2) } | vocurly decls close { L (gl $2) (AnnList (Just $ glR $2) Nothing Nothing [] (fst $ unLoc $2) ,sL1 $2 $ snd $ unLoc $2) } -- Binding groups other than those of class and instance declarations -- binds :: { Located (HsLocalBinds GhcPs) } -- May have implicit parameters -- No type declarations : decllist {% do { val_binds <- cvBindGroup (unLoc $ snd $ unLoc $1) ; cs <- getCommentsFor (gl $1) ; return (sL1 $1 $ 
HsValBinds (fixValbindsAnn $ EpAnn (glR $1) (fst $ unLoc $1) cs) val_binds)} } | '{' dbinds '}' {% acs (\cs -> (L (comb3 $1 $2 $3) $ HsIPBinds (EpAnn (glR $1) (AnnList (Just$ glR $2) (Just $ moc $1) (Just $ mcc $3) [] []) cs) (IPBinds noExtField (reverse $ unLoc $2)))) } | vocurly dbinds close {% acs (\cs -> (L (gl $2) $ HsIPBinds (EpAnn (glR $1) (AnnList (Just $ glR $2) Nothing Nothing [] []) cs) (IPBinds noExtField (reverse $ unLoc $2)))) } wherebinds :: { Maybe (Located (HsLocalBinds GhcPs, Maybe EpAnnComments )) } -- May have implicit parameters -- No type declarations : 'where' binds {% do { r <- acs (\cs -> (sLL $1 $> (annBinds (mj AnnWhere $1) cs (unLoc $2)))) ; return $ Just r} } | {- empty -} { Nothing } ----------------------------------------------------------------------------- -- Transformation Rules rules :: { [LRuleDecl GhcPs] } -- Reversed : rules ';' rule {% case $1 of [] -> return ($3:$1) (h:t) -> do h' <- addTrailingSemiA h (gl $2) return ($3:h':t) } | rules ';' {% case $1 of [] -> return $1 (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (h':t) } | rule { [$1] } | {- empty -} { [] } rule :: { LRuleDecl GhcPs } : STRING rule_activation rule_foralls infixexp '=' exp {%runPV (unECP $4) >>= \ $4 -> runPV (unECP $6) >>= \ $6 -> acsA (\cs -> (sLLlA $1 $> $ HsRule { rd_ext = EpAnn (glR $1) ((fstOf3 $3) (mj AnnEqual $5 : (fst $2))) cs , rd_name = L (noAnnSrcSpan $ gl $1) (getSTRINGs $1, getSTRING $1) , rd_act = (snd $2) `orElse` AlwaysActive , rd_tyvs = sndOf3 $3, rd_tmvs = thdOf3 $3 , rd_lhs = $4, rd_rhs = $6 })) } -- Rules can be specified to be NeverActive, unlike inline/specialize pragmas rule_activation :: { ([AddEpAnn],Maybe Activation) } -- See Note [%shift: rule_activation -> {- empty -}] : {- empty -} %shift { ([],Nothing) } | rule_explicit_activation { (fst $1,Just (snd $1)) } -- This production is used to parse the tilde syntax in pragmas such as -- * {-# INLINE[~2] ... #-} -- * {-# SPECIALISE [~ 001] ... #-} -- * {-# RULES ... [~0] ... 
g #-} -- Note that it can be written either -- without a space [~1] (the PREFIX_TILDE case), or -- with a space [~ 1] (the VARSYM case). -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer rule_activation_marker :: { [AddEpAnn] } : PREFIX_TILDE { [mj AnnTilde $1] } | VARSYM {% if (getVARSYM $1 == fsLit "~") then return [mj AnnTilde $1] else do { addError $ mkPlainErrorMsgEnvelope (getLoc $1) $ PsErrInvalidRuleActivationMarker ; return [] } } rule_explicit_activation :: { ([AddEpAnn] ,Activation) } -- In brackets : '[' INTEGER ']' { ([mos $1,mj AnnVal $2,mcs $3] ,ActiveAfter (getINTEGERs $2) (fromInteger (il_value (getINTEGER $2)))) } | '[' rule_activation_marker INTEGER ']' { ($2++[mos $1,mj AnnVal $3,mcs $4] ,ActiveBefore (getINTEGERs $3) (fromInteger (il_value (getINTEGER $3)))) } | '[' rule_activation_marker ']' { ($2++[mos $1,mcs $3] ,NeverActive) } rule_foralls :: { ([AddEpAnn] -> HsRuleAnn, Maybe [LHsTyVarBndr () GhcPs], [LRuleBndr GhcPs]) } : 'forall' rule_vars '.' 'forall' rule_vars '.' {% let tyvs = mkRuleTyVarBndrs $2 in hintExplicitForall $1 >> checkRuleTyVarBndrNames (mkRuleTyVarBndrs $2) >> return (\anns -> HsRuleAnn (Just (mu AnnForall $1,mj AnnDot $3)) (Just (mu AnnForall $4,mj AnnDot $6)) anns, Just (mkRuleTyVarBndrs $2), mkRuleBndrs $5) } -- See Note [%shift: rule_foralls -> 'forall' rule_vars '.'] | 'forall' rule_vars '.' 
%shift { (\anns -> HsRuleAnn Nothing (Just (mu AnnForall $1,mj AnnDot $3)) anns, Nothing, mkRuleBndrs $2) } -- See Note [%shift: rule_foralls -> {- empty -}] | {- empty -} %shift { (\anns -> HsRuleAnn Nothing Nothing anns, Nothing, []) } rule_vars :: { [LRuleTyTmVar] } : rule_var rule_vars { $1 : $2 } | {- empty -} { [] } rule_var :: { LRuleTyTmVar } : varid { sL1l $1 (RuleTyTmVar noAnn $1 Nothing) } | '(' varid '::' ctype ')' {% acsA (\cs -> sLL $1 $> (RuleTyTmVar (EpAnn (glR $1) [mop $1,mu AnnDcolon $3,mcp $5] cs) $2 (Just $4))) } {- Note [Parsing explicit foralls in Rules] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We really want the above definition of rule_foralls to be: rule_foralls : 'forall' tv_bndrs '.' 'forall' rule_vars '.' | 'forall' rule_vars '.' | {- empty -} where rule_vars (term variables) can be named "forall", "family", or "role", but tv_vars (type variables) cannot be. However, such a definition results in a reduce/reduce conflict. For example, when parsing: > {-# RULE "name" forall a ... #-} before the '...' it is impossible to determine whether we should be in the first or second case of the above. This is resolved by using rule_vars (which is more general) for both, and ensuring that type-level quantified variables do not have the names "forall", "family", or "role" in the function 'checkRuleTyVarBndrNames' in GHC.Parser.PostProcess. Thus, whenever the definition of tyvarid (used for tv_bndrs) is changed relative to varid (used for rule_vars), 'checkRuleTyVarBndrNames' must be updated. -} ----------------------------------------------------------------------------- -- Warnings and deprecations (c.f. 
rules) warnings :: { OrdList (LWarnDecl GhcPs) } : warnings ';' warning {% if isNilOL $1 then return ($1 `appOL` $3) else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (snocOL hs t' `appOL` $3) } | warnings ';' {% if isNilOL $1 then return $1 else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (snocOL hs t') } | warning { $1 } | {- empty -} { nilOL } -- SUP: TEMPORARY HACK, not checking for `module Foo' warning :: { OrdList (LWarnDecl GhcPs) } : namelist strings {% fmap unitOL $ acsA (\cs -> sLL $1 $> (Warning (EpAnn (glR $1) (fst $ unLoc $2) cs) (unLoc $1) (WarningTxt (noLoc NoSourceText) $ map stringLiteralToHsDocWst $ snd $ unLoc $2))) } deprecations :: { OrdList (LWarnDecl GhcPs) } : deprecations ';' deprecation {% if isNilOL $1 then return ($1 `appOL` $3) else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (snocOL hs t' `appOL` $3) } | deprecations ';' {% if isNilOL $1 then return $1 else case $1 of SnocOL hs t -> do t' <- addTrailingSemiA t (gl $2) return (snocOL hs t') } | deprecation { $1 } | {- empty -} { nilOL } -- SUP: TEMPORARY HACK, not checking for `module Foo' deprecation :: { OrdList (LWarnDecl GhcPs) } : namelist strings {% fmap unitOL $ acsA (\cs -> sLL $1 $> $ (Warning (EpAnn (glR $1) (fst $ unLoc $2) cs) (unLoc $1) (DeprecatedTxt (noLoc NoSourceText) $ map stringLiteralToHsDocWst $ snd $ unLoc $2))) } strings :: { Located ([AddEpAnn],[Located StringLiteral]) } : STRING { sL1 $1 ([],[L (gl $1) (getStringLiteral $1)]) } | '[' stringlist ']' { sLL $1 $> $ ([mos $1,mcs $3],fromOL (unLoc $2)) } stringlist :: { Located (OrdList (Located StringLiteral)) } : stringlist ',' STRING {% if isNilOL (unLoc $1) then return (sLL $1 $> (unLoc $1 `snocOL` (L (gl $3) (getStringLiteral $3)))) else case (unLoc $1) of SnocOL hs t -> do let { t' = addTrailingCommaS t (glAA $2) } return (sLL $1 $> (snocOL hs t' `snocOL` (L (gl $3) (getStringLiteral $3)))) } | STRING { sLL $1 $> (unitOL (L (gl $1) 
(getStringLiteral $1))) } | {- empty -} { noLoc nilOL } ----------------------------------------------------------------------------- -- Annotations annotation :: { LHsDecl GhcPs } : '{-# ANN' name_var aexp '#-}' {% runPV (unECP $3) >>= \ $3 -> acsA (\cs -> sLL $1 $> (AnnD noExtField $ HsAnnotation (EpAnn (glR $1) (AnnPragma (mo $1) (mc $4) []) cs) (getANN_PRAGs $1) (ValueAnnProvenance $2) $3)) } | '{-# ANN' 'type' otycon aexp '#-}' {% runPV (unECP $4) >>= \ $4 -> acsA (\cs -> sLL $1 $> (AnnD noExtField $ HsAnnotation (EpAnn (glR $1) (AnnPragma (mo $1) (mc $5) [mj AnnType $2]) cs) (getANN_PRAGs $1) (TypeAnnProvenance $3) $4)) } | '{-# ANN' 'module' aexp '#-}' {% runPV (unECP $3) >>= \ $3 -> acsA (\cs -> sLL $1 $> (AnnD noExtField $ HsAnnotation (EpAnn (glR $1) (AnnPragma (mo $1) (mc $4) [mj AnnModule $2]) cs) (getANN_PRAGs $1) ModuleAnnProvenance $3)) } ----------------------------------------------------------------------------- -- Foreign import and export declarations fdecl :: { Located ([AddEpAnn],EpAnn [AddEpAnn] -> HsDecl GhcPs) } fdecl : 'import' callconv safety fspec {% mkImport $2 $3 (snd $ unLoc $4) >>= \i -> return (sLL $1 $> (mj AnnImport $1 : (fst $ unLoc $4),i)) } | 'import' callconv fspec {% do { d <- mkImport $2 (noLoc PlaySafe) (snd $ unLoc $3); return (sLL $1 $> (mj AnnImport $1 : (fst $ unLoc $3),d)) }} | 'export' callconv fspec {% mkExport $2 (snd $ unLoc $3) >>= \i -> return (sLL $1 $> (mj AnnExport $1 : (fst $ unLoc $3),i) ) } callconv :: { Located CCallConv } : 'stdcall' { sLL $1 $> StdCallConv } | 'ccall' { sLL $1 $> CCallConv } | 'capi' { sLL $1 $> CApiConv } | 'prim' { sLL $1 $> PrimCallConv} | 'javascript' { sLL $1 $> JavaScriptCallConv } safety :: { Located Safety } : 'unsafe' { sLL $1 $> PlayRisky } | 'safe' { sLL $1 $> PlaySafe } | 'interruptible' { sLL $1 $> PlayInterruptible } fspec :: { Located ([AddEpAnn] ,(Located StringLiteral, LocatedN RdrName, LHsSigType GhcPs)) } : STRING var '::' sigtype { sLL $1 (reLoc $>) ([mu AnnDcolon $3] 
,(L (getLoc $1) (getStringLiteral $1), $2, $4)) } | var '::' sigtype { sLL (reLocN $1) (reLoc $>) ([mu AnnDcolon $2] ,(noLoc (StringLiteral NoSourceText nilFS Nothing), $1, $3)) } -- if the entity string is missing, it defaults to the empty string; -- the meaning of an empty entity string depends on the calling -- convention ----------------------------------------------------------------------------- -- Type signatures opt_sig :: { Maybe (AddEpAnn, LHsType GhcPs) } : {- empty -} { Nothing } | '::' ctype { Just (mu AnnDcolon $1, $2) } opt_tyconsig :: { ([AddEpAnn], Maybe (LocatedN RdrName)) } : {- empty -} { ([], Nothing) } | '::' gtycon { ([mu AnnDcolon $1], Just $2) } -- Like ktype, but for types that obey the forall-or-nothing rule. -- See Note [forall-or-nothing rule] in GHC.Hs.Type. sigktype :: { LHsSigType GhcPs } : sigtype { $1 } | ctype '::' kind {% acsA (\cs -> sLLAA $1 $> $ mkHsImplicitSigType $ sLLa (reLoc $1) (reLoc $>) $ HsKindSig (EpAnn (glAR $1) [mu AnnDcolon $2] cs) $1 $3) } -- Like ctype, but for types that obey the forall-or-nothing rule. -- See Note [forall-or-nothing rule] in GHC.Hs.Type. To avoid duplicating the -- logic in ctype here, we simply reuse the ctype production and perform -- surgery on the LHsType it returns to turn it into an LHsSigType. 
sigtype :: { LHsSigType GhcPs } : ctype { hsTypeToHsSigType $1 } sig_vars :: { Located [LocatedN RdrName] } -- Returned in reversed order : sig_vars ',' var {% case unLoc $1 of [] -> return (sLL $1 (reLocN $>) ($3 : unLoc $1)) (h:t) -> do h' <- addTrailingCommaN h (gl $2) return (sLL $1 (reLocN $>) ($3 : h' : t)) } | var { sL1N $1 [$1] } sigtypes1 :: { OrdList (LHsSigType GhcPs) } : sigtype { unitOL $1 } | sigtype ',' sigtypes1 {% do { st <- addTrailingCommaA $1 (gl $2) ; return $ unitOL st `appOL` $3 } } ----------------------------------------------------------------------------- -- Types unpackedness :: { Located UnpackednessPragma } : '{-# UNPACK' '#-}' { sLL $1 $> (UnpackednessPragma [mo $1, mc $2] (getUNPACK_PRAGs $1) SrcUnpack) } | '{-# NOUNPACK' '#-}' { sLL $1 $> (UnpackednessPragma [mo $1, mc $2] (getNOUNPACK_PRAGs $1) SrcNoUnpack) } forall_telescope :: { Located (HsForAllTelescope GhcPs) } : 'forall' tv_bndrs '.' {% do { hintExplicitForall $1 ; acs (\cs -> (sLL $1 $> $ mkHsForAllInvisTele (EpAnn (glR $1) (mu AnnForall $1,mu AnnDot $3) cs) $2 )) }} | 'forall' tv_bndrs '->' {% do { hintExplicitForall $1 ; req_tvbs <- fromSpecTyVarBndrs $2 ; acs (\cs -> (sLL $1 $> $ mkHsForAllVisTele (EpAnn (glR $1) (mu AnnForall $1,mu AnnRarrow $3) cs) req_tvbs )) }} -- A ktype is a ctype, possibly with a kind annotation ktype :: { LHsType GhcPs } : ctype { $1 } | ctype '::' kind {% acsA (\cs -> sLLAA $1 $> $ HsKindSig (EpAnn (glAR $1) [mu AnnDcolon $2] cs) $1 $3) } -- A ctype is a for-all type ctype :: { LHsType GhcPs } : forall_telescope ctype { reLocA $ sLL $1 (reLoc $>) $ HsForAllTy { hst_tele = unLoc $1 , hst_xforall = noExtField , hst_body = $2 } } | context '=>' ctype {% acsA (\cs -> (sLL (reLoc $1) (reLoc $>) $ HsQualTy { hst_ctxt = addTrailingDarrowC $1 $2 cs , hst_xqual = NoExtField , hst_body = $3 })) } | ipvar '::' ctype {% acsA (\cs -> sLL $1 (reLoc $>) (HsIParamTy (EpAnn (glR $1) [mu AnnDcolon $2] cs) (reLocA $1) $3)) } | type { $1 } ---------------------- -- 
Notes for 'context' -- We parse a context as a btype so that we don't get reduce/reduce -- errors in ctype. The basic problem is that -- (Eq a, Ord a) -- looks so much like a tuple type. We can't tell until we find the => context :: { LHsContext GhcPs } : btype {% checkContext $1 } {- Note [GADT decl discards annotations] ~~~~~~~~~~~~~~~~~~~~~ The type production for btype `->` ctype add the AnnRarrow annotation twice, in different places. This is because if the type is processed as usual, it belongs on the annotations for the type as a whole. But if the type is passed to mkGadtDecl, it discards the top level SrcSpan, and the top-level annotation will be disconnected. Hence for this specific case it is connected to the first type too. -} type :: { LHsType GhcPs } -- See Note [%shift: type -> btype] : btype %shift { $1 } | btype '->' ctype {% acsA (\cs -> sLL (reLoc $1) (reLoc $>) $ HsFunTy (EpAnn (glAR $1) NoEpAnns cs) (HsUnrestrictedArrow (hsUniTok $2)) $1 $3) } | btype mult '->' ctype {% hintLinear (getLoc $2) >> let arr = (unLoc $2) (hsUniTok $3) in acsA (\cs -> sLL (reLoc $1) (reLoc $>) $ HsFunTy (EpAnn (glAR $1) NoEpAnns cs) arr $1 $4) } | btype '->.' ctype {% hintLinear (getLoc $2) >> acsA (\cs -> sLL (reLoc $1) (reLoc $>) $ HsFunTy (EpAnn (glAR $1) NoEpAnns cs) (HsLinearArrow (HsLolly (hsTok $2))) $1 $3) } -- [mu AnnLollyU $2] } mult :: { Located (LHsUniToken "->" "\8594" GhcPs -> HsArrow GhcPs) } : PREFIX_PERCENT atype { sLL $1 (reLoc $>) (mkMultTy (hsTok $1) $2) } btype :: { LHsType GhcPs } : infixtype {% runPV $1 } infixtype :: { forall b. DisambTD b => PV (LocatedA b) } -- See Note [%shift: infixtype -> ftype] : ftype %shift { $1 } | ftype tyop infixtype { $1 >>= \ $1 -> $3 >>= \ $3 -> do { let (op, prom) = $2 ; when (looksLikeMult $1 op $3) $ hintLinear (getLocA op) ; mkHsOpTyPV prom $1 op $3 } } | unpackedness infixtype { $2 >>= \ $2 -> mkUnpackednessPV $1 $2 } ftype :: { forall b. 
DisambTD b => PV (LocatedA b) } : atype { mkHsAppTyHeadPV $1 } | tyop { failOpFewArgs (fst $1) } | ftype tyarg { $1 >>= \ $1 -> mkHsAppTyPV $1 $2 } | ftype PREFIX_AT atype { $1 >>= \ $1 -> mkHsAppKindTyPV $1 (getLoc $2) $3 } tyarg :: { LHsType GhcPs } : atype { $1 } | unpackedness atype {% addUnpackednessP $1 $2 } tyop :: { (LocatedN RdrName, PromotionFlag) } : qtyconop { ($1, NotPromoted) } | tyvarop { ($1, NotPromoted) } | SIMPLEQUOTE qconop {% do { op <- amsrn (sLL $1 (reLoc $>) (unLoc $2)) (NameAnnQuote (glAA $1) (gl $2) []) ; return (op, IsPromoted) } } | SIMPLEQUOTE varop {% do { op <- amsrn (sLL $1 (reLoc $>) (unLoc $2)) (NameAnnQuote (glAA $1) (gl $2) []) ; return (op, IsPromoted) } } atype :: { LHsType GhcPs } : ntgtycon {% acsa (\cs -> sL1a (reLocN $1) (HsTyVar (EpAnn (glNR $1) [] cs) NotPromoted $1)) } -- Not including unit tuples -- See Note [%shift: atype -> tyvar] | tyvar %shift {% acsa (\cs -> sL1a (reLocN $1) (HsTyVar (EpAnn (glNR $1) [] cs) NotPromoted $1)) } -- (See Note [Unit tuples]) | '*' {% do { warnStarIsType (getLoc $1) ; return $ reLocA $ sL1 $1 (HsStarTy noExtField (isUnicode $1)) } } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | PREFIX_TILDE atype {% acsA (\cs -> sLLlA $1 $> (mkBangTy (EpAnn (glR $1) [mj AnnTilde $1] cs) SrcLazy $2)) } | PREFIX_BANG atype {% acsA (\cs -> sLLlA $1 $> (mkBangTy (EpAnn (glR $1) [mj AnnBang $1] cs) SrcStrict $2)) } | '{' fielddecls '}' {% do { decls <- acsA (\cs -> (sLL $1 $> $ HsRecTy (EpAnn (glR $1) (AnnList (Just $ listAsAnchor $2) (Just $ moc $1) (Just $ mcc $3) [] []) cs) $2)) ; checkRecordSyntax decls }} -- Constructor sigs only | '(' ')' {% acsA (\cs -> sLL $1 $> $ HsTupleTy (EpAnn (glR $1) (AnnParen AnnParens (glAA $1) (glAA $2)) cs) HsBoxedOrConstraintTuple []) } | '(' ktype ',' comma_types1 ')' {% do { h <- addTrailingCommaA $2 (gl $3) ; acsA (\cs -> sLL $1 $> $ HsTupleTy (EpAnn (glR $1) (AnnParen AnnParens (glAA $1) (glAA $5)) cs) HsBoxedOrConstraintTuple (h : $4)) }} | 
'(#' '#)' {% acsA (\cs -> sLL $1 $> $ HsTupleTy (EpAnn (glR $1) (AnnParen AnnParensHash (glAA $1) (glAA $2)) cs) HsUnboxedTuple []) } | '(#' comma_types1 '#)' {% acsA (\cs -> sLL $1 $> $ HsTupleTy (EpAnn (glR $1) (AnnParen AnnParensHash (glAA $1) (glAA $3)) cs) HsUnboxedTuple $2) } | '(#' bar_types2 '#)' {% acsA (\cs -> sLL $1 $> $ HsSumTy (EpAnn (glR $1) (AnnParen AnnParensHash (glAA $1) (glAA $3)) cs) $2) } | '[' ktype ']' {% acsA (\cs -> sLL $1 $> $ HsListTy (EpAnn (glR $1) (AnnParen AnnParensSquare (glAA $1) (glAA $3)) cs) $2) } | '(' ktype ')' {% acsA (\cs -> sLL $1 $> $ HsParTy (EpAnn (glR $1) (AnnParen AnnParens (glAA $1) (glAA $3)) cs) $2) } | quasiquote { mapLocA (HsSpliceTy noExtField) $1 } | splice_untyped { mapLocA (HsSpliceTy noExtField) $1 } -- see Note [Promotion] for the followings | SIMPLEQUOTE qcon_nowiredlist {% acsA (\cs -> sLL $1 (reLocN $>) $ HsTyVar (EpAnn (glR $1) [mj AnnSimpleQuote $1,mjN AnnName $2] cs) IsPromoted $2) } | SIMPLEQUOTE '(' ktype ',' comma_types1 ')' {% do { h <- addTrailingCommaA $3 (gl $4) ; acsA (\cs -> sLL $1 $> $ HsExplicitTupleTy (EpAnn (glR $1) [mj AnnSimpleQuote $1,mop $2,mcp $6] cs) (h : $5)) }} | SIMPLEQUOTE '[' comma_types0 ']' {% acsA (\cs -> sLL $1 $> $ HsExplicitListTy (EpAnn (glR $1) [mj AnnSimpleQuote $1,mos $2,mcs $4] cs) IsPromoted $3) } | SIMPLEQUOTE var {% acsA (\cs -> sLL $1 (reLocN $>) $ HsTyVar (EpAnn (glR $1) [mj AnnSimpleQuote $1,mjN AnnName $2] cs) IsPromoted $2) } -- Two or more [ty, ty, ty] must be a promoted list type, just as -- if you had written '[ty, ty, ty] -- (One means a list type, zero means the list type constructor, -- so you have to quote those.) 
| '[' ktype ',' comma_types1 ']' {% do { h <- addTrailingCommaA $2 (gl $3) ; acsA (\cs -> sLL $1 $> $ HsExplicitListTy (EpAnn (glR $1) [mos $1,mcs $5] cs) NotPromoted (h:$4)) }} | INTEGER { reLocA $ sLL $1 $> $ HsTyLit noExtField $ HsNumTy (getINTEGERs $1) (il_value (getINTEGER $1)) } | CHAR { reLocA $ sLL $1 $> $ HsTyLit noExtField $ HsCharTy (getCHARs $1) (getCHAR $1) } | STRING { reLocA $ sLL $1 $> $ HsTyLit noExtField $ HsStrTy (getSTRINGs $1) (getSTRING $1) } | '_' { reLocA $ sL1 $1 $ mkAnonWildCardTy } -- An inst_type is what occurs in the head of an instance decl -- e.g. (Foo a, Gaz b) => Wibble a b -- It's kept as a single type for convenience. inst_type :: { LHsSigType GhcPs } : sigtype { $1 } deriv_types :: { [LHsSigType GhcPs] } : sigktype { [$1] } | sigktype ',' deriv_types {% do { h <- addTrailingCommaA $1 (gl $2) ; return (h : $3) } } comma_types0 :: { [LHsType GhcPs] } -- Zero or more: ty,ty,ty : comma_types1 { $1 } | {- empty -} { [] } comma_types1 :: { [LHsType GhcPs] } -- One or more: ty,ty,ty : ktype { [$1] } | ktype ',' comma_types1 {% do { h <- addTrailingCommaA $1 (gl $2) ; return (h : $3) }} bar_types2 :: { [LHsType GhcPs] } -- Two or more: ty|ty|ty : ktype '|' ktype {% do { h <- addTrailingVbarA $1 (gl $2) ; return [h,$3] }} | ktype '|' bar_types2 {% do { h <- addTrailingVbarA $1 (gl $2) ; return (h : $3) }} tv_bndrs :: { [LHsTyVarBndr Specificity GhcPs] } : tv_bndr tv_bndrs { $1 : $2 } | {- empty -} { [] } tv_bndr :: { LHsTyVarBndr Specificity GhcPs } : tv_bndr_no_braces { $1 } | '{' tyvar '}' {% acsA (\cs -> sLL $1 $> (UserTyVar (EpAnn (glR $1) [moc $1, mcc $3] cs) InferredSpec $2)) } | '{' tyvar '::' kind '}' {% acsA (\cs -> sLL $1 $> (KindedTyVar (EpAnn (glR $1) [moc $1,mu AnnDcolon $3 ,mcc $5] cs) InferredSpec $2 $4)) } tv_bndr_no_braces :: { LHsTyVarBndr Specificity GhcPs } : tyvar {% acsA (\cs -> (sL1 (reLocN $1) (UserTyVar (EpAnn (glNR $1) [] cs) SpecifiedSpec $1))) } | '(' tyvar '::' kind ')' {% acsA (\cs -> (sLL $1 $> (KindedTyVar 
(EpAnn (glR $1) [mop $1,mu AnnDcolon $3 ,mcp $5] cs) SpecifiedSpec $2 $4))) } fds :: { Located ([AddEpAnn],[LHsFunDep GhcPs]) } : {- empty -} { noLoc ([],[]) } | '|' fds1 { (sLL $1 $> ([mj AnnVbar $1] ,reverse (unLoc $2))) } fds1 :: { Located [LHsFunDep GhcPs] } : fds1 ',' fd {% do { let (h:t) = unLoc $1 -- Safe from fds1 rules ; h' <- addTrailingCommaA h (gl $2) ; return (sLLlA $1 $> ($3 : h' : t)) }} | fd { sL1A $1 [$1] } fd :: { LHsFunDep GhcPs } : varids0 '->' varids0 {% acsA (\cs -> L (comb3 $1 $2 $3) (FunDep (EpAnn (glR $1) [mu AnnRarrow $2] cs) (reverse (unLoc $1)) (reverse (unLoc $3)))) } varids0 :: { Located [LocatedN RdrName] } : {- empty -} { noLoc [] } | varids0 tyvar { sLL $1 (reLocN $>) ($2 : (unLoc $1)) } ----------------------------------------------------------------------------- -- Kinds kind :: { LHsKind GhcPs } : ctype { $1 } {- Note [Promotion] ~~~~~~~~~~~~~~~~ - Syntax of promoted qualified names We write 'Nat.Zero instead of Nat.'Zero when dealing with qualified names. Moreover ticks are only allowed in types, not in kinds, for a few reasons: 1. we don't need quotes since we cannot define names in kinds 2. if one day we merge types and kinds, tick would mean look in DataName 3. we don't have a kind namespace anyway - Name resolution When the user write Zero instead of 'Zero in types, we parse it a HsTyVar ("Zero", TcClsName) instead of HsTyVar ("Zero", DataName). We deal with this in the renamer. If a HsTyVar ("Zero", TcClsName) is not bounded in the type level, then we look for it in the term level (we change its namespace to DataName, see Note [Demotion] in GHC.Types.Names.OccName). And both become a HsTyVar ("Zero", DataName) after the renamer. 
-} ----------------------------------------------------------------------------- -- Datatype declarations gadt_constrlist :: { Located ([AddEpAnn] ,[LConDecl GhcPs]) } -- Returned in order : 'where' '{' gadt_constrs '}' {% checkEmptyGADTs $ L (comb2 $1 $3) ([mj AnnWhere $1 ,moc $2 ,mcc $4] , unLoc $3) } | 'where' vocurly gadt_constrs close {% checkEmptyGADTs $ L (comb2 $1 $3) ([mj AnnWhere $1] , unLoc $3) } | {- empty -} { noLoc ([],[]) } gadt_constrs :: { Located [LConDecl GhcPs] } : gadt_constr ';' gadt_constrs {% do { h <- addTrailingSemiA $1 (gl $2) ; return (L (comb2 (reLoc $1) $3) (h : unLoc $3)) }} | gadt_constr { L (glA $1) [$1] } | {- empty -} { noLoc [] } -- We allow the following forms: -- C :: Eq a => a -> T a -- C :: forall a. Eq a => !a -> T a -- D { x,y :: a } :: T a -- forall a. Eq a => D { x,y :: a } :: T a gadt_constr :: { LConDecl GhcPs } -- see Note [Difference in parsing GADT and data constructors] -- Returns a list because of: C,D :: ty -- TODO:AZ capture the optSemi. Why leading? : optSemi con_list '::' sigtype {% mkGadtDecl (comb2A $2 $>) (unLoc $2) $4 [mu AnnDcolon $3] } {- Note [Difference in parsing GADT and data constructors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GADT constructors have simpler syntax than usual data constructors: in GADTs, types cannot occur to the left of '::', so they cannot be mixed with constructor names (see Note [Parsing data constructors is hard]). Due to simplified syntax, GADT constructor names (left-hand side of '::') use simpler grammar production than usual data constructor names. As a consequence, GADT constructor names are restricted (names like '(*)' are allowed in usual data constructors, but not in GADTs). 
-} constrs :: { Located ([AddEpAnn],[LConDecl GhcPs]) } : '=' constrs1 { sLL $1 $2 ([mj AnnEqual $1],unLoc $2)} constrs1 :: { Located [LConDecl GhcPs] } : constrs1 '|' constr {% do { let (h:t) = unLoc $1 ; h' <- addTrailingVbarA h (gl $2) ; return (sLLlA $1 $> ($3 : h' : t)) }} | constr { sL1A $1 [$1] } constr :: { LConDecl GhcPs } : forall context '=>' constr_stuff {% acsA (\cs -> let (con,details) = unLoc $4 in (L (comb4 $1 (reLoc $2) $3 $4) (mkConDeclH98 (EpAnn (spanAsAnchor (comb4 $1 (reLoc $2) $3 $4)) (mu AnnDarrow $3:(fst $ unLoc $1)) cs) con (snd $ unLoc $1) (Just $2) details))) } | forall constr_stuff {% acsA (\cs -> let (con,details) = unLoc $2 in (L (comb2 $1 $2) (mkConDeclH98 (EpAnn (spanAsAnchor (comb2 $1 $2)) (fst $ unLoc $1) cs) con (snd $ unLoc $1) Nothing -- No context details))) } forall :: { Located ([AddEpAnn], Maybe [LHsTyVarBndr Specificity GhcPs]) } : 'forall' tv_bndrs '.' { sLL $1 $> ([mu AnnForall $1,mj AnnDot $3], Just $2) } | {- empty -} { noLoc ([], Nothing) } constr_stuff :: { Located (LocatedN RdrName, HsConDeclH98Details GhcPs) } : infixtype {% fmap (reLoc. (mapLoc (\b -> (dataConBuilderCon b, dataConBuilderDetails b)))) (runPV $1) } fielddecls :: { [LConDeclField GhcPs] } : {- empty -} { [] } | fielddecls1 { $1 } fielddecls1 :: { [LConDeclField GhcPs] } : fielddecl ',' fielddecls1 {% do { h <- addTrailingCommaA $1 (gl $2) ; return (h : $3) }} | fielddecl { [$1] } fielddecl :: { LConDeclField GhcPs } -- A list because of f,g :: Int : sig_vars '::' ctype {% acsA (\cs -> L (comb2 $1 (reLoc $3)) (ConDeclField (EpAnn (glR $1) [mu AnnDcolon $2] cs) (reverse (map (\ln@(L l n) -> L (l2l l) $ FieldOcc noExtField ln) (unLoc $1))) $3 Nothing))} -- Reversed! maybe_derivings :: { Located (HsDeriving GhcPs) } : {- empty -} { noLoc [] } | derivings { $1 } -- A list of one or more deriving clauses at the end of a datatype derivings :: { Located (HsDeriving GhcPs) } : derivings deriving { sLL $1 (reLoc $>) ($2 : unLoc $1) } -- AZ: order? 
| deriving { sL1 (reLoc $>) [$1] } -- The outer Located is just to allow the caller to -- know the rightmost extremity of the 'deriving' clause deriving :: { LHsDerivingClause GhcPs } : 'deriving' deriv_clause_types {% let { full_loc = comb2A $1 $> } in acsA (\cs -> L full_loc $ HsDerivingClause (EpAnn (glR $1) [mj AnnDeriving $1] cs) Nothing $2) } | 'deriving' deriv_strategy_no_via deriv_clause_types {% let { full_loc = comb2A $1 $> } in acsA (\cs -> L full_loc $ HsDerivingClause (EpAnn (glR $1) [mj AnnDeriving $1] cs) (Just $2) $3) } | 'deriving' deriv_clause_types deriv_strategy_via {% let { full_loc = comb2 $1 (reLoc $>) } in acsA (\cs -> L full_loc $ HsDerivingClause (EpAnn (glR $1) [mj AnnDeriving $1] cs) (Just $3) $2) } deriv_clause_types :: { LDerivClauseTys GhcPs } : qtycon { let { tc = sL1 (reLocL $1) $ mkHsImplicitSigType $ sL1 (reLocL $1) $ HsTyVar noAnn NotPromoted $1 } in sL1 (reLocC $1) (DctSingle noExtField tc) } | '(' ')' {% amsrc (sLL $1 $> (DctMulti noExtField [])) (AnnContext Nothing [glAA $1] [glAA $2]) } | '(' deriv_types ')' {% amsrc (sLL $1 $> (DctMulti noExtField $2)) (AnnContext Nothing [glAA $1] [glAA $3])} ----------------------------------------------------------------------------- -- Value definitions {- Note [Declaration/signature overlap] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There's an awkward overlap with a type signature. Consider f :: Int -> Int = ...rhs... Then we can't tell whether it's a type signature or a value definition with a result signature until we see the '='. So we have to inline enough to postpone reductions until we know. -} {- ATTENTION: Dirty Hackery Ahead! If the second alternative of vars is var instead of qvar, we get another shift/reduce-conflict. Consider the following programs: { (^^) :: Int->Int ; } Type signature; only var allowed { (^^) :: Int->Int = ... 
; } Value defn with result signature; qvar allowed (because of instance decls) We can't tell whether to reduce var to qvar until after we've read the signatures. -} decl_no_th :: { LHsDecl GhcPs } : sigdecl { $1 } | infixexp opt_sig rhs {% runPV (unECP $1) >>= \ $1 -> do { let { l = comb2Al $1 $> } ; r <- checkValDef l $1 $2 $3; -- Depending upon what the pattern looks like we might get either -- a FunBind or PatBind back from checkValDef. See Note -- [FunBind vs PatBind] ; cs <- getCommentsFor l ; return $! (sL (commentsA l cs) $ ValD noExtField r) } } | pattern_synonym_decl { $1 } decl :: { LHsDecl GhcPs } : decl_no_th { $1 } -- Why do we only allow naked declaration splices in top-level -- declarations and not here? Short answer: because readFail009 -- fails terribly with a panic in cvBindsAndSigs otherwise. | splice_exp {% mkSpliceDecl $1 } rhs :: { Located (GRHSs GhcPs (LHsExpr GhcPs)) } : '=' exp wherebinds {% runPV (unECP $2) >>= \ $2 -> do { let L l (bs, csw) = adaptWhereBinds $3 ; let loc = (comb3 $1 (reLoc $2) (L l bs)) ; acs (\cs -> sL loc (GRHSs csw (unguardedRHS (EpAnn (anc $ rs loc) (GrhsAnn Nothing (mj AnnEqual $1)) cs) loc $2) bs)) } } | gdrhs wherebinds {% do { let {L l (bs, csw) = adaptWhereBinds $2} ; acs (\cs -> sL (comb2 $1 (L l bs)) (GRHSs (cs Semi.<> csw) (reverse (unLoc $1)) bs)) }} gdrhs :: { Located [LGRHS GhcPs (LHsExpr GhcPs)] } : gdrhs gdrh { sLL $1 (reLoc $>) ($2 : unLoc $1) } | gdrh { sL1 (reLoc $1) [$1] } gdrh :: { LGRHS GhcPs (LHsExpr GhcPs) } : '|' guardquals '=' exp {% runPV (unECP $4) >>= \ $4 -> acsA (\cs -> sL (comb2A $1 $>) $ GRHS (EpAnn (glR $1) (GrhsAnn (Just $ glAA $1) (mj AnnEqual $3)) cs) (unLoc $2) $4) } sigdecl :: { LHsDecl GhcPs } : -- See Note [Declaration/signature overlap] for why we need infixexp here infixexp '::' sigtype {% do { $1 <- runPV (unECP $1) ; v <- checkValSigLhs $1 ; acsA (\cs -> (sLLAl $1 (reLoc $>) $ SigD noExtField $ TypeSig (EpAnn (glAR $1) (AnnSig (mu AnnDcolon $2) []) cs) [v] (mkHsWildCardBndrs 
$3)))} } | var ',' sig_vars '::' sigtype {% do { v <- addTrailingCommaN $1 (gl $2) ; let sig cs = TypeSig (EpAnn (glNR $1) (AnnSig (mu AnnDcolon $4) []) cs) (v : reverse (unLoc $3)) (mkHsWildCardBndrs $5) ; acsA (\cs -> sLL (reLocN $1) (reLoc $>) $ SigD noExtField (sig cs) ) }} | infix prec ops {% do { mbPrecAnn <- traverse (\l2 -> do { checkPrecP l2 $3 ; pure (mj AnnVal l2) }) $2 ; let (fixText, fixPrec) = case $2 of -- If an explicit precedence isn't supplied, -- it defaults to maxPrecedence Nothing -> (NoSourceText, maxPrecedence) Just l2 -> (fst $ unLoc l2, snd $ unLoc l2) ; acsA (\cs -> sLL $1 $> $ SigD noExtField (FixSig (EpAnn (glR $1) (mj AnnInfix $1 : maybeToList mbPrecAnn) cs) (FixitySig noExtField (fromOL $ unLoc $3) (Fixity fixText fixPrec (unLoc $1))))) }} | pattern_synonym_sig { sL1 $1 . SigD noExtField . unLoc $ $1 } | '{-# COMPLETE' qcon_list opt_tyconsig '#-}' {% let (dcolon, tc) = $3 in acsA (\cs -> sLL $1 $> (SigD noExtField (CompleteMatchSig (EpAnn (glR $1) ([ mo $1 ] ++ dcolon ++ [mc $4]) cs) (getCOMPLETE_PRAGs $1) $2 tc))) } -- This rule is for both INLINE and INLINABLE pragmas | '{-# INLINE' activation qvarcon '#-}' {% acsA (\cs -> (sLL $1 $> $ SigD noExtField (InlineSig (EpAnn (glR $1) ((mo $1:fst $2) ++ [mc $4]) cs) $3 (mkInlinePragma (getINLINE_PRAGs $1) (getINLINE $1) (snd $2))))) } | '{-# OPAQUE' qvar '#-}' {% acsA (\cs -> (sLL $1 $> $ SigD noExtField (InlineSig (EpAnn (glR $1) [mo $1, mc $3] cs) $2 (mkOpaquePragma (getOPAQUE_PRAGs $1))))) } | '{-# SCC' qvar '#-}' {% acsA (\cs -> sLL $1 $> (SigD noExtField (SCCFunSig (EpAnn (glR $1) [mo $1, mc $3] cs) (getSCC_PRAGs $1) $2 Nothing))) } | '{-# SCC' qvar STRING '#-}' {% do { scc <- getSCC $3 ; let str_lit = StringLiteral (getSTRINGs $3) scc Nothing ; acsA (\cs -> sLL $1 $> (SigD noExtField (SCCFunSig (EpAnn (glR $1) [mo $1, mc $4] cs) (getSCC_PRAGs $1) $2 (Just ( sL1a $3 str_lit))))) }} | '{-# SPECIALISE' activation qvar '::' sigtypes1 '#-}' {% acsA (\cs -> let inl_prag = mkInlinePragma 
(getSPEC_PRAGs $1) (NoUserInlinePrag, FunLike) (snd $2) in sLL $1 $> $ SigD noExtField (SpecSig (EpAnn (glR $1) (mo $1:mu AnnDcolon $4:mc $6:(fst $2)) cs) $3 (fromOL $5) inl_prag)) } | '{-# SPECIALISE_INLINE' activation qvar '::' sigtypes1 '#-}' {% acsA (\cs -> sLL $1 $> $ SigD noExtField (SpecSig (EpAnn (glR $1) (mo $1:mu AnnDcolon $4:mc $6:(fst $2)) cs) $3 (fromOL $5) (mkInlinePragma (getSPEC_INLINE_PRAGs $1) (getSPEC_INLINE $1) (snd $2)))) } | '{-# SPECIALISE' 'instance' inst_type '#-}' {% acsA (\cs -> sLL $1 $> $ SigD noExtField (SpecInstSig (EpAnn (glR $1) [mo $1,mj AnnInstance $2,mc $4] cs) (getSPEC_PRAGs $1) $3)) } -- A minimal complete definition | '{-# MINIMAL' name_boolformula_opt '#-}' {% acsA (\cs -> sLL $1 $> $ SigD noExtField (MinimalSig (EpAnn (glR $1) [mo $1,mc $3] cs) (getMINIMAL_PRAGs $1) $2)) } activation :: { ([AddEpAnn],Maybe Activation) } -- See Note [%shift: activation -> {- empty -}] : {- empty -} %shift { ([],Nothing) } | explicit_activation { (fst $1,Just (snd $1)) } explicit_activation :: { ([AddEpAnn],Activation) } -- In brackets : '[' INTEGER ']' { ([mj AnnOpenS $1,mj AnnVal $2,mj AnnCloseS $3] ,ActiveAfter (getINTEGERs $2) (fromInteger (il_value (getINTEGER $2)))) } | '[' rule_activation_marker INTEGER ']' { ($2++[mj AnnOpenS $1,mj AnnVal $3,mj AnnCloseS $4] ,ActiveBefore (getINTEGERs $3) (fromInteger (il_value (getINTEGER $3)))) } ----------------------------------------------------------------------------- -- Expressions quasiquote :: { Located (HsSplice GhcPs) } : TH_QUASIQUOTE { let { loc = getLoc $1 ; ITquasiQuote (quoter, quote, quoteSpan) = unLoc $1 ; quoterId = mkUnqual varName quoter } in sL1 $1 (mkHsQuasiQuote quoterId (mkSrcSpanPs quoteSpan) quote) } | TH_QQUASIQUOTE { let { loc = getLoc $1 ; ITqQuasiQuote (qual, quoter, quote, quoteSpan) = unLoc $1 ; quoterId = mkQual varName (qual, quoter) } in sL1 $1 (mkHsQuasiQuote quoterId (mkSrcSpanPs quoteSpan) quote) } exp :: { ECP } : infixexp '::' ctype { ECP $ unECP $1 >>= \ $1 -> 
rejectPragmaPV $1 >> mkHsTySigPV (noAnnSrcSpan $ comb2Al $1 (reLoc $>)) $1 $3 [(mu AnnDcolon $2)] } | infixexp '-<' exp {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ acsA (\cs -> sLLAA $1 $> $ HsCmdArrApp (EpAnn (glAR $1) (mu Annlarrowtail $2) cs) $1 $3 HsFirstOrderApp True) } | infixexp '>-' exp {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ acsA (\cs -> sLLAA $1 $> $ HsCmdArrApp (EpAnn (glAR $1) (mu Annrarrowtail $2) cs) $3 $1 HsFirstOrderApp False) } | infixexp '-<<' exp {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ acsA (\cs -> sLLAA $1 $> $ HsCmdArrApp (EpAnn (glAR $1) (mu AnnLarrowtail $2) cs) $1 $3 HsHigherOrderApp True) } | infixexp '>>-' exp {% runPV (unECP $1) >>= \ $1 -> runPV (unECP $3) >>= \ $3 -> fmap ecpFromCmd $ acsA (\cs -> sLLAA $1 $> $ HsCmdArrApp (EpAnn (glAR $1) (mu AnnRarrowtail $2) cs) $3 $1 HsHigherOrderApp False) } -- See Note [%shift: exp -> infixexp] | infixexp %shift { $1 } | exp_prag(exp) { $1 } -- See Note [Pragmas and operator fixity] infixexp :: { ECP } : exp10 { $1 } | infixexp qop exp10p -- See Note [Pragmas and operator fixity] { ECP $ superInfixOp $ $2 >>= \ $2 -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> rejectPragmaPV $1 >> (mkHsOpAppPV (comb2A (reLoc $1) $3) $1 $2 $3) } -- AnnVal annotation for NPlusKPat, which discards the operator exp10p :: { ECP } : exp10 { $1 } | exp_prag(exp10p) { $1 } -- See Note [Pragmas and operator fixity] exp_prag(e) :: { ECP } : prag_e e -- See Note [Pragmas and operator fixity] {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ return $ (reLocA $ sLLlA $1 $> $ HsPragE noExtField (unLoc $1) $2) } exp10 :: { ECP } -- See Note [%shift: exp10 -> '-' fexp] : '-' fexp %shift { ECP $ unECP $2 >>= \ $2 -> mkHsNegAppPV (comb2A $1 $>) $2 [mj AnnMinus $1] } -- See Note [%shift: exp10 -> fexp] | fexp %shift { $1 } optSemi :: { (Maybe EpaLocation,Bool) } : ';' { (msemim $1,True) } | {- empty -} { (Nothing,False) } 
{- Note [Pragmas and operator fixity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'prag_e' is an expression pragma, such as {-# SCC ... #-}. It must be used with care, or else #15730 happens. Consider this infix expression: 1 / 2 / 2 There are two ways to parse it: 1. (1 / 2) / 2 = 0.25 2. 1 / (2 / 2) = 1.0 Due to the fixity of the (/) operator (assuming it comes from Prelude), option 1 is the correct parse. However, in the past GHC's parser used to get confused by the SCC annotation when it occurred in the middle of an infix expression: 1 / {-# SCC ann #-} 2 / 2 -- used to get parsed as option 2 There are several ways to address this issue, see GHC Proposal #176 for a detailed exposition: https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0176-scc-parsing.rst The accepted fix is to disallow pragmas that occur within infix expressions. Infix expressions are assembled out of 'exp10', so 'exp10' must not accept pragmas. Instead, we accept them in exactly two places: * at the start of an expression or a parenthesized subexpression: f = {-# SCC ann #-} 1 / 2 / 2 -- at the start of the expression g = 5 + ({-# SCC ann #-} 1 / 2 / 2) -- at the start of a parenthesized subexpression * immediately after the last operator: f = 1 / 2 / {-# SCC ann #-} 2 In both cases, the parse does not depend on operator fixity. The second case may sound unnecessary, but it's actually needed to support a common idiom: f $ {-# SCC ann $-} ... 
-} prag_e :: { Located (HsPragE GhcPs) } : '{-# SCC' STRING '#-}' {% do { scc <- getSCC $2 ; acs (\cs -> (sLL $1 $> (HsPragSCC (EpAnn (glR $1) (AnnPragma (mo $1) (mc $3) [mj AnnValStr $2]) cs) (getSCC_PRAGs $1) (StringLiteral (getSTRINGs $2) scc Nothing))))} } | '{-# SCC' VARID '#-}' {% acs (\cs -> (sLL $1 $> (HsPragSCC (EpAnn (glR $1) (AnnPragma (mo $1) (mc $3) [mj AnnVal $2]) cs) (getSCC_PRAGs $1) (StringLiteral NoSourceText (getVARID $2) Nothing)))) } fexp :: { ECP } : fexp aexp { ECP $ superFunArg $ unECP $1 >>= \ $1 -> unECP $2 >>= \ $2 -> mkHsAppPV (noAnnSrcSpan $ comb2A (reLoc $1) $>) $1 $2 } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | fexp PREFIX_AT atype { ECP $ unECP $1 >>= \ $1 -> mkHsAppTypePV (noAnnSrcSpan $ comb2 (reLoc $1) (reLoc $>)) $1 (getLoc $2) $3 } | 'static' aexp {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ acsA (\cs -> sLL $1 (reLoc $>) $ HsStatic (EpAnn (glR $1) [mj AnnStatic $1] cs) $2) } | aexp { $1 } aexp :: { ECP } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer : qvar TIGHT_INFIX_AT aexp { ECP $ unECP $3 >>= \ $3 -> mkHsAsPatPV (comb2 (reLocN $1) (reLoc $>)) $1 $3 [mj AnnAt $2] } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | PREFIX_TILDE aexp { ECP $ unECP $2 >>= \ $2 -> mkHsLazyPatPV (comb2 $1 (reLoc $>)) $2 [mj AnnTilde $1] } | PREFIX_BANG aexp { ECP $ unECP $2 >>= \ $2 -> mkHsBangPatPV (comb2 $1 (reLoc $>)) $2 [mj AnnBang $1] } | PREFIX_MINUS aexp { ECP $ unECP $2 >>= \ $2 -> mkHsNegAppPV (comb2A $1 $>) $2 [mj AnnMinus $1] } | '\\' apats '->' exp { ECP $ unECP $4 >>= \ $4 -> mkHsLamPV (comb2 $1 (reLoc $>)) (\cs -> mkMatchGroup FromSource (reLocA $ sLLlA $1 $> [reLocA $ sLLlA $1 $> $ Match { m_ext = EpAnn (glR $1) [mj AnnLam $1] cs , m_ctxt = LambdaExpr , m_pats = $2 , m_grhss = unguardedGRHSs (comb2 $3 (reLoc $4)) $4 (EpAnn (glR $3) (GrhsAnn Nothing (mu AnnRarrow $3)) emptyComments) }])) } | 'let' binds 'in' exp { ECP $ unECP $4 >>= \ $4 -> 
mkHsLetPV (comb2A $1 $>) (hsTok $1) (unLoc $2) (hsTok $3) $4 } | '\\' 'lcase' altslist(pats1) { ECP $ $3 >>= \ $3 -> mkHsLamCasePV (comb2 $1 (reLoc $>)) LamCase $3 [mj AnnLam $1,mj AnnCase $2] } | '\\' 'lcases' altslist(apats) { ECP $ $3 >>= \ $3 -> mkHsLamCasePV (comb2 $1 (reLoc $>)) LamCases $3 [mj AnnLam $1,mj AnnCases $2] } | 'if' exp optSemi 'then' exp optSemi 'else' exp {% runPV (unECP $2) >>= \ ($2 :: LHsExpr GhcPs) -> return $ ECP $ unECP $5 >>= \ $5 -> unECP $8 >>= \ $8 -> mkHsIfPV (comb2A $1 $>) $2 (snd $3) $5 (snd $6) $8 (AnnsIf { aiIf = glAA $1 , aiThen = glAA $4 , aiElse = glAA $7 , aiThenSemi = fst $3 , aiElseSemi = fst $6})} | 'if' ifgdpats {% hintMultiWayIf (getLoc $1) >>= \_ -> fmap ecpFromExp $ acsA (\cs -> sLL $1 $> $ HsMultiIf (EpAnn (glR $1) (mj AnnIf $1:(fst $ unLoc $2)) cs) (reverse $ snd $ unLoc $2)) } | 'case' exp 'of' altslist(pats1) {% runPV (unECP $2) >>= \ ($2 :: LHsExpr GhcPs) -> return $ ECP $ $4 >>= \ $4 -> mkHsCasePV (comb3 $1 $3 (reLoc $4)) $2 $4 (EpAnnHsCase (glAA $1) (glAA $3) []) } -- QualifiedDo. 
| DO stmtlist {% do hintQualifiedDo $1 return $ ECP $ $2 >>= \ $2 -> mkHsDoPV (comb2A $1 $2) (fmap mkModuleNameFS (getDO $1)) $2 (AnnList (Just $ glAR $2) Nothing Nothing [mj AnnDo $1] []) } | MDO stmtlist {% hintQualifiedDo $1 >> runPV $2 >>= \ $2 -> fmap ecpFromExp $ acsA (\cs -> L (comb2A $1 $2) (mkHsDoAnns (MDoExpr $ fmap mkModuleNameFS (getMDO $1)) $2 (EpAnn (glR $1) (AnnList (Just $ glAR $2) Nothing Nothing [mj AnnMdo $1] []) cs) )) } | 'proc' aexp '->' exp {% (checkPattern <=< runPV) (unECP $2) >>= \ p -> runPV (unECP $4) >>= \ $4@cmd -> fmap ecpFromExp $ acsA (\cs -> sLLlA $1 $> $ HsProc (EpAnn (glR $1) [mj AnnProc $1,mu AnnRarrow $3] cs) p (sLLa $1 (reLoc $>) $ HsCmdTop noExtField cmd)) } | aexp1 { $1 } aexp1 :: { ECP } : aexp1 '{' fbinds '}' { ECP $ getBit OverloadedRecordUpdateBit >>= \ overloaded -> unECP $1 >>= \ $1 -> $3 >>= \ $3 -> mkHsRecordPV overloaded (comb2 (reLoc $1) $>) (comb2 $2 $4) $1 $3 [moc $2,mcc $4] } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer | aexp1 TIGHT_INFIX_PROJ field {% runPV (unECP $1) >>= \ $1 -> fmap ecpFromExp $ acsa (\cs -> let fl = sLLa $2 $> (DotFieldOcc ((EpAnn (glR $2) (AnnFieldLabel (Just $ glAA $2)) emptyComments)) (reLocA $3)) in mkRdrGetField (noAnnSrcSpan $ comb2 (reLoc $1) $>) $1 fl (EpAnn (glAR $1) NoEpAnns cs)) } | aexp2 { $1 } aexp2 :: { ECP } : qvar { ECP $ mkHsVarPV $! $1 } | qcon { ECP $ mkHsVarPV $! $1 } -- See Note [%shift: aexp2 -> ipvar] | ipvar %shift {% acsExpr (\cs -> sL1a $1 (HsIPVar (comment (glRR $1) cs) $! unLoc $1)) } | overloaded_label {% acsExpr (\cs -> sL1a $1 (HsOverLabel (comment (glRR $1) cs) $! unLoc $1)) } | literal { ECP $ pvA (mkHsLitPV $! $1) } -- This will enable overloaded strings permanently. Normally the renamer turns HsString -- into HsOverLit when -XOverloadedStrings is on. -- | STRING { sL (getLoc $1) (HsOverLit $! 
mkHsIsString (getSTRINGs $1) -- (getSTRING $1) noExtField) } | INTEGER { ECP $ mkHsOverLitPV (sL1a $1 $ mkHsIntegral (getINTEGER $1)) } | RATIONAL { ECP $ mkHsOverLitPV (sL1a $1 $ mkHsFractional (getRATIONAL $1)) } -- N.B.: sections get parsed by these next two productions. -- This allows you to write, e.g., '(+ 3, 4 -)', which isn't -- correct Haskell (you'd have to write '((+ 3), (4 -))') -- but the less cluttered version fell out of having texps. | '(' texp ')' { ECP $ unECP $2 >>= \ $2 -> mkHsParPV (comb2 $1 $>) (hsTok $1) $2 (hsTok $3) } | '(' tup_exprs ')' { ECP $ $2 >>= \ $2 -> mkSumOrTuplePV (noAnnSrcSpan $ comb2 $1 $>) Boxed $2 [mop $1,mcp $3]} -- This case is only possible when 'OverloadedRecordDotBit' is enabled. | '(' projection ')' { ECP $ acsA (\cs -> sLL $1 $> $ mkRdrProjection (NE.reverse (unLoc $2)) (EpAnn (glR $1) (AnnProjection (glAA $1) (glAA $3)) cs)) >>= ecpFromExp' } | '(#' texp '#)' { ECP $ unECP $2 >>= \ $2 -> mkSumOrTuplePV (noAnnSrcSpan $ comb2 $1 $>) Unboxed (Tuple [Right $2]) [moh $1,mch $3] } | '(#' tup_exprs '#)' { ECP $ $2 >>= \ $2 -> mkSumOrTuplePV (noAnnSrcSpan $ comb2 $1 $>) Unboxed $2 [moh $1,mch $3] } | '[' list ']' { ECP $ $2 (comb2 $1 $>) (mos $1,mcs $3) } | '_' { ECP $ pvA $ mkHsWildCardPV (getLoc $1) } -- Template Haskell Extension | splice_untyped { ECP $ pvA $ mkHsSplicePV $1 } | splice_typed { ecpFromExp $ mapLoc (HsSpliceE noAnn) (reLocA $1) } | SIMPLEQUOTE qvar {% fmap ecpFromExp $ acsA (\cs -> sLL $1 (reLocN $>) $ HsUntypedBracket (EpAnn (glR $1) [mj AnnSimpleQuote $1] cs) (VarBr noExtField True $2)) } | SIMPLEQUOTE qcon {% fmap ecpFromExp $ acsA (\cs -> sLL $1 (reLocN $>) $ HsUntypedBracket (EpAnn (glR $1) [mj AnnSimpleQuote $1] cs) (VarBr noExtField True $2)) } | TH_TY_QUOTE tyvar {% fmap ecpFromExp $ acsA (\cs -> sLL $1 (reLocN $>) $ HsUntypedBracket (EpAnn (glR $1) [mj AnnThTyQuote $1 ] cs) (VarBr noExtField False $2)) } | TH_TY_QUOTE gtycon {% fmap ecpFromExp $ acsA (\cs -> sLL $1 (reLocN $>) $ HsUntypedBracket 
(EpAnn (glR $1) [mj AnnThTyQuote $1 ] cs) (VarBr noExtField False $2)) } -- See Note [%shift: aexp2 -> TH_TY_QUOTE] | TH_TY_QUOTE %shift {% reportEmptyDoubleQuotes (getLoc $1) } | '[|' exp '|]' {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ acsA (\cs -> sLL $1 $> $ HsUntypedBracket (EpAnn (glR $1) (if (hasE $1) then [mj AnnOpenE $1, mu AnnCloseQ $3] else [mu AnnOpenEQ $1,mu AnnCloseQ $3]) cs) (ExpBr noExtField $2)) } | '[||' exp '||]' {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromExp $ acsA (\cs -> sLL $1 $> $ HsTypedBracket (EpAnn (glR $1) (if (hasE $1) then [mj AnnOpenE $1,mc $3] else [mo $1,mc $3]) cs) $2) } | '[t|' ktype '|]' {% fmap ecpFromExp $ acsA (\cs -> sLL $1 $> $ HsUntypedBracket (EpAnn (glR $1) [mo $1,mu AnnCloseQ $3] cs) (TypBr noExtField $2)) } | '[p|' infixexp '|]' {% (checkPattern <=< runPV) (unECP $2) >>= \p -> fmap ecpFromExp $ acsA (\cs -> sLL $1 $> $ HsUntypedBracket (EpAnn (glR $1) [mo $1,mu AnnCloseQ $3] cs) (PatBr noExtField p)) } | '[d|' cvtopbody '|]' {% fmap ecpFromExp $ acsA (\cs -> sLL $1 $> $ HsUntypedBracket (EpAnn (glR $1) (mo $1:mu AnnCloseQ $3:fst $2) cs) (DecBrL noExtField (snd $2))) } | quasiquote { ECP $ pvA $ mkHsSplicePV $1 } -- arrow notation extension | '(|' aexp cmdargs '|)' {% runPV (unECP $2) >>= \ $2 -> fmap ecpFromCmd $ acsA (\cs -> sLL $1 $> $ HsCmdArrForm (EpAnn (glR $1) (AnnList (Just $ glR $1) (Just $ mu AnnOpenB $1) (Just $ mu AnnCloseB $4) [] []) cs) $2 Prefix Nothing (reverse $3)) } projection :: { Located (NonEmpty (LocatedAn NoEpAnns (DotFieldOcc GhcPs))) } projection -- See Note [Whitespace-sensitive operator parsing] in GHC.Parsing.Lexer : projection TIGHT_INFIX_PROJ field {% acs (\cs -> sLL $1 $> ((sLLa $2 $> $ DotFieldOcc (EpAnn (glR $1) (AnnFieldLabel (Just $ glAA $2)) cs) (reLocA $3)) `NE.cons` unLoc $1)) } | PREFIX_PROJ field {% acs (\cs -> sLL $1 $> ((sLLa $1 $> $ DotFieldOcc (EpAnn (glR $1) (AnnFieldLabel (Just $ glAA $1)) cs) (reLocA $2)) :| [])) } splice_exp :: { LHsExpr GhcPs } : splice_untyped { 
mapLoc (HsSpliceE noAnn) (reLocA $1) } | splice_typed { mapLoc (HsSpliceE noAnn) (reLocA $1) } splice_untyped :: { Located (HsSplice GhcPs) } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer : PREFIX_DOLLAR aexp2 {% runPV (unECP $2) >>= \ $2 -> acs (\cs -> sLLlA $1 $> $ mkUntypedSplice (EpAnn (glR $1) [mj AnnDollar $1] cs) DollarSplice $2) } splice_typed :: { Located (HsSplice GhcPs) } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer : PREFIX_DOLLAR_DOLLAR aexp2 {% runPV (unECP $2) >>= \ $2 -> acs (\cs -> sLLlA $1 $> $ mkTypedSplice (EpAnn (glR $1) [mj AnnDollarDollar $1] cs) DollarSplice $2) } cmdargs :: { [LHsCmdTop GhcPs] } : cmdargs acmd { $2 : $1 } | {- empty -} { [] } acmd :: { LHsCmdTop GhcPs } : aexp {% runPV (unECP $1) >>= \ (cmd :: LHsCmd GhcPs) -> runPV (checkCmdBlockArguments cmd) >>= \ _ -> return (sL1a (reLoc cmd) $ HsCmdTop noExtField cmd) } cvtopbody :: { ([AddEpAnn],[LHsDecl GhcPs]) } : '{' cvtopdecls0 '}' { ([mj AnnOpenC $1 ,mj AnnCloseC $3],$2) } | vocurly cvtopdecls0 close { ([],$2) } cvtopdecls0 :: { [LHsDecl GhcPs] } : topdecls_semi { cvTopDecls $1 } | topdecls { cvTopDecls $1 } ----------------------------------------------------------------------------- -- Tuple expressions -- "texp" is short for tuple expressions: -- things that can appear unparenthesized as long as they're -- inside parens or delimited by commas texp :: { ECP } : exp { $1 } -- Note [Parsing sections] -- ~~~~~~~~~~~~~~~~~~~~~~~ -- We include left and right sections here, which isn't -- technically right according to the Haskell standard. -- For example (3 +, True) isn't legal. -- However, we want to parse bang patterns like -- (!x, !y) -- and it's convenient to do so here as a section -- Then when converting expr to pattern we unravel it again -- Meanwhile, the renamer checks that real sections appear -- inside parens. 
| infixexp qop {% runPV (unECP $1) >>= \ $1 -> runPV (rejectPragmaPV $1) >> runPV $2 >>= \ $2 -> return $ ecpFromExp $ reLocA $ sLL (reLoc $1) (reLocN $>) $ SectionL noAnn $1 (n2l $2) } | qopm infixexp { ECP $ superInfixOp $ unECP $2 >>= \ $2 -> $1 >>= \ $1 -> pvA $ mkHsSectionR_PV (comb2 (reLocN $1) (reLoc $>)) (n2l $1) $2 } -- View patterns get parenthesized above | exp '->' texp { ECP $ unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> mkHsViewPatPV (comb2 (reLoc $1) (reLoc $>)) $1 $3 [mu AnnRarrow $2] } -- Always at least one comma or bar. -- Though this can parse just commas (without any expressions), it won't -- in practice, because (,,,) is parsed as a name. See Note [ExplicitTuple] -- in GHC.Hs.Expr. tup_exprs :: { forall b. DisambECP b => PV (SumOrTuple b) } : texp commas_tup_tail { unECP $1 >>= \ $1 -> $2 >>= \ $2 -> do { t <- amsA $1 [AddCommaAnn (EpaSpan $ rs $ fst $2)] ; return (Tuple (Right t : snd $2)) } } | commas tup_tail { $2 >>= \ $2 -> do { let {cos = map (\ll -> (Left (EpAnn (anc $ rs ll) (EpaSpan $ rs ll) emptyComments))) (fst $1) } ; return (Tuple (cos ++ $2)) } } | texp bars { unECP $1 >>= \ $1 -> return $ (Sum 1 (snd $2 + 1) $1 [] (map (EpaSpan . realSrcSpan) $ fst $2)) } | bars texp bars0 { unECP $2 >>= \ $2 -> return $ (Sum (snd $1 + 1) (snd $1 + snd $3 + 1) $2 (map (EpaSpan . realSrcSpan) $ fst $1) (map (EpaSpan . realSrcSpan) $ fst $3)) } -- Always starts with commas; always follows an expr commas_tup_tail :: { forall b. DisambECP b => PV (SrcSpan,[Either (EpAnn EpaLocation) (LocatedA b)]) } commas_tup_tail : commas tup_tail { $2 >>= \ $2 -> do { let {cos = map (\l -> (Left (EpAnn (anc $ rs l) (EpaSpan $ rs l) emptyComments))) (tail $ fst $1) } ; return ((head $ fst $1, cos ++ $2)) } } -- Always follows a comma tup_tail :: { forall b. 
DisambECP b => PV [Either (EpAnn EpaLocation) (LocatedA b)] } : texp commas_tup_tail { unECP $1 >>= \ $1 -> $2 >>= \ $2 -> do { t <- amsA $1 [AddCommaAnn (EpaSpan $ rs $ fst $2)] ; return (Right t : snd $2) } } | texp { unECP $1 >>= \ $1 -> return [Right $1] } -- See Note [%shift: tup_tail -> {- empty -}] | {- empty -} %shift { return [Left noAnn] } ----------------------------------------------------------------------------- -- List expressions -- The rules below are little bit contorted to keep lexps left-recursive while -- avoiding another shift/reduce-conflict. -- Never empty. list :: { forall b. DisambECP b => SrcSpan -> (AddEpAnn, AddEpAnn) -> PV (LocatedA b) } : texp { \loc (ao,ac) -> unECP $1 >>= \ $1 -> mkHsExplicitListPV loc [$1] (AnnList Nothing (Just ao) (Just ac) [] []) } | lexps { \loc (ao,ac) -> $1 >>= \ $1 -> mkHsExplicitListPV loc (reverse $1) (AnnList Nothing (Just ao) (Just ac) [] []) } | texp '..' { \loc (ao,ac) -> unECP $1 >>= \ $1 -> acsA (\cs -> L loc $ ArithSeq (EpAnn (spanAsAnchor loc) [ao,mj AnnDotdot $2,ac] cs) Nothing (From $1)) >>= ecpFromExp' } | texp ',' exp '..' { \loc (ao,ac) -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> acsA (\cs -> L loc $ ArithSeq (EpAnn (spanAsAnchor loc) [ao,mj AnnComma $2,mj AnnDotdot $4,ac] cs) Nothing (FromThen $1 $3)) >>= ecpFromExp' } | texp '..' exp { \loc (ao,ac) -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> acsA (\cs -> L loc $ ArithSeq (EpAnn (spanAsAnchor loc) [ao,mj AnnDotdot $2,ac] cs) Nothing (FromTo $1 $3)) >>= ecpFromExp' } | texp ',' exp '..' 
exp { \loc (ao,ac) -> unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> unECP $5 >>= \ $5 -> acsA (\cs -> L loc $ ArithSeq (EpAnn (spanAsAnchor loc) [ao,mj AnnComma $2,mj AnnDotdot $4,ac] cs) Nothing (FromThenTo $1 $3 $5)) >>= ecpFromExp' } | texp '|' flattenedpquals { \loc (ao,ac) -> checkMonadComp >>= \ ctxt -> unECP $1 >>= \ $1 -> do { t <- addTrailingVbarA $1 (gl $2) ; acsA (\cs -> L loc $ mkHsCompAnns ctxt (unLoc $3) t (EpAnn (spanAsAnchor loc) (AnnList Nothing (Just ao) (Just ac) [] []) cs)) >>= ecpFromExp' } } lexps :: { forall b. DisambECP b => PV [LocatedA b] } : lexps ',' texp { $1 >>= \ $1 -> unECP $3 >>= \ $3 -> case $1 of (h:t) -> do h' <- addTrailingCommaA h (gl $2) return (((:) $! $3) $! (h':t)) } | texp ',' texp { unECP $1 >>= \ $1 -> unECP $3 >>= \ $3 -> do { h <- addTrailingCommaA $1 (gl $2) ; return [$3,h] }} ----------------------------------------------------------------------------- -- List Comprehensions flattenedpquals :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } : pquals { case (unLoc $1) of [qs] -> sL1 $1 qs -- We just had one thing in our "parallel" list so -- we simply return that thing directly qss -> sL1 $1 [sL1a $1 $ ParStmt noExtField [ParStmtBlock noExtField qs [] noSyntaxExpr | qs <- qss] noExpr noSyntaxExpr] -- We actually found some actual parallel lists so -- we wrap them into as a ParStmt } pquals :: { Located [[LStmt GhcPs (LHsExpr GhcPs)]] } : squals '|' pquals {% case unLoc $1 of (h:t) -> do h' <- addTrailingVbarA h (gl $2) return (sLL $1 $> (reverse (h':t) : unLoc $3)) } | squals { L (getLoc $1) [reverse (unLoc $1)] } squals :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } -- In reverse order, because the last -- one can "grab" the earlier ones : squals ',' transformqual {% case unLoc $1 of (h:t) -> do h' <- addTrailingCommaA h (gl $2) return (sLL $1 $> [sLLa $1 $> ((unLoc $3) (glRR $1) (reverse (h':t)))]) } | squals ',' qual {% runPV $3 >>= \ $3 -> case unLoc $1 of (h:t) -> do h' <- addTrailingCommaA h (gl $2) return (sLL $1 (reLoc 
$>) ($3 : (h':t))) } | transformqual {% return (sLL $1 $> [L (getLocAnn $1) ((unLoc $1) (glRR $1) [])]) } | qual {% runPV $1 >>= \ $1 -> return $ sL1A $1 [$1] } -- | transformquals1 ',' '{|' pquals '|}' { sLL $1 $> ($4 : unLoc $1) } -- | '{|' pquals '|}' { sL1 $1 [$2] } -- It is possible to enable bracketing (associating) qualifier lists -- by uncommenting the lines with {| |} above. Due to a lack of -- consensus on the syntax, this feature is not being used until we -- get user demand. transformqual :: { Located (RealSrcSpan -> [LStmt GhcPs (LHsExpr GhcPs)] -> Stmt GhcPs (LHsExpr GhcPs)) } -- Function is applied to a list of stmts *in order* : 'then' exp {% runPV (unECP $2) >>= \ $2 -> acs (\cs-> sLLlA $1 $> (\r ss -> (mkTransformStmt (EpAnn (anc r) [mj AnnThen $1] cs) ss $2))) } | 'then' exp 'by' exp {% runPV (unECP $2) >>= \ $2 -> runPV (unECP $4) >>= \ $4 -> acs (\cs -> sLLlA $1 $> ( \r ss -> (mkTransformByStmt (EpAnn (anc r) [mj AnnThen $1,mj AnnBy $3] cs) ss $2 $4))) } | 'then' 'group' 'using' exp {% runPV (unECP $4) >>= \ $4 -> acs (\cs -> sLLlA $1 $> ( \r ss -> (mkGroupUsingStmt (EpAnn (anc r) [mj AnnThen $1,mj AnnGroup $2,mj AnnUsing $3] cs) ss $4))) } | 'then' 'group' 'by' exp 'using' exp {% runPV (unECP $4) >>= \ $4 -> runPV (unECP $6) >>= \ $6 -> acs (\cs -> sLLlA $1 $> ( \r ss -> (mkGroupByUsingStmt (EpAnn (anc r) [mj AnnThen $1,mj AnnGroup $2,mj AnnBy $3,mj AnnUsing $5] cs) ss $4 $6))) } -- Note that 'group' is a special_id, which means that you can enable -- TransformListComp while still using Data.List.group. However, this -- introduces a shift/reduce conflict. Happy chooses to resolve the conflict -- in by choosing the "group by" variant, which is what we want. 
----------------------------------------------------------------------------- -- Guards guardquals :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } : guardquals1 { L (getLoc $1) (reverse (unLoc $1)) } guardquals1 :: { Located [LStmt GhcPs (LHsExpr GhcPs)] } : guardquals1 ',' qual {% runPV $3 >>= \ $3 -> case unLoc $1 of (h:t) -> do h' <- addTrailingCommaA h (gl $2) return (sLL $1 (reLoc $>) ($3 : (h':t))) } | qual {% runPV $1 >>= \ $1 -> return $ sL1A $1 [$1] } ----------------------------------------------------------------------------- -- Case alternatives altslist(PATS) :: { forall b. DisambECP b => PV (LocatedL [LMatch GhcPs (LocatedA b)]) } : '{' alts(PATS) '}' { $2 >>= \ $2 -> amsrl (sLL $1 $> (reverse (snd $ unLoc $2))) (AnnList (Just $ glR $2) (Just $ moc $1) (Just $ mcc $3) (fst $ unLoc $2) []) } | vocurly alts(PATS) close { $2 >>= \ $2 -> amsrl (L (getLoc $2) (reverse (snd $ unLoc $2))) (AnnList (Just $ glR $2) Nothing Nothing (fst $ unLoc $2) []) } | '{' '}' { amsrl (sLL $1 $> []) (AnnList Nothing (Just $ moc $1) (Just $ mcc $2) [] []) } | vocurly close { return $ noLocA [] } alts(PATS) :: { forall b. DisambECP b => PV (Located ([AddEpAnn],[LMatch GhcPs (LocatedA b)])) } : alts1(PATS) { $1 >>= \ $1 -> return $ sL1 $1 (fst $ unLoc $1,snd $ unLoc $1) } | ';' alts(PATS) { $2 >>= \ $2 -> return $ sLL $1 $> (((mz AnnSemi $1) ++ (fst $ unLoc $2) ) ,snd $ unLoc $2) } alts1(PATS) :: { forall b. 
DisambECP b => PV (Located ([AddEpAnn],[LMatch GhcPs (LocatedA b)])) } : alts1(PATS) ';' alt(PATS) { $1 >>= \ $1 -> $3 >>= \ $3 -> case snd $ unLoc $1 of [] -> return (sLL $1 (reLoc $>) ((fst $ unLoc $1) ++ (mz AnnSemi $2) ,[$3])) (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (sLL $1 (reLoc $>) (fst $ unLoc $1,$3 : h' : t)) } | alts1(PATS) ';' { $1 >>= \ $1 -> case snd $ unLoc $1 of [] -> return (sLL $1 $> ((fst $ unLoc $1) ++ (mz AnnSemi $2) ,[])) (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (sLL $1 $> (fst $ unLoc $1, h' : t)) } | alt(PATS) { $1 >>= \ $1 -> return $ sL1 (reLoc $1) ([],[$1]) } alt(PATS) :: { forall b. DisambECP b => PV (LMatch GhcPs (LocatedA b)) } : PATS alt_rhs { $2 >>= \ $2 -> acsA (\cs -> sLLAsl $1 $> (Match { m_ext = EpAnn (listAsAnchor $1) [] cs , m_ctxt = CaseAlt -- for \case and \cases, this will be changed during post-processing , m_pats = $1 , m_grhss = unLoc $2 }))} alt_rhs :: { forall b. DisambECP b => PV (Located (GRHSs GhcPs (LocatedA b))) } : ralt wherebinds { $1 >>= \alt -> do { let {L l (bs, csw) = adaptWhereBinds $2} ; acs (\cs -> sLL alt (L l bs) (GRHSs (cs Semi.<> csw) (unLoc alt) bs)) }} ralt :: { forall b. DisambECP b => PV (Located [LGRHS GhcPs (LocatedA b)]) } : '->' exp { unECP $2 >>= \ $2 -> acs (\cs -> sLLlA $1 $> (unguardedRHS (EpAnn (glR $1) (GrhsAnn Nothing (mu AnnRarrow $1)) cs) (comb2 $1 (reLoc $2)) $2)) } | gdpats { $1 >>= \gdpats -> return $ sL1 gdpats (reverse (unLoc gdpats)) } gdpats :: { forall b. DisambECP b => PV (Located [LGRHS GhcPs (LocatedA b)]) } : gdpats gdpat { $1 >>= \gdpats -> $2 >>= \gdpat -> return $ sLL gdpats (reLoc gdpat) (gdpat : unLoc gdpats) } | gdpat { $1 >>= \gdpat -> return $ sL1A gdpat [gdpat] } -- layout for MultiWayIf doesn't begin with an open brace, because it's hard to -- generate the open brace in addition to the vertical bar in the lexer, and -- we don't need it. 
ifgdpats :: { Located ([AddEpAnn],[LGRHS GhcPs (LHsExpr GhcPs)]) } : '{' gdpats '}' {% runPV $2 >>= \ $2 -> return $ sLL $1 $> ([moc $1,mcc $3],unLoc $2) } | gdpats close {% runPV $1 >>= \ $1 -> return $ sL1 $1 ([],unLoc $1) } gdpat :: { forall b. DisambECP b => PV (LGRHS GhcPs (LocatedA b)) } : '|' guardquals '->' exp { unECP $4 >>= \ $4 -> acsA (\cs -> sL (comb2A $1 $>) $ GRHS (EpAnn (glR $1) (GrhsAnn (Just $ glAA $1) (mu AnnRarrow $3)) cs) (unLoc $2) $4) } -- 'pat' recognises a pattern, including one with a bang at the top -- e.g. "!x" or "!(x,y)" or "C a b" etc -- Bangs inside are parsed as infix operator applications, so that -- we parse them right when bang-patterns are off pat :: { LPat GhcPs } pat : exp {% (checkPattern <=< runPV) (unECP $1) } -- 'pats1' does the same thing as 'pat', but returns it as a singleton -- list so that it can be used with a parameterized production rule pats1 :: { [LPat GhcPs] } pats1 : pat { [ $1 ] } bindpat :: { LPat GhcPs } bindpat : exp {% -- See Note [Parser-Validator Details] in GHC.Parser.PostProcess checkPattern_details incompleteDoBlock (unECP $1) } apat :: { LPat GhcPs } apat : aexp {% (checkPattern <=< runPV) (unECP $1) } apats :: { [LPat GhcPs] } : apat apats { $1 : $2 } | {- empty -} { [] } ----------------------------------------------------------------------------- -- Statement sequences stmtlist :: { forall b. 
DisambECP b => PV (LocatedL [LocatedA (Stmt GhcPs (LocatedA b))]) } : '{' stmts '}' { $2 >>= \ $2 -> amsrl (sLL $1 $> (reverse $ snd $ unLoc $2)) (AnnList (Just $ glR $2) (Just $ moc $1) (Just $ mcc $3) (fromOL $ fst $ unLoc $2) []) } | vocurly stmts close { $2 >>= \ $2 -> amsrl (L (gl $2) (reverse $ snd $ unLoc $2)) (AnnList (Just $ glR $2) Nothing Nothing (fromOL $ fst $ unLoc $2) []) } -- do { ;; s ; s ; ; s ;; } -- The last Stmt should be an expression, but that's hard to enforce -- here, because we need too much lookahead if we see do { e ; } -- So we use BodyStmts throughout, and switch the last one over -- in ParseUtils.checkDo instead stmts :: { forall b. DisambECP b => PV (Located (OrdList AddEpAnn,[LStmt GhcPs (LocatedA b)])) } : stmts ';' stmt { $1 >>= \ $1 -> $3 >>= \ ($3 :: LStmt GhcPs (LocatedA b)) -> case (snd $ unLoc $1) of [] -> return (sLL $1 (reLoc $>) ((fst $ unLoc $1) `snocOL` (mj AnnSemi $2) ,$3 : (snd $ unLoc $1))) (h:t) -> do { h' <- addTrailingSemiA h (gl $2) ; return $ sLL $1 (reLoc $>) (fst $ unLoc $1,$3 :(h':t)) }} | stmts ';' { $1 >>= \ $1 -> case (snd $ unLoc $1) of [] -> return (sLL $1 $> ((fst $ unLoc $1) `snocOL` (mj AnnSemi $2),snd $ unLoc $1)) (h:t) -> do { h' <- addTrailingSemiA h (gl $2) ; return $ sL1 $1 (fst $ unLoc $1,h':t) }} | stmt { $1 >>= \ $1 -> return $ sL1A $1 (nilOL,[$1]) } | {- empty -} { return $ noLoc (nilOL,[]) } -- For typing stmts at the GHCi prompt, where -- the input may consist of just comments. maybe_stmt :: { Maybe (LStmt GhcPs (LHsExpr GhcPs)) } : stmt {% fmap Just (runPV $1) } | {- nothing -} { Nothing } -- For GHC API. e_stmt :: { LStmt GhcPs (LHsExpr GhcPs) } : stmt {% runPV $1 } stmt :: { forall b. DisambECP b => PV (LStmt GhcPs (LocatedA b)) } : qual { $1 } | 'rec' stmtlist { $2 >>= \ $2 -> acsA (\cs -> (sLL $1 (reLoc $>) $ mkRecStmt (EpAnn (glR $1) (hsDoAnn $1 $2 AnnRec) cs) $2)) } qual :: { forall b. 
DisambECP b => PV (LStmt GhcPs (LocatedA b)) } : bindpat '<-' exp { unECP $3 >>= \ $3 -> acsA (\cs -> sLLlA (reLoc $1) $> $ mkPsBindStmt (EpAnn (glAR $1) [mu AnnLarrow $2] cs) $1 $3) } | exp { unECP $1 >>= \ $1 -> return $ sL1 $1 $ mkBodyStmt $1 } | 'let' binds { acsA (\cs -> (sLL $1 $> $ mkLetStmt (EpAnn (glR $1) [mj AnnLet $1] cs) (unLoc $2))) } ----------------------------------------------------------------------------- -- Record Field Update/Construction fbinds :: { forall b. DisambECP b => PV ([Fbind b], Maybe SrcSpan) } : fbinds1 { $1 } | {- empty -} { return ([], Nothing) } fbinds1 :: { forall b. DisambECP b => PV ([Fbind b], Maybe SrcSpan) } : fbind ',' fbinds1 { $1 >>= \ $1 -> $3 >>= \ $3 -> do h <- addTrailingCommaFBind $1 (gl $2) return (case $3 of (flds, dd) -> (h : flds, dd)) } | fbind { $1 >>= \ $1 -> return ([$1], Nothing) } | '..' { return ([], Just (getLoc $1)) } fbind :: { forall b. DisambECP b => PV (Fbind b) } : qvar '=' texp { unECP $3 >>= \ $3 -> fmap Left $ acsA (\cs -> sLL (reLocN $1) (reLoc $>) $ HsFieldBind (EpAnn (glNR $1) [mj AnnEqual $2] cs) (sL1l $1 $ mkFieldOcc $1) $3 False) } -- RHS is a 'texp', allowing view patterns (#6038) -- and, incidentally, sections. Eg -- f (R { x = show -> s }) = ... 
| qvar { placeHolderPunRhs >>= \rhs -> fmap Left $ acsa (\cs -> sL1a (reLocN $1) $ HsFieldBind (EpAnn (glNR $1) [] cs) (sL1l $1 $ mkFieldOcc $1) rhs True) } -- In the punning case, use a place-holder -- The renamer fills in the final value -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer -- AZ: need to pull out the let block into a helper | field TIGHT_INFIX_PROJ fieldToUpdate '=' texp { do let top = sL1a $1 $ DotFieldOcc noAnn (reLocA $1) ((L lf (DotFieldOcc _ f)):t) = reverse (unLoc $3) lf' = comb2 $2 (reLoc $ L lf ()) fields = top : L (noAnnSrcSpan lf') (DotFieldOcc (EpAnn (spanAsAnchor lf') (AnnFieldLabel (Just $ glAA $2)) emptyComments) f) : t final = last fields l = comb2 $1 $3 isPun = False $5 <- unECP $5 fmap Right $ mkHsProjUpdatePV (comb2 $1 (reLoc $5)) (L l fields) $5 isPun [mj AnnEqual $4] } -- See Note [Whitespace-sensitive operator parsing] in GHC.Parser.Lexer -- AZ: need to pull out the let block into a helper | field TIGHT_INFIX_PROJ fieldToUpdate { do let top = sL1a $1 $ DotFieldOcc noAnn (reLocA $1) ((L lf (DotFieldOcc _ f)):t) = reverse (unLoc $3) lf' = comb2 $2 (reLoc $ L lf ()) fields = top : L (noAnnSrcSpan lf') (DotFieldOcc (EpAnn (spanAsAnchor lf') (AnnFieldLabel (Just $ glAA $2)) emptyComments) f) : t final = last fields l = comb2 $1 $3 isPun = True var <- mkHsVarPV (L (noAnnSrcSpan $ getLocA final) (mkRdrUnqual . mkVarOcc . unpackFS . unLoc . dfoLabel . 
unLoc $ final)) fmap Right $ mkHsProjUpdatePV l (L l fields) var isPun [] } fieldToUpdate :: { Located [LocatedAn NoEpAnns (DotFieldOcc GhcPs)] } fieldToUpdate -- See Note [Whitespace-sensitive operator parsing] in Lexer.x : fieldToUpdate TIGHT_INFIX_PROJ field {% getCommentsFor (getLoc $3) >>= \cs -> return (sLL $1 $> ((sLLa $2 $> (DotFieldOcc (EpAnn (glR $2) (AnnFieldLabel $ Just $ glAA $2) cs) (reLocA $3))) : unLoc $1)) } | field {% getCommentsFor (getLoc $1) >>= \cs -> return (sL1 $1 [sL1a $1 (DotFieldOcc (EpAnn (glR $1) (AnnFieldLabel Nothing) cs) (reLocA $1))]) } ----------------------------------------------------------------------------- -- Implicit Parameter Bindings dbinds :: { Located [LIPBind GhcPs] } -- reversed : dbinds ';' dbind {% case unLoc $1 of (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (let { this = $3; rest = h':t } in rest `seq` this `seq` sLL $1 (reLoc $>) (this : rest)) } | dbinds ';' {% case unLoc $1 of (h:t) -> do h' <- addTrailingSemiA h (gl $2) return (sLL $1 $> (h':t)) } | dbind { let this = $1 in this `seq` (sL1 (reLoc $1) [this]) } -- | {- empty -} { [] } dbind :: { LIPBind GhcPs } dbind : ipvar '=' exp {% runPV (unECP $3) >>= \ $3 -> acsA (\cs -> sLLlA $1 $> (IPBind (EpAnn (glR $1) [mj AnnEqual $2] cs) (reLocA $1) $3)) } ipvar :: { Located HsIPName } : IPDUPVARID { sL1 $1 (HsIPName (getIPDUPVARID $1)) } ----------------------------------------------------------------------------- -- Overloaded labels overloaded_label :: { Located FastString } : LABELVARID { sL1 $1 (getLABELVARID $1) } ----------------------------------------------------------------------------- -- Warnings and deprecations name_boolformula_opt :: { LBooleanFormula (LocatedN RdrName) } : name_boolformula { $1 } | {- empty -} { noLocA mkTrue } name_boolformula :: { LBooleanFormula (LocatedN RdrName) } : name_boolformula_and { $1 } | name_boolformula_and '|' name_boolformula {% do { h <- addTrailingVbarL $1 (gl $2) ; return (reLocA $ sLLAA $1 $> (Or [h,$3])) } 
} name_boolformula_and :: { LBooleanFormula (LocatedN RdrName) } : name_boolformula_and_list { reLocA $ sLLAA (head $1) (last $1) (And ($1)) } name_boolformula_and_list :: { [LBooleanFormula (LocatedN RdrName)] } : name_boolformula_atom { [$1] } | name_boolformula_atom ',' name_boolformula_and_list {% do { h <- addTrailingCommaL $1 (gl $2) ; return (h : $3) } } name_boolformula_atom :: { LBooleanFormula (LocatedN RdrName) } : '(' name_boolformula ')' {% amsrl (sLL $1 $> (Parens $2)) (AnnList Nothing (Just (mop $1)) (Just (mcp $3)) [] []) } | name_var { reLocA $ sL1N $1 (Var $1) } namelist :: { Located [LocatedN RdrName] } namelist : name_var { sL1N $1 [$1] } | name_var ',' namelist {% do { h <- addTrailingCommaN $1 (gl $2) ; return (sLL (reLocN $1) $> (h : unLoc $3)) }} name_var :: { LocatedN RdrName } name_var : var { $1 } | con { $1 } ----------------------------------------- -- Data constructors -- There are two different productions here as lifted list constructors -- are parsed differently. 
qcon_nowiredlist :: { LocatedN RdrName } : gen_qcon { $1 } | sysdcon_nolist { L (getLoc $1) $ nameRdrName (dataConName (unLoc $1)) } qcon :: { LocatedN RdrName } : gen_qcon { $1} | sysdcon { L (getLoc $1) $ nameRdrName (dataConName (unLoc $1)) } gen_qcon :: { LocatedN RdrName } : qconid { $1 } | '(' qconsym ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } con :: { LocatedN RdrName } : conid { $1 } | '(' consym ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } | sysdcon { L (getLoc $1) $ nameRdrName (dataConName (unLoc $1)) } con_list :: { Located [LocatedN RdrName] } con_list : con { sL1N $1 [$1] } | con ',' con_list {% do { h <- addTrailingCommaN $1 (gl $2) ; return (sLL (reLocN $1) $> (h : unLoc $3)) }} qcon_list :: { Located [LocatedN RdrName] } qcon_list : qcon { sL1N $1 [$1] } | qcon ',' qcon_list {% do { h <- addTrailingCommaN $1 (gl $2) ; return (sLL (reLocN $1) $> (h : unLoc $3)) }} -- See Note [ExplicitTuple] in GHC.Hs.Expr sysdcon_nolist :: { LocatedN DataCon } -- Wired in data constructors : '(' ')' {% amsrn (sLL $1 $> unitDataCon) (NameAnnOnly NameParens (glAA $1) (glAA $2) []) } | '(' commas ')' {% amsrn (sLL $1 $> $ tupleDataCon Boxed (snd $2 + 1)) (NameAnnCommas NameParens (glAA $1) (map (EpaSpan . realSrcSpan) (fst $2)) (glAA $3) []) } | '(#' '#)' {% amsrn (sLL $1 $> $ unboxedUnitDataCon) (NameAnnOnly NameParensHash (glAA $1) (glAA $2) []) } | '(#' commas '#)' {% amsrn (sLL $1 $> $ tupleDataCon Unboxed (snd $2 + 1)) (NameAnnCommas NameParensHash (glAA $1) (map (EpaSpan . 
realSrcSpan) (fst $2)) (glAA $3) []) } -- See Note [Empty lists] in GHC.Hs.Expr sysdcon :: { LocatedN DataCon } : sysdcon_nolist { $1 } | '[' ']' {% amsrn (sLL $1 $> nilDataCon) (NameAnnOnly NameSquare (glAA $1) (glAA $2) []) } conop :: { LocatedN RdrName } : consym { $1 } | '`' conid '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } qconop :: { LocatedN RdrName } : qconsym { $1 } | '`' qconid '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } ---------------------------------------------------------------------------- -- Type constructors -- See Note [Unit tuples] in GHC.Hs.Type for the distinction -- between gtycon and ntgtycon gtycon :: { LocatedN RdrName } -- A "general" qualified tycon, including unit tuples : ntgtycon { $1 } | '(' ')' {% amsrn (sLL $1 $> $ getRdrName unitTyCon) (NameAnnOnly NameParens (glAA $1) (glAA $2) []) } | '(#' '#)' {% amsrn (sLL $1 $> $ getRdrName unboxedUnitTyCon) (NameAnnOnly NameParensHash (glAA $1) (glAA $2) []) } ntgtycon :: { LocatedN RdrName } -- A "general" qualified tycon, excluding unit tuples : oqtycon { $1 } | '(' commas ')' {% amsrn (sLL $1 $> $ getRdrName (tupleTyCon Boxed (snd $2 + 1))) (NameAnnCommas NameParens (glAA $1) (map (EpaSpan . realSrcSpan) (fst $2)) (glAA $3) []) } | '(#' commas '#)' {% amsrn (sLL $1 $> $ getRdrName (tupleTyCon Unboxed (snd $2 + 1))) (NameAnnCommas NameParensHash (glAA $1) (map (EpaSpan . realSrcSpan) (fst $2)) (glAA $3) []) } | '(#' bars '#)' {% amsrn (sLL $1 $> $ getRdrName (sumTyCon (snd $2 + 1))) (NameAnnBars NameParensHash (glAA $1) (map (EpaSpan . 
realSrcSpan) (fst $2)) (glAA $3) []) } | '(' '->' ')' {% amsrn (sLL $1 $> $ getRdrName unrestrictedFunTyCon) (NameAnn NameParens (glAA $1) (glAA $2) (glAA $3) []) } | '[' ']' {% amsrn (sLL $1 $> $ listTyCon_RDR) (NameAnnOnly NameSquare (glAA $1) (glAA $2) []) } oqtycon :: { LocatedN RdrName } -- An "ordinary" qualified tycon; -- These can appear in export lists : qtycon { $1 } | '(' qtyconsym ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } oqtycon_no_varcon :: { LocatedN RdrName } -- Type constructor which cannot be mistaken -- for variable constructor in export lists -- see Note [Type constructors in export list] : qtycon { $1 } | '(' QCONSYM ')' {% let { name :: Located RdrName ; name = sL1 $2 $! mkQual tcClsName (getQCONSYM $2) } in amsrn (sLL $1 $> (unLoc name)) (NameAnn NameParens (glAA $1) (glAA $2) (glAA $3) []) } | '(' CONSYM ')' {% let { name :: Located RdrName ; name = sL1 $2 $! mkUnqual tcClsName (getCONSYM $2) } in amsrn (sLL $1 $> (unLoc name)) (NameAnn NameParens (glAA $1) (glAA $2) (glAA $3) []) } | '(' ':' ')' {% let { name :: Located RdrName ; name = sL1 $2 $! consDataCon_RDR } in amsrn (sLL $1 $> (unLoc name)) (NameAnn NameParens (glAA $1) (glAA $2) (glAA $3) []) } {- Note [Type constructors in export list] ~~~~~~~~~~~~~~~~~~~~~ Mixing type constructors and data constructors in export lists introduces ambiguity in grammar: e.g. (*) may be both a type constructor and a function. -XExplicitNamespaces allows to disambiguate by explicitly prefixing type constructors with 'type' keyword. This ambiguity causes reduce/reduce conflicts in parser, which are always resolved in favour of data constructors. To get rid of conflicts we demand that ambiguous type constructors (those, which are formed by the same productions as variable constructors) are always prefixed with 'type' keyword. Unambiguous type constructors may occur both with or without 'type' keyword. 
Note that in the parser we still parse data constructors as type constructors. As such, they still end up in the type constructor namespace until after renaming when we resolve the proper namespace for each exported child. -} qtyconop :: { LocatedN RdrName } -- Qualified or unqualified -- See Note [%shift: qtyconop -> qtyconsym] : qtyconsym %shift { $1 } | '`' qtycon '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } qtycon :: { LocatedN RdrName } -- Qualified or unqualified : QCONID { sL1n $1 $! mkQual tcClsName (getQCONID $1) } | tycon { $1 } tycon :: { LocatedN RdrName } -- Unqualified : CONID { sL1n $1 $! mkUnqual tcClsName (getCONID $1) } qtyconsym :: { LocatedN RdrName } : QCONSYM { sL1n $1 $! mkQual tcClsName (getQCONSYM $1) } | QVARSYM { sL1n $1 $! mkQual tcClsName (getQVARSYM $1) } | tyconsym { $1 } tyconsym :: { LocatedN RdrName } : CONSYM { sL1n $1 $! mkUnqual tcClsName (getCONSYM $1) } | VARSYM { sL1n $1 $! mkUnqual tcClsName (getVARSYM $1) } | ':' { sL1n $1 $! consDataCon_RDR } | '-' { sL1n $1 $! mkUnqual tcClsName (fsLit "-") } | '.' { sL1n $1 $! mkUnqual tcClsName (fsLit ".") } -- An "ordinary" unqualified tycon. See `oqtycon` for the qualified version. -- These can appear in `ANN type` declarations (#19374). otycon :: { LocatedN RdrName } : tycon { $1 } | '(' tyconsym ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } ----------------------------------------------------------------------------- -- Operators op :: { LocatedN RdrName } -- used in infix decls : varop { $1 } | conop { $1 } | '->' { sL1n $1 $ getRdrName unrestrictedFunTyCon } varop :: { LocatedN RdrName } : varsym { $1 } | '`' varid '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } qop :: { forall b. DisambInfixOp b => PV (LocatedN b) } -- used in sections : qvarop { mkHsVarOpPV $1 } | qconop { mkHsConOpPV $1 } | hole_op { pvN $1 } qopm :: { forall b. 
DisambInfixOp b => PV (LocatedN b) } -- used in sections : qvaropm { mkHsVarOpPV $1 } | qconop { mkHsConOpPV $1 } | hole_op { pvN $1 } hole_op :: { forall b. DisambInfixOp b => PV (Located b) } -- used in sections hole_op : '`' '_' '`' { mkHsInfixHolePV (comb2 $1 $>) (\cs -> EpAnn (glR $1) (EpAnnUnboundVar (glAA $1, glAA $3) (glAA $2)) cs) } qvarop :: { LocatedN RdrName } : qvarsym { $1 } | '`' qvarid '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } qvaropm :: { LocatedN RdrName } : qvarsym_no_minus { $1 } | '`' qvarid '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } ----------------------------------------------------------------------------- -- Type variables tyvar :: { LocatedN RdrName } tyvar : tyvarid { $1 } tyvarop :: { LocatedN RdrName } tyvarop : '`' tyvarid '`' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameBackquotes (glAA $1) (glNRR $2) (glAA $3) []) } tyvarid :: { LocatedN RdrName } : VARID { sL1n $1 $! mkUnqual tvName (getVARID $1) } | special_id { sL1n $1 $! mkUnqual tvName (unLoc $1) } | 'unsafe' { sL1n $1 $! mkUnqual tvName (fsLit "unsafe") } | 'safe' { sL1n $1 $! mkUnqual tvName (fsLit "safe") } | 'interruptible' { sL1n $1 $! 
mkUnqual tvName (fsLit "interruptible") } -- If this changes relative to varid, update 'checkRuleTyVarBndrNames' -- in GHC.Parser.PostProcess -- See Note [Parsing explicit foralls in Rules] ----------------------------------------------------------------------------- -- Variables var :: { LocatedN RdrName } : varid { $1 } | '(' varsym ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } qvar :: { LocatedN RdrName } : qvarid { $1 } | '(' varsym ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } | '(' qvarsym1 ')' {% amsrn (sLL $1 $> (unLoc $2)) (NameAnn NameParens (glAA $1) (glNRR $2) (glAA $3) []) } -- We've inlined qvarsym here so that the decision about -- whether it's a qvar or a var can be postponed until -- *after* we see the close paren. field :: { Located FastString } : varid { reLocN $ fmap (occNameFS . rdrNameOcc) $1 } qvarid :: { LocatedN RdrName } : varid { $1 } | QVARID { sL1n $1 $! mkQual varName (getQVARID $1) } -- Note that 'role' and 'family' get lexed separately regardless of -- the use of extensions. However, because they are listed here, -- this is OK and they can be used as normal varids. -- See Note [Lexing type pseudo-keywords] in GHC.Parser.Lexer varid :: { LocatedN RdrName } : VARID { sL1n $1 $! mkUnqual varName (getVARID $1) } | special_id { sL1n $1 $! mkUnqual varName (unLoc $1) } | 'unsafe' { sL1n $1 $! mkUnqual varName (fsLit "unsafe") } | 'safe' { sL1n $1 $! mkUnqual varName (fsLit "safe") } | 'interruptible' { sL1n $1 $! mkUnqual varName (fsLit "interruptible")} | 'forall' { sL1n $1 $! mkUnqual varName (fsLit "forall") } | 'family' { sL1n $1 $! mkUnqual varName (fsLit "family") } | 'role' { sL1n $1 $! 
mkUnqual varName (fsLit "role") } -- If this changes relative to tyvarid, update 'checkRuleTyVarBndrNames' -- in GHC.Parser.PostProcess -- See Note [Parsing explicit foralls in Rules] qvarsym :: { LocatedN RdrName } : varsym { $1 } | qvarsym1 { $1 } qvarsym_no_minus :: { LocatedN RdrName } : varsym_no_minus { $1 } | qvarsym1 { $1 } qvarsym1 :: { LocatedN RdrName } qvarsym1 : QVARSYM { sL1n $1 $ mkQual varName (getQVARSYM $1) } varsym :: { LocatedN RdrName } : varsym_no_minus { $1 } | '-' { sL1n $1 $ mkUnqual varName (fsLit "-") } varsym_no_minus :: { LocatedN RdrName } -- varsym not including '-' : VARSYM { sL1n $1 $ mkUnqual varName (getVARSYM $1) } | special_sym { sL1n $1 $ mkUnqual varName (unLoc $1) } -- These special_ids are treated as keywords in various places, -- but as ordinary ids elsewhere. 'special_id' collects all these -- except 'unsafe', 'interruptible', 'forall', 'family', 'role', 'stock', and -- 'anyclass', whose treatment differs depending on context special_id :: { Located FastString } special_id : 'as' { sL1 $1 (fsLit "as") } | 'qualified' { sL1 $1 (fsLit "qualified") } | 'hiding' { sL1 $1 (fsLit "hiding") } | 'export' { sL1 $1 (fsLit "export") } | 'label' { sL1 $1 (fsLit "label") } | 'dynamic' { sL1 $1 (fsLit "dynamic") } | 'stdcall' { sL1 $1 (fsLit "stdcall") } | 'ccall' { sL1 $1 (fsLit "ccall") } | 'capi' { sL1 $1 (fsLit "capi") } | 'prim' { sL1 $1 (fsLit "prim") } | 'javascript' { sL1 $1 (fsLit "javascript") } -- See Note [%shift: special_id -> 'group'] | 'group' %shift { sL1 $1 (fsLit "group") } | 'stock' { sL1 $1 (fsLit "stock") } | 'anyclass' { sL1 $1 (fsLit "anyclass") } | 'via' { sL1 $1 (fsLit "via") } | 'unit' { sL1 $1 (fsLit "unit") } | 'dependency' { sL1 $1 (fsLit "dependency") } | 'signature' { sL1 $1 (fsLit "signature") } special_sym :: { Located FastString } special_sym : '.' 
{ sL1 $1 (fsLit ".") } | '*' { sL1 $1 (fsLit (starSym (isUnicode $1))) } ----------------------------------------------------------------------------- -- Data constructors qconid :: { LocatedN RdrName } -- Qualified or unqualified : conid { $1 } | QCONID { sL1n $1 $! mkQual dataName (getQCONID $1) } conid :: { LocatedN RdrName } : CONID { sL1n $1 $ mkUnqual dataName (getCONID $1) } qconsym :: { LocatedN RdrName } -- Qualified or unqualified : consym { $1 } | QCONSYM { sL1n $1 $ mkQual dataName (getQCONSYM $1) } consym :: { LocatedN RdrName } : CONSYM { sL1n $1 $ mkUnqual dataName (getCONSYM $1) } -- ':' means only list cons | ':' { sL1n $1 $ consDataCon_RDR } ----------------------------------------------------------------------------- -- Literals literal :: { Located (HsLit GhcPs) } : CHAR { sL1 $1 $ HsChar (getCHARs $1) $ getCHAR $1 } | STRING { sL1 $1 $ HsString (getSTRINGs $1) $ getSTRING $1 } | PRIMINTEGER { sL1 $1 $ HsIntPrim (getPRIMINTEGERs $1) $ getPRIMINTEGER $1 } | PRIMWORD { sL1 $1 $ HsWordPrim (getPRIMWORDs $1) $ getPRIMWORD $1 } | PRIMCHAR { sL1 $1 $ HsCharPrim (getPRIMCHARs $1) $ getPRIMCHAR $1 } | PRIMSTRING { sL1 $1 $ HsStringPrim (getPRIMSTRINGs $1) $ getPRIMSTRING $1 } | PRIMFLOAT { sL1 $1 $ HsFloatPrim noExtField $ getPRIMFLOAT $1 } | PRIMDOUBLE { sL1 $1 $ HsDoublePrim noExtField $ getPRIMDOUBLE $1 } ----------------------------------------------------------------------------- -- Layout close :: { () } : vccurly { () } -- context popped in lexer. 
| error {% popContext } ----------------------------------------------------------------------------- -- Miscellaneous (mostly renamings) modid :: { LocatedA ModuleName } : CONID { sL1a $1 $ mkModuleNameFS (getCONID $1) } | QCONID { sL1a $1 $ let (mod,c) = getQCONID $1 in mkModuleNameFS (mkFastString (unpackFS mod ++ '.':unpackFS c)) } commas :: { ([SrcSpan],Int) } -- One or more commas : commas ',' { ((fst $1)++[gl $2],snd $1 + 1) } | ',' { ([gl $1],1) } bars0 :: { ([SrcSpan],Int) } -- Zero or more bars : bars { $1 } | { ([], 0) } bars :: { ([SrcSpan],Int) } -- One or more bars : bars '|' { ((fst $1)++[gl $2],snd $1 + 1) } | '|' { ([gl $1],1) } { happyError :: P a happyError = srcParseFail getVARID (L _ (ITvarid x)) = x getCONID (L _ (ITconid x)) = x getVARSYM (L _ (ITvarsym x)) = x getCONSYM (L _ (ITconsym x)) = x getDO (L _ (ITdo x)) = x getMDO (L _ (ITmdo x)) = x getQVARID (L _ (ITqvarid x)) = x getQCONID (L _ (ITqconid x)) = x getQVARSYM (L _ (ITqvarsym x)) = x getQCONSYM (L _ (ITqconsym x)) = x getIPDUPVARID (L _ (ITdupipvarid x)) = x getLABELVARID (L _ (ITlabelvarid x)) = x getCHAR (L _ (ITchar _ x)) = x getSTRING (L _ (ITstring _ x)) = x getINTEGER (L _ (ITinteger x)) = x getRATIONAL (L _ (ITrational x)) = x getPRIMCHAR (L _ (ITprimchar _ x)) = x getPRIMSTRING (L _ (ITprimstring _ x)) = x getPRIMINTEGER (L _ (ITprimint _ x)) = x getPRIMWORD (L _ (ITprimword _ x)) = x getPRIMFLOAT (L _ (ITprimfloat x)) = x getPRIMDOUBLE (L _ (ITprimdouble x)) = x getINLINE (L _ (ITinline_prag _ inl conl)) = (inl,conl) getSPEC_INLINE (L _ (ITspec_inline_prag src True)) = (Inline src,FunLike) getSPEC_INLINE (L _ (ITspec_inline_prag src False)) = (NoInline src,FunLike) getCOMPLETE_PRAGs (L _ (ITcomplete_prag x)) = x getVOCURLY (L (RealSrcSpan l _) ITvocurly) = srcSpanStartCol l getINTEGERs (L _ (ITinteger (IL src _ _))) = src getCHARs (L _ (ITchar src _)) = src getSTRINGs (L _ (ITstring src _)) = src getPRIMCHARs (L _ (ITprimchar src _)) = src getPRIMSTRINGs (L _ (ITprimstring 
src _)) = src getPRIMINTEGERs (L _ (ITprimint src _)) = src getPRIMWORDs (L _ (ITprimword src _)) = src -- See Note [Pragma source text] in "GHC.Types.Basic" for the following getINLINE_PRAGs (L _ (ITinline_prag _ inl _)) = inlineSpecSource inl getOPAQUE_PRAGs (L _ (ITopaque_prag src)) = src getSPEC_PRAGs (L _ (ITspec_prag src)) = src getSPEC_INLINE_PRAGs (L _ (ITspec_inline_prag src _)) = src getSOURCE_PRAGs (L _ (ITsource_prag src)) = src getRULES_PRAGs (L _ (ITrules_prag src)) = src getWARNING_PRAGs (L _ (ITwarning_prag src)) = src getDEPRECATED_PRAGs (L _ (ITdeprecated_prag src)) = src getSCC_PRAGs (L _ (ITscc_prag src)) = src getUNPACK_PRAGs (L _ (ITunpack_prag src)) = src getNOUNPACK_PRAGs (L _ (ITnounpack_prag src)) = src getANN_PRAGs (L _ (ITann_prag src)) = src getMINIMAL_PRAGs (L _ (ITminimal_prag src)) = src getOVERLAPPABLE_PRAGs (L _ (IToverlappable_prag src)) = src getOVERLAPPING_PRAGs (L _ (IToverlapping_prag src)) = src getOVERLAPS_PRAGs (L _ (IToverlaps_prag src)) = src getINCOHERENT_PRAGs (L _ (ITincoherent_prag src)) = src getCTYPEs (L _ (ITctype src)) = src getStringLiteral l = StringLiteral (getSTRINGs l) (getSTRING l) Nothing isUnicode :: Located Token -> Bool isUnicode (L _ (ITforall iu)) = iu == UnicodeSyntax isUnicode (L _ (ITdarrow iu)) = iu == UnicodeSyntax isUnicode (L _ (ITdcolon iu)) = iu == UnicodeSyntax isUnicode (L _ (ITlarrow iu)) = iu == UnicodeSyntax isUnicode (L _ (ITrarrow iu)) = iu == UnicodeSyntax isUnicode (L _ (ITlarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (ITrarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (ITLarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (ITRarrowtail iu)) = iu == UnicodeSyntax isUnicode (L _ (IToparenbar iu)) = iu == UnicodeSyntax isUnicode (L _ (ITcparenbar iu)) = iu == UnicodeSyntax isUnicode (L _ (ITopenExpQuote _ iu)) = iu == UnicodeSyntax isUnicode (L _ (ITcloseQuote iu)) = iu == UnicodeSyntax isUnicode (L _ (ITstar iu)) = iu == UnicodeSyntax isUnicode (L _ ITlolly) = True 
isUnicode _ = False hasE :: Located Token -> Bool hasE (L _ (ITopenExpQuote HasE _)) = True hasE (L _ (ITopenTExpQuote HasE)) = True hasE _ = False getSCC :: Located Token -> P FastString getSCC lt = do let s = getSTRING lt -- We probably actually want to be more restrictive than this if ' ' `elem` unpackFS s then addFatalError $ mkPlainErrorMsgEnvelope (getLoc lt) $ PsErrSpaceInSCC else return s stringLiteralToHsDocWst :: Located StringLiteral -> Located (WithHsDocIdentifiers StringLiteral GhcPs) stringLiteralToHsDocWst = lexStringLiteral parseIdentifier -- Utilities for combining source spans comb2 :: Located a -> Located b -> SrcSpan comb2 a b = a `seq` b `seq` combineLocs a b -- Utilities for combining source spans comb2A :: Located a -> LocatedAn t b -> SrcSpan comb2A a b = a `seq` b `seq` combineLocs a (reLoc b) comb2N :: Located a -> LocatedN b -> SrcSpan comb2N a b = a `seq` b `seq` combineLocs a (reLocN b) comb2Al :: LocatedAn t a -> Located b -> SrcSpan comb2Al a b = a `seq` b `seq` combineLocs (reLoc a) b comb3 :: Located a -> Located b -> Located c -> SrcSpan comb3 a b c = a `seq` b `seq` c `seq` combineSrcSpans (getLoc a) (combineSrcSpans (getLoc b) (getLoc c)) comb3A :: Located a -> Located b -> LocatedAn t c -> SrcSpan comb3A a b c = a `seq` b `seq` c `seq` combineSrcSpans (getLoc a) (combineSrcSpans (getLoc b) (getLocA c)) comb3N :: Located a -> Located b -> LocatedN c -> SrcSpan comb3N a b c = a `seq` b `seq` c `seq` combineSrcSpans (getLoc a) (combineSrcSpans (getLoc b) (getLocA c)) comb4 :: Located a -> Located b -> Located c -> Located d -> SrcSpan comb4 a b c d = a `seq` b `seq` c `seq` d `seq` (combineSrcSpans (getLoc a) $ combineSrcSpans (getLoc b) $ combineSrcSpans (getLoc c) (getLoc d)) comb5 :: Located a -> Located b -> Located c -> Located d -> Located e -> SrcSpan comb5 a b c d e = a `seq` b `seq` c `seq` d `seq` e `seq` (combineSrcSpans (getLoc a) $ combineSrcSpans (getLoc b) $ combineSrcSpans (getLoc c) $ combineSrcSpans (getLoc d) 
(getLoc e)) -- strict constructor version: {-# INLINE sL #-} sL :: l -> a -> GenLocated l a sL loc a = loc `seq` a `seq` L loc a -- See Note [Adding location info] for how these utility functions are used -- replaced last 3 CPP macros in this file {-# INLINE sL0 #-} sL0 :: a -> Located a sL0 = L noSrcSpan -- #define L0 L noSrcSpan {-# INLINE sL1 #-} sL1 :: GenLocated l a -> b -> GenLocated l b sL1 x = sL (getLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1A #-} sL1A :: LocatedAn t a -> b -> Located b sL1A x = sL (getLocA x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1N #-} sL1N :: LocatedN a -> b -> Located b sL1N x = sL (getLocA x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1a #-} sL1a :: Located a -> b -> LocatedAn t b sL1a x = sL (noAnnSrcSpan $ getLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1l #-} sL1l :: LocatedAn t a -> b -> LocatedAn u b sL1l x = sL (l2l $ getLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sL1n #-} sL1n :: Located a -> b -> LocatedN b sL1n x = L (noAnnSrcSpan $ getLoc x) -- #define sL1 sL (getLoc $1) {-# INLINE sLL #-} sLL :: Located a -> Located b -> c -> Located c sLL x y = sL (comb2 x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLa #-} sLLa :: Located a -> Located b -> c -> LocatedAn t c sLLa x y = sL (noAnnSrcSpan $ comb2 x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLlA #-} sLLlA :: Located a -> LocatedAn t b -> c -> Located c sLLlA x y = sL (comb2A x y) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLAl #-} sLLAl :: LocatedAn t a -> Located b -> c -> Located c sLLAl x y = sL (comb2A y x) -- #define LL sL (comb2 $1 $>) {-# INLINE sLLAsl #-} sLLAsl :: [LocatedAn t a] -> Located b -> c -> Located c sLLAsl [] = sL1 sLLAsl (x:_) = sLLAl x {-# INLINE sLLAA #-} sLLAA :: LocatedAn t a -> LocatedAn u b -> c -> Located c sLLAA x y = sL (comb2 (reLoc y) (reLoc x)) -- #define LL sL (comb2 $1 $>) {- Note [Adding location info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is done using the three functions below, sL0, sL1 and sLL. 
Note that these functions were mechanically converted from the three macros that used to exist before, namely L0, L1 and LL. They each add a SrcSpan to their argument. sL0 adds 'noSrcSpan', used for empty productions -- This doesn't seem to work anymore -=chak sL1 for a production with a single token on the lhs. Grabs the SrcSpan from that token. sLL for a production with >1 token on the lhs. Makes up a SrcSpan from the first and last tokens. These suffice for the majority of cases. However, we must be especially careful with empty productions: sLL won't work if the first or last token on the lhs can represent an empty span. In these cases, we have to calculate the span using more of the tokens from the lhs, eg. | 'newtype' tycl_hdr '=' newconstr deriving { L (comb3 $1 $4 $5) (mkTyData NewType (unLoc $2) $4 (unLoc $5)) } We provide comb3 and comb4 functions which are useful in such cases. Be careful: there's no checking that you actually got this right, the only symptom will be that the SrcSpans of your syntax will be incorrect. -} -- Make a source location for the file. We're a bit lazy here and just -- make a point SrcSpan at line 1, column 0. Strictly speaking we should -- try to find the span of the whole file (ToDo). fileSrcSpan :: P SrcSpan fileSrcSpan = do l <- getRealSrcLoc; let loc = mkSrcLoc (srcLocFile l) 1 1; return (mkSrcSpan loc loc) -- Hint about linear types hintLinear :: MonadP m => SrcSpan -> m () hintLinear span = do linearEnabled <- getBit LinearTypesBit unless linearEnabled $ addError $ mkPlainErrorMsgEnvelope span $ PsErrLinearFunction -- Does this look like (a %m)? 
looksLikeMult :: LHsType GhcPs -> LocatedN RdrName -> LHsType GhcPs -> Bool looksLikeMult ty1 l_op ty2 | Unqual op_name <- unLoc l_op , occNameFS op_name == fsLit "%" , Strict.Just ty1_pos <- getBufSpan (getLocA ty1) , Strict.Just pct_pos <- getBufSpan (getLocA l_op) , Strict.Just ty2_pos <- getBufSpan (getLocA ty2) , bufSpanEnd ty1_pos /= bufSpanStart pct_pos , bufSpanEnd pct_pos == bufSpanStart ty2_pos = True | otherwise = False -- Hint about the MultiWayIf extension hintMultiWayIf :: SrcSpan -> P () hintMultiWayIf span = do mwiEnabled <- getBit MultiWayIfBit unless mwiEnabled $ addError $ mkPlainErrorMsgEnvelope span PsErrMultiWayIf -- Hint about explicit-forall hintExplicitForall :: Located Token -> P () hintExplicitForall tok = do forall <- getBit ExplicitForallBit rulePrag <- getBit InRulePragBit unless (forall || rulePrag) $ addError $ mkPlainErrorMsgEnvelope (getLoc tok) $ (PsErrExplicitForall (isUnicode tok)) -- Hint about qualified-do hintQualifiedDo :: Located Token -> P () hintQualifiedDo tok = do qualifiedDo <- getBit QualifiedDoBit case maybeQDoDoc of Just qdoDoc | not qualifiedDo -> addError $ mkPlainErrorMsgEnvelope (getLoc tok) $ (PsErrIllegalQualifiedDo qdoDoc) _ -> return () where maybeQDoDoc = case unLoc tok of ITdo (Just m) -> Just $ ftext m <> text ".do" ITmdo (Just m) -> Just $ ftext m <> text ".mdo" t -> Nothing -- When two single quotes don't followed by tyvar or gtycon, we report the -- error as empty character literal, or TH quote that missing proper type -- variable or constructor. See #13450. 
reportEmptyDoubleQuotes :: SrcSpan -> P a reportEmptyDoubleQuotes span = do thQuotes <- getBit ThQuotesBit addFatalError $ mkPlainErrorMsgEnvelope span $ PsErrEmptyDoubleQuotes thQuotes {- %************************************************************************ %* * Helper functions for generating annotations in the parser %* * %************************************************************************ For the general principles of the following routines, see Note [exact print annotations] in GHC.Parser.Annotation -} -- |Construct an AddEpAnn from the annotation keyword and the location -- of the keyword itself mj :: AnnKeywordId -> Located e -> AddEpAnn mj a l = AddEpAnn a (EpaSpan $ rs $ gl l) mjN :: AnnKeywordId -> LocatedN e -> AddEpAnn mjN a l = AddEpAnn a (EpaSpan $ rs $ glN l) -- |Construct an AddEpAnn from the annotation keyword and the location -- of the keyword itself, provided the span is not zero width mz :: AnnKeywordId -> Located e -> [AddEpAnn] mz a l = if isZeroWidthSpan (gl l) then [] else [AddEpAnn a (EpaSpan $ rs $ gl l)] msemi :: Located e -> [TrailingAnn] msemi l = if isZeroWidthSpan (gl l) then [] else [AddSemiAnn (EpaSpan $ rs $ gl l)] msemim :: Located e -> Maybe EpaLocation msemim l = if isZeroWidthSpan (gl l) then Nothing else Just (EpaSpan $ rs $ gl l) -- |Construct an AddEpAnn from the annotation keyword and the Located Token. If -- the token has a unicode equivalent and this has been used, provide the -- unicode variant of the annotation. 
mu :: AnnKeywordId -> Located Token -> AddEpAnn mu a lt@(L l t) = AddEpAnn (toUnicodeAnn a lt) (EpaSpan $ rs l) -- | If the 'Token' is using its unicode variant return the unicode variant of -- the annotation toUnicodeAnn :: AnnKeywordId -> Located Token -> AnnKeywordId toUnicodeAnn a t = if isUnicode t then unicodeAnn a else a toUnicode :: Located Token -> IsUnicodeSyntax toUnicode t = if isUnicode t then UnicodeSyntax else NormalSyntax gl :: GenLocated l a -> l gl = getLoc glA :: LocatedAn t a -> SrcSpan glA = getLocA glN :: LocatedN a -> SrcSpan glN = getLocA glR :: Located a -> Anchor glR la = Anchor (realSrcSpan $ getLoc la) UnchangedAnchor glAA :: Located a -> EpaLocation glAA = EpaSpan <$> realSrcSpan . getLoc glRR :: Located a -> RealSrcSpan glRR = realSrcSpan . getLoc glAR :: LocatedAn t a -> Anchor glAR la = Anchor (realSrcSpan $ getLocA la) UnchangedAnchor glNR :: LocatedN a -> Anchor glNR ln = Anchor (realSrcSpan $ getLocA ln) UnchangedAnchor glNRR :: LocatedN a -> EpaLocation glNRR = EpaSpan <$> realSrcSpan . getLocA anc :: RealSrcSpan -> Anchor anc r = Anchor r UnchangedAnchor acs :: MonadP m => (EpAnnComments -> Located a) -> m (Located a) acs a = do let (L l _) = a emptyComments cs <- getCommentsFor l return (a cs) -- Called at the very end to pick up the EOF position, as well as any comments not allocated yet. 
acsFinal :: (EpAnnComments -> Located a) -> P (Located a) acsFinal a = do let (L l _) = a emptyComments cs <- getCommentsFor l csf <- getFinalCommentsFor l meof <- getEofPos let ce = case meof of Strict.Nothing -> EpaComments [] Strict.Just (pos `Strict.And` gap) -> EpaCommentsBalanced [] [L (realSpanAsAnchor pos) (EpaComment EpaEofComment gap)] return (a (cs Semi.<> csf Semi.<> ce)) acsa :: MonadP m => (EpAnnComments -> LocatedAn t a) -> m (LocatedAn t a) acsa a = do let (L l _) = a emptyComments cs <- getCommentsFor (locA l) return (a cs) acsA :: MonadP m => (EpAnnComments -> Located a) -> m (LocatedAn t a) acsA a = reLocA <$> acs a acsExpr :: (EpAnnComments -> LHsExpr GhcPs) -> P ECP acsExpr a = do { expr :: (LHsExpr GhcPs) <- runPV $ acsa a ; return (ecpFromExp $ expr) } amsA :: MonadP m => LocatedA a -> [TrailingAnn] -> m (LocatedA a) amsA (L l a) bs = do cs <- getCommentsFor (locA l) return (L (addAnnsA l bs cs) a) amsAl :: MonadP m => LocatedA a -> SrcSpan -> [TrailingAnn] -> m (LocatedA a) amsAl (L l a) loc bs = do cs <- getCommentsFor loc return (L (addAnnsA l bs cs) a) amsrc :: MonadP m => Located a -> AnnContext -> m (LocatedC a) amsrc a@(L l _) bs = do cs <- getCommentsFor l return (reAnnC bs cs a) amsrl :: MonadP m => Located a -> AnnList -> m (LocatedL a) amsrl a@(L l _) bs = do cs <- getCommentsFor l return (reAnnL bs cs a) amsrp :: MonadP m => Located a -> AnnPragma -> m (LocatedP a) amsrp a@(L l _) bs = do cs <- getCommentsFor l return (reAnnL bs cs a) amsrn :: MonadP m => Located a -> NameAnn -> m (LocatedN a) amsrn (L l a) an = do cs <- getCommentsFor l let ann = (EpAnn (spanAsAnchor l) an cs) return (L (SrcSpanAnn ann l) a) -- |Synonyms for AddEpAnn versions of AnnOpen and AnnClose mo,mc :: Located Token -> AddEpAnn mo ll = mj AnnOpen ll mc ll = mj AnnClose ll moc,mcc :: Located Token -> AddEpAnn moc ll = mj AnnOpenC ll mcc ll = mj AnnCloseC ll mop,mcp :: Located Token -> AddEpAnn mop ll = mj AnnOpenP ll mcp ll = mj AnnCloseP ll moh,mch :: 
Located Token -> AddEpAnn moh ll = mj AnnOpenPH ll mch ll = mj AnnClosePH ll mos,mcs :: Located Token -> AddEpAnn mos ll = mj AnnOpenS ll mcs ll = mj AnnCloseS ll pvA :: MonadP m => m (Located a) -> m (LocatedAn t a) pvA a = do { av <- a ; return (reLocA av) } pvN :: MonadP m => m (Located a) -> m (LocatedN a) pvN a = do { (L l av) <- a ; return (L (noAnnSrcSpan l) av) } pvL :: MonadP m => m (LocatedAn t a) -> m (Located a) pvL a = do { av <- a ; return (reLoc av) } -- | Parse a Haskell module with Haddock comments. -- This is done in two steps: -- -- * 'parseModuleNoHaddock' to build the AST -- * 'addHaddockToModule' to insert Haddock comments into it -- -- This is the only parser entry point that deals with Haddock comments. -- The other entry points ('parseDeclaration', 'parseExpression', etc) do -- not insert them into the AST. parseModule :: P (Located HsModule) parseModule = parseModuleNoHaddock >>= addHaddockToModule commentsA :: (Monoid ann) => SrcSpan -> EpAnnComments -> SrcSpanAnn' (EpAnn ann) commentsA loc cs = SrcSpanAnn (EpAnn (Anchor (rs loc) UnchangedAnchor) mempty cs) loc -- | Instead of getting the *enclosed* comments, this includes the -- *preceding* ones. It is used at the top level to get comments -- between top level declarations. 
commentsPA :: (Monoid ann) => LocatedAn ann a -> P (LocatedAn ann a) commentsPA la@(L l a) = do cs <- getPriorCommentsFor (getLocA la) return (L (addCommentsToSrcAnn l cs) a) rs :: SrcSpan -> RealSrcSpan rs (RealSrcSpan l _) = l rs _ = panic "Parser should only have RealSrcSpan" hsDoAnn :: Located a -> LocatedAn t b -> AnnKeywordId -> AnnList hsDoAnn (L l _) (L ll _) kw = AnnList (Just $ spanAsAnchor (locA ll)) Nothing Nothing [AddEpAnn kw (EpaSpan $ rs l)] [] listAsAnchor :: [LocatedAn t a] -> Anchor listAsAnchor [] = spanAsAnchor noSrcSpan listAsAnchor (L l _:_) = spanAsAnchor (locA l) hsTok :: Located Token -> LHsToken tok GhcPs hsTok (L l _) = L (mkTokenLocation l) HsTok hsUniTok :: Located Token -> LHsUniToken tok utok GhcPs hsUniTok t@(L l _) = L (mkTokenLocation l) (if isUnicode t then HsUnicodeTok else HsNormalTok) -- ------------------------------------- addTrailingCommaFBind :: MonadP m => Fbind b -> SrcSpan -> m (Fbind b) addTrailingCommaFBind (Left b) l = fmap Left (addTrailingCommaA b l) addTrailingCommaFBind (Right b) l = fmap Right (addTrailingCommaA b l) addTrailingVbarA :: MonadP m => LocatedA a -> SrcSpan -> m (LocatedA a) addTrailingVbarA la span = addTrailingAnnA la span AddVbarAnn addTrailingSemiA :: MonadP m => LocatedA a -> SrcSpan -> m (LocatedA a) addTrailingSemiA la span = addTrailingAnnA la span AddSemiAnn addTrailingCommaA :: MonadP m => LocatedA a -> SrcSpan -> m (LocatedA a) addTrailingCommaA la span = addTrailingAnnA la span AddCommaAnn addTrailingAnnA :: MonadP m => LocatedA a -> SrcSpan -> (EpaLocation -> TrailingAnn) -> m (LocatedA a) addTrailingAnnA (L (SrcSpanAnn anns l) a) ss ta = do -- cs <- getCommentsFor l let cs = emptyComments -- AZ:TODO: generalise updating comments into an annotation let anns' = if isZeroWidthSpan ss then anns else addTrailingAnnToA l (ta (EpaSpan $ rs ss)) cs anns return (L (SrcSpanAnn anns' l) a) -- ------------------------------------- addTrailingVbarL :: MonadP m => LocatedL a -> SrcSpan -> m 
(LocatedL a) addTrailingVbarL la span = addTrailingAnnL la (AddVbarAnn (EpaSpan $ rs span)) addTrailingCommaL :: MonadP m => LocatedL a -> SrcSpan -> m (LocatedL a) addTrailingCommaL la span = addTrailingAnnL la (AddCommaAnn (EpaSpan $ rs span)) addTrailingAnnL :: MonadP m => LocatedL a -> TrailingAnn -> m (LocatedL a) addTrailingAnnL (L (SrcSpanAnn anns l) a) ta = do cs <- getCommentsFor l let anns' = addTrailingAnnToL l ta cs anns return (L (SrcSpanAnn anns' l) a) -- ------------------------------------- -- Mostly use to add AnnComma, special case it to NOP if adding a zero-width annotation addTrailingCommaN :: MonadP m => LocatedN a -> SrcSpan -> m (LocatedN a) addTrailingCommaN (L (SrcSpanAnn anns l) a) span = do -- cs <- getCommentsFor l let cs = emptyComments -- AZ:TODO: generalise updating comments into an annotation let anns' = if isZeroWidthSpan span then anns else addTrailingCommaToN l anns (EpaSpan $ rs span) return (L (SrcSpanAnn anns' l) a) addTrailingCommaS :: Located StringLiteral -> EpaLocation -> Located StringLiteral addTrailingCommaS (L l sl) span = L l (sl { sl_tc = Just (epaLocationRealSrcSpan span) }) -- ------------------------------------- addTrailingDarrowC :: LocatedC a -> Located Token -> EpAnnComments -> LocatedC a addTrailingDarrowC (L (SrcSpanAnn EpAnnNotUsed l) a) lt cs = let u = if (isUnicode lt) then UnicodeSyntax else NormalSyntax in L (SrcSpanAnn (EpAnn (spanAsAnchor l) (AnnContext (Just (u,glAA lt)) [] []) cs) l) a addTrailingDarrowC (L (SrcSpanAnn (EpAnn lr (AnnContext _ o c) csc) l) a) lt cs = let u = if (isUnicode lt) then UnicodeSyntax else NormalSyntax in L (SrcSpanAnn (EpAnn lr (AnnContext (Just (u,glAA lt)) o c) (cs Semi.<> csc)) l) a -- ------------------------------------- -- We need a location for the where binds, when computing the SrcSpan -- for the AST element using them. Where there is a span, we return -- it, else noLoc, which is ignored in the comb2 call. 
adaptWhereBinds :: Maybe (Located (HsLocalBinds GhcPs, Maybe EpAnnComments)) -> Located (HsLocalBinds GhcPs, EpAnnComments) adaptWhereBinds Nothing = noLoc (EmptyLocalBinds noExtField, emptyComments) adaptWhereBinds (Just (L l (b, mc))) = L l (b, maybe emptyComments id mc) } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser/Lexer.x0000644000000000000000000043151014472375231020360 0ustar0000000000000000----------------------------------------------------------------------------- -- (c) The University of Glasgow, 2006 -- -- GHC's lexer for Haskell 2010 [1]. -- -- This is a combination of an Alex-generated lexer [2] from a regex -- definition, with some hand-coded bits. [3] -- -- Completely accurate information about token-spans within the source -- file is maintained. Every token has a start and end RealSrcLoc -- attached to it. -- -- References: -- [1] https://www.haskell.org/onlinereport/haskell2010/haskellch2.html -- [2] http://www.haskell.org/alex/ -- [3] https://gitlab.haskell.org/ghc/ghc/wikis/commentary/compiler/parser -- ----------------------------------------------------------------------------- -- ToDo / known bugs: -- - parsing integers is a bit slow -- - readRational is a bit slow -- -- Known bugs, that were also in the previous version: -- - M... should be 3 tokens, not 1. -- - pragma-end should be only valid in a pragma -- qualified operator NOTES. -- -- - If M.(+) is a single lexeme, then.. -- - Probably (+) should be a single lexeme too, for consistency. -- Otherwise ( + ) would be a prefix operator, but M.( + ) would not be. -- - But we have to rule out reserved operators, otherwise (..) becomes -- a different lexeme. -- - Should we therefore also rule out reserved operators in the qualified -- form? This is quite difficult to achieve. We don't do it for -- qualified varids. 
-- ----------------------------------------------------------------------------- -- Alex "Haskell code fragment top" { {-# LANGUAGE BangPatterns #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE UnboxedTuples #-} {-# LANGUAGE UnboxedSums #-} {-# LANGUAGE UnliftedNewtypes #-} {-# LANGUAGE PatternSynonyms #-} {-# OPTIONS_GHC -funbox-strict-fields #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} module GHC.Parser.Lexer ( Token(..), lexer, lexerDbg, ParserOpts(..), mkParserOpts, PState (..), initParserState, initPragState, P(..), ParseResult(POk, PFailed), allocateComments, allocatePriorComments, allocateFinalComments, MonadP(..), getRealSrcLoc, getPState, failMsgP, failLocMsgP, srcParseFail, getPsErrorMessages, getPsMessages, popContext, pushModuleContext, setLastToken, setSrcLoc, activeContext, nextIsEOF, getLexState, popLexState, pushLexState, ExtBits(..), xtest, xunset, xset, disableHaddock, lexTokenStream, mkParensEpAnn, getCommentsFor, getPriorCommentsFor, getFinalCommentsFor, getEofPos, commentToAnnotation, HdkComment(..), warnopt, adjustChar, addPsMessage ) where import GHC.Prelude import qualified GHC.Data.Strict as Strict -- base import Control.Monad import Control.Applicative import Data.Char import Data.List (stripPrefix, isInfixOf, partition) import Data.List.NonEmpty ( NonEmpty(..) 
) import qualified Data.List.NonEmpty as NE import Data.Maybe import Data.Word import Debug.Trace (trace) import GHC.Data.EnumSet as EnumSet -- ghc-boot import qualified GHC.LanguageExtensions as LangExt -- bytestring import Data.ByteString (ByteString) -- containers import Data.Map (Map) import qualified Data.Map as Map -- compiler import GHC.Utils.Error import GHC.Utils.Outputable import GHC.Utils.Panic import GHC.Data.StringBuffer import GHC.Data.FastString import GHC.Types.Error import GHC.Types.Unique.FM import GHC.Data.Maybe import GHC.Data.OrdList import GHC.Utils.Misc ( readSignificandExponentPair, readHexSignificandExponentPair ) import GHC.Types.SrcLoc import GHC.Types.SourceText import GHC.Types.Basic ( InlineSpec(..), RuleMatchInfo(..)) import GHC.Hs.Doc import GHC.Parser.CharClass import GHC.Parser.Annotation import GHC.Driver.Flags import GHC.Parser.Errors.Basic import GHC.Parser.Errors.Types import GHC.Parser.Errors.Ppr () } -- ----------------------------------------------------------------------------- -- Alex "Character set macros" -- NB: The logic behind these definitions is also reflected in "GHC.Utils.Lexeme" -- Any changes here should likely be reflected there. $unispace = \x05 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $nl = [\n\r\f] $whitechar = [$nl\v\ $unispace] $white_no_nl = $whitechar # \n -- TODO #8424 $tab = \t $ascdigit = 0-9 $unidigit = \x03 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $decdigit = $ascdigit -- exactly $ascdigit, no more no less. $digit = [$ascdigit $unidigit] $special = [\(\)\,\;\[\]\`\{\}] $ascsymbol = [\!\#\$\%\&\*\+\.\/\<\=\>\?\@\\\^\|\-\~\:] $unisymbol = \x04 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $symbol = [$ascsymbol $unisymbol] # [$special \_\"\'] $unilarge = \x01 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $asclarge = [A-Z] $large = [$asclarge $unilarge] $unismall = \x02 -- Trick Alex into handling Unicode. 
See Note [Unicode in Alex]. $ascsmall = [a-z] $small = [$ascsmall $unismall \_] $uniidchar = \x07 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $idchar = [$small $large $digit $uniidchar \'] $unigraphic = \x06 -- Trick Alex into handling Unicode. See Note [Unicode in Alex]. $graphic = [$small $large $symbol $digit $idchar $special $unigraphic \"\'] $binit = 0-1 $octit = 0-7 $hexit = [$decdigit A-F a-f] $pragmachar = [$small $large $digit $uniidchar ] $docsym = [\| \^ \* \$] -- ----------------------------------------------------------------------------- -- Alex "Regular expression macros" @varid = $small $idchar* -- variable identifiers @conid = $large $idchar* -- constructor identifiers @varsym = ($symbol # \:) $symbol* -- variable (operator) symbol @consym = \: $symbol* -- constructor (operator) symbol -- See Note [Lexing NumericUnderscores extension] and #14473 @numspc = _* -- numeric spacer (#14473) @decimal = $decdigit(@numspc $decdigit)* @binary = $binit(@numspc $binit)* @octal = $octit(@numspc $octit)* @hexadecimal = $hexit(@numspc $hexit)* @exponent = @numspc [eE] [\-\+]? @decimal @bin_exponent = @numspc [pP] [\-\+]? @decimal @qual = (@conid \.)+ @qvarid = @qual @varid @qconid = @qual @conid @qvarsym = @qual @varsym @qconsym = @qual @consym -- QualifiedDo needs to parse "M.do" not as a variable, so as to keep the -- layout rules. @qdo = @qual "do" @qmdo = @qual "mdo" @floating_point = @numspc @decimal \. @decimal @exponent? | @numspc @decimal @exponent @hex_floating_point = @numspc @hexadecimal \. @hexadecimal @bin_exponent? 
| @numspc @hexadecimal @bin_exponent -- normal signed numerical literals can only be explicitly negative, -- not explicitly positive (contrast @exponent) @negative = \- -- ----------------------------------------------------------------------------- -- Alex "Identifier" haskell :- -- ----------------------------------------------------------------------------- -- Alex "Rules" -- everywhere: skip whitespace $white_no_nl+ ; $tab { warnTab } -- Everywhere: deal with nested comments. We explicitly rule out -- pragmas, "{-#", so that we don't accidentally treat them as comments. -- (this can happen even though pragmas will normally take precedence due to -- longest-match, because pragmas aren't valid in every state, but comments -- are). We also rule out nested Haddock comments, if the -haddock flag is -- set. "{-" / { isNormalComment } { nested_comment } -- Single-line comments are a bit tricky. Haskell 98 says that two or -- more dashes followed by a symbol should be parsed as a varsym, so we -- have to exclude those. -- Since Haddock comments aren't valid in every state, we need to rule them -- out here. -- The following two rules match comments that begin with two dashes, but -- continue with a different character. The rules test that this character -- is not a symbol (in which case we'd have a varsym), and that it's not a -- space followed by a Haddock comment symbol (docsym) (in which case we'd -- have a Haddock comment). The rules then munch the rest of the line. "-- " ~$docsym .* { lineCommentToken } "--" [^$symbol \ ] .* { lineCommentToken } -- Next, match Haddock comments if no -haddock flag "-- " $docsym .* / { alexNotPred (ifExtension HaddockBit) } { lineCommentToken } -- Now, when we've matched comments that begin with 2 dashes and continue -- with a different character, we need to match comments that begin with three -- or more dashes (which clearly can't be Haddock comments). 
We only need to -- make sure that the first non-dash character isn't a symbol, and munch the -- rest of the line. "---"\-* ~$symbol .* { lineCommentToken } -- Since the previous rules all match dashes followed by at least one -- character, we also need to match a whole line filled with just dashes. "--"\-* / { atEOL } { lineCommentToken } -- We need this rule since none of the other single line comment rules -- actually match this case. "-- " / { atEOL } { lineCommentToken } -- 'bol' state: beginning of a line. Slurp up all the whitespace (including -- blank lines) until we find a non-whitespace character, then do layout -- processing. -- -- One slight wibble here: what if the line begins with {-#? In -- theory, we have to lex the pragma to see if it's one we recognise, -- and if it is, then we backtrack and do_bol, otherwise we treat it -- as a nested comment. We don't bother with this: if the line begins -- with {-#, then we'll assume it's a pragma we know about and go for do_bol. { \n ; ^\# line { begin line_prag1 } ^\# / { followedByDigit } { begin line_prag1 } ^\# pragma .* \n ; -- GCC 3.3 CPP generated, apparently ^\# \! .* \n ; -- #!, for scripts -- gcc ^\ \# \! .* \n ; -- #!, for scripts -- clang; See #6132 () { do_bol } } -- after a layout keyword (let, where, do, of), we begin a new layout -- context if the curly brace is missing. -- Careful! This stuff is quite delicate. { \{ / { notFollowedBy '-' } { hopefully_open_brace } -- we might encounter {-# here, but {- has been handled already \n ; ^\# (line)? 
{ begin line_prag1 } } -- after an 'if', a vertical bar starts a layout context for MultiWayIf { \| / { notFollowedBySymbol } { new_layout_context True dontGenerateSemic ITvbar } () { pop } } -- do is treated in a subtly different way, see new_layout_context () { new_layout_context True generateSemic ITvocurly } () { new_layout_context False generateSemic ITvocurly } -- after a new layout context which was found to be to the left of the -- previous context, we have generated a '{' token, and we now need to -- generate a matching '}' token. () { do_layout_left } <0,option_prags> \n { begin bol } "{-#" $whitechar* $pragmachar+ / { known_pragma linePrags } { dispatch_pragmas linePrags } -- single-line line pragmas, of the form -- # "" \n { @decimal $white_no_nl+ \" [$graphic \ ]* \" { setLineAndFile line_prag1a } () { failLinePrag1 } } .* { popLinePrag1 } -- Haskell-style line pragmas, of the form -- {-# LINE "" #-} { @decimal $white_no_nl+ \" [$graphic \ ]* \" { setLineAndFile line_prag2a } } "#-}"|"-}" { pop } -- NOTE: accept -} at the end of a LINE pragma, for compatibility -- with older versions of GHC which generated these. 
-- Haskell-style column pragmas, of the form -- {-# COLUMN #-} @decimal $whitechar* "#-}" { setColumn } <0,option_prags> { "{-#" $whitechar* $pragmachar+ $whitechar+ $pragmachar+ / { known_pragma twoWordPrags } { dispatch_pragmas twoWordPrags } "{-#" $whitechar* $pragmachar+ / { known_pragma oneWordPrags } { dispatch_pragmas oneWordPrags } -- We ignore all these pragmas, but don't generate a warning for them "{-#" $whitechar* $pragmachar+ / { known_pragma ignoredPrags } { dispatch_pragmas ignoredPrags } -- ToDo: should only be valid inside a pragma: "#-}" { endPrag } } { "{-#" $whitechar* $pragmachar+ / { known_pragma fileHeaderPrags } { dispatch_pragmas fileHeaderPrags } } <0> { -- In the "0" mode we ignore these pragmas "{-#" $whitechar* $pragmachar+ / { known_pragma fileHeaderPrags } { nested_comment } } <0,option_prags> { "{-#" { warnThen PsWarnUnrecognisedPragma (nested_comment ) } } -- '0' state: ordinary lexemes -- Haddock comments "-- " $docsym / { ifExtension HaddockBit } { multiline_doc_comment } "{-" \ ? $docsym / { ifExtension HaddockBit } { nested_doc_comment } -- "special" symbols <0> { -- Don't check ThQuotesBit here as the renamer can produce a better -- error message than the lexer (see the thQuotesEnabled check in rnBracket). "[|" { token (ITopenExpQuote NoE NormalSyntax) } "[||" { token (ITopenTExpQuote NoE) } "|]" { token (ITcloseQuote NormalSyntax) } "||]" { token ITcloseTExpQuote } -- Check ThQuotesBit here as to not steal syntax. 
"[e|" / { ifExtension ThQuotesBit } { token (ITopenExpQuote HasE NormalSyntax) } "[e||" / { ifExtension ThQuotesBit } { token (ITopenTExpQuote HasE) } "[p|" / { ifExtension ThQuotesBit } { token ITopenPatQuote } "[d|" / { ifExtension ThQuotesBit } { layout_token ITopenDecQuote } "[t|" / { ifExtension ThQuotesBit } { token ITopenTypQuote } "[" @varid "|" / { ifExtension QqBit } { lex_quasiquote_tok } -- qualified quasi-quote (#5555) "[" @qvarid "|" / { ifExtension QqBit } { lex_qquasiquote_tok } $unigraphic -- ⟦ / { ifCurrentChar '⟦' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ThQuotesBit } { token (ITopenExpQuote NoE UnicodeSyntax) } $unigraphic -- ⟧ / { ifCurrentChar '⟧' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ThQuotesBit } { token (ITcloseQuote UnicodeSyntax) } } <0> { "(|" / { ifExtension ArrowsBit `alexAndPred` notFollowedBySymbol } { special (IToparenbar NormalSyntax) } "|)" / { ifExtension ArrowsBit } { special (ITcparenbar NormalSyntax) } $unigraphic -- ⦇ / { ifCurrentChar '⦇' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ArrowsBit } { special (IToparenbar UnicodeSyntax) } $unigraphic -- ⦈ / { ifCurrentChar '⦈' `alexAndPred` ifExtension UnicodeSyntaxBit `alexAndPred` ifExtension ArrowsBit } { special (ITcparenbar UnicodeSyntax) } } <0> { \? 
@varid / { ifExtension IpBit } { skip_one_varid ITdupipvarid } } <0> { "#" @varid / { ifExtension OverloadedLabelsBit } { skip_one_varid ITlabelvarid } } <0> { "(#" / { ifExtension UnboxedParensBit } { token IToubxparen } "#)" / { ifExtension UnboxedParensBit } { token ITcubxparen } } <0,option_prags> { \( { special IToparen } \) { special ITcparen } \[ { special ITobrack } \] { special ITcbrack } \, { special ITcomma } \; { special ITsemi } \` { special ITbackquote } \{ { open_brace } \} { close_brace } } <0,option_prags> { @qdo { qdo_token ITdo } @qmdo / { ifExtension RecursiveDoBit } { qdo_token ITmdo } @qvarid { idtoken qvarid } @qconid { idtoken qconid } @varid { varid } @conid { idtoken conid } } <0> { @qvarid "#"+ / { ifExtension MagicHashBit } { idtoken qvarid } @qconid "#"+ / { ifExtension MagicHashBit } { idtoken qconid } @varid "#"+ / { ifExtension MagicHashBit } { varid } @conid "#"+ / { ifExtension MagicHashBit } { idtoken conid } } -- Operators classified into prefix, suffix, tight infix, and loose infix. -- See Note [Whitespace-sensitive operator parsing] <0> { @varsym / { precededByClosingToken `alexAndPred` followedByOpeningToken } { varsym_tight_infix } @varsym / { followedByOpeningToken } { varsym_prefix } @varsym / { precededByClosingToken } { varsym_suffix } @varsym { varsym_loose_infix } } -- ToDo: - move `var` and (sym) into lexical syntax? -- - remove backquote from $special? <0> { @qvarsym { idtoken qvarsym } @qconsym { idtoken qconsym } @consym { consym } } -- For the normal boxed literals we need to be careful -- when trying to be close to Haskell98 -- Note [Lexing NumericUnderscores extension] (#14473) -- -- NumericUnderscores extension allows underscores in numeric literals. -- Multiple underscores are represented with @numspc macro. -- To be simpler, we have only the definitions with underscores. -- And then we have a separate function (tok_integral and tok_frac) -- that validates the literals. 
-- If extensions are not enabled, check that there are no underscores. -- <0> { -- Normal integral literals (:: Num a => a, from Integer) @decimal { tok_num positive 0 0 decimal } 0[bB] @numspc @binary / { ifExtension BinaryLiteralsBit } { tok_num positive 2 2 binary } 0[oO] @numspc @octal { tok_num positive 2 2 octal } 0[xX] @numspc @hexadecimal { tok_num positive 2 2 hexadecimal } @negative @decimal / { negLitPred } { tok_num negative 1 1 decimal } @negative 0[bB] @numspc @binary / { negLitPred `alexAndPred` ifExtension BinaryLiteralsBit } { tok_num negative 3 3 binary } @negative 0[oO] @numspc @octal / { negLitPred } { tok_num negative 3 3 octal } @negative 0[xX] @numspc @hexadecimal / { negLitPred } { tok_num negative 3 3 hexadecimal } -- Normal rational literals (:: Fractional a => a, from Rational) @floating_point { tok_frac 0 tok_float } @negative @floating_point / { negLitPred } { tok_frac 0 tok_float } 0[xX] @numspc @hex_floating_point / { ifExtension HexFloatLiteralsBit } { tok_frac 0 tok_hex_float } @negative 0[xX] @numspc @hex_floating_point / { ifExtension HexFloatLiteralsBit `alexAndPred` negLitPred } { tok_frac 0 tok_hex_float } } <0> { -- Unboxed ints (:: Int#) and words (:: Word#) -- It's simpler (and faster?) to give separate cases to the negatives, -- especially considering octal/hexadecimal prefixes. 
@decimal \# / { ifExtension MagicHashBit } { tok_primint positive 0 1 decimal } 0[bB] @numspc @binary \# / { ifExtension MagicHashBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_primint positive 2 3 binary } 0[oO] @numspc @octal \# / { ifExtension MagicHashBit } { tok_primint positive 2 3 octal } 0[xX] @numspc @hexadecimal \# / { ifExtension MagicHashBit } { tok_primint positive 2 3 hexadecimal } @negative @decimal \# / { negHashLitPred } { tok_primint negative 1 2 decimal } @negative 0[bB] @numspc @binary \# / { negHashLitPred `alexAndPred` ifExtension BinaryLiteralsBit } { tok_primint negative 3 4 binary } @negative 0[oO] @numspc @octal \# / { negHashLitPred } { tok_primint negative 3 4 octal } @negative 0[xX] @numspc @hexadecimal \# / { negHashLitPred } { tok_primint negative 3 4 hexadecimal } @decimal \# \# / { ifExtension MagicHashBit } { tok_primword 0 2 decimal } 0[bB] @numspc @binary \# \# / { ifExtension MagicHashBit `alexAndPred` ifExtension BinaryLiteralsBit } { tok_primword 2 4 binary } 0[oO] @numspc @octal \# \# / { ifExtension MagicHashBit } { tok_primword 2 4 octal } 0[xX] @numspc @hexadecimal \# \# / { ifExtension MagicHashBit } { tok_primword 2 4 hexadecimal } -- Unboxed floats and doubles (:: Float#, :: Double#) -- prim_{float,double} work with signed literals @floating_point \# / { ifExtension MagicHashBit } { tok_frac 1 tok_primfloat } @floating_point \# \# / { ifExtension MagicHashBit } { tok_frac 2 tok_primdouble } @negative @floating_point \# / { negHashLitPred } { tok_frac 1 tok_primfloat } @negative @floating_point \# \# / { negHashLitPred } { tok_frac 2 tok_primdouble } } -- Strings and chars are lexed by hand-written code. The reason is -- that even if we recognise the string or char here in the regex -- lexer, we would still have to parse the string afterward in order -- to convert it to a String. 
<0> { \' { lex_char_tok } \" { lex_string_tok } } -- Note [Whitespace-sensitive operator parsing] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- In accord with GHC Proposal #229 https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0229-whitespace-bang-patterns.rst -- we classify operator occurrences into four categories: -- -- a ! b -- a loose infix occurrence -- a!b -- a tight infix occurrence -- a !b -- a prefix occurrence -- a! b -- a suffix occurrence -- -- The rules are a bit more elaborate than simply checking for whitespace, in -- order to accommodate the following use cases: -- -- f (!a) = ... -- prefix occurrence -- g (a !) -- loose infix occurrence -- g (! a) -- loose infix occurrence -- -- The precise rules are as follows: -- -- * Identifiers, literals, and opening brackets (, (#, (|, [, [|, [||, [p|, -- [e|, [t|, {, ⟦, ⦇, are considered "opening tokens". The function -- followedByOpeningToken tests whether the next token is an opening token. -- -- * Identifiers, literals, and closing brackets ), #), |), ], |], }, ⟧, ⦈, -- are considered "closing tokens". The function precededByClosingToken tests -- whether the previous token is a closing token. -- -- * Whitespace, comments, separators, and other tokens, are considered -- neither opening nor closing. -- -- * Any unqualified operator occurrence is classified as prefix, suffix, or -- tight/loose infix, based on preceding and following tokens: -- -- precededByClosingToken | followedByOpeningToken | Occurrence -- ------------------------+------------------------+------------ -- False | True | prefix -- True | False | suffix -- True | True | tight infix -- False | False | loose infix -- ------------------------+------------------------+------------ -- -- A loose infix occurrence is always considered an operator. 
Other types of -- occurrences may be assigned a special per-operator meaning override: -- -- Operator | Occurrence | Token returned -- ----------+---------------+------------------------------------------ -- ! | prefix | ITbang -- | | strictness annotation or bang pattern, -- | | e.g. f !x = rhs, data T = MkT !a -- | not prefix | ITvarsym "!" -- | | ordinary operator or type operator, -- | | e.g. xs ! 3, (! x), Int ! Bool -- ----------+---------------+------------------------------------------ -- ~ | prefix | ITtilde -- | | laziness annotation or lazy pattern, -- | | e.g. f ~x = rhs, data T = MkT ~a -- | not prefix | ITvarsym "~" -- | | ordinary operator or type operator, -- | | e.g. xs ~ 3, (~ x), Int ~ Bool -- ----------+---------------+------------------------------------------ -- . | prefix | ITproj True -- | | field projection, -- | | e.g. .x -- | tight infix | ITproj False -- | | field projection, -- | | e.g. r.x -- | suffix | ITdot -- | | function composition, -- | | e.g. f. g -- | loose infix | ITdot -- | | function composition, -- | | e.g. f . g -- ----------+---------------+------------------------------------------ -- $ $$ | prefix | ITdollar, ITdollardollar -- | | untyped or typed Template Haskell splice, -- | | e.g. $(f x), $$(f x), $$"str" -- | not prefix | ITvarsym "$", ITvarsym "$$" -- | | ordinary operator or type operator, -- | | e.g. f $ g x, a $$ b -- ----------+---------------+------------------------------------------ -- @ | prefix | ITtypeApp -- | | type application, e.g. fmap @Maybe -- | tight infix | ITat -- | | as-pattern, e.g. f p@(a,b) = rhs -- | suffix | parse error -- | | e.g. f p@ x = rhs -- | loose infix | ITvarsym "@" -- | | ordinary operator or type operator, -- | | e.g. f @ g, (f @) -- ----------+---------------+------------------------------------------ -- -- Also, some of these overrides are guarded behind language extensions. 
-- According to the specification, we must determine the occurrence based on -- surrounding *tokens* (see the proposal for the exact rules). However, in -- the implementation we cheat a little and do the classification based on -- characters, for reasons of both simplicity and efficiency (see -- 'followedByOpeningToken' and 'precededByClosingToken') -- -- When an operator is subject to a meaning override, it is mapped to special -- token: ITbang, ITtilde, ITat, ITdollar, ITdollardollar. Otherwise, it is -- returned as ITvarsym. -- -- For example, this is how we process the (!): -- -- precededByClosingToken | followedByOpeningToken | Token -- ------------------------+------------------------+------------- -- False | True | ITbang -- True | False | ITvarsym "!" -- True | True | ITvarsym "!" -- False | False | ITvarsym "!" -- ------------------------+------------------------+------------- -- -- And this is how we process the (@): -- -- precededByClosingToken | followedByOpeningToken | Token -- ------------------------+------------------------+------------- -- False | True | ITtypeApp -- True | False | parse error -- True | True | ITat -- False | False | ITvarsym "@" -- ------------------------+------------------------+------------- -- ----------------------------------------------------------------------------- -- Alex "Haskell code fragment bottom" { -- ----------------------------------------------------------------------------- -- The token type data Token = ITas -- Haskell keywords | ITcase | ITclass | ITdata | ITdefault | ITderiving | ITdo (Maybe FastString) | ITelse | IThiding | ITforeign | ITif | ITimport | ITin | ITinfix | ITinfixl | ITinfixr | ITinstance | ITlet | ITmodule | ITnewtype | ITof | ITqualified | ITthen | ITtype | ITwhere | ITforall IsUnicodeSyntax -- GHC extension keywords | ITexport | ITlabel | ITdynamic | ITsafe | ITinterruptible | ITunsafe | ITstdcallconv | ITccallconv | ITcapiconv | ITprimcallconv | ITjavascriptcallconv | ITmdo (Maybe 
FastString) | ITfamily | ITrole | ITgroup | ITby | ITusing | ITpattern | ITstatic | ITstock | ITanyclass | ITvia -- Backpack tokens | ITunit | ITsignature | ITdependency | ITrequires -- Pragmas, see Note [Pragma source text] in "GHC.Types.Basic" | ITinline_prag SourceText InlineSpec RuleMatchInfo | ITopaque_prag SourceText | ITspec_prag SourceText -- SPECIALISE | ITspec_inline_prag SourceText Bool -- SPECIALISE INLINE (or NOINLINE) | ITsource_prag SourceText | ITrules_prag SourceText | ITwarning_prag SourceText | ITdeprecated_prag SourceText | ITline_prag SourceText -- not usually produced, see 'UsePosPragsBit' | ITcolumn_prag SourceText -- not usually produced, see 'UsePosPragsBit' | ITscc_prag SourceText | ITunpack_prag SourceText | ITnounpack_prag SourceText | ITann_prag SourceText | ITcomplete_prag SourceText | ITclose_prag | IToptions_prag String | ITinclude_prag String | ITlanguage_prag | ITminimal_prag SourceText | IToverlappable_prag SourceText -- instance overlap mode | IToverlapping_prag SourceText -- instance overlap mode | IToverlaps_prag SourceText -- instance overlap mode | ITincoherent_prag SourceText -- instance overlap mode | ITctype SourceText | ITcomment_line_prag -- See Note [Nested comment line pragmas] | ITdotdot -- reserved symbols | ITcolon | ITdcolon IsUnicodeSyntax | ITequal | ITlam | ITlcase | ITlcases | ITvbar | ITlarrow IsUnicodeSyntax | ITrarrow IsUnicodeSyntax | ITdarrow IsUnicodeSyntax | ITlolly -- The (⊸) arrow (for LinearTypes) | ITminus -- See Note [Minus tokens] | ITprefixminus -- See Note [Minus tokens] | ITbang -- Prefix (!) only, e.g. f !x = rhs | ITtilde -- Prefix (~) only, e.g. f ~x = rhs | ITat -- Tight infix (@) only, e.g. f x@pat = rhs | ITtypeApp -- Prefix (@) only, e.g. f @t | ITpercent -- Prefix (%) only, e.g. 
a %1 -> b | ITstar IsUnicodeSyntax | ITdot | ITproj Bool -- Extension: OverloadedRecordDotBit | ITbiglam -- GHC-extension symbols | ITocurly -- special symbols | ITccurly | ITvocurly | ITvccurly | ITobrack | ITopabrack -- [:, for parallel arrays with -XParallelArrays | ITcpabrack -- :], for parallel arrays with -XParallelArrays | ITcbrack | IToparen | ITcparen | IToubxparen | ITcubxparen | ITsemi | ITcomma | ITunderscore | ITbackquote | ITsimpleQuote -- ' | ITvarid FastString -- identifiers | ITconid FastString | ITvarsym FastString | ITconsym FastString | ITqvarid (FastString,FastString) | ITqconid (FastString,FastString) | ITqvarsym (FastString,FastString) | ITqconsym (FastString,FastString) | ITdupipvarid FastString -- GHC extension: implicit param: ?x | ITlabelvarid FastString -- Overloaded label: #x | ITchar SourceText Char -- Note [Literal source text] in "GHC.Types.Basic" | ITstring SourceText FastString -- Note [Literal source text] in "GHC.Types.Basic" | ITinteger IntegralLit -- Note [Literal source text] in "GHC.Types.Basic" | ITrational FractionalLit | ITprimchar SourceText Char -- Note [Literal source text] in "GHC.Types.Basic" | ITprimstring SourceText ByteString -- Note [Literal source text] in "GHC.Types.Basic" | ITprimint SourceText Integer -- Note [Literal source text] in "GHC.Types.Basic" | ITprimword SourceText Integer -- Note [Literal source text] in "GHC.Types.Basic" | ITprimfloat FractionalLit | ITprimdouble FractionalLit -- Template Haskell extension tokens | ITopenExpQuote HasE IsUnicodeSyntax -- [| or [e| | ITopenPatQuote -- [p| | ITopenDecQuote -- [d| | ITopenTypQuote -- [t| | ITcloseQuote IsUnicodeSyntax -- |] | ITopenTExpQuote HasE -- [|| or [e|| | ITcloseTExpQuote -- ||] | ITdollar -- prefix $ | ITdollardollar -- prefix $$ | ITtyQuote -- '' | ITquasiQuote (FastString,FastString,PsSpan) -- ITquasiQuote(quoter, quote, loc) -- represents a quasi-quote of the form -- [quoter| quote |] | ITqQuasiQuote 
(FastString,FastString,FastString,PsSpan) -- ITqQuasiQuote(Qual, quoter, quote, loc) -- represents a qualified quasi-quote of the form -- [Qual.quoter| quote |] -- Arrow notation extension | ITproc | ITrec | IToparenbar IsUnicodeSyntax -- ^ @(|@ | ITcparenbar IsUnicodeSyntax -- ^ @|)@ | ITlarrowtail IsUnicodeSyntax -- ^ @-<@ | ITrarrowtail IsUnicodeSyntax -- ^ @>-@ | ITLarrowtail IsUnicodeSyntax -- ^ @-<<@ | ITRarrowtail IsUnicodeSyntax -- ^ @>>-@ | ITunknown String -- ^ Used when the lexer can't make sense of it | ITeof -- ^ end of file token -- Documentation annotations. See Note [PsSpan in Comments] | ITdocComment HsDocString PsSpan -- ^ The HsDocString contains more details about what -- this is and how to pretty print it | ITdocOptions String PsSpan -- ^ doc options (prune, ignore-exports, etc) | ITlineComment String PsSpan -- ^ comment starting by "--" | ITblockComment String PsSpan -- ^ comment in {- -} deriving Show instance Outputable Token where ppr x = text (show x) {- Note [PsSpan in Comments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using the Api Annotations to exact print a modified AST, managing the space before a comment is important. The PsSpan in the comment token allows this to happen. We also need to track the space before the end of file. The normal mechanism of using the previous token does not work, as the ITeof is synthesised to come at the same location of the last token, and the normal previous token updating has by then updated the required location. We track this using a 2-back location, prev_loc2. This adds extra processing to every single token, which is a performance hit for something needed only at the end of the file. This needs improving. Perhaps a backward scan on eof? -} {- Note [Minus tokens] ~~~~~~~~~~~~~~~~~~~~~~ A minus sign can be used in prefix form (-x) and infix form (a - b). 
When LexicalNegation is on:
  * ITprefixminus represents the prefix form
  * ITvarsym "-" represents the infix form
  * ITminus is not used

When LexicalNegation is off:
  * ITminus represents all forms
  * ITprefixminus is not used
  * ITvarsym "-" is not used
-}

{- Note [Why not LexicalNegationBit]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
One might wonder why we define NoLexicalNegationBit instead of
LexicalNegationBit. The problem lies in the following line in reservedSymsFM:

    ,("-", ITminus, NormalSyntax, xbit NoLexicalNegationBit)

We want to generate ITminus only when LexicalNegation is off. How would one do
it if we had LexicalNegationBit? I (int-index) tried to use bitwise complement:

    ,("-", ITminus, NormalSyntax, complement (xbit LexicalNegationBit))

This did not work, so I opted for NoLexicalNegationBit instead.
-}

-- the bitmap provided as the third component indicates whether the
-- corresponding extension keyword is valid under the extension options
-- provided to the compiler; if the extension corresponding to *any* of the
-- bits set in the bitmap is enabled, the keyword is valid (this setup
-- facilitates using a keyword in two different extensions that can be
-- activated independently)
--
reservedWordsFM :: UniqFM FastString (Token, ExtsBitmap)
reservedWordsFM = listToUFM $
    map (\(x, y, z) -> (mkFastString x, (y, z)))
        -- A bitmap of 0 means the keyword is always reserved.
        [( "_",              ITunderscore,    0 ),
         ( "as",             ITas,            0 ),
         ( "case",           ITcase,          0 ),
         ( "cases",          ITlcases,        xbit LambdaCaseBit ),
         ( "class",          ITclass,         0 ),
         ( "data",           ITdata,          0 ),
         ( "default",        ITdefault,       0 ),
         ( "deriving",       ITderiving,      0 ),
         ( "do",             ITdo Nothing,    0 ),
         ( "else",           ITelse,          0 ),
         ( "hiding",         IThiding,        0 ),
         ( "if",             ITif,            0 ),
         ( "import",         ITimport,        0 ),
         ( "in",             ITin,            0 ),
         ( "infix",          ITinfix,         0 ),
         ( "infixl",         ITinfixl,        0 ),
         ( "infixr",         ITinfixr,        0 ),
         ( "instance",       ITinstance,      0 ),
         ( "let",            ITlet,           0 ),
         ( "module",         ITmodule,        0 ),
         ( "newtype",        ITnewtype,       0 ),
         ( "of",             ITof,            0 ),
         ( "qualified",      ITqualified,     0 ),
         ( "then",           ITthen,          0 ),
         ( "type",           ITtype,          0 ),
         ( "where",          ITwhere,         0 ),

         ( "forall",         ITforall NormalSyntax, 0),
         ( "mdo",            ITmdo Nothing,   xbit RecursiveDoBit),
             -- See Note [Lexing type pseudo-keywords]
         ( "family",         ITfamily,        0 ),
         ( "role",           ITrole,          0 ),
         ( "pattern",        ITpattern,       xbit PatternSynonymsBit),
         ( "static",         ITstatic,        xbit StaticPointersBit ),
         ( "stock",          ITstock,         0 ),
         ( "anyclass",       ITanyclass,      0 ),
         ( "via",            ITvia,           0 ),
         ( "group",          ITgroup,         xbit TransformComprehensionsBit),
         ( "by",             ITby,            xbit TransformComprehensionsBit),
         ( "using",          ITusing,         xbit TransformComprehensionsBit),

         ( "foreign",        ITforeign,       xbit FfiBit),
         ( "export",         ITexport,        xbit FfiBit),
         ( "label",          ITlabel,         xbit FfiBit),
         ( "dynamic",        ITdynamic,       xbit FfiBit),
         ( "safe",           ITsafe,          xbit FfiBit .|.
                                              xbit SafeHaskellBit),
         ( "interruptible",  ITinterruptible, xbit InterruptibleFfiBit),
         ( "unsafe",         ITunsafe,        xbit FfiBit),
         ( "stdcall",        ITstdcallconv,   xbit FfiBit),
         ( "ccall",          ITccallconv,     xbit FfiBit),
         ( "capi",           ITcapiconv,      xbit CApiFfiBit),
         ( "prim",           ITprimcallconv,  xbit FfiBit),
         ( "javascript",     ITjavascriptcallconv, xbit FfiBit),

         ( "unit",           ITunit,          0 ),
         ( "dependency",     ITdependency,    0 ),
         ( "signature",      ITsignature,     0 ),

         ( "rec",            ITrec,           xbit ArrowsBit .|.
                                              xbit RecursiveDoBit),
         ( "proc",           ITproc,          xbit ArrowsBit)
     ]

{-----------------------------------
Note [Lexing type pseudo-keywords]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

One might think that we wish to treat 'family' and 'role' as regular old
varids whenever -XTypeFamilies and -XRoleAnnotations are off, respectively.
But, there is no need to do so. These pseudo-keywords are not stolen syntax:
they are only used after the keyword 'type' at the top-level, where varids are
not allowed. Furthermore, checks further downstream (GHC.Tc.TyCl) ensure that
type families and role annotations are never declared without their extensions
on. In fact, by unconditionally lexing these pseudo-keywords as special, we
can get better error messages.

Also, note that these are included in the `varid` production in the parser --
a key detail to make all this work.
-------------------------------------}

reservedSymsFM :: UniqFM FastString (Token, IsUnicodeSyntax, ExtsBitmap)
reservedSymsFM = listToUFM $
    map (\ (x,w,y,z) -> (mkFastString x,(w,y,z)))
      [ ("..",  ITdotdot,                   NormalSyntax,  0 )
        -- (:) is a reserved op, meaning only list cons
       ,(":",   ITcolon,                    NormalSyntax,  0 )
       ,("::",  ITdcolon NormalSyntax,      NormalSyntax,  0 )
       ,("=",   ITequal,                    NormalSyntax,  0 )
       ,("\\",  ITlam,                      NormalSyntax,  0 )
       ,("|",   ITvbar,                     NormalSyntax,  0 )
       ,("<-",  ITlarrow NormalSyntax,      NormalSyntax,  0 )
       ,("->",  ITrarrow NormalSyntax,      NormalSyntax,  0 )
       ,("=>",  ITdarrow NormalSyntax,      NormalSyntax,  0 )
       -- See Note [Why not LexicalNegationBit] for this bitmap.
       ,("-",   ITminus,                    NormalSyntax,  xbit NoLexicalNegationBit)
       ,("*",   ITstar NormalSyntax,        NormalSyntax,  xbit StarIsTypeBit)
        -- For 'forall a . t'
       ,(".",   ITdot,                      NormalSyntax,  0 )
       ,("-<",  ITlarrowtail NormalSyntax,  NormalSyntax,  xbit ArrowsBit)
       ,(">-",  ITrarrowtail NormalSyntax,  NormalSyntax,  xbit ArrowsBit)
       ,("-<<", ITLarrowtail NormalSyntax,  NormalSyntax,  xbit ArrowsBit)
       ,(">>-", ITRarrowtail NormalSyntax,  NormalSyntax,  xbit ArrowsBit)
       -- Unicode variants; only recognised under UnicodeSyntax (see 'sym').
       ,("∷",   ITdcolon UnicodeSyntax,     UnicodeSyntax, 0 )
       ,("⇒",   ITdarrow UnicodeSyntax,     UnicodeSyntax, 0 )
       ,("∀",   ITforall UnicodeSyntax,     UnicodeSyntax, 0 )
       ,("→",   ITrarrow UnicodeSyntax,     UnicodeSyntax, 0 )
       ,("←",   ITlarrow UnicodeSyntax,     UnicodeSyntax, 0 )
       ,("⊸",   ITlolly,                    UnicodeSyntax, 0)
       ,("⤙",   ITlarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit)
       ,("⤚",   ITrarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit)
       ,("⤛",   ITLarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit)
       ,("⤜",   ITRarrowtail UnicodeSyntax, UnicodeSyntax, xbit ArrowsBit)
       ,("★",   ITstar UnicodeSyntax,       UnicodeSyntax, xbit StarIsTypeBit)

        -- ToDo: ideally, → and ∷ should be "specials", so that they cannot
        -- form part of a large operator.  This would let us have a better
        -- syntax for kinds: ɑ∷*→* would be a legal kind signature. (maybe).
      ]

-- -----------------------------------------------------------------------------
-- Lexer actions

-- | An 'Action' consumes the lexeme (span, buffer, byte length) and produces
-- a located token in the parsing monad 'P'.
type Action = PsSpan -> StringBuffer -> Int -> P (PsLocated Token)

-- | Emit a fixed token, ignoring the lexeme text.
special :: Token -> Action
special tok span _buf _len = return (L span tok)

-- | 'token' emits a fixed token; 'layout_token' additionally pushes the
-- 'layout' lexer state so the next token may open an implicit block.
token, layout_token :: Token -> Action
token t span _buf _len = return (L span t)
layout_token t span _buf _len = pushLexState layout >> return (L span t)

-- | Build a token from the raw lexeme buffer and length.
idtoken :: (StringBuffer -> Int -> Token) -> Action
idtoken f span buf len = return (L span $! (f buf len))

-- | Qualified @do@/@mdo@ (e.g. @M.do@): the token carries the qualifier,
-- and 'maybe_layout' is consulted so the block gets a layout context.
qdo_token :: (Maybe FastString -> Token) -> Action
qdo_token con span buf len = do
    maybe_layout token
    return (L span $! token)
  where
    !token = con $! Just $! fst $! splitQualName buf len False

-- | Like 'idtoken' but drops the first character of the lexeme.
skip_one_varid :: (FastString -> Token) -> Action
skip_one_varid f span buf len
  = return (L span $! f (lexemeToFastString (stepOn buf) (len-1)))

-- | Like 'idtoken' but drops the first two characters of the lexeme.
skip_two_varid :: (FastString -> Token) -> Action
skip_two_varid f span buf len
  = return (L span $! f (lexemeToFastString (stepOn (stepOn buf)) (len-2)))

-- | Build a token from the lexeme decoded to a 'String'.
strtoken :: (String -> Token) -> Action
strtoken f span buf len =
  return (L span $! (f $! lexemeToString buf len))

-- | Switch to the given lexer start code and lex the next token there.
begin :: Int -> Action
begin code _span _str _len = do pushLexState code; lexToken

-- | Pop the current lexer start code and continue lexing.
pop :: Action
pop _span _buf _len =
  do _ <- popLexState
     lexToken

-- See Note [Nested comment line pragmas]
failLinePrag1 :: Action
failLinePrag1 span _buf _len = do
  b <- getBit InNestedCommentBit
  if b then return (L span ITcomment_line_prag)
       else lexError LexErrorInPragma

-- See Note [Nested comment line pragmas]
popLinePrag1 :: Action
popLinePrag1 span _buf _len = do
  b <- getBit InNestedCommentBit
  if b then return (L span ITcomment_line_prag)
       else do
         _ <- popLexState
         lexToken

-- | Open an explicit brace in a layout-sensitive position: accepted when
-- RelaxedLayout is on or when the brace is to the right of the enclosing
-- layout context; otherwise report 'PsErrMissingBlock'.
hopefully_open_brace :: Action
hopefully_open_brace span buf len
 = do relaxed <- getBit RelaxedLayoutBit
      ctx <- getContext
      (AI l _) <- getInput
      let offset = srcLocCol (psRealLoc l)
          isOK = relaxed ||
                 case ctx of
                 Layout prev_off _ : _ -> prev_off < offset
                 _                     -> True
      if isOK then pop_and open_brace span buf len
       else addFatalError $
              mkPlainErrorMsgEnvelope (mkSrcSpanPs span) PsErrMissingBlock

-- | Pop the lexer start code, then run the given action on the same lexeme.
pop_and :: Action -> Action
pop_and act span buf len = do
  _ <- popLexState
  act span buf len

-- See Note [Whitespace-sensitive operator parsing]
followedByOpeningToken :: AlexAccPred ExtsBitmap
followedByOpeningToken _ _ _ (AI _ buf)
  | atEnd buf = False
  | otherwise =
      case nextChar buf of
        -- '{' opens unless it starts a block comment "{-".
        ('{', buf') -> nextCharIsNot buf' (== '-')
        ('(', _) -> True
        ('[', _) -> True
        ('\"', _) -> True
        ('\'', _) -> True
        ('_', _) -> True
        ('⟦', _) -> True
        ('⦇', _) -> True
        (c, _) -> isAlphaNum c

-- See Note [Whitespace-sensitive operator parsing]
precededByClosingToken :: AlexAccPred ExtsBitmap
precededByClosingToken _ (AI _ buf) _ _ =
  case prevChar buf '\n' of
    -- '}' closes unless it ends a block comment "-}".
    '}' -> decodePrevNChars 1 buf /= "-"
    ')' -> True
    ']' -> True
    '\"' -> True
    '\'' -> True
    '_' -> True
    '⟧' -> True
    '⦈' -> True
    c -> isAlphaNum c

{-# INLINE nextCharIs #-}
-- | True iff the buffer is non-empty and its current character satisfies p.
nextCharIs :: StringBuffer -> (Char -> Bool) -> Bool
nextCharIs buf p = not (atEnd buf) && p (currentChar buf)

{-# INLINE nextCharIsNot #-}
-- | Negation of 'nextCharIs'; also True at end of buffer.
nextCharIsNot :: StringBuffer -> (Char -> Bool) -> Bool
nextCharIsNot buf p = not (nextCharIs buf p)

-- | Alex predicate: the next character is not the given one.
notFollowedBy :: Char -> AlexAccPred ExtsBitmap
notFollowedBy char _ _ _ (AI _ buf)
  = nextCharIsNot buf (== char)

-- | Alex predicate: the next character is not an ASCII symbol character.
notFollowedBySymbol :: AlexAccPred ExtsBitmap
notFollowedBySymbol _ _ _ (AI _ buf)
  = nextCharIsNot buf (`elem` "!#$%&*+./<=>?@\\^|-~")

-- | Alex predicate: the next character (after at most one space) is a digit.
followedByDigit :: AlexAccPred ExtsBitmap
followedByDigit _ _ _ (AI _ buf)
  = afterOptionalSpace buf (\b -> nextCharIs b (`elem` ['0'..'9']))

-- | Alex predicate: the character at the *start* of the match is the given one.
ifCurrentChar :: Char -> AlexAccPred ExtsBitmap
ifCurrentChar char _ (AI _ buf) _ _
  = nextCharIs buf (== char)

-- We must reject doc comments as being ordinary comments everywhere.
-- In some cases the doc comment will be selected as the lexeme due to
-- maximal munch, but not always, because the nested comment rule is
-- valid in all states, but the doc-comment rules are only valid in
-- the non-layout states.
isNormalComment :: AlexAccPred ExtsBitmap
isNormalComment bits _ _ (AI _ buf)
  | HaddockBit `xtest` bits = notFollowedByDocOrPragma
  | otherwise               = nextCharIsNot buf (== '#')
  where
    notFollowedByDocOrPragma
      = afterOptionalSpace buf (\b -> nextCharIsNot b (`elem` "|^*$#"))

-- | Apply the check after skipping at most one leading space character.
afterOptionalSpace :: StringBuffer -> (StringBuffer -> Bool) -> Bool
afterOptionalSpace buf p
  = if nextCharIs buf (== ' ')
    then p (snd (nextChar buf))
    else p buf

-- | Alex predicate: at end of input or at a newline.
atEOL :: AlexAccPred ExtsBitmap
atEOL _ _ _ (AI _ buf) = atEnd buf || currentChar buf == '\n'

-- Check if we should parse a negative literal (e.g. -123) as a single token.
negLitPred :: AlexAccPred ExtsBitmap
negLitPred =
    prefix_minus `alexAndPred`
    (negative_literals `alexOrPred` lexical_negation)
  where
    negative_literals = ifExtension NegativeLiteralsBit

    lexical_negation =
      -- See Note [Why not LexicalNegationBit]
      alexNotPred (ifExtension NoLexicalNegationBit)

    prefix_minus =
      -- Note [prefix_minus in negLitPred and negHashLitPred]
      alexNotPred precededByClosingToken

-- Check if we should parse an unboxed negative literal (e.g. -123#) as a single token.
negHashLitPred :: AlexAccPred ExtsBitmap
negHashLitPred = prefix_minus `alexAndPred` magic_hash
  where
    magic_hash = ifExtension MagicHashBit
    prefix_minus =
      -- Note [prefix_minus in negLitPred and negHashLitPred]
      alexNotPred precededByClosingToken

{- Note [prefix_minus in negLitPred and negHashLitPred]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We want to parse -1 as a single token, but x-1 as three tokens.
So in negLitPred (and negHashLitPred) we require that we have a prefix
occurrence of the minus sign. See Note [Whitespace-sensitive operator parsing]
for a detailed definition of a prefix occurrence.

The condition for a prefix occurrence of an operator is:

  not precededByClosingToken && followedByOpeningToken

but we don't check followedByOpeningToken when parsing a negative literal.
It holds simply because we immediately lex a literal after the minus.
-}

-- | Alex predicate: accept only when the given extension bit is enabled.
ifExtension :: ExtBits -> AlexAccPred ExtsBitmap
ifExtension extBits bits _ _ _ = extBits `xtest` bits

-- Boolean combinators over Alex accept predicates (no signatures: the
-- argument predicate type is fixed by use sites).
alexNotPred p userState in1 len in2
  = not (p userState in1 len in2)

alexOrPred p1 p2 userState in1 len in2
  = p1 userState in1 len in2 || p2 userState in1 len in2

-- | Lex a multi-line (@-- |@ style) Haddock comment, accumulating each
-- physical line as a separate chunk until a non-continuation line is found.
multiline_doc_comment :: Action
multiline_doc_comment span buf _len = {-# SCC "multiline_doc_comment" #-} withLexedDocType worker
  where
    worker input@(AI start_loc _) docType checkNextLine = go start_loc "" [] input
      where
        -- Accumulate the current line (reversed) and previously finished lines.
        go start_loc curLine prevLines input@(AI end_loc _) = case alexGetChar' input of
          Just ('\n', input')
            | checkNextLine -> case checkIfCommentLine input' of
              Just input@(AI next_start _) -> go next_start "" (locatedLine : prevLines) input -- Start a new line
              Nothing -> endComment
            | otherwise -> endComment
          Just (c, input) -> go start_loc (c:curLine) prevLines input
          Nothing -> endComment
          where
            lineSpan = mkSrcSpanPs $ mkPsSpan start_loc end_loc
            locatedLine = L lineSpan (mkHsDocStringChunk $ reverse curLine)
            commentLines = NE.reverse $ locatedLine :| prevLines
            endComment = docCommentEnd input (docType (\dec -> MultiLineDocString dec commentLines)) buf span

    -- Check if the next line of input belongs to this doc comment as well.
    -- A doc comment continues onto the next line when the following
    -- conditions are met:
    --   * The line starts with "--"
    --   * The line doesn't start with "---".
    --   * The line doesn't start with "-- $", because that would be the
    --     start of a /new/ named haddock chunk (#10398).
    checkIfCommentLine :: AlexInput -> Maybe AlexInput
    checkIfCommentLine input = check (dropNonNewlineSpace input)
      where
        check input = do
          ('-', input) <- alexGetChar' input
          ('-', input) <- alexGetChar' input
          (c, after_c) <- alexGetChar' input
          case c of
            '-' -> Nothing
            ' ' -> case alexGetChar' after_c of
                     Just ('$', _) -> Nothing
                     _ -> Just input
            _ -> Just input

        -- Skip horizontal whitespace (but not newlines).
        dropNonNewlineSpace input = case alexGetChar' input of
          Just (c, input')
            | isSpace c && c /= '\n' -> dropNonNewlineSpace input'
            | otherwise -> input
          Nothing -> input

-- | Ordinary @--@ comment: kept as a token only under RawTokenStream,
-- otherwise skipped by lexing the next token.
lineCommentToken :: Action
lineCommentToken span buf len = do
  b <- getBit RawTokenStreamBit
  if b
    then do
      lt <- getLastLocComment
      strtoken (\s -> ITlineComment s lt) span buf len
    else lexToken

{-
  nested comments require traversing by hand, they can't be parsed
  using regular expressions.
-}
nested_comment :: Action
nested_comment span buf len = {-# SCC "nested_comment" #-} do
  l <- getLastLocComment
  let endComment input (L _ comment) = commentEnd lexToken input (Nothing, ITblockComment comment l) buf span
  input <- getInput
  -- Include decorator in comment
  let start_decorator = reverse $ lexemeToString buf len
  nested_comment_logic endComment start_decorator input span

-- | Nested Haddock comment (@{- | ... -}@): lexed like a nested comment,
-- with the trailing "-}" decorator stripped from the collected text.
nested_doc_comment :: Action
nested_doc_comment span buf _len = {-# SCC "nested_doc_comment" #-} withLexedDocType worker
  where
    worker input docType _checkNextLine = nested_comment_logic endComment "" input span
      where
        endComment input lcomment
          = docCommentEnd input (docType (\d -> NestedDocString d (mkHsDocStringChunk . dropTrailingDec <$> lcomment))) buf span

        dropTrailingDec [] = []
        dropTrailingDec "-}" = ""
        dropTrailingDec (x:xs) = x:dropTrailingDec xs

{-# INLINE nested_comment_logic #-}
-- | Includes the trailing '-}' decorators
-- drop the last two elements with the callback if you don't want them to be included
nested_comment_logic
  :: (AlexInput -> Located String -> P (PsLocated Token)) -- ^ Continuation that gets the rest of the input and the lexed comment
  -> String -- ^ starting value for accumulator (reversed) - When we want to include a decorator '{-' in the comment
  -> AlexInput
  -> PsSpan
  -> P (PsLocated Token)
nested_comment_logic endComment commentAcc input span = go commentAcc (1::Int) input
  where
    -- n is the nesting depth; 0 means the comment is closed.
    go commentAcc 0 input@(AI end_loc _) = do
      let comment = reverse commentAcc
          cspan = mkSrcSpanPs $ mkPsSpan (psSpanStart span) end_loc
          lcomment = L cspan comment
      endComment input lcomment
    go commentAcc n input = case alexGetChar' input of
      Nothing -> errBrace input (psRealSpan span)
      Just ('-',input) -> case alexGetChar' input of
        Nothing -> errBrace input (psRealSpan span)
        Just ('\125',input) -> go ('\125':'-':commentAcc) (n-1) input -- '}'
        Just (_,_) -> go ('-':commentAcc) n input
      Just ('\123',input) -> case alexGetChar' input of -- '{' char
        Nothing -> errBrace input (psRealSpan span)
        Just ('-',input) -> go ('-':'\123':commentAcc) (n+1) input
        Just (_,_) -> go ('\123':commentAcc) n input
      -- See Note [Nested comment line pragmas]
      Just ('\n',input) -> case alexGetChar' input of
        Nothing -> errBrace input (psRealSpan span)
        Just ('#',_) -> do
          (parsedAcc,input) <- parseNestedPragma input
          go (parsedAcc ++ '\n':commentAcc) n input
        Just (_,_) -> go ('\n':commentAcc) n input
      Just (c,input) -> go (c:commentAcc) n input

-- See Note [Nested comment line pragmas]
parseNestedPragma :: AlexInput -> P (String,AlexInput)
parseNestedPragma input@(AI _ buf) = do
  origInput <- getInput
  setInput input
  setExts (.|. xbit InNestedCommentBit)
  pushLexState bol
  lt <- lexToken
  _ <- popLexState
  setExts (.&. complement (xbit InNestedCommentBit))
  postInput@(AI _ postBuf) <- getInput
  setInput origInput
  case unLoc lt of
    ITcomment_line_prag -> do
      let bytes = byteDiff buf postBuf
          diff = lexemeToString buf bytes
      return (reverse diff, postInput)
    lt' -> panic ("parseNestedPragma: unexpected token" ++ (show lt'))

{- Note [Nested comment line pragmas]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to ignore cpp-preprocessor-generated #line pragmas if they were inside
nested comments.

Now, when parsing a nested comment, if we encounter a line starting with '#' we
call parseNestedPragma, which executes the following:
1. Save the current lexer input (loc, buf) for later
2. Set the current lexer input to the beginning of the line starting with '#'
3. Turn the 'InNestedComment' extension on
4. Push the 'bol' lexer state
5. Lex a token. Due to (2), (3), and (4), this should always lex a single line
   or less and return the ITcomment_line_prag token. This may set source line
   and file location if a #line pragma is successfully parsed
6. Restore lexer input and state to what they were before we did all this
7. Return control to the function parsing a nested comment, informing it of
   what the lexer parsed

Regarding (5) above:
Every exit from the 'bol' lexer state (do_bol, popLinePrag1, failLinePrag1)
checks if the 'InNestedComment' extension is set. If it is, that function will
return control to parseNestedPragma by returning the ITcomment_line_prag token.

See #314 for more background on the bug this fixes.
-}

{-# INLINE withLexedDocType #-}
-- | Dispatch on the Haddock decorator character ('|', '^', '$', '*') that
-- follows the comment opener, and hand the appropriate HdkComment builder
-- to the given comment-lexing continuation.
withLexedDocType :: (AlexInput
                     -> ((HsDocStringDecorator -> HsDocString) -> (HdkComment, Token))
                     -> Bool
                     -> P (PsLocated Token))
                 -> P (PsLocated Token)
withLexedDocType lexDocComment = do
  input@(AI _ buf) <- getInput
  l <- getLastLocComment
  case prevChar buf ' ' of
    -- The `Bool` argument to lexDocComment signals whether or not the next
    -- line of input might also belong to this doc comment.
    '|' -> lexDocComment input (mkHdkCommentNext l) True
    '^' -> lexDocComment input (mkHdkCommentPrev l) True
    '$' -> case lexDocName input of
       Nothing -> do setInput input; lexToken -- eof reached, lex it normally
       Just (name, input) -> lexDocComment input (mkHdkCommentNamed l name) True
    '*' -> lexDocSection l 1 input
    _ -> panic "withLexedDocType: Bad doc type"
 where
    -- Count the '*'s to determine the section heading level.
    lexDocSection l n input = case alexGetChar' input of
      Just ('*', input) -> lexDocSection l (n+1) input
      Just (_, _) -> lexDocComment input (mkHdkCommentSection l n) False
      Nothing -> do setInput input; lexToken -- eof reached, lex it normally

    -- Read the chunk name of a "-- $name" comment (up to whitespace).
    lexDocName :: AlexInput -> Maybe (String, AlexInput)
    lexDocName = go ""
      where
        go acc input = case alexGetChar' input of
          Just (c, input')
            | isSpace c -> Just (reverse acc, input)
            | otherwise -> go (c:acc) input'
          Nothing -> Nothing

-- Builders pairing an 'HdkComment' with its 'ITdocComment' token for each
-- Haddock comment flavour.
mkHdkCommentNext, mkHdkCommentPrev :: PsSpan -> (HsDocStringDecorator -> HsDocString) -> (HdkComment, Token)
mkHdkCommentNext loc mkDS = (HdkCommentNext ds,ITdocComment ds loc)
  where ds = mkDS HsDocStringNext
mkHdkCommentPrev loc mkDS = (HdkCommentPrev ds,ITdocComment ds loc)
  where ds = mkDS HsDocStringPrevious

mkHdkCommentNamed :: PsSpan -> String -> (HsDocStringDecorator -> HsDocString) -> (HdkComment, Token)
mkHdkCommentNamed loc name mkDS = (HdkCommentNamed name ds, ITdocComment ds loc)
  where ds = mkDS (HsDocStringNamed name)

mkHdkCommentSection :: PsSpan -> Int -> (HsDocStringDecorator -> HsDocString) -> (HdkComment, Token)
mkHdkCommentSection loc n mkDS = (HdkCommentSection n ds, ITdocComment ds loc)
  where ds = mkDS (HsDocStringGroup n)

-- RULES pragmas turn on the forall and '.' keywords, and we turn them
-- off again at the end of the pragma.
rulePrag :: Action
rulePrag span buf len = do
  setExts (.|. xbit InRulePragBit)
  let !src = lexemeToString buf len
  return (L span (ITrules_prag (SourceText src)))

-- When 'UsePosPragsBit' is not set, it is expected that we emit a token instead
-- of updating the position in 'PState'
linePrag :: Action
linePrag span buf len = do
  usePosPrags <- getBit UsePosPragsBit
  if usePosPrags
    then begin line_prag2 span buf len
    else let !src = lexemeToString buf len
         in return (L span (ITline_prag (SourceText src)))

-- When 'UsePosPragsBit' is not set, it is expected that we emit a token instead
-- of updating the position in 'PState'
columnPrag :: Action
columnPrag span buf len = do
  usePosPrags <- getBit UsePosPragsBit
  -- NOTE(review): this outer `src` is always computed and then shadowed by
  -- the identical binding in the else-branch below; the outer binding looks
  -- redundant — candidate for cleanup (cf. linePrag, which has only one).
  let !src = lexemeToString buf len
  if usePosPrags
    then begin column_prag span buf len
    else let !src = lexemeToString buf len
         in return (L span (ITcolumn_prag (SourceText src)))

-- | Close a pragma: turn the RULES-pragma keywords back off (see 'rulePrag').
endPrag :: Action
endPrag span _buf _len = do
  setExts (.&. complement (xbit InRulePragBit))
  return (L span ITclose_prag)

-- docCommentEnd
-------------------------------------------------------------------------------
-- This function is quite tricky.  We can't just return a new token, we also
-- need to update the state of the parser.  Why?  Because the token is longer
-- than what was lexed by Alex, and the lexToken function doesn't know this, so
-- it writes the wrong token length to the parser state.  This function is
-- called afterwards, so it can just update the state.
{-# INLINE commentEnd #-}
-- | Finish lexing a comment: fix up the token span/length in the parser
-- state (the comment is longer than what Alex matched — see the note just
-- above), record any Haddock comment, and either emit the comment token
-- (RawTokenStream) or continue with the given continuation.
commentEnd :: P (PsLocated Token)
           -> AlexInput
           -> (Maybe HdkComment, Token)
           -> StringBuffer
           -> PsSpan
           -> P (PsLocated Token)
commentEnd cont input (m_hdk_comment, hdk_token) buf span = do
  setInput input
  let (AI loc nextBuf) = input
      span' = mkPsSpan (psSpanStart span) loc
      last_len = byteDiff buf nextBuf
  span `seq` setLastToken span' last_len
  -- Stash the Haddock comment in the parser state for later association.
  whenIsJust m_hdk_comment $ \hdk_comment ->
    P $ \s -> POk (s {hdk_comments = hdk_comments s `snocOL` L span' hdk_comment}) ()
  b <- getBit RawTokenStreamBit
  if b then return (L span' hdk_token)
       else cont

{-# INLINE docCommentEnd #-}
-- | 'commentEnd' specialised to doc comments, always recording the comment.
docCommentEnd :: AlexInput -> (HdkComment, Token) -> StringBuffer ->
                 PsSpan -> P (PsLocated Token)
docCommentEnd input (hdk_comment, tok) buf span
  = commentEnd lexToken input (Just hdk_comment, tok) buf span

-- | Fail with an unterminated-comment error at the current position.
errBrace :: AlexInput -> RealSrcSpan -> P a
errBrace (AI end _) span =
  failLocMsgP (realSrcSpanStart span)
              (psRealLoc end)
              (\srcLoc -> mkPlainErrorMsgEnvelope srcLoc (PsErrLexer LexUnterminatedComment LexErrKind_EOF))

-- Explicit braces push/pop a 'NoLayout' context so layout processing is
-- suspended inside them.
open_brace, close_brace :: Action
open_brace span _str _len = do
  ctx <- getContext
  setContext (NoLayout:ctx)
  return (L span ITocurly)
close_brace span _str _len = do
  popContext
  return (L span ITccurly)

qvarid, qconid :: StringBuffer -> Int -> Token
qvarid buf len = ITqvarid $! splitQualName buf len False
qconid buf len = ITqconid $! splitQualName buf len False

splitQualName :: StringBuffer -> Int -> Bool -> (FastString,FastString)
-- takes a StringBuffer and a length, and returns the module name
-- and identifier parts of a qualified name.  Splits at the *last* dot,
-- because of hierarchical module names.
--
-- Throws an error if the name is not qualified.
splitQualName orig_buf len parens = split orig_buf orig_buf
  where
    split buf dot_buf
        | orig_buf `byteDiff` buf >= len = done dot_buf
        | c == '.'                       = found_dot buf'
        | otherwise                      = split buf' dot_buf
      where
        (c,buf') = nextChar buf

    -- careful, we might get names like M....
    -- so, if the character after the dot is not upper-case, this is
    -- the end of the qualifier part.
    found_dot buf -- buf points after the '.'
        | isUpper c = split buf' buf
        | otherwise = done buf
      where
        (c,buf') = nextChar buf

    done dot_buf
        -- NOTE(review): message has a typo ("named" for "name"); it is a
        -- runtime string, left unchanged here.
        | qual_size < 1 = error "splitQualName got an unqualified named"
        | otherwise =
          (lexemeToFastString orig_buf (qual_size - 1),
           if parens -- Prelude.(+)
             then lexemeToFastString (stepOn dot_buf) (len - qual_size - 2)
             else lexemeToFastString dot_buf (len - qual_size))
      where
        qual_size = orig_buf `byteDiff` dot_buf

-- | Lex a varid: look it up in 'reservedWordsFM' and emit either the keyword
-- token (when its extension bitmap allows) or a plain 'ITvarid'.  The
-- @case@/@cases@ entries get special treatment for @\\case@/@\\cases@.
varid :: Action
varid span buf len =
  case lookupUFM reservedWordsFM fs of
    Just (ITcase, _) -> do
      lastTk <- getLastTk
      keyword <- case lastTk of
        Strict.Just (L _ ITlam) -> do
          -- "\ case": only valid with LambdaCase, otherwise report an error.
          lambdaCase <- getBit LambdaCaseBit
          unless lambdaCase $ do
            pState <- getPState
            addError $ mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) PsErrLambdaCase
          return ITlcase
        _ -> return ITcase
      maybe_layout keyword
      return $ L span keyword
    Just (ITlcases, _) -> do
      -- "cases" is a keyword only directly after '\' with LambdaCase on.
      lastTk <- getLastTk
      lambdaCase <- getBit LambdaCaseBit
      token <- case lastTk of
        Strict.Just (L _ ITlam) | lambdaCase -> return ITlcases
        _ -> return $ ITvarid fs
      maybe_layout token
      return $ L span token
    Just (keyword, 0) -> do
      -- Unconditional keyword.
      maybe_layout keyword
      return $ L span keyword
    Just (keyword, i) -> do
      -- Keyword only when one of the extension bits in i is on.
      exts <- getExts
      if exts .&. i /= 0
        then do
          maybe_layout keyword
          return $ L span keyword
        else
          return $ L span $ ITvarid fs
    Nothing ->
      return $ L span $ ITvarid fs
  where
    !fs = lexemeToFastString buf len

conid :: StringBuffer -> Int -> Token
conid buf len = ITconid $! lexemeToFastString buf len

qvarsym, qconsym :: StringBuffer -> Int -> Token
qvarsym buf len = ITqvarsym $! splitQualName buf len False
qconsym buf len = ITqconsym $! splitQualName buf len False

-- See Note [Whitespace-sensitive operator parsing]
varsym_prefix :: Action
varsym_prefix = sym $ \span exts s ->
  let warnExtConflict errtok =
        do { addPsMessage (mkSrcSpanPs span) (PsWarnOperatorWhitespaceExtConflict errtok)
           ; return (ITvarsym s) }
  in
  if | s == fsLit "@" -> return ITtypeApp -- regardless of TypeApplications for better error messages
     | s == fsLit "%" ->
         if xtest LinearTypesBit exts
         then return ITpercent
         else warnExtConflict OperatorWhitespaceSymbol_PrefixPercent
     | s == fsLit "$" ->
         if xtest ThQuotesBit exts
         then return ITdollar
         else warnExtConflict OperatorWhitespaceSymbol_PrefixDollar
     | s == fsLit "$$" ->
         if xtest ThQuotesBit exts
         then return ITdollardollar
         else warnExtConflict OperatorWhitespaceSymbol_PrefixDollarDollar
     | s == fsLit "-" -> return ITprefixminus -- Only when LexicalNegation is on, otherwise we get ITminus
                                              -- and don't hit this code path. See Note [Minus tokens]
     | s == fsLit ".", OverloadedRecordDotBit `xtest` exts ->
         return (ITproj True) -- e.g. '(.x)'
     | s == fsLit "." -> return ITdot
     | s == fsLit "!" -> return ITbang
     | s == fsLit "~" -> return ITtilde
     | otherwise ->
         do { addPsMessage
                (mkSrcSpanPs span)
                (PsWarnOperatorWhitespace s OperatorWhitespaceOccurrence_Prefix)
            ; return (ITvarsym s) }

-- See Note [Whitespace-sensitive operator parsing]
varsym_suffix :: Action
varsym_suffix = sym $ \span _ s ->
  if | s == fsLit "@" ->
         failMsgP (\srcLoc -> mkPlainErrorMsgEnvelope srcLoc $ PsErrSuffixAT)
     | s == fsLit "." -> return ITdot
     | otherwise ->
         do { addPsMessage
                (mkSrcSpanPs span)
                (PsWarnOperatorWhitespace s OperatorWhitespaceOccurrence_Suffix)
            ; return (ITvarsym s) }

-- See Note [Whitespace-sensitive operator parsing]
varsym_tight_infix :: Action
varsym_tight_infix = sym $ \span exts s ->
  if | s == fsLit "@" -> return ITat
     | s == fsLit ".", OverloadedRecordDotBit `xtest` exts -> return (ITproj False)
     | s == fsLit "." -> return ITdot
     | otherwise ->
         do { addPsMessage
                (mkSrcSpanPs span)
                (PsWarnOperatorWhitespace s (OperatorWhitespaceOccurrence_TightInfix))
            ; return (ITvarsym s) }

-- See Note [Whitespace-sensitive operator parsing]
varsym_loose_infix :: Action
varsym_loose_infix = sym $ \_ _ s ->
  if | s == fsLit "." -> return ITdot
     | otherwise -> return $ ITvarsym s

consym :: Action
consym = sym (\_span _exts s -> return $ ITconsym s)

-- | Lex an operator: consult 'reservedSymsFM' and emit the reserved token
-- when its syntax kind (normal/Unicode) and extension bitmap allow;
-- otherwise defer to the supplied continuation (one of the varsym_* above).
sym :: (PsSpan -> ExtsBitmap -> FastString -> P Token) -> Action
sym con span buf len =
  case lookupUFM reservedSymsFM fs of
    Just (keyword, NormalSyntax, 0) -> do
      exts <- getExts
      -- NOTE(review): `exts .&. (xbit OverloadedRecordDotBit) /= 0` and
      -- `xtest OverloadedRecordDotBit exts` are the same test written twice;
      -- one conjunct looks redundant.
      if fs == fsLit "." &&
         exts .&. (xbit OverloadedRecordDotBit) /= 0 &&
         xtest OverloadedRecordDotBit exts
      then L span <$!> con span exts fs -- Process by varsym_*.
      else return $ L span keyword
    Just (keyword, NormalSyntax, i) -> do
      exts <- getExts
      if exts .&. i /= 0
        then return $ L span keyword
        else L span <$!> con span exts fs
    Just (keyword, UnicodeSyntax, 0) -> do
      exts <- getExts
      if xtest UnicodeSyntaxBit exts
        then return $ L span keyword
        else L span <$!> con span exts fs
    Just (keyword, UnicodeSyntax, i) -> do
      exts <- getExts
      if exts .&. i /= 0 && xtest UnicodeSyntaxBit exts
        then return $ L span keyword
        else L span <$!> con span exts fs
    Nothing -> do
      exts <- getExts
      L span <$!> con span exts fs
  where
    !fs = lexemeToFastString buf len

-- Variations on the integral numeric literal.
tok_integral :: (SourceText -> Integer -> Token)
             -> (Integer -> Integer)
             -> Int -> Int
             -> (Integer, (Char -> Int))
             -> Action
tok_integral itint transint transbuf translen (radix,char_to_int) span buf len = do
  numericUnderscores <- getBit NumericUnderscoresBit -- #14473
  let src = lexemeToString buf len
  when ((not numericUnderscores) && ('_' `elem` src)) $ do
    pState <- getPState
    let msg = PsErrNumUnderscores NumUnderscore_Integral
    addError $ mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) msg
  return $ L span $ itint (SourceText src)
       $! transint $ parseUnsignedInteger
       (offsetBytes transbuf buf) (subtract translen len) radix char_to_int

-- | Decimal/negative integer literal; negativity is derived from the source
-- text when available, otherwise from the sign of the value.
tok_num :: (Integer -> Integer)
        -> Int -> Int
        -> (Integer, (Char->Int)) -> Action
tok_num = tok_integral $ \case
    st@(SourceText ('-':_)) -> itint st (const True)
    st@(SourceText _)       -> itint st (const False)
    st@NoSourceText         -> itint st (< 0)
  where
    itint :: SourceText -> (Integer -> Bool) -> Integer -> Token
    itint !st is_negative !val = ITinteger ((IL st $! is_negative val) val)

tok_primint :: (Integer -> Integer)
            -> Int -> Int
            -> (Integer, (Char->Int)) -> Action
tok_primint = tok_integral ITprimint

tok_primword :: Int -> Int
             -> (Integer, (Char->Int)) -> Action
tok_primword = tok_integral ITprimword positive

positive, negative :: (Integer -> Integer)
positive = id
negative = negate

-- Radix/digit-decoder pairs for 'tok_integral' ('binary' has no separate
-- signature in the original source).
decimal, octal, hexadecimal :: (Integer, Char -> Int)
decimal = (10,octDecDigit)
binary = (2,octDecDigit)
octal = (8,octDecDigit)
hexadecimal = (16,hexDigit)

-- readSignificandExponentPair can understand negative rationals, exponents, everything.
tok_frac :: Int -> (String -> Token) -> Action
tok_frac drop f span buf len = do
  numericUnderscores <- getBit NumericUnderscoresBit -- #14473
  let src = lexemeToString buf (len-drop)
  when ((not numericUnderscores) && ('_' `elem` src)) $ do
    pState <- getPState
    let msg = PsErrNumUnderscores NumUnderscore_Float
    addError $ mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) msg
  return (L span $! (f $! src))

-- ('tok_hex_float' has no separate signature in the original source.)
tok_float, tok_primfloat, tok_primdouble :: String -> Token
tok_float str = ITrational $! readFractionalLit str
tok_hex_float str = ITrational $! readHexFractionalLit str
tok_primfloat str = ITprimfloat $! readFractionalLit str
tok_primdouble str = ITprimdouble $! readFractionalLit str

readFractionalLit, readHexFractionalLit :: String -> FractionalLit
readHexFractionalLit = readFractionalLitX readHexSignificandExponentPair Base2
readFractionalLit = readFractionalLitX readSignificandExponentPair Base10

-- | Build a 'FractionalLit' from source text using the given
-- significand/exponent reader and exponent base.
readFractionalLitX :: (String -> (Integer, Integer))
                   -> FractionalExponentBase
                   -> String -> FractionalLit
readFractionalLitX readStr b str =
  mkSourceFractionalLit str is_neg i e b
  where
    is_neg = case str of
               '-' : _ -> True
               _       -> False
    (i, e) = readStr str

-- -----------------------------------------------------------------------------
-- Layout processing

-- we're at the first token on a line, insert layout tokens if necessary
do_bol :: Action
do_bol span _str _len = do
        -- See Note [Nested comment line pragmas]
        b <- getBit InNestedCommentBit
        if b then return (L span ITcomment_line_prag) else do
          (pos, gen_semic) <- getOffside
          case pos of
              LT -> do
                  --trace "layout: inserting '}'" $ do
                  popContext
                  -- do NOT pop the lex state, we might have a ';' to insert
                  return (L span ITvccurly)
              EQ | gen_semic -> do
                  --trace "layout: inserting ';'" $ do
                  _ <- popLexState
                  return (L span ITsemi)
              _ -> do
                  _ <- popLexState
                  lexToken

-- certain keywords put us in the "layout" state, where we might
-- add an opening curly brace.
maybe_layout :: Token -> P ()
maybe_layout t = do -- If the alternative layout rule is enabled then
                    -- we never create an implicit layout context here.
                    -- Layout is handled XXX instead.
                    -- The code for closing implicit contexts, or
                    -- inserting implicit semi-colons, is therefore
                    -- irrelevant as it only applies in an implicit
                    -- context.
                    alr <- getBit AlternativeLayoutRuleBit
                    unless alr $ f t
    where f (ITdo _)  = pushLexState layout_do
          f (ITmdo _) = pushLexState layout_do
          f ITof      = pushLexState layout
          f ITlcase   = pushLexState layout
          f ITlcases  = pushLexState layout
          f ITlet     = pushLexState layout
          f ITwhere   = pushLexState layout
          f ITrec     = pushLexState layout
          f ITif      = pushLexState layout_if
          f _         = return ()

-- Pushing a new implicit layout context.  If the indentation of the
-- next token is not greater than the previous layout context, then
-- Haskell 98 says that the new layout context should be empty; that is
-- the lexer must generate {}.
--
-- We are slightly more lenient than this: when the new context is started
-- by a 'do', then we allow the new context to be at the same indentation as
-- the previous context.  This is what the 'strict' argument is for.
new_layout_context :: Bool -> Bool -> Token -> Action
new_layout_context strict gen_semic tok span _buf len = do
    _ <- popLexState
    (AI l _) <- getInput
    let offset = srcLocCol (psRealLoc l) - len
    ctx <- getContext
    nondecreasing <- getBit NondecreasingIndentationBit
    let strict' = strict || not nondecreasing
    case ctx of
        Layout prev_off _ : _ |
           (strict'     && prev_off >= offset ||
            not strict' && prev_off >  offset) -> do
                -- token is indented to the left of the previous context.
                -- we must generate a {} sequence now.
                pushLexState layout_left
                return (L span tok)
        _ -> do setContext (Layout offset gen_semic : ctx)
                return (L span tok)

-- | Emit the implicit close brace scheduled by 'new_layout_context'.
do_layout_left :: Action
do_layout_left span _buf _len = do
    _ <- popLexState
    pushLexState bol -- we must be at the start of a line
    return (L span ITvccurly)

-- -----------------------------------------------------------------------------
-- LINE pragmas

-- | Apply a #line pragma: update the recorded source location (line and
-- unescaped file name) and continue lexing in the given start code.
setLineAndFile :: Int -> Action
setLineAndFile code (PsSpan span _) buf len = do
  let src = lexemeToString buf (len - 1) -- drop trailing quotation mark
      linenumLen = length $ head $ words src
      linenum = parseUnsignedInteger buf linenumLen 10 octDecDigit
      file = mkFastString $ go $ drop 1 $ dropWhile (/= '"') src
          -- skip everything through first quotation mark to get to the filename
        where go ('\\':c:cs) = c : go cs
              go (c:cs)      = c : go cs
              go []          = []
              -- decode escapes in the filename.  e.g. on Windows
              -- when our filenames have backslashes in, gcc seems to
              -- escape the backslashes.  One symptom of not doing this
              -- is that filenames in error messages look a bit strange:
              --   C:\\foo\bar.hs
              -- only the first backslash is doubled, because we apply
              -- System.FilePath.normalise before printing out
              -- filenames and it does not remove duplicate
              -- backslashes after the drive letter (should it?).
  resetAlrLastLoc file
  setSrcLoc (mkRealSrcLoc file (fromIntegral linenum - 1) (srcSpanEndCol span))
      -- subtract one: the line number refers to the *following* line
  addSrcFile file
  _ <- popLexState
  pushLexState code
  lexToken

-- | Apply a #column pragma: reset the recorded column.
setColumn :: Action
setColumn (PsSpan span _) buf len = do
  let column =
        case reads (lexemeToString buf len) of
          [(column, _)] -> column
          _ -> error "setColumn: expected integer" -- shouldn't happen
  setSrcLoc (mkRealSrcLoc (srcSpanFile span) (srcSpanEndLine span)
                          (fromIntegral (column :: Integer)))
  _ <- popLexState
  lexToken

alrInitialLoc :: FastString -> RealSrcSpan
alrInitialLoc file = mkRealSrcSpan loc loc
    where -- This is a hack to ensure that the first line in a file
          -- looks like it is after the initial location:
          loc = mkRealSrcLoc file (-1) (-1)

-- -----------------------------------------------------------------------------
-- Options, includes and language pragmas.

-- | Lex the body of an OPTIONS/INCLUDE/LANGUAGE-style pragma up to "#-}".
lex_string_prag :: (String -> Token) -> Action
lex_string_prag mkTok = lex_string_prag_comment mkTok'
  where
    mkTok' s _ = mkTok s

-- | As 'lex_string_prag', but the token builder also receives the span of
-- the last comment location.
lex_string_prag_comment :: (String -> PsSpan -> Token) -> Action
lex_string_prag_comment mkTok span _buf _len
    = do input <- getInput
         start <- getParsedLoc
         l <- getLastLocComment
         tok <- go l [] input
         end <- getParsedLoc
         return (L (mkPsSpan start end) tok)
    where go l acc input
              = if isString input "#-}"
                   then do setInput input
                           return (mkTok (reverse acc) l)
                   else case alexGetChar input of
                          Just (c,i) -> go l (c:acc) i
                          Nothing -> err input
          -- Does the input start with the given string?
          isString _ [] = True
          isString i (x:xs)
              = case alexGetChar i of
                  Just (c,i') | c == x -> isString i' xs
                  _other -> False
          err (AI end _) = failLocMsgP (realSrcSpanStart (psRealSpan span))
                                       (psRealLoc end)
                                       (\srcLoc -> mkPlainErrorMsgEnvelope srcLoc $ PsErrLexer LexUnterminatedOptions LexErrKind_EOF)

-- -----------------------------------------------------------------------------
-- Strings & Chars

-- This stuff is horrible.  I hates it.
-- Lex the body of a string literal whose opening quote has already been
-- consumed, then re-wrap the resulting token with the literal's original
-- source text (taken from 'buf' up to the current input position).
lex_string_tok :: Action
lex_string_tok span buf _len = do
  tok <- lex_string ""
  (AI end bufEnd) <- getInput
  let
    tok' = case tok of
            ITprimstring _ bs -> ITprimstring (SourceText src) bs
            ITstring _ s -> ITstring (SourceText src) s
            _ -> panic "lex_string_tok"
    src = lexemeToString buf (cur bufEnd - cur buf)
  return (L (mkPsSpan (psSpanStart span) end) tok')

-- Accumulate the characters of a string literal (in reverse) until the
-- closing quote, handling escapes and string gaps.  With MagicHash enabled,
-- a trailing '#' produces an 'ITprimstring' instead of an 'ITstring'.
lex_string :: String -> P Token
lex_string s = do
  i <- getInput
  case alexGetChar' i of
    Nothing -> lit_error i

    Just ('"',i)  -> do
        setInput i
        let s' = reverse s
        magicHash <- getBit MagicHashBit
        if magicHash
          then do
            i <- getInput
            case alexGetChar' i of
              Just ('#',i) -> do
                setInput i
                -- Primitive strings are byte arrays: report characters above
                -- '\xFF' as an error, but still build the token.
                when (any (> '\xFF') s') $ do
                  pState <- getPState
                  let msg = PsErrPrimStringInvalidChar
                  let err = mkPlainErrorMsgEnvelope (mkSrcSpanPs (last_loc pState)) msg
                  addError err
                return (ITprimstring (SourceText s') (unsafeMkByteString s'))
              _other ->
                return (ITstring (SourceText s') (mkFastString s'))
          else
            return (ITstring (SourceText s') (mkFastString s'))

    Just ('\\',i)
        | Just ('&',i) <- next -> do
                setInput i; lex_string s
        | Just (c,i) <- next, c <= '\x7f' && is_space c -> do
                -- is_space only works for <= '\x7f' (#3751, #5425)
                setInput i; lex_stringgap s
        where next = alexGetChar' i

    Just (c, i1) -> do
        case c of
          '\\' -> do setInput i1; c' <- lex_escape; lex_string (c':s)
          c | isAny c -> do setInput i1; lex_string (c:s)
          _other -> lit_error i

-- Skip the whitespace of a string gap; a backslash ends the gap and resumes
-- ordinary string lexing.
lex_stringgap :: String -> P Token
lex_stringgap s = do
  i <- getInput
  c <- getCharOrFail i
  case c of
    '\\' -> lex_string s
    c | c <= '\x7f' && is_space c -> lex_stringgap s
        -- is_space only works for <= '\x7f' (#3751, #5425)
    _other -> lit_error i


lex_char_tok :: Action
-- Here we are basically parsing character literals, such as 'x' or '\n'
-- but we additionally spot 'x and ''T, returning ITsimpleQuote and
-- ITtyQuote respectively, but WITHOUT CONSUMING the x or T part
-- (the parser does that).
-- So we have to do two characters of lookahead: when we see 'x we need to
-- see if there's a trailing quote
lex_char_tok span buf _len = do        -- We've seen '
   i1 <- getInput       -- Look ahead to first character
   let loc = psSpanStart span
   case alexGetChar' i1 of
        Nothing -> lit_error  i1

        Just ('\'', i2@(AI end2 _)) -> do       -- We've seen ''
                   setInput i2
                   return (L (mkPsSpan loc end2)  ITtyQuote)

        Just ('\\', i2@(AI _end2 _)) -> do      -- We've seen 'backslash
                  setInput i2
                  lit_ch <- lex_escape
                  i3 <- getInput
                  mc <- getCharOrFail i3 -- Trailing quote
                  if mc == '\'' then finish_char_tok buf loc lit_ch
                                else lit_error i3

        Just (c, i2@(AI _end2 _))
                | not (isAny c) -> lit_error i1
                | otherwise ->
                -- We've seen 'x, where x is a valid character
                --  (i.e. not newline etc) but not a quote or backslash
           case alexGetChar' i2 of      -- Look ahead one more character
                Just ('\'', i3) -> do   -- We've seen 'x'
                        setInput i3
                        finish_char_tok buf loc c
                _other -> do            -- We've seen 'x not followed by quote
                                        -- (including the possibility of EOF)
                                        -- Just parse the quote only
                        let (AI end _) = i1
                        return (L (mkPsSpan loc end) ITsimpleQuote)

finish_char_tok :: StringBuffer -> PsLoc -> Char -> P (PsLocated Token)
finish_char_tok buf loc ch  -- We've already seen the closing quote
                        -- Just need to check for trailing #
  = do  magicHash <- getBit MagicHashBit
        i@(AI end bufEnd) <- getInput
        let src = lexemeToString buf (cur bufEnd - cur buf)
        if magicHash then do
            case alexGetChar' i of
              Just ('#',i@(AI end _)) -> do
                setInput i
                return (L (mkPsSpan loc end)
                          (ITprimchar (SourceText src) ch))
              _other ->
                return (L (mkPsSpan loc end)
                          (ITchar (SourceText src) ch))
          else do
            return (L (mkPsSpan loc end) (ITchar (SourceText src) ch))

-- Is this character acceptable inside a character/string literal?
-- Above the ASCII range we accept any printable character.
isAny :: Char -> Bool
isAny c | c > '\x7f' = isPrint c
        | otherwise  = is_any c

-- Lex one escape sequence (the backslash has already been consumed):
-- single-character escapes, '^' control escapes, numeric escapes
-- (hex/octal/decimal), and the named ASCII control abbreviations.
lex_escape :: P Char
lex_escape = do
  i0 <- getInput
  c <- getCharOrFail i0
  case c of
        'a'   -> return '\a'
        'b'   -> return '\b'
        'f'   -> return '\f'
        'n'   -> return '\n'
        'r'   -> return '\r'
        't'   -> return '\t'
        'v'   -> return '\v'
        '\\'  -> return '\\'
        '"'   -> return '\"'
        '\''  -> return '\''
        '^'   -> do i1 <- getInput
                    c <- getCharOrFail i1
                    if c >= '@' && c <= '_'
                        then return (chr (ord c - ord '@'))
                        else lit_error i1

        'x'   -> readNum is_hexdigit 16 hexDigit
        'o'   -> readNum is_octdigit  8 octDecDigit
        x | is_decdigit x -> readNum2 is_decdigit 10 octDecDigit (octDecDigit x)

        c1 ->  do
           i <- getInput
           case alexGetChar' i of
            Nothing -> lit_error i0
            Just (c2,i2) ->
              case alexGetChar' i2 of
                Nothing -> do lit_error i0
                Just (c3,i3) ->
                   -- Try to match a named control-character escape such as
                   -- "NUL" or "SO"; prefer the longest (three-letter) match,
                   -- falling back to a two-letter prefix match.
                   let str = [c1,c2,c3] in
                   case [ (c,rest) | (p,c) <- silly_escape_chars,
                                     Just rest <- [stripPrefix p str] ] of
                          (escape_char,[]):_ -> do
                                setInput i3
                                return escape_char
                          (escape_char,_:_):_ -> do
                                setInput i2
                                return escape_char
                          [] -> lit_error i0

-- Read a numeric escape: at least one digit in the given base, folded by
-- 'readNum2'.
readNum :: (Char -> Bool) -> Int -> (Char -> Int) -> P Char
readNum is_digit base conv = do
  i <- getInput
  c <- getCharOrFail i
  if is_digit c
        then readNum2 is_digit base conv (conv c)
        else lit_error i

-- Continue reading digits of a numeric escape, accumulating the value;
-- values beyond the maximum code point (0x10ffff) are a lexical error.
readNum2 :: (Char -> Bool) -> Int -> (Char -> Int) -> Int -> P Char
readNum2 is_digit base conv i = do
  input <- getInput
  read i input
  where read i input = do
          case alexGetChar' input of
            Just (c,input') | is_digit c -> do
               let i' = i*base + conv c
               if i' > 0x10ffff
                  then setInput input >> lexError LexNumEscapeRange
                  else read i' input'
            _other -> do
              setInput input; return (chr i)

-- The named ASCII control-character escapes, in lookup order.
silly_escape_chars :: [(String, Char)]
silly_escape_chars = [
        ("NUL", '\NUL'),
        ("SOH", '\SOH'),
        ("STX", '\STX'),
        ("ETX", '\ETX'),
        ("EOT", '\EOT'),
        ("ENQ", '\ENQ'),
        ("ACK", '\ACK'),
        ("BEL", '\BEL'),
        ("BS", '\BS'),
        ("HT", '\HT'),
        ("LF", '\LF'),
        ("VT", '\VT'),
        ("FF", '\FF'),
        ("CR", '\CR'),
        ("SO", '\SO'),
        ("SI", '\SI'),
        ("DLE", '\DLE'),
        ("DC1", '\DC1'),
        ("DC2", '\DC2'),
        ("DC3", '\DC3'),
        ("DC4", '\DC4'),
        ("NAK", '\NAK'),
        ("SYN", '\SYN'),
        ("ETB", '\ETB'),
        ("CAN", '\CAN'),
        ("EM", '\EM'),
        ("SUB", '\SUB'),
        ("ESC", '\ESC'),
        ("FS", '\FS'),
        ("GS", '\GS'),
        ("RS", '\RS'),
        ("US", '\US'),
        ("SP", '\SP'),
        ("DEL", '\DEL')
        ]

-- before calling lit_error, ensure that the current input is pointing to
-- the position of the error in the buffer.  This is so that we can report
-- a correct location to the user, but also so we can detect UTF-8 decoding
-- errors if they occur.
lit_error :: AlexInput -> P a
lit_error i = do setInput i; lexError LexStringCharLit

-- Fetch one character from the given input (committing it with 'setInput'),
-- or fail with an EOF-in-literal error.
getCharOrFail :: AlexInput -> P Char
getCharOrFail i =  do
        case alexGetChar' i of
           Nothing -> lexError LexStringCharLitEOF
           Just (c,i)  -> do setInput i; return c

-- -----------------------------------------------------------------------------
-- QuasiQuote

-- Lex a qualified quasiquote, e.g. [Mod.q| ... |]; the qualifier and quoter
-- name are split out of the lexeme, and the body is gathered by
-- 'lex_quasiquote'.
lex_qquasiquote_tok :: Action
lex_qquasiquote_tok span buf len = do
  let (qual, quoter) = splitQualName (stepOn buf) (len - 2) False
  quoteStart <- getParsedLoc
  quote <- lex_quasiquote (psRealLoc quoteStart) ""
  end <- getParsedLoc
  return (L (mkPsSpan (psSpanStart span) end)
           (ITqQuasiQuote (qual,
                           quoter,
                           mkFastString (reverse quote),
                           mkPsSpan quoteStart end)))

-- Lex an unqualified quasiquote, e.g. [q| ... |].
lex_quasiquote_tok :: Action
lex_quasiquote_tok span buf len = do
  let quoter = tail (lexemeToString buf (len - 1))
                -- 'tail' drops the initial '[',
                -- while the -1 drops the trailing '|'
  quoteStart <- getParsedLoc
  quote <- lex_quasiquote (psRealLoc quoteStart) ""
  end <- getParsedLoc
  return (L (mkPsSpan (psSpanStart span) end)
           (ITquasiQuote (mkFastString quoter,
                          mkFastString (reverse quote),
                          mkPsSpan quoteStart end)))

-- Accumulate the body of a quasiquote (in reverse) up to the closing "|]".
lex_quasiquote :: RealSrcLoc -> String -> P String
lex_quasiquote start s = do
  i <- getInput
  case alexGetChar' i of
    Nothing -> quasiquote_error start

    -- NB: The string "|]" terminates the quasiquote,
    -- with absolutely no escaping. See the extensive
    -- discussion on #5348 for why there is no
    -- escape handling.
    Just ('|',i)
        | Just (']',i) <- alexGetChar' i
        -> do { setInput i; return s }

    Just (c, i) -> do
         setInput i; lex_quasiquote start (c : s)

-- Report an unterminated quasiquote, spanning from its start to the current
-- end of input.
quasiquote_error :: RealSrcLoc -> P a
quasiquote_error start = do
  (AI end buf) <- getInput
  reportLexError start (psRealLoc end) buf
    (\k srcLoc -> mkPlainErrorMsgEnvelope srcLoc (PsErrLexer LexUnterminatedQQ k))

-- -----------------------------------------------------------------------------
-- Warnings

-- Record a tab-character warning for this span, then continue lexing.
warnTab :: Action
warnTab srcspan _buf _len = do
    addTabWarning (psRealSpan srcspan)
    lexToken

-- Emit the given warning at the token's span, then run the wrapped action.
warnThen :: PsMessage -> Action -> Action
warnThen warning action srcspan buf len = do
    addPsMessage (RealSrcSpan (psRealSpan srcspan) Strict.Nothing) warning
    action srcspan buf len

-- -----------------------------------------------------------------------------
-- The Parse Monad

-- | Do we want to generate ';' layout tokens? In some cases we just want to
-- generate '}', e.g. in MultiWayIf we don't need ';'s because '|' separates
-- alternatives (unlike a `case` expression where we need ';' to as a separator
-- between alternatives).
type GenSemic = Bool

generateSemic, dontGenerateSemic :: GenSemic
generateSemic = True
dontGenerateSemic = False

data LayoutContext
  = NoLayout
  | Layout !Int !GenSemic
  deriving Show

-- | The result of running a parser.
newtype ParseResult a = PR (# (# PState, a #) | PState #)

-- | The parser has consumed a (possibly empty) prefix of the input and produced
-- a result. Use 'getPsMessages' to check for accumulated warnings and non-fatal
-- errors.
--
-- The carried parsing state can be used to resume parsing.
pattern POk :: PState -> a -> ParseResult a
pattern POk s a = PR (# (# s , a #) | #)

-- | The parser has consumed a (possibly empty) prefix of the input and failed.
--
-- The carried parsing state can be used to resume parsing. It is the state
-- right before failure, including the fatal parse error. 'getPsMessages' and
-- 'getPsErrorMessages' must return a non-empty bag of errors.
pattern PFailed :: PState -> ParseResult a
pattern PFailed s = PR (# | s #)

{-# COMPLETE POk, PFailed #-}

-- | Test whether a 'WarningFlag' is set
warnopt :: WarningFlag -> ParserOpts -> Bool
warnopt f options = f `EnumSet.member` pWarningFlags options

-- | Parser options.
--
-- See 'mkParserOpts' to construct this.
data ParserOpts = ParserOpts
  { pExtsBitmap     :: !ExtsBitmap -- ^ bitmap of permitted extensions
  , pDiagOpts       :: !DiagOpts
    -- ^ Options to construct diagnostic messages.
  , pSupportedExts  :: [String] -- ^ supported extensions (only used for suggestions in error messages)
  }

-- | The warning flags in force, taken from the diagnostic options.
pWarningFlags :: ParserOpts -> EnumSet WarningFlag
pWarningFlags opts = diag_warning_flags (pDiagOpts opts)

-- | Haddock comment as produced by the lexer. These are accumulated in 'PState'
-- and then processed in "GHC.Parser.PostProcess.Haddock". The location of the
-- 'HsDocString's spans over the contents of the docstring - i.e. it does not
-- include the decorator ("-- |", "{-|" etc.)
data HdkComment
  = HdkCommentNext HsDocString
  | HdkCommentPrev HsDocString
  | HdkCommentNamed String HsDocString
  | HdkCommentSection Int HsDocString
  deriving Show

-- | The full state threaded through the lexer and parser.
data PState = PState {
        buffer     :: StringBuffer,
        options    :: ParserOpts,
        warnings   :: Messages PsMessage,
        errors     :: Messages PsMessage,
        tab_first  :: Strict.Maybe RealSrcSpan,
                                 -- pos of first tab warning in the file
        tab_count  :: !Word,     -- number of tab warnings in the file
        last_tk    :: Strict.Maybe (PsLocated Token), -- last non-comment token
        prev_loc   :: PsSpan,    -- pos of previous token, including comments,
        prev_loc2  :: PsSpan,    -- pos of two back token, including comments,
                                 -- see Note [PsSpan in Comments]
        last_loc   :: PsSpan,    -- pos of current token
        last_len   :: !Int,      -- len of current token
        loc        :: PsLoc,     -- current loc (end of prev token + 1)
        context    :: [LayoutContext],
        lex_state  :: [Int],
        srcfiles   :: [FastString],
        -- Used in the alternative layout rule:
        -- These tokens are the next ones to be sent out. They are
        -- just blindly emitted, without the rule looking at them again:
        alr_pending_implicit_tokens :: [PsLocated Token],
        -- This is the next token to be considered or, if it is Nothing,
        -- we need to get the next token from the input stream:
        alr_next_token :: Maybe (PsLocated Token),
        -- This is what we consider to be the location of the last token
        -- emitted:
        alr_last_loc :: PsSpan,
        -- The stack of layout contexts:
        alr_context :: [ALRContext],
        -- Are we expecting a '{'? If it's Just, then the ALRLayout tells
        -- us what sort of layout the '{' will open:
        alr_expecting_ocurly :: Maybe ALRLayout,
        -- Have we just had the '}' for a let block? If so, than an 'in'
        -- token doesn't need to close anything:
        alr_justClosedExplicitLetBlock :: Bool,

        -- The next three are used to implement Annotations giving the
        -- locations of 'noise' tokens in the source, so that users of
        -- the GHC API can do source to source conversions.
        -- See Note [exact print annotations] in GHC.Parser.Annotation
        eof_pos :: Strict.Maybe (Strict.Pair RealSrcSpan RealSrcSpan),
                -- pos, gap to prior token
        header_comments :: Strict.Maybe [LEpaComment],
        comment_q :: [LEpaComment],

        -- Haddock comments accumulated in ascending order of their location
        -- (BufPos). We use OrdList to get O(1) snoc.
        --
        -- See Note [Adding Haddock comments to the syntax tree] in
        -- GHC.Parser.PostProcess.Haddock
        hdk_comments :: OrdList (PsLocated HdkComment)
     }

-- last_loc and last_len are used when generating error messages,
-- and in pushCurrentContext only.  Sigh, if only Happy passed the
-- current token to happyError, we could at least get rid of last_len.
-- Getting rid of last_loc would require finding another way to
-- implement pushCurrentContext (which is only called from one place).

-- AZ question: setLastToken which sets last_loc and last_len
-- is called when processing AlexToken, immediately prior to
-- calling the action in the token.  So from the perspective
-- of the action, it is the *current* token.  Do I understand
-- correctly?

data ALRContext = ALRNoLayout Bool{- does it contain commas? -}
                              Bool{- is it a 'let' block? -}
                | ALRLayout ALRLayout Int
data ALRLayout = ALRLayoutLet
               | ALRLayoutWhere
               | ALRLayoutOf
               | ALRLayoutDo

-- | The parsing monad, isomorphic to @StateT PState Maybe@.
newtype P a = P { unP :: PState -> ParseResult a }

instance Functor P where
  fmap = liftM

instance Applicative P where
  pure = returnP
  (<*>) = ap

instance Monad P where
  (>>=) = thenP

returnP :: a -> P a
returnP a = a `seq` (P $ \s -> POk s a)

-- Sequencing for 'P': failure short-circuits, carrying the failing state.
thenP :: P a -> (a -> P b) -> P b
(P m) `thenP` k = P $ \ s ->
        case m s of
                POk s1 a   -> (unP (k a)) s1
                PFailed s1 -> PFailed s1

-- Fail fatally at the span of the last token.
failMsgP :: (SrcSpan -> MsgEnvelope PsMessage) -> P a
failMsgP f = do
  pState <- getPState
  addFatalError (f (mkSrcSpanPs (last_loc pState)))

-- Fail fatally over an explicit location range.
failLocMsgP :: RealSrcLoc -> RealSrcLoc -> (SrcSpan -> MsgEnvelope PsMessage) -> P a
failLocMsgP loc1 loc2 f =
  addFatalError (f (RealSrcSpan (mkRealSrcSpan loc1 loc2) Strict.Nothing))

getPState :: P PState
getPState = P $ \s -> POk s s

getExts :: P ExtsBitmap
getExts = P $ \s -> POk s (pExtsBitmap . options $ s)

setExts :: (ExtsBitmap -> ExtsBitmap) -> P ()
setExts f = P $ \s -> POk s {
  options =
    let p = options s
    in  p { pExtsBitmap = f (pExtsBitmap p) }
  } ()

-- Overwrite the real source location while keeping the buffer position
-- (used by LINE/COLUMN pragmas).
setSrcLoc :: RealSrcLoc -> P ()
setSrcLoc new_loc =
  P $ \s@(PState{ loc = PsLoc _ buf_loc }) ->
  POk s{ loc = PsLoc new_loc buf_loc } ()

getRealSrcLoc :: P RealSrcLoc
getRealSrcLoc = P $ \s@(PState{ loc=loc }) -> POk s (psRealLoc loc)

getParsedLoc :: P PsLoc
getParsedLoc  = P $ \s@(PState{ loc=loc }) -> POk s loc

addSrcFile :: FastString -> P ()
addSrcFile f = P $ \s -> POk s{ srcfiles = f : srcfiles s } ()

setEofPos :: RealSrcSpan -> RealSrcSpan -> P ()
setEofPos span gap = P $ \s -> POk s{ eof_pos = Strict.Just (span `Strict.And` gap) } ()

setLastToken :: PsSpan -> Int -> P ()
setLastToken loc len = P $ \s -> POk s {
  last_loc=loc,
  last_len=len
  } ()

-- Record the last non-comment token, shifting the previous-location pair.
setLastTk :: PsLocated Token -> P ()
setLastTk tk@(L l _) = P $ \s -> POk s { last_tk = Strict.Just tk
                                       , prev_loc = l
                                       , prev_loc2 = prev_loc s} ()

-- Record a comment token's location only (does not touch 'last_tk').
setLastComment :: PsLocated Token -> P ()
setLastComment (L l _) = P $ \s -> POk s { prev_loc = l
                                         , prev_loc2 = prev_loc s} ()

getLastTk :: P (Strict.Maybe (PsLocated Token))
getLastTk = P $ \s@(PState { last_tk = last_tk }) -> POk s last_tk

-- see Note [PsSpan in Comments]
getLastLocComment :: P PsSpan
getLastLocComment = P $ \s@(PState { prev_loc = prev_loc }) -> POk s prev_loc

-- see Note [PsSpan in Comments]
getLastLocEof :: P PsSpan
getLastLocEof = P $ \s@(PState { prev_loc2 = prev_loc2 }) -> POk s prev_loc2

getLastLoc :: P PsSpan
getLastLoc = P $ \s@(PState { last_loc = last_loc }) -> POk s last_loc

data AlexInput = AI !PsLoc !StringBuffer

{- Note [Unicode in Alex]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Although newer versions of Alex support unicode, this grammar is processed with
the old style '--latin1' behaviour. This means that when implementing the
functions

    alexGetByte :: AlexInput -> Maybe (Word8,AlexInput)
    alexInputPrevChar :: AlexInput -> Char

which Alex uses to take apart our 'AlexInput', we must

  * return a latin1 character in the 'Word8' that 'alexGetByte' expects
  * return a latin1 character in 'alexInputPrevChar'.

We handle this in 'adjustChar' by squishing entire classes of unicode
characters into single bytes.
-}

{-# INLINE adjustChar #-}
adjustChar :: Char -> Word8
adjustChar c = fromIntegral $ ord adj_c
  where non_graphic     = '\x00'
        upper           = '\x01'
        lower           = '\x02'
        digit           = '\x03'
        symbol          = '\x04'
        space           = '\x05'
        other_graphic   = '\x06'
        uniidchar       = '\x07'

        adj_c
          | c <= '\x07' = non_graphic
          | c <= '\x7f' = c
          -- Alex doesn't handle Unicode, so when Unicode
          -- character is encountered we output these values
          -- with the actual character value hidden in the state.
          | otherwise =
                -- NB: The logic behind these definitions is also reflected
                -- in "GHC.Utils.Lexeme"
                -- Any changes here should likely be reflected there.

                case generalCategory c of
                  UppercaseLetter       -> upper
                  LowercaseLetter       -> lower
                  TitlecaseLetter       -> upper
                  ModifierLetter        -> uniidchar -- see #10196
                  OtherLetter           -> lower -- see #1103
                  NonSpacingMark        -> uniidchar -- see #7650
                  SpacingCombiningMark  -> other_graphic
                  EnclosingMark         -> other_graphic
                  DecimalNumber         -> digit
                  LetterNumber          -> digit
                  OtherNumber           -> digit -- see #4373
                  ConnectorPunctuation  -> symbol
                  DashPunctuation       -> symbol
                  OpenPunctuation       -> other_graphic
                  ClosePunctuation      -> other_graphic
                  InitialQuote          -> other_graphic
                  FinalQuote            -> other_graphic
                  OtherPunctuation      -> symbol
                  MathSymbol            -> symbol
                  CurrencySymbol        -> symbol
                  ModifierSymbol        -> symbol
                  OtherSymbol           -> symbol
                  Space                 -> space
                  _other                -> non_graphic

-- Getting the previous 'Char' isn't enough here - we need to convert it into
-- the same format that 'alexGetByte' would have produced.
--
-- See Note [Unicode in Alex] and #13986.
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (AI _ buf) = chr (fromIntegral (adjustChar pc))
  where pc = prevChar buf '\n'

-- backwards compatibility for Alex 2.x
alexGetChar :: AlexInput -> Maybe (Char,AlexInput)
alexGetChar inp = case alexGetByte inp of
                    Nothing    -> Nothing
                    Just (b,i) -> c `seq` Just (c,i)
                       where c = chr $ fromIntegral b

-- See Note [Unicode in Alex]
alexGetByte :: AlexInput -> Maybe (Word8,AlexInput)
alexGetByte (AI loc s)
  | atEnd s   = Nothing
  | otherwise = byte `seq` loc' `seq` s' `seq`
                --trace (show (ord c)) $
                Just (byte, (AI loc' s'))
  where (c,s') = nextChar s
        loc'   = advancePsLoc loc c
        byte   = adjustChar c

{-# INLINE alexGetChar' #-}
-- This version does not squash unicode characters, it is used when
-- lexing strings.
alexGetChar' :: AlexInput -> Maybe (Char,AlexInput)
alexGetChar' (AI loc s)
  | atEnd s   = Nothing
  | otherwise = c `seq` loc' `seq` s' `seq`
                --trace (show (ord c)) $
                Just (c, (AI loc' s'))
  where (c,s') = nextChar s
        loc'   = advancePsLoc loc c

getInput :: P AlexInput
getInput = P $ \s@PState{ loc=l, buffer=b } -> POk s (AI l b)

setInput :: AlexInput -> P ()
setInput (AI l b) = P $ \s -> POk s{ loc=l, buffer=b } ()

nextIsEOF :: P Bool
nextIsEOF = do
  AI _ s <- getInput
  return $ atEnd s

pushLexState :: Int -> P ()
pushLexState ls = P $ \s@PState{ lex_state=l } -> POk s{lex_state=ls:l} ()

popLexState :: P Int
popLexState = P $ \s@PState{ lex_state=ls:l } -> POk s{ lex_state=l } ls

getLexState :: P Int
getLexState = P $ \s@PState{ lex_state=ls:_ } -> POk s ls

-- Take the pending ALR token, if any, clearing it from the state.
popNextToken :: P (Maybe (PsLocated Token))
popNextToken
    = P $ \s@PState{ alr_next_token = m } ->
              POk (s {alr_next_token = Nothing}) m

-- Is the alternative layout rule currently inside any context (or expecting
-- one, or holding pending implicit tokens)?
activeContext :: P Bool
activeContext = do
  ctxt <- getALRContext
  expc <- getAlrExpectingOCurly
  impt <- implicitTokenPending
  case (ctxt,expc) of
    ([],Nothing) -> return impt
    _other       -> return True

-- Reset the ALR "last location" to the sentinel start-of-file location for
-- the given file, keeping the buffer span.
resetAlrLastLoc :: FastString -> P ()
resetAlrLastLoc file =
  P $ \s@(PState {alr_last_loc = PsSpan _ buf_span}) ->
  POk s{ alr_last_loc = PsSpan (alrInitialLoc file) buf_span } ()

setAlrLastLoc :: PsSpan -> P ()
setAlrLastLoc l = P $ \s -> POk (s {alr_last_loc = l}) ()

getAlrLastLoc :: P PsSpan
getAlrLastLoc = P $ \s@(PState {alr_last_loc = l}) -> POk s l

getALRContext :: P [ALRContext]
getALRContext = P $ \s@(PState {alr_context = cs}) -> POk s cs

setALRContext :: [ALRContext] -> P ()
setALRContext cs = P $ \s -> POk (s {alr_context = cs}) ()

getJustClosedExplicitLetBlock :: P Bool
getJustClosedExplicitLetBlock
 = P $ \s@(PState {alr_justClosedExplicitLetBlock = b}) -> POk s b

setJustClosedExplicitLetBlock :: Bool -> P ()
setJustClosedExplicitLetBlock b
 = P $ \s -> POk (s {alr_justClosedExplicitLetBlock = b}) ()

setNextToken :: PsLocated Token -> P ()
setNextToken t = P $ \s -> POk (s {alr_next_token = Just t}) ()

implicitTokenPending :: P Bool
implicitTokenPending
    = P $ \s@PState{ alr_pending_implicit_tokens = ts } ->
              case ts of
              [] -> POk s False
              _  -> POk s True

popPendingImplicitToken :: P (Maybe (PsLocated Token))
popPendingImplicitToken
    = P $ \s@PState{ alr_pending_implicit_tokens = ts } ->
              case ts of
              [] -> POk s Nothing
              (t : ts') -> POk (s {alr_pending_implicit_tokens = ts'}) (Just t)

setPendingImplicitTokens :: [PsLocated Token] -> P ()
setPendingImplicitTokens ts = P $ \s -> POk (s {alr_pending_implicit_tokens = ts}) ()

getAlrExpectingOCurly :: P (Maybe ALRLayout)
getAlrExpectingOCurly = P $ \s@(PState {alr_expecting_ocurly = b}) -> POk s b

setAlrExpectingOCurly :: Maybe ALRLayout -> P ()
setAlrExpectingOCurly b = P $ \s -> POk (s {alr_expecting_ocurly = b}) ()

-- | For reasons of efficiency, boolean parsing flags (eg, language extensions
-- or whether we are currently in a @RULE@ pragma) are represented by a bitmap
-- stored in a @Word64@.
type ExtsBitmap = Word64

xbit :: ExtBits -> ExtsBitmap
xbit = bit . fromEnum

xtest :: ExtBits -> ExtsBitmap -> Bool
xtest ext xmap = testBit xmap (fromEnum ext)

xset :: ExtBits -> ExtsBitmap -> ExtsBitmap
xset ext xmap = setBit xmap (fromEnum ext)

xunset :: ExtBits -> ExtsBitmap -> ExtsBitmap
xunset ext xmap = clearBit xmap (fromEnum ext)

-- | Various boolean flags, mostly language extensions, that impact lexing and
-- parsing. Note that a handful of these can change during lexing/parsing.
data ExtBits
  -- Flags that are constant once parsing starts
  = FfiBit
  | InterruptibleFfiBit
  | CApiFfiBit
  | ArrowsBit
  | ThBit
  | ThQuotesBit
  | IpBit
  | OverloadedLabelsBit -- #x overloaded labels
  | ExplicitForallBit -- the 'forall' keyword
  | BangPatBit -- Tells the parser to understand bang-patterns
               -- (doesn't affect the lexer)
  | PatternSynonymsBit -- pattern synonyms
  | HaddockBit-- Lex and parse Haddock comments
  | MagicHashBit -- "#" in both functions and operators
  | RecursiveDoBit -- mdo
  | QualifiedDoBit -- .do and .mdo
  | UnicodeSyntaxBit -- the forall symbol, arrow symbols, etc
  | UnboxedParensBit -- (# and #)
  | DatatypeContextsBit
  | MonadComprehensionsBit
  | TransformComprehensionsBit
  | QqBit -- enable quasiquoting
  | RawTokenStreamBit -- producing a token stream with all comments included
  | AlternativeLayoutRuleBit
  | ALRTransitionalBit
  | RelaxedLayoutBit
  | NondecreasingIndentationBit
  | SafeHaskellBit
  | TraditionalRecordSyntaxBit
  | ExplicitNamespacesBit
  | LambdaCaseBit
  | BinaryLiteralsBit
  | NegativeLiteralsBit
  | HexFloatLiteralsBit
  | StaticPointersBit
  | NumericUnderscoresBit
  | StarIsTypeBit
  | BlockArgumentsBit
  | NPlusKPatternsBit
  | DoAndIfThenElseBit
  | MultiWayIfBit
  | GadtSyntaxBit
  | ImportQualifiedPostBit
  | LinearTypesBit
  | NoLexicalNegationBit   -- See Note [Why not LexicalNegationBit]
  | OverloadedRecordDotBit
  | OverloadedRecordUpdateBit
  -- Flags that are updated once parsing starts
  | InRulePragBit
  | InNestedCommentBit -- See Note [Nested comment line pragmas]
  | UsePosPragsBit
    -- ^ If this is enabled, '{-# LINE ... -#}' and '{-# COLUMN ... #-}'
    -- update the internal position. Otherwise, those pragmas are lexed as
    -- tokens of their own.
  deriving Enum

{-# INLINE mkParserOpts #-}
mkParserOpts
  :: EnumSet LangExt.Extension  -- ^ permitted language extensions enabled
  -> DiagOpts                   -- ^ Options to construct diagnostic messages.
  -> [String]                   -- ^ Supported Languages and Extensions
  -> Bool                       -- ^ are safe imports on?
  -> Bool                       -- ^ keeping Haddock comment tokens
  -> Bool                       -- ^ keep regular comment tokens
  -> Bool
  -- ^ If this is enabled, '{-# LINE ... -#}' and '{-# COLUMN ... #-}' update
  -- the internal position kept by the parser. Otherwise, those pragmas are
  -- lexed as 'ITline_prag' and 'ITcolumn_prag' tokens.
  -> ParserOpts
-- ^ Given exactly the information needed, set up the 'ParserOpts'
mkParserOpts extensionFlags diag_opts supported
  safeImports isHaddock rawTokStream usePosPrags =
    ParserOpts {
      pDiagOpts      = diag_opts
    , pExtsBitmap    = safeHaskellBit .|. langExtBits .|. optBits
    , pSupportedExts = supported
    }
  where
    safeHaskellBit = SafeHaskellBit `setBitIf` safeImports
    langExtBits =
          FfiBit                      `xoptBit` LangExt.ForeignFunctionInterface
      .|. InterruptibleFfiBit         `xoptBit` LangExt.InterruptibleFFI
      .|. CApiFfiBit                  `xoptBit` LangExt.CApiFFI
      .|. ArrowsBit                   `xoptBit` LangExt.Arrows
      .|. ThBit                       `xoptBit` LangExt.TemplateHaskell
      .|. ThQuotesBit                 `xoptBit` LangExt.TemplateHaskellQuotes
      .|. QqBit                       `xoptBit` LangExt.QuasiQuotes
      .|. IpBit                       `xoptBit` LangExt.ImplicitParams
      .|. OverloadedLabelsBit         `xoptBit` LangExt.OverloadedLabels
      .|. ExplicitForallBit           `xoptBit` LangExt.ExplicitForAll
      .|. BangPatBit                  `xoptBit` LangExt.BangPatterns
      .|. MagicHashBit                `xoptBit` LangExt.MagicHash
      .|. RecursiveDoBit              `xoptBit` LangExt.RecursiveDo
      .|. QualifiedDoBit              `xoptBit` LangExt.QualifiedDo
      .|. UnicodeSyntaxBit            `xoptBit` LangExt.UnicodeSyntax
      .|. UnboxedParensBit            `orXoptsBit` [LangExt.UnboxedTuples, LangExt.UnboxedSums]
      .|. DatatypeContextsBit         `xoptBit` LangExt.DatatypeContexts
      .|. TransformComprehensionsBit  `xoptBit` LangExt.TransformListComp
      .|. MonadComprehensionsBit      `xoptBit` LangExt.MonadComprehensions
      .|. AlternativeLayoutRuleBit    `xoptBit` LangExt.AlternativeLayoutRule
      .|. ALRTransitionalBit          `xoptBit` LangExt.AlternativeLayoutRuleTransitional
      .|. RelaxedLayoutBit            `xoptBit` LangExt.RelaxedLayout
      .|. NondecreasingIndentationBit `xoptBit` LangExt.NondecreasingIndentation
      .|. TraditionalRecordSyntaxBit  `xoptBit` LangExt.TraditionalRecordSyntax
      .|. ExplicitNamespacesBit       `xoptBit` LangExt.ExplicitNamespaces
      .|. LambdaCaseBit               `xoptBit` LangExt.LambdaCase
      .|. BinaryLiteralsBit           `xoptBit` LangExt.BinaryLiterals
      .|. NegativeLiteralsBit         `xoptBit` LangExt.NegativeLiterals
      .|. HexFloatLiteralsBit         `xoptBit` LangExt.HexFloatLiterals
      .|. PatternSynonymsBit          `xoptBit` LangExt.PatternSynonyms
      .|. StaticPointersBit           `xoptBit` LangExt.StaticPointers
      .|. NumericUnderscoresBit       `xoptBit` LangExt.NumericUnderscores
      .|. StarIsTypeBit               `xoptBit` LangExt.StarIsType
      .|. BlockArgumentsBit           `xoptBit` LangExt.BlockArguments
      .|. NPlusKPatternsBit           `xoptBit` LangExt.NPlusKPatterns
      .|. DoAndIfThenElseBit          `xoptBit` LangExt.DoAndIfThenElse
      .|. MultiWayIfBit               `xoptBit` LangExt.MultiWayIf
      .|. GadtSyntaxBit               `xoptBit` LangExt.GADTSyntax
      .|. ImportQualifiedPostBit      `xoptBit` LangExt.ImportQualifiedPost
      .|. LinearTypesBit              `xoptBit` LangExt.LinearTypes
      .|. NoLexicalNegationBit        `xoptNotBit` LangExt.LexicalNegation
          -- See Note [Why not LexicalNegationBit]
      .|. OverloadedRecordDotBit      `xoptBit` LangExt.OverloadedRecordDot
      .|. OverloadedRecordUpdateBit   `xoptBit` LangExt.OverloadedRecordUpdate
          -- Enable testing via 'getBit OverloadedRecordUpdateBit' in the parser
          -- (RecordDotSyntax parsing uses that information).

    optBits =
          HaddockBit        `setBitIf` isHaddock
      .|. RawTokenStreamBit `setBitIf` rawTokStream
      .|. UsePosPragsBit    `setBitIf` usePosPrags

    xoptBit bit ext = bit `setBitIf` EnumSet.member ext extensionFlags
    xoptNotBit bit ext = bit `setBitIf` not (EnumSet.member ext extensionFlags)
    orXoptsBit bit exts = bit `setBitIf` any (`EnumSet.member` extensionFlags) exts

    setBitIf :: ExtBits -> Bool -> ExtsBitmap
    b `setBitIf` cond | cond      = xbit b
                      | otherwise = 0

disableHaddock :: ParserOpts -> ParserOpts
disableHaddock opts = upd_bitmap (xunset HaddockBit)
  where
    upd_bitmap f = opts { pExtsBitmap = f (pExtsBitmap opts) }

-- | Set parser options for parsing OPTIONS pragmas
initPragState :: ParserOpts -> StringBuffer -> RealSrcLoc -> PState
initPragState options buf loc = (initParserState options buf loc)
   { lex_state = [bol, option_prags, 0]
   }

-- | Creates a parse state from a 'ParserOpts' value
initParserState :: ParserOpts -> StringBuffer -> RealSrcLoc -> PState
initParserState options buf loc =
  PState {
      buffer        = buf,
      options       = options,
      errors        = emptyMessages,
      warnings      = emptyMessages,
      tab_first     = Strict.Nothing,
      tab_count     = 0,
      last_tk       = Strict.Nothing,
      prev_loc      = mkPsSpan init_loc init_loc,
      prev_loc2     = mkPsSpan init_loc init_loc,
      last_loc      = mkPsSpan init_loc init_loc,
      last_len      = 0,
      loc           = init_loc,
      context       = [],
      lex_state     = [bol, 0],
      srcfiles      = [],
      alr_pending_implicit_tokens = [],
      alr_next_token = Nothing,
      alr_last_loc = PsSpan (alrInitialLoc (fsLit "")) (BufSpan (BufPos 0) (BufPos 0)),
      alr_context = [],
      alr_expecting_ocurly = Nothing,
      alr_justClosedExplicitLetBlock = False,
      eof_pos = Strict.Nothing,
      header_comments = Strict.Nothing,
      comment_q = [],
      hdk_comments = nilOL
    }
  where init_loc = PsLoc loc (BufPos 0)

-- | An mtl-style class for monads that support parsing-related operations.
-- For example, sometimes we make a second pass over the parsing results to
-- validate, disambiguate, or rearrange them, and we do so in the PV monad
-- which cannot consume input but can report parsing errors, check for
-- extension bits, and accumulate parsing annotations. Both P and PV are
-- instances of MonadP.
--
-- MonadP grants us convenient overloading. The other option is to have
-- separate operations for each monad: addErrorP vs addErrorPV, getBitP vs
-- getBitPV, and so on.
--
class Monad m => MonadP m where
  -- | Add a non-fatal error. Use this when the parser can produce a result
  --   despite the error.
  --
  --   For example, when GHC encounters a @forall@ in a type,
  --   but @-XExplicitForAll@ is disabled, the parser constructs @ForAllTy@
  --   as if @-XExplicitForAll@ was enabled, adding a non-fatal error to
  --   the accumulator.
  --
  --   Control flow wise, non-fatal errors act like warnings: they are added
  --   to the accumulator and parsing continues. This allows GHC to report
  --   more than one parse error per file.
  --
  addError :: MsgEnvelope PsMessage -> m ()

  -- | Add a warning to the accumulator.
  --   Use 'getPsMessages' to get the accumulated warnings.
  addWarning :: MsgEnvelope PsMessage -> m ()

  -- | Add a fatal error. This will be the last error reported by the parser, and
  --   the parser will not produce any result, ending in a 'PFailed' state.
  addFatalError :: MsgEnvelope PsMessage -> m a

  -- | Check if a given flag is currently set in the bitmap.
  getBit :: ExtBits -> m Bool

  -- | Go through the @comment_q@ in @PState@ and remove all comments
  --   that belong within the given span
  allocateCommentsP :: RealSrcSpan -> m EpAnnComments

  -- | Go through the @comment_q@ in @PState@ and remove all comments
  --   that come before or within the given span
  allocatePriorCommentsP :: RealSrcSpan -> m EpAnnComments

  -- | Go through the @comment_q@ in @PState@ and remove all comments
  --   that come after the given span
  allocateFinalCommentsP :: RealSrcSpan -> m EpAnnComments

instance MonadP P where
  addError err
   = P $ \s -> POk s { errors = err `addMessage` errors s} ()

  -- If the warning is meant to be suppressed, GHC will assign
  -- a `SevIgnore` severity and the message will be discarded,
  -- so we can simply add it no matter what.
  addWarning w
   = P $ \s -> POk (s { warnings = w `addMessage` warnings s }) ()

  addFatalError err =
    addError err >> P PFailed

  getBit ext = P $ \s -> let b = ext `xtest` pExtsBitmap (options s)
                         in b `seq` POk s b

  allocateCommentsP ss = P $ \s ->
    let (comment_q', newAnns) = allocateComments ss (comment_q s) in
      POk s {
         comment_q = comment_q'
       } (EpaComments newAnns)

  allocatePriorCommentsP ss = P $ \s ->
    let (header_comments', comment_q', newAnns)
             = allocatePriorComments ss (comment_q s) (header_comments s) in
      POk s {
         header_comments = header_comments',
         comment_q = comment_q'
       } (EpaComments newAnns)

  allocateFinalCommentsP ss = P $ \s ->
    let (header_comments', comment_q', newAnns)
             = allocateFinalComments ss (comment_q s) (header_comments s) in
      POk s {
         header_comments = header_comments',
         comment_q = comment_q'
       } (EpaCommentsBalanced (Strict.fromMaybe [] header_comments') newAnns)

getCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments
getCommentsFor (RealSrcSpan l _) = allocateCommentsP l
getCommentsFor _ = return emptyComments

getPriorCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments
getPriorCommentsFor (RealSrcSpan l _) = allocatePriorCommentsP l
getPriorCommentsFor _ = return emptyComments

getFinalCommentsFor :: (MonadP m) => SrcSpan -> m EpAnnComments
getFinalCommentsFor (RealSrcSpan l _) = allocateFinalCommentsP l
getFinalCommentsFor _ = return emptyComments

getEofPos :: P (Strict.Maybe (Strict.Pair RealSrcSpan RealSrcSpan))
getEofPos = P $ \s@(PState { eof_pos = pos }) -> POk s pos

-- Add a (possibly suppressed) diagnostic at the given span.
addPsMessage :: SrcSpan -> PsMessage -> P ()
addPsMessage srcspan msg = do
  diag_opts <- (pDiagOpts . options) <$> getPState
  addWarning (mkPlainMsgEnvelope diag_opts srcspan msg)

-- Count a tab occurrence (remembering the first one's span) when
-- -Wtabs is enabled; the single warning is materialised in 'getPsMessages'.
addTabWarning :: RealSrcSpan -> P ()
addTabWarning srcspan
 = P $ \s@PState{tab_first=tf, tab_count=tc, options=o} ->
       let tf' = tf <|> Strict.Just srcspan
           tc' = tc + 1
           s' = if warnopt Opt_WarnTabs o
                  then s{tab_first = tf', tab_count = tc'}
                  else s
       in POk s' ()

-- | Get a bag of the errors that have been accumulated so far.
--   Does not take -Werror into account.
getPsErrorMessages :: PState -> Messages PsMessage
getPsErrorMessages p = errors p

-- | Get the warnings and errors accumulated so far.
--   Does not take -Werror into account.
getPsMessages :: PState -> (Messages PsMessage, Messages PsMessage)
getPsMessages p =
  let ws = warnings p
      diag_opts = pDiagOpts (options p)
      -- we add the tabulation warning on the fly because
      -- we count the number of occurrences of tab characters
      ws' = case tab_first p of
        Strict.Nothing -> ws
        Strict.Just tf ->
          let msg = mkPlainMsgEnvelope diag_opts
                        (RealSrcSpan tf Strict.Nothing)
                        (PsWarnTab (tab_count p))
          in msg `addMessage` ws
  in (ws', errors p)

getContext :: P [LayoutContext]
getContext = P $ \s@PState{context=ctx} -> POk s ctx

setContext :: [LayoutContext] -> P ()
setContext ctx = P $ \s -> POk s{context=ctx} ()

-- Pop the innermost layout context; popping an empty stack is a parse error
-- at the last token.
popContext :: P ()
popContext = P $ \ s@(PState{ buffer = buf, options = o, context = ctx,
                              last_len = len, last_loc = last_loc }) ->
  case ctx of
        (_:tl) ->
          POk s{ context = tl } ()
        []     ->
          unP (addFatalError $ srcParseErr o buf len (mkSrcSpanPs last_loc)) s

-- Push a new layout context at the indentation of the last token read.
pushCurrentContext :: GenSemic -> P ()
pushCurrentContext gen_semic
  = P $ \ s@PState{ last_loc=loc, context=ctx } ->
    POk s{context = Layout (srcSpanStartCol (psRealSpan loc)) gen_semic : ctx} ()

-- This is only used at the outer level of a module when the 'module' keyword is
-- missing.
pushModuleContext :: P ()
pushModuleContext = pushCurrentContext generateSemic

-- Compare the current token's column with the enclosing layout context,
-- also reporting whether ';' tokens should be generated for it.
getOffside :: P (Ordering, Bool)
getOffside = P $ \s@PState{last_loc=loc, context=stk} ->
                let offs = srcSpanStartCol (psRealSpan loc) in
                let ord = case stk of
                            Layout n gen_semic : _ ->
                              --trace ("layout: " ++ show n ++ ", offs: " ++ show offs) $
                              (compare offs n, gen_semic)
                            _ ->
                              (GT, dontGenerateSemic)
                in POk s ord

-- ---------------------------------------------------------------------------
-- Construct a parse error

srcParseErr
  :: ParserOpts
  -> StringBuffer       -- current buffer (placed just after the last token)
  -> Int                -- length of the previous token
  -> SrcSpan
  -> MsgEnvelope PsMessage
srcParseErr options buf len loc = mkPlainErrorMsgEnvelope loc (PsErrParse token details)
  where
   token = lexemeToString (offsetBytes (-len) buf) len
   pattern_ = decodePrevNChars 8 buf
   last100 = decodePrevNChars 100 buf
   doInLast100 = "do" `isInfixOf` last100
   mdoInLast100 = "mdo" `isInfixOf` last100
   th_enabled = ThQuotesBit `xtest` pExtsBitmap options
   ps_enabled = PatternSynonymsBit `xtest` pExtsBitmap options
   details = PsErrParseDetails
     { ped_th_enabled      = th_enabled
     , ped_do_in_last_100  = doInLast100
     , ped_mdo_in_last_100 = mdoInLast100
     , ped_pat_syn_enabled = ps_enabled
     , ped_pattern_parsed  = pattern_ == "pattern "
     }

-- Report a parse failure, giving the span of the previous token as
-- the location of the error.  This is the entry point for errors
-- detected during parsing.
srcParseFail :: P a
srcParseFail = P $ \s@PState{ buffer = buf, options = o, last_len = len,
                              last_loc = last_loc } ->
    unP (addFatalError $ srcParseErr o buf len (mkSrcSpanPs last_loc)) s

-- A lexical error is reported at a particular position in the source file,
-- not over a token range.
lexError :: LexErr -> P a lexError e = do loc <- getRealSrcLoc (AI end buf) <- getInput reportLexError loc (psRealLoc end) buf (\k srcLoc -> mkPlainErrorMsgEnvelope srcLoc $ PsErrLexer e k) -- ----------------------------------------------------------------------------- -- This is the top-level function: called from the parser each time a -- new token is to be read from the input. lexer, lexerDbg :: Bool -> (Located Token -> P a) -> P a lexer queueComments cont = do alr <- getBit AlternativeLayoutRuleBit let lexTokenFun = if alr then lexTokenAlr else lexToken (L span tok) <- lexTokenFun --trace ("token: " ++ show tok) $ do if (queueComments && isComment tok) then queueComment (L (psRealSpan span) tok) >> lexer queueComments cont else cont (L (mkSrcSpanPs span) tok) -- Use this instead of 'lexer' in GHC.Parser to dump the tokens for debugging. lexerDbg queueComments cont = lexer queueComments contDbg where contDbg tok = trace ("token: " ++ show (unLoc tok)) (cont tok) lexTokenAlr :: P (PsLocated Token) lexTokenAlr = do mPending <- popPendingImplicitToken t <- case mPending of Nothing -> do mNext <- popNextToken t <- case mNext of Nothing -> lexToken Just next -> return next alternativeLayoutRuleToken t Just t -> return t setAlrLastLoc (getLoc t) case unLoc t of ITwhere -> setAlrExpectingOCurly (Just ALRLayoutWhere) ITlet -> setAlrExpectingOCurly (Just ALRLayoutLet) ITof -> setAlrExpectingOCurly (Just ALRLayoutOf) ITlcase -> setAlrExpectingOCurly (Just ALRLayoutOf) ITlcases -> setAlrExpectingOCurly (Just ALRLayoutOf) ITdo _ -> setAlrExpectingOCurly (Just ALRLayoutDo) ITmdo _ -> setAlrExpectingOCurly (Just ALRLayoutDo) ITrec -> setAlrExpectingOCurly (Just ALRLayoutDo) _ -> return () return t alternativeLayoutRuleToken :: PsLocated Token -> P (PsLocated Token) alternativeLayoutRuleToken t = do context <- getALRContext lastLoc <- getAlrLastLoc mExpectingOCurly <- getAlrExpectingOCurly transitional <- getBit ALRTransitionalBit justClosedExplicitLetBlock <- 
getJustClosedExplicitLetBlock setJustClosedExplicitLetBlock False let thisLoc = getLoc t thisCol = srcSpanStartCol (psRealSpan thisLoc) newLine = srcSpanStartLine (psRealSpan thisLoc) > srcSpanEndLine (psRealSpan lastLoc) case (unLoc t, context, mExpectingOCurly) of -- This case handles a GHC extension to the original H98 -- layout rule... (ITocurly, _, Just alrLayout) -> do setAlrExpectingOCurly Nothing let isLet = case alrLayout of ALRLayoutLet -> True _ -> False setALRContext (ALRNoLayout (containsCommas ITocurly) isLet : context) return t -- ...and makes this case unnecessary {- -- I think our implicit open-curly handling is slightly -- different to John's, in how it interacts with newlines -- and "in" (ITocurly, _, Just _) -> do setAlrExpectingOCurly Nothing setNextToken t lexTokenAlr -} (_, ALRLayout _ col : _ls, Just expectingOCurly) | (thisCol > col) || (thisCol == col && isNonDecreasingIndentation expectingOCurly) -> do setAlrExpectingOCurly Nothing setALRContext (ALRLayout expectingOCurly thisCol : context) setNextToken t return (L thisLoc ITvocurly) | otherwise -> do setAlrExpectingOCurly Nothing setPendingImplicitTokens [L lastLoc ITvccurly] setNextToken t return (L lastLoc ITvocurly) (_, _, Just expectingOCurly) -> do setAlrExpectingOCurly Nothing setALRContext (ALRLayout expectingOCurly thisCol : context) setNextToken t return (L thisLoc ITvocurly) -- We do the [] cases earlier than in the spec, as we -- have an actual EOF token (ITeof, ALRLayout _ _ : ls, _) -> do setALRContext ls setNextToken t return (L thisLoc ITvccurly) (ITeof, _, _) -> return t -- the other ITeof case omitted; general case below covers it (ITin, _, _) | justClosedExplicitLetBlock -> return t (ITin, ALRLayout ALRLayoutLet _ : ls, _) | newLine -> do setPendingImplicitTokens [t] setALRContext ls return (L thisLoc ITvccurly) -- This next case is to handle a transitional issue: (ITwhere, ALRLayout _ col : ls, _) | newLine && thisCol == col && transitional -> do addPsMessage 
(mkSrcSpanPs thisLoc) (PsWarnTransitionalLayout TransLayout_Where) setALRContext ls setNextToken t -- Note that we use lastLoc, as we may need to close -- more layouts, or give a semicolon return (L lastLoc ITvccurly) -- This next case is to handle a transitional issue: (ITvbar, ALRLayout _ col : ls, _) | newLine && thisCol == col && transitional -> do addPsMessage (mkSrcSpanPs thisLoc) (PsWarnTransitionalLayout TransLayout_Pipe) setALRContext ls setNextToken t -- Note that we use lastLoc, as we may need to close -- more layouts, or give a semicolon return (L lastLoc ITvccurly) (_, ALRLayout _ col : ls, _) | newLine && thisCol == col -> do setNextToken t let loc = psSpanStart thisLoc zeroWidthLoc = mkPsSpan loc loc return (L zeroWidthLoc ITsemi) | newLine && thisCol < col -> do setALRContext ls setNextToken t -- Note that we use lastLoc, as we may need to close -- more layouts, or give a semicolon return (L lastLoc ITvccurly) -- We need to handle close before open, as 'then' is both -- an open and a close (u, _, _) | isALRclose u -> case context of ALRLayout _ _ : ls -> do setALRContext ls setNextToken t return (L thisLoc ITvccurly) ALRNoLayout _ isLet : ls -> do let ls' = if isALRopen u then ALRNoLayout (containsCommas u) False : ls else ls setALRContext ls' when isLet $ setJustClosedExplicitLetBlock True return t [] -> do let ls = if isALRopen u then [ALRNoLayout (containsCommas u) False] else [] setALRContext ls -- XXX This is an error in John's code, but -- it looks reachable to me at first glance return t (u, _, _) | isALRopen u -> do setALRContext (ALRNoLayout (containsCommas u) False : context) return t (ITin, ALRLayout ALRLayoutLet _ : ls, _) -> do setALRContext ls setPendingImplicitTokens [t] return (L thisLoc ITvccurly) (ITin, ALRLayout _ _ : ls, _) -> do setALRContext ls setNextToken t return (L thisLoc ITvccurly) -- the other ITin case omitted; general case below covers it (ITcomma, ALRLayout _ _ : ls, _) | topNoLayoutContainsCommas ls -> do 
setALRContext ls setNextToken t return (L thisLoc ITvccurly) (ITwhere, ALRLayout ALRLayoutDo _ : ls, _) -> do setALRContext ls setPendingImplicitTokens [t] return (L thisLoc ITvccurly) -- the other ITwhere case omitted; general case below covers it (_, _, _) -> return t isALRopen :: Token -> Bool isALRopen ITcase = True isALRopen ITif = True isALRopen ITthen = True isALRopen IToparen = True isALRopen ITobrack = True isALRopen ITocurly = True -- GHC Extensions: isALRopen IToubxparen = True isALRopen _ = False isALRclose :: Token -> Bool isALRclose ITof = True isALRclose ITthen = True isALRclose ITelse = True isALRclose ITcparen = True isALRclose ITcbrack = True isALRclose ITccurly = True -- GHC Extensions: isALRclose ITcubxparen = True isALRclose _ = False isNonDecreasingIndentation :: ALRLayout -> Bool isNonDecreasingIndentation ALRLayoutDo = True isNonDecreasingIndentation _ = False containsCommas :: Token -> Bool containsCommas IToparen = True containsCommas ITobrack = True -- John doesn't have {} as containing commas, but records contain them, -- which caused a problem parsing Cabal's Distribution.Simple.InstallDirs -- (defaultInstallDirs). 
containsCommas ITocurly = True -- GHC Extensions: containsCommas IToubxparen = True containsCommas _ = False topNoLayoutContainsCommas :: [ALRContext] -> Bool topNoLayoutContainsCommas [] = False topNoLayoutContainsCommas (ALRLayout _ _ : ls) = topNoLayoutContainsCommas ls topNoLayoutContainsCommas (ALRNoLayout b _ : _) = b lexToken :: P (PsLocated Token) lexToken = do inp@(AI loc1 buf) <- getInput sc <- getLexState exts <- getExts case alexScanUser exts inp sc of AlexEOF -> do let span = mkPsSpan loc1 loc1 lt <- getLastLocEof setEofPos (psRealSpan span) (psRealSpan lt) setLastToken span 0 return (L span ITeof) AlexError (AI loc2 buf) -> reportLexError (psRealLoc loc1) (psRealLoc loc2) buf (\k srcLoc -> mkPlainErrorMsgEnvelope srcLoc $ PsErrLexer LexError k) AlexSkip inp2 _ -> do setInput inp2 lexToken AlexToken inp2@(AI end buf2) _ t -> do setInput inp2 let span = mkPsSpan loc1 end let bytes = byteDiff buf buf2 span `seq` setLastToken span bytes lt <- t span buf bytes let lt' = unLoc lt if (isComment lt') then setLastComment lt else setLastTk lt return lt reportLexError :: RealSrcLoc -> RealSrcLoc -> StringBuffer -> (LexErrKind -> SrcSpan -> MsgEnvelope PsMessage) -> P a reportLexError loc1 loc2 buf f | atEnd buf = failLocMsgP loc1 loc2 (f LexErrKind_EOF) | otherwise = let c = fst (nextChar buf) in if c == '\0' -- decoding errors are mapped to '\0', see utf8DecodeChar# then failLocMsgP loc2 loc2 (f LexErrKind_UTF8) else failLocMsgP loc1 loc2 (f (LexErrKind_Char c)) lexTokenStream :: ParserOpts -> StringBuffer -> RealSrcLoc -> ParseResult [Located Token] lexTokenStream opts buf loc = unP go initState{ options = opts' } where new_exts = xunset UsePosPragsBit -- parse LINE/COLUMN pragmas as tokens $ xset RawTokenStreamBit -- include comments $ pExtsBitmap opts opts' = opts { pExtsBitmap = new_exts } initState = initParserState opts' buf loc go = do ltok <- lexer False return case ltok of L _ ITeof -> return [] _ -> liftM (ltok:) go linePrags = Map.singleton "line" 
linePrag fileHeaderPrags = Map.fromList([("options", lex_string_prag IToptions_prag), ("options_ghc", lex_string_prag IToptions_prag), ("options_haddock", lex_string_prag_comment ITdocOptions), ("language", token ITlanguage_prag), ("include", lex_string_prag ITinclude_prag)]) ignoredPrags = Map.fromList (map ignored pragmas) where ignored opt = (opt, nested_comment) impls = ["hugs", "nhc98", "jhc", "yhc", "catch", "derive"] options_pragmas = map ("options_" ++) impls -- CFILES is a hugs-only thing. pragmas = options_pragmas ++ ["cfiles", "contract"] oneWordPrags = Map.fromList [ ("rules", rulePrag), ("inline", strtoken (\s -> (ITinline_prag (SourceText s) (Inline (SourceText s)) FunLike))), ("inlinable", strtoken (\s -> (ITinline_prag (SourceText s) (Inlinable (SourceText s)) FunLike))), ("inlineable", strtoken (\s -> (ITinline_prag (SourceText s) (Inlinable (SourceText s)) FunLike))), -- Spelling variant ("notinline", strtoken (\s -> (ITinline_prag (SourceText s) (NoInline (SourceText s)) FunLike))), ("opaque", strtoken (\s -> ITopaque_prag (SourceText s))), ("specialize", strtoken (\s -> ITspec_prag (SourceText s))), ("source", strtoken (\s -> ITsource_prag (SourceText s))), ("warning", strtoken (\s -> ITwarning_prag (SourceText s))), ("deprecated", strtoken (\s -> ITdeprecated_prag (SourceText s))), ("scc", strtoken (\s -> ITscc_prag (SourceText s))), ("unpack", strtoken (\s -> ITunpack_prag (SourceText s))), ("nounpack", strtoken (\s -> ITnounpack_prag (SourceText s))), ("ann", strtoken (\s -> ITann_prag (SourceText s))), ("minimal", strtoken (\s -> ITminimal_prag (SourceText s))), ("overlaps", strtoken (\s -> IToverlaps_prag (SourceText s))), ("overlappable", strtoken (\s -> IToverlappable_prag (SourceText s))), ("overlapping", strtoken (\s -> IToverlapping_prag (SourceText s))), ("incoherent", strtoken (\s -> ITincoherent_prag (SourceText s))), ("ctype", strtoken (\s -> ITctype (SourceText s))), ("complete", strtoken (\s -> ITcomplete_prag (SourceText s))), 
("column", columnPrag) ] twoWordPrags = Map.fromList [ ("inline conlike", strtoken (\s -> (ITinline_prag (SourceText s) (Inline (SourceText s)) ConLike))), ("notinline conlike", strtoken (\s -> (ITinline_prag (SourceText s) (NoInline (SourceText s)) ConLike))), ("specialize inline", strtoken (\s -> (ITspec_inline_prag (SourceText s) True))), ("specialize notinline", strtoken (\s -> (ITspec_inline_prag (SourceText s) False))) ] dispatch_pragmas :: Map String Action -> Action dispatch_pragmas prags span buf len = case Map.lookup (clean_pragma (lexemeToString buf len)) prags of Just found -> found span buf len Nothing -> lexError LexUnknownPragma known_pragma :: Map String Action -> AlexAccPred ExtsBitmap known_pragma prags _ (AI _ startbuf) _ (AI _ curbuf) = isKnown && nextCharIsNot curbuf pragmaNameChar where l = lexemeToString startbuf (byteDiff startbuf curbuf) isKnown = isJust $ Map.lookup (clean_pragma l) prags pragmaNameChar c = isAlphaNum c || c == '_' clean_pragma :: String -> String clean_pragma prag = canon_ws (map toLower (unprefix prag)) where unprefix prag' = case stripPrefix "{-#" prag' of Just rest -> rest Nothing -> prag' canonical prag' = case prag' of "noinline" -> "notinline" "specialise" -> "specialize" "constructorlike" -> "conlike" _ -> prag' canon_ws s = unwords (map canonical (words s)) {- %************************************************************************ %* * Helper functions for generating annotations in the parser %* * %************************************************************************ -} -- |Given a 'RealSrcSpan' that surrounds a 'HsPar' or 'HsParTy', generate -- 'AddEpAnn' values for the opening and closing bordering on the start -- and end of the span mkParensEpAnn :: RealSrcSpan -> (AddEpAnn, AddEpAnn) mkParensEpAnn ss = (AddEpAnn AnnOpenP (EpaSpan lo),AddEpAnn AnnCloseP (EpaSpan lc)) where f = srcSpanFile ss sl = srcSpanStartLine ss sc = srcSpanStartCol ss el = srcSpanEndLine ss ec = srcSpanEndCol ss lo = mkRealSrcSpan 
(realSrcSpanStart ss) (mkRealSrcLoc f sl (sc+1)) lc = mkRealSrcSpan (mkRealSrcLoc f el (ec - 1)) (realSrcSpanEnd ss) queueComment :: RealLocated Token -> P() queueComment c = P $ \s -> POk s { comment_q = commentToAnnotation c : comment_q s } () allocateComments :: RealSrcSpan -> [LEpaComment] -> ([LEpaComment], [LEpaComment]) allocateComments ss comment_q = let (before,rest) = break (\(L l _) -> isRealSubspanOf (anchor l) ss) comment_q (middle,after) = break (\(L l _) -> not (isRealSubspanOf (anchor l) ss)) rest comment_q' = before ++ after newAnns = middle in (comment_q', reverse newAnns) allocatePriorComments :: RealSrcSpan -> [LEpaComment] -> Strict.Maybe [LEpaComment] -> (Strict.Maybe [LEpaComment], [LEpaComment], [LEpaComment]) allocatePriorComments ss comment_q mheader_comments = let cmp (L l _) = anchor l <= ss (before,after) = partition cmp comment_q newAnns = before comment_q'= after in case mheader_comments of Strict.Nothing -> (Strict.Just (reverse newAnns), comment_q', []) Strict.Just _ -> (mheader_comments, comment_q', reverse newAnns) allocateFinalComments :: RealSrcSpan -> [LEpaComment] -> Strict.Maybe [LEpaComment] -> (Strict.Maybe [LEpaComment], [LEpaComment], [LEpaComment]) allocateFinalComments _ss comment_q mheader_comments = -- We ignore the RealSrcSpan as the parser currently provides a -- point span at (1,1). 
case mheader_comments of Strict.Nothing -> (Strict.Just (reverse comment_q), [], []) Strict.Just _ -> (mheader_comments, [], reverse comment_q) commentToAnnotation :: RealLocated Token -> LEpaComment commentToAnnotation (L l (ITdocComment s ll)) = mkLEpaComment l ll (EpaDocComment s) commentToAnnotation (L l (ITdocOptions s ll)) = mkLEpaComment l ll (EpaDocOptions s) commentToAnnotation (L l (ITlineComment s ll)) = mkLEpaComment l ll (EpaLineComment s) commentToAnnotation (L l (ITblockComment s ll)) = mkLEpaComment l ll (EpaBlockComment s) commentToAnnotation _ = panic "commentToAnnotation" -- see Note [PsSpan in Comments] mkLEpaComment :: RealSrcSpan -> PsSpan -> EpaCommentTok -> LEpaComment mkLEpaComment l ll tok = L (realSpanAsAnchor l) (EpaComment tok (psRealSpan ll)) -- --------------------------------------------------------------------- isComment :: Token -> Bool isComment (ITlineComment _ _) = True isComment (ITblockComment _ _) = True isComment (ITdocComment _ _) = True isComment (ITdocOptions _ _) = True isComment _ = False } ghc-lib-parser-9.4.7.20230826/compiler/GHC/Parser.hs-boot0000644000000000000000000000025514470055370020400 0ustar0000000000000000module GHC.Parser where import GHC.Types.Name.Reader (RdrName) import GHC.Parser.Lexer (P) import GHC.Parser.Annotation (LocatedN) parseIdentifier :: P (LocatedN RdrName) ghc-lib-parser-9.4.7.20230826/rts/include/ghcconfig.h0000644000000000000000000000010014470055371017755 0ustar0000000000000000#pragma once #include "ghcautoconf.h" #include "ghcplatform.h" ghc-lib-parser-9.4.7.20230826/compiler/MachRegs.h0000644000000000000000000005212614472377771017132 0ustar0000000000000000/* ----------------------------------------------------------------------------- * * (c) The GHC Team, 1998-2014 * * Registers used in STG code. Might or might not correspond to * actual machine registers. * * Do not #include this file directly: #include "Rts.h" instead. 
* * To understand the structure of the RTS headers, see the wiki: * https://gitlab.haskell.org/ghc/ghc/wikis/commentary/source-tree/includes * * ---------------------------------------------------------------------------*/ #pragma once /* This file is #included into Haskell code in the compiler: #defines * only in here please. */ /* * Undefine these as a precaution: some of them were found to be * defined by system headers on ARM/Linux. */ #undef REG_R1 #undef REG_R2 #undef REG_R3 #undef REG_R4 #undef REG_R5 #undef REG_R6 #undef REG_R7 #undef REG_R8 #undef REG_R9 #undef REG_R10 /* * Defining MACHREGS_NO_REGS to 1 causes no global registers to be used. * MACHREGS_NO_REGS is typically controlled by NO_REGS, which is * typically defined by GHC, via a command-line option passed to gcc, * when the -funregisterised flag is given. * * NB. When MACHREGS_NO_REGS to 1, calling & return conventions may be * different. For example, all function arguments will be passed on * the stack, and components of an unboxed tuple will be returned on * the stack rather than in registers. */ #if MACHREGS_NO_REGS == 1 /* Nothing */ #elif MACHREGS_NO_REGS == 0 /* ---------------------------------------------------------------------------- Caller saves and callee-saves regs. Caller-saves regs have to be saved around C-calls made from STG land, so this file defines CALLER_SAVES_ for each that is designated caller-saves in that machine's C calling convention. As it stands, the only registers that are ever marked caller saves are the RX, FX, DX and USER registers; as a result, if you decide to caller save a system register (e.g. SP, HP, etc), note that this code path is completely untested! -- EZY See Note [Register parameter passing] for details. 
-------------------------------------------------------------------------- */ /* ----------------------------------------------------------------------------- The x86 register mapping Ok, we've only got 6 general purpose registers, a frame pointer and a stack pointer. \tr{%eax} and \tr{%edx} are return values from C functions, hence they get trashed across ccalls and are caller saves. \tr{%ebx}, \tr{%esi}, \tr{%edi}, \tr{%ebp} are all callee-saves. Reg STG-Reg --------------- ebx Base ebp Sp esi R1 edi Hp Leaving SpLim out of the picture. -------------------------------------------------------------------------- */ #if defined(MACHREGS_i386) #define REG(x) __asm__("%" #x) #if !defined(not_doing_dynamic_linking) #define REG_Base ebx #endif #define REG_Sp ebp #if !defined(STOLEN_X86_REGS) #define STOLEN_X86_REGS 4 #endif #if STOLEN_X86_REGS >= 3 # define REG_R1 esi #endif #if STOLEN_X86_REGS >= 4 # define REG_Hp edi #endif #define REG_MachSp esp #define REG_XMM1 xmm0 #define REG_XMM2 xmm1 #define REG_XMM3 xmm2 #define REG_XMM4 xmm3 #define REG_YMM1 ymm0 #define REG_YMM2 ymm1 #define REG_YMM3 ymm2 #define REG_YMM4 ymm3 #define REG_ZMM1 zmm0 #define REG_ZMM2 zmm1 #define REG_ZMM3 zmm2 #define REG_ZMM4 zmm3 #define MAX_REAL_VANILLA_REG 1 /* always, since it defines the entry conv */ #define MAX_REAL_FLOAT_REG 0 #define MAX_REAL_DOUBLE_REG 0 #define MAX_REAL_LONG_REG 0 #define MAX_REAL_XMM_REG 4 #define MAX_REAL_YMM_REG 4 #define MAX_REAL_ZMM_REG 4 /* ----------------------------------------------------------------------------- The x86-64 register mapping %rax caller-saves, don't steal this one %rbx YES %rcx arg reg, caller-saves %rdx arg reg, caller-saves %rsi arg reg, caller-saves %rdi arg reg, caller-saves %rbp YES (our *prime* register) %rsp (unavailable - stack pointer) %r8 arg reg, caller-saves %r9 arg reg, caller-saves %r10 caller-saves %r11 caller-saves %r12 YES %r13 YES %r14 YES %r15 YES %xmm0-7 arg regs, caller-saves %xmm8-15 caller-saves Use the caller-saves 
regs for Rn, because we don't always have to save those (as opposed to Sp/Hp/SpLim etc. which always have to be saved). --------------------------------------------------------------------------- */ #elif defined(MACHREGS_x86_64) #define REG(x) __asm__("%" #x) #define REG_Base r13 #define REG_Sp rbp #define REG_Hp r12 #define REG_R1 rbx #define REG_R2 r14 #define REG_R3 rsi #define REG_R4 rdi #define REG_R5 r8 #define REG_R6 r9 #define REG_SpLim r15 #define REG_MachSp rsp /* Map both Fn and Dn to register xmmn so that we can pass a function any combination of up to six Float# or Double# arguments without touching the stack. See Note [Overlapping global registers] for implications. */ #define REG_F1 xmm1 #define REG_F2 xmm2 #define REG_F3 xmm3 #define REG_F4 xmm4 #define REG_F5 xmm5 #define REG_F6 xmm6 #define REG_D1 xmm1 #define REG_D2 xmm2 #define REG_D3 xmm3 #define REG_D4 xmm4 #define REG_D5 xmm5 #define REG_D6 xmm6 #define REG_XMM1 xmm1 #define REG_XMM2 xmm2 #define REG_XMM3 xmm3 #define REG_XMM4 xmm4 #define REG_XMM5 xmm5 #define REG_XMM6 xmm6 #define REG_YMM1 ymm1 #define REG_YMM2 ymm2 #define REG_YMM3 ymm3 #define REG_YMM4 ymm4 #define REG_YMM5 ymm5 #define REG_YMM6 ymm6 #define REG_ZMM1 zmm1 #define REG_ZMM2 zmm2 #define REG_ZMM3 zmm3 #define REG_ZMM4 zmm4 #define REG_ZMM5 zmm5 #define REG_ZMM6 zmm6 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_R3 #define CALLER_SAVES_R4 #endif #define CALLER_SAVES_R5 #define CALLER_SAVES_R6 #define CALLER_SAVES_F1 #define CALLER_SAVES_F2 #define CALLER_SAVES_F3 #define CALLER_SAVES_F4 #define CALLER_SAVES_F5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_F6 #endif #define CALLER_SAVES_D1 #define CALLER_SAVES_D2 #define CALLER_SAVES_D3 #define CALLER_SAVES_D4 #define CALLER_SAVES_D5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_D6 #endif #define CALLER_SAVES_XMM1 #define CALLER_SAVES_XMM2 #define CALLER_SAVES_XMM3 #define CALLER_SAVES_XMM4 #define CALLER_SAVES_XMM5 #if !defined(mingw32_HOST_OS) #define 
CALLER_SAVES_XMM6 #endif #define CALLER_SAVES_YMM1 #define CALLER_SAVES_YMM2 #define CALLER_SAVES_YMM3 #define CALLER_SAVES_YMM4 #define CALLER_SAVES_YMM5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_YMM6 #endif #define CALLER_SAVES_ZMM1 #define CALLER_SAVES_ZMM2 #define CALLER_SAVES_ZMM3 #define CALLER_SAVES_ZMM4 #define CALLER_SAVES_ZMM5 #if !defined(mingw32_HOST_OS) #define CALLER_SAVES_ZMM6 #endif #define MAX_REAL_VANILLA_REG 6 #define MAX_REAL_FLOAT_REG 6 #define MAX_REAL_DOUBLE_REG 6 #define MAX_REAL_LONG_REG 0 #define MAX_REAL_XMM_REG 6 #define MAX_REAL_YMM_REG 6 #define MAX_REAL_ZMM_REG 6 /* ----------------------------------------------------------------------------- The PowerPC register mapping 0 system glue? (caller-save, volatile) 1 SP (callee-save, non-volatile) 2 AIX, powerpc64-linux: RTOC (a strange special case) powerpc32-linux: reserved for use by system 3-10 args/return (caller-save, volatile) 11,12 system glue? (caller-save, volatile) 13 on 64-bit: reserved for thread state pointer on 32-bit: (callee-save, non-volatile) 14-31 (callee-save, non-volatile) f0 (caller-save, volatile) f1-f13 args/return (caller-save, volatile) f14-f31 (callee-save, non-volatile) \tr{14}--\tr{31} are wonderful callee-save registers on all ppc OSes. \tr{0}--\tr{12} are caller-save registers. \tr{%f14}--\tr{%f31} are callee-save floating-point registers. We can do the Whole Business with callee-save registers only! 
-------------------------------------------------------------------------- */ #elif defined(MACHREGS_powerpc) #define REG(x) __asm__(#x) #define REG_R1 r14 #define REG_R2 r15 #define REG_R3 r16 #define REG_R4 r17 #define REG_R5 r18 #define REG_R6 r19 #define REG_R7 r20 #define REG_R8 r21 #define REG_R9 r22 #define REG_R10 r23 #define REG_F1 fr14 #define REG_F2 fr15 #define REG_F3 fr16 #define REG_F4 fr17 #define REG_F5 fr18 #define REG_F6 fr19 #define REG_D1 fr20 #define REG_D2 fr21 #define REG_D3 fr22 #define REG_D4 fr23 #define REG_D5 fr24 #define REG_D6 fr25 #define REG_Sp r24 #define REG_SpLim r25 #define REG_Hp r26 #define REG_Base r27 #define MAX_REAL_FLOAT_REG 6 #define MAX_REAL_DOUBLE_REG 6 /* ----------------------------------------------------------------------------- The ARM EABI register mapping Here we consider ARM mode (i.e. 32bit isns) and also CPU with full VFPv3 implementation ARM registers (see Chapter 5.1 in ARM IHI 0042D and Section 9.2.2 in ARM Software Development Toolkit Reference Guide) r15 PC The Program Counter. r14 LR The Link Register. r13 SP The Stack Pointer. r12 IP The Intra-Procedure-call scratch register. r11 v8/fp Variable-register 8. r10 v7/sl Variable-register 7. r9 v6/SB/TR Platform register. The meaning of this register is defined by the platform standard. r8 v5 Variable-register 5. r7 v4 Variable register 4. r6 v3 Variable register 3. r5 v2 Variable register 2. r4 v1 Variable register 1. r3 a4 Argument / scratch register 4. r2 a3 Argument / scratch register 3. r1 a2 Argument / result / scratch register 2. r0 a1 Argument / result / scratch register 1. 
VFPv2/VFPv3/NEON registers s0-s15/d0-d7/q0-q3 Argument / result/ scratch registers s16-s31/d8-d15/q4-q7 callee-saved registers (must be preserved across subroutine calls) VFPv3/NEON registers (added to the VFPv2 registers set) d16-d31/q8-q15 Argument / result/ scratch registers ----------------------------------------------------------------------------- */ #elif defined(MACHREGS_arm) #define REG(x) __asm__(#x) #define REG_Base r4 #define REG_Sp r5 #define REG_Hp r6 #define REG_R1 r7 #define REG_R2 r8 #define REG_R3 r9 #define REG_R4 r10 #define REG_SpLim r11 #if !defined(arm_HOST_ARCH_PRE_ARMv6) /* d8 */ #define REG_F1 s16 #define REG_F2 s17 /* d9 */ #define REG_F3 s18 #define REG_F4 s19 #define REG_D1 d10 #define REG_D2 d11 #endif /* ----------------------------------------------------------------------------- The ARMv8/AArch64 ABI register mapping The AArch64 provides 31 64-bit general purpose registers and 32 128-bit SIMD/floating point registers. General purpose registers (see Chapter 5.1.1 in ARM IHI 0055B) Register | Special | Role in the procedure call standard ---------+---------+------------------------------------ SP | | The Stack Pointer r30 | LR | The Link Register r29 | FP | The Frame Pointer r19-r28 | | Callee-saved registers r18 | | The Platform Register, if needed; | | or temporary register r17 | IP1 | The second intra-procedure-call temporary register r16 | IP0 | The first intra-procedure-call scratch register r9-r15 | | Temporary registers r8 | | Indirect result location register r0-r7 | | Parameter/result registers FPU/SIMD registers s/d/q/v0-v7 Argument / result/ scratch registers s/d/q/v8-v15 callee-saved registers (must be preserved across subroutine calls, but only bottom 64-bit value needs to be preserved) s/d/q/v16-v31 temporary registers ----------------------------------------------------------------------------- */ #elif defined(MACHREGS_aarch64) #define REG(x) __asm__(#x) #define REG_Base r19 #define REG_Sp r20 #define REG_Hp r21 
#define REG_R1 r22 #define REG_R2 r23 #define REG_R3 r24 #define REG_R4 r25 #define REG_R5 r26 #define REG_R6 r27 #define REG_SpLim r28 #define REG_F1 s8 #define REG_F2 s9 #define REG_F3 s10 #define REG_F4 s11 #define REG_D1 d12 #define REG_D2 d13 #define REG_D3 d14 #define REG_D4 d15 /* ----------------------------------------------------------------------------- The s390x register mapping Register | Role(s) | Call effect ------------+-------------------------------------+----------------- r0,r1 | - | caller-saved r2 | Argument / return value | caller-saved r3,r4,r5 | Arguments | caller-saved r6 | Argument | callee-saved r7...r11 | - | callee-saved r12 | (Commonly used as GOT pointer) | callee-saved r13 | (Commonly used as literal pool pointer) | callee-saved r14 | Return address | caller-saved r15 | Stack pointer | callee-saved f0 | Argument / return value | caller-saved f2,f4,f6 | Arguments | caller-saved f1,f3,f5,f7 | - | caller-saved f8...f15 | - | callee-saved v0...v31 | - | caller-saved Each general purpose register r0 through r15 as well as each floating-point register f0 through f15 is 64 bits wide. Each vector register v0 through v31 is 128 bits wide. Note, the vector registers v0 through v15 overlap with the floating-point registers f0 through f15. 
-------------------------------------------------------------------------- */ #elif defined(MACHREGS_s390x) #define REG(x) __asm__("%" #x) #define REG_Base r7 #define REG_Sp r8 #define REG_Hp r10 #define REG_R1 r11 #define REG_R2 r12 #define REG_R3 r13 #define REG_R4 r6 #define REG_R5 r2 #define REG_R6 r3 #define REG_R7 r4 #define REG_R8 r5 #define REG_SpLim r9 #define REG_MachSp r15 #define REG_F1 f8 #define REG_F2 f9 #define REG_F3 f10 #define REG_F4 f11 #define REG_F5 f0 #define REG_F6 f1 #define REG_D1 f12 #define REG_D2 f13 #define REG_D3 f14 #define REG_D4 f15 #define REG_D5 f2 #define REG_D6 f3 #define CALLER_SAVES_R5 #define CALLER_SAVES_R6 #define CALLER_SAVES_R7 #define CALLER_SAVES_R8 #define CALLER_SAVES_F5 #define CALLER_SAVES_F6 #define CALLER_SAVES_D5 #define CALLER_SAVES_D6 /* ----------------------------------------------------------------------------- The riscv64 register mapping Register | Role(s) | Call effect ------------+-----------------------------------------+------------- zero | Hard-wired zero | - ra | Return address | caller-saved sp | Stack pointer | callee-saved gp | Global pointer | callee-saved tp | Thread pointer | callee-saved t0,t1,t2 | - | caller-saved s0 | Frame pointer | callee-saved s1 | - | callee-saved a0,a1 | Arguments / return values | caller-saved a2..a7 | Arguments | caller-saved s2..s11 | - | callee-saved t3..t6 | - | caller-saved ft0..ft7 | - | caller-saved fs0,fs1 | - | callee-saved fa0,fa1 | Arguments / return values | caller-saved fa2..fa7 | Arguments | caller-saved fs2..fs11 | - | callee-saved ft8..ft11 | - | caller-saved Each general purpose register as well as each floating-point register is 64 bits wide. 
-------------------------------------------------------------------------- */ #elif defined(MACHREGS_riscv64) #define REG(x) __asm__(#x) #define REG_Base s1 #define REG_Sp s2 #define REG_Hp s3 #define REG_R1 s4 #define REG_R2 s5 #define REG_R3 s6 #define REG_R4 s7 #define REG_R5 s8 #define REG_R6 s9 #define REG_R7 s10 #define REG_SpLim s11 #define REG_F1 fs0 #define REG_F2 fs1 #define REG_F3 fs2 #define REG_F4 fs3 #define REG_F5 fs4 #define REG_F6 fs5 #define REG_D1 fs6 #define REG_D2 fs7 #define REG_D3 fs8 #define REG_D4 fs9 #define REG_D5 fs10 #define REG_D6 fs11 #define MAX_REAL_FLOAT_REG 6 #define MAX_REAL_DOUBLE_REG 6 #else #error Cannot find platform to give register info for #endif #else #error Bad MACHREGS_NO_REGS value #endif /* ----------------------------------------------------------------------------- * These constants define how many stg registers will be used for * passing arguments (and results, in the case of an unboxed-tuple * return). * * We usually set MAX_REAL_VANILLA_REG and co. to be the number of the * highest STG register to occupy a real machine register, otherwise * the calling conventions will needlessly shuffle data between the * stack and memory-resident STG registers. We might occasionally * set these macros to other values for testing, though. * * Registers above these values might still be used, for instance to * communicate with PrimOps and RTS functions. 
*/ #if !defined(MAX_REAL_VANILLA_REG) # if defined(REG_R10) # define MAX_REAL_VANILLA_REG 10 # elif defined(REG_R9) # define MAX_REAL_VANILLA_REG 9 # elif defined(REG_R8) # define MAX_REAL_VANILLA_REG 8 # elif defined(REG_R7) # define MAX_REAL_VANILLA_REG 7 # elif defined(REG_R6) # define MAX_REAL_VANILLA_REG 6 # elif defined(REG_R5) # define MAX_REAL_VANILLA_REG 5 # elif defined(REG_R4) # define MAX_REAL_VANILLA_REG 4 # elif defined(REG_R3) # define MAX_REAL_VANILLA_REG 3 # elif defined(REG_R2) # define MAX_REAL_VANILLA_REG 2 # elif defined(REG_R1) # define MAX_REAL_VANILLA_REG 1 # else # define MAX_REAL_VANILLA_REG 0 # endif #endif #if !defined(MAX_REAL_FLOAT_REG) # if defined(REG_F7) # error Please manually define MAX_REAL_FLOAT_REG for this architecture # elif defined(REG_F6) # define MAX_REAL_FLOAT_REG 6 # elif defined(REG_F5) # define MAX_REAL_FLOAT_REG 5 # elif defined(REG_F4) # define MAX_REAL_FLOAT_REG 4 # elif defined(REG_F3) # define MAX_REAL_FLOAT_REG 3 # elif defined(REG_F2) # define MAX_REAL_FLOAT_REG 2 # elif defined(REG_F1) # define MAX_REAL_FLOAT_REG 1 # else # define MAX_REAL_FLOAT_REG 0 # endif #endif #if !defined(MAX_REAL_DOUBLE_REG) # if defined(REG_D7) # error Please manually define MAX_REAL_DOUBLE_REG for this architecture # elif defined(REG_D6) # define MAX_REAL_DOUBLE_REG 6 # elif defined(REG_D5) # define MAX_REAL_DOUBLE_REG 5 # elif defined(REG_D4) # define MAX_REAL_DOUBLE_REG 4 # elif defined(REG_D3) # define MAX_REAL_DOUBLE_REG 3 # elif defined(REG_D2) # define MAX_REAL_DOUBLE_REG 2 # elif defined(REG_D1) # define MAX_REAL_DOUBLE_REG 1 # else # define MAX_REAL_DOUBLE_REG 0 # endif #endif #if !defined(MAX_REAL_LONG_REG) # if defined(REG_L1) # define MAX_REAL_LONG_REG 1 # else # define MAX_REAL_LONG_REG 0 # endif #endif #if !defined(MAX_REAL_XMM_REG) # if defined(REG_XMM6) # define MAX_REAL_XMM_REG 6 # elif defined(REG_XMM5) # define MAX_REAL_XMM_REG 5 # elif defined(REG_XMM4) # define MAX_REAL_XMM_REG 4 # elif defined(REG_XMM3) # define 
MAX_REAL_XMM_REG 3 # elif defined(REG_XMM2) # define MAX_REAL_XMM_REG 2 # elif defined(REG_XMM1) # define MAX_REAL_XMM_REG 1 # else # define MAX_REAL_XMM_REG 0 # endif #endif /* define NO_ARG_REGS if we have no argument registers at all (we can * optimise certain code paths using this predicate). */ #if MAX_REAL_VANILLA_REG < 2 #define NO_ARG_REGS #else #undef NO_ARG_REGS #endif ghc-lib-parser-9.4.7.20230826/compiler/CodeGen.Platform.h0000644000000000000000000005370214472375231020516 0ustar0000000000000000 import GHC.Cmm.Expr #if !(defined(MACHREGS_i386) || defined(MACHREGS_x86_64) \ || defined(MACHREGS_powerpc) || defined(MACHREGS_aarch64)) import GHC.Utils.Panic.Plain #endif import GHC.Platform.Reg #include "MachRegs.h" #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) # if defined(MACHREGS_i386) # define eax 0 # define ebx 1 # define ecx 2 # define edx 3 # define esi 4 # define edi 5 # define ebp 6 # define esp 7 # endif # if defined(MACHREGS_x86_64) # define rax 0 # define rbx 1 # define rcx 2 # define rdx 3 # define rsi 4 # define rdi 5 # define rbp 6 # define rsp 7 # define r8 8 # define r9 9 # define r10 10 # define r11 11 # define r12 12 # define r13 13 # define r14 14 # define r15 15 # endif -- N.B. XMM, YMM, and ZMM are all aliased to the same hardware registers hence -- being assigned the same RegNos. 
# define xmm0 16 # define xmm1 17 # define xmm2 18 # define xmm3 19 # define xmm4 20 # define xmm5 21 # define xmm6 22 # define xmm7 23 # define xmm8 24 # define xmm9 25 # define xmm10 26 # define xmm11 27 # define xmm12 28 # define xmm13 29 # define xmm14 30 # define xmm15 31 # define ymm0 16 # define ymm1 17 # define ymm2 18 # define ymm3 19 # define ymm4 20 # define ymm5 21 # define ymm6 22 # define ymm7 23 # define ymm8 24 # define ymm9 25 # define ymm10 26 # define ymm11 27 # define ymm12 28 # define ymm13 29 # define ymm14 30 # define ymm15 31 # define zmm0 16 # define zmm1 17 # define zmm2 18 # define zmm3 19 # define zmm4 20 # define zmm5 21 # define zmm6 22 # define zmm7 23 # define zmm8 24 # define zmm9 25 # define zmm10 26 # define zmm11 27 # define zmm12 28 # define zmm13 29 # define zmm14 30 # define zmm15 31 -- Note: these are only needed for ARM/AArch64 because globalRegMaybe is now used in CmmSink.hs. -- Since it's only used to check 'isJust', the actual values don't matter, thus -- I'm not sure if these are the correct numberings. -- Normally, the register names are just stringified as part of the REG() macro #elif defined(MACHREGS_powerpc) || defined(MACHREGS_arm) \ || defined(MACHREGS_aarch64) # define r0 0 # define r1 1 # define r2 2 # define r3 3 # define r4 4 # define r5 5 # define r6 6 # define r7 7 # define r8 8 # define r9 9 # define r10 10 # define r11 11 # define r12 12 # define r13 13 # define r14 14 # define r15 15 # define r16 16 # define r17 17 # define r18 18 # define r19 19 # define r20 20 # define r21 21 # define r22 22 # define r23 23 # define r24 24 # define r25 25 # define r26 26 # define r27 27 # define r28 28 # define r29 29 # define r30 30 # define r31 31 -- See note above. These aren't actually used for anything except satisfying the compiler for globalRegMaybe -- so I'm unsure if they're the correct numberings, should they ever be attempted to be used in the NCG. 
#if defined(MACHREGS_aarch64) || defined(MACHREGS_arm) # define s0 32 # define s1 33 # define s2 34 # define s3 35 # define s4 36 # define s5 37 # define s6 38 # define s7 39 # define s8 40 # define s9 41 # define s10 42 # define s11 43 # define s12 44 # define s13 45 # define s14 46 # define s15 47 # define s16 48 # define s17 49 # define s18 50 # define s19 51 # define s20 52 # define s21 53 # define s22 54 # define s23 55 # define s24 56 # define s25 57 # define s26 58 # define s27 59 # define s28 60 # define s29 61 # define s30 62 # define s31 63 # define d0 32 # define d1 33 # define d2 34 # define d3 35 # define d4 36 # define d5 37 # define d6 38 # define d7 39 # define d8 40 # define d9 41 # define d10 42 # define d11 43 # define d12 44 # define d13 45 # define d14 46 # define d15 47 # define d16 48 # define d17 49 # define d18 50 # define d19 51 # define d20 52 # define d21 53 # define d22 54 # define d23 55 # define d24 56 # define d25 57 # define d26 58 # define d27 59 # define d28 60 # define d29 61 # define d30 62 # define d31 63 #endif # if defined(MACHREGS_darwin) # define f0 32 # define f1 33 # define f2 34 # define f3 35 # define f4 36 # define f5 37 # define f6 38 # define f7 39 # define f8 40 # define f9 41 # define f10 42 # define f11 43 # define f12 44 # define f13 45 # define f14 46 # define f15 47 # define f16 48 # define f17 49 # define f18 50 # define f19 51 # define f20 52 # define f21 53 # define f22 54 # define f23 55 # define f24 56 # define f25 57 # define f26 58 # define f27 59 # define f28 60 # define f29 61 # define f30 62 # define f31 63 # else # define fr0 32 # define fr1 33 # define fr2 34 # define fr3 35 # define fr4 36 # define fr5 37 # define fr6 38 # define fr7 39 # define fr8 40 # define fr9 41 # define fr10 42 # define fr11 43 # define fr12 44 # define fr13 45 # define fr14 46 # define fr15 47 # define fr16 48 # define fr17 49 # define fr18 50 # define fr19 51 # define fr20 52 # define fr21 53 # define fr22 54 # define fr23 
55 # define fr24 56 # define fr25 57 # define fr26 58 # define fr27 59 # define fr28 60 # define fr29 61 # define fr30 62 # define fr31 63 # endif #elif defined(MACHREGS_s390x) # define r0 0 # define r1 1 # define r2 2 # define r3 3 # define r4 4 # define r5 5 # define r6 6 # define r7 7 # define r8 8 # define r9 9 # define r10 10 # define r11 11 # define r12 12 # define r13 13 # define r14 14 # define r15 15 # define f0 16 # define f1 17 # define f2 18 # define f3 19 # define f4 20 # define f5 21 # define f6 22 # define f7 23 # define f8 24 # define f9 25 # define f10 26 # define f11 27 # define f12 28 # define f13 29 # define f14 30 # define f15 31 #elif defined(MACHREGS_riscv64) # define zero 0 # define ra 1 # define sp 2 # define gp 3 # define tp 4 # define t0 5 # define t1 6 # define t2 7 # define s0 8 # define s1 9 # define a0 10 # define a1 11 # define a2 12 # define a3 13 # define a4 14 # define a5 15 # define a6 16 # define a7 17 # define s2 18 # define s3 19 # define s4 20 # define s5 21 # define s6 22 # define s7 23 # define s8 24 # define s9 25 # define s10 26 # define s11 27 # define t3 28 # define t4 29 # define t5 30 # define t6 31 # define ft0 32 # define ft1 33 # define ft2 34 # define ft3 35 # define ft4 36 # define ft5 37 # define ft6 38 # define ft7 39 # define fs0 40 # define fs1 41 # define fa0 42 # define fa1 43 # define fa2 44 # define fa3 45 # define fa4 46 # define fa5 47 # define fa6 48 # define fa7 49 # define fs2 50 # define fs3 51 # define fs4 52 # define fs5 53 # define fs6 54 # define fs7 55 # define fs8 56 # define fs9 57 # define fs10 58 # define fs11 59 # define ft8 60 # define ft9 61 # define ft10 62 # define ft11 63 #endif callerSaves :: GlobalReg -> Bool #if defined(CALLER_SAVES_Base) callerSaves BaseReg = True #endif #if defined(CALLER_SAVES_R1) callerSaves (VanillaReg 1 _) = True #endif #if defined(CALLER_SAVES_R2) callerSaves (VanillaReg 2 _) = True #endif #if defined(CALLER_SAVES_R3) callerSaves (VanillaReg 3 _) = True 
#endif #if defined(CALLER_SAVES_R4) callerSaves (VanillaReg 4 _) = True #endif #if defined(CALLER_SAVES_R5) callerSaves (VanillaReg 5 _) = True #endif #if defined(CALLER_SAVES_R6) callerSaves (VanillaReg 6 _) = True #endif #if defined(CALLER_SAVES_R7) callerSaves (VanillaReg 7 _) = True #endif #if defined(CALLER_SAVES_R8) callerSaves (VanillaReg 8 _) = True #endif #if defined(CALLER_SAVES_R9) callerSaves (VanillaReg 9 _) = True #endif #if defined(CALLER_SAVES_R10) callerSaves (VanillaReg 10 _) = True #endif #if defined(CALLER_SAVES_F1) callerSaves (FloatReg 1) = True #endif #if defined(CALLER_SAVES_F2) callerSaves (FloatReg 2) = True #endif #if defined(CALLER_SAVES_F3) callerSaves (FloatReg 3) = True #endif #if defined(CALLER_SAVES_F4) callerSaves (FloatReg 4) = True #endif #if defined(CALLER_SAVES_F5) callerSaves (FloatReg 5) = True #endif #if defined(CALLER_SAVES_F6) callerSaves (FloatReg 6) = True #endif #if defined(CALLER_SAVES_D1) callerSaves (DoubleReg 1) = True #endif #if defined(CALLER_SAVES_D2) callerSaves (DoubleReg 2) = True #endif #if defined(CALLER_SAVES_D3) callerSaves (DoubleReg 3) = True #endif #if defined(CALLER_SAVES_D4) callerSaves (DoubleReg 4) = True #endif #if defined(CALLER_SAVES_D5) callerSaves (DoubleReg 5) = True #endif #if defined(CALLER_SAVES_D6) callerSaves (DoubleReg 6) = True #endif #if defined(CALLER_SAVES_L1) callerSaves (LongReg 1) = True #endif #if defined(CALLER_SAVES_Sp) callerSaves Sp = True #endif #if defined(CALLER_SAVES_SpLim) callerSaves SpLim = True #endif #if defined(CALLER_SAVES_Hp) callerSaves Hp = True #endif #if defined(CALLER_SAVES_HpLim) callerSaves HpLim = True #endif #if defined(CALLER_SAVES_CCCS) callerSaves CCCS = True #endif #if defined(CALLER_SAVES_CurrentTSO) callerSaves CurrentTSO = True #endif #if defined(CALLER_SAVES_CurrentNursery) callerSaves CurrentNursery = True #endif callerSaves _ = False activeStgRegs :: [GlobalReg] activeStgRegs = [ #if defined(REG_Base) BaseReg #endif #if defined(REG_Sp) ,Sp 
#endif #if defined(REG_Hp) ,Hp #endif #if defined(REG_R1) ,VanillaReg 1 VGcPtr #endif #if defined(REG_R2) ,VanillaReg 2 VGcPtr #endif #if defined(REG_R3) ,VanillaReg 3 VGcPtr #endif #if defined(REG_R4) ,VanillaReg 4 VGcPtr #endif #if defined(REG_R5) ,VanillaReg 5 VGcPtr #endif #if defined(REG_R6) ,VanillaReg 6 VGcPtr #endif #if defined(REG_R7) ,VanillaReg 7 VGcPtr #endif #if defined(REG_R8) ,VanillaReg 8 VGcPtr #endif #if defined(REG_R9) ,VanillaReg 9 VGcPtr #endif #if defined(REG_R10) ,VanillaReg 10 VGcPtr #endif #if defined(REG_SpLim) ,SpLim #endif #if MAX_REAL_XMM_REG != 0 #if defined(REG_F1) ,FloatReg 1 #endif #if defined(REG_D1) ,DoubleReg 1 #endif #if defined(REG_XMM1) ,XmmReg 1 #endif #if defined(REG_YMM1) ,YmmReg 1 #endif #if defined(REG_ZMM1) ,ZmmReg 1 #endif #if defined(REG_F2) ,FloatReg 2 #endif #if defined(REG_D2) ,DoubleReg 2 #endif #if defined(REG_XMM2) ,XmmReg 2 #endif #if defined(REG_YMM2) ,YmmReg 2 #endif #if defined(REG_ZMM2) ,ZmmReg 2 #endif #if defined(REG_F3) ,FloatReg 3 #endif #if defined(REG_D3) ,DoubleReg 3 #endif #if defined(REG_XMM3) ,XmmReg 3 #endif #if defined(REG_YMM3) ,YmmReg 3 #endif #if defined(REG_ZMM3) ,ZmmReg 3 #endif #if defined(REG_F4) ,FloatReg 4 #endif #if defined(REG_D4) ,DoubleReg 4 #endif #if defined(REG_XMM4) ,XmmReg 4 #endif #if defined(REG_YMM4) ,YmmReg 4 #endif #if defined(REG_ZMM4) ,ZmmReg 4 #endif #if defined(REG_F5) ,FloatReg 5 #endif #if defined(REG_D5) ,DoubleReg 5 #endif #if defined(REG_XMM5) ,XmmReg 5 #endif #if defined(REG_YMM5) ,YmmReg 5 #endif #if defined(REG_ZMM5) ,ZmmReg 5 #endif #if defined(REG_F6) ,FloatReg 6 #endif #if defined(REG_D6) ,DoubleReg 6 #endif #if defined(REG_XMM6) ,XmmReg 6 #endif #if defined(REG_YMM6) ,YmmReg 6 #endif #if defined(REG_ZMM6) ,ZmmReg 6 #endif #else /* MAX_REAL_XMM_REG == 0 */ #if defined(REG_F1) ,FloatReg 1 #endif #if defined(REG_F2) ,FloatReg 2 #endif #if defined(REG_F3) ,FloatReg 3 #endif #if defined(REG_F4) ,FloatReg 4 #endif #if defined(REG_F5) ,FloatReg 5 #endif #if 
defined(REG_F6) ,FloatReg 6 #endif #if defined(REG_D1) ,DoubleReg 1 #endif #if defined(REG_D2) ,DoubleReg 2 #endif #if defined(REG_D3) ,DoubleReg 3 #endif #if defined(REG_D4) ,DoubleReg 4 #endif #if defined(REG_D5) ,DoubleReg 5 #endif #if defined(REG_D6) ,DoubleReg 6 #endif #endif /* MAX_REAL_XMM_REG == 0 */ ] haveRegBase :: Bool #if defined(REG_Base) haveRegBase = True #else haveRegBase = False #endif -- | Returns 'Nothing' if this global register is not stored -- in a real machine register, otherwise returns @'Just' reg@, where -- reg is the machine register it is stored in. globalRegMaybe :: GlobalReg -> Maybe RealReg #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) \ || defined(MACHREGS_powerpc) \ || defined(MACHREGS_arm) || defined(MACHREGS_aarch64) \ || defined(MACHREGS_s390x) || defined(MACHREGS_riscv64) # if defined(REG_Base) globalRegMaybe BaseReg = Just (RealRegSingle REG_Base) # endif # if defined(REG_R1) globalRegMaybe (VanillaReg 1 _) = Just (RealRegSingle REG_R1) # endif # if defined(REG_R2) globalRegMaybe (VanillaReg 2 _) = Just (RealRegSingle REG_R2) # endif # if defined(REG_R3) globalRegMaybe (VanillaReg 3 _) = Just (RealRegSingle REG_R3) # endif # if defined(REG_R4) globalRegMaybe (VanillaReg 4 _) = Just (RealRegSingle REG_R4) # endif # if defined(REG_R5) globalRegMaybe (VanillaReg 5 _) = Just (RealRegSingle REG_R5) # endif # if defined(REG_R6) globalRegMaybe (VanillaReg 6 _) = Just (RealRegSingle REG_R6) # endif # if defined(REG_R7) globalRegMaybe (VanillaReg 7 _) = Just (RealRegSingle REG_R7) # endif # if defined(REG_R8) globalRegMaybe (VanillaReg 8 _) = Just (RealRegSingle REG_R8) # endif # if defined(REG_R9) globalRegMaybe (VanillaReg 9 _) = Just (RealRegSingle REG_R9) # endif # if defined(REG_R10) globalRegMaybe (VanillaReg 10 _) = Just (RealRegSingle REG_R10) # endif # if defined(REG_F1) globalRegMaybe (FloatReg 1) = Just (RealRegSingle REG_F1) # endif # if defined(REG_F2) globalRegMaybe (FloatReg 2) = Just (RealRegSingle REG_F2) # 
endif # if defined(REG_F3) globalRegMaybe (FloatReg 3) = Just (RealRegSingle REG_F3) # endif # if defined(REG_F4) globalRegMaybe (FloatReg 4) = Just (RealRegSingle REG_F4) # endif # if defined(REG_F5) globalRegMaybe (FloatReg 5) = Just (RealRegSingle REG_F5) # endif # if defined(REG_F6) globalRegMaybe (FloatReg 6) = Just (RealRegSingle REG_F6) # endif # if defined(REG_D1) globalRegMaybe (DoubleReg 1) = Just (RealRegSingle REG_D1) # endif # if defined(REG_D2) globalRegMaybe (DoubleReg 2) = Just (RealRegSingle REG_D2) # endif # if defined(REG_D3) globalRegMaybe (DoubleReg 3) = Just (RealRegSingle REG_D3) # endif # if defined(REG_D4) globalRegMaybe (DoubleReg 4) = Just (RealRegSingle REG_D4) # endif # if defined(REG_D5) globalRegMaybe (DoubleReg 5) = Just (RealRegSingle REG_D5) # endif # if defined(REG_D6) globalRegMaybe (DoubleReg 6) = Just (RealRegSingle REG_D6) # endif # if MAX_REAL_XMM_REG != 0 # if defined(REG_XMM1) globalRegMaybe (XmmReg 1) = Just (RealRegSingle REG_XMM1) # endif # if defined(REG_XMM2) globalRegMaybe (XmmReg 2) = Just (RealRegSingle REG_XMM2) # endif # if defined(REG_XMM3) globalRegMaybe (XmmReg 3) = Just (RealRegSingle REG_XMM3) # endif # if defined(REG_XMM4) globalRegMaybe (XmmReg 4) = Just (RealRegSingle REG_XMM4) # endif # if defined(REG_XMM5) globalRegMaybe (XmmReg 5) = Just (RealRegSingle REG_XMM5) # endif # if defined(REG_XMM6) globalRegMaybe (XmmReg 6) = Just (RealRegSingle REG_XMM6) # endif # endif # if defined(MAX_REAL_YMM_REG) && MAX_REAL_YMM_REG != 0 # if defined(REG_YMM1) globalRegMaybe (YmmReg 1) = Just (RealRegSingle REG_YMM1) # endif # if defined(REG_YMM2) globalRegMaybe (YmmReg 2) = Just (RealRegSingle REG_YMM2) # endif # if defined(REG_YMM3) globalRegMaybe (YmmReg 3) = Just (RealRegSingle REG_YMM3) # endif # if defined(REG_YMM4) globalRegMaybe (YmmReg 4) = Just (RealRegSingle REG_YMM4) # endif # if defined(REG_YMM5) globalRegMaybe (YmmReg 5) = Just (RealRegSingle REG_YMM5) # endif # if defined(REG_YMM6) globalRegMaybe (YmmReg 
6) = Just (RealRegSingle REG_YMM6) # endif # endif # if defined(MAX_REAL_ZMM_REG) && MAX_REAL_ZMM_REG != 0 # if defined(REG_ZMM1) globalRegMaybe (ZmmReg 1) = Just (RealRegSingle REG_ZMM1) # endif # if defined(REG_ZMM2) globalRegMaybe (ZmmReg 2) = Just (RealRegSingle REG_ZMM2) # endif # if defined(REG_ZMM3) globalRegMaybe (ZmmReg 3) = Just (RealRegSingle REG_ZMM3) # endif # if defined(REG_ZMM4) globalRegMaybe (ZmmReg 4) = Just (RealRegSingle REG_ZMM4) # endif # if defined(REG_ZMM5) globalRegMaybe (ZmmReg 5) = Just (RealRegSingle REG_ZMM5) # endif # if defined(REG_ZMM6) globalRegMaybe (ZmmReg 6) = Just (RealRegSingle REG_ZMM6) # endif # endif # if defined(REG_Sp) globalRegMaybe Sp = Just (RealRegSingle REG_Sp) # endif # if defined(REG_Lng1) globalRegMaybe (LongReg 1) = Just (RealRegSingle REG_Lng1) # endif # if defined(REG_Lng2) globalRegMaybe (LongReg 2) = Just (RealRegSingle REG_Lng2) # endif # if defined(REG_SpLim) globalRegMaybe SpLim = Just (RealRegSingle REG_SpLim) # endif # if defined(REG_Hp) globalRegMaybe Hp = Just (RealRegSingle REG_Hp) # endif # if defined(REG_HpLim) globalRegMaybe HpLim = Just (RealRegSingle REG_HpLim) # endif # if defined(REG_CurrentTSO) globalRegMaybe CurrentTSO = Just (RealRegSingle REG_CurrentTSO) # endif # if defined(REG_CurrentNursery) globalRegMaybe CurrentNursery = Just (RealRegSingle REG_CurrentNursery) # endif # if defined(REG_MachSp) globalRegMaybe MachSp = Just (RealRegSingle REG_MachSp) # endif globalRegMaybe _ = Nothing #elif defined(MACHREGS_NO_REGS) globalRegMaybe _ = Nothing #else globalRegMaybe = panic "globalRegMaybe not defined for this platform" #endif freeReg :: RegNo -> Bool #if defined(MACHREGS_i386) || defined(MACHREGS_x86_64) # if defined(MACHREGS_i386) freeReg esp = False -- %esp is the C stack pointer freeReg esi = False -- See Note [esi/edi/ebp not allocatable] freeReg edi = False freeReg ebp = False # endif # if defined(MACHREGS_x86_64) freeReg rsp = False -- %rsp is the C stack pointer # endif {- Note 
[esi/edi/ebp not allocatable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %esi is mapped to R1, so %esi would normally be allocatable while it is not being used for R1. However, %esi has no 8-bit version on x86, and the linear register allocator is not sophisticated enough to handle this irregularity (we need more RegClasses). The graph-colouring allocator also cannot handle this - it was designed with more flexibility in mind, but the current implementation is restricted to the same set of classes as the linear allocator. Hence, on x86 esi, edi and ebp are treated as not allocatable. -} -- split patterns in two functions to prevent overlaps freeReg r = freeRegBase r freeRegBase :: RegNo -> Bool # if defined(REG_Base) freeRegBase REG_Base = False # endif # if defined(REG_Sp) freeRegBase REG_Sp = False # endif # if defined(REG_SpLim) freeRegBase REG_SpLim = False # endif # if defined(REG_Hp) freeRegBase REG_Hp = False # endif # if defined(REG_HpLim) freeRegBase REG_HpLim = False # endif -- All other regs are considered to be "free", because we can track -- their liveness accurately. freeRegBase _ = True #elif defined(MACHREGS_powerpc) freeReg 0 = False -- Used by code setting the back chain pointer -- in stack reallocations on Linux. -- Moreover r0 is not usable in all insns. freeReg 1 = False -- The Stack Pointer -- most ELF PowerPC OSes use r2 as a TOC pointer freeReg 2 = False freeReg 13 = False -- reserved for system thread ID on 64 bit -- at least linux in -fPIC relies on r30 in PLT stubs freeReg 30 = False {- TODO: reserve r13 on 64 bit systems only and r30 on 32 bit respectively. For now we use r30 on 64 bit and r13 on 32 bit as a temporary register in stack handling code. See compiler/GHC/CmmToAsm/PPC/Instr.hs. Later we might want to reserve r13 and r30 only where it is required. Then use r12 as temporary register, which is also what the C ABI does. 
-} # if defined(REG_Base) freeReg REG_Base = False # endif # if defined(REG_Sp) freeReg REG_Sp = False # endif # if defined(REG_SpLim) freeReg REG_SpLim = False # endif # if defined(REG_Hp) freeReg REG_Hp = False # endif # if defined(REG_HpLim) freeReg REG_HpLim = False # endif freeReg _ = True #elif defined(MACHREGS_aarch64) -- stack pointer / zero reg freeReg 31 = False -- link register freeReg 30 = False -- frame pointer freeReg 29 = False -- ip0 -- used for spill offset computations freeReg 16 = False #if defined(darwin_HOST_OS) || defined(ios_HOST_OS) -- x18 is reserved by the platform on Darwin/iOS, and can not be used -- More about ARM64 ABI that Apple platforms support: -- https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms -- https://github.com/Siguza/ios-resources/blob/master/bits/arm64.md freeReg 18 = False #endif # if defined(REG_Base) freeReg REG_Base = False # endif # if defined(REG_Sp) freeReg REG_Sp = False # endif # if defined(REG_SpLim) freeReg REG_SpLim = False # endif # if defined(REG_Hp) freeReg REG_Hp = False # endif # if defined(REG_HpLim) freeReg REG_HpLim = False # endif # if defined(REG_R1) freeReg REG_R1 = False # endif # if defined(REG_R2) freeReg REG_R2 = False # endif # if defined(REG_R3) freeReg REG_R3 = False # endif # if defined(REG_R4) freeReg REG_R4 = False # endif # if defined(REG_R5) freeReg REG_R5 = False # endif # if defined(REG_R6) freeReg REG_R6 = False # endif # if defined(REG_R7) freeReg REG_R7 = False # endif # if defined(REG_R8) freeReg REG_R8 = False # endif # if defined(REG_F1) freeReg REG_F1 = False # endif # if defined(REG_F2) freeReg REG_F2 = False # endif # if defined(REG_F3) freeReg REG_F3 = False # endif # if defined(REG_F4) freeReg REG_F4 = False # endif # if defined(REG_F5) freeReg REG_F5 = False # endif # if defined(REG_F6) freeReg REG_F6 = False # endif # if defined(REG_D1) freeReg REG_D1 = False # endif # if defined(REG_D2) freeReg REG_D2 = False # endif # if 
defined(REG_D3) freeReg REG_D3 = False # endif # if defined(REG_D4) freeReg REG_D4 = False # endif # if defined(REG_D5) freeReg REG_D5 = False # endif # if defined(REG_D6) freeReg REG_D6 = False # endif freeReg _ = True #else freeReg = panic "freeReg not defined for this platform" #endif ghc-lib-parser-9.4.7.20230826/compiler/Bytecodes.h0000644000000000000000000001160114472377771017353 0ustar0000000000000000/* ----------------------------------------------------------------------------- * * (c) The GHC Team, 1998-2009 * * Bytecode definitions. * * ---------------------------------------------------------------------------*/ /* -------------------------------------------------------------------------- * Instructions * * Notes: * o CASEFAIL is generated by the compiler whenever it tests an "irrefutable" * pattern which fails. If we don't see too many of these, we could * optimise out the redundant test. * ------------------------------------------------------------------------*/ /* NOTE: THIS FILE IS INCLUDED IN HASKELL SOURCES (ghc/compiler/GHC/ByteCode/Asm.hs). DO NOT PUT C-SPECIFIC STUFF IN HERE! 
I hope that's clear :-) */ #define bci_STKCHECK 1 #define bci_PUSH_L 2 #define bci_PUSH_LL 3 #define bci_PUSH_LLL 4 #define bci_PUSH8 5 #define bci_PUSH16 6 #define bci_PUSH32 7 #define bci_PUSH8_W 8 #define bci_PUSH16_W 9 #define bci_PUSH32_W 10 #define bci_PUSH_G 11 #define bci_PUSH_ALTS_P 13 #define bci_PUSH_ALTS_N 14 #define bci_PUSH_ALTS_F 15 #define bci_PUSH_ALTS_D 16 #define bci_PUSH_ALTS_L 17 #define bci_PUSH_ALTS_V 18 #define bci_PUSH_PAD8 19 #define bci_PUSH_PAD16 20 #define bci_PUSH_PAD32 21 #define bci_PUSH_UBX8 22 #define bci_PUSH_UBX16 23 #define bci_PUSH_UBX32 24 #define bci_PUSH_UBX 25 #define bci_PUSH_APPLY_N 26 #define bci_PUSH_APPLY_F 27 #define bci_PUSH_APPLY_D 28 #define bci_PUSH_APPLY_L 29 #define bci_PUSH_APPLY_V 30 #define bci_PUSH_APPLY_P 31 #define bci_PUSH_APPLY_PP 32 #define bci_PUSH_APPLY_PPP 33 #define bci_PUSH_APPLY_PPPP 34 #define bci_PUSH_APPLY_PPPPP 35 #define bci_PUSH_APPLY_PPPPPP 36 /* #define bci_PUSH_APPLY_PPPPPPP 37 */ #define bci_SLIDE 38 #define bci_ALLOC_AP 39 #define bci_ALLOC_AP_NOUPD 40 #define bci_ALLOC_PAP 41 #define bci_MKAP 42 #define bci_MKPAP 43 #define bci_UNPACK 44 #define bci_PACK 45 #define bci_TESTLT_I 46 #define bci_TESTEQ_I 47 #define bci_TESTLT_F 48 #define bci_TESTEQ_F 49 #define bci_TESTLT_D 50 #define bci_TESTEQ_D 51 #define bci_TESTLT_P 52 #define bci_TESTEQ_P 53 #define bci_CASEFAIL 54 #define bci_JMP 55 #define bci_CCALL 56 #define bci_SWIZZLE 57 #define bci_ENTER 58 #define bci_RETURN_P 60 #define bci_RETURN_N 61 #define bci_RETURN_F 62 #define bci_RETURN_D 63 #define bci_RETURN_L 64 #define bci_RETURN_V 65 #define bci_BRK_FUN 66 #define bci_TESTLT_W 67 #define bci_TESTEQ_W 68 #define bci_RETURN_T 69 #define bci_PUSH_ALTS_T 70 #define bci_TESTLT_I64 71 #define bci_TESTEQ_I64 72 #define bci_TESTLT_I32 73 #define bci_TESTEQ_I32 74 #define bci_TESTLT_I16 75 #define bci_TESTEQ_I16 76 #define bci_TESTLT_I8 77 #define bci_TESTEQ_I8 78 #define bci_TESTLT_W64 79 #define bci_TESTEQ_W64 80 #define 
bci_TESTLT_W32 81 #define bci_TESTEQ_W32 82 #define bci_TESTLT_W16 83 #define bci_TESTEQ_W16 84 #define bci_TESTLT_W8 85 #define bci_TESTEQ_W8 86 #define bci_PRIMCALL 87 /* If you need to go past 255 then you will run into the flags */ /* If you need to go below 0x0100 then you will run into the instructions */ #define bci_FLAG_LARGE_ARGS 0x8000 /* If a BCO definitely requires less than this many words of stack, don't include an explicit STKCHECK insn in it. The interpreter will check for this many words of stack before running each BCO, rendering an explicit check unnecessary in the majority of cases. */ #define INTERP_STACK_CHECK_THRESH 50 /*-------------------------------------------------------------------------*/ ghc-lib-parser-9.4.7.20230826/compiler/ClosureTypes.h0000644000000000000000000000647514472377771020110 0ustar0000000000000000/* ---------------------------------------------------------------------------- * * (c) The GHC Team, 1998-2005 * * Closure Type Constants: out here because the native code generator * needs to get at them. * * -------------------------------------------------------------------------- */ #pragma once /* * WARNING WARNING WARNING * * If you add or delete any closure types, don't forget to update the following, * - the closure flags table in rts/ClosureFlags.c * - isRetainer in rts/RetainerProfile.c * - the closure_type_names list in rts/Printer.c */ /* CONSTR/THUNK/FUN_$A_$B mean they have $A pointers followed by $B * non-pointers in their payloads. 
*/ /* Object tag 0 raises an internal error */ #define INVALID_OBJECT 0 #define CONSTR 1 #define CONSTR_1_0 2 #define CONSTR_0_1 3 #define CONSTR_2_0 4 #define CONSTR_1_1 5 #define CONSTR_0_2 6 #define CONSTR_NOCAF 7 #define FUN 8 #define FUN_1_0 9 #define FUN_0_1 10 #define FUN_2_0 11 #define FUN_1_1 12 #define FUN_0_2 13 #define FUN_STATIC 14 #define THUNK 15 #define THUNK_1_0 16 #define THUNK_0_1 17 #define THUNK_2_0 18 #define THUNK_1_1 19 #define THUNK_0_2 20 #define THUNK_STATIC 21 #define THUNK_SELECTOR 22 #define BCO 23 #define AP 24 #define PAP 25 #define AP_STACK 26 #define IND 27 #define IND_STATIC 28 #define RET_BCO 29 #define RET_SMALL 30 #define RET_BIG 31 #define RET_FUN 32 #define UPDATE_FRAME 33 #define CATCH_FRAME 34 #define UNDERFLOW_FRAME 35 #define STOP_FRAME 36 #define BLOCKING_QUEUE 37 #define BLACKHOLE 38 #define MVAR_CLEAN 39 #define MVAR_DIRTY 40 #define TVAR 41 #define ARR_WORDS 42 #define MUT_ARR_PTRS_CLEAN 43 #define MUT_ARR_PTRS_DIRTY 44 #define MUT_ARR_PTRS_FROZEN_DIRTY 45 #define MUT_ARR_PTRS_FROZEN_CLEAN 46 #define MUT_VAR_CLEAN 47 #define MUT_VAR_DIRTY 48 #define WEAK 49 #define PRIM 50 #define MUT_PRIM 51 #define TSO 52 #define STACK 53 #define TREC_CHUNK 54 #define ATOMICALLY_FRAME 55 #define CATCH_RETRY_FRAME 56 #define CATCH_STM_FRAME 57 #define WHITEHOLE 58 #define SMALL_MUT_ARR_PTRS_CLEAN 59 #define SMALL_MUT_ARR_PTRS_DIRTY 60 #define SMALL_MUT_ARR_PTRS_FROZEN_DIRTY 61 #define SMALL_MUT_ARR_PTRS_FROZEN_CLEAN 62 #define COMPACT_NFDATA 63 #define N_CLOSURE_TYPES 64 ghc-lib-parser-9.4.7.20230826/compiler/FunTypes.h0000644000000000000000000000257214472377771017216 0ustar0000000000000000/* ----------------------------------------------------------------------------- * * (c) The GHC Team, 2002 * * Things for functions. 
* * ---------------------------------------------------------------------------*/ #pragma once /* generic - function comes with a small bitmap */ #define ARG_GEN 0 /* generic - function comes with a large bitmap */ #define ARG_GEN_BIG 1 /* BCO - function is really a BCO */ #define ARG_BCO 2 /* * Specialised function types: bitmaps and calling sequences * for these functions are pre-generated: see ghc/utils/genapply and * generated code in ghc/rts/AutoApply.cmm. * * NOTE: other places to change if you change this table: * - utils/genapply/Main.hs: stackApplyTypes * - GHC.StgToCmm.Layout: stdPattern */ #define ARG_NONE 3 #define ARG_N 4 #define ARG_P 5 #define ARG_F 6 #define ARG_D 7 #define ARG_L 8 #define ARG_V16 9 #define ARG_V32 10 #define ARG_V64 11 #define ARG_NN 12 #define ARG_NP 13 #define ARG_PN 14 #define ARG_PP 15 #define ARG_NNN 16 #define ARG_NNP 17 #define ARG_NPN 18 #define ARG_NPP 19 #define ARG_PNN 20 #define ARG_PNP 21 #define ARG_PPN 22 #define ARG_PPP 23 #define ARG_PPPP 24 #define ARG_PPPPP 25 #define ARG_PPPPPP 26 #define ARG_PPPPPPP 27 #define ARG_PPPPPPPP 28 ghc-lib-parser-9.4.7.20230826/compiler/Unique.h0000644000000000000000000000024014470055371016660 0ustar0000000000000000/* unique has the following structure: * HsInt unique = * (unique_tag << (sizeof (HsInt) - UNIQUE_TAG_BITS)) | unique_number */ #define UNIQUE_TAG_BITS 8 ghc-lib-parser-9.4.7.20230826/compiler/ghc-llvm-version.h0000644000000000000000000000056314472400056020612 0ustar0000000000000000/* compiler/ghc-llvm-version.h. Generated from ghc-llvm-version.h.in by configure. 
*/ #if !defined(__GHC_LLVM_VERSION_H__) #define __GHC_LLVM_VERSION_H__ /* The maximum supported LLVM version number */ #define sUPPORTED_LLVM_VERSION_MAX (15) /* The minimum supported LLVM version number */ #define sUPPORTED_LLVM_VERSION_MIN (10) #endif /* __GHC_LLVM_VERSION_H__ */ ghc-lib-parser-9.4.7.20230826/LICENSE0000644000000000000000000000311314470055370014435 0ustar0000000000000000The Glasgow Haskell Compiler License Copyright 2002, The University Court of the University of Glasgow. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
ghc-lib-parser-9.4.7.20230826/ghc-lib-parser.cabal0000644000000000000000000003417214472400127017220 0ustar0000000000000000cabal-version: 2.0 build-type: Simple name: ghc-lib-parser version: 9.4.7.20230826 license: BSD3 license-file: LICENSE category: Development author: The GHC Team and Digital Asset maintainer: Digital Asset synopsis: The GHC API, decoupled from GHC versions description: A package equivalent to the @ghc@ package, but which can be loaded on many compiler versions. homepage: https://github.com/digital-asset/ghc-lib bug-reports: https://github.com/digital-asset/ghc-lib/issues data-dir: ghc-lib/stage0/lib data-files: settings llvm-targets llvm-passes extra-source-files: ghc-lib/stage0/rts/build/include/ghcautoconf.h ghc-lib/stage0/rts/build/include/ghcplatform.h ghc-lib/stage0/rts/build/include/GhclibDerivedConstants.h ghc-lib/stage0/compiler/build/primop-can-fail.hs-incl ghc-lib/stage0/compiler/build/primop-code-size.hs-incl ghc-lib/stage0/compiler/build/primop-commutable.hs-incl ghc-lib/stage0/compiler/build/primop-data-decl.hs-incl ghc-lib/stage0/compiler/build/primop-fixity.hs-incl ghc-lib/stage0/compiler/build/primop-has-side-effects.hs-incl ghc-lib/stage0/compiler/build/primop-list.hs-incl ghc-lib/stage0/compiler/build/primop-out-of-line.hs-incl ghc-lib/stage0/compiler/build/primop-primop-info.hs-incl ghc-lib/stage0/compiler/build/primop-strictness.hs-incl ghc-lib/stage0/compiler/build/primop-tag.hs-incl ghc-lib/stage0/compiler/build/primop-vector-tycons.hs-incl ghc-lib/stage0/compiler/build/primop-vector-tys-exports.hs-incl ghc-lib/stage0/compiler/build/primop-vector-tys.hs-incl ghc-lib/stage0/compiler/build/primop-vector-uniques.hs-incl ghc-lib/stage0/compiler/build/primop-docs.hs-incl ghc-lib/stage0/compiler/build/GHC/Platform/Constants.hs ghc-lib/stage0/compiler/build/GHC/Settings/Config.hs ghc-lib/stage0/libraries/ghc-boot/build/GHC/Version.hs ghc-lib/stage0/libraries/ghc-boot/build/GHC/Platform/Host.hs compiler/GHC/Parser.y 
compiler/GHC/Parser/Lexer.x compiler/GHC/Parser/HaddockLex.x compiler/GHC/Parser.hs-boot rts/include/ghcconfig.h compiler/MachRegs.h compiler/CodeGen.Platform.h compiler/Bytecodes.h compiler/ClosureTypes.h compiler/FunTypes.h compiler/Unique.h compiler/ghc-llvm-version.h source-repository head type: git location: git@github.com:digital-asset/ghc-lib.git flag threaded-rts default: True manual: True description: Pass -DTHREADED_RTS to the C toolchain library default-language: Haskell2010 exposed: False include-dirs: rts/include ghc-lib/stage0/lib ghc-lib/stage0/compiler/build compiler if flag(threaded-rts) ghc-options: -fobject-code -package=ghc-boot-th -optc-DTHREADED_RTS cc-options: -DTHREADED_RTS cpp-options: -DTHREADED_RTS else ghc-options: -fobject-code -package=ghc-boot-th cpp-options: if !os(windows) build-depends: unix else build-depends: Win32 build-depends: base >= 4.15 && < 4.18, ghc-prim > 0.2 && < 0.10, containers >= 0.5 && < 0.7, bytestring >= 0.10 && < 0.12, time >= 1.4 && < 1.13, exceptions == 0.10.*, parsec, binary == 0.8.*, filepath >= 1 && < 1.5, directory >= 1 && < 1.4, array >= 0.1 && < 0.6, deepseq >= 1.4 && < 1.6, pretty == 1.1.*, transformers >= 0.5 && < 0.7, process >= 1 && < 1.7 build-tool-depends: alex:alex >= 3.1, happy:happy >= 1.19.4 other-extensions: BangPatterns CPP DataKinds DefaultSignatures DeriveDataTypeable DeriveFoldable DeriveFunctor DeriveGeneric DeriveTraversable DisambiguateRecordFields ExistentialQuantification ExplicitForAll FlexibleContexts FlexibleInstances GADTs GeneralizedNewtypeDeriving InstanceSigs MagicHash MultiParamTypeClasses NamedFieldPuns NondecreasingIndentation RankNTypes RecordWildCards RoleAnnotations ScopedTypeVariables StandaloneDeriving Trustworthy TupleSections TypeFamilies TypeSynonymInstances UnboxedTuples UndecidableInstances default-extensions: BangPatterns MonoLocalBinds NoImplicitPrelude ScopedTypeVariables TypeOperators c-sources: libraries/ghc-heap/cbits/HeapPrim.cmm compiler/cbits/genSym.c 
compiler/cbits/cutils.c compiler/cbits/keepCAFsForGHCi.c hs-source-dirs: ghc-lib/stage0/libraries/ghc-boot/build ghc-lib/stage0/compiler/build libraries/template-haskell libraries/ghc-boot-th libraries/ghc-boot libraries/ghc-heap libraries/ghci compiler autogen-modules: GHC.Parser.Lexer GHC.Parser exposed-modules: GHC.BaseDir GHC.Builtin.Names GHC.Builtin.PrimOps GHC.Builtin.PrimOps.Ids GHC.Builtin.Types GHC.Builtin.Types.Prim GHC.Builtin.Uniques GHC.ByteCode.Types GHC.Cmm GHC.Cmm.BlockId GHC.Cmm.CLabel GHC.Cmm.Dataflow.Block GHC.Cmm.Dataflow.Collections GHC.Cmm.Dataflow.Graph GHC.Cmm.Dataflow.Label GHC.Cmm.Expr GHC.Cmm.MachOp GHC.Cmm.Node GHC.Cmm.Switch GHC.Cmm.Type GHC.CmmToAsm.CFG.Weight GHC.Core GHC.Core.Class GHC.Core.Coercion GHC.Core.Coercion.Axiom GHC.Core.Coercion.Opt GHC.Core.ConLike GHC.Core.DataCon GHC.Core.FVs GHC.Core.FamInstEnv GHC.Core.InstEnv GHC.Core.Lint GHC.Core.Make GHC.Core.Map.Expr GHC.Core.Map.Type GHC.Core.Multiplicity GHC.Core.Opt.Arity GHC.Core.Opt.CallerCC GHC.Core.Opt.ConstantFold GHC.Core.Opt.Monad GHC.Core.Opt.OccurAnal GHC.Core.PatSyn GHC.Core.Ppr GHC.Core.Predicate GHC.Core.Reduction GHC.Core.RoughMap GHC.Core.Rules GHC.Core.Seq GHC.Core.SimpleOpt GHC.Core.Stats GHC.Core.Subst GHC.Core.Tidy GHC.Core.TyCo.FVs GHC.Core.TyCo.Ppr GHC.Core.TyCo.Rep GHC.Core.TyCo.Subst GHC.Core.TyCo.Tidy GHC.Core.TyCon GHC.Core.TyCon.Env GHC.Core.TyCon.RecWalk GHC.Core.Type GHC.Core.Unfold GHC.Core.Unfold.Make GHC.Core.Unify GHC.Core.UsageEnv GHC.Core.Utils GHC.CoreToIface GHC.Data.Bag GHC.Data.Bool GHC.Data.BooleanFormula GHC.Data.EnumSet GHC.Data.FastMutInt GHC.Data.FastString GHC.Data.FastString.Env GHC.Data.FiniteMap GHC.Data.Graph.Directed GHC.Data.Graph.UnVar GHC.Data.IOEnv GHC.Data.List.SetOps GHC.Data.Maybe GHC.Data.OrdList GHC.Data.Pair GHC.Data.ShortText GHC.Data.SizedSeq GHC.Data.SmallArray GHC.Data.Stream GHC.Data.Strict GHC.Data.StringBuffer GHC.Data.TrieMap GHC.Driver.Backend GHC.Driver.Backpack.Syntax GHC.Driver.CmdLine GHC.Driver.Config 
GHC.Driver.Config.Diagnostic GHC.Driver.Config.Logger GHC.Driver.Config.Parser GHC.Driver.Env GHC.Driver.Env.KnotVars GHC.Driver.Env.Types GHC.Driver.Errors GHC.Driver.Errors.Ppr GHC.Driver.Errors.Types GHC.Driver.Flags GHC.Driver.Hooks GHC.Driver.Monad GHC.Driver.Phases GHC.Driver.Pipeline.Monad GHC.Driver.Pipeline.Phases GHC.Driver.Plugins GHC.Driver.Ppr GHC.Driver.Session GHC.Exts.Heap GHC.Exts.Heap.ClosureTypes GHC.Exts.Heap.Closures GHC.Exts.Heap.Constants GHC.Exts.Heap.FFIClosures GHC.Exts.Heap.FFIClosures_ProfilingDisabled GHC.Exts.Heap.FFIClosures_ProfilingEnabled GHC.Exts.Heap.InfoTable GHC.Exts.Heap.InfoTable.Types GHC.Exts.Heap.InfoTableProf GHC.Exts.Heap.ProfInfo.PeekProfInfo GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingDisabled GHC.Exts.Heap.ProfInfo.PeekProfInfo_ProfilingEnabled GHC.Exts.Heap.ProfInfo.Types GHC.Exts.Heap.Utils GHC.ForeignSrcLang GHC.ForeignSrcLang.Type GHC.Hs GHC.Hs.Binds GHC.Hs.Decls GHC.Hs.Doc GHC.Hs.DocString GHC.Hs.Dump GHC.Hs.Expr GHC.Hs.Extension GHC.Hs.ImpExp GHC.Hs.Instances GHC.Hs.Lit GHC.Hs.Pat GHC.Hs.Type GHC.Hs.Utils GHC.HsToCore.Errors.Ppr GHC.HsToCore.Errors.Types GHC.HsToCore.Pmc.Ppr GHC.HsToCore.Pmc.Solver.Types GHC.HsToCore.Pmc.Types GHC.Iface.Ext.Fields GHC.Iface.Recomp.Binary GHC.Iface.Syntax GHC.Iface.Type GHC.LanguageExtensions GHC.LanguageExtensions.Type GHC.Lexeme GHC.Linker.Static.Utils GHC.Linker.Types GHC.Parser GHC.Parser.Annotation GHC.Parser.CharClass GHC.Parser.Errors.Basic GHC.Parser.Errors.Ppr GHC.Parser.Errors.Types GHC.Parser.HaddockLex GHC.Parser.Header GHC.Parser.Lexer GHC.Parser.PostProcess GHC.Parser.PostProcess.Haddock GHC.Parser.Types GHC.Platform GHC.Platform.AArch64 GHC.Platform.ARM GHC.Platform.ArchOS GHC.Platform.Constants GHC.Platform.NoRegs GHC.Platform.PPC GHC.Platform.Profile GHC.Platform.RISCV64 GHC.Platform.Reg GHC.Platform.Reg.Class GHC.Platform.Regs GHC.Platform.S390X GHC.Platform.Ways GHC.Platform.X86 GHC.Platform.X86_64 GHC.Prelude GHC.Runtime.Context GHC.Runtime.Eval.Types 
GHC.Runtime.Heap.Layout GHC.Runtime.Interpreter GHC.Runtime.Interpreter.Types GHC.Serialized GHC.Settings GHC.Settings.Config GHC.Settings.Constants GHC.Stg.InferTags.TagSig GHC.Stg.Syntax GHC.StgToCmm.Config GHC.StgToCmm.Types GHC.SysTools.BaseDir GHC.SysTools.Terminal GHC.Tc.Errors.Hole.FitTypes GHC.Tc.Errors.Ppr GHC.Tc.Errors.Types GHC.Tc.Solver.InertSet GHC.Tc.Solver.Types GHC.Tc.Types GHC.Tc.Types.Constraint GHC.Tc.Types.Evidence GHC.Tc.Types.Origin GHC.Tc.Types.Rank GHC.Tc.Utils.TcType GHC.Types.Annotations GHC.Types.Avail GHC.Types.Basic GHC.Types.BreakInfo GHC.Types.CompleteMatch GHC.Types.CostCentre GHC.Types.CostCentre.State GHC.Types.Cpr GHC.Types.Demand GHC.Types.Error GHC.Types.FieldLabel GHC.Types.Fixity GHC.Types.Fixity.Env GHC.Types.ForeignCall GHC.Types.ForeignStubs GHC.Types.Hint GHC.Types.Hint.Ppr GHC.Types.HpcInfo GHC.Types.IPE GHC.Types.Id GHC.Types.Id.Info GHC.Types.Id.Make GHC.Types.Literal GHC.Types.Meta GHC.Types.Name GHC.Types.Name.Cache GHC.Types.Name.Env GHC.Types.Name.Occurrence GHC.Types.Name.Ppr GHC.Types.Name.Reader GHC.Types.Name.Set GHC.Types.PkgQual GHC.Types.RepType GHC.Types.SafeHaskell GHC.Types.SourceError GHC.Types.SourceFile GHC.Types.SourceText GHC.Types.SrcLoc GHC.Types.Target GHC.Types.Tickish GHC.Types.TyThing GHC.Types.TypeEnv GHC.Types.Unique GHC.Types.Unique.DFM GHC.Types.Unique.DSet GHC.Types.Unique.FM GHC.Types.Unique.Map GHC.Types.Unique.SDFM GHC.Types.Unique.Set GHC.Types.Unique.Supply GHC.Types.Var GHC.Types.Var.Env GHC.Types.Var.Set GHC.UniqueSubdir GHC.Unit GHC.Unit.Database GHC.Unit.Env GHC.Unit.External GHC.Unit.Finder.Types GHC.Unit.Home GHC.Unit.Home.ModInfo GHC.Unit.Info GHC.Unit.Module GHC.Unit.Module.Deps GHC.Unit.Module.Env GHC.Unit.Module.Graph GHC.Unit.Module.Imported GHC.Unit.Module.Location GHC.Unit.Module.ModDetails GHC.Unit.Module.ModGuts GHC.Unit.Module.ModIface GHC.Unit.Module.ModSummary GHC.Unit.Module.Name GHC.Unit.Module.Status GHC.Unit.Module.Warnings GHC.Unit.Parser GHC.Unit.Ppr 
GHC.Unit.State GHC.Unit.Types GHC.Utils.Binary GHC.Utils.Binary.Typeable GHC.Utils.BufHandle GHC.Utils.CliOption GHC.Utils.Constants GHC.Utils.Encoding GHC.Utils.Error GHC.Utils.Exception GHC.Utils.FV GHC.Utils.Fingerprint GHC.Utils.GlobalVars GHC.Utils.IO.Unsafe GHC.Utils.Json GHC.Utils.Lexeme GHC.Utils.Logger GHC.Utils.Misc GHC.Utils.Monad GHC.Utils.Monad.State.Strict GHC.Utils.Outputable GHC.Utils.Panic GHC.Utils.Panic.Plain GHC.Utils.Ppr GHC.Utils.Ppr.Colour GHC.Utils.TmpFs GHC.Utils.Trace GHC.Version GHCi.BinaryArray GHCi.BreakArray GHCi.FFI GHCi.Message GHCi.RemoteTypes GHCi.ResolvedBCO GHCi.TH.Binary Language.Haskell.Syntax Language.Haskell.Syntax.Binds Language.Haskell.Syntax.Decls Language.Haskell.Syntax.Expr Language.Haskell.Syntax.Extension Language.Haskell.Syntax.Lit Language.Haskell.Syntax.Pat Language.Haskell.Syntax.Type Language.Haskell.TH Language.Haskell.TH.LanguageExtensions Language.Haskell.TH.Lib Language.Haskell.TH.Lib.Internal Language.Haskell.TH.Lib.Map Language.Haskell.TH.Ppr Language.Haskell.TH.PprLib Language.Haskell.TH.Syntax